text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
import sys
import os
# print(os.path.abspath(os.path.join(os.getcwd(),".")))
sys.path.append(os.path.abspath(os.path.join(os.getcwd(),"..")))
import torch
import torch.backends.cudnn as cudnn
import argparse
import shutil
import sys
import random
import time
import numpy as np
from datetime import datetime
from vsgia_model.config import cfg
from vsgia_model.dataset.gazefollow import GazeFollowLoader
from vsgia_model.utils.model_utils import init_model,setup_model,save_checkpoint,resume_checkpoint,init_checkpoint
from vsgia_model.tester import Tester
def test_engine(opt):
    """Run evaluation of the VSG-IA model on the GazeFollow test split.

    opt: merged yacs config node; reads opt.OTHER.seed, opt.OTHER.device
    and opt.TEST.model_para (path to a saved state_dict).
    Raises FileNotFoundError if the checkpoint file does not exist.
    """
    # init model and its criterion/optimizer (the optimizer is unused at
    # test time, but setup_model builds both together)
    model = init_model(opt)
    criterion, optimizer = setup_model(model, opt)

    # seed every RNG so the evaluation is reproducible
    random.seed(opt.OTHER.seed)
    np.random.seed(opt.OTHER.seed)
    torch.manual_seed(opt.OTHER.seed)
    cudnn.deterministic = True

    # load the trained weights; map_location makes a GPU-saved checkpoint
    # loadable on a CPU-only machine (the original call crashed there)
    if os.path.isfile(opt.TEST.model_para):
        state = torch.load(opt.TEST.model_para, map_location=opt.OTHER.device)
        model.load_state_dict(state)
    else:
        raise FileNotFoundError("No such model file: {}".format(opt.TEST.model_para))

    # build the test loader and run the tester
    dataloader = GazeFollowLoader(opt)
    val_loader = dataloader.test_loader
    tester = Tester(model, criterion, val_loader, opt, writer=None)
    eval_dist, eval_mindist, eval_auc = tester.test(opt)
    print("Eval Avg dist.: {} | Eval Min dist.:{} | Eval AUC :{}".format(eval_dist, eval_mindist, eval_auc))
if __name__ == '__main__':
    # Command-line entry point: merge YAML config + CLI overrides, pick the
    # device, then evaluate the model.
    parser = argparse.ArgumentParser(description="PyTorch VSG-IA Model")
    parser.add_argument("--cfg", type=str, metavar="FILE",
                        default="config/gazefollow_cfg.yaml",
                        help="path to config file")
    parser.add_argument("--gpu", action="store_true", default=False,
                        help="choose if use gpus")
    parser.add_argument("--is_train", action="store_true", default=False,
                        help="choose if train")
    parser.add_argument("--is_test", action="store_true", default=True,
                        help="choose if test")
    parser.add_argument("opts", nargs=argparse.REMAINDER, default=None,
                        help="Modify config options using the command-line")
    args = parser.parse_args()

    # overlay the YAML file and any trailing KEY VALUE overrides
    cfg.merge_from_file(args.cfg)
    cfg.merge_from_list(args.opts)

    use_cuda = torch.cuda.is_available() and args.gpu
    cfg.OTHER.device = 'cuda:0' if use_cuda else 'cpu'
    print("The model running on {}".format(cfg.OTHER.device))

    test_engine(cfg)
|
{"hexsha": "ca8d89382e1895bc056bb5746e5a0b6365c0ae1d", "size": 2547, "ext": "py", "lang": "Python", "max_stars_repo_path": "vsgia_model/main.py", "max_stars_repo_name": "nkuhzx/VSG-IA", "max_stars_repo_head_hexsha": "075b58c2bf89562cc197e721f050396589861c6a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vsgia_model/main.py", "max_issues_repo_name": "nkuhzx/VSG-IA", "max_issues_repo_head_hexsha": "075b58c2bf89562cc197e721f050396589861c6a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vsgia_model/main.py", "max_forks_repo_name": "nkuhzx/VSG-IA", "max_forks_repo_head_hexsha": "075b58c2bf89562cc197e721f050396589861c6a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3421052632, "max_line_length": 114, "alphanum_fraction": 0.6713780919, "include": true, "reason": "import numpy", "num_tokens": 592}
|
#!/usr/bin/env python
import rospy
import sys
from std_msgs.msg import Float32, ColorRGBA, Int32
from geometry_msgs.msg import PoseStamped, Twist, Vector3, Point
from ford_msgs.msg import PedTrajVec, NNActions, PlannerMode, Clusters
from visualization_msgs.msg import Marker, MarkerArray
import numpy as np
import numpy.matlib
import pickle
from matplotlib import cm
import matplotlib.pyplot as plt
import copy
import os
import time
import random
import math
import rospkg
import network
import agent
import util
# Radius [m] assumed for every pedestrian when building agent states.
PED_RADIUS = 0.3

# angle_1 - angle_2
# contains direction in range [-3.14, 3.14]
def find_angle_diff(angle_1, angle_2):
    """Return angle_1 - angle_2 wrapped into [-pi, pi)."""
    raw = angle_1 - angle_2
    return np.mod(raw + np.pi, 2.0 * np.pi) - np.pi
class NN_jackal():
    # ROS node that drives a Jackal robot with a GA3C-CADRL policy:
    # subscribes to pose / velocity / obstacle topics, periodically queries
    # the network for an action, and publishes Twist commands + RViz markers.
    def __init__(self, veh_name, veh_data, nn, actions):
        # veh_name: vehicle id string (e.g. 'JA01')
        # veh_data: dict with 'goal', 'radius', 'pref_speed', ...
        # nn:       trained policy network exposing predict_p()
        # actions:  discrete action table indexed by the network's argmax
        self.node_name = rospy.get_name()
        self.prev_other_agents_state = []

        # vehicle info
        self.veh_name = veh_name
        self.veh_data = veh_data
        # self.agent = agent.Agent(0.0, 0.0, 100.0, 100.0, radius, pref_speed, initial_heading, id)

        # neural network
        self.nn = nn
        self.actions = actions
        # self.value_net = value_net
        self.operation_mode = PlannerMode()
        self.operation_mode.mode = self.operation_mode.NN

        # for subscribers
        self.pose = PoseStamped()
        self.vel = Vector3()
        self.psi = 0.0  # yaw extracted from self.pose in cbPose
        self.ped_traj_vec = []
        self.other_agents_state = []
        self.feasible_actions = NNActions()

        # for publishers
        self.global_goal = PoseStamped()
        self.goal = PoseStamped()
        self.goal.pose.position.x = veh_data['goal'][0]
        self.goal.pose.position.y = veh_data['goal'][1]
        self.desired_position = PoseStamped()
        self.desired_action = np.zeros((2,))  # [speed, global heading]

        # handle obstacles close to vehicle's front
        self.stop_moving_flag = False
        self.d_min = 0.0
        self.new_subgoal_received = False
        self.new_global_goal_received = False

        # visualization
        self.path_marker = Marker()

        # Clusters
        self.prev_clusters = Clusters()
        self.current_clusters = Clusters()

        # subscribers and publishers
        self.num_poses = 0
        self.num_actions_computed = 0.0
        self.pub_others = rospy.Publisher('~other_vels',Vector3,queue_size=1)
        self.pub_twist = rospy.Publisher('~nn_cmd_vel',Twist,queue_size=1)
        self.pub_pose_marker = rospy.Publisher('~pose_marker',Marker,queue_size=1)
        self.pub_agent_marker = rospy.Publisher('~agent_marker',Marker,queue_size=1)
        self.pub_agent_markers = rospy.Publisher('~agent_markers',MarkerArray,queue_size=1)
        self.pub_path_marker = rospy.Publisher('~path_marker',Marker,queue_size=1)
        self.pub_goal_path_marker = rospy.Publisher('~goal_path_marker',Marker,queue_size=1)
        self.sub_pose = rospy.Subscriber('~pose',PoseStamped,self.cbPose)
        self.sub_vel = rospy.Subscriber('~velocity',Vector3,self.cbVel)
        self.sub_nn_actions = rospy.Subscriber('~safe_actions',NNActions,self.cbNNActions)
        self.sub_mode = rospy.Subscriber('~mode',PlannerMode, self.cbPlannerMode)
        self.sub_global_goal = rospy.Subscriber('~goal',PoseStamped, self.cbGlobalGoal)

        # obstacle source: clustered tracker (default) or raw ped trajectories
        self.use_clusters = True
        # self.use_clusters = False
        if self.use_clusters:
            self.sub_clusters = rospy.Subscriber('~clusters',Clusters, self.cbClusters)
        else:
            self.sub_peds = rospy.Subscriber('~peds',PedTrajVec, self.cbPeds)

        # control timer: cbControl publishes commands at 100 Hz,
        # cbComputeActionGA3C queries the network at 10 Hz
        self.control_timer = rospy.Timer(rospy.Duration(0.01),self.cbControl)
        self.nn_timer = rospy.Timer(rospy.Duration(0.1),self.cbComputeActionGA3C)
def cbGlobalGoal(self, msg):
    """Accept a new global goal: spin in place toward it before resuming NN mode."""
    self.new_global_goal_received = True
    self.global_goal = msg
    self.operation_mode.mode = self.operation_mode.SPIN_IN_PLACE

    # the subgoal tracks the global goal until the planner overrides it
    self.goal.header = msg.header
    self.goal.pose.position.x = msg.pose.position.x
    self.goal.pose.position.y = msg.pose.position.y
    self.new_subgoal_received = True
def cbNNActions(self,msg):
    # Cache the latest safe-action set from the safety node.
    # The disabled block below used to forward the message's subgoal to
    # self.goal every 20 messages; left for reference.
    # if msg.header.seq % 20 == 0:
    #     self.goal.pose.position.x = msg.subgoal.x
    #     self.goal.pose.position.y = msg.subgoal.y
    #     self.goal.header = msg.header
    #     self.new_subgoal_received = True
    self.feasible_actions = msg
def cbPlannerMode(self, msg):
    # Adopt the requested planner mode, but immediately force it back to NN:
    # this node always runs the learned policy.
    self.operation_mode = msg
    self.operation_mode.mode = self.operation_mode.NN
def cbPose(self, msg):
    # Cache the latest pose and extract yaw from the orientation quaternion.
    self.num_poses += 1
    q = msg.pose.orientation
    # quaternion-to-yaw conversion; result bounded by [-pi, pi]
    self.psi = np.arctan2(2.0*(q.w*q.z + q.x*q.y), 1-2*(q.y*q.y+q.z*q.z))
    self.pose = msg
    self.visualize_pose(msg.pose.position,msg.pose.orientation)
def cbVel(self, msg):
    # Cache the latest velocity message (Vector3).
    self.vel = msg
def cbPeds(self, msg):
    # Build self.other_agents_state (CADRL agent-state vectors) from raw
    # pedestrian trajectories. Only subscribed when use_clusters is False.
    # NOTE(review): self.value_net below looks stale -- value_net is
    # commented out in __init__, so the multi-pedestrian branch would raise
    # AttributeError; confirm before re-enabling this callback.
    t_start = rospy.Time.now()
    self.ped_traj_vec = [ped_traj for ped_traj in msg.ped_traj_vec if len(ped_traj.traj) > 0]
    num_peds = len(self.ped_traj_vec)

    # compute relative position with respect to the Jackal
    rel_dist = np.zeros((num_peds, ))
    rel_angle = np.zeros((num_peds, ))
    # (rel_dist, angle)
    for i, ped_traj in enumerate(self.ped_traj_vec):
        rel_x = ped_traj.traj[-1].pose.x - self.pose.pose.position.x
        rel_y = ped_traj.traj[-1].pose.y - self.pose.pose.position.y
        rel_dist[i] = np.linalg.norm(np.array([rel_x, rel_y]))
        rel_angle[i] = find_angle_diff(np.arctan2(rel_y, rel_x), self.psi)

    # ignore people in the back of Jackal (60 deg cone)
    valid_inds = np.where(abs(rel_angle)< 5.0 / 6.0 * np.pi)[0]

    # get the n closest agents
    self.other_agents_state = []
    if len(valid_inds) == 0:
        return
    else:
        if len(valid_inds) == 1:
            # print "valid_inds:", valid_inds
            # print "self.ped_traj_vec:", self.ped_traj_vec
            valid_inds = valid_inds[0]
            ped_traj_vec = [self.ped_traj_vec[valid_inds]]
            rel_dist = np.array([rel_dist[valid_inds]])
        elif len(valid_inds) > 1:
            # print 'before', len(self.ped_traj_vec)
            # print 'valid_inds', valid_inds
            ped_traj_vec = [self.ped_traj_vec[tt] for tt in valid_inds]
            # print 'after', len(self.ped_traj_vec)
            rel_dist = rel_dist[valid_inds]

        # keep only the closest num_agents-1 neighbors (argpartition avoids
        # a full sort)
        # num_neighbors = min(len(rel_dist), self.value_net.num_agents)
        # print 'num_neighbors', num_neighbors
        # print 'rel_dist', rel_dist
        # neighbor_inds = np.argpartition(rel_dist, num_neighbors)[:num_neighbors]
        if len(rel_dist) > self.value_net.num_agents-1:
            num_neighbors = self.value_net.num_agents-1
            neighbor_inds = np.argpartition(rel_dist, num_neighbors)[:num_neighbors]
        else:
            neighbor_inds = np.arange(len(rel_dist))

        # agent state: [pos.x, pos.y, vel.x, vel.y, heading_angle, pref_speed, \
        #               goals[0].x, goals[0].y, radius, turning_dir]
        for tt in neighbor_inds:
            ped_traj = ped_traj_vec[tt]
            # rel pos, rel vel, size
            x = ped_traj.traj[-1].pose.x; y = ped_traj.traj[-1].pose.y
            v_x = ped_traj.traj[-1].velocity.x; v_y = ped_traj.traj[-1].velocity.y
            radius = PED_RADIUS;turning_dir = 0.0
            # helper fields
            heading_angle = np.arctan2(v_y, v_x)
            pref_speed = np.linalg.norm(np.array([v_x, v_y]))
            goal_x = x + 5.0; goal_y = y + 5.0

            # filter speed: exponential smoothing against the previous state
            # that plausibly matches this pedestrian (position+heading gate)
            alpha = 0.2
            for prev_other_agent_state in self.prev_other_agents_state:
                pos_diff = np.linalg.norm(prev_other_agent_state[0:2] - np.array([x,y]))
                heading_diff_abs = abs(find_angle_diff(prev_other_agent_state[4], heading_angle))
                if pos_diff < 0.5 and heading_diff_abs < np.pi / 4.0:
                    v_x = alpha * v_x + (1-alpha) * prev_other_agent_state[2]
                    v_y = alpha * v_y + (1-alpha) * prev_other_agent_state[3]
                    # TODO: find the best match rather than the first match
                    break
            # treat near-stationary pedestrians as stopped
            if pref_speed < 0.2:
                pref_speed = 0; v_x = 0; v_y = 0
            other_agent_state = np.array([x, y, v_x, v_y, heading_angle, pref_speed, \
                goal_x, goal_y, radius, turning_dir])
            self.other_agents_state.append(other_agent_state)

        self.prev_other_agents_state = copy.deepcopy(self.other_agents_state)
    t_end = rospy.Time.now()
    # print "cbPeds took:", (t_end - t_start).to_sec(), "sec"
def cbClusters(self, msg):
    # Build self.other_agents_state as agent.Agent objects from the cluster
    # tracker (one cluster per obstacle/pedestrian). Default obstacle source.
    other_agents = []

    xs = []; ys = []; radii = []; labels = []
    num_clusters = len(msg.labels)
    for i in range(num_clusters):
        index = msg.labels[i]
        x = msg.mean_points[i].x; y = msg.mean_points[i].y
        v_x = msg.velocities[i].x; v_y = msg.velocities[i].y

        # inflate the cluster radius: larger of the mean-to-min / mean-to-max
        # extents, scaled by 1.5, floored at the pedestrian radius
        # radius = PED_RADIUS
        lower_r = np.linalg.norm(np.array([msg.mean_points[i].x-msg.min_points[i].x, msg.mean_points[i].y-msg.min_points[i].y]))
        upper_r = np.linalg.norm(np.array([msg.mean_points[i].x-msg.max_points[i].x, msg.mean_points[i].y-msg.max_points[i].y]))
        inflation_factor = 1.5
        radius = max(PED_RADIUS, inflation_factor * max(upper_r, lower_r))

        xs.append(x); ys.append(y); radii.append(radius); labels.append(index)
        # self.visualize_other_agent(x,y,radius,msg.labels[i])

        # helper fields
        heading_angle = np.arctan2(v_y, v_x)
        pref_speed = np.linalg.norm(np.array([v_x, v_y]))
        goal_x = x + 5.0; goal_y = y + 5.0

        # # filter speed
        # alpha = 0.2
        # for prev_other_agent_state in self.prev_other_agents_state:
        #     pos_diff = np.linalg.norm(prev_other_agent_state[0:2] - np.array([x,y]))
        #     heading_diff_abs = abs(find_angle_diff(prev_other_agent_state[4], heading_angle))
        #     if pos_diff < 0.5 and heading_diff_abs < np.pi / 4.0:
        #         v_x = alpha * v_x + (1-alpha) * prev_other_agent_state[2]
        #         v_y = alpha * v_y + (1-alpha) * prev_other_agent_state[3]
        #         # TODO: find the best match rather than the first match
        #         break
        # treat near-stationary clusters as static obstacles
        if pref_speed < 0.2:
            pref_speed = 0; v_x = 0; v_y = 0

        other_agents.append(agent.Agent(x, y, goal_x, goal_y, radius, pref_speed, heading_angle, index))
    self.visualize_other_agents(xs, ys, radii, labels)
    self.other_agents_state = other_agents
def stop_moving(self):
    """Publish a zero-velocity command (default-constructed Twist is all zeros)."""
    self.pub_twist.publish(Twist())
def update_action(self, action):
    """Store the chosen action and the position it implies.

    action: [linear speed, global heading]. The desired position is the
    pose reached after 1 s at that speed along that heading; it is used by
    cbControl and the RViz visualization.
    """
    self.desired_action = action
    self.desired_position.pose.position.x = self.pose.pose.position.x + 1*action[0]*np.cos(action[1])
    self.desired_position.pose.position.y = self.pose.pose.position.y + 1*action[0]*np.sin(action[1])
    # NOTE(review): the original also built a local Twist here
    # (linear = action[0], angular = 2 * wrapped heading error) but never
    # published it; the actual command is computed and published by
    # cbControl, so that dead computation was removed.
def find_vmax(self, d_min, heading_diff):
    """Cap linear speed as a function of heading error and clear space.

    With nothing in front of the vehicle it is not important to track the
    heading perfectly; with an obstacle right in front the vehicle must
    turn in place first, then drive forward. Returns the allowed speed.
    """
    clear_dist = max(0.0, d_min)
    x = 0.3
    y = max(clear_dist, 0.0)
    # enforce x < y so a short clear distance forces slow forward motion
    if x > y:
        x = 0
    w_max = 1
    # x^2 + y^2 = (v_max/w_max)^2
    v_max = w_max * np.sqrt(x ** 2 + y ** 2)
    v_max = np.clip(v_max, 0.0, self.veh_data['pref_speed'])
    # nearly aligned with the desired heading (< 10 deg): full speed
    if abs(heading_diff) < np.pi / 18:
        return self.veh_data['pref_speed']
    return v_max
def cbControl(self, event):
    # 100 Hz timer: translate the latest desired action into a Twist.
    # NOTE(review): Python precedence parses the condition below as
    # `no_goal or (stop_flag and not new_goal)`; confirm that grouping is
    # the intended one rather than `(no_goal or stop_flag) and not new_goal`.
    if self.goal.header.stamp == rospy.Time(0) or self.stop_moving_flag \
            and not self.new_global_goal_received:
        self.stop_moving()
        return
    elif self.operation_mode.mode==self.operation_mode.NN:
        # P-control on yaw toward the NN's desired heading
        desired_yaw = self.desired_action[1]
        yaw_error = desired_yaw - self.psi
        if abs(yaw_error) > np.pi:
            yaw_error -= np.sign(yaw_error)*2*np.pi
        # print 'yaw_error:',yaw_error
        # max_yaw_error = 0.8
        # yaw_error = self.desired_action[1]
        gain = 2
        vw = gain*yaw_error

        # cap forward speed by the clear distance ahead (d_min)
        use_d_min = False
        if True:
            use_d_min = True
            # print "vmax:", self.find_vmax(self.d_min,yaw_error)
            vx = min(self.desired_action[0], self.find_vmax(self.d_min,yaw_error))
        else:
            vx = self.desired_action[0]
            # print "vx:", vx
        # elif abs(yaw_error) < max_yaw_error:
        #     vw = gain*yaw_error
        # else:
        #     vw = gain*max_yaw_error*np.sign(yaw_error)

        twist = Twist()
        twist.angular.z = vw
        twist.linear.x = vx
        self.pub_twist.publish(twist)
        self.visualize_action(use_d_min)
        return
    elif self.operation_mode.mode == self.operation_mode.SPIN_IN_PLACE:
        # rotate in place until roughly facing the global goal
        print 'Spinning in place.'
        self.stop_moving_flag = False
        angle_to_goal = np.arctan2(self.global_goal.pose.position.y - self.pose.pose.position.y, \
            self.global_goal.pose.position.x - self.pose.pose.position.x)
        global_yaw_error = self.psi - angle_to_goal
        if abs(global_yaw_error) > 0.5:
            vx = 0.0
            vw = 1.0
            twist = Twist()
            twist.angular.z = vw
            twist.linear.x = vx
            self.pub_twist.publish(twist)
        else:
            print 'Done spinning in place'
            self.operation_mode.mode = self.operation_mode.NN
            self.new_global_goal_received = False
        return
    else:
        self.stop_moving()
        return
def cbComputeActionGA3C(self, event):
    # 10 Hz timer: build the observation (host agent + neighbors), query the
    # GA3C policy, and store the chosen [speed, global heading] action.
    if self.operation_mode.mode!=self.operation_mode.NN:
        print 'Not in NN mode'
        print self.operation_mode.mode
        return

    # construct agent_state
    x = self.pose.pose.position.x; y = self.pose.pose.position.y
    v_x = self.vel.x; v_y = self.vel.y
    radius = self.veh_data['radius']; turning_dir = 0.0
    heading_angle = self.psi
    pref_speed = self.veh_data['pref_speed']
    goal_x = self.goal.pose.position.x; goal_y = self.goal.pose.position.y

    # in case current speed is larger than desired speed: rescale velocity
    v = np.linalg.norm(np.array([v_x, v_y]))
    if v > pref_speed:
        v_x = v_x * pref_speed / v
        v_y = v_y * pref_speed / v

    host_agent = agent.Agent(x, y, goal_x, goal_y, radius, pref_speed, heading_angle, 0)
    host_agent.vel_global_frame = np.array([v_x, v_y])
    # host_agent.print_agent_info()

    other_agents_state = copy.deepcopy(self.other_agents_state)
    obs = host_agent.observe(other_agents_state)[1:]
    obs = np.expand_dims(obs, axis=0)
    # print "obs:", obs

    # pick the argmax action; raw_action = [speed fraction, heading change]
    predictions = self.nn.predict_p(obs, None)[0]
    # print "predictions:", predictions
    # print "best action index:", np.argmax(predictions)
    raw_action = copy.deepcopy(self.actions[np.argmax(predictions)])
    action = np.array([pref_speed*raw_action[0], util.wrap(raw_action[1] + self.psi)])
    # print "raw_action:", raw_action
    # print "action:", action

    # if close to goal: taper speed and turn amount, latch the stop flag
    kp_v = 0.5
    kp_r = 1

    if host_agent.dist_to_goal < 2.0: # and self.percentComplete>=0.9:
        # print "somewhat close to goal"
        pref_speed = max(min(kp_v * (host_agent.dist_to_goal-0.1), pref_speed), 0.0)
        action[0] = min(raw_action[0], pref_speed)
        turn_amount = max(min(kp_r * (host_agent.dist_to_goal-0.1), 1.0), 0.0) * raw_action[1]
        action[1] = util.wrap(turn_amount + self.psi)
    if host_agent.dist_to_goal < 0.3:
        self.stop_moving_flag = True
    else:
        self.stop_moving_flag = False

    # print 'chosen action (rel angle)', action[0], action[1]
    self.update_action(action)
def update_subgoal(self, subgoal):
    """Set the local goal to (subgoal[0], subgoal[1])."""
    pos = self.goal.pose.position
    pos.x = subgoal[0]
    pos.y = subgoal[1]
def visualize_subgoal(self,subgoal, subgoal_options=None):
    # RViz: green cube at the chosen subgoal, plus red/blue cubes for any
    # alternative subgoal options.
    markers = MarkerArray()  # NOTE(review): built but never published

    # Display GREEN DOT at NN subgoal
    marker = Marker()
    marker.header.stamp = rospy.Time.now()
    marker.header.frame_id = 'map'
    marker.ns = 'subgoal'
    marker.id = 0
    marker.type = marker.CUBE
    marker.action = marker.ADD
    marker.pose.position.x = subgoal[0]
    marker.pose.position.y = subgoal[1]
    marker.scale = Vector3(x=0.4,y=0.4,z=0.2)
    marker.color = ColorRGBA(g=1.0,a=1.0)
    marker.lifetime = rospy.Duration(2.0)
    self.pub_goal_path_marker.publish(marker)

    if subgoal_options is not None:
        for i in xrange(len(subgoal_options)):  # xrange: this file is Python 2
            marker = Marker()
            marker.header.stamp = rospy.Time.now()
            marker.header.frame_id = 'map'
            marker.ns = 'subgoal'
            marker.id = i+1
            marker.type = marker.CUBE
            marker.action = marker.ADD
            marker.pose.position.x = subgoal_options[i][0]
            marker.pose.position.y = subgoal_options[i][1]
            marker.scale = Vector3(x=0.2,y=0.2,z=0.2)
            marker.color = ColorRGBA(b=1.0,r=1.0,a=1.0)
            marker.lifetime = rospy.Duration(1.0)
            self.pub_goal_path_marker.publish(marker)
def visualize_pose(self,pos,orientation):
    # RViz: yellow box at the current pose plus a red breadcrumb cube per
    # pose message marking the driven trajectory.
    # Yellow Box for Vehicle
    marker = Marker()
    marker.header.stamp = rospy.Time.now()
    marker.header.frame_id = 'map'
    marker.ns = 'agent'
    marker.id = 0
    marker.type = marker.CUBE
    marker.action = marker.ADD
    marker.pose.position = pos
    marker.pose.orientation = orientation
    marker.scale = Vector3(x=0.7,y=0.42,z=1)
    marker.color = ColorRGBA(r=1.0,g=1.0,a=1.0)
    marker.lifetime = rospy.Duration(1.0)
    self.pub_pose_marker.publish(marker)

    # Red track for trajectory over time
    marker = Marker()
    marker.header.stamp = rospy.Time.now()
    marker.header.frame_id = 'map'
    marker.ns = 'agent'
    marker.id = self.num_poses  # unique id so earlier cubes persist
    marker.type = marker.CUBE
    marker.action = marker.ADD
    marker.pose.position = pos
    marker.pose.orientation = orientation
    marker.scale = Vector3(x=0.2,y=0.2,z=0.2)
    marker.color = ColorRGBA(r=1.0,a=1.0)
    marker.lifetime = rospy.Duration(10.0)
    self.pub_pose_marker.publish(marker)
def visualize_other_agents(self,xs,ys,radii,labels):
    # RViz: one orange cylinder per detected agent/cluster, published as a
    # single MarkerArray.
    markers = MarkerArray()
    for i in range(len(xs)):
        # Orange box for other agent
        marker = Marker()
        marker.header.stamp = rospy.Time.now()
        marker.header.frame_id = 'map'
        marker.ns = 'other_agent'
        marker.id = labels[i]
        marker.type = marker.CYLINDER
        marker.action = marker.ADD
        marker.pose.position.x = xs[i]
        marker.pose.position.y = ys[i]
        # marker.pose.orientation = orientation
        marker.scale = Vector3(x=2*radii[i],y=2*radii[i],z=1)
        marker.color = ColorRGBA(r=1.0,g=0.4,a=1.0)
        marker.lifetime = rospy.Duration(0.1)
        markers.markers.append(marker)
    self.pub_agent_markers.publish(markers)
def visualize_action(self, use_d_min):
    # RViz: blue arrow from the current position to the NN desired position
    # plus a faint blue cube trail of past desired positions.
    # Display BLUE ARROW from current position to NN desired position
    marker = Marker()
    marker.header.stamp = rospy.Time.now()
    marker.header.frame_id = 'map'
    marker.ns = 'path_arrow'
    marker.id = 0
    marker.type = marker.ARROW
    marker.action = marker.ADD
    marker.points.append(self.pose.pose.position)
    marker.points.append(self.desired_position.pose.position)
    marker.scale = Vector3(x=0.1,y=0.2,z=0.2)
    marker.color = ColorRGBA(b=1.0,a=1.0)
    marker.lifetime = rospy.Duration(0.5)
    self.pub_goal_path_marker.publish(marker)

    # Display BLUE DOT at NN desired position
    marker = Marker()
    marker.header.stamp = rospy.Time.now()
    marker.header.frame_id = 'map'
    marker.ns = 'path_trail'
    marker.id = self.num_poses
    marker.type = marker.CUBE
    marker.action = marker.ADD
    marker.pose.position = copy.deepcopy(self.desired_position.pose.position)
    marker.scale = Vector3(x=0.2,y=0.2,z=0.2)
    marker.color = ColorRGBA(b=1.0,a=0.1)
    marker.lifetime = rospy.Duration(0.5)
    # when stopped, project the dot ahead along the desired heading so the
    # intended direction stays visible
    if self.desired_action[0] == 0.0:
        marker.pose.position.x += 2.0*np.cos(self.desired_action[1])
        marker.pose.position.y += 2.0*np.sin(self.desired_action[1])
    self.pub_goal_path_marker.publish(marker)

    # Display RED LINE from along minimum clear distance in front
    # marker = Marker()
    # marker.header.stamp = rospy.Time.now()
    # marker.header.frame_id = 'map'
    # marker.ns = 'clear_distance'
    # marker.id = 0
    # marker.type = marker.LINE_LIST
    # marker.lifetime = rospy.Duration(0.5)
    # marker.scale = Vector3(x=0.08,y=0.08,z=0.08)
    # if use_d_min:
    #     marker.color = ColorRGBA(r=1.0,a=1.0)
    # else:
    #     marker.color = ColorRGBA(r=1.0,g=1.0,a=1.0)
    # x_midpt = self.pose.pose.position.x + self.d_min*np.cos(self.psi)
    # y_midpt = self.pose.pose.position.y + self.d_min*np.sin(self.psi)
    # x_max = x_midpt - 1*np.sin(self.psi)
    # x_min = x_midpt + 1*np.sin(self.psi)
    # y_max = y_midpt + 1*np.cos(self.psi)
    # y_min = y_midpt - 1*np.cos(self.psi)
    # marker.points.append(Point(x=x_max,y=y_max))
    # marker.points.append(Point(x=x_min,y=y_min))
    # self.pub_goal_path_marker.publish(marker)
def on_shutdown(self):
    # rospy shutdown hook: command zero velocity before the node exits.
    rospy.loginfo("[%s] Shutting down." %(self.node_name))
    self.stop_moving()
    rospy.loginfo("Stopped %s's velocity." %(self.veh_name))
def run():
    # Entry point (Python 2 -- note the print statements): load the
    # GA3C-CADRL checkpoint, read ROS parameters, start NN_jackal and spin.
    print 'hello world from cadrl_node.py'
    file_dir = os.path.dirname(os.path.realpath(__file__))
    plt.rcParams.update({'font.size': 18})
    rospack = rospkg.RosPack()

    # discrete action set shared with training; network output indexes it
    a = network.Actions()
    actions = a.actions
    num_actions = a.num_actions
    nn = network.NetworkVP_rnn(network.Config.DEVICE, 'network', num_actions)
    nn.simple_load(rospack.get_path('cadrl_ros')+'/checkpoints/network_01900000')

    rospy.init_node('nn_jackal',anonymous=False)
    veh_name = 'JA01'
    pref_speed = rospy.get_param("~jackal_speed")
    veh_data = {'goal':np.zeros((2,)),'radius':0.5,'pref_speed':pref_speed,'kw':10.0,'kp':1.0,'name':'JA01'}
    print "********\n*******\n*********\nJackal speed:", pref_speed, "\n**********\n******"

    nn_jackal = NN_jackal(veh_name, veh_data, nn, actions)
    rospy.on_shutdown(nn_jackal.on_shutdown)
    rospy.spin()

if __name__ == '__main__':
    run()
|
{"hexsha": "2c5dbd817a2d33c149aaacd216df4ad0aafcb7d3", "size": 24386, "ext": "py", "lang": "Python", "max_stars_repo_path": "navigation/arena_local_planner/model_based/cadrl_ros/scripts/cadrl_node.py", "max_stars_repo_name": "kilinmao/sarl_star", "max_stars_repo_head_hexsha": "dde9bb2b690c705a615195f4b570af3ea9dfe05e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-11-11T13:25:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-25T21:34:41.000Z", "max_issues_repo_path": "navigation/arena_local_planner/model_based/cadrl_ros/scripts/cadrl_node.py", "max_issues_repo_name": "kilinmao/sarl_star", "max_issues_repo_head_hexsha": "dde9bb2b690c705a615195f4b570af3ea9dfe05e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-20T20:34:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-20T20:34:14.000Z", "max_forks_repo_path": "arena_navigation/arena_local_planner/model_based/cadrl_ros/scripts/cadrl_node.py", "max_forks_repo_name": "ignc-research/arena-marl", "max_forks_repo_head_hexsha": "3b9b2521436ef7f364a250da71a01e915d840296", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.779264214, "max_line_length": 132, "alphanum_fraction": 0.593086197, "include": true, "reason": "import numpy", "num_tokens": 6098}
|
import torch
import torch.nn as nn
import numpy as np
def accuracy(target, y_hat):
    """Pixel accuracies for the two output heads.

    target: (B, 2, H, W); y_hat: (B, 1+C, H, W) where channel 0 is a
    single edge logit (thresholded at 0) and channels 1: are per-class
    segmentation logits (argmaxed).
    NOTE(review): here seg compares against target[:, 1] and edge against
    target[:, 0] -- the opposite pairing of BCEDiceCofficient below;
    confirm which channel convention is correct.
    Returns (seg_acc, edge_acc) as scalar tensors.
    """
    seg_hits = torch.argmax(y_hat[:, 1:], dim=1) == target[:, 1]
    seg_acc = seg_hits.float().mean()
    edge_hits = y_hat[:, 0].gt(0).float() == target[:, 0]
    edge_acc = edge_hits.float().mean()
    return seg_acc, edge_acc
# dice coefficient for BCE
def BCEDiceCofficient(target, y_hat):
    """Smoothed Dice coefficients for BCE-style (single-logit) outputs.

    target: (B, 2, H, W) with channel 0 = seg mask, channel 1 = edge mask.
    y_hat:  (B, 2, H, W) with channel 0 = seg logit, channels 1: = edge
    logit(s); logits are thresholded at 0.
    Returns (seg_dice, edge_dice), each averaged over the batch.
    """
    smooth = 0.00001

    def _dice(pred_logits, mask):
        # threshold at 0, flatten per sample, smoothed Dice over the batch
        b = mask.size(0)
        p = pred_logits.gt(0).float().view(b, -1)
        m = mask.view(b, -1)
        inter = (p * m).sum(1)
        union = p.sum(1) + m.sum(1)
        return (2 * (inter + smooth) / (union + smooth)).mean()

    seg_acc = _dice(y_hat[:, 0], target[:, 0])
    edge_acc = _dice(y_hat[:, 1:], target[:, 1])
    return seg_acc, edge_acc
# dice coefficient for CE
# Disabled legacy implementation kept for reference only: the `if 0:` guard
# means this def never executes, and the module-level definition below
# shadows the name anyway.
if 0:
    def oldCEDiceCofficient(target , y_hat):
        n = int(y_hat.shape[1] / 2) # 2
        # for edge
        edge_target = target[:, 1] # [8,256,224]
        edge_pred = y_hat[:, 2:] # [8,2,256*224]
        edge_probs = edge_pred.reshape(edge_pred.shape[0] * edge_pred.shape[2] * edge_pred.shape[3],
                                       edge_pred.shape[1]) # (B * H * W, C)
        _, edge_pred = torch.max(edge_probs, 1)
        edge_target = edge_target.reshape(edge_target.shape[0] * edge_target.shape[1] * edge_target.shape[2])
        dice_edge = np.zeros(n)
        dice_tp_edge = np.zeros(n)
        dice_div_edge = np.zeros(n)
        # per-class Dice over the flattened pixels
        for i in range(n):
            dice_tp_edge[i] += ((edge_pred == i) & (edge_target == i)).sum().item()
            dice_div_edge[i] += ((edge_pred == i).sum().item() + (edge_target == i).sum().item())
            dice_edge[i] = (2 * dice_tp_edge[i]) / dice_div_edge[i]
        #print("edge_pred: ", (edge_pred == 1).sum())
        #print("edge_target: ", (edge_target == 1).sum())
        #print("tp: ", ((edge_pred == 1) & (edge_target == 1)).sum())
        edge_acc = torch.from_numpy(dice_edge[1:])
        # for seg
        seg_target = target[:, 0, ::] # [8,256,224]
        seg_pred = y_hat[:, :2] # [8,2,256*224]
        seg_probs = seg_pred.reshape(seg_pred.shape[0] * seg_pred.shape[2] * seg_pred.shape[3],
                                     seg_pred.shape[1]) # (B * H * W, C)
        _, seg_pred = torch.max(seg_probs, 1)
        seg_target = seg_target.reshape(seg_target.shape[0] * seg_target.shape[1] * seg_target.shape[2])
        dice_seg = np.zeros(n)
        dice_tp_seg = np.zeros(n)
        dice_div_seg = np.zeros(n)
        for i in range(n):
            dice_tp_seg[i] += ((seg_pred == i) & (seg_target == i)).sum().item()
            dice_div_seg[i] += ((seg_pred == i).sum().item() + (seg_target == i).sum().item())
            dice_seg[i] = (2 * dice_tp_seg[i]) / dice_div_seg[i]
        #print("seg_pred: ", (seg_pred == 1).sum())
        #print("seg_target: ", (seg_target == 1).sum())
        #print("tp seg: ", ((seg_pred == 1) & (seg_target == 1)).sum())
        seg_acc = torch.from_numpy(dice_seg[1:])
        return seg_acc, edge_acc
def oldCEDiceCofficient(target, y_hat):
    """Dice of the segmentation head for a 2-class CE output.

    target: (B, 2, H, W) -- channel 0 is the segmentation mask.
    y_hat:  (B, >=2, H, W) -- channels 0:2 are the seg class logits.
    Returns (dice, dice, q, p) where q = |pred==1 & target==1|,
    p = |pred==1| + |target==1| and dice = 2q/p (0.0 when p == 0).
    """
    seg_target = target[:, 0, ::]                   # (B, H, W) class ids
    seg_pred = torch.argmax(y_hat[:, :2, ::], 1)    # per-pixel predicted class
    q = ((seg_pred == 1) & (seg_target == 1)).sum().item()
    p = ((seg_pred == 1).sum().item() + (seg_target == 1).sum().item())
    # guard the empty case: when class 1 is absent from both prediction and
    # target, 2*q/p used to raise ZeroDivisionError; return 0.0 instead
    # (NOTE(review): some conventions define the empty-vs-empty Dice as 1.0)
    dice = (2 * q / p) if p else 0.0
    dice = torch.from_numpy(np.array([dice]))
    return dice, 1 * dice, q, p
# dice coefficient for CE
def CEDiceCofficient(target, y_hat):
    """Smoothed Dice coefficients for CE-style two-logit heads.

    target: (B, 2, H, W) -- channel 0 = seg mask, channel 1 = edge mask.
    y_hat:  (B, 4, H, W) -- the positive-class logits (channel 1 for seg,
    channel 3 for edge) are thresholded at 0.
    Returns (seg_dice, edge_dice), each averaged over the batch.
    """
    smooth = 0.00001
    n = target[:, 1].size(0)

    def _dice(logit, mask):
        # binarize logits, flatten per sample, smoothed Dice over the batch
        pred = logit.gt(0).float().view(n, -1)
        flat = mask.view(n, -1)
        tp = (pred * flat).sum(1)
        div = pred.sum(1) + flat.sum(1)
        return (2 * (tp + smooth) / (div + smooth)).mean()

    edge_acc = _dice(y_hat[:, 3], target[:, 1])
    seg_acc = _dice(y_hat[:, 1], target[:, 0, ::])
    return seg_acc, edge_acc
|
{"hexsha": "708115581babfaf051e28d295274936e7e9e2f27", "size": 5184, "ext": "py", "lang": "Python", "max_stars_repo_path": "DiceCofficient.py", "max_stars_repo_name": "changlabntu/HEDUNet", "max_stars_repo_head_hexsha": "7e4122a30bbde5f606311f24328f96a327c00493", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DiceCofficient.py", "max_issues_repo_name": "changlabntu/HEDUNet", "max_issues_repo_head_hexsha": "7e4122a30bbde5f606311f24328f96a327c00493", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DiceCofficient.py", "max_forks_repo_name": "changlabntu/HEDUNet", "max_forks_repo_head_hexsha": "7e4122a30bbde5f606311f24328f96a327c00493", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1176470588, "max_line_length": 109, "alphanum_fraction": 0.5829475309, "include": true, "reason": "import numpy", "num_tokens": 1616}
|
/-
Copyright (c) 2021 Eric Rodriguez. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Rodriguez
-/
import ring_theory.polynomial.cyclotomic.basic
import tactic.by_contra
import topology.algebra.polynomial
import number_theory.padics.padic_val
import analysis.complex.arg
/-!
# Evaluating cyclotomic polynomials
This file states some results about evaluating cyclotomic polynomials in various different ways.
## Main definitions
* `polynomial.eval(₂)_one_cyclotomic_prime(_pow)`: `eval 1 (cyclotomic p^k R) = p`.
* `polynomial.eval_one_cyclotomic_not_prime_pow`: Otherwise, `eval 1 (cyclotomic n R) = 1`.
* `polynomial.cyclotomic_pos` : `∀ x, 0 < eval x (cyclotomic n R)` if `2 < n`.
-/
namespace polynomial

open finset nat
open_locale big_operators

-- `Φ_p(1) = p` for a prime `p`: `cyclotomic_prime` rewrites `Φ_p` as the
-- geometric sum `∑ i in range p, X^i`, and each summand evaluates to `1`.
@[simp] lemma eval_one_cyclotomic_prime {R : Type*} [comm_ring R] {p : ℕ} [hn : fact p.prime] :
  eval 1 (cyclotomic p R) = p :=
by simp only [cyclotomic_prime, eval_X, one_pow, finset.sum_const, eval_pow,
  eval_finset_sum, finset.card_range, smul_one_eq_coe]

-- `eval₂` version of the previous lemma, transported through a ring hom `f`.
@[simp] lemma eval₂_one_cyclotomic_prime {R S : Type*} [comm_ring R] [semiring S] (f : R →+* S)
  {p : ℕ} [fact p.prime] : eval₂ f 1 (cyclotomic p R) = p :=
by simp

-- `Φ_{p^(k+1)}(1) = p`: a prime-power cyclotomic is a geometric sum in `X^(p^k)`.
@[simp] lemma eval_one_cyclotomic_prime_pow {R : Type*} [comm_ring R] {p : ℕ} (k : ℕ)
  [hn : fact p.prime] : eval 1 (cyclotomic (p ^ (k + 1)) R) = p :=
by simp only [cyclotomic_prime_pow_eq_geom_sum hn.out, eval_X, one_pow, finset.sum_const,
  eval_pow, eval_finset_sum, finset.card_range, smul_one_eq_coe]

-- `eval₂` version of the prime-power lemma.
@[simp] lemma eval₂_one_cyclotomic_prime_pow {R S : Type*} [comm_ring R] [semiring S] (f : R →+* S)
  {p : ℕ} (k : ℕ) [fact p.prime] : eval₂ f 1 (cyclotomic (p ^ (k + 1)) R) = p :=
by simp
/-- Auxiliary result: for `2 < n`, the `n`-th cyclotomic polynomial is positive at `-1`.
The claim is transferred from `ℝ` (via `ℤ`); over `ℝ`, if the value at `-1` were `≤ 0`
then, since the value at `0` is `1`, the intermediate value theorem would produce a real
root, i.e. a real primitive `n`-th root of unity, forcing `n ≤ 2` — a contradiction. -/
private lemma cyclotomic_neg_one_pos {n : ℕ} (hn : 2 < n) {R} [linear_ordered_comm_ring R] :
  0 < eval (-1 : R) (cyclotomic n R) :=
begin
  haveI := ne_zero.of_gt hn,
  -- reduce the statement over `R` to the statement over `ℤ` …
  rw [←map_cyclotomic_int, ←int.cast_one, ←int.cast_neg, eval_int_cast_map,
    int.coe_cast_ring_hom, int.cast_pos],
  -- … and the statement over `ℤ` to the statement over `ℝ`
  suffices : 0 < eval ↑(-1 : ℤ) (cyclotomic n ℝ),
  { rw [←map_cyclotomic_int n ℝ, eval_int_cast_map, int.coe_cast_ring_hom] at this,
    exact_mod_cast this },
  simp only [int.cast_one, int.cast_neg],
  -- the constant coefficient is `1`, so the polynomial is positive at `0`
  have h0 := cyclotomic_coeff_zero ℝ hn.le,
  rw coeff_zero_eq_eval_zero at h0,
  by_contra' hx,
  -- if the value at `-1` were `≤ 0`, the IVT on `[-1, 0]` would give a real root `y`
  have := intermediate_value_univ (-1) 0 (cyclotomic n ℝ).continuous,
  obtain ⟨y, hy : is_root _ y⟩ := this (show (0 : ℝ) ∈ set.Icc _ _, by simpa [h0] using hx),
  -- a real root of `cyclotomic n ℝ` is a primitive `n`-th root of unity, whence `n ≤ 2`
  rw is_root_cyclotomic_iff at hy,
  rw hy.eq_order_of at hn,
  exact hn.not_le linear_ordered_ring.order_of_le_two,
end
/-- For `2 < n`, the `n`-th cyclotomic polynomial is (strictly) positive at every point of a
linearly ordered commutative ring. The proof is by strong induction on `n`, splitting on
the sign of the geometric sum `∑ i in range n, x ^ i` obtained from
`∏ d ∣ n, cyclotomic d = ∑ i < n, X ^ i`. -/
lemma cyclotomic_pos {n : ℕ} (hn : 2 < n) {R} [linear_ordered_comm_ring R] (x : R) :
  0 < eval x (cyclotomic n R) :=
begin
  induction n using nat.strong_induction_on with n ih,
  have hn' : 0 < n := pos_of_gt hn,
  have hn'' : 1 < n := one_lt_two.trans hn,
  dsimp at ih,
  -- evaluate `∏ d ∣ n, cyclotomic d = geom_sum` at `x` and peel off the `d = n` factor,
  -- leaving the product over the proper divisors `≠ 1`
  have := prod_cyclotomic_eq_geom_sum hn' R,
  apply_fun eval x at this,
  rw [← cons_self_proper_divisors hn'.ne', finset.erase_cons_of_ne _ hn''.ne',
    finset.prod_cons, eval_mul, eval_geom_sum] at this,
  -- split on the sign of the geometric sum
  rcases lt_trichotomy 0 (∑ i in finset.range n, x ^ i) with h | h | h,
  -- geometric sum positive: the remaining product is nonneg by the inductive hypothesis
  { apply pos_of_mul_pos_left,
    { rwa this },
    rw eval_prod,
    refine finset.prod_nonneg (λ i hi, _),
    simp only [finset.mem_erase, mem_proper_divisors] at hi,
    rw geom_sum_pos_iff hn'.ne' at h,
    cases h with hk hx,
    { refine (ih _ hi.2.2 (nat.two_lt_of_ne _ hi.1 _)).le; rintro rfl,
      { exact hn'.ne' (zero_dvd_iff.mp hi.2.1) },
      { exact even_iff_not_odd.mp (even_iff_two_dvd.mpr hi.2.1) hk } },
    { rcases eq_or_ne i 2 with rfl | hk,
      { simpa only [eval_X, eval_one, cyclotomic_two, eval_add] using hx.le },
      refine (ih _ hi.2.2 (nat.two_lt_of_ne _ hi.1 hk)).le,
      rintro rfl,
      exact (hn'.ne' $ zero_dvd_iff.mp hi.2.1) } },
  -- geometric sum zero: then `x = -1`, handled by `cyclotomic_neg_one_pos`
  { rw [eq_comm, geom_sum_eq_zero_iff_neg_one hn'.ne'] at h,
    exact h.1.symm ▸ cyclotomic_neg_one_pos hn },
  -- geometric sum negative: the remaining product is nonpos, with `cyclotomic 2` the
  -- nonpositive factor and every other factor nonneg by the inductive hypothesis
  { apply pos_of_mul_neg_left,
    { rwa this },
    rw geom_sum_neg_iff hn'.ne' at h,
    have h2 : 2 ∈ n.proper_divisors.erase 1,
    { rw [finset.mem_erase, mem_proper_divisors],
      exact ⟨dec_trivial, even_iff_two_dvd.mp h.1, hn⟩ },
    rw [eval_prod, ← finset.prod_erase_mul _ _ h2],
    apply mul_nonpos_of_nonneg_of_nonpos,
    { refine finset.prod_nonneg (λ i hi, le_of_lt _),
      simp only [finset.mem_erase, mem_proper_divisors] at hi,
      refine ih _ hi.2.2.2 (nat.two_lt_of_ne _ hi.2.1 hi.1),
      rintro rfl,
      rw zero_dvd_iff at hi,
      exact hn'.ne' hi.2.2.1 },
    { simpa only [eval_X, eval_one, cyclotomic_two, eval_add] using h.right.le } }
end
/-- For every `n`, `cyclotomic n` is positive at `x` once `1 < x` and nonnegative once
`1 ≤ x`. The cases `n = 0, 1, 2` are dispatched by `simp`/`linarith`; `n ≥ 3` is
`cyclotomic_pos`. Stated as a conjunction so that both facts come from one case split. -/
lemma cyclotomic_pos_and_nonneg (n : ℕ) {R} [linear_ordered_comm_ring R] (x : R) :
  (1 < x → 0 < eval x (cyclotomic n R)) ∧ (1 ≤ x → 0 ≤ eval x (cyclotomic n R)) :=
begin
  rcases n with _ | _ | _ | n;
  simp only [cyclotomic_zero, cyclotomic_one, cyclotomic_two, succ_eq_add_one,
    eval_X, eval_one, eval_add, eval_sub, sub_nonneg, sub_pos,
    zero_lt_one, zero_le_one, implies_true_iff, imp_self, and_self],
  { split; intro; linarith, },
  { have : 2 < n + 3 := dec_trivial,
    split; intro; [skip, apply le_of_lt]; apply cyclotomic_pos this, },
end
/-- Cyclotomic polynomials are always positive on inputs larger than one.
Similar to `cyclotomic_pos` but with the condition on the input rather than index of the
cyclotomic polynomial. -/
lemma cyclotomic_pos' (n : ℕ) {R} [linear_ordered_comm_ring R] {x : R} (hx : 1 < x) :
  0 < eval x (cyclotomic n R) :=
-- first component of the combined statement `cyclotomic_pos_and_nonneg`
(cyclotomic_pos_and_nonneg n x).1 hx
/-- Cyclotomic polynomials are always nonnegative on inputs one or more. -/
lemma cyclotomic_nonneg (n : ℕ) {R} [linear_ordered_comm_ring R] {x : R} (hx : 1 ≤ x) :
  0 ≤ eval x (cyclotomic n R) :=
-- second component of the combined statement `cyclotomic_pos_and_nonneg`
(cyclotomic_pos_and_nonneg n x).2 hx
/-- If `n` is not a prime power then the `n`-th cyclotomic polynomial evaluated at `1`
is `1`. Together with `eval_one_cyclotomic_prime_pow` this determines
`eval 1 (cyclotomic n R)` for every `n`. -/
lemma eval_one_cyclotomic_not_prime_pow {R : Type*} [ring R] {n : ℕ}
  (h : ∀ {p : ℕ}, p.prime → ∀ k : ℕ, p ^ k ≠ n) : eval 1 (cyclotomic n R) = 1 :=
begin
  rcases n.eq_zero_or_pos with rfl | hn',
  { simp },
  have hn : 1 < n := one_lt_iff_ne_zero_and_ne_one.mpr ⟨hn'.ne', (h nat.prime_two 0).symm⟩,
  -- it suffices to prove the statement over `ℤ`; the value there is `±1`, and `-1` is
  -- ruled out by `cyclotomic_nonneg`
  rsuffices (h | h) : eval 1 (cyclotomic n ℤ) = 1 ∨ eval 1 (cyclotomic n ℤ) = -1,
  { have := eval_int_cast_map (int.cast_ring_hom R) (cyclotomic n ℤ) 1,
    simpa only [map_cyclotomic, int.cast_one, h, eq_int_cast] using this },
  { exfalso,
    linarith [cyclotomic_nonneg n (le_refl (1 : ℤ))] },
  -- show `|eval 1 (cyclotomic n ℤ)| = 1`, i.e. no prime `p` divides it
  rw [←int.nat_abs_eq_nat_abs_iff, int.nat_abs_one, nat.eq_one_iff_not_exists_prime_dvd],
  intros p hp hpe,
  haveI := fact.mk hp,
  -- any such prime `p` would divide `n` (it divides the full product `= n`)
  have hpn : p ∣ n,
  { apply hpe.trans,
    nth_rewrite 1 ←int.nat_abs_of_nat n,
    rw [int.nat_abs_dvd_iff_dvd, ←one_geom_sum, ←eval_geom_sum, ←prod_cyclotomic_eq_geom_sum hn'],
    apply eval_dvd,
    apply finset.dvd_prod_of_mem,
    simp [hn'.ne', hn.ne'] },
  -- evaluate `∏ d ∣ n, cyclotomic d = geom_sum` at `1`: the prime-power divisors
  -- `p, p², …, p ^ padic_val_nat p n` each contribute a factor `p`
  have := prod_cyclotomic_eq_geom_sum hn' ℤ,
  apply_fun eval 1 at this,
  rw [eval_geom_sum, one_geom_sum, eval_prod, eq_comm, ←finset.prod_sdiff $
    @range_pow_padic_val_nat_subset_divisors' p _ _, finset.prod_image] at this,
  simp_rw [eval_one_cyclotomic_prime_pow, finset.prod_const, finset.card_range, mul_comm] at this,
  rw [←finset.prod_sdiff $ show {n} ⊆ _, from _] at this,
  any_goals {apply_instance},
  swap,
  -- `n` itself is a divisor distinct from all the prime powers (by hypothesis `h`)
  { simp only [singleton_subset_iff, mem_sdiff, mem_erase, ne.def, mem_divisors, dvd_refl,
      true_and, mem_image, mem_range, exists_prop, not_exists, not_and],
    exact ⟨⟨hn.ne', hn'.ne'⟩, λ t _, h hp _⟩ },
  -- if additionally `p ∣ eval 1 (cyclotomic n ℤ)`, then `p ^ (padic_val_nat p n + 1) ∣ n`,
  -- contradicting the maximality in the definition of `padic_val_nat`
  rw [←int.nat_abs_of_nat p, int.nat_abs_dvd_iff_dvd] at hpe,
  obtain ⟨t, ht⟩ := hpe,
  rw [finset.prod_singleton, ht, mul_left_comm, mul_comm, ←mul_assoc, mul_assoc] at this,
  have : (p ^ (padic_val_nat p n) * p : ℤ) ∣ n := ⟨_, this⟩,
  simp only [←pow_succ', ←int.nat_abs_dvd_iff_dvd, int.nat_abs_of_nat, int.nat_abs_pow] at this,
  exact pow_succ_padic_val_nat_not_dvd hn'.ne' this,
  -- injectivity of `k ↦ p ^ (k + 1)`, needed for `finset.prod_image` above
  { rintro x - y - hxy,
    apply nat.succ_injective,
    exact nat.pow_right_injective hp.two_le hxy }
end
/-- For `2 ≤ n` and real `q > 1`, `(q - 1) ^ φ(n) < Φₙ(q)`.
Over `ℂ`, `Φₙ` factors as `∏ (X - ζ')` over the primitive `n`-th roots of unity `ζ'`;
each factor satisfies `q - 1 ≤ ‖q - ζ'‖` by the reverse triangle inequality, and the
factor at `ζ = exp(2πi/n)` is strictly larger since `q` and `ζ` are not on the same ray. -/
lemma sub_one_pow_totient_lt_cyclotomic_eval {n : ℕ} {q : ℝ} (hn' : 2 ≤ n) (hq' : 1 < q) :
  (q - 1) ^ totient n < (cyclotomic n ℝ).eval q :=
begin
  have hn : 0 < n := pos_of_gt hn',
  have hq := zero_lt_one.trans hq',
  -- factorwise lower bound: `q - 1 ≤ ‖q - ζ'‖` for every primitive root `ζ'`
  have hfor : ∀ ζ' ∈ primitive_roots n ℂ, q - 1 ≤ ‖↑q - ζ'‖,
  { intros ζ' hζ',
    rw mem_primitive_roots hn at hζ',
    convert norm_sub_norm_le (↑q) ζ',
    { rw [complex.norm_real, real.norm_of_nonneg hq.le], },
    { rw [hζ'.norm'_eq_one hn.ne'] } },
  let ζ := complex.exp (2 * ↑real.pi * complex.I / ↑n),
  have hζ : is_primitive_root ζ n := complex.is_primitive_root_exp n hn.ne',
  -- one factor is strictly larger: `q` and `ζ` do not lie on the same ray
  have hex : ∃ ζ' ∈ primitive_roots n ℂ, q - 1 < ‖↑q - ζ'‖,
  { refine ⟨ζ, (mem_primitive_roots hn).mpr hζ, _⟩,
    suffices : ¬ same_ray ℝ (q : ℂ) ζ,
    { convert lt_norm_sub_of_not_same_ray this;
      simp only [hζ.norm'_eq_one hn.ne', real.norm_of_nonneg hq.le, complex.norm_real] },
    rw complex.same_ray_iff,
    push_neg,
    refine ⟨by exact_mod_cast hq.ne', hζ.ne_zero hn.ne', _⟩,
    rw [complex.arg_of_real_of_nonneg hq.le, ne.def, eq_comm, hζ.arg_eq_zero_iff hn.ne'],
    clear_value ζ,
    rintro rfl,
    -- `ζ = 1` would be a primitive `n`-th root for `n ≥ 2`, impossible
    linarith [hζ.unique is_primitive_root.one] },
  -- `Φₙ(q) ≠ 0` over `ℂ`, so its norm can be packaged as a unit of `ℝ≥0`
  have : ¬eval ↑q (cyclotomic n ℂ) = 0,
  { erw cyclotomic.eval_apply q n (algebra_map ℝ ℂ),
    simpa only [complex.coe_algebra_map, complex.of_real_eq_zero]
      using (cyclotomic_pos' n hq').ne' },
  -- restate in `ℝ≥0ˣ` and compare the two products factor by factor
  suffices : (units.mk0 (real.to_nnreal (q - 1)) (by simp [hq'])) ^ totient n
    < units.mk0 (‖(cyclotomic n ℂ).eval q‖₊) (by simp [this]),
  { simp only [←units.coe_lt_coe, units.coe_pow, units.coe_mk0, ← nnreal.coe_lt_coe, hq'.le,
      real.to_nnreal_lt_to_nnreal_iff_of_nonneg, coe_nnnorm, complex.norm_eq_abs,
      nnreal.coe_pow, real.coe_to_nnreal', max_eq_left, sub_nonneg] at this,
    convert this,
    erw [(cyclotomic.eval_apply q n (algebra_map ℝ ℂ)), eq_comm],
    simp only [cyclotomic_nonneg n hq'.le, complex.coe_algebra_map,
      complex.abs_of_real, abs_eq_self], },
  simp only [cyclotomic_eq_prod_X_sub_primitive_roots hζ, eval_prod, eval_C,
    eval_X, eval_sub, nnnorm_prod, units.mk0_prod],
  convert finset.prod_lt_prod' _ _,
  swap, { exact λ _, units.mk0 (real.to_nnreal (q - 1)) (by simp [hq']) },
  -- the constant product has `φ(n)` factors
  { simp only [complex.card_primitive_roots, prod_const, card_attach] },
  -- `≤` for all factors (uses `hfor`)
  { simp only [subtype.coe_mk, finset.mem_attach, forall_true_left, subtype.forall,
      ←units.coe_le_coe, ← nnreal.coe_le_coe, complex.abs.nonneg, hq'.le, units.coe_mk0,
      real.coe_to_nnreal', coe_nnnorm, complex.norm_eq_abs, max_le_iff, tsub_le_iff_right],
    intros x hx,
    simpa only [and_true, tsub_le_iff_right] using hfor x hx, },
  -- `<` for at least one factor (uses `hex`)
  { simp only [subtype.coe_mk, finset.mem_attach, exists_true_left, subtype.exists,
      ← nnreal.coe_lt_coe, ← units.coe_lt_coe, units.coe_mk0 _, coe_nnnorm],
    simpa only [hq'.le, real.coe_to_nnreal', max_eq_left, sub_nonneg] using hex },
end
/-- Non-strict version of `sub_one_pow_totient_lt_cyclotomic_eval`, valid for every `n`
(the cases `n = 0` and `n = 1` are checked directly). -/
lemma sub_one_pow_totient_le_cyclotomic_eval {q : ℝ} (hq' : 1 < q) :
  ∀ n, (q - 1) ^ totient n ≤ (cyclotomic n ℝ).eval q
| 0 := by simp only [totient_zero, pow_zero, cyclotomic_zero, eval_one]
| 1 := by simp only [totient_one, pow_one, cyclotomic_one, eval_sub, eval_X, eval_one]
| (n + 2) := (sub_one_pow_totient_lt_cyclotomic_eval dec_trivial hq').le
/-- For `3 ≤ n` and real `q > 1`, `Φₙ(q) < (q + 1) ^ φ(n)`.
Dual to `sub_one_pow_totient_lt_cyclotomic_eval`: each factor `‖q - ζ'‖` is at most
`q + 1` by the triangle inequality, and the factor at `ζ = exp(2πi/n)` is strictly
smaller since `q` and `-ζ` are not on the same ray (here `3 ≤ n` rules out `ζ = -1`). -/
lemma cyclotomic_eval_lt_add_one_pow_totient {n : ℕ} {q : ℝ} (hn' : 3 ≤ n) (hq' : 1 < q) :
  (cyclotomic n ℝ).eval q < (q + 1) ^ totient n :=
begin
  have hn : 0 < n := pos_of_gt hn',
  have hq := zero_lt_one.trans hq',
  -- factorwise upper bound: `‖q - ζ'‖ ≤ q + 1` for every primitive root `ζ'`
  have hfor : ∀ ζ' ∈ primitive_roots n ℂ, ‖↑q - ζ'‖ ≤ q + 1,
  { intros ζ' hζ',
    rw mem_primitive_roots hn at hζ',
    convert norm_sub_le (↑q) ζ',
    { rw [complex.norm_real, real.norm_of_nonneg (zero_le_one.trans_lt hq').le], },
    { rw [hζ'.norm'_eq_one hn.ne'] }, },
  let ζ := complex.exp (2 * ↑real.pi * complex.I / ↑n),
  have hζ : is_primitive_root ζ n := complex.is_primitive_root_exp n hn.ne',
  -- one factor is strictly smaller: `q` and `-ζ` do not lie on the same ray
  have hex : ∃ ζ' ∈ primitive_roots n ℂ, ‖↑q - ζ'‖ < q + 1,
  { refine ⟨ζ, (mem_primitive_roots hn).mpr hζ, _⟩,
    suffices : ¬ same_ray ℝ (q : ℂ) (-ζ),
    { convert norm_add_lt_of_not_same_ray this;
      simp [real.norm_of_nonneg hq.le, hζ.norm'_eq_one hn.ne', -complex.norm_eq_abs] },
    rw complex.same_ray_iff,
    push_neg,
    refine ⟨by exact_mod_cast hq.ne', neg_ne_zero.mpr $ hζ.ne_zero hn.ne', _⟩,
    rw [complex.arg_of_real_of_nonneg hq.le, ne.def, eq_comm],
    intro h,
    rw [complex.arg_eq_zero_iff, complex.neg_re, neg_nonneg, complex.neg_im, neg_eq_zero] at h,
    have hζ₀ : ζ ≠ 0,
    { clear_value ζ,
      rintro rfl,
      exact hn.ne' (hζ.unique is_primitive_root.zero) },
    -- `arg (-ζ) = 0` would force `ζ = -1`, which is primitive only for `n = 2 < 3`
    have : ζ.re < 0 ∧ ζ.im = 0 := ⟨h.1.lt_of_ne _, h.2⟩,
    rw [←complex.arg_eq_pi_iff, hζ.arg_eq_pi_iff hn.ne'] at this,
    rw this at hζ,
    linarith [hζ.unique $ is_primitive_root.neg_one 0 two_ne_zero.symm],
    { contrapose! hζ₀,
      ext; simp [hζ₀, h.2] } },
  -- `Φₙ(q) ≠ 0` over `ℂ`, so its norm can be packaged as a unit of `ℝ≥0`
  have : ¬eval ↑q (cyclotomic n ℂ) = 0,
  { erw cyclotomic.eval_apply q n (algebra_map ℝ ℂ),
    simp only [complex.coe_algebra_map, complex.of_real_eq_zero],
    exact (cyclotomic_pos' n hq').ne.symm, },
  -- restate in `ℝ≥0ˣ` and compare the two products factor by factor
  suffices : units.mk0 (‖(cyclotomic n ℂ).eval q‖₊) (by simp [this])
    < (units.mk0 (real.to_nnreal (q + 1)) (by simp; linarith)) ^ totient n,
  { simp only [←units.coe_lt_coe, units.coe_pow, units.coe_mk0, ← nnreal.coe_lt_coe, hq'.le,
      real.to_nnreal_lt_to_nnreal_iff_of_nonneg, coe_nnnorm, complex.norm_eq_abs,
      nnreal.coe_pow, real.coe_to_nnreal', max_eq_left, sub_nonneg] at this,
    convert this,
    { erw [(cyclotomic.eval_apply q n (algebra_map ℝ ℂ)), eq_comm],
      simp [cyclotomic_nonneg n hq'.le] },
    rw [eq_comm, max_eq_left_iff],
    linarith },
  simp only [cyclotomic_eq_prod_X_sub_primitive_roots hζ, eval_prod, eval_C,
    eval_X, eval_sub, nnnorm_prod, units.mk0_prod],
  convert finset.prod_lt_prod' _ _,
  swap, { exact λ _, units.mk0 (real.to_nnreal (q + 1)) (by simp; linarith only [hq']) },
  -- the constant product has `φ(n)` factors
  { simp [complex.card_primitive_roots], },
  -- `≤` for all factors (uses `hfor`)
  { simp only [subtype.coe_mk, finset.mem_attach, forall_true_left, subtype.forall,
      ←units.coe_le_coe, ← nnreal.coe_le_coe, complex.abs.nonneg, hq'.le, units.coe_mk0,
      real.coe_to_nnreal, coe_nnnorm, complex.norm_eq_abs, max_le_iff],
    intros x hx,
    have : complex.abs _ ≤ _ := hfor x hx,
    simp [this], },
  -- `<` for at least one factor (uses `hex`)
  { simp only [subtype.coe_mk, finset.mem_attach, exists_true_left, subtype.exists,
      ← nnreal.coe_lt_coe, ← units.coe_lt_coe, units.coe_mk0 _, coe_nnnorm],
    obtain ⟨ζ, hζ, hhζ : complex.abs _ < _⟩ := hex,
    exact ⟨ζ, hζ, by simp [hhζ]⟩ },
end
/-- Non-strict version of `cyclotomic_eval_lt_add_one_pow_totient`, valid for every `n`
(the cases `n = 0, 1, 2` are checked directly). -/
lemma cyclotomic_eval_le_add_one_pow_totient {q : ℝ} (hq' : 1 < q) :
  ∀ n, (cyclotomic n ℝ).eval q ≤ (q + 1) ^ totient n
| 0 := by simp
| 1 := by simp [add_assoc, add_nonneg, zero_le_one]
| 2 := by simp
| (n + 3) := (cyclotomic_eval_lt_add_one_pow_totient dec_trivial hq').le
/-- Natural-number version: for `1 < n` and `q ≠ 1`, `(q - 1) ^ φ(n) < |Φₙ(q)|` in `ℕ`
(with truncated subtraction). `q = 0` is handled via the nonzero constant coefficient of
`Φₙ`; for `2 ≤ q` the bound transfers from `sub_one_pow_totient_lt_cyclotomic_eval`. -/
lemma sub_one_pow_totient_lt_nat_abs_cyclotomic_eval {n : ℕ} {q : ℕ} (hn' : 1 < n) (hq : q ≠ 1) :
  (q - 1) ^ totient n < ((cyclotomic n ℤ).eval ↑q).nat_abs :=
begin
  rcases hq.lt_or_lt.imp_left nat.lt_one_iff.mp with rfl | hq',
  -- `q = 0`: the left side is `0` and `Φₙ(0)` is the constant coefficient, which is `1`
  { rw [zero_tsub, zero_pow (nat.totient_pos (pos_of_gt hn')), pos_iff_ne_zero, int.nat_abs_ne_zero,
      nat.cast_zero, ← coeff_zero_eq_eval_zero, cyclotomic_coeff_zero _ hn'],
    exact one_ne_zero },
  -- `2 ≤ q`: cast to `ℝ` and apply the real strict inequality
  rw [← @nat.cast_lt ℝ, nat.cast_pow, nat.cast_sub hq'.le, nat.cast_one, int.cast_nat_abs],
  refine (sub_one_pow_totient_lt_cyclotomic_eval hn' (nat.one_lt_cast.2 hq')).trans_le _,
  exact (cyclotomic.eval_apply (q : ℤ) n (algebra_map ℤ ℝ)).trans_le (le_abs_self _)
end
/-- Handy corollary of `sub_one_pow_totient_lt_nat_abs_cyclotomic_eval`:
`q - 1 < |Φₙ(q)|` in `ℕ`, for `1 < n` and `q ≠ 1`, via `a ≤ a ^ φ(n)`. -/
lemma sub_one_lt_nat_abs_cyclotomic_eval {n : ℕ} {q : ℕ} (hn' : 1 < n) (hq : q ≠ 1) :
  q - 1 < ((cyclotomic n ℤ).eval ↑q).nat_abs :=
calc q - 1 ≤ (q - 1) ^ totient n : nat.le_self_pow (nat.totient_pos $ pos_of_gt hn').ne' _
... < ((cyclotomic n ℤ).eval ↑q).nat_abs : sub_one_pow_totient_lt_nat_abs_cyclotomic_eval hn' hq
end polynomial
|
{"author": "leanprover-community", "repo": "mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "save_path": "github-repos/lean/leanprover-community-mathlib", "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/ring_theory/polynomial/cyclotomic/eval.lean"}
|
import dash
import dash_html_components as html
import dash_core_components as dcc
import pandas as pd
import simfin as sf
from simfin.names import *
import dash_table
from dash.dependencies import Output, Input, State
from flask import Flask
from flask.helpers import get_root_path
from flask_login import login_required
from config import BaseConfig
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
from flask_migrate import Migrate
from textwrap import dedent
def create_app():
    """Application factory: build and return the configured Flask server.

    Instantiates Flask, loads settings from ``BaseConfig``, then wires up
    the Dash apps, extensions and blueprints (in that order) before
    returning the server.
    """
    flask_app = Flask(__name__)
    flask_app.config.from_object(BaseConfig)
    # Registration order preserved: dash apps, then extensions, then blueprints.
    for register in (register_dashapps, register_extensions, register_blueprints):
        register(flask_app)
    return flask_app
def register_dashapps(app):
sf.set_data_dir('~/simfin_data/')
api_key = "ZxGEGRnaTpxMF0pbGQ3JLThgqY2HBL17"
df_income = sf.load(dataset='income', variant='annual', market='us', index=[TICKER])
df_income = df_income.drop(['Currency', 'SimFinId', 'Fiscal Period', 'Publish Date', 'Shares (Basic)',
'Abnormal Gains (Losses)', 'Net Extraordinary Gains (Losses)',
'Income (Loss) from Continuing Operations',
'Net Income (Common)', 'Pretax Income (Loss), Adj.', 'Report Date', 'Restated Date'],
axis=1)
df_income = df_income.fillna(0)
df_income = df_income.apply(lambda x: x / 1000000)
decimals = 0
df_income['Fiscal Year'] = df_income['Fiscal Year'].apply(lambda x: x * 1000000)
df_income['Fiscal Year'] = df_income['Fiscal Year'].apply(lambda x: round(x, decimals))
ticker = "AAPL"
df_income.rename(
columns={FISCAL_YEAR: 'Year', SHARES_DILUTED: 'Shares', SGA: 'SGA', RD: 'R&D', DEPR_AMOR: 'D&A',
OP_INCOME: 'Operating Income', NON_OP_INCOME: 'Non Operating Income',
INTEREST_EXP_NET: 'Interest Expense', PRETAX_INCOME_LOSS: 'Pretax Income',
INCOME_TAX: 'Income Tax'}, inplace=True)
# restated date
df_names = df_income.index.copy()
df_names = df_names.drop_duplicates()
# income signals
df_negative = df_income.copy()
df_negative[['Cost of Revenue', 'R&D', 'Operating Expenses', 'SGA', 'Income Tax', 'D&A', 'Interest Expense']] = \
df_negative[
['Cost of Revenue', 'R&D', 'Operating Expenses', 'SGA', 'Income Tax', 'D&A', 'Interest Expense']].apply(
lambda x: x * -1)
df_negative['Expenses'] = df_negative['Operating Expenses'] + df_negative['SGA'] + df_negative['R&D'] + df_negative[
'D&A']
df_signals = pd.DataFrame(index=df_negative.index)
df_signals['Year'] = df_negative['Year'].copy()
df_signals['Gross Profit Margin %'] = round((df_negative['Gross Profit'] / df_negative['Revenue']) * 100,
2).copy()
df_signals['SGA Of Gross Profit'] = round((df_negative['SGA'] / df_negative['Gross Profit']) * 100, 2).copy()
df_signals['R&D Of Gross Profit'] = round((df_negative['R&D'] / df_negative['Gross Profit']) * 100, 2).copy()
df_signals['D&A Of Gross Profit'] = round((df_negative['D&A'] / df_negative['Gross Profit']) * 100, 2).copy()
df_signals['Operating margin ratio'] = round((df_negative['Operating Income'] / df_negative['Revenue']) * 100,
2).copy()
df_signals['Interest to Operating Income %'] = round((df_negative['Interest Expense'] / df_negative['Operating Income'])
* 100, 2).copy()
df_signals['Taxes paid'] = round((df_negative['Income Tax'] / df_negative['Pretax Income']) * 100, 2).copy()
df_signals['Net income margin'] = round((df_negative['Net Income'] / df_negative['Revenue']) * 100, 2).copy()
df_signals['Interest to Operating Income %'] = df_signals['Interest to Operating Income %'].replace(-np.inf, 0)
df2_original = df_signals.loc[ticker]
# income growth per year
df1_growth = pd.DataFrame(index=df_income.index)
df1_growth['Year'] = df_income['Year'].copy()
df1_growth['Revenue Growth'] = df_income['Revenue'].pct_change().mul(100).round(2).copy()
df1_growth['Profit Growth'] = df_income['Gross Profit'].pct_change().mul(100).round(2).copy()
df1_growth['Operating Income Growth'] = df_income['Operating Income'].pct_change().mul(100).round(2).copy()
df1_growth['Pretax Income Growth'] = df_income['Pretax Income'].pct_change().mul(100).round(2).copy()
df1_growth['Net Income Growth'] = df_income['Net Income'].pct_change().mul(100).round(2).copy()
df1_growth = df1_growth.fillna(0)
# compounded income growth
df_income_compound_original = pd.DataFrame()
df_income_compound_original['Revenue %'] = []
df_income_compound_original['Inventory %'] = []
df_income_compound_original['Gross Profit %'] = []
df_income_compound_original['Operating Income %'] = []
df_income_compound_original['Pre tax %'] = []
df_income_compound_original['Net Income %'] = []
# balance sheet
df_balance = sf.load_balance(variant='annual', market='us', index=[TICKER])
df_balance = df_balance.drop(
['Currency', 'SimFinId', 'Fiscal Period', 'Publish Date', 'Shares (Basic)', 'Report Date',
'Shares (Diluted)', 'Total Liabilities & Equity', 'Restated Date'], axis=1)
df_balance = df_balance.fillna(0)
df_balance = df_balance.apply(lambda x: x / 1000000)
df_balance['Fiscal Year'] = df_balance['Fiscal Year'].apply(lambda x: x * 1000000)
df_balance['Fiscal Year'] = df_balance['Fiscal Year'].apply(lambda x: round(x, 0))
df_balance.rename(columns={FISCAL_YEAR: 'Year', CASH_EQUIV_ST_INVEST: 'Cash & Equivalent',
ACC_NOTES_RECV: 'Accounts Receivable', TOTAL_CUR_ASSETS: 'Current Assets',
PPE_NET: 'Prop Plant & Equipment', LT_INVEST_RECV: 'Long Term Investments',
OTHER_LT_ASSETS: 'Other Long Term Assets', TOTAL_NONCUR_ASSETS: 'Noncurrent assets',
PAYABLES_ACCRUALS: 'Accounts Payable', TOTAL_CUR_LIAB: 'Current Liabilities',
TOTAL_NONCUR_LIAB: 'Noncurrent Liabilities', SHARE_CAPITAL_ADD: 'C&APIC Stock',
ST_DEBT: 'ShortTerm debts', LT_DEBT: 'LongTerm Debts',
INVENTORIES: 'Inventory & Stock'}, inplace=True)
df3_original = df_balance.loc[ticker]
# balance signals
df_balance_signals = pd.DataFrame(index=df_balance.index)
df_balance_signals['Year'] = df_balance['Year'].copy()
df_balance_signals['Return on EquityT'] = round(
(df_income['Net Income'] / (df_balance['Total Equity'] + (-1 * df_balance['Treasury Stock']))), 2).copy()
df_balance_signals['Liabilities to EquityT'] = round(
(df_balance['Total Liabilities'] / (df_balance['Total Equity'] + (-1 * df_balance['Treasury Stock']))),
2).copy()
df_balance_signals['Debt (LS) to EquityT'] = round(
((df_balance['LongTerm Debts'] + df_balance['ShortTerm debts']) / (df_balance['Total Equity'] +
(-1 * df_balance['Treasury Stock']))), 2).copy()
df_balance_signals['Long Term Debt Coverage'] = round((df_income['Net Income'] / df_balance['LongTerm Debts']),
2).copy()
df_balance_signals['Long Term Debt Coverage'] = df_balance_signals['Long Term Debt Coverage'].replace([np.inf, -np.inf],
0)
df_balance_signals['Current Ratio'] = round((df_balance['Current Assets'] / df_balance['Current Liabilities']),
2).copy()
df_balance_signals['Return on Assets%'] = round((df_income['Net Income'] / df_balance['Total Assets']) * 100, 2).copy()
df_balance_signals['Retained Earning to Equity%'] = round(
(df_balance['Retained Earnings'] / df_balance['Total Equity']) * 100, 2).copy()
df_balance_signals['Receivables of Revenue%'] = round((df_balance['Accounts Receivable'] / df_income['Revenue']) * 100,
2).copy()
df_balance_signals['PP&E of Assets%'] = round((df_balance['Prop Plant & Equipment'] / df_balance['Total Assets']) * 100,
2).copy()
df_balance_signals['Inventory of Assets%'] = round((df_balance['Inventory & Stock'] / df_balance['Total Assets']) * 100,
2).copy()
df4_original = df_balance_signals.loc[ticker]
# balance growth per year
balance_growth = pd.DataFrame(index=df_balance.index)
balance_growth['Year'] = df_balance['Year'].copy()
balance_growth['Cash Growth'] = df_balance['Cash & Equivalent'].pct_change().mul(100).round(2).copy()
balance_growth['Inventory Growth'] = df_balance['Inventory & Stock'].pct_change().mul(100).round(2).copy()
balance_growth['Current Assets Growth'] = df_balance['Current Assets'].pct_change().mul(100).round(2).copy()
balance_growth['PP&E Growth'] = df_balance['Prop Plant & Equipment'].pct_change().mul(100).round(2).copy()
balance_growth['Investment Growth'] = df_balance['Long Term Investments'].pct_change().mul(100).round(2).copy()
balance_growth['Asset Growth'] = df_balance['Total Assets'].pct_change().mul(100).round(2).copy()
balance_growth['Liability Growth'] = df_balance['Total Liabilities'].pct_change().mul(100).round(2).copy()
balance_growth['Retained Earnings Growth'] = df_balance['Retained Earnings'].pct_change().mul(100).round(2).copy()
balance_growth['Equity Growth'] = df_balance['Total Equity'].pct_change().mul(100).round(2).copy()
balance_growth = balance_growth.fillna(0)
# balance compound growth
df_balance_compound_original = pd.DataFrame()
df_balance_compound_original['Cash %'] = []
df_balance_compound_original['Inventory %'] = []
df_balance_compound_original['Current Assets %'] = []
df_balance_compound_original['PP&E %'] = []
df_balance_compound_original['Long Term Investment%'] = []
df_balance_compound_original['Assets %'] = []
df_balance_compound_original['Liability %'] = []
df_balance_compound_original['Retained Earnings %'] = []
df_balance_compound_original['Equity %'] = []
# Meta tags for viewport responsiveness
meta_viewport = {"name": "viewport", "content": "width=device-width, initial-scale=1, shrink-to-fit=no"}
dashapp1 = dash.Dash(__name__,
server=app,
url_base_pathname='/dashboard/',
assets_folder=get_root_path(__name__) + '/assets/',
meta_tags=[meta_viewport])
#html.Img(src= dashapp1.get_asset_url('stock-icon.png'))
dashapp1.title = 'Financial Statements'
dashapp1.config['suppress_callback_exceptions'] = True
dashapp1.layout = html.Div([
html.Div([
html.H2('Fundemental Analysis'),
html.A(html.Button(id="logout-button", n_clicks=0, children="Log Out", className="logout2"),
href='https://financial8999.herokuapp.com/logout/'),
html.Img(src= dashapp1.get_asset_url('stock-icon.png')),
# html.Img(src= dashapp1.get_asset_url('stock-icon.png'))
], className="banner"),
html.Div([
dcc.Dropdown(id='drop-down', options=[
{'label': i, 'value': i} for i in df_names
], value=ticker, multi=False, placeholder='Enter a ticker'),
], className='drops'),
dcc.Tabs(id="tabs", value='Tab2', className='custom-tabs-container', children=[
dcc.Tab(label='Portfolio tracker', id='tab1', value='Tab1', selected_className='custom-tab--selected',
children=[]),
dcc.Tab(label='Financial Statements', id='tab2', value='Tab2', selected_className='custom-tab--selected',
children=[
dcc.Tabs(className='sub-tab-container', id='sub-tabs', value='tab-1', children=[
dcc.Tab(label='Income Statement', selected_className='sub-tab', value='tab-1'),
dcc.Tab(label='Balance Sheet', selected_className='sub-tab', value='tab-2'),
dcc.Tab(label='Cash Flow statement ', selected_className='sub-tab', value='tab-3'),
]),
html.Div(id='tabs-content')
]),
dcc.Tab(label='Intrinsic value estimations', id='tab3', value='Tab3', selected_className='custom-tab--selected',
children=["yo"]),
dcc.Tab(label='Machine learning', id='tab4', value='Tab4', selected_className='custom-tab--selected',
children=["yo"]),
]),
html.Div([ # modal div
html.Div([ # content div
html.Img(
id='modal-close-button',
src= dashapp1.get_asset_url('times-circle-solid.svg'),
# html.Img(src= dashapp1.get_asset_url('times-circle-solid.svg'))
n_clicks=0,
className='info-icon2',
style={'margin': 0},
),
html.Div(
children=[
dcc.Markdown(dedent('''
The Income Statement has been simplified by dividing by 1,000,000.
_**SGA**_ - Companies that do not have competitive advantage suffer from intense competition
showing wild variation in SGA (selling, general and administrative) costs as a percentage of
gross profit.
_**R&D**_ - Companies that spend heavily on R&D have an inherent flaw in their competitive
advantage that will always put their long term economics at risk since what seems like long
term competitive advantage is bestowed by a patent or technological advancement that will
expire or be replaced by newer technologies. Furthermore, since they constantly have to
invest in new products they must also redesign and update sales programs increasing
administrative costs.
_**A&D**_ – Machinery and equipment eventually wear out over time with the amount they
depreciate each year deducted from gross profit. Depreciation is a real cost of doing
business because at some point in the future the printing press will need to be replaced.
_**Interest Expense**_ – Interest paid out during the year is reflective of the total debt that
a company is carrying on its books. It can be very informative as to the level of economic
danger a company is in. Generally speaking, in any given industry, the company with the
lowest ratio of interest payments to operating income has some kind of competitive advantage.
_**Pre Tax Income**_ – This is the number Warren Buffet uses when calculating the return
he’ll be getting from a business as all investments are marketed on a pre tax basis. Since
all investments compete with each other, it is easier to think about them on equal terms.
_**Net Income**_ – Must have a historical uptrend with consistent earnings. Share
repurchasing increase per share earnings by decreasing the shares outstanding – while a lot
of analysts look at per share earnings, Warren Buffet looks at the business as a whole and
its net earnings to see what is actually happening.
'''))]
),
],
style={'textAlign': 'center', },
className='modal-content',
),
], id='modal', className='modal', style={"display": "none"}),
html.Div([ # modal div
html.Div([ # content div
html.Img(
id='modal-close-button2',
src= dashapp1.get_asset_url('times-circle-solid.svg'),
# html.Img(src= dashapp1.get_asset_url('stock-icon.png'))
n_clicks=0,
className='info-icon2',
style={'margin': 0},
),
html.Div(
children=[
dcc.Markdown(dedent('''
_**Gross Profit Margin**_ - Companies with excellent economics and high profit margins tend to
have a durable competitive advantage as they have the freedom to price their products well in
excess of costs of goods sold. Without competitive advantage companies have too compete by
lowering their prices of products or service they are selling. As a general rule 40% or better
tend to have durable competitive advantage
_**SGA of Gross Profit**_ – Anything under 30% of gross profit is considered fantastic. However,
there are lots of companies with durable competitive advantage that have SGA expenses in 30-80%.
_**D&A of Gross Profit**_ – Companies with durable competitive advantage have low depreciation
costs e.g. Coca Cola at 6% compared to GM at 22-57%.
_**Interest of Operating Income**_ – Warren Buffet’s favourite durable competitive advantage
holders in the consumer products category have interest pay-outs of less than 15% of operating
income. This changes from industry to industry e.g Wells Fargo has 30% of operating income on
interest because it’s a bank.
_**Tax**_ – Check how much a company pays in taxes. Businesses that are busy misleading the IRS
are usually hard at work misleading their shareholders as well. Companies with long term
competitive advantage make so much money it doesn’t have to mislead anyone to look good.
_**Net Income to Revenue**_ – A company showing net earnings history of more than 20% of revenue
is likely to be benefitting from durable competitive advantage long term. If under 10% it may not
have competitive advantage but 10-20% are lots of good businesses ripe for the mining long term
investment gold. E.g Coca Cola with 21%, Moody’s with 31% compared with Southwest Airlines with a
meagre 7% which reflects the highly competitive nature of the airline business.
Although an exception to this is banks and financial institutions where abnormally high ratios is
seen as a slacking off for the risk management department and acceptance of greater risk for
easier money.
'''))]
),
],
style={'textAlign': 'center', },
className='modal-content',
),
], id='modal2', className='modal', style={"display": "none"}),
html.Div([ # modal div
html.Div([ # content div
html.Img(
id='modal-close-button3',
src= dashapp1.get_asset_url('times-circle-solid.svg'),
# html.Img(src= dashapp1.get_asset_url('stock-icon.png'))
n_clicks=0,
className='info-icon2',
style={'margin': 0},
),
html.Div(
children=[
dcc.Markdown(dedent('''
_**Cash & Short-term Investments**_ – A low amount or lack of cash stockpile usually means that the
company has poor or mediocre economics. Companies that have a surplus of cash resulting from
ongoing business activities, little or no debt, no new sales of shares or assets and a history of
consistent earnings probably have excellent economics and competitive advantage working in their
favour. If we see a lot of cash and marketable securities with little to no debt, chances are the
business will sail through troubled times.
_**Property plant and equipment**_ (net accumulated depreciation) – Companies that are in constant
competition constantly have to update their manufacturing facilities to try to stay competitive
often before equipment is already worn out. This creates an ongoing expense that is often quite
substantial and keeps adding to the amount of plant and equipment the company lists on its
balance sheet. A company with durable competitive advantage doesn’t need to constantly upgrade
its plant and equipment to stay competitive. Instead it replaces equipment as they wear out. PP&E
depreciates in value over time.
_**Short term debts**_ – Money owed and due within a year is historically cheaper than long term
money. Institutions make money by borrowing short term and lending long term but the problem with
this is money borrowed in the short term needs to be payed off. This works fine until short term
rates jump above what we leant long term. This makes aggressive borrowers of short-term money at
the mercy of sudden shifts in the credit market. Smartest and safest way to make money is borrow
money long term and lend it long term. Warren does not invest in companies with lots of
short-term debt. E.g Wells Fargo has $0.57 of short-term debt to every dollar of long-term debt
compared to Bank of America who has $2.09.
_**Long term debt**_ – Some companies lump it with short term debt which creates the illusion
that the company has more short-term debt then it actually does. As a rule, companies with
durable competitive advantage have little to no long-term debt.
Sometimes an excellent business with a consumer monopoly will add large amounts of debt to
finance the acquisition of another business, if so check the acquisition is also a consumer
monopoly – when two combine lots of excess profits quickly reduce these debt mountains but when a
consumer monopoly acquires a commodity business it will only suck out profits to support its poor
economics.
_**Treasury shares**_ – Shares set aside that can be brought back for additional funding and reduces
the number of shares owned by private investors lowering the amount that must be paid out in
dividends. If a company feels the market has undervalued its business, it might buy back some
shares possibly reissuing once the price has been corrected. Reducing the number of shares boosts
certain ratios as a form of financial engineering such as earnings per share which causes short
term investors to flock back to stock seeing improved ratios increasing share price.
_**Retained Earnings**_ – Net Income can either be paid out as a dividend, used to buy back
company shares or it can be retained to keep the business growing. When income is retained it is
put on the balance sheet under shareholders equity and when they are profitability used,
they can greatly improve the long-term economic picture of the business.
It is an accumulated number which means each year new retained earnings are added to the total
accumulated retained earnings years prior. This is one of the most important metrics when
determining if a business has durable competitive advantage – if a company is not adding to its
retained earnings pool it is not growing its long term net worth and is unlikely to make you
super rich long term.
Not all growth in retained earnings is due to incremental increases in sales of existing
products, some off it is due to the acquisition of other businesses. When two companies merge,
their retained earnings pools are joined which creates an even larger pool.
_**Leverage**_ – using debt to increase earnings of a company can give of the illusion of
competitive advantage. The problem is while there seems to be some consistency in the income
stream the source paying the interest payments may not be able to maintain these payments – just
look at the sub prime lending crisis where banks borrowed billions at 6% and loaned at 8% to
homebuyers but when the economy started to slip these buyers started to default on mortgages.
These subprime borrowers did not have a durable source of income which ultimately meant
investment banks didn’t have either.
In assessing the quality and durability of a company’s competitive advantage, Warren Buffet
avoids businesses that use a lot of leverage to generate earnings – in the short run they appear
to be the goose that lays the golden egg but at the end of the day they are not. _**“Only when
the tide goes out do you discover who's been swimming naked.”**_
'''))]
),
],
style={'textAlign': 'center', },
className='modal-content',
),
], id='modal3', className='modal', style={"display": "none"}),
])
# callback
@dashapp1.callback(Output('tabs-content', 'children'),
                   [Input('sub-tabs', 'value')])
def render_content(tab):
    """Build the page body for the selected sub-tab.

    'tab-1' renders the income-statement view and 'tab-2' the balance-sheet
    view; any other value returns None (nothing rendered).  Tables and
    graphs are created empty here — their ``data``/``figure`` props are
    filled by the id-keyed callbacks defined below.

    Fix: the tab-1 branch previously ended with a stray trailing comma
    (``return html.Div([...]),``), making it return a 1-tuple instead of a
    bare component like the tab-2 branch.
    """
    if tab == 'tab-1':
        return html.Div([
            html.Div([
                html.H6('Annual Income Statement'),
                html.Img(
                    id='instructions-button',
                    src=dashapp1.get_asset_url('question-circle-solid.svg'),
                    n_clicks=0,
                    className='info-icon',
                ),
            ], className='annual-income'),
            html.Div([
                dash_table.DataTable(
                    style_cell={
                        'whiteSpace': 'normal',
                        'height': 'auto',
                    },
                    style_table={
                        'width': '95%',
                        'margin': '0px 20px 0px'
                    },
                    id='table',
                    columns=[{"name": i, "id": i} for i in df_income.columns]
                )
            ]),
            html.Div([
                dcc.Graph(id='sales', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "32vw",
                    "float": "left",
                    'display': 'inline-block',
                    "margin-left": "20px"
                }),
                dcc.Graph(id='costs', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "32vw",
                    "float": "left",
                    'display': 'inline-block',
                }),
                dcc.Graph(id='operating', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "32vw",
                    "float": "left",
                    "display": "inline-block",
                }),
            ], style={"height": "50vh", "width": "98vw", "margin-top": "20px"}),
            html.Div([
                dcc.Graph(id='interest', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "32vw",
                    "float": "left",
                    'display': 'inline-block',
                    "margin-left": "20px"
                }),
                dcc.Graph(id='tax', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "32vw",
                    "float": "left",
                    'display': 'inline-block'
                }),
                dcc.Graph(id='shares', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "30vw",
                    "float": "left",
                    'display': 'inline-block'
                }),
            ], style={"height": "50vh", "width": "98vw", "margin-top": "-20px"}),
            html.Div([
                html.H6('Key Ratios %'),
                html.Img(
                    id='instructions-button2',
                    src=dashapp1.get_asset_url('question-circle-solid.svg'),
                    n_clicks=0,
                    className='info-icon3',
                ),
            ], className='text1'),
            html.Div([
                dash_table.DataTable(
                    style_table={
                        'width': '95%',
                        'margin': '0px 20px 0px'
                    },
                    id='table2',
                    columns=[{"name": i, "id": i} for i in df2_original.columns]
                )
            ]),
            html.Div([
                dcc.Graph(id='profit-margin', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "31vw",
                    "float": "left",
                    'display': 'inline-block',
                    "margin-left": "20px"
                }),
                dcc.Graph(id='SGA', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "31vw",
                    "float": "left",
                    'display': 'inline-block',
                    "margin-left": "20px"
                }),
                dcc.Graph(id='R&D', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "30vw",
                    "float": "left",
                    "display": "inline-block",
                    "margin-left": "20px"
                }),
            ], style={"height": "50vh", "width": "98vw", "margin-top": "20px"}),
            html.Div([
                dcc.Graph(id='operating-margin-ratio', config={'displayModeBar': False},
                          style={
                              "height": "40vh",
                              "width": "32vw",
                              "float": "left",
                              'display': 'inline-block',
                              "margin-left": "20px"
                          }),
                dcc.Graph(id='interest-coverage', config={'displayModeBar': False},
                          style={
                              "height": "40vh",
                              "width": "32vw",
                              "float": "left",
                              'display': 'inline-block'
                          }),
                dcc.Graph(id='taxes-paid', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "30vw",
                    "float": "left",
                    'display': 'inline-block'
                }),
            ], style={"height": "50vh", "width": "98vw", "margin-top": "-20px"}),
            html.Div([
                html.H6('Growth Signals')
            ], className='text2'),
            html.Div([
                dash_table.DataTable(
                    style_table={
                        'width': '95%',
                        'margin': '0px 20px 20px'
                    },
                    id='income_compound_table',
                    columns=[{"name": i, "id": i} for i in df_income_compound_original.columns],
                )
            ]),
            html.Div([
                dash_table.DataTable(
                    style_table={
                        'width': '95%',
                        'margin': '0px 20px 0px'
                    },
                    id='table_growth',
                    columns=[{"name": i, "id": i} for i in df1_growth.columns]
                )
            ]),
        ])
    elif tab == 'tab-2':
        return html.Div([
            html.Div([
                html.H6('Annual Balance Sheets'),
                html.Img(
                    id='instructions-button3',
                    src=dashapp1.get_asset_url('question-circle-solid.svg'),
                    n_clicks=0,
                    className='info-icon4',
                ),
            ], className='annual-income'),
            html.Div([
                dash_table.DataTable(
                    style_cell={
                        'whiteSpace': 'normal',
                        'height': 'auto',
                    },
                    style_table={
                        'width': '95%',
                        'margin': '0px 20px 0px'
                    },
                    id='table3',
                    columns=[{"name": i, "id": i} for i in df3_original.columns],
                ),
            ]),
            html.Div([
                dcc.Graph(id='balance', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "32vw",
                    "float": "left",
                    'display': 'inline-block',
                    "margin-left": "20px"
                }),
                dcc.Graph(id='liquidity', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "32vw",
                    "float": "left",
                    'display': 'inline-block',
                }),
                dcc.Graph(id='long-term-assets', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "32vw",
                    "float": "left",
                    "display": "inline-block",
                }),
            ], style={"height": "50vh", "width": "98vw", "margin-top": "20px"}),
            html.Div([
                dcc.Graph(id='current debts', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "32vw",
                    "float": "left",
                    'display': 'inline-block',
                    "margin-left": "20px"
                }),
                dcc.Graph(id='non-current-debts', config={'displayModeBar': False},
                          style={
                              "height": "40vh",
                              "width": "32vw",
                              "float": "left",
                              'display': 'inline-block',
                          }),
                dcc.Graph(id='retained-earnings', config={'displayModeBar': False},
                          style={
                              "height": "40vh",
                              "width": "30vw",
                              "float": "left",
                              "display": "inline-block",
                          }),
            ], style={"height": "50vh", "width": "98vw", "margin-top": "-20px"}),
            html.Div([
                html.H6('Balance Signals')
            ], className='text2'),
            html.Div([
                dash_table.DataTable(
                    style_cell={
                        'whiteSpace': 'normal',
                        'height': 'auto',
                    },
                    style_table={
                        'width': '95%',
                        'margin': '0px 20px 0px'
                    },
                    id='table4',
                    columns=[{"name": i, "id": i} for i in df4_original.columns],
                )
            ]),
            html.Div([
                dcc.Graph(id='equity_returns', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "32vw",
                    "float": "left",
                    'display': 'inline-block',
                    "margin-left": "20px"
                }),
                dcc.Graph(id='retained_equity', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "32vw",
                    "float": "left",
                    'display': 'inline-block',
                }),
                dcc.Graph(id='assets_return', config={'displayModeBar': False}, style={
                    "height": "40vh",
                    "width": "32vw",
                    "float": "left",
                    "display": "inline-block",
                }),
            ], style={"height": "50vh", "width": "98vw", "margin-top": "20px"}),
            html.Div([
                html.H6('Growth Signals')
            ], className='text2'),
            html.Div([
                dash_table.DataTable(
                    style_table={
                        'width': '95%',
                        'margin': '0px 20px 0px'
                    },
                    id='balance_compound_growth',
                    columns=[{"name": i, "id": i} for i in df_balance_compound_original.columns]
                )
            ]),
            html.Div([
                dash_table.DataTable(
                    style_cell={
                        'whiteSpace': 'normal',
                        'height': 'auto',
                    },
                    style_table={
                        'width': '95%',
                        'margin': '20px 20px 0px'
                    },
                    id='balance_growth',
                    columns=[{"name": i, "id": i} for i in balance_growth.columns],
                )
            ])
        ])
@dashapp1.callback(
    Output('table', 'data'),
    [Input("drop-down", "value")])
def update_data(input_value):
    """Serve the income-statement rows for the selected company as DataTable records."""
    try:
        return df_income.loc[input_value].to_dict("records")
    except TypeError:
        return None
@dashapp1.callback(
    Output('table_growth', 'data'),
    [Input("drop-down", "value")])
def update_data(input_value):
    # Serve the year-over-year income growth rows for the selected company.
    try:
        # Zero out the first row's growth columns — there is no prior year to
        # compare against.
        # NOTE(review): chained indexing (df.loc[v]['col'][0] = 0) can assign
        # to a temporary copy instead of df1_growth itself (pandas
        # SettingWithCopy) — confirm the mutation actually sticks.
        df1_growth.loc[input_value]['Revenue Growth'][0] = 0
        df1_growth.loc[input_value]['Profit Growth'][0] = 0
        df1_growth.loc[input_value]['Operating Income Growth'][0] = 0
        df1_growth.loc[input_value]['Pretax Income Growth'][0] = 0
        df1_growth.loc[input_value]['Net Income Growth'][0] = 0
        growth = df1_growth.loc[input_value]
        data = growth.to_dict("records")
        return data
    except TypeError:
        # No valid selection in the drop-down — leave the table unpopulated.
        pass
@dashapp1.callback(
    Output('income_compound_table', 'data'),
    [Input("drop-down", "value")])
def update_data(input_value):
    """Compute compound annual growth rates (CAGR, %) over the selected
    company's full reporting span for the key income-statement lines (plus
    inventory from the balance sheet) and serve them as a one-row table.

    Returns None when the drop-down has no valid selection or a series is
    empty (TypeError/IndexError path).
    """
    try:
        df1 = df_income.loc[input_value]
        df3 = df_balance.loc[input_value]
        # Span in years between the first and last reported periods.
        years_data = df1['Year'][-1] - df1['Year'][0]

        def cagr(series):
            # CAGR % = ((last / first) ** (1 / years) - 1) * 100, 2 dp.
            change = series[-1] / series[0]
            return round((((change ** (1 / years_data)) - 1) * 100), 2)

        df_income_compound = pd.DataFrame()
        df_income_compound['Revenue %'] = [cagr(df1['Revenue'])]
        df_income_compound['Inventory %'] = [cagr(df3['Inventory & Stock'])]
        df_income_compound['Gross Profit %'] = [cagr(df1['Gross Profit'])]
        df_income_compound['Operating Income %'] = [cagr(df1['Operating Income'])]
        df_income_compound['Pre tax %'] = [cagr(df1['Pretax Income'])]
        df_income_compound['Net Income %'] = [cagr(df1['Net Income'])]
        return df_income_compound.to_dict("records")
    except (TypeError, IndexError):
        pass
@dashapp1.callback(
    Output('table2', 'data'),
    [Input("drop-down", "value")])
def update_data(input_value):
    """Serve the key-ratio rows for the selected company as DataTable records."""
    try:
        return df_signals.loc[input_value].to_dict("records")
    except TypeError:
        return None
@dashapp1.callback(
    Output('table3', 'data'),
    [Input("drop-down", "value")])
def update_data(input_value):
    """Serve the balance-sheet rows for the selected company as DataTable records."""
    try:
        return df_balance.loc[input_value].to_dict("records")
    except TypeError:
        return None
@dashapp1.callback(
    Output('sales', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot Revenue, Cost of Revenue and Gross Profit for the selected company."""
    try:
        frame = df_negative.loc[input_value]
        years = list(frame['Year'])
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        for column in ("Revenue", "Cost of Revenue", "Gross Profit"):
            fig.add_trace(go.Scatter(x=years, y=list(frame[column]), name=column))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Sales", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('costs', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot the main cost lines (OpEx, SGA, R&D, D&A) for the selected company."""
    try:
        frame = df_negative.loc[input_value]
        years = list(frame['Year'])
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        for column in ("Operating Expenses", "SGA", "R&D", "D&A"):
            fig.add_trace(go.Scatter(x=years, y=list(frame[column]), name=column))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Costs", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('operating', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot Expenses, Gross Profit and Operating Income for the selected company."""
    try:
        frame = df_negative.loc[input_value]
        years = list(frame['Year'])
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        for column in ("Expenses", "Gross Profit", "Operating Income"):
            fig.add_trace(go.Scatter(x=years, y=list(frame[column]), name=column))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Gross Profit to Operating Income", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('interest', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot income lines against Interest Expense for the selected company."""
    try:
        frame = df_negative.loc[input_value]
        years = list(frame['Year'])
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        for column in ("Operating Income", "Non Operating Income",
                       "Pretax Income", "Interest Expense"):
            fig.add_trace(go.Scatter(x=years, y=list(frame[column]), name=column))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Measuring Interest Expense", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('tax', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot Net Income, Income Tax and Pretax Income for the selected company."""
    try:
        frame = df_negative.loc[input_value]
        years = list(frame['Year'])
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        for column in ("Net Income", "Income Tax", "Pretax Income"):
            fig.add_trace(go.Scatter(x=years, y=list(frame[column]), name=column))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Measuring Tax", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('shares', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot shares outstanding over time for the selected company."""
    try:
        frame = df_negative.loc[input_value]
        fig = make_subplots()
        fig.add_trace(go.Scatter(x=list(frame['Year']), y=list(frame['Shares']),
                                 name="Shares"))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Shares", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('profit-margin', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot the Gross Profit Margin % line for the selected company."""
    try:
        df2 = df_signals.loc[input_value]
        fig7 = make_subplots()
        # Trace name fixed: was the typo "proft-maergin" (visible in legend/hover).
        fig7.add_trace(go.Scatter(x=list(df2['Year']), y=list(df2['Gross Profit Margin %']),
                                  name="profit-margin"))
        fig7.update_layout(legend=dict(x=0, y=1,
                                       traceorder="normal",
                                       font=dict(family="sans-serif", size=12, color="black"),
                                       bgcolor="rgba(50, 50, 50, 0)",
                                       bordercolor="rgba(50, 50, 50, 0)", borderwidth=0))
        fig7.update_layout(
            title={'text': "Gross Profit Margin %", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'})
        fig7.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig7.update_yaxes(rangemode="tozero")
        return fig7
    except TypeError:
        pass
@dashapp1.callback(
    Output('SGA', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot SGA as a share of gross profit for the selected company."""
    try:
        frame = df_signals.loc[input_value]
        fig = make_subplots()
        fig.add_trace(go.Scatter(x=list(frame['Year']),
                                 y=list(frame['SGA Of Gross Profit']),
                                 name="SGA", line=dict(color="#EF553B")))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "SGA of Gross Profit % ", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('R&D', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot R&D as a share of gross profit for the selected company."""
    try:
        frame = df_signals.loc[input_value]
        fig = make_subplots()
        fig.add_trace(go.Scatter(x=list(frame['Year']),
                                 y=list(frame['R&D Of Gross Profit']),
                                 name="R&D", line=dict(color='#00cc96')))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "R&D of Gross Profit % ", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('operating-margin-ratio', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot operating-margin and net-income-margin ratios for the selected company."""
    try:
        frame = df_signals.loc[input_value]
        years = list(frame['Year'])
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        for column, label in (('Operating margin ratio', "Operating Margin"),
                              ('Net income margin', "Net Income")):
            fig.add_trace(go.Scatter(x=years, y=list(frame[column]), name=label))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Margin ratio % ", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('interest-coverage', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot interest expense as a share of operating income for the selected company."""
    try:
        frame = df_signals.loc[input_value]
        fig = make_subplots()
        fig.add_trace(go.Scatter(x=list(frame['Year']),
                                 y=list(frame['Interest to Operating Income %']),
                                 name="interest-coverage", line=dict(color='#00cc96')))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Interest Coverage ratio % ", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('taxes-paid', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot the taxes-paid percentage line for the selected company."""
    try:
        frame = df_signals.loc[input_value]
        fig = make_subplots()
        fig.add_trace(go.Scatter(x=list(frame['Year']), y=list(frame['Taxes paid']),
                                 name="taxes", line=dict(color='#00cc96')))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Taxes % ", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('liquidity', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot current-asset components (liquidity) for the selected company."""
    try:
        frame = df_balance.loc[input_value]
        years = list(frame['Year'])
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        for column, label in (('Cash & Equivalent', "Cash & Equivalent"),
                              ('Accounts Receivable', "Accounts Receivables"),
                              ('Inventory & Stock', "Inventory"),
                              ('Current Assets', "Current_Assets")):
            fig.add_trace(go.Scatter(x=years, y=list(frame[column]), name=label))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Liquidity", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('long-term-assets', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot non-current asset components for the selected company."""
    try:
        frame = df_balance.loc[input_value]
        years = list(frame['Year'])
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        for column, label in (('Prop Plant & Equipment', "Prop Plant & Equipment"),
                              ('Long Term Investments', "Long Term Investments"),
                              ('Other Long Term Assets', "Other Long Term Assets"),
                              ('Noncurrent assets', "Non current Assets")):
            fig.add_trace(go.Scatter(x=years, y=list(frame[column]), name=label))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Non Current Assets", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('balance', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot total assets, liabilities and equity for the selected company."""
    try:
        frame = df_balance.loc[input_value]
        years = list(frame['Year'])
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        for column, label in (('Total Assets', "Assets"),
                              ('Total Liabilities', "Liabilities"),
                              ('Total Equity', "Equity")):
            fig.add_trace(go.Scatter(x=years, y=list(frame[column]), name=label))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Balance", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('current debts', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot current-liability components for the selected company."""
    try:
        frame = df_balance.loc[input_value]
        years = list(frame['Year'])
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        for column, label in (('Accounts Payable', "Accounts Payable"),
                              ('ShortTerm debts', "Short Term Debts"),
                              ('Current Liabilities', "Current Liabilities")):
            fig.add_trace(go.Scatter(x=years, y=list(frame[column]), name=label))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Current Debts", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('non-current-debts', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot non-current liability components for the selected company."""
    try:
        frame = df_balance.loc[input_value]
        years = list(frame['Year'])
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        for column, label in (('LongTerm Debts', "Long Term Debts"),
                              ('Noncurrent Liabilities', "Non Current Liabilities")):
            fig.add_trace(go.Scatter(x=years, y=list(frame[column]), name=label))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Non Current Debts", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('retained-earnings', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot accumulated retained earnings for the selected company."""
    try:
        frame = df_balance.loc[input_value]
        fig = make_subplots()
        fig.add_trace(go.Scatter(x=list(frame['Year']),
                                 y=list(frame['Retained Earnings']),
                                 name="retained", line=dict(color='#00cc96')))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Retained Earnings", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('table4', 'data'),
    [Input("drop-down", "value")])
def update_data(input_value):
    """Serve the balance-signal rows for the selected company as DataTable records."""
    try:
        return df_balance_signals.loc[input_value].to_dict("records")
    except TypeError:
        return None
@dashapp1.callback(
    Output('balance_growth', 'data'),
    [Input("drop-down", "value")])
def update_data(input_value):
    # Serve the year-over-year balance-sheet growth rows for the selected company.
    try:
        # Zero out the first row's growth columns — there is no prior year to
        # compare against.
        # NOTE(review): chained indexing (df.loc[v]['col'][0] = 0) can assign
        # to a temporary copy instead of balance_growth itself (pandas
        # SettingWithCopy) — confirm the mutation actually sticks.
        balance_growth.loc[input_value]['Cash Growth'][0] = 0
        balance_growth.loc[input_value]['Inventory Growth'][0] = 0
        balance_growth.loc[input_value]['Current Assets Growth'][0] = 0
        balance_growth.loc[input_value]['PP&E Growth'][0] = 0
        balance_growth.loc[input_value]['Investment Growth'][0] = 0
        balance_growth.loc[input_value]['Asset Growth'][0] = 0
        balance_growth.loc[input_value]['Liability Growth'][0] = 0
        balance_growth.loc[input_value]['Retained Earnings Growth'][0] = 0
        balance_growth.loc[input_value]['Equity Growth'][0] = 0
        growth_balance = balance_growth.loc[input_value]
        data = growth_balance.to_dict("records")
        return data
    except TypeError:
        # No valid selection in the drop-down — leave the table unpopulated.
        pass
@dashapp1.callback(
    Output('balance_compound_growth', 'data'),
    [Input("drop-down", "value")])
def update_data(input_value):
    """Compute compound annual growth rates (CAGR, %) over the selected
    company's full reporting span for the key balance-sheet lines and serve
    them as a one-row table.

    Returns None when the drop-down has no valid selection or a series is
    empty (TypeError/IndexError path).
    """
    try:
        df3 = df_balance.loc[input_value]
        # Span in years between the first and last reported periods.
        years_data = df3['Year'][-1] - df3['Year'][0]

        def cagr(series):
            # CAGR % = ((last / first) ** (1 / years) - 1) * 100, 2 dp.
            change = series[-1] / series[0]
            return round((((change ** (1 / years_data)) - 1) * 100), 2)

        df_balance_compound = pd.DataFrame()
        df_balance_compound['Cash %'] = [cagr(df3['Cash & Equivalent'])]
        df_balance_compound['Inventory %'] = [cagr(df3['Inventory & Stock'])]
        df_balance_compound['Current Assets %'] = [cagr(df3['Current Assets'])]
        df_balance_compound['PP&E %'] = [cagr(df3['Prop Plant & Equipment'])]
        df_balance_compound['Long Term Investment%'] = [cagr(df3['Long Term Investments'])]
        df_balance_compound['Assets %'] = [cagr(df3['Total Assets'])]
        df_balance_compound['Liability %'] = [cagr(df3['Total Liabilities'])]
        df_balance_compound['Retained Earnings %'] = [cagr(df3['Retained Earnings'])]
        df_balance_compound['Equity %'] = [cagr(df3['Total Equity'])]
        return df_balance_compound.to_dict("records")
    except (TypeError, IndexError):
        pass
@dashapp1.callback(
    Output('equity_returns', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot return-on-equity and leverage-to-equity ratios for the selected company."""
    try:
        frame = df_balance_signals.loc[input_value]
        years = list(frame['Year'])
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        for column, label in (('Return on EquityT', "Return on Equity"),
                              ('Liabilities to EquityT', "Liabilities to Equity"),
                              ('Debt (LS) to EquityT', "Debt to Equity")):
            fig.add_trace(go.Scatter(x=years, y=list(frame[column]), name=label))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Risk and Earnings Power", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('retained_equity', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot retained earnings as a share of equity for the selected company."""
    try:
        frame = df_balance_signals.loc[input_value]
        fig = make_subplots()
        fig.add_trace(go.Scatter(x=list(frame['Year']),
                                 y=list(frame['Retained Earning to Equity%']),
                                 name="retained", line=dict(color='#00cc96')))
        fig.update_layout(
            legend=dict(x=0, y=1, traceorder="normal",
                        font=dict(family="sans-serif", size=12, color="black"),
                        bgcolor="rgba(50, 50, 50, 0)",
                        bordercolor="rgba(50, 50, 50, 0)", borderwidth=0),
            title={'text': "Retained Earnings to Equity", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'},
            margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig.update_yaxes(rangemode="tozero")
        return fig
    except TypeError:
        pass
@dashapp1.callback(
    Output('assets_return', 'figure'),
    [Input("drop-down", "value")])
def update_fig(input_value):
    """Plot asset-allocation ratios (ROA, PP&E and inventory shares of assets)."""
    try:
        df4 = df_balance_signals.loc[input_value]
        fig22 = make_subplots(specs=[[{"secondary_y": True}]])
        # Trace name fixed: was the typo "Return om Assets" (visible in legend/hover).
        fig22.add_trace(go.Scatter(x=list(df4['Year']), y=list(df4['Return on Assets%']),
                                   name="Return on Assets"))
        fig22.add_trace(
            go.Scatter(x=list(df4['Year']), y=list(df4['PP&E of Assets%']), name="PP&E of Assets"))
        fig22.add_trace(
            go.Scatter(x=list(df4['Year']), y=list(df4['Inventory of Assets%']),
                       name="Inventory of Assets"))
        fig22.update_layout(legend=dict(x=0, y=1,
                                        traceorder="normal",
                                        font=dict(family="sans-serif", size=12, color="black"),
                                        bgcolor="rgba(50, 50, 50, 0)",
                                        bordercolor="rgba(50, 50, 50, 0)",
                                        borderwidth=0))
        fig22.update_layout(
            title={'text': "Assets allocation", 'y': 0.96, 'x': 0.5,
                   'xanchor': 'center', 'yanchor': 'top'})
        fig22.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
        fig22.update_yaxes(rangemode="tozero")
        return fig22
    except TypeError:
        pass
@dashapp1.callback(Output('modal', 'style'),
[Input('instructions-button', 'n_clicks')])
def show_modal(n):
if n > 0:
return {"display": "block"}
return {"display": "none"}
@dashapp1.callback(Output('instructions-button', 'n_clicks'),
[Input('modal-close-button', 'n_clicks')])
def close_modal(n):
if n is not None:
# return {"display": "none"}
return 0
@dashapp1.callback(Output('modal2', 'style'),
[Input('instructions-button2', 'n_clicks')])
def show_modal(n):
if n > 0:
return {"display": "block"}
return {"display": "none"}
@dashapp1.callback(Output('instructions-button2', 'n_clicks'),
[Input('modal-close-button2', 'n_clicks')])
def close_modal(n):
if n is not None:
# return {"display": "none"}
return 0
@dashapp1.callback(Output('modal3', 'style'),
[Input('instructions-button3', 'n_clicks')])
def show_modal(n):
if n > 0:
return {"display": "block"}
return {"display": "none"}
@dashapp1.callback(Output('instructions-button3', 'n_clicks'),
[Input('modal-close-button3', 'n_clicks')])
def close_modal(n):
if n is not None:
# return {"display": "none"}
return 0
_protect_dashviews(dashapp1)
def _protect_dashviews(dashapp):
    """Wrap every Flask view under the Dash app's URL base in login_required."""
    prefix = dashapp.config.url_base_pathname
    views = dashapp.server.view_functions
    for name in list(views):
        if name.startswith(prefix):
            views[name] = login_required(views[name])
def register_extensions(server):
    """Bind the shared Flask extensions (db, auth, migrations, bootstrap) to *server*."""
    from app.extensions import db, login, migrate, bootstrap

    db.init_app(server)
    bootstrap.init_app(server)
    login.init_app(server)
    # Unauthenticated users are redirected to the main login endpoint.
    login.login_view = 'main.login'
    migrate.init_app(server, db)
def register_blueprints(server):
    """Attach the main web blueprint to the Flask server."""
    from app.webapp import server_bp
    server.register_blueprint(server_bp)
|
{"hexsha": "8461ac15ec093a0d0b54a36af9f790ff81ad6ef2", "size": 77048, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/dashapp1/layout.py", "max_stars_repo_name": "samisyed8999/financial-data", "max_stars_repo_head_hexsha": "4073b73eb600bdec30fd065945171d487f2f7854", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app/dashapp1/layout.py", "max_issues_repo_name": "samisyed8999/financial-data", "max_issues_repo_head_hexsha": "4073b73eb600bdec30fd065945171d487f2f7854", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app/dashapp1/layout.py", "max_forks_repo_name": "samisyed8999/financial-data", "max_forks_repo_head_hexsha": "4073b73eb600bdec30fd065945171d487f2f7854", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.4571054355, "max_line_length": 124, "alphanum_fraction": 0.5151074655, "include": true, "reason": "import numpy", "num_tokens": 17063}
|
# Basic imports
import boto3
import datetime
import openpyxl
import time
import random
import base64
import pickle
import ast
import copy
from haversine import haversine
import os
# Data analysis/viz imports
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import networkx as nx
import plotly.graph_objs as go
import plotly.plotly as py
# Web app imports
import json
import requests
from flask import Flask
from flask import send_from_directory
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash
import dash_auth
"""
# External scripts imports
import config
# TODO: REPLACE BELOW API KEYS WITH YOUR OWN
AWS_SECRET_ACCESS_KEY = config.AWS_SECRET_ACCESS_KEY
AWS_ACCESS_KEY_ID = config.AWS_ACCESS_KEY_ID
AWS_REGION_NAME = config.AWS_REGION_NAME
GOOGLE_MAPS_API_KEY = config.GOOGLE_MAPS_API_KEY
"""
# Secrets come from the environment; a config.py fallback is kept commented
# out above for local development. KeyError here means a missing variable.
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_REGION_NAME = os.environ['AWS_REGION_NAME']
GOOGLE_MAPS_API_KEY = os.environ['GOOGLE_MAPS_API_KEY']
# Instantiate clients
# DynamoDB serves the state -> cities lookup (table 'United_States_Cities').
dynamodb = boto3.client('dynamodb', region_name=AWS_REGION_NAME, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
# ===============================================================================================================================
# Web Framework
# ===============================================================================================================================
server = Flask(__name__)
# Dash is mounted on the Flask server under /travelplanner/.
app = dash.Dash('auth', server=server, url_base_pathname='/travelplanner/')
app.title = "USA Travel Planner"
# Callbacks reference component ids created dynamically, so skip id validation.
app.config.suppress_callback_exceptions = True
# Scripts and css served locally for faster response time
# app.scripts.config.serve_locally = True is used as it takes a long time to download plotly.js from CDN
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
# ===============================================================================================================================
# Logic
# ===============================================================================================================================
def state_dropdown():
    """Return the drop-down option list ({'label','value'} pairs) for all 50 US states."""
    states = [
        'Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado',
        'Connecticut', 'Delaware', 'Florida', 'Georgia', 'Hawaii', 'Idaho',
        'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky', 'Louisiana',
        'Maine', 'Maryland', 'Massachusetts', 'Michigan', 'Minnesota',
        'Mississippi', 'Missouri', 'Montana', 'Nebraska', 'Nevada',
        'New Hampshire', 'New Jersey', 'New Mexico', 'New York',
        'North Carolina', 'North Dakota', 'Ohio', 'Oklahoma', 'Oregon',
        'Pennsylvania', 'Rhode Island', 'South Carolina', 'South Dakota',
        'Tennessee', 'Texas', 'Utah', 'Vermont', 'Virginia', 'Washington',
        'West Virginia', 'Wisconsin', 'Wyoming',
    ]
    return [{'label': state, 'value': state} for state in states]
def locate_property(state_value, city_value, property_value,):
    """Geocode the accommodation with the Google Places text-search API.

    Returns (property_name, (lat, lng)) for the first match, or the
    sentinel ("Error", (0, 0)) when the API returns no usable result.
    """
    # Find place location
    address = state_value + " " + city_value + " " + property_value
    url = "https://maps.googleapis.com/maps/api/place/textsearch/json?query={}&key={}".format(address, GOOGLE_MAPS_API_KEY)
    req = requests.get(url)
    data = req.json()
    try:
        print(str(data))
        list_loc = data['results']
        property_name = list_loc[0]['name']
        property_loc = (list_loc[0]['geometry']['location']['lat'], list_loc[0]['geometry']['location']['lng'])
        print ("Properties found!")
        print(list_loc)
        print("Selecting first property: "+str(property_name))
        return(property_name, property_loc)
    except (KeyError, IndexError, TypeError):
        # Narrowed from a bare except (which also hid programming errors):
        # missing 'results' / empty result list means "not found".
        return ("Error", (0,0))
def locate_nearby_attractions(property_loc, attractions_value, duration_value):
    """Query Google Places nearby-search for each selected attraction type.

    property_loc: (lat, lng) of the accommodation; (0, 0) is the failure
    sentinel from locate_property and short-circuits to [].
    attractions_value: list of Places type strings (e.g. 'museum').
    duration_value: accepted but unused here — presumably reserved for the
    clustering step; confirm with callers before removing.
    Returns [[name, (lat, lng), type_string], ...].
    """
    # Find nearby attractions
    if (len(attractions_value)==0 or property_loc==(0,0)):
        return ([])
    else:
        # Search radius in metres (10 km) around the accommodation.
        radius = 10000
        POIs = []
        for attraction in attractions_value:
            url2 = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={},{}&radius={}&type={}&key={}&rankby=prominence&language=en".format(property_loc[0], property_loc[1], radius, attraction, GOOGLE_MAPS_API_KEY)
            print("Sending request to: "+url2)
            req2 = requests.get(url2)
            data2 = req2.json()
            # NOTE(review): a response without 'results' raises KeyError here —
            # no error handling; confirm callers tolerate that.
            list_near_by = data2['results']
            for near_by in list_near_by:
                attraction_name = near_by['name']
                attraction_loc = (near_by['geometry']['location']['lat'], near_by['geometry']['location']['lng'])
                POIs.append([attraction_name.title(), attraction_loc, attraction])
            print("Nearby attractions found!")
            print(POIs)
        return (POIs)
def cluster_attractions(POIs, duration_value):
    """Group attractions into per-day clusters and save a scatterplot to /tmp.

    POIs: [[name, (lat, lng), type], ...] from locate_nearby_attractions.
    duration_value: number of days, i.e. number of clusters.
    Returns [group_count, group1, group2, ...]; [0] when there is nothing
    to plot. Side effect: writes /tmp/scatterplot.png.
    """
    length_POI = len(POIs)
    if length_POI == 0:
        return ([0])
    location_array = [[attraction[1][0], attraction[1][1]] for attraction in POIs]
    X = np.array(location_array)
    if length_POI <= duration_value:
        # Fewer attractions than days: one attraction per day, no clustering.
        location_df = pd.DataFrame(X, columns=["lat", "long"])
        fig = plt.figure(figsize=(8, 6))
        # BUG FIX: draw onto this figure's axes. DataFrame.plot otherwise
        # creates its own figure, so fig.savefig stored an empty canvas.
        location_df.plot(x="lat", y="long", kind="scatter",
                         title='Coordinates of Locations', ax=fig.gca())
        fig.savefig('/tmp/scatterplot.png', dpi=fig.dpi)
        responseList = [length_POI]
        for attraction in POIs:
            responseList.append([attraction])
        return (responseList)
    else:
        kmeans = KMeans(n_clusters=duration_value, random_state=0).fit(X)
        labels = kmeans.labels_
        fig = plt.figure(figsize=(8, 6))
        plt.ylabel('Long')
        plt.xlabel('Lat')
        plt.title('Coordinates of Locations')
        plt.scatter(X[:, 0], X[:, 1], c=kmeans.labels_.astype(float))
        fig.savefig('/tmp/scatterplot.png', dpi=fig.dpi)
        # responseList[k+1] collects the attractions of cluster k.
        responseList = [duration_value]
        for i in range(1, duration_value + 1):
            responseList.append([])
        for idx, value in enumerate(POIs, 0):
            responseList[labels[idx] + 1].append(value)
        return (responseList)
def get_display(idx, row, attractionTypeDic, outputNumber):
    """Build one selectable attraction card (name, type, location, checkbox).

    Returns a 1-tuple so callers can extend() a list of cards. The card and
    checkbox ids embed outputNumber (day) and idx (row) for later lookup.
    """
    suffix = str(outputNumber) + "-" + str(idx)
    card = html.Div(
        [
            html.P([row['Attraction']], className='ad-headline123'),
            html.P([attractionTypeDic[row['Type']]], className='ad-description123'),
            html.P([str(row['Location'])], className='ad-description123'),
            dcc.Checklist(
                options=[{'label': '', 'value': 'selected'}],
                values=[],
                labelStyle={'display': 'inline-block'},
                id='adboxCheckbox' + suffix
            )
        ], id='adbox' + suffix, className='col-md-4 adbox')
    return (card,)
def display_output(df, outputNumber):
    """Render every attraction row in *df* as a card for day *outputNumber*."""
    # Maps Google Places type strings to human-readable card labels.
    attractionTypeDic = {
        'amusement_park': 'Amusement Park',
        'aquarium': 'Aquarium',
        'art_gallery': 'Art Gallery',
        'museum': 'Museum',
        'casino': 'Casino',
        'church': 'Church',
        'city_hall': 'City Hall',
        'hindu_temple': 'Temple',
        'mosque': 'Mosque',
        'library': 'Library',
        'park': 'Park',
        'shopping_mall': 'Shopping mall',
        'stadium': 'Stadium',
        'zoo': 'Zoo',
    }
    cards = []
    for idx, row in df.iterrows():
        cards.extend(get_display(idx, row, attractionTypeDic, outputNumber))
    return (cards)
def generate_excel_file(generated_ads_list):
    """Write the itinerary rows into the Excel template and upload it to S3.

    generated_ads_list: [{'Day', 'Start', 'End', 'Distance'}, ...] route rows.
    Returns a presigned S3 download URL valid for one hour.
    """
    report_template_file_name = 'reports/Report-Template.xlsx'
    s3 = boto3.client('s3', region_name=AWS_REGION_NAME, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
    workbook = openpyxl.load_workbook(filename='{}'.format(report_template_file_name))
    sheet = workbook['Report-Template']
    # Template columns: A=Day, B=Start, C=End, D=Distance; data rows start at 2.
    DAY_COL = 'A'
    START_COL = 'B'
    END_COL = 'C'
    DISTANCE_COL = 'D'
    for row, ad in enumerate(generated_ads_list):
        day_cell = DAY_COL + str(row + 2)
        start_cell = START_COL + str(row + 2)
        end_cell = END_COL + str(row + 2)
        distance_cell = DISTANCE_COL + str(row + 2)
        sheet[day_cell].value = ad['Day']
        sheet[start_cell].value = ad['Start']
        sheet[end_cell].value = ad['End']
        sheet[distance_cell].value = ad['Distance']
    # Timestamped filename keeps concurrent/minute-apart runs from colliding.
    time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%d-%m-%Y-%H-%M')
    result_file_name = 'reports/' + time_stamp + '-results.xlsx'
    workbook.save(result_file_name)
    # NOTE(review): result_file_name already starts with 'reports/', so the S3
    # key becomes 'generated_results/reports/...' — confirm this is intended.
    s3.upload_file(result_file_name, 'travel-planner', 'generated_results/' + result_file_name)
    download_url = s3.generate_presigned_url(
        ClientMethod='get_object',
        Params={
            'Bucket': 'travel-planner',
            'Key': 'generated_results/' + result_file_name
        },
        ExpiresIn=3600
    )
    return download_url
def identifyNodesEdges(locationsList, propertyNode):
    """Build one day's route as an edge list over the selected attractions.

    locationsList: [[name, (lat, lng)], ...] attractions picked for the day.
    propertyNode: [name, (lat, lng)] of the accommodation.
    Returns [[(nameA, nameB), distance_km], ...], or [] for an empty day.

    Greedy construction: repeatedly connect the closest pair of nodes whose
    connection neither closes a cycle within an already-joined component nor
    gives a node a third edge. NOTE(review): the `while True` loop terminates
    only when an exception escapes the try block (e.g. argmin over an empty
    candidate list) — confirm before restructuring.
    """
    if(len(locationsList)==0):
        return ([])
    else:
        # Deep-copy so the caller's lists are never mutated.
        extendedLocationsList = copy.deepcopy(locationsList)
        extendedLocationsList.append(propertyNode)
        # complete_dict: node name -> [[other node name, haversine km], ...]
        # (remaining candidate connections for that node).
        complete_dict = {}
        for currentItem in extendedLocationsList:
            for otherItem in extendedLocationsList:
                if (currentItem != otherItem):
                    if currentItem[0] not in complete_dict:
                        complete_dict[currentItem[0]] = [[otherItem[0],haversine(currentItem[1], otherItem[1])]]
                    else:
                        complete_dict[currentItem[0]].append([otherItem[0],haversine(currentItem[1], otherItem[1])])
        # visitedNodes_dict: node -> nodes already in the same connected component.
        visitedNodes_dict = {}
        for key in complete_dict:
            visitedNodes_dict[key]=[key]
        # Each node may take part in at most two edges (simple path degree cap).
        counter_dict = {}
        for key in complete_dict:
            counter_dict[key] = 2
        edgeList = []
        while True:
            try:
                # Nearest remaining neighbour for every still-eligible node.
                minimum_dict = {}
                for key in complete_dict:
                    distances = []
                    for value in complete_dict[key]:
                        distances.append(value[1])
                    indexOfSmallest = np.argmin(distances)
                    minimum_dict[key]=complete_dict[key][indexOfSmallest]
                shortestDistances=[]
                for key,value in minimum_dict.items():
                    shortestDistances.append(value[1])
                shortestDistance = min(shortestDistances)
                # Both endpoints of the globally shortest candidate edge.
                checkIfValidConnection = []
                for key,value in minimum_dict.items():
                    if value[1]==shortestDistance:
                        checkIfValidConnection.append(value[0])
                # Remember the last candidate; it is appended after the loop ends.
                finalConnection=checkIfValidConnection
                finalConnectionDistance=shortestDistance
                if checkIfValidConnection[1] in visitedNodes_dict[checkIfValidConnection[0]]:
                    # Connecting these 2 nodes forms a cyclic graph
                    filteredList=copy.deepcopy([item for item in complete_dict[checkIfValidConnection[1]] if item[0]!=checkIfValidConnection[0]])
                    complete_dict[checkIfValidConnection[1]]=filteredList
                    filteredList=copy.deepcopy([item for item in complete_dict[checkIfValidConnection[0]] if item[0]!=checkIfValidConnection[1]])
                    complete_dict[checkIfValidConnection[0]]=filteredList
                else:
                    # Accept the edge: consume one degree from each endpoint.
                    connectedNodes = []
                    for key,value in minimum_dict.items():
                        if value[1]==shortestDistance:
                            connectedNodes.append(value[0])
                            newValue = copy.deepcopy(complete_dict[key])
                            newValue.remove(value)
                            complete_dict[key] = newValue
                            counter_dict[key]=counter_dict[key]-1
                    # Retire nodes that have used both their edges.
                    keysToPop = []
                    for key in counter_dict:
                        if counter_dict[key]==0:
                            complete_dict.pop(key, None)
                            keysToPop.append(key)
                            for completeKey, completeValue in complete_dict.items():
                                filteredList=copy.deepcopy([item for item in completeValue if item[0]!=key])
                                complete_dict[completeKey]=filteredList
                    for key in keysToPop:
                        counter_dict.pop(key, None)
                    # Merge the two endpoints' component membership lists.
                    for node in visitedNodes_dict[connectedNodes[0]]:
                        visitedNodes_dict[node]=visitedNodes_dict[node]+list(set(visitedNodes_dict[connectedNodes[1]])-set(visitedNodes_dict[node]))
                    for node in visitedNodes_dict[connectedNodes[1]]:
                        visitedNodes_dict[node]=visitedNodes_dict[node]+list(set(visitedNodes_dict[connectedNodes[0]])-set(visitedNodes_dict[node]))
                    edgeList.append([tuple(connectedNodes),shortestDistance])
            except:
                # Exhausted candidates (or any failure) ends the greedy loop.
                break
        # NOTE(review): re-appends the last evaluated connection — confirm this
        # closing edge is intended and not a duplicate.
        edgeList.append([tuple(finalConnection),finalConnectionDistance])
        return(edgeList)
def constructGraph(masterGraph):
    """Render the combined per-day routes as a networkx graph image.

    masterGraph: JSON string produced by determineGraphPoints —
    [day1_edges, ..., day7_edges, all_attractions, property_node].
    Side effect: writes the drawing to /tmp/graphnetwork.png. Returns None.
    """
    masterGraph=json.loads(masterGraph)
    G=nx.Graph()
    # masterGraph[-2] lists every selected attraction; [-1] is the property.
    for node in masterGraph[-2]:
        G.add_node(node[0])
    G.add_node(masterGraph[-1][0])
    # The remaining leading entries are the per-day edge lists.
    for dayList in masterGraph[:-2]:
        if len(dayList)!=0:
            for edge in dayList:
                G.add_edge(edge[0][0], edge[0][1], weight=round(edge[1],4))
    plt.figure(figsize=(15,10))
    plt.title('Shortest Route For Each Day')
    pos = nx.spring_layout(G)
    labels = nx.get_edge_attributes(G,'weight')
    nx.draw_networkx(G,pos,node_size=10,node_color='g', font_size=11)
    nx.draw_networkx_edge_labels(G,pos,edge_labels=labels)
    plt.savefig("/tmp/graphnetwork.png", format="PNG")
# ===============================================================================================================================
# View
# ===============================================================================================================================
# Page layout: a three-step <details> wizard (destination form, per-day
# attraction picker, route graph + Excel download) plus two hidden divs used
# as JSON stores for intermediate callback results.
app.layout = html.Div(children=[
    # Adding stylesheet referenes here as app.css.config.serve_locally = True
    # app.css.config.serve_locally = True is used as it takes a long time to download plotly.js from CDN
    # So cannot add in external stylesheets
    # Have to wait for plotly to release an update to fix this issue
    html.Link(
        rel='stylesheet',
        href='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css'
    ),
    html.Link(
        rel='stylesheet',
        # href='assets/dash-stylesheet.css'
        href='https://s3.ap-southeast-1.amazonaws.com/travel-planner/stylesheets/dash-stylesheet.css'
    ),
    html.H2(children='USA Travel Planner', style={'text-align': 'center',
                                                  'border-radius': '25px',
                                                  'padding-bottom': '30px',
                                                  'padding-top': '30px'}),
    html.Hr(),
    # Step 1: destination, attraction types, accommodation and duration inputs.
    html.Details([
        html.Summary('Where would you like to go?',style={'font-weight': 'bold'}),
        html.Br(),
        html.Div([
            # contains the text box for user inputs
            html.Div([
                html.P([
                    html.Label('STATE'),
                    dcc.Dropdown(
                        id='state_box',
                        options=state_dropdown(),
                        value='')], style={'width': '570px', 'padding-bottom': '15px', 'margin': 'auto'}
                ),
                html.P([
                    html.Label('CITY'),
                    dcc.Dropdown(
                        id='city_box',
                        options="",
                        value='')], style={'width': '570px', 'padding-bottom': '15px', 'margin': 'auto'}
                ),
                html.P([
                    html.Label('ATTRACTIONS'),
                    dcc.Dropdown(
                        id = 'attractions_dropdown',
                        options=[
                            {'label': 'Amusement Park', 'value': 'amusement_park'},
                            {'label': 'Aquarium', 'value': 'aquarium'},
                            {'label': 'Art Gallery', 'value': 'art_gallery'},
                            {'label': 'Museum', 'value': 'museum'},
                            {'label': 'Casino', 'value': 'casino'},
                            {'label': 'Church', 'value': 'church'},
                            {'label': 'City Hall', 'value': 'city_hall'},
                            {'label': 'Temple', 'value': 'hindu_temple'},
                            {'label': 'Mosque', 'value': 'mosque'},
                            {'label': 'Library', 'value': 'library'},
                            {'label': 'Park', 'value': 'park'},
                            {'label': 'Shopping mall', 'value': 'shopping_mall'},
                            {'label': 'Stadium', 'value': 'stadium'},
                            {'label': 'Zoo', 'value': 'zoo'}
                        ],
                        value=[],
                        multi=True
                    )], style={'width': '570px', 'padding-bottom': '15px', 'margin': 'auto'}
                ),
                html.P([
                    html.Label("ACCOMMODATION"),
                    html.Br(),
                    dcc.Input(
                        id = 'property_text',
                        placeholder='Where will you be staying? If unknown, leave as blank.',
                        type='text',
                        value='',
                        style={'width': '100%'})], style={'width': '570px', 'padding-bottom': '20px', 'margin': 'auto'}
                ),
                html.P([
                    html.Label('DURATION OF STAY'),
                    dcc.Slider(
                        id = 'duration_slider',
                        min=1,
                        max=7,
                        marks={1: '1 day', 2: '2 days', 3: '3 days', 4: '4 days', 5: '5 days', 6: '6 days', 7: '7 days'},
                        value=1)], style={'width': '570px', 'padding-bottom': '15px', 'margin': 'auto'}
                )
            ], style={'margin-bottom': '20px', 'text-align': 'center'}),
            html.Button(id='submit-button-one', n_clicks=0, n_clicks_timestamp='0', children='Proceed', className="btn btn-success", style={'width': '10%'})
        ])
    ], id='step-1-details', open=True),
    html.Hr(),
    # Step 2: clustering scatterplot plus one hidden-by-default card grid per day.
    html.Details([
        html.Summary('Just a little more planning...',style={'font-weight': 'bold'}),
        html.Br(),
        html.Img(id='plotImage', src=''),
        html.Br(),
        html.H2('For each day, select the places you would like to visit'),
        html.Div([html.H2('Day One'),html.Div(id='outputDayOne', className='row', children=None)], className='container', style={'display': 'none'}, id='segmentDayOne'),
        html.Div([html.H2('Day Two'),html.Div(id='outputDayTwo', className='row', children=None)], className='container', style={'display': 'none'}, id='segmentDayTwo'),
        html.Div([html.H2('Day Three'),html.Div(id='outputDayThree', className='row', children=None)], className='container', style={'display': 'none'}, id='segmentDayThree'),
        html.Div([html.H2('Day Four'),html.Div(id='outputDayFour', className='row', children=None)], className='container', style={'display': 'none'}, id='segmentDayFour'),
        html.Div([html.H2('Day Five'),html.Div(id='outputDayFive', className='row', children=None)], className='container', style={'display': 'none'}, id='segmentDayFive'),
        html.Div([html.H2('Day Six'),html.Div(id='outputDaySix', className='row', children=None)], className='container', style={'display': 'none'}, id='segmentDaySix'),
        html.Div([html.H2('Day Seven'),html.Div(id='outputDaySeven', className='row', children=None)], className='container', style={'display': 'none'}, id='segmentDaySeven'),
        html.Br(),
        html.Button(id='submit-button-two', n_clicks=0, n_clicks_timestamp='0', children='Select', className="btn btn-success", style={'width': '10%'})
    ], id='step-2-details', open=False, style={'width':'75%', 'margin': 'auto'}),
    html.Hr(),
    # Step 3: final route graph and the Excel itinerary download link.
    html.Details([
        html.Summary("Let's kick off the adventure! :)",style={'font-weight': 'bold'}),
        html.Img(id='graphImage', src=''),
        html.Br(),
        html.A(html.Button(n_clicks=0, children='Download', className="btn btn-success", style={'width': '10%', 'margin-top': '10px'}), target="_blank", id='download_excel_button')
    ],id='step-3-details', open=False),
    html.Hr(),
    # Hidden div inside the app that stores the intermediate value
    html.Div(id='api_base_response',children=None, style={'display': 'none'}),
    html.Div(id='graph_api_response',children=None, style={'display': 'none'}),
    html.Div(style={'padding-bottom': '200px'})
], style={'padding-left': '10px'}, className="body")
# ===============================================================================================================================
# Control
# ===============================================================================================================================
# =============================
# Controls which step to open
# =============================
@app.callback(Output('step-1-details', 'open'), [Input('submit-button-one', 'n_clicks')])
def openclose_step_one(n_clicks):
    """Collapse step 1 once Proceed has been clicked; keep it open initially."""
    return not n_clicks
@app.callback(Output('step-2-details', 'open'), [Input('submit-button-one', 'n_clicks'), Input('submit-button-two', 'n_clicks')],
              [State('submit-button-one', 'n_clicks_timestamp'), State('submit-button-two', 'n_clicks_timestamp')])
def openclose_step_two(buttonOneClicks, buttonTwoClicks, buttonOneTimestamp, buttonTwoTimestamp):
    """Open step 2 only when Proceed was clicked more recently than Select."""
    if int(buttonOneTimestamp) <= int(buttonTwoTimestamp):
        return False
    # Give the attraction lookup and clustering a head start before expanding.
    time.sleep(6)
    return True
@app.callback(Output('step-3-details', 'open'), [Input('submit-button-two', 'n_clicks')])
def openclose_step_three(n_clicks):
    """Open step 3 after Select, pausing while the route graph renders."""
    if not n_clicks:
        return False
    time.sleep(6)
    return True
@app.callback(Output('api_base_response', 'children'), [Input('submit-button-one', 'n_clicks')],
              [State('state_box', 'value'), State('city_box', 'value'), State('attractions_dropdown', 'value'), State('property_text', 'value'), State('duration_slider', 'value')])
def attraction_identifier(n_clicks, state_value, city_value, attractions_value, property_value, duration_value):
    """Geocode the stay, find and cluster nearby attractions, and stash the JSON result."""
    if not n_clicks:
        return None
    print("Step one parameters received: "+state_value + " " + city_value + " " + str(attractions_value) + " " + property_value + " " + str(duration_value))
    property_name, property_loc = locate_property(state_value, city_value, property_value)
    print(str(property_name))
    pois = locate_nearby_attractions(property_loc, attractions_value, duration_value)
    clustered = cluster_attractions(pois, duration_value)
    time.sleep(2)
    print("===================================================================================")
    print("Clustered response: ")
    # The property node rides along as the payload's final element.
    clustered.append([property_name, property_loc])
    print(clustered)
    return json.dumps(clustered)
@app.callback(Output('plotImage', 'src'), [Input('api_base_response', 'children')])
def updatePlot(api_response):
    """Serve the cluster scatterplot (or a placeholder) as a base64 data URI.

    Shows img/emptyplot.png until clustering has produced at least one group;
    afterwards serves the /tmp/scatterplot.png written by cluster_attractions.
    Fixes: the encode logic was triplicated and leaked file handles.
    """
    def _as_data_uri(image_filename):
        # Inline the PNG so no static route is needed for generated files.
        with open(image_filename, 'rb') as image_file:
            encoded_image = base64.b64encode(image_file.read())
        return 'data:image/png;base64,{}'.format(encoded_image.decode())

    print("Change in response detected")
    if api_response is None or json.loads(api_response)[0] == 0:
        print("Serving emptyplot image")
        return _as_data_uri('img/emptyplot.png')
    print("Serving scatterplot image")
    return _as_data_uri('/tmp/scatterplot.png')
@app.callback(Output('city_box', 'options'), [Input('state_box', 'value')])
def updateCitiesList(value):
    """Refresh the city drop-down options when a state is picked.

    Looks the state up in the DynamoDB 'United_States_Cities' table; the item
    stores its cities as a string set under 'city'.
    """
    if value == '':
        return None
    else:
        databaseResponse = dynamodb.get_item(TableName='United_States_Cities', Key={'state': {'S': value}},)
        citiesList = databaseResponse['Item']['city']['SS']
        dropdown_list = []
        for item in citiesList:
            line = {'label': item, 'value': item}
            dropdown_list.append(line)
        return dropdown_list
@app.callback(Output('graph_api_response', 'children'), [Input('submit-button-two', 'n_clicks')], [State('step-2-details', 'children'), State('api_base_response', 'children')])
def determineGraphPoints(n_clicks, details, api_base_response):
    """Gather the checked attractions per day and build each day's route edges.

    Walks the step-2 component tree looking for the seven day segments,
    collects every card whose checkbox is ticked, then runs
    identifyNodesEdges per day. Returns JSON: [day1_edges, ..., day7_edges,
    flat_attraction_list, property_node].
    Refactor: the seven copy-pasted per-segment branches are folded into one
    data-driven loop; the bare except is narrowed to Exception.
    """
    if not n_clicks:
        return None
    segment_ids = ["segmentDayOne", "segmentDayTwo", "segmentDayThree", "segmentDayFour",
                   "segmentDayFive", "segmentDaySix", "segmentDaySeven"]
    selections = {segment_id: [] for segment_id in segment_ids}
    for child in details:
        try:
            segment_id = child['props']['id']
            if segment_id not in selections:
                continue
            adboxes = child['props']['children'][1]['props']['children']
            for adbox in adboxes:
                childElements = adbox['props']['children']
                # childElements: [name P, type P, location P, Checklist];
                # a ticked checklist has exactly one value ('selected').
                if len(childElements[3]['props']['values']) == 1:
                    location = ast.literal_eval(childElements[2]['props']['children'][0])
                    selections[segment_id].append([childElements[0]['props']['children'][0], location])
        except Exception:
            # Non-segment children (headings, buttons, images) lack this shape.
            print("Not found")
    response = json.loads(api_base_response)
    propertyNode = response[-1]
    print(str(propertyNode))
    masterGraph = [identifyNodesEdges(selections[segment_id], propertyNode)
                   for segment_id in segment_ids]
    print("**********************MASTER GRAPH****************************")
    print(str(masterGraph))
    print("**************************************************************")
    onlyAttractionsList = []
    for segment_id in segment_ids:
        onlyAttractionsList.extend(selections[segment_id])
    masterGraph.append(onlyAttractionsList)
    masterGraph.append(propertyNode)
    return (json.dumps(masterGraph))
@app.callback(Output('graphImage','src'), [Input('graph_api_response', 'children')])
def plotNetworkGraph(children):
    """Draw the per-day route graph and return it as a base64 data URI."""
    print("Calling plotNetworkGraph...")
    if children is None:
        return None
    # constructGraph writes /tmp/graphnetwork.png as a side effect.
    constructGraph(children)
    time.sleep(3)
    print("Serving network image")
    image_filename = '/tmp/graphnetwork.png'
    encoded_image = base64.b64encode(open(image_filename, 'rb').read())
    return 'data:image/png;base64,{}'.format(encoded_image.decode())
@app.callback(Output('download_excel_button', 'href'), [Input('graph_api_response', 'children')])
def generateExcel(children):
    """Flatten each day's route into Day/Start/End/Distance rows and return a
    presigned URL for the generated Excel itinerary.

    Fix: the original `while firstNodeFound==0` scans never terminated when no
    edge touched the property node, and the chain walk spun forever on
    disjoint edges. The scans below are single-pass with a no-progress guard.
    """
    print("Calling generateExcel...")
    if children is None:
        return None
    graph_api_response = json.loads(children)
    propertyNode = graph_api_response[-1][0]
    totalDayList = graph_api_response[:-2]
    formattedList = []
    for idx, dayList in enumerate(totalDayList, 1):
        if len(dayList) == 0:
            continue
        # Start the day's walk at the accommodation, whichever way the
        # undirected edge happens to be stored.
        currentNode = None
        for edge in dayList:
            if edge[0][0] == propertyNode:
                currentNode = edge[0][1]
                formattedList.append({'Day': idx, 'Start': propertyNode, 'End': edge[0][1], 'Distance': edge[1]})
                dayList.remove(edge)
                break
        if currentNode is None:
            for edge in dayList:
                if edge[0][1] == propertyNode:
                    currentNode = edge[0][0]
                    formattedList.append({'Day': idx, 'Start': propertyNode, 'End': edge[0][0], 'Distance': edge[1]})
                    dayList.remove(edge)
                    break
        if currentNode is None:
            # No edge touches the property node; skip the day rather than spin.
            continue
        # Follow the chain edge by edge until the day's list is exhausted.
        while len(dayList) != 0:
            progressed = False
            for edge in dayList:
                if edge[0][0] == currentNode:
                    formattedList.append({'Day': idx, 'Start': currentNode, 'End': edge[0][1], 'Distance': edge[1]})
                    currentNode = edge[0][1]
                    dayList.remove(edge)
                    progressed = True
                    break
            if not progressed:
                for edge in dayList:
                    if edge[0][1] == currentNode:
                        formattedList.append({'Day': idx, 'Start': currentNode, 'End': edge[0][0], 'Distance': edge[1]})
                        currentNode = edge[0][0]
                        dayList.remove(edge)
                        progressed = True
                        break
            if not progressed:
                # Remaining edges are disconnected from the walk; stop cleanly.
                break
    excel_download_url = generate_excel_file(formattedList)
    return (excel_download_url)
# =============================
# Allocates locations to days
# =============================
@app.callback(Output('outputDayOne', 'children'), [Input('api_base_response', 'children')])
def updateOutputOne(api_response):
    """Fill day one's card grid from the clustered-attractions JSON store."""
    if api_response is None:
        return None
    parsed = json.loads(api_response)
    # parsed[0] holds how many day groups were produced.
    if parsed[0] < 1:
        return None
    # Cap at 12 cards so the grid stays manageable.
    frame = pd.DataFrame(parsed[1][:12], columns=['Attraction', 'Location', 'Type'])
    return display_output(frame, 1)
@app.callback(Output('segmentDayOne', 'style'), [Input('api_base_response', 'children')])
def showSegmentOne(api_response):
    """Unhide the day-one section once at least 1 day group exists."""
    if api_response is not None and json.loads(api_response)[0] >= 1:
        return {'display': 'inline'}
    return {'display': 'none'}
@app.callback(Output('outputDayTwo', 'children'), [Input('api_base_response', 'children')])
def updateOutputTwo(api_response):
    """Fill day two's card grid from the clustered-attractions JSON store."""
    if api_response is None:
        return None
    parsed = json.loads(api_response)
    if parsed[0] < 2:
        return None
    frame = pd.DataFrame(parsed[2][:12], columns=['Attraction', 'Location', 'Type'])
    return display_output(frame, 2)
@app.callback(Output('segmentDayTwo', 'style'), [Input('api_base_response', 'children')])
def showSegmentTwo(api_response):
    """Unhide the day-two section once at least 2 day groups exist."""
    if api_response is not None and json.loads(api_response)[0] >= 2:
        return {'display': 'inline'}
    return {'display': 'none'}
@app.callback(Output('outputDayThree', 'children'), [Input('api_base_response', 'children')])
def updateOutputThree(api_response):
    """Fill day three's card grid from the clustered-attractions JSON store."""
    if api_response is None:
        return None
    parsed = json.loads(api_response)
    if parsed[0] < 3:
        return None
    frame = pd.DataFrame(parsed[3][:12], columns=['Attraction', 'Location', 'Type'])
    return display_output(frame, 3)
@app.callback(Output('segmentDayThree', 'style'), [Input('api_base_response', 'children')])
def showSegmentThree(api_response):
    """Unhide the day-three section once at least 3 day groups exist."""
    if api_response is not None and json.loads(api_response)[0] >= 3:
        return {'display': 'inline'}
    return {'display': 'none'}
@app.callback(Output('outputDayFour', 'children'), [Input('api_base_response', 'children')])
def updateOutputFour(api_response):
    """Fill day four's card grid from the clustered-attractions JSON store."""
    if api_response is None:
        return None
    parsed = json.loads(api_response)
    if parsed[0] < 4:
        return None
    frame = pd.DataFrame(parsed[4][:12], columns=['Attraction', 'Location', 'Type'])
    return display_output(frame, 4)
@app.callback(Output('segmentDayFour', 'style'), [Input('api_base_response', 'children')])
def showSegmentFour(api_response):
if (api_response is None):
return ({'display':'none'})
else:
api_response = json.loads(api_response)
if (api_response[0]>=4):
return({'display':'inline'})
else:
return ({'display':'none'})
@app.callback(Output('outputDayFive', 'children'), [Input('api_base_response', 'children')])
def updateOutputFive(api_response):
if (api_response is None):
return None
else:
api_response = json.loads(api_response)
if (api_response[0]>=5):
limitedResponse = api_response[5][:12]
cols = ['Attraction', 'Location', 'Type']
df = pd.DataFrame(limitedResponse, columns=cols)
return(display_output(df, 5))
else:
return None
@app.callback(Output('segmentDayFive', 'style'), [Input('api_base_response', 'children')])
def showSegmentFive(api_response):
if (api_response is None):
return ({'display':'none'})
else:
api_response = json.loads(api_response)
if (api_response[0]>=5):
return({'display':'inline'})
else:
return ({'display':'none'})
@app.callback(Output('outputDaySix', 'children'), [Input('api_base_response', 'children')])
def updateOutputSix(api_response):
if (api_response is None):
return None
else:
api_response = json.loads(api_response)
if (api_response[0]>=6):
limitedResponse = api_response[6][:12]
cols = ['Attraction', 'Location', 'Type']
df = pd.DataFrame(limitedResponse, columns=cols)
return(display_output(df, 6))
else:
return None
@app.callback(Output('segmentDaySix', 'style'), [Input('api_base_response', 'children')])
def showSegmentSix(api_response):
if (api_response is None):
return ({'display':'none'})
else:
api_response = json.loads(api_response)
if (api_response[0]>=6):
return({'display':'inline'})
else:
return ({'display':'none'})
@app.callback(Output('outputDaySeven', 'children'), [Input('api_base_response', 'children')])
def updateOutputSeven(api_response):
if (api_response is None):
return None
else:
api_response = json.loads(api_response)
if (api_response[0]>=7):
limitedResponse = api_response[7][:12]
cols = ['Attraction', 'Location', 'Type']
df = pd.DataFrame(limitedResponse, columns=cols)
return(display_output(df, 7))
else:
return None
@app.callback(Output('segmentDaySeven', 'style'), [Input('api_base_response', 'children')])
def showSegmentSeven(api_response):
if (api_response is None):
return ({'display':'none'})
else:
api_response = json.loads(api_response)
if (api_response[0]>=7):
return({'display':'inline'})
else:
return ({'display':'none'})
# =============================
# Toggle colours of boxes
# =============================
# The original code defined 84 byte-identical `toggleColor` callbacks, one per
# (day, box) pair for days 1-7 and boxes 0-11. A registration loop wires up
# exactly the same Outputs and Inputs with exactly the same handler logic.
def _make_toggle_color():
    # Each registration gets its own function object (Dash keys callbacks by
    # their Output, so sharing logic is safe; a fresh closure keeps parity
    # with the original one-function-per-callback layout).
    def toggleColor(values):
        # `values` is the checklist's selected-values list; a single selection
        # highlights the box, anything else leaves it transparent.
        if (len(values) == 1):
            return {'backgroundColor': '#ebebeb'}
        return {'backgroundColor': 'transparent'}
    return toggleColor

for _day in range(1, 8):
    for _box in range(12):
        app.callback(Output('adbox{}-{}'.format(_day, _box), 'style'),
                     [Input('adboxCheckbox{}-{}'.format(_day, _box), 'values')])(_make_toggle_color())
# ==============================================================================================================================
if __name__ == '__main__':
    # Development entry point: bind on all interfaces so the Dash dev server
    # is reachable from other hosts on port 5000. Production uses gunicorn
    # against app.server instead (see below).
    app.run_server(host='0.0.0.0', port=5000)
# Execute the following to run server:
# gunicorn --bind unix:/var/tmp/dash.sock --log-level=debug --timeout 1200 --preload --workers 5 index:app.server
|
{"hexsha": "d35b5358983f3e96204a7e42713362bb99e82622", "size": 56942, "ext": "py", "lang": "Python", "max_stars_repo_path": "index.py", "max_stars_repo_name": "Jordan396/USA-Travel-Planner", "max_stars_repo_head_hexsha": "2b40bed13c2834f58c48f7eb5edb7b90dd68c0e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "index.py", "max_issues_repo_name": "Jordan396/USA-Travel-Planner", "max_issues_repo_head_hexsha": "2b40bed13c2834f58c48f7eb5edb7b90dd68c0e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "index.py", "max_forks_repo_name": "Jordan396/USA-Travel-Planner", "max_forks_repo_head_hexsha": "2b40bed13c2834f58c48f7eb5edb7b90dd68c0e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9273670558, "max_line_length": 224, "alphanum_fraction": 0.6216149766, "include": true, "reason": "import numpy,import networkx", "num_tokens": 13636}
|
[STATEMENT]
lemma length_trace[simp]: "\<And>i. length(trace d i xs) = length xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>i. length (trace d i xs) = length xs
[PROOF STEP]
by (induct "xs") simp_all
|
{"llama_tokens": 86, "file": "Functional-Automata_RegSet_of_nat_DA", "length": 1}
|
#!/usr/bin/env python
# coding: utf-8
# OBJECTIVE : The dataset contains detailed attributes for every player registered in the latest edition of the FIFA 19 database. Our objective is to create Linear, Multiple and Polynomial Regression models to predict the potential of a player based on several attributes.
# In[88]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# In[89]:
# Load the FIFA 19 player dataset (one row per registered player).
data=pd.read_csv("../../../input/karangadiya_fifa19/data.csv")
# In[90]:
# Quick look at the first 5 rows to sanity-check the load.
data.head()
# In[91]:
data.shape #(no. of rows, no. of columns)
# In[92]:
# Summary statistics (count/mean/std/quartiles) for the numeric columns.
data.describe()
# In[93]:
# Check each column for missing values before modelling.
data.isnull().any()
# # Linear Regression - Predicting Potential based on Age of the player
# In[94]:
# x = Age (independent variable, positional column 3)
x=data.iloc[:,3]
# In[95]:
x.head()
# In[96]:
# Confirm the Age column has no missing values.
x.isnull().any()
# In[97]:
# y = Potential (dependent variable, positional column 8)
y=data.iloc[:,8]
# In[98]:
y.head()
# In[99]:
y.isnull().any()
# In[100]:
# Visualise the raw Age-vs-Potential relationship.
plt.bar(data["Age"],data["Potential"])
plt.xlabel("Age of Player")
print()
# In[101]:
# Splitting data into train and test sets.
from sklearn.model_selection import train_test_split
# In[102]:
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=0)
# In[103]:
from sklearn.linear_model import LinearRegression
# In[104]:
# making object regressor of class LinearRegression
regressor=LinearRegression()
# scikit-learn expects 2-D feature arrays, so the 1-D pandas Series are
# converted to ndarrays and reshaped into column vectors below before fitting.
# In[105]:
type(x_train)
type(y_train)
# In[106]:
x_train=np.array(x_train)
y_train=np.array(y_train)
# In[107]:
type(x_train)
type(y_train)
# In[108]:
x_train=x_train.reshape(-1,1)
y_train=y_train.reshape(-1,1)
# In[109]:
# fitting training set into object regressor
regressor.fit(x_train,y_train)
# To avoid errors in predicting, x_test is reshaped the same way.
# In[110]:
x_test=np.array(x_test)
# In[111]:
x_test=x_test.reshape(-1,1)
# In[112]:
# Predicting y from test set
y_pred= regressor.predict(x_test)
# In[113]:
# Visualising training dataset
plt.scatter(x_train,y_train,color="red")
plt.xlabel("Age of Player")
plt.ylabel("Potential of Player")
plt.plot(x_train, regressor.predict(x_train),color="blue") # To draw line of regression
print()
# In[114]:
# Visualising test dataset (regression line fitted on the training data)
plt.scatter(x_test,y_test,color="red")
plt.xlabel("Age of Player")
plt.ylabel("Potential of Player")
plt.plot(x_train, regressor.predict(x_train),color="blue")
print()
# In[115]:
# Finding intercept of linear regression line
regressor.intercept_
# In[116]:
# Finding coefficient (slope) of linear regression line
regressor.coef_
# In[117]:
# Finding mean squared error of linear regression model
from sklearn.metrics import mean_squared_error
# In[118]:
mean_squared_error(y_test,y_pred)
# # Multiple regression - Predicting potential based on age, agility, balance, stamina, strength, composure
#
# In[119]:
# independent variables are - Age, Agility, Balance, Stamina, Strength, Composure
# (positional columns 3, 66, 68, 71, 72, 79)
x=data.iloc[:,[3,66,68,71,72,79]]
# In[120]:
x.head()
# In[121]:
# checking if there are null values in x and then filling them.
x.isnull().any()
# In[122]:
# Forward-fill: propagate the previous row's value into missing cells.
x=x.fillna(method='ffill')
# In[123]:
x.isnull().any()
# In[124]:
# dependent variable = Potential
y=data.iloc[:,8]
# In[125]:
y.head()
# In[126]:
y.isnull().any()
# In[127]:
# Overlay each feature against Potential to eyeball the relationships.
sns.lineplot(x="Potential", y="Age",data=data,label="Age", ci= None)
sns.lineplot(x="Potential", y="Agility",data=data,label="Agility", ci= None)
sns.lineplot(x="Potential", y="Balance",data=data,label="Balance", ci= None)
sns.lineplot(x="Potential", y="Stamina",data=data,label="Stamina", ci= None)
sns.lineplot(x="Potential", y="Strength",data=data,label="Strength", ci= None)
sns.lineplot(x="Potential", y="Composure",data=data,label="Composure", ci= None)
# In[128]:
# NOTE(review): no random_state here, so this split is not reproducible across runs.
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2)
# In[129]:
regressor=LinearRegression()
# In[130]:
# Fit the multiple regression on the six selected features.
regressor.fit(x_train,y_train)
# In[131]:
# BUG FIX: the prediction was computed but never assigned, so the plot
# below silently reused the stale `y_pred` from the earlier simple
# linear (Age-only) model. Assign the multiple-regression predictions.
y_pred=regressor.predict(x_test)
# In[132]:
# Visualising Actual and predicted values of Potential of player
plt.scatter(y_test,y_pred)
plt.xlabel("Actual Potential")
plt.ylabel("Predicted Potential")
print()
# Seems like the actual and predicted values are very close to each other.
# In[133]:
regressor.intercept_
# In[134]:
regressor.coef_
# Backward Elimination - Making optimal regression model by finding the statistical significance of all independent variables
# In[135]:
# let us take the significance level (SL) = 0.05
# FIX: OLS must be imported from statsmodels.api; it was removed from
# statsmodels.formula.api in statsmodels 0.11, so the old import breaks
# on current versions.
import statsmodels.api as sm
# In[136]:
# fitting all variables in the model
# NOTE(review): no constant column is added (sm.add_constant), so this is
# regression through the origin — presumably intentional; verify.
regressor_OLS=sm.OLS(endog=y,exog=x).fit()
# In[137]:
# Finding statistical summary of all variables
regressor_OLS.summary()
# As we see all the P-values are less than SL(0.05), that means all the variables are significant and none of them can be removed.
# t-value shows the statistical significance of each variable.
# F-statistic shows us how significant the fit is.
# Adjusted R-squared is 0.986, meaning the model explains 98.6% of the variance in the dependent variable.
# # Polynomial Regression - Predicting potential based on the age of player
# In[138]:
# independent variable = age
x=data.iloc[:,3]
# In[139]:
x.head()
# In[140]:
# dependent variable = potential
y=data.iloc[:,8]
# In[141]:
y.head()
# In[142]:
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=42)
# In[143]:
x_train=np.array(x_train)
y_train=np.array(y_train)
# In[144]:
x_train=x_train.reshape(-1,1)
y_train=y_train.reshape(-1,1)
# In[145]:
# Baseline: plain linear model on the same split, for comparison.
lin_reg_1=LinearRegression()
# In[146]:
lin_reg_1.fit(x_train,y_train)
# In[147]:
x_test=np.array(x_test)
# In[148]:
x_test=x_test.reshape(-1,1)
# In[149]:
y_pred_1=lin_reg_1.predict(x_test)
# In[150]:
# Making polynomial regression model
from sklearn.preprocessing import PolynomialFeatures
# In[151]:
poly_reg=PolynomialFeatures(degree=3)
# In[152]:
x=np.array(x)
# In[153]:
x=x.reshape(-1,1)
# In[154]:
# Making polynomial matrix of x of degree 3 (columns: 1, x, x^2, x^3)
x_poly=poly_reg.fit_transform(x)
# In[155]:
x_poly
# In[156]:
# Same random_state as In[142] so the rows line up with the linear split.
x_poly_train,x_poly_test,y_train,y_test=train_test_split(x_poly,y,test_size=0.2, random_state=42)
# In[157]:
# Making another object to fit polynomial set
lin_reg_2=LinearRegression()
# In[158]:
lin_reg_2.fit(x_poly_train,y_train)
# In[159]:
y_pred_2=lin_reg_2.predict(x_poly_test)
# In[160]:
# Visualizing Linear Regression Model
plt.scatter(x_test,y_test,color='red')
plt.xlabel("Age of Player")
plt.ylabel("Potential of Player")
plt.title("Linear Regression Curve ")
plt.plot(x_train,lin_reg_1.predict(x_train),color='blue')
print()
# In[161]:
# Visualizing Polynomial Regression Model
plt.scatter(x_test,y_test,color='red')
plt.xlabel("Age of Player")
plt.ylabel("Potential of Player")
plt.title("Polynomial Regression Curve ")
plt.plot(x_train,lin_reg_2.predict(poly_reg.fit_transform(x_train)),color='blue')
print()
# In[162]:
mean_squared_error(y_test,y_pred_2)
# The mean squared error of the polynomial regression model is lower than that of the linear regression model, so the polynomial model is more accurate.
|
{"hexsha": "4f11ee7416c953586a41f57e9a0d3523675fb21e", "size": 7355, "ext": "py", "lang": "Python", "max_stars_repo_path": "relancer-exp/original_notebooks/karangadiya_fifa19/fifa-2019-regression-model.py", "max_stars_repo_name": "Chenguang-Zhu/relancer", "max_stars_repo_head_hexsha": "bf1a175b77b7da4cff12fbc5de17dd55246d264d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-05T22:27:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T22:27:49.000Z", "max_issues_repo_path": "relancer-exp/original_notebooks/karangadiya_fifa19/fifa-2019-regression-model.py", "max_issues_repo_name": "Chenguang-Zhu/relancer", "max_issues_repo_head_hexsha": "bf1a175b77b7da4cff12fbc5de17dd55246d264d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "relancer-exp/original_notebooks/karangadiya_fifa19/fifa-2019-regression-model.py", "max_forks_repo_name": "Chenguang-Zhu/relancer", "max_forks_repo_head_hexsha": "bf1a175b77b7da4cff12fbc5de17dd55246d264d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.5701107011, "max_line_length": 268, "alphanum_fraction": 0.7118966689, "include": true, "reason": "import numpy,import statsmodels", "num_tokens": 1940}
|
# This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
import numpy as np
import pandas as pd
import os
from time import time
import pickle
import ray
from scipy.optimize import minimize
from scipy.special import softmax
def get_data(raw):
    """Parse an episode log from the open text stream *raw*.

    Expected format: first line is the episode count; each episode is a
    line with its step count T followed by T comma-separated float rows.
    Returns a list of episodes, each a list of per-step float lists.
    """
    num_episodes = int(raw.readline().strip())
    episodes = []
    for _ in range(num_episodes):
        num_steps = int(raw.readline().strip())
        episode = [
            [float(field) for field in raw.readline().strip().split(',')]
            for _ in range(num_steps)
        ]
        episodes.append(episode)
    return episodes
def estimate(pi_e, D, n, gamma=0.95):
    """Serial per-decision importance sampling (PDIS) estimate.

    pi_e  : per-state action preference rows; softmax gives action probs.
    D     : episodes; each step is (state, action, reward, behavior_prob).
    n     : divisor for averaging (total sample count).
    gamma : per-step discount factor.
    """
    est = 0.0
    print(f"Starting PDIS estimation for {n} samples")
    for episode in D:
        weight = 1.0       # running importance-sampling ratio w_t
        discount = 1.0     # running gamma^t
        for state, action, reward, behavior_prob in episode:
            weight *= softmax(pi_e[int(state)])[int(action)] / behavior_prob
            est += discount * weight * reward / n
            discount *= gamma
    print(f"Average estimate of return: {est}")
    return est
@ray.remote
def estimate_ray(pi_e, D, n, gamma=0.95):
    """Ray-remote worker computing the PDIS estimate on a shard of episodes.

    Same contract as ``estimate``: returns the per-decision importance
    sampling sum over ``D``, normalized by the global episode count ``n``.
    """
    print(f"Starting PDIS estimation for {n} samples")
    total = 0.0
    for episode in D:
        weight = 1
        discount = 1
        for state, action, reward, behavior_prob in episode:
            weight *= softmax(pi_e[int(state)])[int(action)] / behavior_prob
            total += discount * weight * reward / n
            discount *= gamma
    print(f"Average estimate of return: {total}")
    return total
@ray.remote
def estimate_ray_vec(pi_e, D, n, gamma=0.95):
    """Ray-remote, vectorized PDIS estimate over a shard of episodes.

    pi_e  : (S, A) logits; converted once to per-state action probabilities.
    D     : episodes, each a sequence of (state, action, reward, behavior_prob).
    n     : global episode count used for normalization.
    gamma : per-step discount factor.

    Returns the shard's contribution to the average PDIS return (already
    divided by ``n``), so shard results can simply be summed.
    """
    est = 0.0
    pi_e = softmax(pi_e, axis=1)
    for ep in D:
        # BUG FIX: np.float / np.int were deprecated aliases removed in
        # NumPy 1.24; use the builtin types instead.
        ep = np.array(ep, dtype=float)
        # Cumulative per-decision importance weights, discounted by gamma at
        # every step; the trailing /gamma removes the extra factor on step 0.
        weights = np.cumprod(
            pi_e[ep[:, 0].astype(int), ep[:, 1].astype(int)] * gamma / ep[:, 3]) / gamma
        est += weights.dot(ep[:, 2])
    return est / n
def estimate_vec(pi_e, D, n, gamma=0.95):
    """Vectorized PDIS estimate of the expected discounted return.

    pi_e  : (S, A) logits; converted once to per-state action probabilities.
    D     : episodes, each a sequence of (state, action, reward, behavior_prob).
    n     : episode count used for normalization.
    gamma : per-step discount factor.

    Returns the average PDIS return estimate over ``D``.
    """
    est = 0.0
    pi_e = softmax(pi_e, axis=1)
    for ep in D:
        # BUG FIX: np.float / np.int were deprecated aliases removed in
        # NumPy 1.24; use the builtin types instead.
        ep = np.array(ep, dtype=float)
        # Cumulative per-decision importance weights, discounted by gamma at
        # every step; the trailing /gamma removes the extra factor on step 0.
        weights = np.cumprod(
            pi_e[ep[:, 0].astype(int), ep[:, 1].astype(int)] * gamma / ep[:, 3]) / gamma
        est += weights.dot(ep[:, 2])
    return est / n
def pdis_estimate(pi_e, D, S, A, gamma=0.95, minimize=True, verbose=False):
    """Parallel PDIS estimate of the return of policy ``pi_e`` over data ``D``.

    The episode list is split into 12 shards evaluated by ``estimate_ray_vec``
    ray workers; shard results are pre-normalized by ``n`` so they can be
    summed directly.

    pi_e     : flat parameter vector, reshaped to (S, A) logits.
    D        : list of episodes; must not be None.
    S, A     : number of states / actions.
    gamma    : per-step discount factor.
    minimize : when True, negate the estimate so scipy.optimize.minimize
               maximizes return. NOTE: this flag shadows the imported scipy
               ``minimize`` function inside this scope; the name is kept for
               backward compatibility with existing callers.
    verbose  : print timing/progress information.

    Raises ValueError if ``D`` is None.
    """
    if D is None:
        raise ValueError("Data D is None")
    n = len(D)
    if verbose:
        print(f"Running PDIS estimation for the entire candidate data of {len(D)} samples")
    a = time()
    pi_e = pi_e.reshape(S, A)
    # Fan the episodes out over a fixed pool of 12 ray workers.
    n_work = 12
    works = []
    for i in range(n_work):
        start = int(n * i / n_work)
        end = int(n * (i + 1) / n_work)
        works.append(estimate_ray_vec.remote(pi_e, D[start:end], n, gamma))
    results = ray.get(works)
    if verbose:
        print(f"Estimation for one complete run done in {time() - a} seconds")
    est = sum(results)
    print(f"Average estimate of return: {est}")
    return est * (-1 if minimize else 1)
# Press the green button in the gutter to run the script.
if __name__ == '__main__1':
    # Deliberately disabled branch ('__main__1' never equals __name__):
    # one-off conversion of the raw CSV episode log into a pickle file for
    # faster loading by the other entry points.
    # data_raw = open("sample.txt", 'r')
    # Use context managers so the file handles are closed (the original left
    # both the CSV and pickle handles open).
    with open("data.csv", 'r') as data_raw:
        data = get_data(data_raw)
    print(len(data))
    with open("data.p", 'wb') as f:
        pickle.dump(data, f)
if __name__ == '__main__2':
    # Deliberately disabled branch ('__main__2' never equals __name__):
    # serial Powell optimization of the policy over the training half of the
    # pickled episode data.
    print("Loading data")
    a = time()
    with open("data.p", "rb") as f:
        data = pickle.load(f)
    print(f"Loaded data in {time() - a} seconds")
    n = len(data)
    print(f"number of episodes: {n}")
    test = int(0.5 * n)
    h_te = data[-test:]
    h_tr = data[:-test]
    S = 18
    A = 4
    t_0 = np.random.randn(S * A)
    print(f"Running minimization over {n - test} episodes")
    a = time()
    pi_s = minimize(pdis_estimate, t_0, args=(h_tr, S, A), method='Powell')
    print(f"Result: {pi_s}")
    print(f"time taken: {time() - a} seconds")
    # BUG FIX: pickle.dump requires a binary-mode file; the original opened
    # "results/res.p" with mode 'w', which raises TypeError at dump time.
    with open("results/res.p", "wb") as f:
        pickle.dump(pi_s, f)
if __name__ == '__main__':
    # Active entry point: ray-parallel Powell optimization over the training
    # half of the numpy-saved episode data.
    ray.init()
    print("Loading data")
    a = time()
    data = np.load("data_np.npy", allow_pickle=True)
    print(f"Loaded data in {time() - a} seconds")
    n = len(data)
    print(f"number of episodes: {n}")
    test = int(0.5 * n)
    h_te = data[-test:]
    h_tr = data[:-test]
    S = 18
    A = 4
    t_0 = np.random.randn(S * A)
    print(f"Running minimization over {n - test} episodes")
    a = time()
    pi_s = minimize(pdis_estimate, t_0, args=(h_tr, S, A), method='Powell')
    print(f"Result: {pi_s}")
    print(f"time taken: {time() - a} seconds")
    # BUG FIX: pickle.dump requires a binary-mode file; the original opened
    # "results/res.p" with mode 'w', which raises TypeError at dump time and
    # would discard the optimization result after the long Powell run.
    with open("results/res.p", "wb") as f:
        pickle.dump(pi_s, f)
if __name__ == '__main__1':
    # Deliberately disabled branch ('__main__1' never equals __name__):
    # sanity-check the PDIS estimator with a uniform (all-ones logits) policy
    # over the full pickled dataset.
    ray.init()
    print("Loading data")
    a = time()
    # Use a context manager so the pickle file handle is closed (the original
    # left it open).
    with open("data.p", "rb") as f:
        data = pickle.load(f)
    print(f"Loaded data in {time() - a} seconds")
    n = len(data)
    print(f"number of episodes: {n}")
    test = int(0.5 * n)
    S = 18
    A = 4
    t_0 = np.ones(S * A)
    print(f"Estimate: {pdis_estimate(t_0, data, S, A)}")
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
{"hexsha": "cec2b2ee49d62502cd3b40f61cf33105704b0f33", "size": 5751, "ext": "py", "lang": "Python", "max_stars_repo_path": "main_ray.py", "max_stars_repo_name": "hannanabdul55/seldonian-fairness", "max_stars_repo_head_hexsha": "d02aaa3b62170df66f7a2962a32fa7d54028de78", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-01-05T05:25:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T15:01:41.000Z", "max_issues_repo_path": "main_ray.py", "max_issues_repo_name": "hannanabdul55/seldonian-fairness", "max_issues_repo_head_hexsha": "d02aaa3b62170df66f7a2962a32fa7d54028de78", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-21T18:31:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-21T18:31:35.000Z", "max_forks_repo_path": "main_ray.py", "max_forks_repo_name": "hannanabdul55/seldonian-fairness", "max_forks_repo_head_hexsha": "d02aaa3b62170df66f7a2962a32fa7d54028de78", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-13T15:01:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-13T15:01:45.000Z", "avg_line_length": 29.953125, "max_line_length": 94, "alphanum_fraction": 0.5430359937, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1718}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 7 15:47:38 2018
@author: jdietric
"""
#pyqt import
#from PyQt5 import QtCore, QtGui, QtWidgets
# other imports
import os
import sys
import numpy as np
import pandas as pd
import sympy.geometry as spg
import matplotlib.path as mplPath
from datetime import datetime
# MAIN PROGRAM HELPER FUNCTIONS (run on OK button)
def footprints(cam, sensor, base_elev):
    """
    This function calculates the instantaneous field of view (IFOV) for
    the camera(s) that are passed.\n
    Vars:\n
    \t cam = pandas dataframe (n x ~6, fields: x,y,z,yaw,pitch,roll)\n
    \t sensor = pandas dataframe (1 x 3, fields: focal, sensor_x, sensor_y):
    \t focal length (mm), sensor x dim (mm), sensor y dim (mm)\n
    \t base_elev = average elevation of your site (meters, or in the same
    \t measure as your coordinates)\n
    Creates approx. coordinates for sensor
    corners (north-oriented and zero pitch) at the camera's x,y,z. Rotates
    the sensor coords in 3D space to the camera's pitch, roll and yaw angles
    and projects corner rays through the camera
    x,y,z to a approx ground plane. The intersection of the rays with the
    ground are the corners of the photo footprint.\n
    *** Photos that have pitch angles that cause the horizon to be visible will
    cause the UL and UR path coordinates to be wrong. These cameras are
    disregarded and the footprint will be set to NaN in the output.***\n
    RETURNS: footprints = Pandas dataframe (n x 1) of Matplotlib Path objects()
    """
    # Setup DF to house camera footprint polygons
    footprints = pd.DataFrame(np.zeros((cam.shape[0],1)), columns=['fov'])
    # debug - blank 3d array for inter_points
    # itp_f = '//thor.ad.uni.edu/users/jdietric/Documents/Python Scripts/py_sfm_depth/WhiteR_2016/itp.npy'
    # itp = np.zeros((cam.shape[0],4,2))
    # convert sensor dimensions to meters, divide x/y for corner coord calc
    f = sensor.focal[0] * 0.001
    sx = sensor.sensor_x[0] / 2 * 0.001
    sy = sensor.sensor_y[0] / 2 * 0.001
    # calculate the critical pitch (in degrees) where the horizon will be
    # visible; with the horizon visible, the ray projections go backward
    # and produce erroneous IFOV polygons (90 - 0.5*vert_fov)
    crit_pitch = 90 - np.rad2deg(np.arctan(sy / f))
    # User Feedback
    print("Proccesing Camera IFOVs (%i total)..." %(cam.shape[0]))
    sys.stdout.flush()
    # for each camera...
    for idx, row in cam.iterrows():
        # check is the camera pitch is over the critical value
        if row.pitch < crit_pitch:
            # sensor corners (UR,LR,LL,UL), north-oriented and zero pitch;
            # corners sit one focal length behind the camera center (y - f)
            corners = np.array([[row.x+sx,row.y-f,row.z+sy],
                                [row.x+sx,row.y-f,row.z-sy],
                                [row.x-sx,row.y-f,row.z-sy],
                                [row.x-sx,row.y-f,row.z+sy]])
            # offset corner points by cam x,y,z for rotation
            cam_pt = np.atleast_2d(np.array([row.x, row.y, row.z]))
            corner_p = corners - cam_pt
            # get pitch, roll and yaw from the camera, convert to radians
            pitch = np.deg2rad(90.0-row.pitch)
            roll = np.deg2rad(row.roll)
            yaw = np.deg2rad(row.yaw)
            # setup pitch rotation matrix (r_x), roll matrix (r_y) and yaw
            # rotation matrix (r_z)
            # NOTE(review): the module docstring says roll is ignored, but
            # r_y below does apply it — confirm which is intended.
            r_x = np.matrix([[1.0,0.0,0.0],
                             [0.0,np.cos(pitch),-1*np.sin(pitch)],
                             [0.0,np.sin(pitch),np.cos(pitch)]])
            r_y = np.matrix([[np.cos(roll),0.0,np.sin(roll)],
                             [0.0,1.0,0.0],
                             [-1*np.sin(roll),0.0,np.cos(roll)]])
            r_z = np.matrix([[np.cos(yaw),-1*np.sin(yaw),0],
                             [np.sin(yaw),np.cos(yaw),0],
                             [0,0,1]])
            # rotate corner_p by r_x, then r_y, then r_z; add back cam x,y,z
            # offsets to produce corner coords rotated for pitch, roll and yaw
            p_pr = np.matmul(np.matmul(corner_p, r_x),r_y)
            p_out = np.matmul(p_pr, r_z) + cam_pt
            # GEOMETRY
            # Set Sympy 3D point for the camera and a horizontal 3D plane at
            # base_elev for the ray intersection
            cam_sp = spg.Point3D(row.x, row.y, row.z)
            plane = spg.Plane(spg.Point3D(row.x, row.y, base_elev),
                              normal_vector=(0,0,1))
            # blank array for footprint intersection coords
            inter_points = np.zeros((corners.shape[0],2))
            # for each sensor corner point
            idx_b = 0
            for pt in np.asarray(p_out):
                # create a Sympy 3D point and create a Sympy 3D ray from
                # corner point through camera point
                pt_sp = spg.Point3D(pt[0],pt[1],pt[2])
                ray = spg.Ray3D(pt_sp,cam_sp)
                # calculate the intersection of the ray with the plane
                inter_pt = plane.intersection(ray)
                # Extract out the X,Y coords for the intersection point
                # ground intersect points will be in this order (LL,UL,UR,LR)
                inter_points[idx_b,0] = inter_pt[0].x.evalf()
                inter_points[idx_b,1] = inter_pt[0].y.evalf()
                idx_b += 1
        # if crit_pitch is exceeded set inter_points to NaN
        else:
            inter_points = np.full((4,2),np.nan)
        # append inter_points to footprints as a matplotlib path object
        footprints.fov[idx] = mplPath.Path(inter_points)
        #debug - save inter_points
        # itp[idx,:,:] = inter_points
        # User feedback and progress bar
        if (idx+1) % 10 == 0:
            print("%i cameras processed..." %(idx+1))
            # gui.top_progBar.setValue(idx)
            # gui.topProg_Lbl.setText("Calculating Camera Footprints - " + str(idx+1))
            #sys.stdout.flush()
    #debug - save inter_points
    #np.save(itp_f,itp)
    return footprints
def visibility(cam, sensor, footprints, targets):
    """
    This function tests if the target points (x,y only) are "visible" (i.e.
    within the photo footprints) and calculates the "r" angle for the refraction
    correction\n
    Vars:\n
    \t cam = Pandas dataframe (n x ~6, fields: x,y,z,yaw,pitch,roll)\n
    \t sensor = Pandas dataframe (fields: focal, sensor_x, sensor_y, pix_x, pix_y)\n
    \t footprints = Pandas dataframe (n x 1) of Matplotlib Path objects\n
    \t targets = Pandas dataframe (n x ~3, fields: x,y,z,...)\n
    RETURNS (all numpy arrays of shape n_points x n_cams; entries for
    camera/point pairs that are not visible are NaN):\n
    \t r_filt = filtered inclination ("r") angles [deg]\n
    \t d_filt = filtered horizontal camera-target distances\n
    \t SD_filt = filtered slope (line-of-sight) distances\n
    \t rad_dist = radial distance of the point from the image center [pix]\n
    \t qual_vis = per-camera quality values (all-NaN when cam has no
    \t 'quality' column)
    """
    # Setup boolean array for visability
    vis = np.zeros((targets.shape[0],cam.shape[0]))
    # for each path objec in footprints, check is the points in targets are
    # within the path polygon. path.contains_points returns boolean.
    # the results are accumulated in the vis array.
    for idx in range(footprints.shape[0]):
        path = footprints.fov[idx]
        vis[:,idx] = path.contains_points(np.array([targets.x.values, targets.y.values]).T)
    # calculate the coord. deltas between the cameras and the target
    # (broadcast to n_points x n_cams)
    dx = np.atleast_2d(cam.x.values) - np.atleast_2d(targets.x.values).T
    dy = np.atleast_2d(cam.y.values) - np.atleast_2d(targets.y.values).T
    dz = np.atleast_2d(cam.z.values) - np.atleast_2d(targets.z.values).T
    # calc xy distance (d)
    d = np.sqrt((dx)**2+(dy)**2)
    # calc inclination angle (r) from targets to cams
    # NOTE(review): assumes cameras are above the targets (dz > 0); a
    # camera at/below a target would divide by zero or flip the sign.
    r = np.rad2deg(np.arctan(d/dz))
    # radial distance from the image center
    # calc mm/pix in the image size, convert focal length to pixels
    mm_pix = ((sensor.sensor_x/sensor.pix_x)+(sensor.sensor_y/sensor.pix_y))/2
    f_pix = sensor.focal / mm_pix[0]
    rad_dist = f_pix[0] * np.tan(np.radians(r))
    # mask non-visible pairs; zero radial distances become NaN so visibility
    # and "exactly at image center" are not conflated downstream
    rad_dist = rad_dist * vis
    rad_dist[rad_dist == 0] = np.nan
    max_radial_dist = sensor.pix_x[0]
    # NOTE(review): rad_dist_pcent is computed but never returned or used —
    # confirm whether it should be part of the return tuple.
    rad_dist_pcent = rad_dist / max_radial_dist
    # slope dist calc.
    # cosine
    SD = dz/np.cos(np.radians(r))
    #filter r, d, and sd values by vis matrix (zeros -> NaN)
    r_filt = r * vis
    r_filt[r_filt == 0] = np.nan
    d_filt = d * vis
    d_filt[d_filt == 0] = np.nan
    SD_filt = SD * vis
    SD_filt[SD_filt == 0] = np.nan
    # optional per-camera quality values, broadcast per point and masked by
    # visibility; all-NaN when the camera table has no 'quality' column
    if 'quality' in cam.columns:
        qual = cam.quality.values
        qual_mat = np.repeat(np.atleast_2d(qual),targets.shape[0],axis=0)
        qual_vis = qual_mat * vis
        qual_vis[qual_vis==0] = np.nan
    else:
        qual_vis = np.zeros((targets.shape[0],cam.shape[0]))
        qual_vis[:] = np.nan
    return r_filt, d_filt, SD_filt, rad_dist, qual_vis
#def correction(r, target, extras):
# """Performs the per camera refraction correction on a target point.
# Refer to the documentation for the specifics.
# extras = [smAng, stats, camStats, weight]"""
#
# # convert r array to radians for trig calculations
# ang_r = np.radians(r)
#
# # calculate the refraction angle i
# ang_i = np.arcsin(1.0/1.337 * np.sin(ang_r))
#
# # calculate the apparent depth from the water surface elev. and the
# # target SfM elevation
# target['h_a'] = target.w_surf - target.sfm_z
#
# # calculate the distance from the point to the air/water interface
# x_dist = np.array([target.h_a.values]).T * np.tan(ang_r)
#
# # calculate the corrected (actual) depth
# h = x_dist / np.tan(ang_i)
#
# # subtract the corrected depth from the water surface elevation to get the
# # corrected elevation
# cor_elev = np.array([target.w_surf]).T - h
#
# # append the mean values for the actual depth and corrected elevation to
# # the target data frame
# target['h_avg'] = np.nanmean(h, axis = 1)
# target['corElev_avg'] = np.nanmean(cor_elev, axis = 1)
#
# if extras[0]:
# # calc small angle approximation
# target['smAng_h'] = target['h_a'] * 1.34
# target['smAng_elev'] = target.w_surf - target['smAng_h']
#
#if extras[1]:
# some extra statistics for playing around (option from GUI)
# target['h_std'] = np.nanstd(h, axis = 1)
# target['h_med'] = np.nanmedian(h, axis = 1)
# target['h_min'] = np.nanmin(h, axis = 1)
# target['h_max'] = np.nanmax(h, axis = 1)
# target['h_per15'] = np.nanpercentile(h, 15,axis = 1)
# target['h_per25'] = np.nanpercentile(h, 25,axis = 1)
# target['h_per55'] = np.nanpercentile(h, 55,axis = 1)
# target['h_per60'] = np.nanpercentile(h, 60,axis = 1)
# target['h_per75'] = np.nanpercentile(h, 75,axis = 1)
# target['h_per80'] = np.nanpercentile(h, 80,axis = 1)
# target['h_per90'] = np.nanpercentile(h, 90,axis = 1)
# target['h_per95'] = np.nanpercentile(h, 95,axis = 1)
# target['h_iqr'] = target['h_per75'] - target['h_per25']
# target['h_lif'] = target['h_per25'] - (1.5 * target['h_iqr'])
# target['h_uif'] = target['h_per75'] + (1.5 * target['h_iqr'])
# target['h_lof'] = target['h_per25'] - (3 * target['h_iqr'])
# target['h_uof'] = target['h_per75'] + (3 * target['h_iqr'])
# l_whisk = np.repeat(np.atleast_2d(target['h_lif']).T,188,axis=1)
# u_whisk = np.repeat(np.atleast_2d(target['h_uif']).T,188,axis=1)
# target['u_whisk'] = np.nanmax(np.ma.masked_less_equal(h,u_whisk).data,axis=1)
# target['l_whisk'] = np.nanmin(np.ma.masked_greater_equal(h,l_whisk).data,axis=1)
# mild_out = np.zeros_like(h)
# ext_out = np.zeros_like(h)
# mild_out[h < np.atleast_2d(target['h_lif']).T] = 1
# mild_out[h > np.atleast_2d(target['h_uif']).T] = 1
# ext_out[h < np.atleast_2d(target['h_lof']).T] = 1
# ext_out[h > np.atleast_2d(target['h_uof']).T] = 1
# target['h_mildout'] = np.nansum(mild_out, axis = 1)
# target['h_extout'] = np.nansum(ext_out, axis = 1)
# if extras[3]:
# # if the weighted checkbox is selected
# max_angle = np.radians(45.0)
# h_weight = h
# h_weight[ang_i > max_angle] = np.nan
#
# cor_elev = np.array([target.w_surf]).T - h_weight
#
# # append the "weighted" mean values for the actual depth and corrected elevation to
# # the target data frame
# target['h_avg_weight1'] = np.nanmean(h, axis = 1)
# target['corElev_avg_weight1'] = np.nanmean(cor_elev, axis = 1)
#
# max_angle = np.radians(35.0)
# h_weight = h
# h_weight[ang_i > max_angle] = np.nan
#
# cor_elev = np.array([target.w_surf]).T - h_weight
#
# # append the "weighted" mean values for the actual depth and corrected elevation to
# # the target data frame
# target['h_avg_weight2'] = np.nanmean(h, axis = 1)
# target['corElev_avg_weight2'] = np.nanmean(cor_elev, axis = 1)
#
# max_angle = np.radians(30.0)
# h_weight = h
# h_weight[ang_i > max_angle] = np.nan
#
# cor_elev = np.array([target.w_surf]).T - h_weight
#
# # append the "weighted" mean values for the actual depth and corrected elevation to
# # the target data frame
# target['h_avg_weight3'] = np.nanmean(h, axis = 1)
# target['corElev_avg_weight3'] = np.nanmean(cor_elev, axis = 1)
# return the target dataframe
# return target, ang_r, x_dist, h, cor_elev
# END def correction
def pointFilter(tar, h, r, d):
    """Filter per-camera corrected depths by camera angle and distance.

    tar : target table (DataFrame with a 'w_surf' water-surface column);
          gains 'h_filt' and 'corElev_avg_filt' columns.
    h   : (n_points x n_cams) corrected depth table.
    r   : (n_points x n_cams) camera inclination angles [deg].
    d   : (n_points x n_cams) camera horizontal distances.

    Returns ``tar`` with the filtered mean depth and corrected elevation added.
    """
    max_ang = 35
    max_dist = 100
    # BUG FIX: the original aliased the input (h_filt = h), so the NaN
    # masking below silently mutated the caller's depth table in place.
    h_filt = np.array(h, copy=True)
    h_filt[r > max_ang] = np.nan
    h_filt[d > max_dist] = np.nan
    tar['h_filt'] = np.nanmean(h_filt, axis=1)
    cor_elev_filt = np.array([tar.w_surf]).T - h_filt
    tar['corElev_avg_filt'] = np.nanmean(cor_elev_filt, axis=1)
    return tar
def timer(length, start_t):
    """Print the running time for the points processed so far.

    length  : list of per-chunk point counts (summed for the total).
    start_t : datetime when processing started.
    """
    total_points = sum(length)
    # elapsed wall-clock time since processing started
    elapsed = (datetime.now() - start_t).total_seconds()
    if elapsed <= 60:
        print("-> Finished %i points in %0.2f secs" % (total_points, elapsed))
    else:
        print("-> Finished %i points in %0.2f mins" % (total_points, elapsed / 60))
# END - MAIN PROGRAM HELPER FUNCTIONS
def main_prog():
    """Run the full SfM camera-footprint / point-statistics pipeline.

    Reads the target point cloud in chunks, computes (or loads precomputed)
    camera footprint polygons, tests per-point camera visibility, and appends
    per-point camera statistics to ``outfile``. All paths are hard-coded below.
    """
    # CSV point cloud
    target_file = 'D:/Dropbox/Python/SfMAI/data/Mochlos_densesub_1-5mill.csv'
    # Camera Coords Exported from Metashape
    cam_file = 'D:/Dropbox/Python/SfMAI/data/mochlos_cams.csv'
    # sensor parameter file
    sensor_file = 'D:/Dropbox/Python/SfMAI/data/P3_sensor.csv'
    # output filename (appended to - see NOTE below)
    outfile = 'D:/Dropbox/Python/SfMAI/Mochlos_Test2.csv'
    # for a given dataset, the first run you can save the camera footprints (exportCam = True)
    # for subsequent testing you can set exportCam = False, precalcCam = True
    # pickle_file = path to exported pickle file
    exportCam = False
    precalcCam = True
    pickle_file = 'D:/Dropbox/Python/SfMAI/data/mochlos_cams_cam_foot.pkl' # .pkl
    # extra-option flags (order as used by the disabled correction code):
    # [small-angle approx, extra stats, debug export, filter]
    extraOpt = np.array([True, False, False, True])
    print("The extra options are: ")
    print(extraOpt)
    # INPUTS - see sample_data folder in GitHub repository for file header formats
    # target points - as CSV point cloud (x,y,z,w_surf,r,g,b) from CloudCompare
    # will be read in 10000 point chunks for memory managment purposes
    targets = pd.read_csv(target_file, chunksize = 10000)
    # camera file - from Photoscan (Name, Position, Orientation...)
    # check for precalc checkbox, if so read directly to cam_r variable
    if precalcCam:
        foot_prints = pd.read_pickle(pickle_file)
        cams = pd.read_csv(cam_file)
    else:
        cams = pd.read_csv(cam_file)
    # camera sensor parameters - user generated csv
    sensor = pd.read_csv(sensor_file)
    # user feedback
    print("Data Loaded...")
    #sys.stdout.flush()
    # record the start time of the actual processing
    start_time = datetime.now()
    # array for count of total points
    count = []
    # Main Processing Loop, for each chunk of points from the reader
    for idx, tar in enumerate(targets):
        # Error check for mislabeled columns, from CloudCompare the column
        # header starts '//X,Y,Z
        if tar.columns.values[0] == '//X':
            tar.columns.values[0] = 'x'
            tar.columns.values[1] = 'y'
            tar.columns.values[2] = 'z'
        if tar.columns.values[0] == 'X':
            tar.columns.values[0] = 'x'
            tar.columns.values[1] = 'y'
            tar.columns.values[2] = 'z'
        count.append(tar.shape[0])
        # if the index(idx) equals 0, use the mean elevation of the first chunk
        # use the mean elevation of the first chunk of points to calculate the camera footprints
        if idx == 0 and not precalcCam:
            # establish mean elevation for footprint mapping from the mean
            # elevation of the target points
            base_elev = np.mean(tar.z)
            # build camera footprints
            foot_prints = footprints(cams,sensor,base_elev)
            if exportCam:
                print("Saving footprints")
                foot_file = os.path.dirname(outfile) + '/' + os.path.basename(cam_file)[:-4] + '_cam_foot.pkl'
                foot_prints.to_pickle(foot_file)
            # timer
            cam_end_time = datetime.now()
            mins_c = (cam_end_time - start_time).total_seconds() / 60
            print("Processed %i cameras in %0.2f minutes" %(np.count_nonzero(cams.x),mins_c))
        # use feedback and timer start
        if idx == 0 and precalcCam:
            refract_start_time = datetime.now()
        # test the visability of target point based on the camera footprints
        cam_r,cam_dist,cam_slpDist,radial_dist,cam_qual = visibility(cams,sensor,foot_prints,tar)
        # Save cam_R and for Debug targets
        # if self.exportCam_box.isChecked():
        # r_file = os.path.dirname(outfile) + '/' + os.path.basename(cam_file)[:-4] + '_cam_r.csv'
        # np.savetxt(r_file,cam_r,delimiter=",")
        # tar_file = os.path.dirname(outfile) + '/d_tar.csv'
        # tar.to_csv(tar_file, header=True, index=False)
        # perform the refraction correction
        # tar_out, ang_r, x_dist, h, cor_elev = correction(cam_r, tar, extraOpt)
        # out dataframe (refraction correction above is disabled, so only
        # visibility statistics are appended)
        tar_out = tar.copy(deep=True)
        #if filter checkbox is ticked
        # if extraOpt[3]:
        # tar_out = pointFilter(tar_out,h,cam_r,cam_dist)
        # OUTPUT STATS
        # number of cameras that can see each point
        tar_out['cam_count'] = np.count_nonzero(~np.isnan(cam_r),axis=1)
        # QUALITY
        # NOTE(review): the *_median columns below are all computed with
        # np.nanmean, not np.nanmedian — confirm whether that is intended.
        if "quality" in cams.columns:
            tar_out['cam_qual_mean'] = np.nanmean(cam_qual, axis = 1)
            tar_out['cam_qual_median'] = np.nanmean(cam_qual, axis = 1)
            tar_out['cam_qual_std'] = np.nanstd(cam_qual, axis = 1)
            tar_out['cam_qual_min'] = np.nanmin(cam_qual, axis = 1)
            tar_out['cam_qual_max'] = np.nanmax(cam_qual, axis = 1)
        else:
            tar_out['cam_qual_mean'] = 0
            tar_out['cam_qual_median'] = 0
            tar_out['cam_qual_std'] = 0
            tar_out['cam_qual_min'] = 0
            tar_out['cam_qual_max'] = 0
        # Angle
        tar_out['cam_ang_mean'] = np.nanmean(cam_r, axis = 1)
        tar_out['cam_ang_median'] = np.nanmean(cam_r, axis = 1)
        tar_out['cam_ang_std'] = np.nanstd(cam_r, axis = 1)
        tar_out['cam_ang_min'] = np.nanmin(cam_r, axis = 1)
        tar_out['cam_ang_max'] = np.nanmax(cam_r, axis = 1)
        # radial image dist
        tar_out['cam_rD_mean'] = np.nanmean(radial_dist, axis = 1)
        tar_out['cam_rD_median'] = np.nanmean(radial_dist, axis = 1)
        tar_out['cam_rD_std'] = np.nanstd(radial_dist, axis = 1)
        tar_out['cam_rD_min'] = np.nanmin(radial_dist, axis = 1)
        tar_out['cam_rD_max'] = np.nanmax(radial_dist, axis = 1)
        # Distance
        tar_out['cam_xydist_mean'] = np.nanmean(cam_dist, axis = 1)
        tar_out['cam_xydist_median'] = np.nanmean(cam_dist, axis = 1)
        tar_out['cam_xydist_std'] = np.nanstd(cam_dist, axis = 1)
        tar_out['cam_xydist_min'] = np.nanmin(cam_dist, axis = 1)
        tar_out['cam_xydist_max'] = np.nanmax(cam_dist, axis = 1)
        #slope distance
        tar_out['cam_slpDist_mean'] = np.nanmean(cam_slpDist, axis = 1)
        tar_out['cam_slpDist_median'] = np.nanmean(cam_slpDist, axis = 1)
        tar_out['cam_slpDist_std'] = np.nanstd(cam_slpDist, axis = 1)
        tar_out['cam_slpDist_min'] = np.nanmin(cam_slpDist, axis = 1)
        tar_out['cam_slpDist_max'] = np.nanmax(cam_slpDist, axis = 1)
        # DEBUG export
        # if extraOpt[2]:
        # file = os.path.dirname(outfile) + '/' + os.path.basename(target_file)[:-4] + '_tar_out.pkl'
        # tar_out.to_pickle(file)
        #
        # file = os.path.dirname(outfile) + '/' + os.path.basename(target_file)[:-4] + '_cam_r.csv'
        # np.savetxt(file,cam_r,delimiter=",")
        #
        # file = os.path.dirname(outfile) + '/' + os.path.basename(target_file)[:-4] + '_cam_dist.csv'
        # np.savetxt(file,cam_dist,delimiter=",")
        #
        # file = os.path.dirname(outfile) + '/' + os.path.basename(target_file)[:-4] + '_ang_r.csv'
        # np.savetxt(file,ang_r,delimiter=",")
        #
        # file = os.path.dirname(outfile) + '/' + os.path.basename(target_file)[:-4] + '_x_dist.csv'
        # np.savetxt(file,x_dist,delimiter=",")
        #
        # file = os.path.dirname(outfile) + '/' + os.path.basename(target_file)[:-4] + '_h_all.csv'
        # np.savetxt(file,h,delimiter=",")
        #
        # file = os.path.dirname(outfile) + '/' + os.path.basename(target_file)[:-4] + '_cor_elev.csv'
        # np.savetxt(file,cor_elev,delimiter=",")
        # output - for the first chunk write header row, else append subsequent
        # chunks without headers
        # NOTE(review): mode 'a' means a pre-existing outfile is appended to,
        # not overwritten — confirm the file is fresh between runs.
        if idx == 0:
            with open(outfile, 'a') as f:
                tar_out.to_csv(f, header=True, index=False)
        else:
            with open(outfile, 'a') as f:
                tar_out.to_csv(f, header=False, index=False)
        # user feedback, def timer and bottom progress bar
        # self.bot_progBar.setValue(idx)
        if precalcCam:
            timer(count, refract_start_time)
        else:
            timer(count, cam_end_time)
    # User feedback on the total processing time
    tot_count = sum(count)
    tot_time = (datetime.now() - start_time).total_seconds() / 60
    print("%i points processed, Total Running Time = %0.2f minutes" %(tot_count,tot_time))
    # self.botProg_lbl.setText('Processing Complete')
# Script entry point: run the full footprint/visibility pipeline.
if __name__ == "__main__":
    main_prog()
|
{"hexsha": "7648a24ff70f35bd161d5f89a5576aee465d23c7", "size": 24267, "ext": "py", "lang": "Python", "max_stars_repo_path": "py_bathySfM_debug_SfMAi.py", "max_stars_repo_name": "geojames/SfM_AI", "max_stars_repo_head_hexsha": "715f763fbaf829e4b75f9b0ee086c41896333a04", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "py_bathySfM_debug_SfMAi.py", "max_issues_repo_name": "geojames/SfM_AI", "max_issues_repo_head_hexsha": "715f763fbaf829e4b75f9b0ee086c41896333a04", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "py_bathySfM_debug_SfMAi.py", "max_forks_repo_name": "geojames/SfM_AI", "max_forks_repo_head_hexsha": "715f763fbaf829e4b75f9b0ee086c41896333a04", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6958762887, "max_line_length": 112, "alphanum_fraction": 0.5729179544, "include": true, "reason": "import numpy,import sympy", "num_tokens": 6487}
|
"""Solar geometry, angles and shading.
"""
import numpy as np
import pandas as pd
from sitka.utils.time_series import TimeSeriesComponent
class SolarAngles(TimeSeriesComponent):
"""
Store solar angles for a site.
Parameters
----------
time
site
Attributes
----------
gamma : Series
Gamma angle.
equation_of_time: Series
Equation of time.
apparent_solar_time: Series
Apparent solar time.
declination: Series
Solar declination angle.
hour_angle: Series
Hour angle of the sun.
number_of_sunlight_hours: Series
Number of sunlight hours per day.
sun_up: Series
Flag to define when the sun is above the
horizon (1 = sun up, 0 = sun is down).
solar_zenith: Series
Solar zenith angle.
solar_altitude: Series
Solar altitude angle.
solar_azimuth: Series
Solar azimuth angle.
"""
def __init__(self, time, site):
# Solar angles
self.gamma = None
self.equation_of_time = None
self.apparent_solar_time = None
self.declination = None
self.hour_angle = None
self.sunrise_hour_angle = None
self.sunset_hour_angle = None
self.number_of_sunlight_hours = None
self.sun_up = None
self.solar_zenith = None
self.solar_altitude = None
self.solar_azimuth = None
# General Properties
self._site = site
# Add attributes from super class
super().__init__(time)
# Run method to update all calculated values
self.update_calculated_values()
def update_calculated_values(self):
"""Update all calculated values.
"""
print('Updating solar angles')
self.calculate_gamma()
self.calculate_equation_of_time()
self.calculate_apparent_solar_time()
self.calculate_declination()
self.calculate_hour_angle()
self.calculate_sunrise_hour_angle()
self.calculate_sunset_hour_angle()
self.calculate_number_of_sunlight_hours()
self.calculate_sun_up()
self.calculate_solar_zenith()
self.calculate_solar_altitude()
self.calculate_solar_azimuth()
def calculate_gamma(self):
"""
Calculate Gamma from the julian_day.
Parameters
----------
julian_day : Series
Julian day number [1-365]
Yields
--------
gamma : Series
References
--------
"""
gamma = 360*(self.time.julian_day-1)/365
self.gamma = pd.Series(gamma)
def calculate_equation_of_time(self):
"""
Calculate equation of time from Gamma.
Parameters
----------
gamma : Series
Yields
--------
equation_of_time : Series
References
--------
"""
gamma = self.gamma
cosgamma = np.cos(np.deg2rad(gamma))
singamma = np.sin(np.deg2rad(gamma))
cos2gamma = np.cos(np.deg2rad(2*gamma))
sin2gamma = np.sin(np.deg2rad(2*gamma))
equation_of_time = 2.2918*(0.0075+0.1868*cosgamma-3.2077*singamma-1.4615*cos2gamma-4.089*sin2gamma)
self.equation_of_time = pd.Series(equation_of_time)
def calculate_apparent_solar_time(self):
"""
Calculate the apparent solar time for each item in the series.
Parameters
----------
hour : Series
equation_of_time : Series
longitude : float
local_standard_meridian : float
Yields
----------
apparent_solar_time : Series
References
--------
"""
hr = self.time.datetime_range.hour
apparent_solar_time = hr + self.equation_of_time/60 + (self.site.longitude-self.site.local_standard_meridian)/15
self.apparent_solar_time = pd.Series(apparent_solar_time)
def calculate_declination(self):
"""
Calculate the solar declination angle for each item in the series.
Parameters
----------
julian_day : Series
Yields
----------
declination : Series
References
--------
"""
declination = 23.45*(np.sin(np.deg2rad(360*(self.time.julian_day+284)/365)))
self.declination = pd.Series(declination)
def calculate_hour_angle(self):
"""
Calculate the solar hour angle for each item in the series.
Parameters
----------
apparent_solar_time : Series
Yields
----------
hour_angle : Series
References
--------
"""
hour_angle = 15*(self.apparent_solar_time-12)
self.hour_angle = pd.Series(hour_angle)
def calculate_sunrise_hour_angle(self):
"""
Calculate the sunrise hour angle for each item in the series.
Parameters
----------
latitude : float
declination : Series
Yields
----------
sunrise_hour_angle : Series
References
--------
"""
sunrise_hour_angle = -np.rad2deg(np.arccos(-np.tan(np.deg2rad(self.site.latitude))*np.tan(np.deg2rad(self.declination))))
self.sunrise_hour_angle = pd.Series(sunrise_hour_angle)
def calculate_sunset_hour_angle(self):
"""
Calculate the sunset hour angle for each item in the series.
Parameters
----------
latitude : float
declination : Series
Yields
----------
sunset_hour_angle : Series
References
--------
"""
sunset_hour_angle = np.rad2deg(np.arccos(-np.tan(np.deg2rad(self.site.latitude))*np.tan(np.deg2rad(self.declination))))
self.sunset_hour_angle = pd.Series(sunset_hour_angle)
def calculate_number_of_sunlight_hours(self):
"""
Calculate the number of sunlight hours per day for each item in the series.
Parameters
----------
latitude : float
declination : Series
Yields
----------
number_of_sunlight_hours : Series
References
--------
"""
number_of_sunlight_hours = 2/15*np.rad2deg(np.arccos(-np.tan(np.deg2rad(self.site.latitude))*np.tan(np.deg2rad(self.declination))))
self.number_of_sunlight_hours = pd.Series(number_of_sunlight_hours)
def calculate_sun_up(self):
"""
Set a flag for whether the sun is above the horizon for each item in the series.
Parameters
----------
hour_angle : Series
sunrise_hour_angle : Series
sunset_hour_angle : Series
Yields
----------
sun_up : Series
References
--------
"""
sun_up = np.ones(self.time.length)*False
sun_up[
(self.hour_angle > self.sunrise_hour_angle) &
(self.hour_angle < self.sunset_hour_angle)
] = True
self.sun_up = pd.Series(sun_up)
def calculate_solar_zenith(self):
"""
Calculate the solar zenith angle for each item in the series.
Parameters
----------
latitude : float
sun_up : Series
declination : Series
hour_angle : Series
Yields
----------
solar_zenith : Series
References
--------
"""
sun_up = self.sun_up
latitude = np.deg2rad(self.site.latitude)
declination = np.deg2rad(self.declination)
hour_angle = np.deg2rad(self.hour_angle)
solar_zenith = sun_up*np.rad2deg(np.arccos(np.cos(latitude)*np.cos(declination)*np.cos(hour_angle)+np.sin(latitude)*np.sin(declination)))
self.solar_zenith = pd.Series(solar_zenith)
def calculate_solar_altitude(self):
"""
Calculate the solar altitude angle for each item in the series.
Parameters
----------
calculate_sun_up : Series
solar_zenith : Series
Yields
----------
solar_altitude : Series
References
--------
"""
sun_up = self.sun_up
solar_zenith = self.solar_zenith
solar_altitude = sun_up*(90-solar_zenith)
self.solar_altitude = pd.Series(solar_altitude)
def calculate_solar_azimuth(self):
"""
Calculate the solar azimuth angle for each item in the series.
Parameters
----------
latitude : float
declination : Series
hour_angle : Series
solar_altitude : Series
Yields
----------
solar_azimuth : Series
References
--------
"""
rad_latitude = np.deg2rad(self.site.latitude)
rad_declination = np.deg2rad(self.declination)
rad_hour_angle = np.deg2rad(self.hour_angle)
rad_solar_altitude = np.deg2rad(self.solar_altitude)
rad_solar_azimuth = np.arcsin(np.sin(rad_hour_angle)*np.cos(rad_declination)/np.cos(rad_solar_altitude)) #azimuth = 0 is due south
solar_azimuth = np.rad2deg(rad_solar_azimuth)
self.solar_azimuth = pd.Series(solar_azimuth)
    @property
    def site(self):
        """Site (location) object; supplies the latitude used by the solar geometry."""
        return self._site
    @site.setter
    def site(self, value):
        # Changing the site invalidates every derived series, so recompute.
        self._site= value
        self.update_calculated_values()
class SurfaceSolarAngles(TimeSeriesComponent):
    """
    Solar angles on a surface.

    Parameters
    ----------
    time
        Shared time-series object (supplies ``time.length``).
    solar_angles
        Solar-angles object with the per-timestep solar geometry.
    surface
        Surface object providing ``tilt`` and ``azimuth`` [deg].

    Attributes
    ----------
    sun_surface_azimuth : Series
        Sun to surface azimuth angle [deg].
    incidence_angle : Series
        Incidence angle of the sun on the surface [deg].
    sun_on_surface : Series
        Flag (0.0/1.0) defining whether the sun is incident on the surface.
    profile_angle : Series
        The profile angle of the sun [deg].
    """
    def __init__(self, time, solar_angles, surface):
        # Calculated series; populated by update_calculated_values().
        self.sun_surface_azimuth = None
        self.incidence_angle = None
        self.sun_on_surface = None
        self.profile_angle = None

        # Associated objects
        self._surface = surface
        self._solar_angles = solar_angles

        # Add attributes from super class
        super().__init__(time)

        # Run method to update all calculated values
        self.update_calculated_values()

    def update_calculated_values(self):
        """Recompute all derived series, in dependency order."""
        print('Updating surface solar calculations.')
        self.calculate_sun_surface_azimuth()   # Sun-surface azimuth angle [deg]
        self.calculate_sun_on_surface()        # Times where surface is sunlit
        self.calculate_incidence_angle()       # Sun-surface incidence angle [deg]
        self.calculate_profile_angle()         # Profile angle [deg]

    def calculate_sun_surface_azimuth(self):
        """
        Calculate the sun to surface azimuth angle for each item in the series.

        Parameters
        ----------
        surface.azimuth : float
        solar_angles.solar_azimuth : Series

        Yields
        ----------
        sun_surface_azimuth : Series
        """
        # Absolute angular offset between where the surface faces and where
        # the sun is; always >= 0 because of the abs().
        surface_azimuth = np.ones(self.time.length)*self.surface.azimuth
        solar_azimuth = self.solar_angles.solar_azimuth
        sun_surface_azimuth = np.abs(surface_azimuth - solar_azimuth)
        self.sun_surface_azimuth = pd.Series(sun_surface_azimuth)

    def calculate_sun_on_surface(self):
        """
        Calculate whether the sun is incident on the surface for each item in the series.

        Parameters
        ----------
        solar_angles.solar_altitude : Series
        sun_surface_azimuth : Series

        Yields
        ----------
        sun_on_surface : Series
        """
        # 0.0/1.0 flags: the sun strikes the surface when it is above the
        # horizon and within 90 deg of the surface azimuth.  (The > -90 test
        # is redundant since sun_surface_azimuth is an absolute value, but it
        # is harmless and kept as a guard.)
        sun_on_surface = False*np.ones(self.time.length)
        sun_on_surface[
            (self.solar_angles.solar_altitude > 0) &
            (self.sun_surface_azimuth < 90) &
            (self.sun_surface_azimuth > -90)
        ] = True
        self.sun_on_surface = pd.Series(sun_on_surface)

    def calculate_incidence_angle(self):
        """
        Calculate the incidence angle of the sun on the surface for each item in the series.

        Parameters
        ----------
        solar_angles.solar_altitude : Series
        surface.tilt : float
        sun_surface_azimuth : Series

        Yields
        ----------
        incidence_angle : Series

        Notes
        -----
        NOTE(review): the angle is computed for every timestep, including
        times when sun_on_surface is 0 -- confirm whether masking by
        sun_on_surface is intended here.  (Previously computed latitude,
        declination, hour-angle, surface-azimuth and sun_on_surface locals
        were unused by the formula and have been removed.)
        """
        solar_altitude = np.deg2rad(self.solar_angles.solar_altitude)
        surface_tilt = np.deg2rad(self.surface.tilt)
        sun_surface_azimuth = np.deg2rad(self.sun_surface_azimuth)
        # cos(theta) = cos(alt)*cos(sun_surf_azi)*sin(tilt) + sin(alt)*cos(tilt)
        incidence_angle = np.rad2deg(np.arccos(np.cos(solar_altitude)*np.cos(sun_surface_azimuth)*np.sin(surface_tilt)+np.sin(solar_altitude)*np.cos(surface_tilt)))
        self.incidence_angle = pd.Series(incidence_angle)

    def calculate_profile_angle(self):
        """
        Calculate the profile angle of the sun on the surface for each item in the series.

        Parameters
        ----------
        solar_angles.solar_altitude : Series
        sun_surface_azimuth : Series

        Yields
        ----------
        profile_angle : Series
        """
        # tan(profile) = tan(altitude) / cos(sun_surface_azimuth)
        solar_altitude = np.deg2rad(self.solar_angles.solar_altitude)
        sun_surface_azimuth = np.deg2rad(self.sun_surface_azimuth)
        profile_angle = np.rad2deg(np.arctan(np.tan(solar_altitude)/(np.cos(sun_surface_azimuth))))
        self.profile_angle = pd.Series(profile_angle)

    @property
    def surface(self):
        """Surface object providing tilt and azimuth [deg]."""
        return self._surface

    @surface.setter
    def surface(self, value):
        # Recompute all dependent series whenever the surface changes.
        self._surface = value
        self.update_calculated_values()

    @property
    def solar_angles(self):
        """Solar-angles object with the per-timestep solar geometry."""
        return self._solar_angles

    @solar_angles.setter
    def solar_angles(self, value):
        # Recompute all dependent series whenever the solar angles change.
        self._solar_angles = value
        self.update_calculated_values()
|
{"hexsha": "5b810cccac0c1bf2fcd0d5f5007acc6f29b98231", "size": 14471, "ext": "py", "lang": "Python", "max_stars_repo_path": "sitka/calculations/solar.py", "max_stars_repo_name": "mcneillj/sitka", "max_stars_repo_head_hexsha": "1a50e009433d0296426765303406157ab39b8632", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2018-11-10T21:44:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-20T13:38:22.000Z", "max_issues_repo_path": "sitka/calculations/solar.py", "max_issues_repo_name": "mcneillj/sitka", "max_issues_repo_head_hexsha": "1a50e009433d0296426765303406157ab39b8632", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-11-11T21:53:22.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-17T19:51:05.000Z", "max_forks_repo_path": "sitka/calculations/solar.py", "max_forks_repo_name": "mcneillj/sitka", "max_forks_repo_head_hexsha": "1a50e009433d0296426765303406157ab39b8632", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.263671875, "max_line_length": 164, "alphanum_fraction": 0.6000276415, "include": true, "reason": "import numpy", "num_tokens": 3156}
|
"""
Name: create_edge_images.py
Desc: Creates and saves edge images for each point. The edges are computed from the depth
zbuffer images by using a Canny edge detector.
Requires (to be run):
- generate_points.py
- create_depth_zbuffer_images.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import scipy
from scipy import misc
import skimage
from skimage import filters
from skimage import io as io
from skimage import img_as_uint
import cv2
import numpy as np
import warnings
# Import from project
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from load_settings import settings
import io_utils
from nonblender_utils import Profiler
import nonblender_utils
# Root directory of the model data (taken from the project settings).
basepath = settings.MODEL_PATH
# Output task name; edge images are written under <basepath>/<TASK_NAME>.
TASK_NAME = 'edge_occlusion'
# Input task name and the mode string passed to imread ('L' -- presumably
# grayscale; confirm against the depth-image format).
INPUT_NAME, INPUT_TYPE = 'depth_zbuffer', 'L'
def main():
    """Create and save an edge-occlusion image for every saved point/view.

    For each view, the depth z-buffer image is read, smoothed while ignoring
    far-plane pixels, passed through a Sobel edge filter, optionally
    thresholded, and written under the TASK_NAME directory.
    """
    global logger
    logger = io_utils.create_logger(__name__)

    point_infos = io_utils.load_saved_points_of_interest(basepath)
    n_images = io_utils.get_number_imgs(point_infos)

    if not os.path.exists(os.path.join(basepath, TASK_NAME)):
        os.mkdir(os.path.join(basepath, TASK_NAME))

    def fsmooth(x):
        # Gaussian smoothing (sigma=1); hoisted out of the loop because it
        # carries no per-image state.
        return skimage.filters.gaussian(x, 1., mode='constant')

    image_number = 1
    with Profiler("Render", logger=logger) as pflr:
        for point_number, point_info in enumerate(point_infos):
            for view_num, view_of_point in enumerate(point_info):
                depth_fpath = io_utils.get_file_name_for(os.path.join(basepath, INPUT_NAME),
                                                         view_of_point['point_uuid'], view_of_point['view_id'],  # view_num,
                                                         view_of_point['camera_uuid'], INPUT_NAME,
                                                         settings.PREFERRED_IMG_EXT.lower())
                # NOTE(review): scipy.misc.imread's second positional argument
                # is `flatten`, not `mode`; passing 'L' here relies on it being
                # truthy -- confirm the intended read mode.
                depth_img = scipy.misc.imread(depth_fpath, INPUT_TYPE)

                # Pixels at/near the 16-bit far plane are invalid and excluded
                # from smoothing and edge detection.
                mask = depth_img < 2 ** 16 - 500
                input_img = np.sqrt(depth_img) / np.sqrt(float(2 ** 16))
                # Bug fix: the smoothed result was previously discarded (the
                # helper is pure and returns its output rather than mutating
                # the input), so the smoothing step had no effect.
                input_img = smooth_with_function_and_mask(input_img, fsmooth, mask)
                edge_img = skimage.filters.sobel(input_img, mask=mask)
                edge_img = img_as_uint(edge_img)  # convert to 16-bit image
                edge_img = edge_img.astype(np.uint16)
                if settings.EDGE_3D_THRESH:
                    # NOTE(review): this yields a float 0.0/1.0 image rather
                    # than uint16 -- confirm downstream consumers expect that.
                    edge_img = 1.0 * (edge_img > settings.EDGE_3D_THRESH)

                edge_fpath = io_utils.get_file_name_for(os.path.join(basepath, TASK_NAME), view_of_point['point_uuid'],
                                                        view_of_point['view_id'],  # view_num,
                                                        view_of_point['camera_uuid'], TASK_NAME,
                                                        settings.PREFERRED_IMG_EXT.lower())
                with warnings.catch_warnings():  # ignore 'low contrast image' warning
                    warnings.simplefilter('ignore', UserWarning)
                    cv2.imwrite(edge_fpath, edge_img)
                pflr.step('finished img {}/{}'.format(image_number, n_images))
                image_number += 1
def smooth_with_function_and_mask(image, function, mask):
    """Smooth an image with a linear function, ignoring masked pixels.

    Parameters
    ----------
    image : array
        Image you want to smooth.
    function : callable
        A function that does image smoothing (must be linear).
    mask : array
        Mask with 1's for significant pixels, 0's for masked pixels.

    Returns
    -------
    array
        The smoothed image, renormalized so masked pixels do not bleed into
        their neighbors.

    Notes
    ------
    Applying the function to the mask itself yields, per pixel, the fraction
    of the smoothed value that comes from significant pixels.  Smoothing the
    zero-filled masked image under-weights pixels near the mask boundary by
    exactly that fraction, so dividing by the smoothed mask recovers the
    effect of smoothing from just the significant pixels.
    """
    significant_fraction = function(mask.astype(float))
    zero_filled = np.zeros(image.shape, image.dtype)
    zero_filled[mask] = image[mask]
    blurred = function(zero_filled)
    # eps guards against division by zero where no significant pixel
    # contributes at all.
    return blurred / (significant_fraction + np.finfo(float).eps)
if __name__ == "__main__":
    # Label the profiler run with this script's file name.  The original
    # passed os.path.dirname(os.path.basename(__file__)), which always
    # evaluates to '' (the dirname of a bare file name is empty).
    with Profiler(os.path.basename(__file__)):
        main()
|
{"hexsha": "e70b7a26e805b0d84047b74202a4a0e523771f1f", "size": 4482, "ext": "py", "lang": "Python", "max_stars_repo_path": "omnidata_annotator/scripts/create_edge_3d_images.py", "max_stars_repo_name": "EPFL-VILAB/omnidata", "max_stars_repo_head_hexsha": "a1e31eb26172ecf8a3e49ba8a5c82ab3038a9c01", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-10-15T22:11:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T13:28:17.000Z", "max_issues_repo_path": "omnidata_annotator/scripts/create_edge_3d_images.py", "max_issues_repo_name": "EPFL-VILAB/omnidata", "max_issues_repo_head_hexsha": "a1e31eb26172ecf8a3e49ba8a5c82ab3038a9c01", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-12-20T22:01:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T02:23:35.000Z", "max_forks_repo_path": "omnidata_annotator/scripts/create_edge_3d_images.py", "max_forks_repo_name": "EPFL-VILAB/omnidata", "max_forks_repo_head_hexsha": "a1e31eb26172ecf8a3e49ba8a5c82ab3038a9c01", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-11-30T02:07:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T13:16:46.000Z", "avg_line_length": 36.737704918, "max_line_length": 122, "alphanum_fraction": 0.6414547077, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 927}
|
"""Torch Module for EdgeConv Layer"""
# pylint: disable= no-member, arguments-differ, invalid-name
from torch import nn
from ....base import DGLError
from .... import function as fn
from ....utils import expand_as_pair
class EdgeConv(nn.Module):
    r"""

    Description
    -----------
    EdgeConv layer.
    Introduced in "`Dynamic Graph CNN for Learning on Point Clouds
    <https://arxiv.org/pdf/1801.07829>`__". Can be described as follows:

    .. math::
       h_i^{(l+1)} = \max_{j \in \mathcal{N}(i)} (
       \Theta \cdot (h_j^{(l)} - h_i^{(l)}) + \Phi \cdot h_i^{(l)})

    where :math:`\mathcal{N}(i)` is the neighbor of :math:`i`.
    :math:`\Theta` and :math:`\Phi` are linear layers.

    .. note::
       The original formulation includes a ReLU inside the maximum operator.
       This is equivalent to first applying a maximum operator then applying
       the ReLU.

    Parameters
    ----------
    in_feat : int
        Input feature size; i.e, the number of dimensions of :math:`h_j^{(l)}`.
    out_feat : int
        Output feature size; i.e., the number of dimensions of :math:`h_i^{(l+1)}`.
    batch_norm : bool
        Whether to include batch normalization on messages. Default: ``False``.
    allow_zero_in_degree : bool, optional
        If there are 0-in-degree nodes in the graph, output for those nodes will be invalid
        since no message will be passed to those nodes. This is harmful for some applications
        causing silent performance regression. This module will raise a DGLError if it detects
        0-in-degree nodes in input graph. By setting ``True``, it will suppress the check
        and let the users handle it by themselves. Default: ``False``.

    Note
    ----
    Zero in-degree nodes will lead to invalid output value. This is because no message
    will be passed to those nodes, the aggregation function will be applied on empty input.
    A common practice to avoid this is to add a self-loop for each node in the graph if
    it is homogeneous, which can be achieved by:

    >>> g = ... # a DGLGraph
    >>> g = dgl.add_self_loop(g)

    Calling ``add_self_loop`` will not work for some graphs, for example, heterogeneous graph
    since the edge type can not be decided for self_loop edges. Set ``allow_zero_in_degree``
    to ``True`` for those cases to unblock the code and handle zero-in-degree nodes manually.
    A common practice to handle this is to filter out the nodes with zero-in-degree when use
    after conv.

    Examples
    --------
    >>> import dgl
    >>> import numpy as np
    >>> import torch as th
    >>> from dgl.nn import EdgeConv
    >>>
    >>> # Case 1: Homogeneous graph
    >>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
    >>> g = dgl.add_self_loop(g)
    >>> feat = th.ones(6, 10)
    >>> conv = EdgeConv(10, 2)
    >>> res = conv(g, feat)
    >>> res
    tensor([[-0.2347,  0.5849],
            [-0.2347,  0.5849],
            [-0.2347,  0.5849],
            [-0.2347,  0.5849],
            [-0.2347,  0.5849],
            [-0.2347,  0.5849]], grad_fn=<CopyReduceBackward>)

    >>> # Case 2: Unidirectional bipartite graph
    >>> u = [0, 1, 0, 0, 1]
    >>> v = [0, 1, 2, 3, 2]
    >>> g = dgl.bipartite((u, v))
    >>> u_fea = th.rand(2, 5)
    >>> v_fea = th.rand(4, 5)
    >>> conv = EdgeConv(5, 2)
    >>> res = conv(g, (u_fea, v_fea))
    >>> res
    tensor([[ 1.6375,  0.2085],
            [-1.1925, -1.2852],
            [ 0.2101,  1.3466],
            [ 0.2342, -0.9868]], grad_fn=<CopyReduceBackward>)
    """
    def __init__(self,
                 in_feat,
                 out_feat,
                 batch_norm=False,
                 allow_zero_in_degree=False):
        super(EdgeConv, self).__init__()
        self.batch_norm = batch_norm
        self._allow_zero_in_degree = allow_zero_in_degree

        # Theta acts on the edge-wise feature difference, Phi on the
        # destination node's own feature (see the class-level formula).
        self.theta = nn.Linear(in_feat, out_feat)
        self.phi = nn.Linear(in_feat, out_feat)

        if batch_norm:
            self.bn = nn.BatchNorm1d(out_feat)

    def set_allow_zero_in_degree(self, set_value):
        r"""

        Description
        -----------
        Set allow_zero_in_degree flag.

        Parameters
        ----------
        set_value : bool
            The value to be set to the flag.
        """
        self._allow_zero_in_degree = set_value

    def forward(self, g, feat):
        """

        Description
        -----------
        Forward computation

        Parameters
        ----------
        g : DGLGraph
            The graph.
        feat : Tensor or pair of tensors
            :math:`(N, D)` where :math:`N` is the number of nodes and
            :math:`D` is the number of feature dimensions.

            If a pair of tensors is given, the graph must be a uni-bipartite graph
            with only one edge type, and the two tensors must have the same
            dimensionality on all except the first axis.

        Returns
        -------
        torch.Tensor
            New node features.

        Raises
        ------
        DGLError
            If there are 0-in-degree nodes in the input graph, it will raise DGLError
            since no message will be passed to those nodes. This will cause invalid output.
            The error can be ignored by setting ``allow_zero_in_degree`` parameter to ``True``.
        """
        with g.local_scope():
            if not self._allow_zero_in_degree:
                if (g.in_degrees() == 0).any():
                    raise DGLError('There are 0-in-degree nodes in the graph, '
                                   'output for those nodes will be invalid. '
                                   'This is harmful for some applications, '
                                   'causing silent performance regression. '
                                   'Adding self-loop on the input graph by '
                                   'calling `g = dgl.add_self_loop(g)` will resolve '
                                   'the issue. Setting ``allow_zero_in_degree`` '
                                   'to be `True` when constructing this module will '
                                   'suppress the check and let the code run.')

            h_src, h_dst = expand_as_pair(feat, g)
            g.srcdata['x'] = h_src
            g.dstdata['x'] = h_dst
            # Edge message: theta_e = Theta * (h_dst - h_src) for each edge.
            g.apply_edges(fn.v_sub_u('x', 'x', 'theta'))
            g.edata['theta'] = self.theta(g.edata['theta'])
            # Node term: phi_i = Phi * h_i on each destination node.
            g.dstdata['phi'] = self.phi(g.dstdata['x'])
            if not self.batch_norm:
                # Fuse message (theta_e + phi_v) and max-aggregation in one pass.
                g.update_all(fn.e_add_v('theta', 'phi', 'e'), fn.max('e', 'x'))
            else:
                # Materialize per-edge messages first so batch norm can be
                # applied before the max-aggregation.
                g.apply_edges(fn.e_add_v('theta', 'phi', 'e'))
                # Although the official implementation includes a per-edge
                # batch norm within EdgeConv, I choose to replace it with a
                # global batch norm for a number of reasons:
                #
                # (1) When the point clouds within each batch do not have the
                #     same number of points, batch norm would not work.
                #
                # (2) Even if the point clouds always have the same number of
                #     points, the points may as well be shuffled even with the
                #     same (type of) object (and the official implementation
                #     *does* shuffle the points of the same example for each
                #     epoch).
                #
                #     For example, the first point of a point cloud of an
                #     airplane does not always necessarily reside at its nose.
                #
                #     In this case, the learned statistics of each position
                #     by batch norm is not as meaningful as those learned from
                #     images.
                g.edata['e'] = self.bn(g.edata['e'])
                g.update_all(fn.copy_e('e', 'e'), fn.max('e', 'x'))
            return g.dstdata['x']
|
{"hexsha": "861de78cc4f76e8722cdcddd2ccd635fd4f5ddef", "size": 7919, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/dgl/nn/pytorch/conv/edgeconv.py", "max_stars_repo_name": "Padarn/dgl", "max_stars_repo_head_hexsha": "5087a21279be98021fddfd1ba61487be4adfede8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-12T08:14:04.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-12T08:14:04.000Z", "max_issues_repo_path": "python/dgl/nn/pytorch/conv/edgeconv.py", "max_issues_repo_name": "Padarn/dgl", "max_issues_repo_head_hexsha": "5087a21279be98021fddfd1ba61487be4adfede8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/dgl/nn/pytorch/conv/edgeconv.py", "max_forks_repo_name": "Padarn/dgl", "max_forks_repo_head_hexsha": "5087a21279be98021fddfd1ba61487be4adfede8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-08T11:27:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T11:27:24.000Z", "avg_line_length": 38.818627451, "max_line_length": 95, "alphanum_fraction": 0.5501957318, "include": true, "reason": "import numpy", "num_tokens": 1946}
|
      SUBROUTINE E03105 (USEPRM)

C  E03105 tests the handling of error number 105
C  Error 105 (per CURCON below): "the specified marker type is not
C  available on the specified workstation".

      COMMON /GLOBNU/ CTLHND, ERRSIG, ERRFIL, IERRCT, UNERR,
     1                TESTCT, IFLERR, PASSSW, ERRSW, MAXLIN,
     2                CONID, MEMUN, WKID, WTYPE, GLBLUN, INDLUN,
     3                DUMINT, DUMRL
      INTEGER CTLHND, ERRSIG, ERRFIL, IERRCT, UNERR,
     1        TESTCT, IFLERR, PASSSW, ERRSW, MAXLIN,
     2        CONID, MEMUN, WKID, WTYPE, GLBLUN, INDLUN,
     3        DUMINT(20), ERRIND
      REAL DUMRL(20)

      COMMON /ERRINF/ ERRCOM,FUNCOM,FILCOM, ERNMSW, EXPSIZ,EXPERR,
     1                USRERR, ERRSAV, FUNSAV, FILSAV,
     2                EFCNT, EFID
      INTEGER ERRCOM,FUNCOM,FILCOM, ERNMSW, EXPSIZ,EXPERR(10),
     1        USRERR, ERRSAV(200), FUNSAV(200), FILSAV(200),
     2        EFCNT, EFID(100)

      COMMON /ERRCHR/ CURCON, ERRSRS, ERRMRK, ERFLNM,
     1                CONTAB
      CHARACTER CURCON*200, ERRSRS*40, ERRMRK*20, ERFLNM*80,
     1          CONTAB(40)*150

C type of returned value
      INTEGER PSET, PREALI
      PARAMETER (PSET=0, PREALI=1)

      INTEGER USEPRM, IX, NMTYP1, MXTYPE, MARKTY,MARKCL
      INTEGER IDUM1,IDUM2,IDUM3,UNTYPE, THISMT, SPECWT
      REAL RDUM1, RDUM2, RDUM3, MARKSC
      LOGICAL STREQ, APPEQ

C     Register the expected error text and number for this test
      CURCON = 'the specified marker type is not available '//
     1         'on the specified workstation'
      CALL SETVS ('105', EXPERR, EXPSIZ)

      ERRSRS = '5'
      CALL ESETUP (USEPRM)

C     Open a workstation and inquire its specific workstation type
      CALL POPWK (WKID, CONID, WTYPE)
      CALL PQWKC (WKID, ERRIND, CONID, SPECWT)
      CALL CHKINQ ('pqwkc', ERRIND)

C     Scan the supported polymarker facilities to find the largest
C     supported marker type number; MXTYPE+1 is then unsupported
      CALL PQPMF (SPECWT, 1, ERRIND, NMTYP1, MXTYPE,IDUM2,RDUM1,RDUM2,
     1            RDUM3, IDUM3)
      CALL CHKINQ ('pqpmf', ERRIND)
      DO 50 IX = 1, ABS(NMTYP1)
         CALL PQPMF (SPECWT,IX,ERRIND,IDUM1,THISMT,IDUM2,RDUM1,
     1               RDUM2, RDUM3, IDUM3)
         CALL CHKINQ ('pqpmf',ERRIND)
         IF (THISMT .GT. MXTYPE) MXTYPE = THISMT
   50 CONTINUE
      UNTYPE = MXTYPE + 1
C
C     Set a valid polymarker representation, then attempt the
C     unsupported type; the inquiry afterwards must still report the
C     valid settings (type THISMT, scale 1.0, colour 0)
      CALL PSPMR (WKID, 1, THISMT, 1., 0)
      CALL RSPMR (WKID, 1, UNTYPE, .5, 1)
      CALL PQPMR (WKID, 1, PSET, ERRIND, MARKTY,MARKSC,MARKCL)
      CALL CHKINQ ('pqpmr', ERRIND)
      CALL TSTIGN (STREQ('OO**') .AND.
     1             MARKTY.EQ.THISMT .AND.
     2             APPEQ(MARKSC,1.,0.01,0.01) .AND.
     3             MARKCL.EQ.0)

      CALL PCLWK (WKID)
      CALL ENDERR

      END
|
{"hexsha": "8d9fc028b537ed2b4e6fb8ca39af3fe586bb30e4", "size": 2505, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "third_party/Phigs/PVT/PVT_fort/V2LIB/e03105.f", "max_stars_repo_name": "n1ckfg/Telidon", "max_stars_repo_head_hexsha": "f4e2c693ec7d67245974b73a602d5d40df6a6d69", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2017-07-08T02:34:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-08T03:42:48.000Z", "max_issues_repo_path": "third_party/Phigs/PVT/PVT_fort/V2LIB/e03105.f", "max_issues_repo_name": "n1ckfg/Telidon", "max_issues_repo_head_hexsha": "f4e2c693ec7d67245974b73a602d5d40df6a6d69", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "third_party/Phigs/PVT/PVT_fort/V2LIB/e03105.f", "max_forks_repo_name": "n1ckfg/Telidon", "max_forks_repo_head_hexsha": "f4e2c693ec7d67245974b73a602d5d40df6a6d69", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-02-03T04:44:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-05T15:31:18.000Z", "avg_line_length": 35.2816901408, "max_line_length": 70, "alphanum_fraction": 0.5481037924, "num_tokens": 936}
|
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
# Default viewer camera settings applied by AntEnv.viewer_setup; keys are
# attributes of the MuJoCo viewer camera.
DEFAULT_CAMERA_CONFIG = {
    'distance': 4.0,
}
class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Ant locomotion environment (MuJoCo) with restricted control.

    Reward = forward (+x) velocity + survival bonus - control cost
    - contact cost.

    NOTE(review): ``step`` zeroes every action component from index 4 on,
    so only the first four actuators are ever driven -- presumably a
    deliberately restricted variant of the standard Ant; confirm against
    the experiment design.
    """

    def __init__(self,
                 xml_file='ant.xml',
                 ctrl_cost_weight=0.5,
                 contact_cost_weight=5e-4,
                 healthy_reward=1.0,
                 terminate_when_unhealthy=True,
                 healthy_z_range=(0.2, 1.0),
                 contact_force_range=(-1.0, 1.0),
                 reset_noise_scale=0.1,
                 exclude_current_positions_from_observation=True):
        # EzPickle needs the constructor arguments to re-create the env.
        utils.EzPickle.__init__(**locals())

        self._ctrl_cost_weight = ctrl_cost_weight
        self._contact_cost_weight = contact_cost_weight

        self._healthy_reward = healthy_reward
        self._terminate_when_unhealthy = terminate_when_unhealthy
        self._healthy_z_range = healthy_z_range

        self._contact_force_range = contact_force_range

        self._reset_noise_scale = reset_noise_scale

        self._exclude_current_positions_from_observation = (
            exclude_current_positions_from_observation)

        # frame_skip=5: each env step advances the simulation 5 frames.
        mujoco_env.MujocoEnv.__init__(self, xml_file, 5)

    @property
    def healthy_reward(self):
        """Survival bonus.

        Paid unconditionally when ``terminate_when_unhealthy`` is True (the
        episode ends on an unhealthy state anyway); otherwise paid only
        while the ant is healthy.
        """
        return float(
            self.is_healthy
            or self._terminate_when_unhealthy
        ) * self._healthy_reward

    def control_cost(self, action):
        """Quadratic penalty on the actuator commands."""
        return self._ctrl_cost_weight * np.sum(np.square(action))

    @property
    def contact_forces(self):
        """External contact forces, clipped to the configured range."""
        raw_contact_forces = self.sim.data.cfrc_ext
        min_value, max_value = self._contact_force_range
        return np.clip(raw_contact_forces, min_value, max_value)

    @property
    def contact_cost(self):
        """Quadratic penalty on the (clipped) contact forces."""
        return self._contact_cost_weight * np.sum(
            np.square(self.contact_forces))

    @property
    def is_healthy(self):
        """True while the state is finite and the torso height is in range."""
        state = self.state_vector()
        min_z, max_z = self._healthy_z_range
        return bool(np.isfinite(state).all() and min_z <= state[2] <= max_z)

    @property
    def done(self):
        """Episode-termination flag (only ever True in terminating mode)."""
        return (not self.is_healthy
                if self._terminate_when_unhealthy
                else False)

    def step(self, action):
        """Apply ``action`` and return ``(observation, reward, done, info)``.

        Bug fix: the input array is copied before the disabled actuators
        (indices 4 and up) are zeroed, so the caller's buffer is no longer
        mutated in place.  The simulation and the cost terms still see the
        zeroed action, exactly as before.
        """
        action = np.copy(action)
        action[4:] = 0

        xy_position_before = self.get_body_com("torso")[:2].copy()
        self.do_simulation(action, self.frame_skip)
        xy_position_after = self.get_body_com("torso")[:2].copy()

        xy_velocity = (xy_position_after - xy_position_before) / self.dt
        x_velocity, y_velocity = xy_velocity

        ctrl_cost = self.control_cost(action)
        contact_cost = self.contact_cost

        forward_reward = x_velocity
        healthy_reward = self.healthy_reward

        rewards = forward_reward + healthy_reward
        costs = ctrl_cost + contact_cost
        reward = rewards - costs

        done = self.done
        observation = self._get_obs()
        info = {
            'reward_forward': forward_reward,
            'reward_ctrl': -ctrl_cost,
            'reward_contact': -contact_cost,
            'reward_survive': healthy_reward,

            'x_position': xy_position_after[0],
            'y_position': xy_position_after[1],
            'distance_from_origin': np.linalg.norm(xy_position_after, ord=2),

            'x_velocity': x_velocity,
            'y_velocity': y_velocity,
            'forward_reward': forward_reward,
        }
        return observation, reward, done, info

    def _get_obs(self):
        """Observation: [qpos (minus global x/y if configured), qvel,
        clipped contact forces]."""
        position = self.sim.data.qpos.flat.copy()
        velocity = self.sim.data.qvel.flat.copy()
        contact_force = self.contact_forces.flat.copy()

        if self._exclude_current_positions_from_observation:
            # Drop the global torso x/y so the policy is translation-invariant.
            position = position[2:]

        return np.concatenate((position, velocity, contact_force))

    def reset_model(self):
        """Reset to the initial pose plus uniform (qpos) / gaussian (qvel)
        noise scaled by ``reset_noise_scale``."""
        noise_low = -self._reset_noise_scale
        noise_high = self._reset_noise_scale

        qpos = self.init_qpos + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nq)
        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
            self.model.nv)
        self.set_state(qpos, qvel)

        return self._get_obs()

    def viewer_setup(self):
        """Apply DEFAULT_CAMERA_CONFIG entries to the viewer camera."""
        for key, value in DEFAULT_CAMERA_CONFIG.items():
            if isinstance(value, np.ndarray):
                # Array-valued settings are copied element-wise into the
                # existing camera attribute.
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)
|
{"hexsha": "5df479bf6004a77f2978fcc89378dcf6fd42843d", "size": 4708, "ext": "py", "lang": "Python", "max_stars_repo_path": "gym/envs/mujoco/ant_t_v3.py", "max_stars_repo_name": "caoxixiya/od_irl", "max_stars_repo_head_hexsha": "cc5da49344174859b74ad67b9abe6d910b4159d0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gym/envs/mujoco/ant_t_v3.py", "max_issues_repo_name": "caoxixiya/od_irl", "max_issues_repo_head_hexsha": "cc5da49344174859b74ad67b9abe6d910b4159d0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gym/envs/mujoco/ant_t_v3.py", "max_forks_repo_name": "caoxixiya/od_irl", "max_forks_repo_head_hexsha": "cc5da49344174859b74ad67b9abe6d910b4159d0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-06T02:58:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-06T02:58:09.000Z", "avg_line_length": 31.8108108108, "max_line_length": 79, "alphanum_fraction": 0.6338147833, "include": true, "reason": "import numpy", "num_tokens": 1045}
|
import cv2
import numpy as np
from .camera.parameters import CameraParams, IntrinsicParams, ExtrinsicParams
from .camera.coordinate_transformation import CoordinateTransformation, rotationMatrix3D#, reverseX, reverseY
from .camera import basic_tools
class InversePerspectiveMapping(object):
    """Project bright pixels of a camera image onto a top-down grid.

    ``getIPM`` selects every pixel brighter than 200, back-projects those
    pixels through the camera's intrinsic/extrinsic parameters, and
    rasterizes them into a fixed-size (200 x 400) bird's-eye-view image.
    """
    def __init__(self, param, sensor):
        # param supplies longitudinal_length (extent covered by the output
        # image height) and ksize (blur kernel size; only referenced by the
        # commented-out GaussianBlur below).
        self.sensor = sensor

        intrinsic_params = IntrinsicParams(sensor)
        extrinsic_params = ExtrinsicParams(sensor)
        self.camera_params = CameraParams(intrinsic_params, extrinsic_params)

        # Output image size is hard-coded rather than read from the sensor
        # attributes (see the commented-out originals).
        self.img_width = 400#eval(sensor.attributes['image_size_x'])
        self.img_height = 200#eval(sensor.attributes['image_size_y'])

        #self.max_pixel = np.array([self.img_height, self.img_width]).reshape(2,1)
        #self.min_pixel = np.array([0, 0]).reshape(2,1)

        self.empty_image = np.zeros((self.img_height, self.img_width), dtype=np.dtype("uint8"))

        self.longitudinal_length = param.longitudinal_length
        self.ksize = param.ksize

        # Pseudo intrinsic matrix mapping ground-plane coordinates into the
        # bird's-eye-view image: f pixels per unit length, principal point at
        # the bottom center of the output image.
        f = float(self.img_height) / self.longitudinal_length
        self.pesudo_K = np.array([ [f, 0, self.img_width/2],
                                   [0, f, self.img_height],
                                   [0, 0, 1] ])

        # Fixed -90 deg rotation about z applied before the pseudo intrinsics.
        self.reverseXY = basic_tools.np_dot(rotationMatrix3D(0,0,-np.pi/2))

    def getIPM(self, image):
        """Return the bird's-eye-view rasterization of ``image``.

        ``image`` is presumably an (H, W) or (H, W, C) array where values
        above 200 mark detections -- confirm against the caller.  Returns a
        (img_height, img_width) uint8 image with detected points drawn as
        255, thickened by one pixel vertically.
        """
        # Fresh canvas each call so repeated calls do not accumulate.
        self.empty_image = np.zeros((self.img_height, self.img_width), dtype=np.dtype("uint8"))

        # Unique (u, v) pixel coordinates of all bright pixels; argwhere
        # yields (row, col), so the axes are swapped into (x, y) order here.
        index_array = np.argwhere(image > 200)
        index_array = index_array[:,:2]
        index_array = np.unique(index_array, axis=0)
        index_array = np.array([index_array[:,1], index_array[:,0]])

        # Back-project the image pixels to 3-D world points.
        vehicle_vec = CoordinateTransformation.image2DToWorld3D2(index_array, self.camera_params.K, self.camera_params.R, self.camera_params.t)

        # Force the third homogeneous coordinate to 1, then map world
        # coordinates into the bird's-eye view with the pseudo intrinsics.
        vehicle_vec[:,2,0] = 1.0
        temp = np.dot(self.pesudo_K, self.reverseXY)

        vehicle_vec = np.squeeze(vehicle_vec, axis = 2)
        new_image_vec = np.dot(temp, vehicle_vec.T)
        new_image_vec = new_image_vec[:2,:]
        new_image_vec = new_image_vec[::-1,:]

        new_image_y_pixel = new_image_vec[0,:].astype(int)
        new_image_x_pixel = new_image_vec[1,:].astype(int)

        #self.empty_image[new_image_y_pixel, new_image_x_pixel] = 255

        # Discard projections falling outside the output image bounds.
        mask = np.where((new_image_x_pixel >= 0)&(new_image_x_pixel < self.img_width))[0]
        new_image_x_pixel = new_image_x_pixel[mask]
        new_image_y_pixel = new_image_y_pixel[mask]

        mask = np.where((new_image_y_pixel >= 0)&(new_image_y_pixel < self.img_height))[0]
        new_image_x_pixel = new_image_x_pixel[mask]
        new_image_y_pixel = new_image_y_pixel[mask]

        # Draw each point plus its clipped vertical neighbors to thicken
        # the rendered detections.
        self.empty_image[new_image_y_pixel, new_image_x_pixel] = 255
        self.empty_image[np.clip(new_image_y_pixel+1,0, self.img_height-1),new_image_x_pixel] = 255
        self.empty_image[np.clip(new_image_y_pixel-1,0, self.img_height-1),new_image_x_pixel] = 255

        #self.empty_image = cv2.GaussianBlur(self.empty_image, (self.ksize, self.ksize), 25)
        return self.empty_image
|
{"hexsha": "cb42f4272294eaee98e74b77f32cdfd138eae13d", "size": 3115, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/collect_ipm.py", "max_stars_repo_name": "Czworldy/GP_traj", "max_stars_repo_head_hexsha": "96261f39a5a322092e3a6be98938bb4601f0f746", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-02-09T05:08:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T07:51:30.000Z", "max_issues_repo_path": "utils/collect_ipm.py", "max_issues_repo_name": "Czworldy/GP_traj", "max_issues_repo_head_hexsha": "96261f39a5a322092e3a6be98938bb4601f0f746", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/collect_ipm.py", "max_forks_repo_name": "Czworldy/GP_traj", "max_forks_repo_head_hexsha": "96261f39a5a322092e3a6be98938bb4601f0f746", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-03-30T06:30:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T14:15:00.000Z", "avg_line_length": 48.671875, "max_line_length": 143, "alphanum_fraction": 0.6712680578, "include": true, "reason": "import numpy", "num_tokens": 776}
|
import numpy as np
import pandas as pd
def updateMap(replaced, replacement, map, fitness, genes, values, extraMapValues):
    """Write a batch of evaluated individuals back into a MAP-Elites archive.

    Parameters
    ----------
    replaced : array-like of int
        Flat (column-major, order='F') cell indices in the map that are to
        be overwritten.
    replacement : array-like of int
        For each replaced cell, the index of the winning individual in
        `fitness` / `genes` / `values`.
    map : object or tuple
        The archive.  Three layouts are handled: a bare map object with
        `.fitness` / `.genes` attributes, a 1-tuple `(map_obj,)`, or a
        nested tuple `((map_obj,), ...)` -- presumably depending on how the
        caller unpacked it; confirm against callers.
    fitness : numpy array
        Fitness values of the candidate individuals (transposed below into a
        one-row DataFrame before indexing).
    genes : pandas DataFrame
        One row per candidate individual, one column per gene.
    values : sequence
        Per-individual values for each name in `extraMapValues`.
    extraMapValues : sequence of str or None
        Attribute names on the map object to update alongside fitness/genes.

    Returns
    -------
    The (mutated in place) `map` argument.
    """
    # NOTE(review): `mapIsTuple` is computed but never used; the tuple checks
    # below re-run isinstance() instead.
    mapIsTuple = isinstance(map, tuple)
    # One-row DataFrame: fitness.iloc[0] is then the vector of candidate
    # fitnesses.
    fitness = pd.DataFrame(data=fitness.transpose())
    # Flatten the fitness grid column-major so `replaced` flat indices line
    # up with np.unravel_index(..., order='F') further down.
    # NOTE(review): this line indexes map[0] unconditionally, yet the
    # branches below also handle a non-tuple `map` -- for a bare map object
    # this would only work if it is itself indexable; confirm which layouts
    # actually reach here.
    mapfit_re = map[0].fitness.reshape(map[0].fitness.shape[0] * map[0].fitness.shape[1], 1, order='F')
    mapfit_re[replaced] = fitness.iloc[0][replacement,np.newaxis] # 2d list
    # Write the flattened fitness column back into the 2-D grid; the three
    # branches only differ in how deeply `map` is wrapped in tuples.
    if (isinstance(map,tuple)):
        if (isinstance(map[0],tuple)):
            map[0][0].fitness = mapfit_re.reshape(map[0][0].fitness.shape[0], map[0][0].fitness.shape[1], order='F')
            r, c = np.shape(map[0][0].fitness)
        else:
            map[0].fitness = mapfit_re.reshape(map[0].fitness.shape[0], map[0].fitness.shape[1], order='F')
            r, c = np.shape(map[0].fitness)
    else:
        map.fitness = mapfit_re.reshape(map.fitness.shape[0], map.fitness.shape[1], order='F')
        r, c = np.shape(map.fitness)
    # Convert flat replaced indices to (row, col) pairs in the r x c grid,
    # column-major to match the reshape above.
    replacedI, replacedJ = np.unravel_index(replaced, shape=(r,c), order='F')
    # Assign genomes (and any extra per-cell values) cell by cell.
    # TODO: check if necessary
    if (isinstance(map,tuple)):
        if (isinstance(map[0],tuple)):
            for iReplace in range(0,len(replaced)):
                for gen in range(len(map[0][0].genes)):
                    # genes stored as DataFrames here (note .iloc), unlike
                    # the plain-array indexing in the middle branch below.
                    map[0][0].genes[gen].iloc[replacedI[iReplace]][replacedJ[iReplace]] = genes.iloc[replacement[iReplace]][gen] # DONE: needs to be adapted for more than 2 genes
            # Assign miscellaneous map values by attribute name.
            # NOTE(review): exec() builds an assignment from extraMapValues
            # strings -- safe only if those names are trusted constants.
            if extraMapValues: # not empty
                for iValues in range(0,len(extraMapValues)):
                    for i in zip(replaced,replacement):
                        exec('map[0][0].'+extraMapValues[iValues]+'[i[0]] = values['+str(iValues)+'][i[1]]')
        else:
            for iReplace in range(0,len(replaced)):
                for gen in range(len(map[0].genes)):
                    # direct 2-D indexing (no .iloc) -- genes presumably
                    # plain arrays in this layout; confirm with callers.
                    map[0].genes[gen][replacedI[iReplace]][replacedJ[iReplace]] = genes.iloc[replacement[iReplace]][gen] # DONE: needs to be adapted for more than 2 genes
            # Assign miscellaneous map values by attribute name.
            if extraMapValues: # not empty
                for iValues in range(0,len(extraMapValues)):
                    for i in zip(replaced,replacement):
                        exec('map[0].'+extraMapValues[iValues]+'[i[0]] = values['+str(iValues)+'][i[1]]')
    else:
        for iReplace in range(0,len(replaced)):
            for gen in range(len(map.genes)):
                map.genes[gen].iloc[replacedI[iReplace]][replacedJ[iReplace]] = genes.iloc[replacement[iReplace]][gen] # DONE: needs to be adapted for more than 2 genes
        # Assign miscellaneous map values by attribute name.
        if extraMapValues: # not empty
            for iValues in range(0,len(extraMapValues)):
                for i in zip(replaced,replacement):
                    exec('map.'+extraMapValues[iValues]+'[i[0]] = values['+str(iValues)+'][i[1]]')
    return map
|
{"hexsha": "c9eb5bfc1fbe2476f69b35d6d83d671fbe0b8ef8", "size": 5491, "ext": "py", "lang": "Python", "max_stars_repo_path": "mapElites/updateMap.py", "max_stars_repo_name": "Sascha0912/SAIL", "max_stars_repo_head_hexsha": "5dfb8d0b925d5e61933bf10591d959433fffaf26", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-12T10:21:54.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-17T14:56:33.000Z", "max_issues_repo_path": "mapElites/updateMap.py", "max_issues_repo_name": "Sascha0912/SAIL", "max_issues_repo_head_hexsha": "5dfb8d0b925d5e61933bf10591d959433fffaf26", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mapElites/updateMap.py", "max_forks_repo_name": "Sascha0912/SAIL", "max_forks_repo_head_hexsha": "5dfb8d0b925d5e61933bf10591d959433fffaf26", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-31T07:22:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-31T07:22:09.000Z", "avg_line_length": 39.7898550725, "max_line_length": 178, "alphanum_fraction": 0.5572755418, "include": true, "reason": "import numpy", "num_tokens": 1470}
|
! Program for checking formatting in Fortran
program format_spec
    ! Demonstrates common Fortran edit descriptors for formatted output.
    implicit none
    integer:: i = 12345
    real:: j = 5.8938492847
    real, dimension (5):: v = (/ 1.1,1.2,1.4,1.6,1.9 /)
    print '(i5)', i        ! integer in a field width of 5
    print '(f10.8)', j     ! real in a total width of 10, 8 fractional digits
    print '(5f8.3)', v     ! 5 reals, each width 8 with 3 fractional digits
    print '(e10.3)', j     ! exponential form: width 10, 3 fractional digits
    ! BUG FIX: edit descriptors in a format must be comma-separated;
    ! '(i5 f15.8)' is rejected by standard-conforming compilers.
    print '(i5,f15.8)', i,j ! both integer and real on one line
end program format_spec
|
{"hexsha": "49abeff36154dfb5e11d8e5a3d5bbc8f2ec773b3", "size": 677, "ext": "f95", "lang": "FORTRAN", "max_stars_repo_path": "practice/format_spec.f95", "max_stars_repo_name": "adisen99/fortran_programs", "max_stars_repo_head_hexsha": "04d3a528200e27a25b109f5d3a0aff66b22f94a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "practice/format_spec.f95", "max_issues_repo_name": "adisen99/fortran_programs", "max_issues_repo_head_hexsha": "04d3a528200e27a25b109f5d3a0aff66b22f94a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "practice/format_spec.f95", "max_forks_repo_name": "adisen99/fortran_programs", "max_forks_repo_head_hexsha": "04d3a528200e27a25b109f5d3a0aff66b22f94a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.08, "max_line_length": 115, "alphanum_fraction": 0.6809453471, "num_tokens": 205}
|
# Create your views here.
from django.http import HttpResponse
from core import models
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.template import RequestContext
from django.views.decorators.http import require_http_methods, require_POST, require_GET
from django.views.decorators.csrf import csrf_exempt
from django.core.mail import EmailMessage, EmailMultiAlternatives
import json
import string, random, thread, math
import numpy as np
from matplotlib.path import Path
from PIL import Image
import os
#################
##### VIEWS #####
#################
@require_GET
def submitView(request):
    """Render the (empty-context) photo submission page; GET only."""
    context = RequestContext(request)
    return render_to_response("submit.html", {}, context_instance=context)
@require_POST
@csrf_exempt
def submitPic(request):
    """Accept an uploaded photo plus a selection polygon and queue it.

    Expects POST fields: 'email', 'photo' (uploaded file) and
    'selectedPixels' (JSON polygon vertices).  The image is padded so that
    100x100 blocks tile it exactly, the polygon is rasterised to a pixel
    mask, an Image row is saved, and a background thread slices the image
    into work blocks.  Returns an empty 200 response.

    Raises Exception when any of the three inputs is missing.
    """
    # BUG FIX: was the Python-2-only `print x` statement form; parentheses
    # produce the same output on Python 2 and also parse on Python 3.
    print(os.path.dirname(os.path.realpath(__file__)))
    email = request.POST.get('email')
    if email is None or email == "":  # an email is required for delivery
        raise Exception("No email provided")
    imgData = request.FILES.get('photo')
    if imgData is None:
        raise Exception("No image provided")
    polygon = request.POST.get('selectedPixels')
    if polygon is None:
        raise Exception("No polygon provided")
    img = Image.open(imgData)
    orig_width, orig_height = img.size
    img = padImage(img)  # pad so 100x100 blocks tile exactly
    p_width, p_height = img.size
    selectedPixels = poly2Pixels(json.loads(polygon), p_width, p_height)
    image = models.Image(
        height=orig_height, width=orig_width,
        padded_height=p_height,
        padded_width=p_width,
        email=email)
    image.save()
    # Slice into blocks off the request thread; the response returns
    # immediately while workers are fed asynchronously.
    thread.start_new_thread(saveBlocks, (img, image, selectedPixels))
    return HttpResponse()
@require_GET
@csrf_exempt
def getBlock(request):
    """Hand a pending 100x100 block to a worker; empty JSON when none left."""
    block = models.Block.pickBlock()
    if block == None:
        print("no block found")
        return JsonResponse({})
    print("block found")
    payload = {
        'id': block.key,
        'height': 100,
        'width': 100,
        'pixels': pixelsToFrontEnd(block.getPixels()),
    }
    return JsonResponse(payload)
@require_POST
@csrf_exempt
def returnBlock(request):
    """Receive a worker's completed block and process it asynchronously."""
    key = request.POST.get('id')
    pixels = request.POST.get('pixels')
    # Validate in the same order as before: key first, then pixels.
    if key == None:
        raise Exception("missing key in post data")
    if pixels == None:
        raise Exception("missing pixels in post data")
    target = get_object_or_404(models.Block, key=key)
    thread.start_new_thread(processReturnBlock, (pixels, target))
    return HttpResponse()
def processReturnBlock(pixels, block):
    """Apply a returned selection (JSON) to its block; finish the image
    once every block has been submitted.  Runs in a worker thread."""
    decoded = json.loads(pixels)
    if block.done:
        # Someone else already submitted this block; drop the duplicate.
        return
    updateBlock(decoded, block)
    block.done = True
    parent = block.image
    parent.blocksLeft -= 1
    parent.save()
    block.save()
    print("blocksLeft: %d" % parent.blocksLeft)
    if parent.blocksLeft <= 0:
        finishImage(parent)
###################
##### HELPERS #####
###################
def finishImage(image):
    """Reassemble the finished image, email it, and delete the DB rows.

    Returns the reconstructed PIL image.
    """
    assembled = constructImgFromBlocks(image)
    # TODO: crop the padding off before mailing
    emailImg(assembled, image.email)
    for blk in models.Block.objects.filter(image=image):
        blk.delete()
    image.delete()
    return assembled
def emailImg(img, email):
    """Save *img* to a temp file and email it to *email* as an attachment.

    The HTML alternative references the attached file name; the PNG is
    attached from 'tmp/img1.png' after being written there.
    """
    print("saving image")
    img.save('tmp/img1.png', 'PNG')
    print("emailing image")
    # BUG FIX: the from-address had a stray leading space.
    fromEmail = 'crowdimage@gmail.com'
    subject = 'Your image has been processed'
    toList = [email]
    text_content = ""
    html_content = '<h1>image:</h1><img src="img1.png">'
    # Use a distinct local name: the original rebound the `email` parameter,
    # which made the recipient unavailable after this point.
    message = EmailMultiAlternatives(subject, text_content, fromEmail, toList)
    message.attach_alternative(html_content, "text/html")
    message.attach_file('tmp/img1.png')
    message.send()
    # BUG FIX: report success only after send() actually ran (it was
    # previously printed before the message went out).
    print("message sent")
# updates the opacity in each block from the
# selections from the front end
def updateBlock(selectionArray, block):
    """Rebuild a block's pixels with alpha taken from a worker's selection.

    selectionArray is a 100x100 grid of 0/1 selection flags; the RGB
    channels are kept from the block's current pixels and alpha becomes
    flag * 255.  Blocks are assumed to be exactly 100x100 here.
    Mutates *block* via setPixels; returns None.
    """
    h = w = 100  # block dimensions are fixed throughout the pipeline
    originalPixels = block.getPixels()
    # range (not xrange) so the helper also runs under Python 3; identical
    # iteration behaviour on Python 2.
    newPixels = [[(0, 0, 0, 0) for x in range(w)] for x in range(h)]
    for y in range(h):
        for x in range(w):
            (r, g, b, _) = originalPixels[y][x]
            a = (selectionArray[y][x]) * 255
            newPixels[y][x] = (r, g, b, a)
    block.setPixels(newPixels)
# takes 2D array of pixels as 4 channels and returns
# 2D array of pixels as (hex,alpha[0-1])
def pixelsToFrontEnd(pixelsArray):
    """Convert a 2D array of (r, g, b, a) pixels to front-end form.

    Returns a 2D array of [hex_color, alpha] pairs where hex_color is a
    6-digit lowercase hex string and alpha is rounded to 0 or 1.
    """
    width = len(pixelsArray[0])
    height = len(pixelsArray)
    rows = []
    # range (not xrange) so this also runs under Python 3; identical
    # iteration behaviour on Python 2.
    for y in range(height):
        row = []
        for x in range(width):
            p = pixelsArray[y][x]
            color = channelsToHex(p[0], p[1], p[2])
            # NOTE(review): 250.0 looks like a typo for 255.0 (alpha max);
            # kept as-is to preserve behaviour -- confirm with the front end.
            a = round(p[3] / 250.0)
            row.append([color, a])
        rows.append(row)
    return rows
def channelsToHex(r, g, b):
    """Pack r/g/b channel values into a 6-digit lowercase hex string."""
    value = (r << 16) + (g << 8) + (b << 0)
    digits = hex(value)[2:]
    # Left-pad with zeros up to six digits (same as the while-prepend loop).
    return digits.rjust(6, '0')
def compareImgToArr(img1, imgArr):
    """Count per-channel mismatches between a PIL image and an RGBA array.

    Returns (r, g, b, a): for each channel, the number of pixel positions
    where img1.getpixel((x, y)) differs from imgArr[y][x].
    """
    r = 0
    g = 0
    b = 0
    a = 0
    width, height = img1.size
    # range (not xrange) so this also runs under Python 3; identical
    # iteration behaviour on Python 2.
    for y in range(height):
        for x in range(width):
            px1 = img1.getpixel((x, y))
            px2 = imgArr[y][x]
            if px1[0] != px2[0]: r += 1
            if px1[1] != px2[1]: g += 1
            if px1[2] != px2[2]: b += 1
            if px1[3] != px2[3]: a += 1
    return (r, g, b, a)
# takes an image object and looks in the db for all
# of its blocks and then reconstructs the original
# image from them
def constructImgFromBlocks(image, paddedImg=None):
    """Rebuild the padded image from its overlapping 100x100 blocks.

    Blocks sit on a 50px grid, so interior pixels are covered by up to four
    blocks; each block contributes newA // d alpha, where d is the coverage
    count of that pixel (1, 2 or 4).  When *paddedImg* is supplied the
    reconstruction is diffed against it for debugging.  Returns a PIL image.
    """
    blocks = models.Block.objects.filter(image=image).order_by('index')
    width = image.padded_width
    height = image.padded_height
    newImgArr = [[(0, 0, 0, 0) for x in range(width)] for x in range(height)]
    # Floor division (//) keeps Python 2's integer semantics under Python 3.
    blocksPerRow = width // 50 - 1
    for i in range(len(blocks)):
        block = blocks[i]
        blockPixels = block.getPixels()
        xOffset = (i % blocksPerRow) * 50
        yOffset = (i // blocksPerRow) * 50
        for y in range(100):
            for x in range(100):
                absx = x + xOffset
                absy = y + yOffset
                (r, g, b, newA) = tuple(blockPixels[y][x])
                (_, _, _, oldA) = newImgArr[absy][absx]
                # d = number of blocks overlapping this pixel in the
                # interior: 1 in the top-left corner, 2 on the top/left
                # edges, 4 elsewhere...
                if absx < 50 and absy < 50:
                    d = 1
                elif absx < 50:
                    d = 2
                elif absy < 50:
                    d = 2
                else:
                    d = 4
                # ...then corrected for the right and bottom borders.
                if absx >= width - 50 and absy < 50:
                    d = 1
                elif absx >= width - 50:
                    d = 2
                if absy >= height - 50 and absx < 50:
                    d = 1
                elif absy >= height - 50 and absx >= width - 50:
                    d = 1
                elif absy >= height - 50:
                    d = 2
                a = oldA + (newA // d)
                if a > 255:
                    # Overshoot indicates a coverage-count error; log it.
                    print("(%d,%d)" % (absx, absy))
                    print(a)
                newImgArr[absy][absx] = (r, g, b, a)
    print("done constructing image array")
    if paddedImg != None:
        # BUG FIX: previously compared against the undefined name
        # `newImgNumpyArr` (a commented-out conversion), raising NameError;
        # use the list-based array that was actually built.
        comparison = compareImgToArr(paddedImg, newImgArr)
        print("ERRORS --> r: %d, g: %d, b: %d, a: %d" % comparison)
    newImg = createImageFromArray(newImgArr)
    return newImg
def createImageFromArray(arr):
    """Build an RGBA PIL image from a 2D array of (r, g, b, a) tuples."""
    w = len(arr[0])
    h = len(arr)
    img = Image.new(mode='RGBA', size=(w, h), color=(0, 0, 0, 0))
    # range (not xrange) so this also runs under Python 3; identical
    # iteration behaviour on Python 2.
    for y in range(h):
        for x in range(w):
            img.putpixel((x, y), arr[y][x])
    return img
# takes in the padded image data and the model that stores
# the original image sizes, email, etc
def saveBlocks(imgData, image, selectedPixels):
    """Slice the padded image into overlapping 100x100 blocks and store them.

    *imgData* is the padded PIL image, *image* its model row, and
    *selectedPixels* a 2D 0/1 mask from the submitted polygon.  Alpha is set
    to 255 inside the selection and 0 outside; blocks that are uniformly
    inside or outside are marked done immediately so workers never see them.
    Meant to run in a background thread.
    """
    width, height = imgData.size
    print("width: %d, height: %d" % (width, height))
    rows = []
    # range (not xrange) so this also runs under Python 3; identical
    # iteration behaviour on Python 2.
    for y in range(height):
        row = []
        for x in range(width):
            (r, g, b, _) = imgData.getpixel((x, y))
            a = (selectedPixels[y][x]) * 255
            row.append((r, g, b, a))
        rows.append(row)
    numBlocks = getNumBlocks(width, height)
    image.blocksLeft = numBlocks
    image.totalBlocks = numBlocks
    for i in range(numBlocks):
        (blockPixels, done) = getBlockFromArray(rows, i)
        block = models.Block(image=image, index=i)
        block.setPixels(blockPixels)
        block.done = done
        if done:
            image.blocksLeft -= 1
        block.save()
    image.save()
    print("all blocks saved")
def getNumBlocks(width, height):
    """Number of overlapping 100x100 blocks tiling a padded image.

    Blocks step by 50px, so there are (dim/50 - 1) per axis.  Floor
    division (//) keeps the Python-2 integer result under Python 3, where
    plain / would produce a float.
    """
    return ((width // 50) - 1) * ((height // 50) - 1)
def getBlockFromArray(pixelArray, i):
    """Extract the i-th 100x100 block from the full padded pixel array.

    Returns (rows, done) where done is True when the block is uniformly
    transparent or uniformly opaque -- nothing for a worker to refine.
    Floor division keeps Python 2 integer semantics under Python 3.
    """
    width = len(pixelArray[0])
    blocksPerRow = (width // 50) - 1
    xOffset = (i % blocksPerRow) * 50
    yOffset = (i // blocksPerRow) * 50
    var = 0
    rows = []
    for y in range(100):
        y = y + yOffset
        row = []
        for x in range(100):
            x = x + xOffset
            (r, g, b, a) = pixelArray[y][x]
            # Integer count of fully-opaque pixels: only a == 255 adds 1
            # (matches the original Python 2 integer-division behaviour).
            var += a // 255
            row.append((r, g, b, a))
        rows.append(row)
    done = var == 0 or var == 100 * 100
    return (rows, done)
# scales image to about 0.5 megapixels
# because thats still a crap ton of workers
def scaleImage(img):
    """Shrink *img* in place to roughly 0.5 megapixels (~225 work blocks).

    Images already at or below the target size are left untouched.
    """
    w, h = img.size
    target = 750.0  # (750*750) / (50*50) = 225 blocks
    factor = math.sqrt((target * target) / (w * h))
    if factor > 1:
        return  # already small enough; nothing to do
    print("original w: " + str(w))
    print("original h: " + str(h))
    w = int(w * factor)
    h = int(h * factor)
    print("new w: " + str(w))
    print("new h: " + str(h))
    print("factor: " + str(factor))
    img.thumbnail((w, h), Image.ANTIALIAS)
# pads image with alpha=0 pixels so that 100x100 blocks fit nicely
def padImage(img):
    """Pad *img* with transparent pixels so both dimensions are multiples
    of 100 (one block size); the original is pasted at the top-left."""
    def rnd(x, base=100):
        # Round x up to the next multiple of base.  Floor division keeps
        # the result an int on Python 3 (plain / would return a float and
        # break Image.new's size argument).
        return base * ((x + (base - 1)) // base)
    w, h = img.size
    w = rnd(w)
    h = rnd(h)
    newImg = Image.new(mode='RGBA', size=(w, h), color=(0, 0, 0, 0))
    newImg.paste(img, (0, 0))
    return newImg
# given the location of an image, this
# creates blocks out of the image and
# saves them into the db
# meant to be called in a new thread
# def handleBlockCreation(image,poly):
# imageLoc = image.img
# blocks = image_utils.makeBlocks(img,poly)
# i = 0
# for b in blocks:
# block = models.Block(
# image = image,
# pixels = b,
# index = i
# )
# i += 1
# return None
def JsonResponse(data):
    """Serialize *data* to JSON and wrap it in an HTTP response."""
    body = json.dumps(data)
    return HttpResponse(body, content_type="application/json")
# def generatePixels(size):
# inPixelLabeled = ['444444', 1]
# # outPixelLabeled = ['999999', 1]
# # inPixelUnLabeled = ['444444', 0]
# outPixelUnLabeled = ['999999', 0]
# rows = []
# for r in xrange(size):
# row = []
# for p in xrange(size):
# if p <= int(r**1.03):
# row.append(outPixelUnLabeled)
# else:
# row.append(inPixelLabeled)
# rows.append(row)
# return rows
def poly2Pixels(points, w, h):
    """Rasterise a polygon into an h x w grid of containment flags.

    *points* is a list of (x, y) vertices; cell [y][x] is True when the
    pixel coordinate lies inside the polygon (matplotlib Path containment).
    """
    rows = []
    path = Path(points)
    # range (not xrange) so this also runs under Python 3; identical
    # iteration behaviour on Python 2.
    for y in range(h):
        row = []
        for x in range(w):
            row.append(path.contains_point((x, y)))
        rows.append(row)
    return rows
# pixelarray = poly2pixels(poly, width, height) # result in pixelarray
# helper function to convert polygon into a 2d array of selected pixels
# def poly2Pixels(poly_verts, nx, ny):
# # Create vertex coordinates for each grid cell...
# # (<0,0> is at the top left of the grid in this system)
# x, y = np.meshgrid(np.arange(nx), np.arange(ny))
# x, y = x.flatten(), y.flatten()
# points = np.vstack((x,y)).T
# grid = points_inside_poly(points, poly_verts)
# grid = grid.reshape((ny,nx))
# return grid
|
{"hexsha": "e200dfdadfffd03c8f5b44ce339cf04103467dc9", "size": 12620, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/views.py", "max_stars_repo_name": "jackm321/CrowdImage", "max_stars_repo_head_hexsha": "3c6bd11274a89615f17678346b25a439a597fd85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-20T02:30:36.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-20T02:30:36.000Z", "max_issues_repo_path": "core/views.py", "max_issues_repo_name": "jackm321/CrowdImage", "max_issues_repo_head_hexsha": "3c6bd11274a89615f17678346b25a439a597fd85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/views.py", "max_forks_repo_name": "jackm321/CrowdImage", "max_forks_repo_head_hexsha": "3c6bd11274a89615f17678346b25a439a597fd85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7471526196, "max_line_length": 106, "alphanum_fraction": 0.5848652932, "include": true, "reason": "import numpy", "num_tokens": 3355}
|
import numpy as np
import pandas as pd
import math
import pickle
from pandas_datareader import data as web
import datetime
from linearmodels import PanelOLS
from rpy2.robjects import r
from rpy2.robjects import pandas2ri
pandas2ri.activate()
pd.options.mode.chained_assignment = "raise"
"""
This script takes PSID data created from psid_download.R and:
1) Creates variables at the "tax filing unit" (equal to family unit in
PSID since there is no info on the filing status chosen).
2) Selects a sample of observations to work with (e.g., dropping very
old, very low income, etc.).
3) Computes a measure of lifetime income and places each household into
a lifetime income percentile group
"""
# Read data from R into pandas dataframe (requires psid_download.R output)
r["load"]("psid1968to2017.RData")
raw_df = r("psid_df")
# Create unique identifier for each household
# note that will define a new household if head or spouse changes
# keep only current heads
# before 1983, head is relation.head == 1, 1983+ head is given by
# relation.head == 10
# Select just those in the SRC sample, which is representative of the
# population and so will not require the use of sampling weights
# SRC sample families have 1968 family interview numbers less than 3000
raw_df = raw_df[raw_df["ID1968"] < 3000].copy()
# Recode pre-1983 head/spouse codes (1/2) to the post-1983 codes (10/20).
# NOTE(review): these are chained assignments; with
# pd.options.mode.chained_assignment = "raise" set above, confirm this
# does not raise on the pandas version in use.
raw_df["relation.head"][
    (raw_df["year"] < 1983) & (raw_df["relation.head"] == 1)
] = 10
raw_df["relation.head"][
    (raw_df["year"] < 1983) & (raw_df["relation.head"] == 2)
] = 20
# Heads: relation 10 with sequence number 1 (current head in the family)
head_df = raw_df.loc[
    raw_df.index[(raw_df["relation.head"] == 10) & (raw_df["sequence"] == 1)],
    :,
]
head_df.rename(columns={"pid": "head_id"}, inplace=True)
# keep legal spouse or long term partners (codes 20-22, sequence 2)
spouse_df = raw_df.loc[
    raw_df.index[
        (raw_df["relation.head"] >= 20)
        & (raw_df["relation.head"] <= 22)
        & (raw_df["sequence"] == 2)
    ],
    ["pid", "ID1968", "year", "interview_number"],
]
spouse_df.rename(columns={"pid": "spouse_id"}, inplace=True)
# Attach the spouse id (if any) to each head-year observation
psid_df = head_df.merge(
    spouse_df, how="left", on=["ID1968", "year", "interview_number"]
)
# create unique household id for combination of head and a specific spouse
# (head id shifted by 1e6 plus spouse id; 0 when no spouse)
psid_df["hh_id"] = (psid_df["head_id"] * 1000000) + psid_df[
    "spouse_id"
].fillna(0)
# Fix ages to increment by one (or two) between survey waves. They do
# not always do this because the survey may be asked at different times
# of year
min_age_df = psid_df.groupby("hh_id").agg(["min"])["head_age"]
min_age_df.rename(columns={"min": "min_age"}, inplace=True)
min_year_df = psid_df.groupby("hh_id").agg(["min"])["year"]
min_year_df.rename(columns={"min": "min_year"}, inplace=True)
psid_df = psid_df.merge(min_age_df, on="hh_id", how="left")
psid_df = psid_df.merge(min_year_df, on="hh_id", how="left")
psid_df.sort_values(by=["hh_id", "year"], inplace=True)
# Reconstructed age: first observed age plus years elapsed since first wave
psid_df["age"] = psid_df["year"] - psid_df["min_year"] + psid_df["min_age"]
# Deflate nominal variables
# because surveys ask about prior year
psid_df["year_data"] = psid_df["year"] - 1
# create spouse labor income, since not consistent variable name
# across time (pre/post 1993 variables; 1993 itself only has the post var)
psid_df["spouse_labor_inc"] = (
    psid_df["spouse_labor_inc_pre1993"] + psid_df["spouse_labor_inc_post1993"]
)
psid_df.loc[psid_df["year"] == 1993, "spouse_labor_inc"] = psid_df[
    "spouse_labor_inc_post1993"
]
# set beginning and end dates for the CPI deflator series
start = datetime.datetime(1968, 1, 1)
end = datetime.datetime(2015, 1, 1)
# pull series of interest using pandas_datareader (FRED CPI, all urban)
fred_data = web.DataReader(["CPIAUCSL"], "fred", start, end)
# Make data annual by averaging over months in year
fred_data = fred_data.resample("A").mean()
fred_data["year_data"] = fred_data.index.year
# Merge CPI onto the income year (survey year - 1)
psid_df2 = psid_df.merge(fred_data, how="left", on="year_data")
psid_df = psid_df2
# Base-year price level used to express everything in 2010 dollars
cpi_2010 = fred_data.loc[datetime.datetime(2010, 12, 31), "CPIAUCSL"]
# All dollar-denominated variables to be deflated below
nominal_vars = [
    "head_labor_inc",
    "spouse_labor_inc",
    "head_whether_receive_afdc_prior_year",
    "spouse_whether_receive_afdc_prior_year",
    "head_ssi_prior_year",
    "spouse_ssi_prior_year",
    "other_familyunit_ssi_prior_year",
    "head_other_welfare_prior_year",
    "spouse_other_welfare_prior_year",
    "other_familyunit_other_welfare_prior_year",
    "head_unemp_inc_prior_year",
    "spouse_unemp_inc_prior_year",
    "other_familyunit_unemp_inc_prior_year",
    "head_workers_comp_prior_year",
    "spouse_workers_comp_prior_year",
    "other_familyunit_workers_comp_prior_year",
    "head_vet_pen_prior_year",
    "spouse_vet_pen_prior_year",
    "other_familyunit_vet_pen_prior_year",
    "head_spouse_taxable_inc",
    "other_familyunit_taxable_inc",
    "head_spouse_tax_table",
    "food_out_expend",
    "food_in_expend",
    "other_familyunit_asset_inc",
    "head_dividend_inc",
    "spouse_dividend_inc",
    "head_interest_inc",
    "spouse_interest_inc",
    "head_rent_inc",
    "spouse_rent_inc",
    "family_total_inc",
    "head_and_spouse_transfer_income",
    "other_familyunit_transfer_income",
    "head_socsec_income",
    "spouse_socsec_income",
    "other_familyunit_socsec_income",
    "head_noncorp_bus_asset_income",
    "spouse_noncorp_bus_asset_income",
    "head_noncorp_bus_labor_income",
    "spouse_noncorp_bus_labor_income",
    "noncorp_businc",
    "net_wealth",
    "inheritance",
    "value_inheritance_1st",
    "value_inheritance_2nd",
    "value_inheritance_3rd",
]
# Express each nominal variable in constant 2010 dollars
for item in nominal_vars:
    psid_df[item] = (psid_df[item] * cpi_2010) / psid_df["CPIAUCSL"]
# remove intermediate dataframes
del raw_df, spouse_df, head_df, fred_data, psid_df2
# Fill in missing values with zeros
psid_df[nominal_vars] = psid_df[nominal_vars].fillna(0)
psid_df[["head_annual_hours", "spouse_annual_hours"]] = psid_df[
    ["head_annual_hours", "spouse_annual_hours"]
].fillna(0)
# Construct family ("filing unit") level variables
psid_df["incwage_hh"] = psid_df["head_labor_inc"] + psid_df["spouse_labor_inc"]
psid_df["earninc_hh"] = (
    psid_df["incwage_hh"]
    + psid_df["head_noncorp_bus_labor_income"]
    + psid_df["spouse_noncorp_bus_labor_income"]
)
psid_df["businc_hh"] = (
    psid_df["head_noncorp_bus_labor_income"]
    + psid_df["spouse_noncorp_bus_labor_income"]
)
# note that PSID doesn't separate hours towards employed and business
# work
psid_df["earnhours_hh"] = (
    psid_df["head_annual_hours"] + psid_df["spouse_annual_hours"]
)
# Implied hourly rates; zero-hour households produce inf here and are
# dropped by the query() below (earnhours_hh > 200)
psid_df["wage_rate"] = psid_df["incwage_hh"] / psid_df["earnhours_hh"]
psid_df["earn_rate"] = psid_df["earninc_hh"] / psid_df["earnhours_hh"]
with np.errstate(divide="ignore"):
    psid_df["ln_wage_rate"] = np.log(psid_df["wage_rate"])
    psid_df["ln_earn_rate"] = np.log(psid_df["earn_rate"])
# Marital-status-by-gender indicators (gender: 1 = male, 2 = female;
# marital_status == 1 means married)
psid_df["singlemale"] = (psid_df["head_gender"] == 1) & (
    psid_df["marital_status"] != 1
)
psid_df["singlefemale"] = (psid_df["head_gender"] == 2) & (
    psid_df["marital_status"] != 1
)
psid_df["marriedmalehead"] = (psid_df["head_gender"] == 1) & (
    psid_df["marital_status"] == 1
)
psid_df["marriedfemalehead"] = (psid_df["head_gender"] == 2) & (
    psid_df["marital_status"] == 1
)
psid_df["married"] = (psid_df["marital_status"] == 1).astype(int)
# sample selection
# drop very young, very old, those with very low earnings, and any
# outliers with very high earnings, those working at least 200 hrs
# should check to see if we want to drop any particular years... (e.g.,
# I think some data is missing before 1970)
psid_df.query(
    "age >= 20 & age <= 80 & incwage_hh >= 5"
    + " & wage_rate >= 5 & wage_rate <= 25000"
    + " & earnhours_hh > 200",
    inplace=True,
)
# Indicator for obs being from PSID not interpolated value
# used to make drops later
psid_df.sort_values(by=["hh_id", "year"], inplace=True)
# Diagnostic dump of id/age/marital-status columns for manual inspection
psid_df[
    [
        "head_id",
        "spouse_id",
        "hh_id",
        "head_age",
        "age",
        "spouse_age",
        "ID1968",
        "year",
        "interview_number",
        "head_marital_status",
        "marital_status",
    ]
].to_csv("psid_to_check.csv")
# The next several lines try to identify and then drop from the sample
# hh_ids that report more than one type of marital status
# there are 179 of these, 26 are men who report being married and not at
# different times, even when a spouse id is not present
marriedmale_df = psid_df.groupby("hh_id").agg(["max"])["marriedmalehead"]
singlemale_df = psid_df.groupby("hh_id").agg(["max"])["singlemale"]
marriedfemale_df = psid_df.groupby("hh_id").agg(["max"])["marriedfemalehead"]
singlefemale_df = psid_df.groupby("hh_id").agg(["max"])["singlefemale"]
marriedmale_df.rename(columns={"max": "m_marriedmalehead"}, inplace=True)
singlemale_df.rename(columns={"max": "m_singlemale"}, inplace=True)
marriedfemale_df.rename(columns={"max": "m_marriedfemalehead"}, inplace=True)
singlefemale_df.rename(columns={"max": "m_singlefemale"}, inplace=True)
merged_df = marriedmale_df.join(
    [singlemale_df, marriedfemale_df, singlefemale_df], how="outer", sort=True
)
# sum_status > 1 flags households that ever report two different statuses
merged_df["sum_status"] = (
    merged_df["m_singlemale"].astype(int)
    + merged_df["m_singlefemale"].astype(int)
    + merged_df["m_marriedfemalehead"].astype(int)
    + merged_df["m_marriedmalehead"].astype(int)
)
merged_df_to_list = merged_df[merged_df["sum_status"] > 1]
merged_df_to_list.to_csv("hh_id_two_statuses.csv")
hhid_to_drop = merged_df_to_list.copy()
hhid_to_drop["keep"] = False
psid_df = psid_df.merge(hhid_to_drop, on="hh_id", how="left")
psid_df["keep"].fillna(True, inplace=True)
psid_df = psid_df[psid_df["keep"]].copy()
psid_df["in_psid"] = True
# print number of obs by year
print(
    "Number of obs by year = ",
    psid_df["hh_id"].groupby([psid_df.year]).agg("count"),
)
num_obs_psid = psid_df.shape[0]
psid_df.sort_values(by=["hh_id", "year"], inplace=True)
test_psid_df = psid_df.copy()
# "fill in" observations - so have observation for each household
# from age 20-80
# note that do this before running regression, but that's ok since
# wages missing here so these obs don't affect regression
uid = psid_df["hh_id"].unique()
all_ages = list(range(20, 81)) # for list of ages 20 to 80
ids_full = np.array([[x] * len(all_ages) for x in list(uid)]).flatten()
ages = all_ages * len(uid)
balanced_panel = pd.DataFrame({"hh_id": ids_full, "age": ages})
rebalanced_data = balanced_panel.merge(
psid_df, how="left", on=["hh_id", "age"]
)
# Backfill and then forward fill variables that are constant over time
# within hhid
constant_vars = [
"head_race",
"head_gender",
"singlemale",
"singlefemale",
"marriedmalehead",
"marriedfemalehead",
"ID1968",
"pernum",
]
rebalanced2 = rebalanced_data
for item in constant_vars:
rebalanced_data[item] = rebalanced_data.groupby("hh_id")[item].fillna(
method="bfill"
)
rebalanced_data[item] = rebalanced_data.groupby("hh_id")[item].fillna(
method="ffill"
)
### NOTE: we seem to get some cases where the marital status is not constant
# despite trying to set up the indentifcation of a household such that it
# has to be. Why this is happening needs to be checked.
# Fill in year by doing a cumulative counter within each hh_id and then
# using the difference between age and this counter to infer what the
# year should be'
rebalanced_data.sort_values(["hh_id", "age"], inplace=True)
rebalanced_data["counter"] = rebalanced_data.groupby("hh_id").cumcount()
rebalanced_data["diff"] = rebalanced_data["year"] - rebalanced_data["counter"]
rebalanced_data["diff"].fillna(0, inplace=True) # because NaNs if year missing
max_df = rebalanced_data.groupby("hh_id").agg(["max"])["diff"]
rebalanced_data = rebalanced_data.join(max_df, how="left", on=["hh_id"])
rebalanced_data["year"] = rebalanced_data["max"] + rebalanced_data["counter"]
### Check that there are 61 obs for each hh_id
# create additional variables for first stage regressions
df = rebalanced_data.reset_index()
df["age2"] = df["age"] ** 2
df["age3"] = df["age"] ** 3
df["age_smale"] = df["age"] * df["singlemale"]
df["age_sfemale"] = df["age"] * df["singlefemale"]
df["age_mmale"] = df["age"] * df["marriedmalehead"]
df["age_mfemale"] = df["age"] * df["marriedfemalehead"]
df["age_smale2"] = df["age2"] * df["singlemale"]
df["age_sfemale2"] = df["age2"] * df["singlefemale"]
df["age_mmale2"] = df["age2"] * df["marriedmalehead"]
df["age_mfemale2"] = df["age2"] * df["marriedfemalehead"]
df["age_smale3"] = df["age3"] * df["singlemale"]
df["age_sfemale3"] = df["age3"] * df["singlefemale"]
df["age_mmale3"] = df["age3"] * df["marriedmalehead"]
df["age_mfemale3"] = df["age3"] * df["marriedfemalehead"]
# run regressions to impute wages for years not observed in sample
df.set_index(["hh_id", "year"], inplace=True)
list_of_statuses = [
"Single Males",
"Single Females",
"Married, Male Head",
"Married, Female Head",
]
list_of_dfs = [
df[df["singlemale"]].copy(),
df[df["singlefemale"]].copy(),
df[df["marriedmalehead"]].copy(),
df[df["marriedfemalehead"]].copy(),
]
list_of_dfs_with_fitted_vals = []
first_stage_model_results = {
"Names": [
"Head Age",
"",
"Head Age^2",
"",
"Head Age^3",
"",
"R-Squared",
"Observations",
"Households",
],
"Single Males": [],
"Single Females": [],
"Married, Male Head": [],
"Married, Female Head": [],
}
# For each household type: run a cubic-in-age panel regression of log wages
# with household fixed effects, save coefficients, and keep the fitted
# (imputed) wages for every year in the balanced panel.
for i, data in enumerate(list_of_dfs):
    # Note that including entity and time effects leads to a collinearity
    # I think this is because there are some years at begin and end of
    # sample with just one person
    # mod = PanelOLS(data.ln_wage_rate,
    # data[['age', 'age2', 'age3']],
    # weights=data.fam_smpl_wgt_core,
    # entity_effects=True, time_effects=True)
    mod = PanelOLS(
        data.ln_wage_rate, data[["age", "age2", "age3"]], entity_effects=True
    )
    res = mod.fit(cov_type="clustered", cluster_entity=True)
    print("Summary for ", list_of_statuses[i])
    print(res.summary)
    # Save model results to dictionary
    first_stage_model_results[list_of_statuses[i]] = [
        res.params["age"],
        res.std_errors["age"],
        res.params["age2"],
        res.std_errors["age2"],
        res.params["age3"],
        res.std_errors["age3"],
        res.rsquared,
        res.nobs,
        res.entity_info["total"],
    ]
    # Imputed log wage = common age profile + household fixed effect;
    # missing=True also predicts rows whose outcome was missing, which is
    # what fills in the unobserved years.
    fit_values = res.predict(fitted=True, effects=True, missing=True)
    fit_values["predictions"] = (
        fit_values["fitted_values"] + fit_values["estimated_effects"]
    )
    list_of_dfs_with_fitted_vals.append(
        data.join(fit_values, how="left", on=["hh_id", "year"])
    )
# Recombine the four household-type sub-samples into one panel.
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat stacks the frames in the same [0], [1], [2], [3] order the
# nested appends produced.
df_w_fit = pd.concat(list_of_dfs_with_fitted_vals)
df_w_fit.rename(columns={"predictions": "ln_fillin_wage"}, inplace=True)
print("Descritpion of data coming out of estimation: ", df_w_fit.describe())
# Seems to be the same as going into estimation
# Compute lifetime income for each filer
int_rate = 0.04  # assumed interest rate to compute NPV of lifetime income
time_endow = 4000  # assumed time endowment - set at 4000 hours !!! May want
# to change this to be different for single households than married !!!
# Potential annual earnings = imputed hourly wage * time endowment,
# discounted back to age 20.
df_w_fit["time_wage"] = np.exp(df_w_fit["ln_fillin_wage"]) * time_endow
df_w_fit["lifetime_inc"] = df_w_fit["time_wage"] * (
    (1 / (1 + int_rate)) ** (df_w_fit["age"] - 20)
)
# NPV of lifetime income: sum discounted annual earnings by household.
li_df = (df_w_fit[["lifetime_inc"]].groupby(["hh_id"]).sum()).copy()
# find percentile in distribution of lifetime income
li_df["li_percentile"] = li_df.lifetime_inc.rank(pct=True)
# Put in bins (uneven percentile groups used in the calibration tables).
groups = [0.0, 0.25, 0.5, 0.7, 0.8, 0.9, 0.99, 1.0]
cats_pct = ["0-25", "26-50", "51-70", "71-80", "81-90", "91-99", "100"]
li_df = li_df.join(
    pd.get_dummies(pd.cut(li_df["li_percentile"], groups, labels=cats_pct))
).copy()
li_df["li_group"] = pd.cut(li_df["li_percentile"], groups)
# Also bin lifetime income into deciles.
deciles = list(np.arange(0.0, 1.1, 0.10))
cats_10 = ["D1", "D2", "D3", "D4", "D5", "D6", "D7", "D8", "D9", "D10"]
li_df = li_df.join(
    pd.get_dummies(pd.cut(li_df["li_percentile"], deciles, labels=cats_10))
).copy()
li_df["li_decile"] = pd.cut(li_df["li_percentile"], deciles)
# Merge lifetime income to panel
df_w_fit.drop(columns="lifetime_inc", inplace=True)
df_fit2 = df_w_fit.join(
    li_df, how="left", on=["hh_id"], lsuffix="_x", rsuffix="_y"
)
# Drop rows from the balanced panel that were not in the original panel
df_fit2["in_psid"].fillna(False, inplace=True)
panel_li = (df_fit2[df_fit2["in_psid"]]).copy()
# Save dictionary of regression results
pickle.dump(
    first_stage_model_results, open("first_stage_reg_results.pkl", "wb")
)
# Save dataframe
pickle.dump(panel_li, open("psid_lifetime_income.pkl", "wb"))
# Do some checks on the data
# Check that number of obs in final data equals what in psid after
# sample selection
if panel_li.shape[0] != num_obs_psid:
    print("Number of observations in final data set is not right")
    print("Obs in PSID after selection = ", num_obs_psid)
    print("Obs in final panel = ", panel_li.shape[0])
    assert False
# Check that have at least 1000 obs in each year
panel_li.sort_values(by=["hh_id", "year"], inplace=True)
# Spot-check that variables survived the merges unchanged relative to the
# reference PSID frame.
var_list = nominal_vars + constant_vars
for item in var_list:
    print("Checking ", item)
    try:
        assert np.allclose(panel_li[item], test_psid_df[item], atol=1e-5)
    except TypeError:
        # np.allclose cannot compare non-numeric columns; skip those.
        print("Had to skip ", item)
# check everyone has a group and decile and that fraction in each is
# correct. Note that can't check the latter with final unbalanced panel.
print("Checking counts of percentile groupings: ")
for item in cats_10 + cats_pct:
    assert panel_li[item].count() == panel_li.shape[0]
print("Checking percentile groupings: ")
for d in cats_10:
    assert math.isclose(li_df[d].mean(), 0.1, rel_tol=0.03)
for i, g in enumerate(cats_pct):
    percent_in_g = groups[i + 1] - groups[i]
    assert math.isclose(li_df[g].mean(), percent_in_g, rel_tol=0.03)
|
{"hexsha": "11406cd35b88c79c3ba127221346bcc91d749423", "size": 17905, "ext": "py", "lang": "Python", "max_stars_repo_path": "ogusa/psid_data_setup.py", "max_stars_repo_name": "jdebacker/OG-USA-Calibration", "max_stars_repo_head_hexsha": "1e82a6f3cb52674c2c6a6055c552a1029e7b5d52", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2019-01-30T09:47:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T17:19:38.000Z", "max_issues_repo_path": "ogusa/psid_data_setup.py", "max_issues_repo_name": "PSLmodels/OG-USA", "max_issues_repo_head_hexsha": "1e82a6f3cb52674c2c6a6055c552a1029e7b5d52", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 307, "max_issues_repo_issues_event_min_datetime": "2018-12-20T13:56:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T06:32:49.000Z", "max_forks_repo_path": "ogusa/psid_data_setup.py", "max_forks_repo_name": "duncanhobbs/OG-USA", "max_forks_repo_head_hexsha": "1e82a6f3cb52674c2c6a6055c552a1029e7b5d52", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 42, "max_forks_repo_forks_event_min_datetime": "2019-02-07T20:21:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T15:19:26.000Z", "avg_line_length": 37.0703933747, "max_line_length": 79, "alphanum_fraction": 0.697570511, "include": true, "reason": "import numpy", "num_tokens": 5211}
|
/************************************************************************
MIT License
Copyright (c) 2021 Deqi Tang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
************************************************************************/
// @file cmd/atomsciflow_calc_vasp.cpp
// @author: DeqiTang
// Mail: deqitang@gmail.com
// Created Time: Mon 07 Mar 2022 09:45:38 PM CST
#include "atomsciflow_calc_vasp.h"
#include <boost/program_options.hpp>
//#include <filesystem>
#include <boost/filesystem.hpp>
#include <iostream>
#include <string>
#include <regex>
#include <cstdlib>
#include "atomsciflow/parser/cif.h"
#include "atomsciflow/parser/xyz.h"
#include "atomsciflow/parser/tools.h"
#include "atomsciflow/base/crystal.h"
#include "atomsciflow/utils.h"
#include "atomsciflow/vasp/vasp.h"
#include "atomsciflow/vasp/static.h"
namespace po = boost::program_options;
//namespace fs = std::filesystem;
namespace fs = boost::filesystem; // --std=c++11 -lboost_filesystem -lboost_system
// Handle the "vasp" sub-command: re-parse the leftover command line against
// the vasp-specific options, then build a VASP static calculation from the
// given xyz structure and generate/run it in the requested directory.
//
// @param parsed  options parsed for the top-level command; options it did
//                not recognize are re-parsed here.
// @param vm      variables map the vasp options are stored into.
void atomsciflow_calc_vasp(po::parsed_options& parsed, po::variables_map& vm) {
    po::options_description opt_vasp("vasp options");
    // FIX: boost::program_options expects "long,s" with no space before the
    // short name; the original "help, h" style spec left the short options
    // broken. Spaces removed on help/runtype/directory.
    opt_vasp.add_options()
        ("help,h", "Print out help information for vasp sub command")
        ("runtype,r", po::value<int>()->default_value(0), "Choice of the calculation type: 0 -> static run, 1 -> geometric optimization")
        ("xyz", po::value<std::string>()->required(), "input xyz structure file")
        ("directory,d", po::value<std::string>()->default_value("asflow-calc-running"), "The directory to put all the resources")
        ("runopt", po::value<std::string>()->default_value("gen"), "Running option, generation only, or running at the same time")
        ("auto", po::value<int>()->default_value(3), "Automation level: 0 -> doing nothing, 1 -> copying files to server")
        ("mpi", po::value<std::string>()->default_value(""))
        ("server", po::value<std::string>()->default_value("pbs"))
        ("jobname", po::value<std::string>()->default_value("atomsciflow-job"))
        ("nodes", po::value<int>()->default_value(1))
        ("ppn", po::value<int>()->default_value(32))
        // llhpc
        // NOTE(review): "parition" looks like a typo for "partition", but the
        // name is kept so existing scripts passing --parition keep working.
        ("parition", po::value<std::string>()->default_value("free"))
        ("ntask", po::value<int>()->default_value(24))
        ("stdout", po::value<std::string>()->default_value("slurm.out"))
        ("stderr", po::value<std::string>()->default_value("slurm.err"))
        ;
    // Collect the arguments the top-level parser did not consume and drop
    // the sub-command name itself before re-parsing.
    std::vector<std::string> opts = po::collect_unrecognized(parsed.options, po::include_positional);
    opts.erase(opts.begin());
    po::store(po::command_line_parser(opts).options(opt_vasp).style(po::command_line_style::unix_style | po::command_line_style::allow_long_disguise).extra_style_parser(&allow_negative_numbers).run(), vm);
    if (vm.count("help")) {
        std::cout << opt_vasp << std::endl;
        std::exit(1);
    }
    po::notify(vm);

    std::cout << "working directory: " << vm["directory"].as<std::string>() << std::endl;

    auto task = atomsciflow::VaspStatic();
    task.get_xyz(vm["xyz"].as<std::string>());
    task.run(vm["directory"].as<std::string>());
}
|
{"hexsha": "82405622b59f6aff60d8d65ee2a39b93a070bcaa", "size": 4462, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "cmd/atomsciflow_calc_vasp.cpp", "max_stars_repo_name": "DeqiTang/build-test-atomsciflow", "max_stars_repo_head_hexsha": "6fb65c79e74993e2100fbbca31b910d495076805", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-01-25T01:44:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T01:44:32.000Z", "max_issues_repo_path": "cmd/atomsciflow_calc_vasp.cpp", "max_issues_repo_name": "DeqiTang/build-test-atomsciflow", "max_issues_repo_head_hexsha": "6fb65c79e74993e2100fbbca31b910d495076805", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cmd/atomsciflow_calc_vasp.cpp", "max_forks_repo_name": "DeqiTang/build-test-atomsciflow", "max_forks_repo_head_hexsha": "6fb65c79e74993e2100fbbca31b910d495076805", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.4791666667, "max_line_length": 205, "alphanum_fraction": 0.6671896011, "num_tokens": 1085}
|
import pytest
import numpy.testing as npt
@pytest.fixture
def graphs_and_features():
    """Build a random graph, a node-permuted copy of it, matching feature
    matrices, and the dense permutation matrix relating them."""
    import numpy as np
    import torch

    # Random node permutation and its one-hot (dense) matrix form.
    perm = np.random.permutation(5)
    perm_mat = np.zeros((5, 5), dtype=np.float32)
    perm_mat[np.arange(5), perm] = 1
    perm_mat = torch.tensor(perm_mat, dtype=torch.float32)

    import dgl
    graph = dgl.rand_graph(5, 20)
    graph_perm = dgl.reorder_graph(
        graph,
        "custom",
        permute_config={"nodes_perm": perm}
    )

    import hpno
    graph = hpno.heterograph(graph)
    graph_perm = hpno.heterograph(graph_perm)

    feats = torch.randn(5, 3)
    feats_perm = perm_mat @ feats
    return graph, graph_perm, feats, feats_perm, perm_mat
def test_layer_equivariance(graphs_and_features):
    """Permuting nodes before the layer must match permuting its output."""
    g0, g1, h0, h1, permutation_matrix = graphs_and_features
    import hpno
    layer = hpno.HierarchicalPathNetworkLayer(3, 4, 5, max_level=4)
    expected = permutation_matrix @ layer(g0, h0)
    actual = layer(g1, h1)
    npt.assert_almost_equal(
        expected.detach().numpy(),
        actual.detach().numpy(),
        decimal=5,
    )
def test_model_equivariance(graphs_and_features):
    """The full model must be equivariant to node permutation."""
    g0, g1, h0, h1, permutation_matrix = graphs_and_features
    import hpno
    model = hpno.HierarchicalPathNetwork(3, 4, 5, 2, max_level=4)
    expected = permutation_matrix @ model(g0, h0)
    actual = model(g1, h1)
    npt.assert_almost_equal(
        expected.detach().numpy(),
        actual.detach().numpy(),
        decimal=5,
    )
def test_readout_invariance(graphs_and_features):
    """Graph-level readout must be invariant to node permutation."""
    g0, g1, h0, h1, _ = graphs_and_features
    import hpno
    readout = hpno.GraphReadout(3, 4, 5, max_level=4)
    npt.assert_almost_equal(
        readout(g0, h0).detach().numpy(),
        readout(g1, h1).detach().numpy(),
        decimal=5,
    )
def test_model_and_readout_invariance(graphs_and_features):
    """Model composed with a readout must be invariant to node permutation."""
    g0, g1, h0, h1, _ = graphs_and_features
    import hpno
    model = hpno.HierarchicalPathNetwork(
        3, 5, 5, 2,
        max_level=4,
        readout=hpno.GraphReadout(5, 5, 6)
    )
    npt.assert_almost_equal(
        model(g0, h0).detach().numpy(),
        model(g1, h1).detach().numpy(),
        decimal=5,
    )
|
{"hexsha": "d4d4dd03ec909dbe796cf65b28fcd73800875d3a", "size": 2279, "ext": "py", "lang": "Python", "max_stars_repo_path": "hpno/tests/test_index_space_equivariance.py", "max_stars_repo_name": "choderalab/hpnotiq", "max_stars_repo_head_hexsha": "fa791fcbdd24150e238218c4c5799a75a6882b16", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hpno/tests/test_index_space_equivariance.py", "max_issues_repo_name": "choderalab/hpnotiq", "max_issues_repo_head_hexsha": "fa791fcbdd24150e238218c4c5799a75a6882b16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hpno/tests/test_index_space_equivariance.py", "max_forks_repo_name": "choderalab/hpnotiq", "max_forks_repo_head_hexsha": "fa791fcbdd24150e238218c4c5799a75a6882b16", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.606741573, "max_line_length": 78, "alphanum_fraction": 0.6401930671, "include": true, "reason": "import numpy", "num_tokens": 694}
|
import pandas as pd
# import matplotlib.pyplot as plt
# import seaborn as sns
import numpy as np
# import copy
# from scipy.stats import norm
# from sklearn import preprocessing
# Input: pre-processed real-estate records (absolute path to local data).
fileName = '/home/kazim/Desktop/projects/IE490/input/tubitak_data2_processesed2.csv'
df = pd.read_csv(fileName, sep = ',')
#preview data
df_prev = df
### Ilce 79
# df.drop(df.index[df.ilce_kod != 79], inplace=True)
# # df
# df.drop('ilce_kod', axis=1, inplace=True)
#
# df.info()
# Keep the raw district/neighborhood codes before they are one-hot encoded.
mahalle = df["mahalle_kod"]
ilce = df["ilce_kod"]
# df['mahalle_kod'].describe()
#we can drop yasal burut alani as it has almost 1 correlation with mevcut alan
df = df.drop('yasal_burut_alani', axis=1)
# df_pre = copy.deepcopy(df)
### One Hot Encoding for Categorical Variables
df = pd.get_dummies(df, columns=["ilce_kod"])
df = pd.get_dummies(df, columns=["mahalle_kod"])
# df.head()
# df.shape
# Model Training and Evaulation
#
# 1. Random Forest
from sklearn.ensemble import RandomForestRegressor
# Features / target split: predict the fair market value column.
X = df.drop('adil_piyasa_degeri_yasal_durum', axis=1)
y = df['adil_piyasa_degeri_yasal_durum']
RANDOM_STATE = 42
# regr = RandomForestRegressor(bootstrap=True,
# oob_score=True,
# # max_depth=10,
# # max_features=5,
# min_samples_leaf = 5,
# min_samples_split = 5,
# n_estimators = 500,
# random_state=RANDOM_STATE)
# rf = regr.fit(X, y)
# FIX: sklearn.externals.joblib was deprecated in scikit-learn 0.21 and
# removed in 0.23; prefer the standalone joblib package, falling back to the
# legacy location for old environments.
try:
    import joblib
except ImportError:  # legacy scikit-learn (< 0.23) environments
    from sklearn.externals import joblib
# joblib.dump(rf, 'trainedRF.pkl')
# Later you can load back the pickled model (possibly in another Python process) with:
rf = joblib.load('trainedRF.pkl')
# Out-of-bag predictions from the loaded random forest, and the relative
# absolute error per record.
preds = rf.oob_prediction_
df['prediction'] = preds
df['error'] = np.abs(df['adil_piyasa_degeri_yasal_durum'] -
                     df['prediction'])/df['adil_piyasa_degeri_yasal_durum']
# oob_error = 1 - rf.oob_score_
# print("oob error: %0.2f" % oob_error)
# # print(":",regr.oob_score_)# print(":",regr.oob_score_)
# print("R^2: %0.2f" % rf.score(X, y, sample_weight=None))
# print("20% error quantile: {0:.3f}".format(
# ((df[df.error <= 0.2].shape[0])/df.shape[0])))
# print(rf.predict(X.head(1)), y.head(1))
|
{"hexsha": "45099ed8c871b99dcff80ca033fda09d7b5c9c84", "size": 2324, "ext": "py", "lang": "Python", "max_stars_repo_path": "shiny/Python2.py", "max_stars_repo_name": "kazimsanlav/RealEstate", "max_stars_repo_head_hexsha": "3abdb8a4a35b3975b8f6ad4b11b64cd73a97901a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "shiny/Python2.py", "max_issues_repo_name": "kazimsanlav/RealEstate", "max_issues_repo_head_hexsha": "3abdb8a4a35b3975b8f6ad4b11b64cd73a97901a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "shiny/Python2.py", "max_forks_repo_name": "kazimsanlav/RealEstate", "max_forks_repo_head_hexsha": "3abdb8a4a35b3975b8f6ad4b11b64cd73a97901a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-08T18:41:21.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-08T18:41:21.000Z", "avg_line_length": 27.3411764706, "max_line_length": 87, "alphanum_fraction": 0.6148881239, "include": true, "reason": "import numpy,from scipy", "num_tokens": 653}
|
"""
Experiments of the paper 'The Approximation of the Dissimilarity
Projection' accepted at PRNI2012.
Quantification of the dissimilarity approximation of tractography data
across different prototype selection policies and number of prototypes.
Copyright (c) 2012, Emanuele Olivetti
Distributed under the New BSD license (3-clauses)
"""
import numpy as np
import nibabel as nib
from dipy.tracking.distances import bundles_distances_mam
from dipy.io.dpy import Dpy
from dissimilarity_common import *
if __name__ == '__main__':
np.random.seed(0)
figure = 'small_dataset' # 'big_dataset' #
if figure=='small_dataset':
filename = 'data/subj_05/101_32/DTI/tracks_dti_10K.dpy'
prototype_policies = ['random', 'fft', 'sff']
color_policies = ['ko--', 'kx:', 'k^-']
elif figure=='big_dataset':
filename = 'data/subj_05/101_32/DTI/tracks_dti_3M.dpy'
prototype_policies = ['random', 'sff']
color_policies = ['ko--', 'k^-']
num_prototypes = [3, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50]
iterations = 50
print "Loading tracks."
dpr = Dpy(filename, 'r')
tracks = dpr.read_tracks()
dpr.close()
tracks = np.array(tracks, dtype=np.object)
# tracks = tracks[:100]
print "tracks:", tracks.size
rho = compute_correlation(tracks, bundles_distances_mam, prototype_policies, num_prototypes, iterations)
plot_results(rho, num_prototypes, prototype_policies, color_policies)
|
{"hexsha": "c95f16bac3610aa860fb9747396cc7047253aa68", "size": 1471, "ext": "py", "lang": "Python", "max_stars_repo_path": "dissimilarity_streamlines.py", "max_stars_repo_name": "emanuele/prni2012_dissimilarity", "max_stars_repo_head_hexsha": "499cdb7715c47ba59f8eab2396d56111d9b86cee", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-08-28T14:04:47.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-28T14:04:47.000Z", "max_issues_repo_path": "dissimilarity_streamlines.py", "max_issues_repo_name": "emanuele/prni2012_dissimilarity", "max_issues_repo_head_hexsha": "499cdb7715c47ba59f8eab2396d56111d9b86cee", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dissimilarity_streamlines.py", "max_forks_repo_name": "emanuele/prni2012_dissimilarity", "max_forks_repo_head_hexsha": "499cdb7715c47ba59f8eab2396d56111d9b86cee", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9782608696, "max_line_length": 108, "alphanum_fraction": 0.6981645139, "include": true, "reason": "import numpy", "num_tokens": 414}
|
[STATEMENT]
lemma perfect_injective_eq_homeomorphic_map:
"perfect_map X Y f \<and> inj_on f (topspace X) \<longleftrightarrow> homeomorphic_map X Y f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (perfect_map X Y f \<and> inj_on f (topspace X)) = homeomorphic_map X Y f
[PROOF STEP]
by (simp add: homeomorphic_eq_injective_perfect_map)
|
{"llama_tokens": 132, "file": null, "length": 1}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 24 09:40:28 2018
@author: Paulo Augusto
"""
import numpy as np
#from numpy import fft
import matplotlib.pyplot as plt
#import scipy.signal as sig
import os
import random
import emgReaderClass_v2 as erc
import threading
import multiprocessing
#import dataPlotter
import snkbrain
import scipy.stats as st
import cProfile
import re
# This script is compatible with 'emgReaderClass_v2', that
# reads the .csv files generated by 'movementSaver.m', from
# the folder './csv/'
# --- GA hyper-parameters and run configuration -------------------------------
profiling=False
bias=0 # If bias = 1, every cromossome will have a non frequency dependant DNA
maxGen=2000 # The max number of generations
startOver=True# If True, the code will not consider the last simulation
tamPop=30 # Population number
maxFreq=180 # This is the max Frequency to consider #240
freqStep=3 # For freqStep=3 -> The code will consider [1,2,3],[3,4,5], etc# 3
taxaMut=0.01 # The mutation rate
taxaMutMin=0.01 # Minimum mutation rate
taxaMutMax=5.0 # Maximum mutation rate
chanceMut=60 # The chance of mutation/10000 (only for the "absolute" mutation)
bestTypes=[] # Logging variable
continuous=True # If True, the code will use a continuous fitness function (not recommended)
binaryFit=False # If True, the fitness of each individual will be 1 for each right guess
# If False, it will be continuous if "continuous" is True, or 1 point if
# it guesses correctly, and 1.5 if it guesses with an confidence above
# a "multFac" threshold
multFac=1.5 #
binaryCrossChance=0.5 # The chance of ocurring a binary cross. 1 minus this
# is the chance of ans mean crossing
vectorialMutationChance=0.5 # The chance of vectorial mutation. 1 minus this is
# chance of an absolute mutation
taxaMutMult=4.0 # The factor by which taxaMut will be multiplied
initialModule=1 # Initial module of cromossome values
sourceType='ninapro'
ninaprofolders=['csv1','csv2','csv3','csv6','csv7','csv8']
fs=2000
##############################################################################
# --- Shared mutable state used by the functions below ------------------------
guid=0 # Individual ID (logging variable)
param=[]
real=[] # DATA
origin=[] # DATA
fv=[] # DATA
frv=[] # DATA
nArq=0 # DATA
# lastValues, botThs and topThs to be used in each archive
parameters={'bicepsinteiro.txt': [400,20,10],\
            'bicepsmetade.txt': [400,20,10],\
            'emgwk.txt': [400,20,10],\
            'emgmed.txt':[400,20,10],\
            # 'xoxoxo.txt':[300,40,30],\
            'emgabrindo.txt':[500,20,20],\
            'emgapertando.txt':[400,20,20]}
# Short Portuguese aliases for the statistics used in get_parameters().
curtose = st.kurtosis
obliquidade = st.skew
variancia = np.var
media = np.mean
desvio_padrao = np.std
def rms(v):
    """Root-mean-square value of the samples in ``v``."""
    energy = sum(sample ** 2 for sample in v)
    return np.sqrt(energy / len(v))
def centroide_espectral(v):
    """Spectral centroid of the 6-240 Hz band of the FFT magnitude vector ``v``.

    Bin bounds are converted from Hz using the module-level sampling
    frequency ``fs``. Returns the (1-based) index-weighted mean position of
    the band as a float.
    """
    global fs
    max_freq=240
    min_freq=6
    max_i=int(max_freq*len(v)/fs)
    min_i=int(min_freq*len(v)/fs)
    band = v[min_i:max_i]
    # BUG FIX: the original overwrote the band slice with the weighted sum
    # (a scalar) and then called sum() on that scalar, raising TypeError on
    # every call. Keep the band and its weighted sum separate.
    weighted = sum(i * value for i, value in zip(range(1, 1 + len(band)), band))
    soma = sum(band)
    return float(weighted) / soma
def feature_scaling(v):
    """Standardize ``v`` to zero mean and unit variance (z-score)."""
    center = np.mean(v)
    shifted = [sample - center for sample in v]
    return shifted / desvio_padrao(v)
def get_parameters(timeValues,freqValues):
    """Build the 12-element feature vector fed to the network: six
    statistics of the peak-normalized time signal followed by the same six
    statistics of the peak-normalized frequency signal (frequency variance
    scaled down by 1000, as in the original feature design)."""
    time_peak = np.max(timeValues)
    freq_peak = np.max(freqValues)
    # freq=[a*2/(len(freqValues)) for a in freqValues]
    # tyme=[a/(1000) for a in timeValues]
    freq = [2 * sample / freq_peak for sample in freqValues]
    tyme = [sample / time_peak for sample in timeValues]
    features = [
        curtose(tyme),
        obliquidade(tyme),
        variancia(tyme),
        media(tyme),
        desvio_padrao(tyme),
        rms(tyme),
        curtose(freq),
        obliquidade(freq),
        variancia(freq) / 1000,
        media(freq),
        desvio_padrao(freq),
        rms(freq),
    ]
    return features
## Method that return the number of right guesses of and individual
#def countGuesses(indiv):
#
#
# score=0
#
# for arq in range(0,nArq):
#
# for i in range(0,len(real[arq])):
# tam=len(real[arq][i])
#
# x= getFreqVector(fv[arq][i])
# x=np.array(x)
#
# pont=x*indiv.cromo.freqFactor
## test.append(pont)
#
# if np.argmax(pont[0]) == arq:
#
# score+=1
#
# return score
#
## This function just multiplies the chromossome of an individual by the frequency
## vector of an signal, return the result. The position that gets the higher
## number represent from which archive it thinks this signal belongs
#def sayWho(indiv,real,fv):
# tam=len(fv)
# x= getFreqVector(fv)
# x=np.array(x)
# pont=x*indiv.cromo.freqFactor
# return pont
# Gets the *.txt files
def getArqs():
    """Return the ``.txt`` data files in the current directory, in reversed
    directory-listing order."""
    found = [entry for entry in os.listdir('.')
             if os.path.splitext(entry)[1] == '.txt']
    found.reverse()
    return found
# Individual class
class ind:
    """One GA individual: a unique id, a fitness score, a logging marker and
    a 12-input / 12-hidden / 6-output network from the external ``snkbrain``
    module as its chromosome."""
    def __init__(self):
        global guid
        # Unique, monotonically increasing individual id (logging only).
        self.uid=guid
        guid+=1
        # Sentinel fitness so un-evaluated individuals never win selection.
        self.fit=-1000
        # Records which genetic operator produced this individual.
        self.marker='none'
        self.brain=snkbrain.brain([12,12,6],[1,1,0])
def getParameters():
    """Populate the global ``param`` with the scaled feature vector of every
    signal in the global ``real``/``frv`` data."""
    global param
    param = []
    for signals, spectra in zip(real, frv):
        param.append(
            [feature_scaling(get_parameters(sig, spec))
             for sig, spec in zip(signals, spectra)]
        )
# This function takes the fft data od an signal, and returns a similar vector,
# but instead of getting one element per frequency it take a number of freqStep
# frequencies, sum it and divide by freqStep
def getFreqVector(fv):
    """Condense an FFT magnitude vector into one value per ``freqStep`` Hz
    band up to ``maxFreq`` Hz (each band is the scaled sum of its bins)."""
    x=[]
    tam=float(len(fv))
    # Number of FFT bins per 1 Hz, given the module-level sampling rate fs.
    norm=int(np.ceil(tam*1/fs))
    step=freqStep*norm
    for j in range(0,norm*maxFreq,step):
        x.append(sum(fv[j:j+step])*2/tam)
        ##### BIAS ######
        # NOTE(review): with bias==1 this is meant to append a constant -1
        # input, but j==step*maxFreq-1 looks unlikely to ever hold for the
        # values of j produced by the range above — confirm before relying
        # on the bias feature (bias defaults to 0).
        if bias==1 and j==step*maxFreq-1:
            x.append(-1)
        #################
    return x
# Read the data archives. The original signal is stored in origin. Each signal
# Is stored in real. real[arq][5] will contain the 5th signal of the arq'th file
# (as read by getArqs). The fft data will be stored at "fv" (indexes works the
# the same as for "real"). The frequency vector as got by getFrequencyVector
# is stored at frv
def readArqs(source,muscle,interval):
    """Load EMG data into the module-level ``real``/``fv``/``frv`` globals.

    source    -- 'bioplux' (local .txt files) or 'ninapro' (csv folders)
    muscle    -- muscle/channel selector forwarded to the csv reader
    interval  -- [start, stop] slice applied to each movement's repetitions

    Python 2 syntax (``print nArq``). Indentation below is reconstructed
    from the statement semantics — verify against the original file.
    """
    it=interval
    reader=erc.emgReader()
    arqVec=getArqs()
    # NOTE(review): this assignment is local and does not update the module
    # global nArq.
    nArq=len(arqVec)
    global real,fv,frv
    if source=='bioplux':
        print nArq
        # One entry per .txt archive; thresholds come from `parameters`.
        for arq in range(0,nArq):
            origin.append([])
            real.append([])
            fv.append([])
            frv.append([])
            reader.lastValues=parameters[arqVec[arq]][0]
            reader.topThs=parameters[arqVec[arq]][1]
            reader.botThs=parameters[arqVec[arq]][2]
            origin[arq],real[arq],fv[arq] = reader.analyzeEmg(arqVec[arq],fs)
    elif source=='ninapro':
        global ninaprofolders
        # Gather every subject folder, then merge the selected repetition
        # interval of each movement across folders.
        realt,fvt=[],[]
        for folder in ninaprofolders:
            realt.append([])
            fvt.append([])
            realt[-1],fvt[-1]=reader.getCsvData(muscle,folder)
        for arq in range(0,len(realt[0])):
            real.append([])
            fv.append([])
            for r,f in zip(realt,fvt):
                real[arq].extend(r[arq][ it[0]:it[1] ])
                fv[arq].extend(f[arq][ it[0]:it[1] ])
        # Keep only the six movements used for training (1-based ids 18, 21,
        # 22, 25, 26, 31).
        training=[18-1,21-1,22-1,25-1,26-1,31-1]
        real=[real[i] for i in training]
        fv=[fv[i] for i in training]
    # Condensed frequency vectors for every loaded signal.
    for arq in range(0,len(real)):
        frv.append([])
        for i in range(0,len(fv[arq])):
            frv[arq].append(getFreqVector(fv[arq][i]))
# Fitness method. Each signal frequency vector is multiplied by indiv
# chromossome. The numbers got are reconized as the score of each archive.
# Let's say that the 0th element gets the largest number. That mean this
# individual "thinks" that that signal belongs to archive 4 (getArqs()[0])
# The fitness is then calculated by the number of right guesses of each
# individual
def fitness(indiv):
    """Score ``indiv`` over every signal in the global data set.

    The network output for each signal is compared against the index of the
    archive the signal came from, accumulating a cross-entropy-style score
    (more negative = worse). Indentation below is reconstructed from the
    statement semantics — verify against the original file.
    """
    global nArq
    score=0
    for arq in range(0,len(real)):
        for i in range(0,len(fv[arq])):
            tam=len(real[arq][i])
            # inpt=frv[arq][i]
            inpt=param[arq][i]
            l,pont= indiv.brain.run(inpt)#np.array(frv[arq][i])*indiv.cromo.freqFactor
            # Cross-entropy of the output vector against one-hot target ref.
            def error(pont,ref):
                score=0
                for i in range(0,len(pont)):
                    if i==ref:
                        t=1
                    else:
                        t=0
                    score+= t * np.log(pont[i]) + (1-t) * np.log(1-pont[i])
                    # score+= t*np.log((pont[i]+1)/2)+(1-t)*np.log(1-(pont[i]+1)/2)
                return score
            # The `if True:` makes the argmax-based scoring below dead code;
            # only the cross-entropy branch is ever used.
            if True:
                score+=error(pont,arq)
            else:
                maxIndex= np.argmax(pont)
                if maxIndex == arq:
                    if not binaryFit:
                        if continuous:
                            score+=pont[maxIndex]
                        else:
                            if np.max(np.array(pont)) >=multFac*np.mean(np.array(pont)):
                                score+=1.5
                            else:
                                score+=1
                    else:
                        score+=1
    return score
# Population class
class population:
def __init__(self):
self.population=[]
def initPop(self,tamPop):
for i in range(0,tamPop):
self.population.append(ind())
def evaluateAll(self):
for ind in self.population:
ind.fit=fitness(ind)
def getBest(self):
return self.population[np.argmax(self.population)]
# Mutation method. The mutation can be vetorial or absolute.
# Mutation method. The mutation can be vetorial or absolute.
def mutate(indiv):
    """Mutate ``indiv``'s network weights in place.

    With probability ``vectorialMutationChance``: add a random direction of
    norm ``taxaMut`` to each weight matrix ('vectorial'). Otherwise: perturb
    individual weights with probability ``chanceMut``/10000 ('absolute').
    Indentation below is reconstructed from the statement semantics —
    verify against the original file.
    """
    global taxaMut,chanceMut
    if random.random()<vectorialMutationChance:
        # Fresh random individual provides the mutation direction.
        vec= ind()#tknake.snake(indiv.root,x=0,y=0,lenght=indiv.lenght,width=indiv.width,segDis=indiv.segmentDistance).brainMatrixes
        for indivLayer,layer in zip(indiv.brain.brainMatrixes,vec.brain.brainMatrixes):
            # Normalize the random layer to unit Frobenius norm, then scale
            # to the mutation rate before adding it.
            amp=np.sqrt(sum([sum([pow(i,2) for i in line.A1]) for line in layer]))
            layer/=amp
            layer*=taxaMut
            indivLayer+=layer
        # for i in range(0,len(vec.brain.biasMatrix)):
        # soma=sum(vec.brain.biasMatrix[i])
        # for j in range(0,len(vec.brain.biasMatrix[i])):
        # vec.brain.biasMatrix[i][j]*=taxaMut/soma
        # indiv.brain.biasMatrix[i][j]+=vec.brain.biasMatrix[i][j]
        indiv.marker='vectorial'
    else:
        for layer in indiv.brain.brainMatrixes:
            for line in layer:
                for value in line.A1:
                    if random.random()*10000<chanceMut:
                        mut=(2*random.random()-1)*taxaMut
                        # NOTE(review): rebinding the loop variable likely
                        # does not write back into the matrix — confirm
                        # whether the absolute branch mutates anything.
                        value+=mut
        # for i in range(0,len(indiv.brain.biasMatrix)):
        # for j in range(0,len(indiv.brain.biasMatrix[i])):
        # if random.random()*10000<chanceMut:
        # mut=(2*random.random()-1)*taxaMut
        # indiv.brain.biasMatrix[i][j]+=mut
        indiv.marker='absolute'
# Crossover by adding different chromossomes and dividing by the number of
# fathers
def meanCrossover(pais):
    """Child whose weight matrices are the element-wise mean of all parents'
    matrices, mutated and tagged 'meaned'."""
    filho= ind()
    n_layers = len(pais[0].brain.brainMatrixes)
    averaged = []
    for layer_idx in range(n_layers):
        layer_total = sum(pai.brain.brainMatrixes[layer_idx] for pai in pais)
        averaged.append(layer_total / len(pais))
    filho.brain.brainMatrixes = averaged
    # for i in range(0,len(pais[0].brain.biasMatrix)):
    # for j in range(0,len(pais[0].brain.biasMatrix[i])):
    # soma=sum([pai.brain.biasMatrix[i][j] for pai in pais])/len(pais)
    # filho.brain.biasMatrix[i][j]=soma
    mutate(filho)
    filho.marker+=' meaned '
    return filho
# Crossover by replacing the sons genes by his mother's or his father's, with
# 50% chance
# Crossover by replacing the sons genes by his mother's or his father's, with
# 50% chance
def binaryCrossover(pais):
    """Child built by taking each individual weight from parent 0 or
    parent 1 with 50% probability, then mutated and tagged 'binerized'.
    Indentation below is reconstructed from the statement semantics —
    verify against the original file.
    """
    filho= ind()#tknake.snake(pais[0].root,x=0,y=0,lenght=pais[0].lenght,width=pais[0].width,segDis=pais[0].segmentDistance)
    for i in range(0,len(filho.brain.brainMatrixes)):
        for j in range(0,len(filho.brain.brainMatrixes[i])):
            for k in range(0,len(filho.brain.brainMatrixes[i][j])):
                # One independent coin flip per weight.
                if random.random()<0.5:
                    filho.brain.brainMatrixes[i][j,k]=pais[0].brain.brainMatrixes[i][j,k]
                else:
                    filho.brain.brainMatrixes[i][j,k]=pais[1].brain.brainMatrixes[i][j,k]
    # for i in range(0,len(pais[0].brain.biasMatrix)):
    # for j in range(0,len(pais[0].brain.biasMatrix[i])):
    # if random.random()<0.5:
    # filho.brain.biasMatrix[i][j]=pais[0].brain.biasMatrix[i][j]
    # else:
    # filho.brain.biasMatrix[i][j]=pais[1].brain.biasMatrix[i][j]
    mutate(filho)
    filho.marker+=' binerized '
    return filho
# Mixed crossover
def weightedCrossover(pais):
    """Apply binary crossover with probability ``binaryCrossChance``,
    mean crossover otherwise."""
    roll = random.random()
    if roll >= binaryCrossChance:
        return meanCrossover(pais)
    return binaryCrossover(pais)
# Tournament. Returns the best fitted individual
def torneio(pop):
    """Return the best-fitted individual; on ties the later one wins
    (>= comparison, as in the original)."""
    winner = pop.population[0]
    for candidate in pop.population[1:]:
        if candidate.fit >= winner.fit:
            winner = candidate
    return winner
# Generate a new population by performing crossovers with best and the reminder
# population
def genNewPop(best, pop):
    # Build the next generation: keep ``best`` untouched (elitism) and
    # replace every other individual with a crossover of ``best`` and it.
    newpop = population()
    newpop.population.extend(
        indiv if indiv == best else weightedCrossover([best, indiv])
        for indiv in pop.population
    )
    return newpop
# Remove the n less fitted individuals, replacing them by new ones
def removeSuckers(pop, n):
    # Replace the ``n`` least-fitted individuals with brand new ones.
    # Sorting ascending by fitness puts the worst at the front.
    pop.population.sort(key=lambda indiv: indiv.fit)
    for pos in range(n):
        novo = ind()
        novo.marker = 'new'
        pop.population[pos] = novo
# Returns the mean fitness of the population in pop
def getPopMean(pop):
    # Mean fitness of the population; the 0.0 start value keeps the
    # accumulation in floating point, as the original did.
    return sum((indiv.fit for indiv in pop.population), 0.0) / len(pop.population)
## Not used. Divide all chromosomes of a population by the highest number
## amongst them
#def normalizePop(pop):
# for indiv in pop.population:
# maxF=0
# for line in indiv.cromo.freqFactor:
# for i in range(0,len(np.array(line)[0])):
# if abs(line[0,i]) > maxF:
# maxF=abs(line[0,i])
#
# for line in indiv.cromo.freqFactor:
# for i in range(0,len(np.array(line)[0])):
# line[0,i]/=maxF
# Plot a graph
def plotGens(best,mean):
    # Plot the best-fitness history as green dots and the mean-fitness
    # history as a solid blue line on the current matplotlib figure.
    plt.plot(best,'go')
    plt.plot(mean,'b-')
# Class for controlling the GA variables
# Class for controlling the GA variables
class populationControl():
    """Adaptive controller for the GA hyper-parameters.

    Watches the best fitness across generations and, when it stagnates,
    sweeps the global mutation rate (``taxaMut``) between ``taxaMutMin``
    and ``taxaMutMax`` to try to escape local optima.
    """
    # NOTE(review): a ``global`` statement at class-body scope has no
    # effect on the methods below; it reads as documentation of which
    # module-level GA parameters this class snapshots in __init__.
    global tamPop,\
        taxaMut,\
        chanceMut,\
        bestAll,\
        bias,\
        maxGen,\
        tamPop,\
        taxaMut,\
        taxaMutMax,\
        chanceMut,\
        continuous,\
        binaryFit,\
        multFac,\
        binaryCrossChance,\
        taxaMutMult,\
        taxaMutMin
    def __init__(self):
        # Snapshot the current module-level GA settings as private fields.
        self._tamPop=tamPop
        self._taxaMut=taxaMut
        self._chanceMut=chanceMut
        self._bias=bias
        self._maxGen=maxGen
        self._tamPop=tamPop
        self._taxaMutMin=taxaMutMin
        self._taxaMutMax=taxaMutMax
        self._chanceMut=chanceMut
        self._continuous=continuous
        self._binaryFit=binaryFit
        self._multFac=multFac
        self._binaryCrossChance=binaryCrossChance
        self._taxaMutMult=taxaMutMult
        # Consecutive stagnant generations observed so far.
        self._counter=0
        # True while taxaMut is being swept upwards.
        self._expansion=False
    def control(self,gen,counter,best,last):
        # Adjust the global mutation rate based on fitness progress.
        # :param gen: current generation (adaptation only starts after 25)
        # :param counter: unused here; kept for caller compatibility
        # :param best: best individual of this generation
        # :param last: best individual of the previous generation
        global taxaMut
        # taxaMut=self._taxaMutMax
        ascendingCounter=0
        if gen>25:
            if best.fit<=last.fit*1.001: # If the fitness doesn't grow by 0.1%
                self._counter+=1
            else:
                # taxaMut=self._taxaMut
                # NOTE(review): this binds a *local* chanceMut, so the
                # module-level chanceMut is never reset -- confirm intent.
                chanceMut=self._chanceMut
                self._expansion=False
                self._counter=0
                ascendingCounter=0
            if self._counter==10: # If the fitness doesn't grow in n generations
                if self._expansion: # If taxaMut is increasing
                    if taxaMut<self._taxaMutMax: # If taxaMut is less than the maximum
                        taxaMut*=self._taxaMutMult
                    else: # If taxaMut bigger than the maximum
                        self._expansion=False
                else: # If taxaMut is decreasing
                    if taxaMut>self._taxaMutMin: # If it is bigger than the minimum
                        taxaMut/=self._taxaMutMult
                    else: # If it is less than the minimum
                        self._expansion=True
                self._counter=0
def main():
    # Run the whole genetic-algorithm training loop for ``maxGen``
    # generations and return the final population. Written for Python 2
    # (print statements). Relies heavily on module-level GA settings.
    global maxFreq,\
        freqStep,\
        tamPop,\
        taxaMut,\
        chanceMut,\
        nArq,\
        bestAll,\
        startOver,\
        bestTypes
    gen=0
    counter=0
    last=ind()
    bestVec=[]    # best fitness per generation (for plotting)
    meanVec=[]    # mean fitness per generation (for plotting)
    taxaVec=[]    # mutation-rate history in dB-like scale (for plotting)
    taxaMut=taxaMutMax
    # plotter=dataPlotter.dataPlotter('Geracao','Melhor de Todos',bestVec)
    # threading.Thread(target=plotter.start).start()
    controller=populationControl()
    # Number of data files depends on the selected data source.
    if sourceType=='bioplux':
        nArq=len(getArqs())
    elif sourceType=='ninapro':
        nArq=len(real)
    else:
        1  # no-op: unknown source type leaves nArq untouched
    if startOver:
        # Fresh run: read the data files and create a random population.
        readArqs(sourceType,'flx',[0,3])
        getParameters()
        pop = population()
        pop.initPop(tamPop)
    else:
        # Resume from the population kept in the global ``bestAll``.
        print 'Didnt start over'
        pop=bestAll
    while gen<maxGen:
        gen+=1
        pop.evaluateAll()
        # Replace the worst third of the population with new individuals.
        removeSuckers(pop,tamPop/3)
        best=torneio(pop)
        # Record the crossover type that produced each new champion.
        if not last.uid==best.uid:
            bestTypes.append(best.marker)
        print(gen,best.fit,':',best.marker,tamPop,taxaMut,chanceMut,maxGen)#,':', [p.fit for p in population]
        pop=genNewPop(best,pop)
        ###########################################################################
        controller.control(gen,counter,best,last)
        last=best
        taxaVec.append(20*np.log(taxaMut))
        bestVec.append(last.fit)
        meanVec.append(getPopMean(pop))
        bestAll=pop
        ###########################################################################
        # createSuckers(pop.tamPop/3)
        # normalizePop(pop)
    plotGens(bestVec,meanVec)
    plotGens(bestVec,taxaVec)
    pop.evaluateAll()
    print([p.fit for p in pop.population])
    return pop
def trainSomeone(indiv, number, learning_rate):
    # Supervised training: run ``number`` passes of backpropagation over
    # every feature vector of every source file, using a one-hot-like
    # target (0.999 for the true class, 0.001 elsewhere).
    # NOTE(review): the inner loop is bounded by ``len(fv[arq])`` but
    # reads ``param[arq][i]`` -- assumes both globals are index-aligned.
    passes = 0
    while passes < number:
        for arq in range(len(real)):
            for i in range(len(fv[arq])):
                target = [0.999 if pos == arq else 0.001 for pos in range(6)]
                indiv.brain.train(param[arq][i], target, learning_rate)
        passes += 1
# Check the number of correct classifications
#score=0
#total=0
#for arq in range(0,len(real)):
# for i in range(0,len(fv[arq])):
# total+=1
# l,o=bestAll.population[tamPop-1].brain.run(param[arq][i])
# if np.argmax(o)==arq:
# score+=1
#print score,' de ',total
#Treinar
#last=-100
#count=0
#lr=0.01
#errorcount=0
#flag=0
#while fitness(bestAll.population[tamPop-2])<-17:
# f=fitness(bestAll.population[tamPop-2])
# if last>f:
# lr/=1.1
# errorcount+=1
# else:
# count+=1
# if count==11:
# lr*=1.03
# count=0
# if errorcount==3:
# if flag>=3:
# print 'to many errors'
# break
# else:
# flag+=1
# lr=0.01
# errorcount=0
# trainSomeone(bestAll.population[tamPop-2],50,lr)
# last = f
# print f,lr
def treinar(ind,init_lr,goal):
    # Train ``ind`` with an adaptive learning rate until its fitness
    # reaches ``goal``; returns the final learning rate. On a fitness
    # drop the weights are rolled back and the rate shrinks; after 11
    # consecutive improvements the rate grows slightly; after 3
    # consecutive drops the rate is reset (at most 3 resets, then abort).
    # Written for Python 2 (print statements).
    global bestAll,tamPop
    last=-1000
    count=0
    lr=init_lr
    errorcount=0
    flag=0
    f=-1000
    # Snapshot of the weights to roll back to if fitness worsens.
    lastbrain=[a.copy() for a in ind.brain.brainMatrixes]
    while f<goal:
        f=fitness(ind)
        if last>f:
            # Fitness decreased: shrink the rate and restore the weights.
            lr/=1.1
            errorcount+=1
            ind.brain.brainMatrixes=lastbrain
        else:
            # Fitness improved (or first pass): accept the new weights.
            lastbrain=[a.copy() for a in ind.brain.brainMatrixes]
            errorcount=0
            count+=1
        if count==11:
            # Many consecutive improvements: gently speed learning up.
            lr*=1.03
            count=0
        if errorcount==3:
            if flag>=3:
                print 'to many errors'
                break
            else:
                flag+=1
                lr=init_lr
                errorcount=0
        trainSomeone(ind,100,lr)
        last = f
        print f,lr
    return lr
def treinar_r(ind,goal):
global bestAll,tamPop
last=-1000
count=0
lr=0.01
min_lr=0.00001
max_lr=0.01
errorcount=0
assertcount=0
descended=False
flag=0
lastbrain=[a.copy() for a in ind.brain.brainMatrixes]
bestbrain=[a.copy() for a in ind.brain.brainMatrixes]
bestfit=last
while fitness(ind)<-goal:
trainSomeone(ind,20,lr)
f=fitness(ind)
if f>bestfit:
bestfit=f
bestbrain=[a.copy() for a in ind.brain.brainMatrixes]
if last>f:
ind.brain.brainMatrixes=last_brain
if descended:
lr/=1.12
if lr<=min_lr:
print 'Too much errors'
return bestbrain,lr
else:
lr/=1.1
if lr<=min_lr:
lr=max_lr
descended=True
else:
last_brain=[a.copy() for a in ind.brain.brainMatrixes]
assertcount+=1
descended=False
# if assertcount==10:
# assertcount=0
# lr*=1.03
last = f
print f,lr
return lr
# Entry point: profile a full GA run when ``profiling`` is enabled;
# otherwise do nothing (the bare ``1`` is a no-op placeholder -- the
# unprofiled call is commented out below).
if profiling:
    cProfile.run('main()')
else:
    1
# main()
|
{"hexsha": "a7e69ce889e8fb7447a38e0924445a5a1cffd1a2", "size": 24331, "ext": "py", "lang": "Python", "max_stars_repo_path": "EMG_NN_v3.py", "max_stars_repo_name": "Kotzly/EMG_AG", "max_stars_repo_head_hexsha": "b88b2a14d1d11df3857b1832654a119894d4f97c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "EMG_NN_v3.py", "max_issues_repo_name": "Kotzly/EMG_AG", "max_issues_repo_head_hexsha": "b88b2a14d1d11df3857b1832654a119894d4f97c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "EMG_NN_v3.py", "max_forks_repo_name": "Kotzly/EMG_AG", "max_forks_repo_head_hexsha": "b88b2a14d1d11df3857b1832654a119894d4f97c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2264900662, "max_line_length": 133, "alphanum_fraction": 0.5197073692, "include": true, "reason": "import numpy,from numpy,import scipy", "num_tokens": 6258}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
config.py
Configuration settings for Mekamon control
"""
__author__ = "Alex Watson"
__copyright__ = "Copyright 2019"
import numpy as np
# Replay these messages to take control of the Mekamon
init_cmd_1 = [16]  # 02101300
init_cmd_2 = [7, 1]  # 0307010c00
stop_motion_cmd = [6, 0, 0, 0]  # 02060101010c00
# The three command lists have different lengths, so this is a ragged
# (object) array. NumPy >= 1.24 raises an error for ragged input unless
# dtype=object is given explicitly, so state it here.
pwn_mekamon_list = np.array([init_cmd_1, init_cmd_2, stop_motion_cmd], dtype=object)
# Mekamon settings
default_height = 40  # [0...127]
# text messages
default_cmd_desc = "Executing command"
# BLE settings
message_delay = 0.05  # Time to sleep after sending message
# Server settings
# Here we define the UDP IP address as well as the port number that we have
# already defined in the client python script.
UDP_IP_ADDRESS = "192.168.4.2"
UDP_PORT_NO = 6789
|
{"hexsha": "abf2b7dfc650770ff09cc604ac0fe8a50c433aac", "size": 804, "ext": "py", "lang": "Python", "max_stars_repo_path": "mekamon_api/config.py", "max_stars_repo_name": "zredlined/control-my-mekamon", "max_stars_repo_head_hexsha": "2ce7096710608002db8e5dbf2cc3ebb044a494c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-02-06T21:35:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T11:53:02.000Z", "max_issues_repo_path": "mekamon_api/config.py", "max_issues_repo_name": "zredlined/control-my-mekamon", "max_issues_repo_head_hexsha": "2ce7096710608002db8e5dbf2cc3ebb044a494c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-11T20:09:47.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-11T20:09:47.000Z", "max_forks_repo_path": "mekamon_api/config.py", "max_forks_repo_name": "zredlined/control-my-mekamon", "max_forks_repo_head_hexsha": "2ce7096710608002db8e5dbf2cc3ebb044a494c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-02-07T13:00:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-04T17:05:18.000Z", "avg_line_length": 22.9714285714, "max_line_length": 75, "alphanum_fraction": 0.7350746269, "include": true, "reason": "import numpy", "num_tokens": 243}
|
# Long multiplication of two non-negative integers given as decimal
# strings; returns their product as a string (avoids double-precision
# overflow for very large numbers).
longmult <- function(xstr, ystr)
{
  # get the number described in each string (as a vector of digits)
  getnumeric <- function(xstr) as.numeric(unlist(strsplit(xstr, "")))
  x <- getnumeric(xstr)
  y <- getnumeric(ystr)
  # multiply each pair of digits together (outer product)
  mat <- apply(x %o% y, 1, as.character)
  # loop over columns, then rows, adding zeroes to end of each number in
  # the matrix to get the correct positional (place-value) shift
  ncols <- ncol(mat)
  cols <- seq_len(ncols)
  for(j in cols)
  {
    zeroes <- paste(rep("0", ncols-j), collapse="")
    mat[,j] <- paste(mat[,j], zeroes, sep="")
  }
  nrows <- nrow(mat)
  rows <- seq_len(nrows)
  for(i in rows)
  {
    zeroes <- paste(rep("0", nrows-i), collapse="")
    mat[i,] <- paste(mat[i,], zeroes, sep="")
  }
  # add zeroes to the start of each number, so they are all the same length
  len <- max(nchar(mat))
  strcolumns <- formatC(cbind(as.vector(mat)), width=len)
  strcolumns <- gsub(" ", "0", strcolumns)
  # line up all the numbers below each other, one digit per matrix cell
  strmat <- matrix(unlist(strsplit(strcolumns, "")), byrow=TRUE, ncol=len)
  # convert to numeric and add them column-wise
  mat2 <- apply(strmat, 2, as.numeric)
  sum1 <- colSums(mat2)
  # repeat the process on each of the totals, until each total is a single
  # digit (i.e. until all carries have been fully propagated)
  repeat
  {
    ntotals <- length(sum1)
    totals <- seq_len(ntotals)
    for(i in totals)
    {
      zeroes <- paste(rep("0", ntotals-i), collapse="")
      sum1[i] <- paste(sum1[i], zeroes, sep="")
    }
    len2 <- max(nchar(sum1))
    strcolumns2 <- formatC(cbind(as.vector(sum1)), width=len2)
    strcolumns2 <- gsub(" ", "0", strcolumns2)
    strmat2 <- matrix(unlist(strsplit(strcolumns2, "")), byrow=TRUE, ncol=len2)
    mat3 <- apply(strmat2, 2, as.numeric)
    sum1 <- colSums(mat3)
    if(all(sum1 < 10)) break
  }
  # concatenate the single digits together into the final product string
  ans <- paste(sum1, collapse="")
  ans
}
# Demo: square 2^64 (= "18446744073709551616") using long multiplication.
a <- "18446744073709551616"
longmult(a, a)
|
{"hexsha": "aa91151788b9e864a46eb482525bac15f8cc7ef0", "size": 1962, "ext": "r", "lang": "R", "max_stars_repo_path": "Task/Long-multiplication/R/long-multiplication-2.r", "max_stars_repo_name": "LaudateCorpus1/RosettaCodeData", "max_stars_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_stars_repo_licenses": ["Info-ZIP"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-09T22:08:38.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-09T22:08:38.000Z", "max_issues_repo_path": "Task/Long-multiplication/R/long-multiplication-2.r", "max_issues_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_issues_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_issues_repo_licenses": ["Info-ZIP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Task/Long-multiplication/R/long-multiplication-2.r", "max_forks_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_forks_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_forks_repo_licenses": ["Info-ZIP"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-09T22:08:40.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-09T22:08:40.000Z", "avg_line_length": 29.2835820896, "max_line_length": 114, "alphanum_fraction": 0.6116207951, "num_tokens": 565}
|
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import LogNorm
from mpl_toolkits.mplot3d import Axes3D
# Abort cleanly if NumPy is missing. The original bare ``except:`` also
# swallowed unrelated errors (even KeyboardInterrupt); only a missing
# module should trigger the exit.
try:
    import numpy as np
except ImportError:
    raise SystemExit("numpy is required to run this benchmark demo")
from surrogate import benchmarks
# NUMMAX = 5
# A = 10 * np.random.rand(NUMMAX, 2)
# C = np.random.rand(NUMMAX)
# Shekel benchmark parameters: A holds the 2-D positions of the five
# optima, C their width coefficients (smaller C -> sharper peak).
A = [[0.5, 0.5], [0.25, 0.25], [0.25, 0.75], [0.75, 0.25], [0.75, 0.75]]
C = [0.002, 0.005, 0.005, 0.005, 0.005]
def shekel_arg0(sol):
    # Evaluate the Shekel benchmark at ``sol`` and keep only the first
    # element of the returned fitness tuple.
    return benchmarks.shekel(sol, A, C)[0]
# Render the Shekel function as a 3-D surface over the unit square.
fig = plt.figure()
# ax = Axes3D(fig, azim = -29, elev = 50)
ax = Axes3D(fig)
# Sample the unit square on a 100x100 grid.
X = np.arange(0, 1, 0.01)
Y = np.arange(0, 1, 0.01)
X, Y = np.meshgrid(X, Y)
# ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin ``float`` is the documented replacement.
Z = np.fromiter(map(shekel_arg0, zip(X.flat, Y.flat)), dtype=float,
                count=X.shape[0] * X.shape[1]).reshape(X.shape)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, norm=LogNorm(), cmap=cm.jet, linewidth=0.2)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
|
{"hexsha": "3d3c92319ad36510704b0bbc2de1bce803a5d4ac", "size": 892, "ext": "py", "lang": "Python", "max_stars_repo_path": "docs/code/benchmarks/shekel.py", "max_stars_repo_name": "liujiamingustc/phd", "max_stars_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-01-06T03:01:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T03:02:55.000Z", "max_issues_repo_path": "docs/code/benchmarks/shekel.py", "max_issues_repo_name": "liujiamingustc/phd", "max_issues_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/code/benchmarks/shekel.py", "max_forks_repo_name": "liujiamingustc/phd", "max_forks_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8717948718, "max_line_length": 118, "alphanum_fraction": 0.6479820628, "include": true, "reason": "import numpy", "num_tokens": 339}
|
from __future__ import print_function
import sys
sys.path.append('..')
from Game import Game
from .UltimateTicTacToeLogic import Board
import numpy as np
class UltimateTicTacToeGame(Game):
    """Game-rules adapter for Ultimate Tic-Tac-Toe.

    The board is an 11x9 numpy array: rows 0-8 hold the 9x9 grid of
    cells, row 9 holds the flattened 3x3 grid of sub-board winners
    (2 marks a drawn sub-board), and row 10 flags which sub-board must
    be played next.
    """
    # Display character for each cell state.
    square_content = {
        -1: "X",
        +0: "-",
        +1: "O",
        +2: " "
    }

    @staticmethod
    def getSquarePiece(piece):
        # Display character for a single cell value.
        return UltimateTicTacToeGame.square_content[piece]

    def __init__(self):
        pass

    def getInitBoard(self):
        # return initial board (numpy board)
        b = Board()
        return np.array(b.pieces)

    def getBoardSize(self):
        # (a,b) tuple: 9 cell rows + winners row + next-board row.
        return (11, 9)

    def getActionSize(self):
        # return number of actions: one per cell, plus a pass move.
        return 9*9 + 1

    def getNextState(self, board, player, action):
        # if player takes action on board, return next (board,player)
        # action must be a valid move
        if action == 9*9:
            # Pass move: board unchanged, turn flips.
            return (board, -player)
        b = Board()
        b.pieces = np.copy(board)
        move = (int(action/9), action%9)
        b.execute_move(move, player)
        return (b.pieces, -player)

    def getValidMoves(self, board, player):
        # return a fixed size binary vector over the action space
        valids = [0]*self.getActionSize()
        b = Board()
        b.pieces = np.copy(board)
        legalMoves = b.get_legal_moves()
        if len(legalMoves) == 0:
            # No legal cell: only the pass move is allowed.
            valids[-1] = 1
            return np.array(valids)
        for x, y in legalMoves:
            valids[9*x + y] = 1
        return np.array(valids)

    def getGameEnded(self, board, player):
        # return 0 if not ended, 1 if player 1 won, -1 if player 1 lost
        b = Board()
        b.pieces = np.copy(board)
        win = b.is_win()
        if win == player:
            return 1
        elif win == -player:
            return -1
        elif win == 2:
            # Draw: small non-zero value, distinct from "still running".
            return 1e-4
        return 0

    def getCanonicalForm(self, board, player):
        # return state if player==1, else return -state if player==-1.
        # Rows 0-8 (cells) are negated wholesale; row 9 (sub-board
        # winners) negates only the +/-1 entries so the draw marker 2
        # survives; row 10 (next-board flags) is passed through as-is.
        l = np.array([[player*p for p in r] for r in board[:9]] + \
                     [[player*p if p in (-1, 1) else p for p in board[9]]] + \
                     [board[10]])
        return l

    def getSymmetries(self, board, pi):
        # mirror, rotational
        assert(len(pi) == 9**2+1)  # 1 for pass
        pi_board = np.reshape(pi[:-1], (9, 9))
        piece_board = np.array(board[:9])
        wins_board = np.reshape(board[9], (3, 3))
        next_board = np.reshape(board[10], (3, 3))
        l = []
        for i in range(1, 5):
            for j in [True, False]:
                # Rotate (and optionally mirror) every board plane and the
                # policy together so they stay consistent.
                newB = np.rot90(piece_board, i)
                newW = np.rot90(wins_board, i)
                newN = np.rot90(next_board, i)
                newPi = np.rot90(pi_board, i)
                if j:
                    newB = np.fliplr(newB)
                    newW = np.fliplr(newW)
                    newN = np.fliplr(newN)
                    newPi = np.fliplr(newPi)
                B = np.append(newB, [newW.ravel(), newN.ravel()], axis=0)
                l += [(B, list(newPi.ravel()) + [pi[-1]])]
        return l

    def stringRepresentation(self, board):
        # Byte string used as a hash key for MCTS. ``ndarray.tostring``
        # was deprecated in NumPy 1.19 and removed in 2.0; ``tobytes``
        # returns the identical bytes.
        return board.tobytes()

    @staticmethod
    def display(board):
        print(" ", end="")
        for Y in range(3):
            for y in range(3):
                print(3*Y+y, end=" ")
            print(end=" ")
        print("")
        print(" ----- ----- -----")
        for Y in range(3):
            for y in range(3):
                print(3*Y+y, "| ", end="")  # print the row #
                for X in range(3):
                    for x in range(3):
                        piece = board[3*X+x, 3*Y+y]  # get the piece to print
                        if piece in (-1, 1):
                            print(UltimateTicTacToeGame.square_content[piece], end=" ")
                        elif board[10][3*X + Y] == 0:
                            print(".", end=" ")
                        else:
                            print(" ", end=" ")
                    if X < 2:
                        print(end=" ")
                print("|", end="")
                if Y == 1:
                    # Alongside the middle band, print the sub-board
                    # winners column ("T" marks a drawn sub-board).
                    for x in range(3):
                        piece = board[9][3*x + y]
                        if piece in (-1, 1):
                            print(UltimateTicTacToeGame.square_content[piece], end=" ")
                        elif piece == 2:
                            print("T", end=" ")
                        else:
                            print(" ", end=" ")
                print()
            if Y < 2:
                print("")
                print(" ----- ----- -----")
|
{"hexsha": "005842737196af706de0ebdb10835c0f83265474", "size": 4712, "ext": "py", "lang": "Python", "max_stars_repo_path": "ultimatetictactoe/UltimateTicTacToeGame.py", "max_stars_repo_name": "taylor-santos/alpha-zero-general", "max_stars_repo_head_hexsha": "2f22d68bd5337de56dbf0482229e301c16377f04", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ultimatetictactoe/UltimateTicTacToeGame.py", "max_issues_repo_name": "taylor-santos/alpha-zero-general", "max_issues_repo_head_hexsha": "2f22d68bd5337de56dbf0482229e301c16377f04", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ultimatetictactoe/UltimateTicTacToeGame.py", "max_forks_repo_name": "taylor-santos/alpha-zero-general", "max_forks_repo_head_hexsha": "2f22d68bd5337de56dbf0482229e301c16377f04", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2739726027, "max_line_length": 87, "alphanum_fraction": 0.4494906621, "include": true, "reason": "import numpy", "num_tokens": 1188}
|
import numpy as np
import pandas as pd
import xarray as xr
from matplotlib import pyplot as plt
def get_var_names(variable):
    """
    get the long variable names from 'flow' or 'temp'
    :param variable: [str] either 'flow' or 'temp'
    :return: [str] long variable names (obs_var, seg_var)
    """
    name_map = {
        "flow": ("discharge_cms", "seg_outflow"),
        "temp": ("temp_c", "seg_tave_water"),
    }
    try:
        return name_map[variable]
    except KeyError:
        raise ValueError('variable param must be "flow" or "temp"')
def load_if_not_df(pred_data):
    """Return ``pred_data`` unchanged unless it is a path string, in which
    case read it from the feather file at that path."""
    return pd.read_feather(pred_data) if isinstance(pred_data, str) else pred_data
def trim_obs(obs, preds, spatial_idx_name="seg_id_nat", time_idx_name="date"):
    """Restrict ``obs`` to the time window and spatial units present in
    ``preds``. Both frames are indexed by (time, space); the trimmed
    result keeps that same MultiIndex."""
    obs_flat = obs.reset_index()
    preds_flat = preds.reset_index()
    in_window = (obs_flat[time_idx_name] >= preds_flat[time_idx_name].min()) & (
        obs_flat[time_idx_name] <= preds_flat[time_idx_name].max()
    )
    in_segs = obs_flat[spatial_idx_name].isin(preds_flat[spatial_idx_name].unique())
    trimmed = obs_flat[in_window & in_segs]
    return trimmed.set_index([time_idx_name, spatial_idx_name])
def fmt_preds_obs(pred_data,
                  obs_file,
                  spatial_idx_name="seg_id_nat",
                  time_idx_name="date"):
    """
    combine predictions and observations in one dataframe
    :param pred_data: [str] filepath to the predictions file (an already
        loaded dataframe is also accepted)
    :param obs_file: [str] filepath to the observations zarr store
    :param spatial_idx_name: [str] name of column that is used for spatial
        index (e.g., 'seg_id_nat')
    :param time_idx_name: [str] name of column that is used for temporal index
        (usually 'time')
    :return: dict mapping each prediction variable name to a dataframe
        with 'pred' and 'obs' columns, indexed by (time, space)
    """
    pred_data = load_if_not_df(pred_data)
    # Promote the index columns if they are still ordinary columns.
    if {time_idx_name, spatial_idx_name}.issubset(pred_data.columns):
        pred_data.set_index([time_idx_name, spatial_idx_name], inplace=True)
    obs = xr.open_zarr(obs_file).to_dataframe()
    variables_data = {}
    for var_name in pred_data.columns:
        # One pred/obs pair per predicted variable.
        obs_var = obs.copy()
        obs_var = obs_var[[var_name]]
        obs_var.columns = ["obs"]
        preds_var = pred_data[[var_name]]
        preds_var.columns = ["pred"]
        # trimming obs to preds speeds up following join greatly
        obs_var = trim_obs(obs_var, preds_var, spatial_idx_name, time_idx_name)
        combined = preds_var.join(obs_var)
        variables_data[var_name] = combined
    return variables_data
def plot_obs(prepped_data, variable, outfile, partition="trn"):
    """
    plot training observations
    :param prepped_data: [str] path to npz file of prepped data
    :param variable: [str] which variable to plot, 'flow' or 'temp'
    :param outfile: [str] where to store the resulting file
    :param partition: [str] which data partition to plot ('trn', etc.)
    :return: None
    """
    data = np.load(prepped_data, allow_pickle=True)
    df = prepped_array_to_df(
        data[f"y_obs_{partition}"],
        data[f"dates_{partition}"],
        data[f"ids_{partition}"],
        data["y_vars"],
    )
    _, seg_var = get_var_names(variable)
    # One column per stream segment; drop segments with no data at all.
    df_piv = df.pivot(index="date", columns="seg_id_nat", values=seg_var)
    df_piv.dropna(axis=1, how="all", inplace=True)
    try:
        df_piv.plot(subplots=True, figsize=(8, 12))
    except TypeError:
        # pandas raises TypeError when there is nothing numeric to plot;
        # emit a placeholder figure instead of crashing.
        fig, ax = plt.subplots()
        ax.text(0.5, 0.5, "NO DATA")
    plt.tight_layout()
    plt.savefig(outfile)
def plot_ts(pred_file, obs_file, variable, out_file):
    # Plot predicted vs. observed time series for one variable and save
    # the figure to ``out_file``.
    # NOTE(review): fmt_preds_obs's third parameter is spatial_idx_name,
    # not a variable name, and it returns a dict keyed by variable --
    # the droplevel/plot calls below look inconsistent with that; confirm.
    combined = fmt_preds_obs(pred_file, obs_file, variable)
    combined = combined.droplevel("seg_id_nat")
    ax = combined.plot(alpha=0.65)
    plt.tight_layout()
    plt.savefig(out_file)
def prepped_array_to_df(data_array, dates, ids, col_names):
    """
    convert prepped x or y_dataset data in numpy array to pandas df
    (reshape and make into pandas DFs)
    :param data_array: [numpy array] array of x or y_dataset data
        [nbatch, seq_len, n_out]
    :param dates: [numpy array] array of dates [nbatch, seq_len, n_out]
    :param ids: [numpy array] array of seg_ids [nbatch, seq_len, n_out]
    :param col_names: column labels for the data variables
    :return: [pd dataframe] df with cols
        ['date', 'seg_id_nat', <col_names...>]
    """
    def _flatten(arr):
        # Collapse (nbatch, seq_len, n_feat) -> (nbatch * seq_len, n_feat).
        return np.reshape(arr, [arr.shape[0] * arr.shape[1], arr.shape[2]])

    df_dates = pd.DataFrame(_flatten(dates), columns=["date"])
    df_ids = pd.DataFrame(_flatten(ids), columns=["seg_id_nat"])
    df_preds = pd.DataFrame(_flatten(data_array), columns=col_names)
    return pd.concat([df_dates, df_ids, df_preds], axis=1)
|
{"hexsha": "6f990fcdafb3c7a45ca98f2557d847d99f0eabd6", "size": 4679, "ext": "py", "lang": "Python", "max_stars_repo_path": "river_dl/postproc_utils.py", "max_stars_repo_name": "SimonTopp/river-dl", "max_stars_repo_head_hexsha": "6356c3f3e8012bc7930909f2b5d6b9f8507225f1", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-05-20T10:39:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T11:12:51.000Z", "max_issues_repo_path": "river_dl/postproc_utils.py", "max_issues_repo_name": "SimonTopp/river-dl", "max_issues_repo_head_hexsha": "6356c3f3e8012bc7930909f2b5d6b9f8507225f1", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 78, "max_issues_repo_issues_event_min_datetime": "2021-05-17T13:50:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:09:50.000Z", "max_forks_repo_path": "river_dl/postproc_utils.py", "max_forks_repo_name": "SimonTopp/river-dl", "max_forks_repo_head_hexsha": "6356c3f3e8012bc7930909f2b5d6b9f8507225f1", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-05-13T14:23:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-16T18:06:05.000Z", "avg_line_length": 35.446969697, "max_line_length": 82, "alphanum_fraction": 0.6621072879, "include": true, "reason": "import numpy", "num_tokens": 1209}
|
## Imports
import sys
import numpy as np
import warnings
warnings.filterwarnings("ignore")
sys.path.append("../.")
import handybeam
import handybeam.world
import handybeam.tx_array_library
import handybeam.visualise
import handybeam.samplers.clist_sampler as clist_sampler
from handybeam.translator import Translator
from handybeam.solver import Solver
# Create world.
world = handybeam.world.World()
# Add a transmitter array to the world.
world.tx_array = handybeam.tx_array_library.rectilinear(parent=world)
# Instantiate translator and solver objects.
translator = Translator(parent = world)
solver = Solver(parent = world)
# Instruct the solver to solve for the activation coefficients that put
# a single focal point at (0, 0, 100 mm).
solver.single_focus_solver(x_focus = 0,y_focus = 0,z_focus = 100e-3)
# Add clist sampler object to the world
volume_sampler = world.add_sampler(clist_sampler.ClistSampler( parent=world ))
# Specify points in the volume to sample the acoustic field on:
# a 150^3 grid spanning +/-500 mm laterally and 10-500 mm in depth.
no_points = 150
x = np.linspace(-500e-3,500e-3,no_points)
y = np.linspace(-500e-3,500e-3,no_points)
z = np.linspace(10e-3,500e-3,no_points)
x_mesh,y_mesh,z_mesh = np.meshgrid(x,y,z)
x_list = x_mesh.ravel()
y_list = y_mesh.ravel()
z_list = z_mesh.ravel()
# Add these sample points to the sampler
volume_sampler.add_sampling_points(x_list,y_list,z_list)
# Propagate the field
world.propagate()
# Save original pressure field so it can be compared after translation.
original_field = np.copy(volume_sampler.pressure_field)
# Translate array.
translator.xyz_translate(x_focus = 0,y_focus = 0, z_focus = 100e-3,
                         x_translate = 0,y_translate = 50e-3, z_translate =50e-3)
# Propagate the acoustic field.
world.propagate()
# Visualise the acoustic field before/after the translation.
print('pre vis')
handybeam.visualise.visualise_translation_3D(world, original_pressure_field = original_field,
                                             sampler = volume_sampler, threshold = 50)
print('post vis')
|
{"hexsha": "248b51f47ed3f9eb40d201bb06a95e457656fb55", "size": 1924, "ext": "py", "lang": "Python", "max_stars_repo_path": "demos/translation_xyz_volume_demo.py", "max_stars_repo_name": "ultraleap/HandyBeam", "max_stars_repo_head_hexsha": "9f80b97742cde4b75d3478d554dc9bc2cd9dfd96", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-10-20T09:15:46.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-03T00:31:23.000Z", "max_issues_repo_path": "demos/translation_xyz_volume_demo.py", "max_issues_repo_name": "ultraleap/HandyBeam", "max_issues_repo_head_hexsha": "9f80b97742cde4b75d3478d554dc9bc2cd9dfd96", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-04-04T18:36:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-12T22:57:34.000Z", "max_forks_repo_path": "demos/translation_xyz_volume_demo.py", "max_forks_repo_name": "ultraleap/HandyBeam", "max_forks_repo_head_hexsha": "9f80b97742cde4b75d3478d554dc9bc2cd9dfd96", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-11-29T16:05:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-01T22:56:39.000Z", "avg_line_length": 23.7530864198, "max_line_length": 93, "alphanum_fraction": 0.751039501, "include": true, "reason": "import numpy", "num_tokens": 457}
|
import numpy as np
from scipy.interpolate import interp1d
class Adiabatic:
    """Records the evolution of one adiabatic state: the history of its
    eigenvalue, eigenvector and interaction parameter."""

    def __init__(self, index, initial_value, initial_parameter, initial_vector):
        """
        Contains the evolution of an adiabatic state, with methods to
        compare a given eigenvector and value to this one to see if it is
        part of the same state.

        Parameters
        ----------
        index: int
            which eigenstate of the unperturbed hamiltonian this
            adiabatic state belongs to.
        initial_value: float
            initial eigenvalue
        initial_parameter: float
            initial value of the interaction parameter
        initial_vector: numpy array
            the initial eigenvector
        """
        self.index = index
        self.parameter = np.asarray([initial_parameter])
        self.vals = np.asarray([initial_value])
        self.vecs = np.asarray([initial_vector])

    def _add_vector(self, vector):
        """
        adds a new vector by stacking it under the list of vectors.
        """
        # Coerce non-array input; use isinstance rather than a direct
        # type comparison so ndarray subclasses are accepted too.
        if not isinstance(vector, np.ndarray):
            try:
                vector = np.asarray(vector)
            except Exception:
                # BUG FIX: the original concatenated ``type(vector)`` (a
                # type object) onto a str, which raised TypeError and
                # masked the intended AttributeError.
                raise AttributeError(
                    "could not convert vector to correct format for adibatic state, "
                    "type provided: " + str(type(vector)))
        try:
            self.vecs = np.append(self.vecs, [vector], axis=0)
        except Exception:
            raise AttributeError("Could not add value to adiabatic state " + str(vector))

    def _add_value(self, value):
        """
        adds a new value to end of values array
        """
        try:
            self.vals = np.append(self.vals, value)
        except Exception:
            raise AttributeError("Could not add value to adiabatic state")

    def _add_parameter(self, parameter):
        """
        adds a new parameter value to the array
        """
        try:
            self.parameter = np.append(self.parameter, parameter)
        except Exception:
            raise AttributeError("could not add parameter to adiabatic state")

    def add(self, val, vec, param):
        """
        given a val/vec/parameter triple, append all three histories.
        """
        self._add_value(val)
        self._add_vector(vec)
        self._add_parameter(param)

    # METHODS FOR RETRIEVING INFORMATION ON THE ADIABATIC STATE
    def get_length(self):
        """
        gets the current number of recorded points of the adiabatic state.
        """
        return len(self.parameter)

    def get_current_parameter(self):
        """
        returns the most recent parameter of this state
        """
        try:
            return self.parameter[-1]
        except IndexError:
            raise IndexError("Couldnt access the last value of values of length " + str(len(self.parameter)))

    def get_current_value(self):
        """
        returns the current energy (latest eigenvalue) of this state
        """
        try:
            return self.vals[-1]
        except IndexError:
            raise IndexError("Couldnt access the last value of values of length " + str(len(self.vals)))

    def get_current_coefficient(self, index=None):
        """
        gets the current coefficient of the target state; defaults to
        the state's own adiabatic coefficient (``self.index``).
        """
        if index is None:
            return self.vecs[-1][self.index]
        try:
            return self.vecs[-1][index]
        except Exception:
            raise IndexError("Could not return value for index: " + str(index))
|
{"hexsha": "257f5d02fbebdb1d90e89be3ff98fd8fe4ef0dcc", "size": 3759, "ext": "py", "lang": "Python", "max_stars_repo_path": "rydprop/hohi/adiabatic_solver/adiabatic_state.py", "max_stars_repo_name": "jdrtommey/rydprops", "max_stars_repo_head_hexsha": "cdc7e14d61ff33929844ee5d779a18fd64f89f4f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rydprop/hohi/adiabatic_solver/adiabatic_state.py", "max_issues_repo_name": "jdrtommey/rydprops", "max_issues_repo_head_hexsha": "cdc7e14d61ff33929844ee5d779a18fd64f89f4f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rydprop/hohi/adiabatic_solver/adiabatic_state.py", "max_forks_repo_name": "jdrtommey/rydprops", "max_forks_repo_head_hexsha": "cdc7e14d61ff33929844ee5d779a18fd64f89f4f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8648648649, "max_line_length": 119, "alphanum_fraction": 0.5629156691, "include": true, "reason": "import numpy,from scipy", "num_tokens": 759}
|
import numpy as np
import os
import cv2
import argparse
import sys
import tensorflow as tf
#from collections import defaultdict
#from io import StringIO
#from matplotlib import pyplot as plt
#from PIL import Image
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops
if tf.__version__ < '1.4.0':
raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')
from utils import label_map_util
from utils.app_utils import WebcamVideoStream
from utils import visualization_utils as vis_util
# # Model preparation
# Which model: a frozen SSD-MobileNet checkpoint (COCO-trained, per the names
# below -- confirm against the actual download).
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
# Number of classes in the COCO label map.
NUM_CLASSES = 90
# ## Load a (frozen) Tensorflow model into memory.
# TF 1.x graph-mode API: parse the serialized GraphDef and import it into a
# fresh Graph; `name=''` keeps the original tensor names (used below when
# fetching tensors like 'detection_boxes:0').
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
# category_index maps class id -> category dict; consumed by the visualizer.
category_index = label_map_util.create_category_index(categories)
#intializing the web camera device
"""if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-src', '--source', dest='video_source', type=int,
default=0, help='Device index of the camera.')
parser.add_argument('-wd', '--width', dest='width', type=int,
default=640, help='Width of the frames in the video stream.')
parser.add_argument('-ht', '--height', dest='height', type=int,
default=480, help='Height of the frames in the video stream.')
args = parser.parse_args()
#cap = WebcamVideoStream(src=args.video_source,width=args.width,height=args.height).start()
"""
cap = cv2.VideoCapture(0)
# Running the tensorflow session
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        # The graph does not change between frames, so fetch the tensor
        # handles once instead of on every loop iteration.
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        boxes_tensor = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        scores_tensor = detection_graph.get_tensor_by_name('detection_scores:0')
        classes_tensor = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections_tensor = detection_graph.get_tensor_by_name('num_detections:0')
        while True:
            ret, image_np = cap.read()
            if not ret:
                # Camera read failed (device missing/unplugged); previously
                # `ret` was ignored and a None frame would crash downstream.
                break
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes_tensor, scores_tensor, classes_tensor, num_detections_tensor],
                feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            cv2.imshow('image', cv2.resize(image_np, (640, 480)))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
# Release the camera and close the window on every exit path (the original
# never called cap.release(), and only destroyed windows on the 'q' path).
cap.release()
cv2.destroyAllWindows()
|
{"hexsha": "a7b2e26c5fb81fa3e89d487d14d67063e1117dd5", "size": 4359, "ext": "py", "lang": "Python", "max_stars_repo_path": "object_detection/object_detection_tutorial.py", "max_stars_repo_name": "maheshmj24/ASROD", "max_stars_repo_head_hexsha": "9cde2dafba9b78e0f186aa2fd517677d1a09f226", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "object_detection/object_detection_tutorial.py", "max_issues_repo_name": "maheshmj24/ASROD", "max_issues_repo_head_hexsha": "9cde2dafba9b78e0f186aa2fd517677d1a09f226", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "object_detection/object_detection_tutorial.py", "max_forks_repo_name": "maheshmj24/ASROD", "max_forks_repo_head_hexsha": "9cde2dafba9b78e0f186aa2fd517677d1a09f226", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1226415094, "max_line_length": 279, "alphanum_fraction": 0.7047487956, "include": true, "reason": "import numpy", "num_tokens": 960}
|
CCHECK
C SUBROUTINE TO CHECK SENSE LIGHTS
c SUBROUTINE CHECK(J)
C
c REAL MFSTOP
c LOGICAL PREVER
c COMMON /SNTCP/G,AJ,PRPC,ICASE,PREVER,MFSTOP,JUMP,LOPIN,ISCASE,
c 1KN,GAMF,IP,SCRIT,PTRN,ISECT,KSTG,WTOL,RHOTOL,PRTOL,TRLOOP,LSTG,
c 2LBRC,IBRC,ICHOKE,ISORR,CHOKE,PT0PS1(6,8),PTRS2(6,8),TRDIAG,SC,RC,
c 3DELPR,PASS,IPC,LOPC,ISS
C
C DO 1 I=1,4
C CALL SLITET(I,J)
C GO TO (2,1),J
C1 CONTINUE
c J=2
c RETURN
C2 J=1
C PREVER=.TRUE.
C RETURN
c END
C ESTABLISH VALUES FOR STATOR EXIT FLOW
      SUBROUTINE FLOW1(I)
C
C  ESTABLISH STATOR EXIT FLOW FOR RADIAL SECTOR I OF THE CURRENT
C  STAGE (K, FROM COMMON /SNTCP/).  COMPUTES EXIT VELOCITY V1,
C  STATIC PRESSURE PS1, DENSITY RHOS1, WEIGHT FLOW WG1 AND THE
C  EFFECTIVE EXIT ANGLE ALF1E, HANDLING BOTH SUB-CRITICAL AND
C  ABOVE-CRITICAL (CHOKED) PRESSURE RATIOS.  ALL INPUT AND OUTPUT
C  IS EXCHANGED THROUGH THE COMMON BLOCKS DECLARED BELOW.
C
      REAL MFSTOP
      LOGICAL PREVER
      COMMON /SNTCP/G,AJ,PRPC,ICASE,PREVER,MFSTOP,JUMP,LOPIN,ISCASE,
     1 K,GAMF,IP,SCRIT,PTRN,ISECT,KSTG,WTOL,RHOTOL,PRTOL,TRLOOP,LSTG,
     2LBRC,IBRC,ICHOKE,ISORR,CHOKE,PT0PS1(6,8),PTRS2(6,8),TRDIAG,SC,RC,
     3DELPR,PASS,IPC,LOPC,ISS
C
      COMMON /SINIT/H1(6,8),H2(6,8),DP0(6,8),DP1(6,8),DP1A(6,8),DP2(6,8)
     1,DP2A(6,8),CSALF1(6,8),ALF1(6,8),CSBET2(6,8),BET2(6,8),RADSD(6,8),
     2RADRD(6,8),ANN1(6,8),ANN2(6,8),ANN2A(6,8),ANN1A(6,8),U1A(6,8),
     3U2(6,8),ANN0(6,8),PT0(6,8),TT0(6,8),ALPHA0(6,8),PTP(6,8)
C
      COMMON /SINPUT/
     1PTPS,PTIN,TTIN,WAIR,FAIR,DELC,DELL,DELA,AACS,VCTD,STG,SECT,EXPN,
     2EXPP,EXPRE,RG,RPM,PAF,SLI,STGCH,ENDJOB,XNAME(20),TITLE(20),
     3PCNH(6),GAM(6,8),DR(6,8),DT(6,8),RWG(6,8),ALPHAS(6,8),ALPHA1(6,8),
     4ETARS(6,8),ETAS(6,8),CFS(6,8),ANDO(6,8),BETA1(6,8),BETA2(6,8),ETAR
     5R(6,8),ETAR(6,8),CFR(6,8),TFR(6,8),ANDOR(6,8),OMEGAS(6,8),AS0(6,8)
     6,ASMP0(6,8),ACMN0(6,8),A1(6,8),A2(6,8),A3(6,8),A4(6,8),A5(6,8),A6(
     76,8),OMEGAR(6,8),BSIA(6,8),BSMPIA(6,8),BCMNIA(6,8),B1(6,8),B2(6,8)
     8,B3(6,8),B4(6,8),B5(6,8),B6(6,8),SESTHI(8),RERTHI(8)
     9,fairx(5,8),wairx(5,8),rg1(8),rg1a(8),rg2(8),rg2a(8)
C
      REAL M0
      COMMON /SSTA01/CP0(8),w0(6), PS0(6,8),V0(6,8),TS0(6,
     18),VU0(6,8),VZ0(6,8),RHOS0(6,8),PS1(6,8),WGT1(8),TA1(8),WG1(6,8),
     2 DPDR1(6,8),SI(6,8), CP1(8),PHI1(6,8),TS1(6,8),V1(6,8)
     3,RHOS1(6,8),ALF1E(6,8),VU1(6,8),VZ1(6,8),M0(6,8)
      COMMON/DESOPT/RVU1(6,8),RVU2(6,8),WG,EPR
      COMMON/TPT1/PT1(6),TT1(6,8)
      dimension tangms(6,8),tangmr(6,8),tangm1(6,8),tangm2(6,8),tang0(6)
      common/slope/tangms,tangmr,tangm1,tangm2,tang0,iar,icyl
C
C
C
C      print *,' Entering In flow1 PT1(I)= ',PT1(I)
C  ETAO1 IS AN EFFICIENCY MULTIPLIER; ETAPR OVERWRITES IT WHEN EPR .GT. 0.
      ETAO1=1.
      cat1=cos(atan(tangm1(i,k)))
      EX=(GAM(2,K)-1.)/GAM(2,K)
C  COMPUTE ISENTROPIC STATOR TEMPERATURE RATIO
      PHI1(I,K)=PT0PS1(I,K)**EX
C  TEST FOR LOSS COEFFICIENT INPUT
C     IF (OMEGAS(1,1))2,2,1
C   replaced by ....................
      IF(OMEGAS(1,1).LE.0.) THEN
         GO TO 2
      ELSE
         GO TO 1
      ENDIF
    1 CALL LOSS1(I,K,EX)
    2 IF(EPR.GT.0.) CALL ETAPR(PT0PS1(I,K),ETAO1)
      ETA=ETAS(I,K)*ETAO1
      TS1(I,K)=TT1(I,K)*(1.-ETA *(1.-1./PHI1(I,K)))
C     IF(I-IP)6,3,6
C   replaced by ....................
      IF((I-IP).NE.0) THEN
         GO TO 6
      ELSE
         GO TO 3
      ENDIF
C3    IF(GAMF)4,4,5
C   replaced by ....................
    3 IF(GAMF.LE.0.) THEN
         GO TO 4
      ELSE
         GO TO 5
      ENDIF
    4 TA1(K)=.5*(TT1(I,K)+TS1(I,K))
      CALL GAMA(PT0(IP,K),TA1(K),FAIRx(2,k),WAIRx(2,k),GAM(2,K))
    5 EX=(GAM(2,K)-1.0)/GAM(2,K)
      EXI=1./EX
C  CRITICAL PRESSURE RATIO
      CALL PHIM(EXI,ETA ,PHI1C ,PTPS1C )
      CP1(K)=RG1(k)*EXI/AJ
C  EXIT VELOCITY
    6 V1(I,K)=SQRT(2.*G*AJ*CP1(K)*(TT1(I,K)-TS1(I,K)))
c  Pressure ratio correction to flow coefficient
      if (i.eq.ip) call etacf(pt0ps1(i,k),ptps1c,xcf)
C  EXIT PRESSURE
C      print *,' In flow1 PT1(I)= ',PT1(I),' PT0PS1(k)=',PT0PS1(I,K)
      PS1(I,K)=PT1(I )/PT0PS1(I,K)
C  EXIT DENSITY
C      print *,' In flow1 PS1(I,K)= ',PS1(I,K),' rg1(k)=',rg1(k)
C      print *,' TS1(I,K)= ',TS1(I,K)
      RHOS1(I,K)=144.*PS1(I,K)/(rg1(k)*TS1(I,K))
C  TEST CRITICAL PRESSURE RATIO
C     IF(RVU1(I,K))120,130,120
C   replaced by ....................
      IF(RVU1(I,K).NE.0.) THEN
         GO TO 120
      ELSE
         GO TO 130
      ENDIF
C  DESIGN OPTION (RVU1 .NE. 0): EXIT ANGLE DERIVED FROM INPUT RVU1
C  (COMMON /DESOPT/); SINE IS CLAMPED JUST BELOW 1 TO KEEP SQRT REAL.
  120 SNALF1=RVU1(I,K)*2./DP1(I,K)/V1(I,K)
      ASNALF=ABS(SNALF1)
      IF(ASNALF.GT.1.)SNALF1=SNALF1/(ASNALF+.01)
      CSAL1E=SQRT (1.-SNALF1**2)
      ALF1E(I,K)=ATAN(SNALF1/CSAL1E)
C130  IF(PT0PS1(I,K)-PTPS1C)140, 8,8
C   replaced by ....................
  130 IF((PT0PS1(I,K)-PTPS1C).LT.0.) THEN
         GO TO 140
      ELSE
         GO TO 8
      ENDIF
C  GREATER THAN CRITICAL
    8 IF(RVU1(I,K).NE.0.) GOTO 11
C     IF (IP-I) 21,9,21
C   replaced by ....................
      IF((IP-I).NE.0) THEN
         GO TO 21
      ELSE
         GO TO 9
      ENDIF
C9    IF (PRPC)10,10,22
C   replaced by ....................
    9 IF(PRPC.LE.0.) THEN
         GO TO 10
      ELSE
         GO TO 22
      ENDIF
C  PREVIOUS PITCH NONCRITICAL
   10 PRPC=1.
      GO TO 22
   21 IF (PT0PS1(I,K).LE.PT0PS1(IP,K)) GO TO 22
      GO TO 11
   22 IF ((I.EQ.1).OR.(I.EQ.ISECT)) SCRIT=1.
      GO TO 11
C  PITCH OR OUTBOARD SECTOR
   11 CONTINUE
C  PRESSURE RATIO ABOVE CRITICAL
C  CONDITIONS AT THE CRITICAL PRESSURE RATIO: VELOCITY V1C,
C  TEMPERATURE TS1C AND DENSITY RHOS1C.
      IF(EPR.GT.0.) CALL ETAPR(PTPS1C,ETAO1)
      ETAC=ETAS(I,K)*ETAO1
      V1C =SQRT(2.*G*AJ*CP1(K)*TT1(I,K)*ETAC *(PHI1C
     1-1.)/PHI1C )
      TS1C =TT1(I,K)*(1.-ETAC *(1.-1./PHI1C ))
      RHOS1C =144.*PT1(I )/( PTPS1C *TS1C *rg1(k))
C     IF(RVU1(I,K))15,150,15
C   replaced by ....................
C      print *,' RVU1(I,K)=',RVU1(I,K),' I=',I,' K=',K
      IF(RVU1(I,K).NE.0.) THEN
         GO TO 15
      ELSE
         GO TO 150
      ENDIF
C  CHOKED CASE: WEIGHT FLOW WG1 IS SET BY THE CRITICAL CONDITIONS,
C  THEN THE EFFECTIVE EXIT ANGLE IS BACKED OUT FROM IT.
  150 continue
C      print *,' in flow1... after 150...'
      cscyl=sqrt(1./(1.+(1./csalf1(i,k)**2-1.)/cat1**2))
      WG1(I,K)=RHOS1C *V1C *ANN1(I,K)*CScyl
     & *cat1*cfs(i,k)*xcf
      if (ando(1,k).gt.0.0) wg1(i,k)=wg1(i,k)/cfs(i,k)/xcf
      CScyle =WG1(I,K)/(RHOS1(I,K)*V1(I,K)*ANN1(I,K))
     & /cat1/cfs(i,k)/xcf
      if (ando(1,k).gt.0.0) cscyle=cscyle*cfs(i,k)*xcf
      csal1e=sqrt(1./(1.+(1./cscyle**2-1.)*cat1**2))
C  EFFECTIVE STATOR EXIT ANGLE
      ALF1E(I,K)=ATAN2(SQRT(1.-CSAL1E *CSAL1E ),
     1CSAL1E)
      GO TO 16
C  PRESSURE RATIO LESS THAN CRITICAL
  140 IF(RVU1(I,K).NE.0.)GO TO 15
      CSAL1E =CSALF1(I,K)
      ALF1E(I,K)=ALF1(I,K)
   15 continue
C      print *,' in flow1... after 15...'
      cscyle=sqrt(1./(1.+(1./csal1e**2-1.)/cat1**2))
C      print *,' cscyle =',cscyle,' in flow1....'
c      print *,' ANN1(I,K)=',ANN1(I,K),' I=',I,' K=',K
C      print *,' RHOS1(I,K)=',RHOS1(I,K),' cat1 =',cat1
C      print *,' cfs(I,K)=',cfs(I,K),' xcf =',xcf
      WG1(I,K)=RHOS1(I,K)*V1(I,K)*ANN1(I,K)*CScyle
     & *cat1*cfs(i,k)*xcf
C      print *,' WG1(I,K) = ',WG1(I,K),' in flow1..1.'
      if (ando(1,k).gt.0.0) wg1(i,k)=wg1(i,k)/cfs(i,k)/xcf
C      print *,' WG1(I,K) = ',WG1(I,K),' in flow1..2.'
      IF(RVU1(I,K).EQ.0.) GO TO 16
C     IF(PT0PS1(I,K)-PTPS1C)170,180,180
C   replaced by ....................
      IF((PT0PS1(I,K)-PTPS1C).LT.0.) THEN
         GO TO 170
      ELSE
         GO TO 180
      ENDIF
  170 CSALF1(I,K)=CSAL1E
      ALF1(I,K)=ALF1E(I,K)
      GO TO 16
  180 CScyl=WG1(I,K)/RHOS1C/V1C/ANN1(I,K)/cfs(i,k)/xcf
     & /cat1
      if (ando(1,k).gt.0.0) cscyl=cscyl*cfs(i,k)*xcf
      csalf1(i,k)=sqrt(1./(1.+(1./cscyl**2-1.)*cat1**2))
      ALF1(I,K)=ATAN(SQRT(1.-CSALF1(I,K)**2)/CSALF1(I,K))*SNALF1/ASNALF
C  TANGENTIAL AND AXIAL VELOCITY COMPONENTS AT STATOR EXIT
   16 VU1(I,K)=V1(I,K)*SIN(ALF1E(I,K))
      VZ1(I,K)=V1(I,K)*CSAL1E
      IF(I.LT.ISECT) GO TO 17
      IF(PRPC.EQ.1.) PRPC=2.
   17 j=2
      GO TO (19,20),J
   19 CALL DIAGT(2)
   20 RETURN
      END
|
{"hexsha": "b5dae307c65c376e54fcd31f646969362d3dd5b8", "size": 7607, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "contrib/axod/src/flow1.f", "max_stars_repo_name": "mjfwest/OpenMDAO-Framework", "max_stars_repo_head_hexsha": "a5521f47ad7686c25b203de74e1c7dff5fd7a52b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 69, "max_stars_repo_stars_event_min_datetime": "2015-01-02T19:10:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-14T04:42:28.000Z", "max_issues_repo_path": "contrib/axod/src/flow1.f", "max_issues_repo_name": "jcchin/OpenMDAO-Framework", "max_issues_repo_head_hexsha": "038e89b06da1c74f00918f4c6fbd8bd365e25657", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2015-01-15T23:08:18.000Z", "max_issues_repo_issues_event_max_datetime": "2015-03-11T16:57:35.000Z", "max_forks_repo_path": "contrib/axod/src/flow1.f", "max_forks_repo_name": "jcchin/OpenMDAO-Framework", "max_forks_repo_head_hexsha": "038e89b06da1c74f00918f4c6fbd8bd365e25657", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2015-09-16T00:37:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-10T06:27:55.000Z", "avg_line_length": 33.8088888889, "max_line_length": 72, "alphanum_fraction": 0.5279347969, "num_tokens": 3642}
|
"""
Classes with observation shapes, action shapes
and reward functions
"""
import numpy as np
import pybullet as p
# import time
class ObservationShapes:
    """Builds the seven alternative observation vectors (shapes #1 to #7).

    Relative quantities (end-effector w.r.t. torso and goal) are computed
    once at construction time and reused by the individual getters.
    Inputs are assumed to be numpy arrays -- TODO confirm callers.
    """

    def __init__(
            self,
            endeffector_pos,
            endeffector_orient,
            torso_pos,
            torso_orient,
            goal_pos,
            goal_orient,
            joint_positions):
        # Absolute poses.
        self.endeffector_pos = endeffector_pos
        self.endeffector_orient = endeffector_orient
        self.torso_pos = torso_pos
        self.torso_orient = torso_orient
        self.goal_pos = goal_pos
        self.goal_orient = goal_orient
        self.joint_positions = joint_positions
        # Derived relative quantities.
        self.end_torso_pos = self.endeffector_pos - self.torso_pos
        self.end_goal_pos = self.endeffector_pos - self.goal_pos
        self.end_torso_orient = self.endeffector_orient - self.torso_orient
        self.end_goal_orient = self.endeffector_orient - self.goal_orient

    @staticmethod
    def _flat(parts):
        """Concatenate *parts* into a single flat 1-D array."""
        return np.concatenate(parts).ravel()

    def get_obs1(self):
        """ Returns observation #1 """
        return self._flat([self.endeffector_pos, self.joint_positions])

    def get_obs2(self):
        """ Returns observation #2 """
        return self._flat([self.goal_pos, self.joint_positions])

    def get_obs3(self):
        """ Returns observation #3 """
        return self._flat(
            [self.end_torso_pos, self.end_goal_pos, self.joint_positions])

    def get_obs4(self):
        """ Returns observation #4 """
        return self._flat([self.end_goal_pos, self.joint_positions])

    def get_obs5(self):
        """ Returns observation #5 """
        return self._flat(
            [self.end_torso_pos, self.end_goal_pos, self.goal_pos,
             self.joint_positions])

    def get_obs6(self):
        """ Returns observation #6 """
        return self._flat([
            self.end_torso_pos,
            self.end_goal_pos,
            self.end_torso_orient,
            self.end_goal_orient,
            self.goal_pos,
            self.goal_orient,
            self.endeffector_pos,
            self.endeffector_orient,
            self.joint_positions,
        ])

    def get_obs7(self):
        """ Returns observation #7 """
        return self._flat([
            self.end_torso_pos,
            self.end_goal_pos,
            self.goal_pos,
            self.endeffector_pos,
            self.joint_positions,
        ])
class ActionShapes:
    """Implements actions #1 (instantaneous reset) and #2 (position control)."""

    def __init__(
            self,
            pybullet_action,
            joint_positions,
            joint_min,
            joint_max,
            arm,
            physics_client,
            frame_skip):
        self.pybullet_action = pybullet_action
        self.joint_positions = joint_positions
        self.joint_min = joint_min
        self.joint_max = joint_max
        self.arm = arm
        self.physics_client = physics_client
        self.frame_skip = frame_skip
        # Apply the action as an increment, then clamp each joint into
        # its allowed [joint_min, joint_max] interval.
        incremented = np.array(self.joint_positions + self.pybullet_action)
        self.new_joint_positions = np.clip(
            incremented, self.joint_min, self.joint_max)

    def take_action1(self):
        """ select action #1 (increments from previous joint position) """
        # Joint angles are reset instantaneously -- no torque is applied.
        self.force_joint_positions(self.new_joint_positions)

    def take_action2(self):
        """ select action #2: position control """
        self.set_joint_positions(self.new_joint_positions)
        # Step the simulation a fixed number of frames so the position
        # controller has time to act.
        for _ in range(self.frame_skip):
            p.stepSimulation(physicsClientId=self.physics_client)

    def set_joint_positions(self, joint_positions):
        """ Position control (not reset) """
        # In Pybullet the two gripper halves are controlled separately,
        # so the last joint target is duplicated.
        targets = list(joint_positions)
        targets.append(targets[-1])
        p.setJointMotorControlArray(
            self.arm,
            [0, 1, 2, 3, 4, 7, 8],
            controlMode=p.POSITION_CONTROL,
            targetPositions=targets
        )

    def force_joint_positions(self, joint_positions):
        """ Instantaneous reset of the joint angles (not position control) """
        for joint_index in range(5):
            p.resetJointState(
                self.arm,
                joint_index,
                joint_positions[joint_index]
            )
        # Gripper halves (joints 7 and 8) both receive the last target.
        for joint_index in (7, 8):
            p.resetJointState(
                self.arm,
                joint_index,
                joint_positions[-1]
            )
class RewardFunctions:
    """Catalogue of reward functions (dense, sparse and mixed variants).

    Every ``get_rewardN`` stores its two components in ``self.term1`` and
    ``self.term2`` (kept as attributes so callers can inspect them) and
    returns their sum.
    """

    def __init__(
            self,
            dist,
            alpha_reward,
            action,
            delta_dist,
            delta_pos,
            orient,
            collision):
        self.dist = dist
        self.alpha_reward = alpha_reward
        self.action = action
        self.delta_dist = delta_dist
        self.delta_pos = delta_pos
        self.orient = orient
        self.collision = collision
        self.term1 = 0
        self.term2 = 0

    def _combine(self, term1, term2=0):
        """Record both reward components and return their sum."""
        self.term1 = term1
        self.term2 = term2
        return self.term1 + self.term2

    def get_reward1(self):
        """ Compute reward function 1 (dense) """
        return self._combine(-self.dist ** 2)

    def get_reward2(self):
        """ Compute reward function 2 (dense) """
        return self._combine(-self.dist)

    def get_reward3(self):
        """ Compute reward function 3 (dense) """
        return self._combine(-self.dist ** 3)

    def get_reward4(self):
        """ Compute reward function 4 (dense) """
        return self._combine(-self.dist ** 4)

    def get_reward5(self):
        """ Compute reward function 5 (dense) """
        return self._combine(
            -self.dist ** 2,
            -self.alpha_reward * np.linalg.norm(self.action))

    def get_reward6(self):
        """ Compute reward function 6 (dense) """
        return self._combine(
            -self.dist ** 2,
            -self.alpha_reward * np.linalg.norm(self.action) / self.dist ** 2)

    def get_reward7(self):
        """ Compute reward function 7 (dense) """
        return self._combine(self.delta_dist)

    def get_reward8(self):
        """ Compute reward function 8 (dense) """
        return self._combine(
            -self.dist ** 2,
            self.alpha_reward * abs(self.delta_dist / self.dist))

    def get_reward9(self):
        """ Compute reward function 9 (dense) """
        return self._combine(self.delta_pos)

    def get_reward10(self):
        """ Compute reward function 10 (dense) """
        return self._combine(
            -self.dist ** 2,
            -self.alpha_reward * self.delta_pos / self.dist)

    def get_reward11(self):
        """ Compute reward function 11 (sparse) """
        return self._combine(-1 if self.dist >= 0.001 else 0)

    def get_reward12(self):
        """ Compute reward function 12 (sparse) """
        return self._combine(0 if self.dist >= 0.001 else 1)

    def get_reward13(self):
        """ Compute reward function 13 (sparse) """
        return self._combine(-0.02 if self.dist >= 0.001 else 1)

    def get_reward14(self):
        """ Compute reward function 14 (sparse) """
        return self._combine(-0.001 if self.dist >= 0.001 else 10)

    def get_reward15(self):
        """ Compute reward function 15 (sparse + dense) """
        return self._combine(-self.dist if self.dist >= 0.001 else 1)

    def get_reward16(self):
        """ Compute reward function 16 (sparse + dense) """
        bonus = 0 if self.dist >= 0.001 else 10
        return self._combine(self.delta_dist + bonus)

    def get_reward17(self):
        """ Compute reward function 17 (dense) """
        return self._combine(-self.orient ** 2)

    def get_reward18(self):
        """ Compute reward function 18 (dense) """
        return self._combine(
            -self.dist ** 2,
            -self.alpha_reward * self.orient ** 2)

    def get_reward19(self):
        """ Compute reward function 19 (sparse + dense) """
        if self.dist >= 0.001 or self.orient >= 0.01:
            goal_term = -self.dist ** 2 - self.alpha_reward * self.orient ** 2
        else:
            goal_term = 1
        return self._combine(goal_term)

    def get_reward20(self):
        """ Compute reward function 20 (sparse + dense) + penalty for collision """
        return self._combine(
            -self.dist if self.dist >= 0.001 else 1,
            -1 if self.collision else 0)

    def get_reward21(self):
        """ Compute reward function 21 (dense): maximise action 1 """
        return self._combine(self.action[0])

    def get_reward22(self):
        """ Compute reward function 22 (sparse + dense) """
        return self._combine(-self.dist if self.dist >= 0.001 else 50)

    def get_reward23(self):
        """ Compute reward function 23 (sparse + dense) """
        return self._combine(-self.dist if self.dist >= 0.005 else 50)

    def get_reward24(self):
        """ Compute reward function 24 (sparse + dense) """
        return self._combine(-self.dist if self.dist >= 0.01 else 50)

    def get_reward25(self):
        """ Compute reward function 25 (sparse + dense) """
        # Staircase bonus as the end-effector gets closer to the goal.
        if self.dist >= 0.01:
            staircase = -self.dist
        elif self.dist >= 0.005:
            staircase = 10
        elif self.dist >= 0.001:
            staircase = 20
        else:
            staircase = 30
        return self._combine(staircase)

    def get_reward26(self):
        """ Compute reward function 26 (dense) """
        return self._combine(1 / self.dist)

    def get_reward27(self):
        """ Compute reward function 27 (dense) """
        return self._combine(-np.log(self.dist))
|
{"hexsha": "6a528725db11a5e3fad0097f89b95e8537c523d5", "size": 12575, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/gym_envs/gym_envs/jaco_env/env_description.py", "max_stars_repo_name": "PierreExeter/rl_reach", "max_stars_repo_head_hexsha": "4f9c46c8503a84edaa48f9dfd58054548552253a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2020-12-04T15:06:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T16:13:35.000Z", "max_issues_repo_path": "code/gym_envs/gym_envs/jaco_env/env_description.py", "max_issues_repo_name": "PierreExeter/rl_reach", "max_issues_repo_head_hexsha": "4f9c46c8503a84edaa48f9dfd58054548552253a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-03-02T12:16:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-21T02:18:16.000Z", "max_forks_repo_path": "code/gym_envs/gym_envs/jaco_env/env_description.py", "max_forks_repo_name": "PierreExeter/rl_reach", "max_forks_repo_head_hexsha": "4f9c46c8503a84edaa48f9dfd58054548552253a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-12-16T11:12:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-02T15:53:10.000Z", "avg_line_length": 28.5795454545, "max_line_length": 97, "alphanum_fraction": 0.5547514911, "include": true, "reason": "import numpy", "num_tokens": 3017}
|
import numpy as np
from bokeh.plotting import figure
from dq_poc.util import plot_grid
def plot(f):
    """Return a bokeh figure plotting *f* over [0, 2*3.14159]."""
    xs = np.linspace(0, 2 * 3.14159)
    fig = figure(plot_height=1500, plot_width=2000)
    fig.line(xs, f(xs))
    return fig
# Page title shown by the dashboard.
title = 'Coffee Machine Uptime'
# Two-column grid of plots.  NOTE(review): np.sin appears twice -- presumably
# placeholder demo content; confirm before relying on it.
content = plot_grid(2, plot(np.sin), plot(np.cos), plot(np.tan), plot(np.sin))
# Human-readable summary displayed alongside the plots.
description = 'Availability of the coffee machine. The availability of the machine itself as well as the supply of coffee beans are measured.'
|
{"hexsha": "a68b34b641c443df985e8fd562096f32bcf4e5ff", "size": 479, "ext": "py", "lang": "Python", "max_stars_repo_path": "dq_poc/content/telescope/coffee.py", "max_stars_repo_name": "hettlage/data-quality-poc", "max_stars_repo_head_hexsha": "a77ebd41285ddf587b6e73145a7c42e647e0ea77", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dq_poc/content/telescope/coffee.py", "max_issues_repo_name": "hettlage/data-quality-poc", "max_issues_repo_head_hexsha": "a77ebd41285ddf587b6e73145a7c42e647e0ea77", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dq_poc/content/telescope/coffee.py", "max_forks_repo_name": "hettlage/data-quality-poc", "max_forks_repo_head_hexsha": "a77ebd41285ddf587b6e73145a7c42e647e0ea77", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6111111111, "max_line_length": 142, "alphanum_fraction": 0.7160751566, "include": true, "reason": "import numpy", "num_tokens": 129}
|
//
// $Id$
//
// -------------------------------------------------------------------------
// This file is part of ZeroBugs, Copyright (c) 2010 Cristian L. Vlasceanu
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// -------------------------------------------------------------------------
//
#include <boost/bind.hpp>
#include <boost/python/extract.hpp>
#include "zdk/check_ptr.h"
#include "zdk/data_type.h"
#include "zdk/debug_sym.h"
#include "zdk/shared_string_impl.h"
#include "zdk/type_system.h"
#include "typez/public/debug_symbol.h"
#include "debug_sym_wrap.h"
#include "marshaller.h"
using namespace std;
using namespace boost;
using namespace boost::python;
// Out-of-line, no-throw destructor; the body is intentionally empty
// (members release their own resources).
DebugSymbolWrap::~DebugSymbolWrap() throw()
{
}
const char* DebugSymbolWrap::name() const
{
RefPtr<SharedString> v;
if (sym_)
{
v = sym_->name();
}
return v ? CHKPTR(v->c_str()) : "";
}
const char* DebugSymbolWrap::value() const
{
SharedString* v = NULL;
if (sym_)
{
v = sym_->value();
}
return v ? CHKPTR(v->c_str()) : "";
}
const char* DebugSymbolWrap::type_name() const
{
SharedString* v = NULL;
if (sym_)
{
v = sym_->type_name();
}
return v ? CHKPTR(v->c_str()) : "";
}
/**
 * @return the symbol's data type as a Python object; a ClassType is
 * exposed as such when the cast succeeds, otherwise the generic
 * DataType is returned.
 */
boost::python::object DebugSymbolWrap::type() const
{
    RefPtr<DataType> dataType;
    if (sym_)
    {
        dataType = sym_->type();
    }
    RefPtr<ClassType> classType = interface_cast<ClassType>(dataType);
    if (classType)
    {
        return object(classType);
    }
    return object(dataType);
}
/**
 * @return the thread the symbol belongs to, wrapped as a Python object
 * (holds a null RefPtr when no symbol is attached).
 */
object DebugSymbolWrap::thread() const
{
    RefPtr<Thread> owner;
    if (sym_)
    {
        owner = sym_->thread();
    }
    return object(owner);
}
/**
 * @return the process owning the symbol's thread, wrapped as a Python
 * object (null when no symbol or no thread is available).
 */
object DebugSymbolWrap::process() const
{
    RefPtr<Process> proc;
    if (sym_)
    {
        RefPtr<Thread> owner = sym_->thread();
        if (owner)
        {
            proc = owner->process();
        }
    }
    return object(proc);
}
/**
 * @return the TypeSystem interface of the owning process, wrapped as a
 * Python object (null when unavailable or the cast fails).
 */
object DebugSymbolWrap::type_system() const
{
    RefPtr<TypeSystem> typeSys;
    if (sym_)
    {
        RefPtr<Thread> owner = sym_->thread();
        if (owner)
        {
            typeSys = interface_cast<TypeSystem*>(owner->process());
        }
    }
    return object(typeSys);
}
/**
 * @return true if enumerating the symbol's children reports any
 * (delegates to enum_children); false when no symbol is attached.
 */
bool DebugSymbolWrap::has_children() const
{
    if (!sym_)
    {
        return false;
    }
    return sym_->enum_children(events());
}
namespace
{
    /**
     * Callback that collects DebugSymbol children into a Python list,
     * wrapping each one in a DebugSymbolWrap.  The list is guarded by a
     * mutex because notify() and list() may run on different threads.
     */
    class DebugSymbolObserver : public DebugSymbolCallback
    {
        mutable Mutex mutex_;
        boost::python::list list_;   // accumulated wrapped children
        DebugSymbolEvents* events_;

        // DebugSymbolCallback override: reads the symbol, wraps it and
        // appends it.  Always returns false -- NOTE(review): confirm the
        // meaning of the return value in the DebugSymbolCallback contract.
        bool notify(DebugSymbol* sym)
        {
            Lock<Mutex> lock(mutex_);
            sym->read(events_);
            list_.append(RefPtr<DebugSymbolWrap>(new DebugSymbolWrap(sym, events_)));
            return false;
        }

    BEGIN_INTERFACE_MAP(DebugSymbolObserver)
        INTERFACE_ENTRY(DebugSymbolCallback)
    END_INTERFACE_MAP()

    public:
        explicit DebugSymbolObserver(DebugSymbolEvents* events)
            : events_(events)
        {
        }

        // Snapshot of the collected children (lock held while copying).
        boost::python::list list() const
        {
            Lock<Mutex> lock(mutex_);
            return list_;
        }
    };
}
// Thread-marshalled helper: enumerate sym's children into cb.  Takes the
// RefPtr by value so the symbol stays alive for the cross-thread call.
static void
enum_children(RefPtr<DebugSymbol> sym, DebugSymbolCallback* cb)
{
    sym->enum_children(cb);
}
/**
 * @return the symbol's children as a Python list of DebugSymbolWrap.
 * The enumeration runs once, on the main debugger thread, and the
 * result is cached in children_ for subsequent calls.
 */
boost::python::list DebugSymbolWrap::children() const
{
    if (childrenRead_)
    {
        return children_;   // cached from a previous enumeration
    }
    DebugSymbolObserver observer(events());
    ThreadMarshaller::instance().send_command(
        bind(enum_children, sym_, &observer),
        __func__);
    children_ = observer.list();
    childrenRead_ = true;
    return children_;
}
// Thread-marshalled helper: append child to sym's children.  RefPtr
// arguments are taken by value to keep both symbols alive for the call.
static void add(RefPtr<DebugSymbol> sym, RefPtr<DebugSymbol> child)
{
    sym->add_child(child.get());
}
/**
 * Append @a child's wrapped symbol to this symbol's children, on the
 * main debugger thread.  A null child is silently ignored.
 */
void DebugSymbolWrap::add_child(RefPtr<DebugSymbolWrap> child)
{
    if (!child)
    {
        return;
    }
    ThreadMarshaller::instance().send_command(bind(add, sym_, child->sym_), __func__);
}
/**
 * Read the symbol's value.  Intended to run on the main debugger
 * thread -- read() below marshals the call here when invoked from
 * another thread.  No-op when no symbol is attached.
 */
void DebugSymbolWrap::read_on_main_thread()
{
    if (sym_)
    {
        sym_->read(events());
        // Explicitly limit the ExprEvents object's life time;
        // the DebuggerEngine keeps track of all live ExprEvents
        // instances, and they should not live beyond expression
        // evaluation, otherwise the engine may get confused.
        //
        // Resetting (null-ing) the object completely would not work
        // (because we may need to re-evaluate this DebugSymbol).
        if (context_)
        {
            context_->clone_expr_events();
        }
#if 0
        // Disabled debug tracing.
        clog << __func__ << " " << sym_->name();
        clog << ": events=" << events_;
        if (events_)
        {
            clog << " (" << events_->_name() << ")";
        }
        clog << endl;
        if (sym_->value())
        {
            clog << __func__ << ": " << sym_->value()->c_str() << endl;
        }
#endif
    }
}
void DebugSymbolWrap::read()
{
Marshaller& marshal = ThreadMarshaller::instance();
if (marshal.is_main_thread())
{
read_on_main_thread();
}
else
{
marshal.send_command(bind(&DebugSymbolWrap::read_on_main_thread, this), __func__);
}
}
/**
 * Set the symbol's type name from @a type.
 * @throw runtime_error when @a type is null or when the wrapped symbol
 * does not expose the DebugSymbolImpl interface.
 */
void DebugSymbolWrap::set_type_name(DataType* type)
{
    if (!type)
    {
        // Guard the raw pointer (this method is reachable from Python)
        // before dereferencing it below; previously a null argument
        // crashed with undefined behavior.
        throw runtime_error("DebugSymbol::set_type_name(): null type");
    }
    if (DebugSymbolImpl* impl = interface_cast<DebugSymbolImpl*>(sym_.get()))
    {
        impl->set_type_name(type->name());
    }
    else
    {
        throw runtime_error("DebugSymbol::set_type_name(): unknown impl");
    }
}
/**
 * Create a brand new debug symbol on the main debugger thread.
 * The symbol is fabricated through the thread's TypeSystem (no reader
 * is attached); declaration file/line and the return-value flag are
 * copied from the template symbol.  The result is stored into @a sym,
 * which is left unchanged when no thread or TypeSystem is available.
 */
static void
create_symbol(RefPtr<DebugSymbol> tmpl,
              RefPtr<DataType> type,
              const string name, // by value on purpose
              addr_t addr,
              RefPtr<DebugSymbol>& sym)
{
    if (RefPtr<Thread> thread = tmpl->thread())
    {
        if (RefPtr<TypeSystem> typesys =
                interface_cast<TypeSystem*>(thread->process()))
        {
            sym = typesys->create_debug_symbol(
                NULL,   // no reader, since we make up the sym
                thread.get(),
                type.get(),
                shared_string(name).get(),
                addr,
                tmpl->decl_file(),
                tmpl->decl_line(),
                tmpl->is_return_value());
        }
    }
}
/**
 * Fabricate a new wrapped symbol named @a name of type @a type at
 * address @a addr, using this symbol as the template.  The creation
 * happens on the main debugger thread; when no symbol is attached the
 * returned wrapper holds a null symbol.
 */
RefPtr<DebugSymbolWrap>
DebugSymbolWrap::create(const char* name, DataType* type, addr_t addr)
{
    assert(name);
    RefPtr<DebugSymbol> newSym;
    if (sym_)
    {
        ThreadMarshaller::instance().send_command(
            bind(create_symbol, sym_, type, name, addr, boost::ref(newSym)),
            __func__);
    }
    return new DebugSymbolWrap(newSym, events());
}
/**
 * Mark the symbol as constant.
 * @throw runtime_error when the wrapped symbol does not expose the
 * DebugSymbolImpl interface.
 */
void DebugSymbolWrap::set_constant()
{
    DebugSymbolImpl* impl = interface_cast<DebugSymbolImpl*>(sym_.get());
    if (!impl)
    {
        throw runtime_error("DebugSymbol::set_constant(): unknown impl");
    }
    impl->set_constant();
}
/**
 * Set the symbol's value string.
 * @throw runtime_error when the wrapped symbol does not expose the
 * DebugSymbolImpl interface.
 */
void DebugSymbolWrap::set_value(const char* value)
{
    DebugSymbolImpl* impl = interface_cast<DebugSymbolImpl*>(sym_.get());
    if (!impl)
    {
        throw runtime_error("DebugSymbol::set_value(): unknown impl");
    }
    impl->set_value(shared_string(value));
}
/**
 * @return the symbol's tooltip text, or "" when no symbol is attached
 * or no tooltip is set.
 */
const char* DebugSymbolWrap::tooltip() const
{
    if (sym_.get())
    {
        const char* tip = sym_->tooltip();
        if (tip)
        {
            return tip;
        }
    }
    return "";
}
/**
 * Set the symbol's tooltip text; an empty string is passed through as
 * NULL, matching the original behavior.
 * @throw runtime_error when the wrapped symbol does not expose the
 * DebugSymbolImpl interface.
 */
void DebugSymbolWrap::set_tooltip(const char* tip)
{
    DebugSymbolImpl* impl = interface_cast<DebugSymbolImpl*>(sym_.get());
    if (!impl)
    {
        throw runtime_error("DebugSymbol::set_tooltip(): unknown impl");
    }
    impl->set_tooltip((tip && !tip[0]) ? NULL : tip);
}
/**
 * Change the numeric base used to render the symbol's value; the
 * cached value string is invalidated only when the effective base
 * actually changed.
 */
void DebugSymbolWrap::set_numeric_base(int numericBase)
{
    const int oldBase = events_.numeric_base(sym_.get());
    events_.set_numeric_base(numericBase);
    if (events_.numeric_base(sym_.get()) != oldBase)
    {
        set_value(NULL);
    }
}
// vim: tabstop=4:softtabstop=4:expandtab:shiftwidth=4
|
{"hexsha": "2cd65db53b2ed955f5c7b3a961687a7dbf31327b", "size": 8080, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "zero_python/debug_sym_wrap.cpp", "max_stars_repo_name": "cristivlas/zerobugs", "max_stars_repo_head_hexsha": "5f080c8645b123d7887fd8a64f60e8d226e3b1d5", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2018-03-19T23:27:47.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-24T16:15:19.000Z", "max_issues_repo_path": "zero_python/debug_sym_wrap.cpp", "max_issues_repo_name": "cristivlas/zerobugs", "max_issues_repo_head_hexsha": "5f080c8645b123d7887fd8a64f60e8d226e3b1d5", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "zero_python/debug_sym_wrap.cpp", "max_forks_repo_name": "cristivlas/zerobugs", "max_forks_repo_head_hexsha": "5f080c8645b123d7887fd8a64f60e8d226e3b1d5", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-11-28T05:39:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-28T05:39:05.000Z", "avg_line_length": 21.4323607427, "max_line_length": 90, "alphanum_fraction": 0.5768564356, "num_tokens": 1889}
|
#!/usr/bin/env python3
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import argparse
from ansible.parsing.dataloader import DataLoader
from ansible.inventory.manager import InventoryManager
# Per-resource plotting configuration, keyed by the --resource choice.
# Tuple fields: (PerfMon counter name, y label, y-axis limit, figure title,
# output file name, divisor).  divide_base == -1 means "plot raw values";
# any value > 1 rescales the raw counter (e.g. bytes -> MB or KiB).
_RESOURCES = {
    'cpu_s': ('\\\\fluentd-winserv\\Process(ruby)\\% Processor Time',
              'CPU Usage (%)', 100,
              'CPU Usage (Supervisor)', 'CPU_usage_on_supervisor.png', -1),
    'cpu_w': ('\\\\fluentd-winserv\\Process(ruby#1)\\% Processor Time',
              'CPU Usage (%)', 100,
              'CPU Usage (Worker)', 'CPU_usage_on_worker.png', -1),
    'private_bytes_s': ('\\\\fluentd-winserv\\Process(ruby)\\Private Bytes',
                        'Private Bytes Usage (MB)', 100,
                        'Private Bytes Usage (Supervisor)',
                        'Private_Bytes_usage_on_supervisor.png', 1024 * 1024),
    'private_bytes_w': ('\\\\fluentd-winserv\\Process(ruby#1)\\Private Bytes',
                        'Private Bytes (MB)', 100,
                        'Private Bytes Usage (Worker)',
                        'Private_Bytes_usage_on_worker.png', 1024 * 1024),
    'working_set_s': ('\\\\fluentd-winserv\\Process(ruby)\\Working Set',
                      'Working Set (MB)', 100,
                      'Working Set Usage (Supervisor)',
                      'Working_Set_usage_on_supervisor.png', 1024 * 1024),
    'working_set_w': ('\\\\fluentd-winserv\\Process(ruby#1)\\Working Set',
                      'Working Set (MB)', 100,
                      'Working Set Usage (Worker)',
                      'Working_Set_usage_on_worker.png', 1024 * 1024),
    'sent_bytes': ('\\\\fluentd-winserv\\Network Interface(Microsoft Hyper-V Network Adapter)\\Bytes Sent/sec',
                   'Bytes Sent (KiB/sec)', 2000,
                   'Bytes Sent Usage', 'Bytes_Sent_usage.png', 1024),
    'received_bytes': ('\\\\fluentd-winserv\\Network Interface(Microsoft Hyper-V Network Adapter)\\Bytes Received/sec',
                       'Bytes Received (KiB/sec)', 2000,
                       'Bytes Received Usage', 'Bytes_Received_usage.png', 1024),
    'disk_reads': ('\\\\fluentd-winserv\\PhysicalDisk(_Total)\\Disk Reads/sec',
                   'Disk Read (bytes/sec)', 1000,
                   'Disk Read Usage', 'Disk_Read_usage.png', -1),
    'disk_writes': ('\\\\fluentd-winserv\\PhysicalDisk(_Total)\\Disk Writes/sec',
                    'Disk Write (bytes/sec)', 1000,
                    'Disk Write Usage', 'Disk_Write_usage.png', -1),
}
parser = argparse.ArgumentParser(description='Visualize data as plot')
# BUG FIX: the default used to be 'cpu', which is not a valid choice; when
# --resource was omitted, no if/elif branch matched, none of the config
# variables were assigned, and the script crashed later with a NameError.
parser.add_argument('--resource',
                    choices=list(_RESOURCES),
                    default='cpu_s')
parser.add_argument('--base-path', default='')
args = parser.parse_args()
# The x label is the same for every resource.
xlabel_message = 'message length (bytes)'
(resource_key, ylabel_message, ylimit,
 fig_title, fig_name, divide_base) = _RESOURCES[args.resource]
# Resolve where the benchmark CSV files live.  Without --base-path the
# location is derived from the Ansible inventory: the first host of the
# 'windows' group is taken to be the collector machine whose fetched
# output directory is used.
if args.base_path == '':
    pwd = os.path.dirname(os.path.realpath(__file__))
    inventory_file_name = os.path.join(pwd, '..', 'ansible/hosts')
    data_loader = DataLoader()
    inventory = InventoryManager(loader=data_loader,
                                 sources=[inventory_file_name])
    # NOTE(review): assumes the inventory always contains a non-empty
    # 'windows' group -- a KeyError/IndexError is raised otherwise.
    collector = inventory.get_groups_dict()['windows'][0]
    print(collector)
    base_path = os.path.join(pwd, '..', "ansible", "output", collector, "C:", "tools")
else:
    base_path = args.base_path
print(base_path)
sns.set(font_scale = 1.5)
sns.set_style('whitegrid')
sns.set_palette('Set3')

# Every benchmark scenario as an (events, lines) pair.  Each corresponds to
# a '<events>events-<lines>lines-resource-usage.csv' file under base_path
# and to a '<events>events_<lines>lines' column in the plotted DataFrame.
# (Replaces 19 copy-pasted read_csv / dict-entry / median statements.)
scenarios = [
    (50, 50), (50, 1200), (50, 1500), (50, 2000),
    (80, 1200), (80, 1500),
    (100, 100), (100, 200), (100, 400), (100, 800), (100, 1000),
    (120, 200), (120, 400), (120, 800),
    (140, 200), (140, 400), (140, 600),
    (150, 200), (150, 300),
]

def _load_usage_csv(events, lines):
    """Read one benchmark CSV; skipfooter=2 drops the two trailer lines."""
    path = os.path.join(base_path,
                        '{0}events-{1}lines-resource-usage.csv'.format(events, lines))
    return pd.read_csv(path, sep=',', na_values='.', skipfooter=2, engine='python')

frames = {'{0}events_{1}lines'.format(e, l): _load_usage_csv(e, l)
          for e, l in scenarios}
print(frames['50events_50lines'])

# One column per scenario, restricted to the selected resource counter.
df = pd.DataFrame({name: frame[resource_key] for name, frame in frames.items()})
if divide_base > 1:
    # Rescale raw counters (e.g. bytes -> MB); divide_base == -1 means raw.
    df = df.divide(divide_base)

# Median of each scenario, rounded for display as box annotations.
medians = {name: np.round(df[name].median(), 2) for name in df.columns}
print(medians)

df_melt = pd.melt(df)
print(df_melt.head())

fig = plt.figure(figsize=(23, 12))
plt.title(fig_title)
ax = fig.add_subplot(1, 1, 1)
ax.set_ylim(0, ylimit)
plot = sns.boxplot(x='variable', y='value', data=df_melt, showfliers=False,
                   ax=ax, showmeans=True)
plot.set(
    xlabel=xlabel_message,
    ylabel=ylabel_message
)
plot.set_xticklabels(plot.get_xticklabels(), rotation=30, horizontalalignment='right')

# Annotate each box with its median value.
for tick, name in enumerate(df.columns):
    plot.text(tick + 0.1, medians[name], medians[name],
              color='w', weight='semibold', size=12,
              bbox=dict(facecolor='#445A64'))

# Overlay the raw observations on top of the boxes.
chart = sns.stripplot(x='variable', y='value', data=df_melt,
                      jitter=False, color='black', ax=ax)
chart.set(
    xlabel=xlabel_message,
    ylabel=ylabel_message
)
chart.set_xticklabels(chart.get_xticklabels(), rotation=30, horizontalalignment='right')
plt.savefig(fig_name)
|
{"hexsha": "0c9629df49e606e445dd6f6048d2aa7719b46980", "size": 12080, "ext": "py", "lang": "Python", "max_stars_repo_path": "winevtlog_bench/visualize/plot_pandas_tailing_Usage.py", "max_stars_repo_name": "kenhys/fluentd-benchmark-azure-environment", "max_stars_repo_head_hexsha": "024b6fe3c9ac667562e444e7ae216e2f30d8a0b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "winevtlog_bench/visualize/plot_pandas_tailing_Usage.py", "max_issues_repo_name": "kenhys/fluentd-benchmark-azure-environment", "max_issues_repo_head_hexsha": "024b6fe3c9ac667562e444e7ae216e2f30d8a0b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-06-29T06:01:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-10T00:39:25.000Z", "max_forks_repo_path": "winevtlog_bench/visualize/plot_pandas_tailing_Usage.py", "max_forks_repo_name": "kenhys/fluentd-benchmark-azure-environment", "max_forks_repo_head_hexsha": "024b6fe3c9ac667562e444e7ae216e2f30d8a0b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-02T00:43:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-02T00:43:20.000Z", "avg_line_length": 52.0689655172, "max_line_length": 159, "alphanum_fraction": 0.7053807947, "include": true, "reason": "import numpy", "num_tokens": 3351}
|
# Programador Sergio Luis Beleño Díaz
import cv2
import numpy as np
from tkinter import *
from tensorflow.keras.models import load_model
from easygui import *
from lime import lime_image
from PIL import ImageTk, Image
from skimage.segmentation import mark_boundaries
from time import sleep
# Load the best model trained: a Keras model saved in the local 'Model'
# directory, used by detection() for classification.
model = load_model('Model')
# Main Tk window of the application.
root = Tk()
root.title('Rx-Ray')
def show_image():
    """Ask the user for an image file, load it and show it in the GUI.

    Side effects: sets the module-level ``name_file`` (chosen path) and
    ``input_image`` (512x512 BGR array consumed later by ``detection()``),
    and displays the image in both image slots of the window.
    """
    # The original code wrapped each `global` statement in a pointless
    # try/except -- `global` is a declaration and can never raise.
    # `inp_img` MUST stay global: if the Tk PhotoImage were a local, it
    # would be garbage-collected and the displayed image would vanish.
    global name_file, inp_img, input_image
    name_file = fileopenbox()
    inp_img = cv2.imread(name_file)
    # The network expects 512x512 inputs.
    inp_img = cv2.resize(inp_img, (512, 512), interpolation=cv2.INTER_CUBIC)
    input_image = inp_img
    # Convert the BGR ndarray to a Tk image and show it on both sides.
    inp_img = Image.fromarray(inp_img)
    inp_img = ImageTk.PhotoImage(inp_img)
    my_label = Label(image=inp_img)
    my_label.grid(row=0, column=0, columnspan=1)
    my_label2 = Label(image=inp_img)
    my_label2.grid(row=0, column=2, columnspan=1)
def save_image():
    """Save the last annotated prediction (``endo``) as a numbered PNG.

    The user picks a target directory; files are named predicted_<n>.png
    with <n> increasing on every save so earlier results are kept.
    """
    global contad
    # BUG FIX: the old code did `contad = 1` inside a try block that could
    # never raise, so the except branch (`contad = contad + 1`) was dead
    # code and every save overwrote predicted_1.png.  Increment instead,
    # starting at 1 on the first save of the session.
    try:
        contad += 1
    except NameError:
        contad = 1
    try:
        dir_save = diropenbox()
        # NOTE(review): "\p" is not a valid escape sequence; os.path.join
        # would be safer, but the literal is kept to preserve the exact
        # file names produced on Windows.
        name_of_save = (str(dir_save) + "\predicted_" + str(contad) + ".png")
        cv2.imwrite(name_of_save, endo)
    except Exception:
        # Best-effort: dialog cancelled, no detection run yet, or bad path.
        print("Don't use dots in the name of the file")
def detection():
    """Classify the loaded X-ray, build a LIME-based heat overlay and show
    the annotated result plus per-class probabilities in the GUI.

    Reads the module-level ``input_image`` (set by show_image) and ``model``;
    sets the module-level ``endo`` (BGR result image used by save_image).
    """
    # No-op guard kept from the original code: `global` cannot raise, so
    # the except branch is dead.  end2 is made global on purpose.
    try:
        global end2
    except:
        print()
    # Number of LIME superpixel features to keep in the explanation mask.
    num_c = 8
    cam = input_image
    # ImagenInput
    inputoimage = cam
    # Add the batch dimension: (1, 512, 512, 3).
    x = inputoimage.reshape((-1, 512, 512, 3))
    predictor = model.predict(x)
    predIdxs = np.argmax(predictor, axis=1)
    # Explain the prediction with LIME to localize the relevant region.
    explainer = lime_image.LimeImageExplainer()
    explanation = explainer.explain_instance(x[-1], model.predict,
                                             hide_color=0,
                                             num_features=100,
                                             num_samples=1000)
    print("Predicted:", predIdxs[-1])
    temp, mask = explanation.get_image_and_mask(explanation.top_labels[0],
                                                positive_only=True,
                                                num_features=num_c,
                                                hide_rest=True)
    mask = np.array(mark_boundaries(temp / 2 + 1, mask))
    # Grow and repeatedly blur the LIME mask to turn the hard superpixel
    # boundaries into a smooth blob.
    kernel = np.ones((30, 30), np.uint8)
    mask = cv2.dilate(mask, kernel, iterations=2)
    mask = cv2.blur(mask, (30, 30))
    mask = cv2.blur(mask, (15, 15))
    mask = cv2.blur(mask, (10, 10))
    mask = cv2.blur(mask, (5, 5))
    mask = np.array(mask, dtype=np.uint8)
    mask = cv2.medianBlur(mask, 5)
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    # Map the predicted class index to its display label.
    if predIdxs[-1] == 0:
        message = 'Healthy'
        #mask = mask*0
    if predIdxs[-1] == 1:
        message = 'Pneumonia &'
        messagetwo = 'Covid-19'
    if predIdxs[-1] == 2:
        message = 'Cardiomegaly'
    if predIdxs[-1] == 3:
        message = 'Other Diseases'
    if predIdxs[-1] == 4:
        message = 'Pleural Effusion'
    mask2 = cv2.applyColorMap((mask), cv2.COLOR MAP_JET) if False else cv2.applyColorMap((mask), cv2.COLORMAP_JET)
    #heatmap
    mask = cv2.blur(mask, (60, 60))
    mask = cv2.blur(mask, (30, 30))
    mask = cv2.blur(mask, (15, 15))
    mask = cv2.blur(mask, (10, 10))
    mask = cv2.blur(mask, (5, 5))
    mask = cv2.applyColorMap(mask, cv2.COLORMAP_HOT)
    #heatmap
    # NOTE(review): this `mask` combination is computed but never used
    # afterwards -- only mask2 feeds the blend below.  Left as-is.
    mask = ((mask * 1.1 + mask2 * 0.7) / 255) * (3 / 2)
    # Blend the original image with the JET heatmap.
    end = cv2.addWeighted(x[-1] / 255, 0.8, mask2 / 255, 0.3, 0)
    # font
    font = cv2.FONT_HERSHEY_SIMPLEX
    # org
    org = (20, 50)
    # fontScale
    fontScale = 1.5
    # Blue color in BGR
    color = (203, 194, 126)
    # Line thickness of 3 px
    thickness = 3
    # Draw the label twice: a thick dark pass as outline, then the colored
    # text on top.  NOTE(review): `end * 250` rescales the [0,1] blend back
    # to ~[0,255] -- presumably 255 was intended; confirm.
    end2 = cv2.putText((end * 250), str(message), org, font,
                       fontScale, (44, 4, 4), 10, cv2.LINE_AA)
    # Using cv2.putText() method
    end2 = cv2.putText((end2), str(message), org, font,
                       fontScale, color, thickness, cv2.LINE_AA)
    if message == 'Pneumonia &':
        # Second line of the two-line "Pneumonia & Covid-19" label.
        end2 = cv2.putText((end2), str(messagetwo), (20, 100), font,
                           fontScale, (44, 4, 4), 10, cv2.LINE_AA)
        # Using cv2.putText() method
        end2 = cv2.putText((end2), str(messagetwo), (20, 100), font,
                           fontScale, color, thickness, cv2.LINE_AA)
    # Per-class probabilities as integer percentages.
    preone = int(np.round((predictor[0][0]) * 100))
    pretwo = int(np.round(predictor[0][1] * 100))
    prethr = int(np.round(predictor[0][2] * 100))
    prefou = int(np.round(predictor[0][3] * 100))
    prefiv = int(np.round(predictor[0][4] * 100))
    try:
        print(str(type(end2)))
        print(str((end2.shape)))
    except:
        print("Nothing")
    # Round-trip through disk to normalize dtype/range before display.
    cv2.imwrite("n.png", end2)
    sleep(10)
    try:
        global endo
    except:
        print()
    try:
        end2 = cv2.imread("n.png")
        endo = end2
        end2 = cv2.cvtColor(end2, cv2.COLOR_BGR2RGB)
    except:
        # Retry once in case the file was not flushed yet.
        sleep(10)
        end2 = cv2.imread("n.png")
        endo = end2
        end2 = cv2.cvtColor(end2, cv2.COLOR_BGR2RGB)
    # Show the annotated image and the probability breakdown in the GUI.
    end2 = Image.fromarray(end2)
    end2 = ImageTk.PhotoImage(end2)
    my_label_end2 = Label(image=end2)
    my_label_end2.grid(row=0, column=2, columnspan=1)
    my_label_dis = Label(text=('Healthy: ' + str(preone) + '% \n' +
                               'Pneumonia & Covid-19: ' + str(pretwo) + '% \n' +
                               'Cardiomegaly: ' + str(prethr) + '% \n' +
                               'Other Diseases: ' + str(prefou) + '% \n' +
                               'Pleural Effusion: ' + str(prefiv) + '% \n'))
    my_label_dis.grid(row=0, column=1)
# Wire the three actions to buttons along the bottom row of the window.
button_open = Button(root, text="Open", command=show_image)
button_save = Button(root, text="Save", command=save_image)
button_Detect = Button(root, text="Detect", command=detection)
button_open.grid(row=1, column=0)
button_save.grid(row=1, column=1)
button_Detect.grid(row=1, column=2)
# Enter the Tk event loop (blocks until the window is closed).
root.mainloop()
|
{"hexsha": "6a391a07d0d4218530607cdde08c55a2dc7fe7c8", "size": 5814, "ext": "py", "lang": "Python", "max_stars_repo_path": "Tkinter/RxForCovid-19.py", "max_stars_repo_name": "Serbeld/RX-COVID-19", "max_stars_repo_head_hexsha": "d5936dbccdeed7dc80fbdbcc5b19c4c7eefcc237", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-24T15:28:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-24T15:28:17.000Z", "max_issues_repo_path": "Tkinter/RxForCovid-19.py", "max_issues_repo_name": "Serbeld/RX-COVID-19", "max_issues_repo_head_hexsha": "d5936dbccdeed7dc80fbdbcc5b19c4c7eefcc237", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tkinter/RxForCovid-19.py", "max_forks_repo_name": "Serbeld/RX-COVID-19", "max_forks_repo_head_hexsha": "d5936dbccdeed7dc80fbdbcc5b19c4c7eefcc237", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-19T02:49:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-30T00:01:31.000Z", "avg_line_length": 27.4245283019, "max_line_length": 77, "alphanum_fraction": 0.5780873753, "include": true, "reason": "import numpy", "num_tokens": 1731}
|
# set to use CPU
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
os.environ['KERAS_BACKEND'] = 'tensorflow'
#os.environ['KERAS_BACKEND'] = 'theano'
import tensorflow as tf
from tensorflow import keras
print(keras.__version__)
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Model
class DNN(Model):
    """Fully connected classifier: two hidden ReLU layers, dropout after
    each, and a softmax output layer.

    Args:
        Nin:  input feature size (kept for signature compatibility; Dense
              infers the input dimension from the first batch).
        Nh_l: sizes of the two hidden layers.
        Nout: number of output classes.
    """
    def __init__(self, Nin=2, Nh_l=[2,2], Nout=2):
        super(DNN, self).__init__()
        self.dense1 = Dense(Nh_l[0], activation='relu')
        self.dense2 = Dense(Nh_l[1], activation='relu')
        self.dense3 = Dense(Nout, activation='softmax')
        # BUG FIX: the Dropout layers used to be instantiated inside
        # call(), creating brand-new (untracked) layer objects on every
        # forward pass; create them once here instead.
        self.drop1 = Dropout(0.5)
        self.drop2 = Dropout(0.25)

    def call(self, x):
        # Dropout is only active in training mode (resolved from the
        # surrounding call context by Keras).
        x = self.drop1(self.dense1(x))
        x = self.drop2(self.dense2(x))
        return self.dense3(x)
import numpy as np
from tensorflow.keras import datasets # mnist
def Data_func():
    """Load MNIST and return flattened, [0, 1]-scaled float32 splits."""
    (X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
    # Flatten each HxW image into a single feature vector.
    n_pixels = X_train.shape[1] * X_train.shape[2]
    X_train = (X_train.reshape(-1, n_pixels) / 255.0).astype(np.float32)
    X_test = (X_test.reshape(-1, n_pixels) / 255.0).astype(np.float32)
    return (X_train, y_train), (X_test, y_test)
# Loss / optimizer / metric objects shared by train() and validation().
# BUG FIX: DNN's final Dense layer applies a softmax, so the model outputs
# probabilities, not logits; from_logits must therefore be False.  The old
# from_logits=True treated the probabilities as logits, effectively
# applying softmax twice and distorting the loss and its gradients.
cost_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=False)
opt = keras.optimizers.Adam()
tr_loss = keras.metrics.Mean(name='train_loss')
te_loss = keras.metrics.Mean(name='test_loss')
tr_acc = keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
te_acc = keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
def train(model, X_train, Y_train, N_tr, batch_size):
    """Run one epoch of mini-batch gradient descent over the training set.

    Updates the module-level opt, tr_loss and tr_acc objects in place.
    """
    for b in range(N_tr // batch_size):
        # BUG FIX: the slices used to be [batch_size*(b-1):batch_size*b],
        # which makes the first batch the empty slice X[-bs:0] and never
        # uses the final batch; batch b is [bs*b : bs*(b+1)].
        X_tr_b = X_train[batch_size * b:batch_size * (b + 1)]
        Y_tr_b = Y_train[batch_size * b:batch_size * (b + 1)]
        with tf.GradientTape() as tape:
            pred = model(X_tr_b, training=True)
            cost = cost_fn(Y_tr_b, pred)
        grad = tape.gradient(cost, model.trainable_variables)
        opt.apply_gradients(zip(grad, model.trainable_variables))
        # Accumulate running loss and accuracy for reporting.
        tr_loss(cost)
        tr_acc(Y_tr_b, pred)
def validation(model, X_test, Y_test):
    # Evaluate on the full test set in one pass and fold the results into
    # the module-level te_loss / te_acc metric accumulators.
    pred = model(X_test, training=False)
    cost = cost_fn(Y_test, pred)
    te_loss(cost)
    te_acc(Y_test, pred)
from keraspp.skeras import plot_loss, plot_acc
import matplotlib.pyplot as plt
def main(epochs=20):
    """Train the DNN on MNIST for `epochs` epochs, then plot loss/accuracy.

    NOTE(review): the module-level metric objects (tr_loss, tr_acc,
    te_loss, te_acc) are never reset between epochs here, so the printed
    per-epoch numbers accumulate across epochs -- confirm this is intended.
    """
    Nin = 784                      # 28*28 flattened MNIST image
    Nh_l = [100, 50]               # hidden layer sizes
    number_of_class = 10
    Nout = number_of_class
    data = Data_func()
    model = DNN(Nin, Nh_l, Nout)
    batch_size = 100
    (X_train, Y_train), (X_test, Y_test) = data
    N_tr = X_train.shape[0]
    # History containers shaped like Keras' History.history so the
    # keraspp.skeras plot helpers can consume them.
    loss_l = {"loss":[], "val_loss":[]}
    acc_l = {"accuracy":[], "val_accuracy":[]}
    for epoch in range(epochs):
        # Train
        train(model, X_train, Y_train, N_tr, batch_size)
        # Validation
        validation(model, X_test, Y_test)
        print(
            f'Epoch {epoch}, '
            f'Loss: {tr_loss.result():.3}, '
            f'Acc: {tr_acc.result() * 100:.3}, '
            f'Test Loss: {te_loss.result():.3}, '
            f'Test Accuracy: {te_acc.result() * 100:.3}')
        loss_l["loss"].append(tr_loss.result())
        acc_l["accuracy"].append(tr_acc.result())
        loss_l["val_loss"].append(te_loss.result())
        acc_l["val_accuracy"].append(te_acc.result())
    plot_loss(loss_l)
    plot_acc(acc_l)
    plt.show()
# Script entry point: train with the default 20 epochs.
if __name__ == '__main__':
    main()
|
{"hexsha": "c66403e7ddcea8e4db7e51677f7eeb13ee5059cc", "size": 3403, "ext": "py", "lang": "Python", "max_stars_repo_path": "cpu_only/ex9_5_tfwithkeras2-cpu.py", "max_stars_repo_name": "jskDr/keraspp_2022", "max_stars_repo_head_hexsha": "e10f4f849ad6a7354a05084e2cd9cec8acd62ef2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-09-21T15:35:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-14T12:14:44.000Z", "max_issues_repo_path": "ex9_5_tfwithkeras2-cpu.py", "max_issues_repo_name": "jskDr/keraspp_2021", "max_issues_repo_head_hexsha": "dc46ebb4f4dea48612135136c9837da7c246534a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ex9_5_tfwithkeras2-cpu.py", "max_forks_repo_name": "jskDr/keraspp_2021", "max_forks_repo_head_hexsha": "dc46ebb4f4dea48612135136c9837da7c246534a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-29T13:15:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T13:15:22.000Z", "avg_line_length": 31.2201834862, "max_line_length": 71, "alphanum_fraction": 0.6294446077, "include": true, "reason": "import numpy", "num_tokens": 941}
|
! Compiler test case: fill a 3x3x3 integer array with 1..27 using a
! permuted subscript order, then print every element a(k,j,i) that is not
! greater than its index-transposed counterpart a(i,j,k).
program pgm
  integer :: a(3,3,3), i , j, k, c
  ! Fill the array; note the subscript order (j,i,k) deliberately differs
  ! from the loop nesting order (i,j,k).
  c = 1
  do i = 1, 3
    do j = 1, 3
      do k = 1, 3
        a(j,i,k) = c
        c = c + 1
      enddo
    enddo
  enddo
  ! Compare each element against its (i,k)-swapped counterpart.
  do k = 1, 3
    do j = 1, 3
      do i = 1, 3
        if (a(k,j,i) <= a(i,j,k)) then
          print *, a(k,j,i)
        endif
      enddo
    enddo
  enddo
end
|
{"hexsha": "d73d8e304410a677c7549783e3888a30b8dbeace", "size": 249, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/mlir_out_tests/array_multidim3.f90", "max_stars_repo_name": "clementval/fc", "max_stars_repo_head_hexsha": "a5b444963c1b46e4eb34d938d992836d718010f7", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/mlir_out_tests/array_multidim3.f90", "max_issues_repo_name": "clementval/fc", "max_issues_repo_head_hexsha": "a5b444963c1b46e4eb34d938d992836d718010f7", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/mlir_out_tests/array_multidim3.f90", "max_forks_repo_name": "clementval/fc", "max_forks_repo_head_hexsha": "a5b444963c1b46e4eb34d938d992836d718010f7", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 10.375, "max_line_length": 32, "alphanum_fraction": 0.5060240964, "num_tokens": 141}
|
import numpy as np
import pytest
from marl_coop.component.sum_tree import SumTree
def test_update_a_3_leaf_tree_works():
    '''
          6
        /   \
       4     2
      / \
     3   1
    '''
    st = SumTree(3)
    for value, priority in [(10, 2), (20, 3), (30, 1)]:
        st.add(value, priority)
    root = st.tree
    # Each internal node's priority must equal the sum of its children.
    assert root.priority == 6
    assert root.left_node.priority == 4
    assert root.right_node.priority == 2
    assert root.left_node.left_node.priority == 3
    assert root.left_node.right_node.priority == 1
    # Stored values keep insertion order.
    assert np.all(st.values == np.array([10, 20, 30]))
def test_update_a_3_leaf_tree_more_than_3_times_works():
    '''
          4
        /   \
       3     1
      / \
     2   1
    '''
    # Adding more entries than the capacity (3) wraps around and
    # overwrites the oldest leaves.
    st = SumTree(3)
    for value, priority in [(10, 2), (20, 3), (30, 1), (300, 1), (200, 2)]:
        st.add(value, priority)
    root = st.tree
    assert root.priority == 4
    assert root.left_node.priority == 3
    assert root.right_node.priority == 1
    assert root.left_node.left_node.priority == 2
    assert root.left_node.right_node.priority == 1
    # The first two slots were overwritten by the wrap-around adds.
    assert np.all(st.values == np.array([300, 200, 30]))
def test_update_a_7_leaf_tree_works():
    '''
              22
            /    \
          16      6
         /  \    / \
        7    9  5   1
       / \  / \ |\
      5  2 4  5 2 3
    '''
    # 10 adds into a 7-leaf tree: the first three slots get overwritten.
    st = SumTree(7)
    entries = [(10, 1), (20, 2), (30, 1), (40, 4), (50, 5),
               (60, 2), (70, 3), (80, 1), (90, 5), (00, 2)]
    for value, priority in entries:
        st.add(value, priority)
    root = st.tree
    # Verify the priority sums level by level.
    assert root.priority == 22
    assert root.left_node.priority == 16
    assert root.right_node.priority == 6
    assert root.left_node.left_node.priority == 7
    assert root.left_node.right_node.priority == 9
    assert root.right_node.left_node.priority == 5
    assert root.right_node.right_node.priority == 1
    assert np.all(st.values == np.array([80, 90, 00, 40, 50, 60, 70]))
def test_3_leaf_tree_can_be_sampled_when_one_leaf_priority_is_non_null():
    # Only the first leaf has non-zero priority, so every draw must hit it.
    st = SumTree(3)
    for value, priority in [(17, 1), (13, 0), (11, 0)]:
        st.add(value, priority)
    idxs, values = st.sample(3, replacement=True)
    assert np.all(np.array(idxs) == 0)
    assert np.all(np.array(values) == 17)
def test_3_leaf_tree_can_be_sampled_when_two_leaves_priority_are_non_null():
    # The middle leaf has priority 0 and must never be drawn.
    st = SumTree(3)
    for value, priority in [(17, 2), (13, 0), (11, 2)]:
        st.add(value, priority)
    idxs, values = st.sample(10, replacement=True)
    assert np.all(np.isin(np.array(values), [17, 11]))
    assert np.all(np.isin(np.array(idxs), [0, 2]))
def test_6_leaf_tree_can_be_sampled():
    '''
              9
            /   \
           6     3
          / \   / \
         4   2 3   0
        /|   |\
       4 0   2 0
    '''
    # 10 adds into a 6-leaf tree; the wrap-around leaves several leaves
    # with priority 0, which must never be sampled.
    st = SumTree(6)
    entries = [(1, 1), (1, 2), (1, 3), (1, 1), (19, 2),
               (13, 0), (7, 3), (5, 0), (27, 4), (3, 0)]
    for value, priority in entries:
        st.add(value, priority)
    root = st.tree
    assert root.priority == 9
    assert root.left_node.priority == 6
    assert root.right_node.priority == 3
    assert root.left_node.left_node.priority == 4
    assert root.left_node.right_node.priority == 2
    assert root.right_node.left_node.priority == 3
    assert root.right_node.right_node.priority == 0
    idxs, values = st.sample(20, replacement=True)
    # Only the three leaves with priority > 0 may be drawn.
    assert np.all(np.isin(np.array(idxs), [4, 0, 2]))
    assert np.all(np.isin(np.array(values), [19, 7, 27]))
def test_4_leaf_tree_can_be_sampled_without_replacement():
    '''
             111
            /   \
          110    1
          / \   / \
       100  10 1   0
    '''
    st = SumTree(4)
    for value, priority in [(1, 100), (2, 10), (3, 1), (4, 0)]:
        st.add(value, priority)
    # Exactly three leaves have non-zero priority, so drawing three
    # without replacement must return all of them.
    _, drawn = st.sample(3, replacement=False)
    assert drawn == [1, 2, 3]
def test_that_sampling_without_replacement_too_much_will_raise_exception():
    '''
             111
            /   \
          110    1
          / \   / \
       100  10 1   0
    '''
    st = SumTree(4)
    for value, priority in [(1, 100), (2, 10), (3, 1), (4, 0)]:
        st.add(value, priority)
    # Drawing 3 is fine (three leaves have priority > 0)...
    st.sample(3, replacement=False)
    # ...but asking for 4 exceeds the number of sampleable leaves.
    with pytest.raises(Exception):
        st.sample(4, replacement=False)
|
{"hexsha": "7f4836da5a40a6fc1be765358e19ea2ccbc88449", "size": 4906, "ext": "py", "lang": "Python", "max_stars_repo_path": "marl_coop/component/tests/sumTree_test.py", "max_stars_repo_name": "PierreMsy/DRL_cooperation", "max_stars_repo_head_hexsha": "0385f4c88857659f44ddd5fc8c5c6c33344a38cc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-05T14:04:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-05T14:04:29.000Z", "max_issues_repo_path": "marl_coop/component/tests/sumTree_test.py", "max_issues_repo_name": "PierreMsy/DRL_cooperation", "max_issues_repo_head_hexsha": "0385f4c88857659f44ddd5fc8c5c6c33344a38cc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "marl_coop/component/tests/sumTree_test.py", "max_forks_repo_name": "PierreMsy/DRL_cooperation", "max_forks_repo_head_hexsha": "0385f4c88857659f44ddd5fc8c5c6c33344a38cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0490196078, "max_line_length": 94, "alphanum_fraction": 0.6037505096, "include": true, "reason": "import numpy", "num_tokens": 1481}
|
import numpy as np
import matplotlib.pyplot as plt
import pymc3 as pm
import pymc3.distributions.continuous as pmc
import pymc3.distributions.discrete as pmd
import pymc3.math as pmm
# PyMC 3 Installation instructions (https://github.com/pymc-devs/pymc3)
# Pip: pip install pymc3
# Conda: conda install -c conda-forge pymc3
#
# In case of issues with h5py with an Anaconda distribution, please update the package: conda install h5py
# Set random seed for reproducibility
np.random.seed(1000)

# Number of posterior draws requested from the sampler.
nb_samples = 500

if __name__ == '__main__':
    # Create a PyMC3 model
    model = pm.Model()

    # Define the model structure
    with model:
        # Ground-delay components, modelled as inverse-Gaussian (Wald) variables.
        passenger_onboarding = pmc.Wald('Passenger Onboarding', mu=0.5, lam=0.2)
        refueling = pmc.Wald('Refueling', mu=0.25, lam=0.5)
        departure_traffic_delay = pmc.Wald('Departure Traffic Delay', mu=0.1, lam=0.2)

        # Departure is 12:00 plus the traffic delay plus the slower of the two
        # parallel ground operations (onboarding vs refueling run concurrently).
        departure_time = pm.Deterministic('Departure Time',
                                          12.0 + departure_traffic_delay +
                                          pmm.switch(passenger_onboarding >= refueling,
                                                     passenger_onboarding,
                                                     refueling))

        rough_weather = pmd.Bernoulli('Rough Weather', p=0.35)

        # Rough weather lowers the Exponential rate, i.e. lengthens the
        # expected flight time (rate 0.5 in good weather, 0.4 in rough).
        flight_time = pmc.Exponential('Flight Time', lam=0.5 - (0.1 * rough_weather))
        arrival_traffic_delay = pmc.Wald('Arrival Traffic Delay', mu=0.1, lam=0.2)

        arrival_time = pm.Deterministic('Arrival time',
                                        departure_time +
                                        flight_time +
                                        arrival_traffic_delay)

    # Sample from the model
    # On Windows with Anaconda 3.5 there can be an issue with joblib, therefore I recommend to set n_jobs=1
    # NOTE(review): `njobs` was renamed (`cores`) in later PyMC3 releases —
    # confirm against the installed version.
    with model:
        samples = pm.sample(draws=nb_samples, njobs=1, random_seed=1000)

    # Plot the summary
    pm.summary(samples)

    # Show the diagrams (one trace/posterior pair per variable).
    fig, ax = plt.subplots(8, 2, figsize=(14, 18))

    pm.traceplot(samples, ax=ax)

    for i in range(8):
        for j in range(2):
            ax[i, j].grid()

    # Zoom the axes onto the region of interest for each variable.
    ax[2, 0].set_xlim([0.05, 1.0])
    ax[3, 0].set_xlim([0.05, 0.4])
    ax[4, 0].set_xlim([12, 16])
    ax[5, 0].set_xlim([0, 10])
    ax[6, 0].set_xlim([0.05, 0.4])
    ax[7, 0].set_xlim([14, 20])

    plt.show()
|
{"hexsha": "93a2cce8122489bf6f4d06944b1300b28c47e567", "size": 2452, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter04/pymc3_example.py", "max_stars_repo_name": "arifmudi/Mastering-Machine-Learning-Algorithms", "max_stars_repo_head_hexsha": "8655e8e3f1e94f4d65bb92465033ebf54c193409", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 48, "max_stars_repo_stars_event_min_datetime": "2018-05-28T12:16:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T12:49:10.000Z", "max_issues_repo_path": "Chapter04/pymc3_example.py", "max_issues_repo_name": "arifmudi/Mastering-Machine-Learning-Algorithms", "max_issues_repo_head_hexsha": "8655e8e3f1e94f4d65bb92465033ebf54c193409", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-08-19T05:48:22.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-19T05:48:22.000Z", "max_forks_repo_path": "Chapter04/pymc3_example.py", "max_forks_repo_name": "arifmudi/Mastering-Machine-Learning-Algorithms", "max_forks_repo_head_hexsha": "8655e8e3f1e94f4d65bb92465033ebf54c193409", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 41, "max_forks_repo_forks_event_min_datetime": "2018-05-28T12:16:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T18:48:12.000Z", "avg_line_length": 33.5890410959, "max_line_length": 108, "alphanum_fraction": 0.5701468189, "include": true, "reason": "import numpy,import pymc3", "num_tokens": 651}
|
"""
title: "Merlin-py initial draft"
author: "Kellen O'Connor"
date: "January 2020"
"""
import os, shutil
import tabula
import pandas as pd
import numpy as np
from os.path import expanduser, getsize
# Home directory of the current user.
home = os.path.expanduser('~')
# Allow wide cell contents when printing DataFrames.
pd.set_option('display.max_colwidth', 255)
import camelot
import queue
import math
def page_index (path, field, avoid, thread):
    """Scan every PDF in *path* and list, per file, the pages whose top
    region (top 30% of the page) contains *field* but not *avoid*.

    Parameters
    ----------
    path : str
        Folder containing the PDF files to index.
    field : str
        Text that must appear on a page for it to be selected.
    avoid : str
        Text whose presence disqualifies a page.
    thread : int
        Thread id, used only for progress messages.

    Returns
    -------
    tuple
        (files, ID_L3, ID_L4, PAGEINDEX): the file names, a per-file flag
        list, a per-file skip list, and PAGEINDEX[i] = selected page
        indices for files[i].
    """
    folder = path
    files = os.listdir(folder)[0:]
    ID_L3 = []
    ID_L4 = []
    num_files = len([f for f in os.listdir(folder)if os.path.isfile(os.path.join(path, f))])
    progress_count=0
    # First pass: initialize the per-file flag/skip lists.
    # NOTE(review): ID_L4 is always 0 here, so the `ID_L4[i] == 1` branch
    # below never fires from this function alone — confirm whether callers
    # are expected to mutate it.
    for i, file in enumerate(files):
        path = folder + "/" + files[i]
        ID_L3.append("flagged")
        ID_L4.append(0)
    PAGEINDEX = []
    # Second pass: read the top strip of every page and match the text.
    for i, file in enumerate(files):
        progress_count=progress_count+1
        path = folder + "/" + files[i]
        print("Thread " + str(thread) + " - Generating table of contents... "+files[i]+" "+str(progress_count)+"/"+str(num_files))
        ID_L5 = []
        ID_L6 = []
        y = []
        if ID_L4[i] == 1:
            PAGEINDEX.append(ID_L6)
        else:
            # area=(0, 0, 30, 100) with relative_area=True = top 30% of each page.
            x = tabula.read_pdf(path, pages = "all", multiple_tables = True,
                    area=(0, 0, 30, 100), relative_area=True,
                    pandas_options={'header': None})
            if isinstance(x, list):
                for xpage in range(0,len(x)):
                    y.append(x[xpage].to_string())
                for ypage in range(0,len(y)):
                    # Page qualifies if it mentions `field` and not `avoid`.
                    if y[ypage].find(field) > -1 and y[ypage].find(avoid) == -1:
                        ID_L5.append(ypage)
                # NOTE(review): this filtered value is immediately overwritten
                # below; the `> 23` filter has no effect — confirm intent.
                ID_L6 = [p for p in ID_L5 if p > 23]
                # NOTE(review): when ID_L5 is non-empty, `temp` *aliases*
                # ID_L5, so the append below also extends ID_L5 by one
                # trailing page; when it is empty, [1, 2, 3] is built but
                # discarded and an empty list is kept — verify this is the
                # intended behaviour.
                temp = ID_L5 if len(ID_L5) > 0 else [1,2]
                temp.append(temp[-1] + 1)
                ID_L6 = ID_L5 if len(ID_L5)==0 else temp
                PAGEINDEX.append(ID_L6)
            else:
                PAGEINDEX.append(ID_L6)
    return files, ID_L3, ID_L4, PAGEINDEX
def create_tblcntnts_artifact (index_results):
    """Turn the tuple returned by ``page_index`` into a DataFrame.

    Parameters
    ----------
    index_results : sequence
        Four parallel sequences: file names, file-type flags, non-990
        flags, and the per-file lists of needed pages.

    Returns
    -------
    pandas.DataFrame
        Columns ``filename``, ``filetype``, ``non_990``, ``needed_pages``
        plus ``needed_page_length`` (length of each needed_pages entry).
    """
    filenames, filetypes, non_990s, needed_pages_lists = index_results
    table_pages = pd.DataFrame(columns = ['filename', 'filetype','non_990','needed_pages'])
    # One row per file; enumerate/zip replaces the original manual
    # while-loop index bookkeeping and the needless
    # np.array(..., dtype=object) conversion.
    for num, row in enumerate(zip(filenames, filetypes, non_990s, needed_pages_lists)):
        table_pages.loc[num] = list(row)
    # .str.len() works for both list-valued and string-valued entries.
    table_pages['needed_page_length'] = table_pages['needed_pages'].str.len()
    return table_pages
def generate_tblcntnts (path, field, avoid, results, thread):
    """Index the PDFs in *path* and push the resulting table onto *results*.

    The ``needed_pages`` column is flattened to a comma-separated string
    (list brackets stripped) so it can be handed to camelot later.
    """
    index_results=page_index(path, field, avoid, thread)
    table=create_tblcntnts_artifact(index_results)
    table['needed_pages'] = table['needed_pages'].astype(str)
    # regex=True pins the historical regex behaviour: pandas 2.x treats the
    # pattern literally by default, which would leave the brackets in place
    # (older pandas also emits a FutureWarning without it).
    table['needed_pages'] = table['needed_pages'].str.replace(r"\[", "", regex=True)
    table['needed_pages'] = table['needed_pages'].str.replace(r"\]", "", regex=True)
    results.put(table)
def data_pull(table,path,out_shape, output_path, miss_path):
    """Extract tables from the flagged pages of each PDF with camelot.

    Tables whose column count equals *out_shape* are appended to
    *output_path*; mismatches go to *miss_path*.
    """
    missed_list=[]
    progress_count=0
    num_pdf=str(len(table))
    # NOTE(review): dropna returns a new frame and the result is discarded,
    # so this line has no effect (NaN pages are handled via the 'nan'
    # string check below) — confirm whether in-place filtering was intended.
    table.dropna(subset=['needed_pages'])
    for index, row in table.iterrows():
        ned_pg=row['needed_pages']
        ned_pg=str(ned_pg)
        if ned_pg=='nan':
            ned_pg=''
        ned_pg_len=len(ned_pg)
        progress_count = progress_count + 1
        print("Worker PDF Count: " + str(progress_count) + "/" + num_pdf + " on " + str(row['filename']))
        if ned_pg_len!=0:
            file=path+'/'+row['filename']
            tbls=camelot.read_pdf(file,pages=ned_pg)
            for i in tbls:
                temp_df=i.df
                if temp_df.shape[1]==out_shape:
                    # Strip embedded newlines and clean column 3 before export.
                    temp_df = temp_df.replace(to_replace='\n', value=' ', regex= True)
                    temp_df = temp_df.replace({3: ' '}, {3: ''}, regex= True)
                    temp_df['file'] = row['filename']
                    # NOTE(review): header='False' is a truthy *string*, not
                    # the boolean False — a header row is written on every
                    # append; confirm whether header=False was meant.
                    temp_df.to_csv(output_path, mode='a', header='False')
                    print("Tables found matching selected shape in file "+row['filename'])
                else:
                    temp_df.to_csv(miss_path, mode='a', header='False')
                    print("No tables matching selected shape in file "+row['filename']+" despite being flagged in step 1, appending to missed output file...")
        else:
            print('Issue with '+row['filename']+'... Its likely that it didnt somehow meet the search critera.')
            missed_list.append(row['filename'])
    print("Thread with " + str(num_pdf) + " files completed.")
def input_data_split(table,threads=1):
    """Split *table* into roughly equal-work bins (by page count), one per
    thread; rows with too many pages are split across bins.

    Returns a list of DataFrames.
    """
    # NOTE(review): [df]*threads repeats the SAME DataFrame object. It is
    # harmless only because .append() below returns a new frame that is
    # rebound into the slot — verify before changing the append pattern.
    # NOTE(review): DataFrame.append was removed in pandas 2.0; this
    # function needs pd.concat on newer pandas.
    bins = [pd.DataFrame(columns = ['filename', 'filetype','non_990','needed_pages', 'needed_page_length'])]*threads
    if threads == 1:
        bins.append(table)
    else:
        # Target bin size = ceil(total pages / threads).
        ideal_bin_size = 0
        for index, row in table.iterrows():
            ideal_bin_size += int(row['needed_page_length'])
        ideal_bin_size = math.ceil(ideal_bin_size/threads)
        current_bin_size = 0
        current_bin = 0
        i = 0
        num_rows = table.shape[0]
        while i < num_rows:
            if int(table.iloc[i]['needed_page_length']) <= ideal_bin_size - current_bin_size:
                # Row fits entirely into the current bin.
                bins[current_bin] = bins[current_bin].append(table.iloc[i])
            else:
                # Row is too big: split its pages between this bin and a new
                # row inserted right after it.
                print("Start: " + table.iloc[i]['filename'])
                pages = table.iloc[i]['needed_pages'].split(",")
                pages1 = pages[0:ideal_bin_size-current_bin_size]
                pages2 = pages[ideal_bin_size-current_bin_size:]
                # NOTE(review): chained assignment — table.iloc[i] returns a
                # copy for mixed-dtype frames, so these two writes likely do
                # NOT modify `table` (SettingWithCopy); the appended row may
                # still carry the full page list. Confirm with a pandas
                # version check before relying on this split.
                table.iloc[i]['needed_pages'] = ','.join(pages1)
                table.iloc[i]['needed_page_length'] = len(pages1)
                bins[current_bin] = bins[current_bin].append(table.iloc[i])
                new_row = pd.DataFrame({
                    'filename': [table.iloc[i]['filename']],
                    'filetype': [table.iloc[i]['filetype']],
                    'non_990': [table.iloc[i]['non_990']],
                    'needed_pages': [','.join(pages2)],
                    'needed_page_length': [len(pages2)]
                })
                table = insert_rows(i+1, table, new_row)
                num_rows += 1
                print("End: " + table.iloc[i]['filename'])
            current_bin_size += int(table.iloc[i]['needed_page_length'])
            if current_bin_size >= ideal_bin_size:
                current_bin += 1
                current_bin_size = 0
            i += 1
    return bins
def multi_run_wrapper(args):
    """Adapter so ``Pool.map`` can call ``data_pull`` with a packed tuple."""
    unpacked = tuple(args)
    return data_pull(*unpacked)
def extract(path, field, avoid, output, missedoutput, tableshape, threads, tblcntntspath):
    """End-to-end pipeline: index the PDFs in *path*, save the table of
    contents to *tblcntntspath*, then extract matching tables to *output*
    (mismatches to *missedoutput*), optionally across *threads* workers.
    """
    if threads == 1:
        # Single-worker path: index then pull sequentially.
        results = queue.Queue()
        generate_tblcntnts(path=path, field=field,avoid=avoid, results=results, thread=1) #, tblcntntspath=tblcntntspath)
        table = results.get()
        table.to_csv(tblcntntspath)
        data_pull(table=table,path=path,out_shape=tableshape,output_path=output,miss_path=missedoutput)
    else:
        table = pd.DataFrame(columns = ['filename', 'filetype','non_990','needed_pages', 'needed_page_length'])
        files = [f for f in os.listdir(path) if not f.startswith('.')]
        # Largest files first, so the size-balanced assignment below works.
        files.sort(key=lambda file: sort_files(file, path), reverse=True)
        # Copy files into one temp dir per thread, balancing total bytes.
        # NOTE(review): tmp dirs are not cleaned up if a thread raises —
        # confirm whether a try/finally is wanted here.
        for i in range(threads):
            os.mkdir(path+"/tmp"+str(i))
        dir_sizes = [0]*threads
        for file in files:
            dir = dir_sizes.index(min(dir_sizes))
            shutil.copy(path+'/'+file, path+"/tmp"+str(dir))
            dir_sizes[dir] += os.path.getsize(path+'/'+file)
        from threading import Thread
        thread_list = [None]*threads
        results = queue.Queue()
        # Indexing is I/O-bound, so plain threads are used here.
        for i in range(threads):
            thread_list[i] = Thread(target=generate_tblcntnts, args=(path+"/tmp"+str(i), field, avoid, results, i)) #, tblcntntspath))
            thread_list[i].start()
        for i in range(threads):
            thread_list[i].join()
            shutil.rmtree(path+"/tmp"+str(i))
        while(results.qsize() > 0):
            result = results.get()
            table = table.append(result)
        table.to_csv(tblcntntspath)
        # Extraction is CPU-heavy, so it fans out to processes.
        bins = input_data_split(table=table, threads=threads)
        data_pull_args = []
        for i in range(len(bins)):
            data_pull_args.append((bins[i], path, tableshape, output, missedoutput))
        from multiprocessing import Pool
        pool = Pool(threads)
        pool.map(multi_run_wrapper,data_pull_args)
    print("Thank you for using Merlin.py! Have a nice day!")
def sort_files(file, path):
    """Sort key: size in bytes of *file* inside the folder *path*."""
    full_path = path + '/' + file
    return os.path.getsize(full_path)
def insert_rows(index, df, new_rows):
    """Return a copy of *df* with *new_rows* inserted before position *index*.

    The result's index is reset to 0..n-1, matching the old
    ``append(..., ignore_index=True)`` behaviour.
    """
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
    return pd.concat([df.iloc[:index], new_rows, df.iloc[index:]], ignore_index=True)
|
{"hexsha": "7e66a4d415e27c26916280ba1fea55bfb4e4761f", "size": 8789, "ext": "py", "lang": "Python", "max_stars_repo_path": "merlin_pull.py", "max_stars_repo_name": "kellen-t-oconnor/merlin-py", "max_stars_repo_head_hexsha": "9a9bc1a95b662787a7741abc7687e8b84904edfb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-05T17:33:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-21T08:59:38.000Z", "max_issues_repo_path": "merlin_pull.py", "max_issues_repo_name": "kellen-t-oconnor/merlin-py", "max_issues_repo_head_hexsha": "9a9bc1a95b662787a7741abc7687e8b84904edfb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-18T15:49:24.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-18T15:49:24.000Z", "max_forks_repo_path": "merlin_pull.py", "max_forks_repo_name": "kellen-t-oconnor/merlin-py", "max_forks_repo_head_hexsha": "9a9bc1a95b662787a7741abc7687e8b84904edfb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8790697674, "max_line_length": 158, "alphanum_fraction": 0.5863010581, "include": true, "reason": "import numpy", "num_tokens": 2092}
|
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from numpy.linalg import inv
import matplotlib.colors as colors
from matplotlib import cm
from matplotlib import rc
from matplotlib import rcParams
__author__ = 'ernesto'

# Use LaTeX for figure text rendering.
rc('text', usetex=True)
rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]

####### Parameters #######

# number of samples
N = 200
# state-equation coefficient
a = 0.995
# excitation-noise variance
var_u = 0.000001
# observation-noise variance
var_w = 0.5
# mean and variance of f0[-1]
mu_f0_i = 0.25
var_f0_i = 0.01

# Kalman filter parameters
# number of state parameters
p = 2
# state transition matrix
A = np.array([[a, 0], [2 * np.pi * a, 1]])
B = np.array([1, 2 * np.pi])
# Kalman filter initial conditions
# s[-1|-1]
s_est_i = np.array([[mu_f0_i], [0]])
# M[-1|-1]
C_s_i = 100 * np.eye(p)

### End of parameters ###

ns = np.arange(N)

# Generate f0[n], a first-order Gauss-Markov process.
# Draw f0[-1].
f0_i = np.random.normal(mu_f0_i, np.sqrt(var_f0_i), 1)
# excitation noise
u = np.random.normal(0, np.sqrt(var_u), N)
# Filter the excitation noise with the initial condition.
f0, z_f = signal.lfilter([1], [1, -a], u, zi=a * f0_i)

# Generate the observations: a unit-amplitude sinusoid with slowly
# varying frequency, in white Gaussian noise.
phi = 2 * np.pi * np.cumsum(f0)
y = np.cos(phi)
x = y + np.random.normal(0, np.sqrt(var_w), N)

# Storage for the per-step estimates and the diagonal of the MSE matrix.
s_ests = np.zeros((p, N))
Ms = np.zeros((p, N))

s_est = s_est_i
M_est = C_s_i
for n in ns:
    # Prediction step.
    s_pred = A @ s_est
    # BUG FIX: the covariance prediction requires the outer product B B^T;
    # `var_u * B @ B` was a scalar broadcast over the whole matrix.
    M_pred = A @ M_est @ A.T + var_u * np.outer(B, B)
    # Linearized observation row (EKF); s_pred has shape (2, 1), so the
    # [1, 0] index extracts a scalar and keeps the array well-formed
    # (np.array([[0, -np.sin(s_pred[1])]]) is ragged on modern numpy).
    H = np.array([[0.0, -np.sin(s_pred[1, 0])]])
    # Kalman gain.
    K = M_pred @ H.T / (var_w + H @ M_pred @ H.T)
    # Correction step.
    s_est = s_pred + K * (x[n] - np.cos(s_pred[1, 0]))
    M_est = (np.eye(p) - K @ H) @ M_pred
    s_ests[:, n] = s_est.ravel()
    Ms[:, n] = np.diag(M_est)

# Fold the frequency estimate back into [0, 0.5].
f0s = s_ests[0, :]
int_part = np.floor(f0s / 0.5)
# BUG FIX: copy — f0s is a *view* of s_ests, so unwrapping in place would
# silently modify the raw estimates plotted below.
f0s_unw = f0s.copy()
for n in ns:
    if int_part[n] != 0:
        if int_part[n] % 2 == 0:
            f0s_unw[n] = f0s[n] - int_part[n] * 0.5
        else:
            f0s_unw[n] = 0.5 - (f0s[n] - int_part[n] * 0.5)

plt.figure(0)
plt.subplot(311)
plt.plot(ns, f0, 'k')
plt.plot(ns, s_ests[0, :], 'r')
#plt.plot(ns[:-1], (s_ests[1, 1:]-s_ests[1, :-1])/(2 * np.pi), 'b')
plt.subplot(312)
plt.plot(ns, phi, 'k')
plt.plot(ns, s_ests[1, :], 'r')
plt.subplot(313)
plt.plot(ns, y, 'k', zorder=2)
plt.plot(ns, x, 'r', zorder=1)

plt.figure(1)
plt.subplot(111)
plt.plot(ns, f0, 'k')
plt.plot(ns, f0s_unw, 'r')

plt.show()
|
{"hexsha": "8bb56df1647cc5c6ea350826c1808d6109691bd7", "size": 2573, "ext": "py", "lang": "Python", "max_stars_repo_path": "figuras/PycharmKayStatisticalReport/problem_13_21.py", "max_stars_repo_name": "bor9/estudiando_el_kay", "max_stars_repo_head_hexsha": "6e07908b8b0b5a5166dadce30001e6100e8304c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "figuras/PycharmKayStatisticalReport/problem_13_21.py", "max_issues_repo_name": "bor9/estudiando_el_kay", "max_issues_repo_head_hexsha": "6e07908b8b0b5a5166dadce30001e6100e8304c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "figuras/PycharmKayStatisticalReport/problem_13_21.py", "max_forks_repo_name": "bor9/estudiando_el_kay", "max_forks_repo_head_hexsha": "6e07908b8b0b5a5166dadce30001e6100e8304c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-02T05:27:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-02T05:27:27.000Z", "avg_line_length": 22.7699115044, "max_line_length": 72, "alphanum_fraction": 0.6272833269, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 957}
|
#=
The power set of a set is the set of all its subsets. Write a function that, given a set, generates its power set.
For example, given the set {1, 2, 3}, it should return {{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}}.
You may also use a list or array to represent a set.
=#
using Test
include("Solutions/problem37_generate_powerset.jl")

# The power set of a 3-element set has 2^3 = 8 subsets.
expected = [[], [1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]
@test get_powerset([1,2,3]) == expected
|
{"hexsha": "c6b1934eeef8e94036ba6accb8802bffcdd58460", "size": 438, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Tests/problem37_generate_powerset_test.jl", "max_stars_repo_name": "DominiqueCaron/daily-coding-problem", "max_stars_repo_head_hexsha": "41234497aa3a2c21c5dff43d86e9153d9582cced", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Tests/problem37_generate_powerset_test.jl", "max_issues_repo_name": "DominiqueCaron/daily-coding-problem", "max_issues_repo_head_hexsha": "41234497aa3a2c21c5dff43d86e9153d9582cced", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2019-06-17T14:04:18.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-12T20:01:45.000Z", "max_forks_repo_path": "Tests/problem37_generate_powerset_test.jl", "max_forks_repo_name": "DominiqueCaron/daily-coding-problem", "max_forks_repo_head_hexsha": "41234497aa3a2c21c5dff43d86e9153d9582cced", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6923076923, "max_line_length": 114, "alphanum_fraction": 0.598173516, "num_tokens": 169}
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from configs.path_config import SHAPENETCLASSES
from configs.path_config import ScanNet_OBJ_CLASS_IDS as OBJ_CLASS_IDS
import torch
class TdwPhysicsConfig(object):
    """Box-parameterization config for TDW physics scenes.

    Converts between 3D box sizes and (size class, residual) pairs, and
    assembles 7-dof oriented bounding boxes. A single mean box size of
    0.4 m per axis is shared by all classes.
    """

    def __init__(self):
        # Class/cluster counts come from the ScanNet object-class id list.
        self.num_class = len(OBJ_CLASS_IDS)
        self.num_heading_bin = 12
        self.num_size_cluster = len(OBJ_CLASS_IDS)
        #self.type2class = {SHAPENETCLASSES[cls]:index for index, cls in enumerate(OBJ_CLASS_IDS)}
        #self.class2type = {self.type2class[t]: t for t in self.type2class}
        #self.class_ids = OBJ_CLASS_IDS
        # Single mean box size (meters) shared by every class.
        self.mean_size_arr = np.array([0.4, 0.4, 0.4])
        self.type_mean_size = {}
        self.data_path = 'datasets/scannet/processed_data'
        self.with_rotation = False

    def size2class(self, size, type_name):
        ''' Convert 3D box size (l,w,h) to size class and size residual.

        NOTE(review): relies on self.type2class / self.type_mean_size,
        which are commented out / left empty in __init__ — this raises
        unless they are populated elsewhere. Confirm before use.
        '''
        size_class = self.type2class[type_name]
        size_residual = size - self.type_mean_size[type_name]
        return size_class, size_residual

    def class2size(self, tmp, residual):
        ''' Inverse function to size2class; *tmp* (the size class) is
        unused because a single mean size is shared by all classes. '''
        return self.mean_size_arr + residual

    def class2size_cuda(self, residual):
        ''' Inverse function to size2class for batched torch residuals. '''
        mean_size_arr = torch.from_numpy(self.mean_size_arr).to(residual.device).float()
        # BUG FIX: torch has no `expand_dims` (that is a numpy API);
        # unsqueeze(0) adds the broadcastable batch axis.
        return mean_size_arr.unsqueeze(0) + residual

    def param2obb(self, center, heading_class, heading_residual, size_class, size_residual):
        '''Assemble a 7-dof box [cx, cy, cz, l, w, h, heading].

        NOTE(review): calls self.class2angle, which is not defined on this
        class — confirm it is provided by a subclass or mixin.
        '''
        heading_angle = self.class2angle(heading_class, heading_residual)
        box_size = self.class2size(int(size_class), size_residual)
        obb = np.zeros((7,))
        obb[0:3] = center
        obb[3:6] = box_size
        obb[6] = heading_angle
        return obb
if __name__ == '__main__':
    # BUG FIX: ScannetConfig is undefined in this module (NameError when
    # run as a script); the class defined here is TdwPhysicsConfig.
    cfg = TdwPhysicsConfig()
|
{"hexsha": "6f0558dc026252e3a845605465c250e0621c73cf", "size": 2113, "ext": "py", "lang": "Python", "max_stars_repo_path": "configs/tdw_physics_config.py", "max_stars_repo_name": "htung0101/RfDNet", "max_stars_repo_head_hexsha": "5fb3e8cb0a50d2d1f3eee39bccfcc67b1f942ad0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "configs/tdw_physics_config.py", "max_issues_repo_name": "htung0101/RfDNet", "max_issues_repo_head_hexsha": "5fb3e8cb0a50d2d1f3eee39bccfcc67b1f942ad0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "configs/tdw_physics_config.py", "max_forks_repo_name": "htung0101/RfDNet", "max_forks_repo_head_hexsha": "5fb3e8cb0a50d2d1f3eee39bccfcc67b1f942ad0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8679245283, "max_line_length": 98, "alphanum_fraction": 0.6857548509, "include": true, "reason": "import numpy", "num_tokens": 530}
|
import numpy as np
from spira.core.transformation import ReversibleTransform
from spira.core.parameters.descriptor import SetFunctionParameter
from spira.yevon.geometry.coord import CoordParameter, Coord
__all__ = ['Stretch', 'scale_element', 'stretch_element_by_port']
class Stretch(ReversibleTransform):
    """ Reversible transform that stretches coordinates by a per-axis
    factor about a fixed center point: x' = f*x + (1-f)*c per axis.

    Example
    -------
    >>> s = Stretch()(shape)
    """

    # Point that stays fixed under the stretch.
    stretch_center = CoordParameter(default=(0,0))

    def set_stretch_factor(self, value):
        # Normalize the factor to a Coord; zero factors are rejected so the
        # transform stays invertible.
        if isinstance(value, Coord):
            self.__stretch_factor__ = value
        else:
            self.__stretch_factor__ = Coord(value[0], value[1])
        if self.__stretch_factor__[0] == 0.0 or self.__stretch_factor__[1] == 0.0:
            raise ValueError("Error: Stretch factor cannot be zero in Stretch transform")

    # Descriptor that routes assignments through set_stretch_factor.
    stretch_factor = SetFunctionParameter('__stretch_factor__', set_stretch_factor)

    def __repr__(self):
        return "[SPiRA: Stretch] (factor {}, center {})".format(self.stretch_factor, self.stretch_center)

    def __str__(self):
        return self.__repr__()

    def apply_to_coord(self, coord):
        # Per-axis affine stretch about the center: f*x + (1-f)*c.
        x1 = self.__stretch_factor__[0] * coord[0]
        x2 = (1 - self.__stretch_factor__[0]) * self.stretch_center[0]
        y1 = self.__stretch_factor__[1] * coord[1]
        y2 = (1 - self.__stretch_factor__[1]) * self.stretch_center[1]
        return Coord(x1+x2, y1+y2)

    def reverse_on_coord(self, coord):
        # Exact inverse of apply_to_coord (factor -> 1/factor).
        x1 = 1.0 / self.__stretch_factor__[0] * coord[0]
        x2 = (1 - 1.0 / self.__stretch_factor__[0]) * self.stretch_center[0]
        y1 = 1.0 / self.__stretch_factor__[1] * coord[1]
        y2 = (1 - 1.0 / self.__stretch_factor__[1]) * self.stretch_center[1]
        return Coord(x1+x2, y1+y2)

    def apply_to_array(self, coords):
        # NOTE(review): *= and += mutate the caller's array in place as
        # well as returning it — confirm callers expect this side effect.
        coords *= np.array([self.stretch_factor.x, self.stretch_factor.y])
        x = (1 - self.__stretch_factor__.x) * self.stretch_center.x
        y = (1 - self.__stretch_factor__.y) * self.stretch_center.y
        coords += np.array([x, y])
        return coords

    def reverse_on_array(self, coords):
        # In-place inverse of apply_to_array (same mutation caveat).
        coords *= np.array([1.0 / self.stretch_factor.x, 1.0 / self.stretch_factor.y])
        x = (1 - 1.0 / self.__stretch_factor__.x) * self.stretch_center.x
        y = (1 - 1.0 / self.__stretch_factor__.y) * self.stretch_center.y
        coords += np.array([x, y])
        return coords

    def apply_to_angle(self, angle):
        # FIXME: This is required for transforming polygon ports.
        # This is currently just a temporary fix.
        return angle

    def is_identity(self):
        """ Returns True if the transformation does nothing """
        return ((self.stretch_factor.x == 1.0) and (self.stretch_factor.y == 1.0))

    def id_string(self):
        return self.__repr__()
def scale_element(elem, scaling=(1.0, 1.0), scale_center=(0.0, 0.0)):
    """Scale *elem* about *scale_center*: uniform scaling uses a
    Magnification transform, anisotropic scaling a Stretch."""
    from spira.core.transforms.magnification import Magnification
    sx, sy = scaling[0], scaling[1]
    if sx != sy:
        return Stretch(stretch_factor=scaling, stretch_center=scale_center)(elem)
    return Magnification(scale_center, sx)(elem)
def stretch_element_by_port(elem, const_port, subj_port, destination):
    """Build a Stretch that moves *subj_port* to *destination* while
    keeping *const_port* fixed.

    The factor is the ratio of the const-port→destination distance to the
    const-port→subj-port distance, applied along the subject port's axis
    (x for orientations 0/180, y for 90/270).

    Raises
    ------
    ValueError
        If the subject port orientation is not 0, 90, 180 or 270
        (previously this fell through to an UnboundLocalError).
    """
    p1, p2 = const_port, subj_port
    d0 = p1.midpoint.distance(p2.midpoint)
    d1 = p1.midpoint.distance(destination)
    sf = d1/d0
    # The original four branches were pairwise identical (0≡180, 90≡270).
    if p2.orientation in (0, 180):
        T = Stretch(stretch_factor=(sf,1), stretch_center=p1.midpoint)
    elif p2.orientation in (90, 270):
        T = Stretch(stretch_factor=(1,sf), stretch_center=p1.midpoint)
    else:
        raise ValueError('stretch_element_by_port requires an axis-aligned port orientation (0, 90, 180 or 270).')
    return T
|
{"hexsha": "50c22c6adfbeb1fe4e0f69c84e94f47d1c39aa3b", "size": 3777, "ext": "py", "lang": "Python", "max_stars_repo_path": "spira/core/transforms/stretching.py", "max_stars_repo_name": "qedalab/spira", "max_stars_repo_head_hexsha": "32e4d2096e298b9fcc5952abd654312dc232a259", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2018-07-13T09:46:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-22T13:34:50.000Z", "max_issues_repo_path": "spira/core/transforms/stretching.py", "max_issues_repo_name": "qedalab/spira", "max_issues_repo_head_hexsha": "32e4d2096e298b9fcc5952abd654312dc232a259", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2018-09-09T11:32:40.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-08T07:47:31.000Z", "max_forks_repo_path": "spira/core/transforms/stretching.py", "max_forks_repo_name": "qedalab/spira", "max_forks_repo_head_hexsha": "32e4d2096e298b9fcc5952abd654312dc232a259", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-01-17T18:50:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-13T20:27:52.000Z", "avg_line_length": 37.0294117647, "max_line_length": 105, "alphanum_fraction": 0.6571352926, "include": true, "reason": "import numpy", "num_tokens": 1003}
|
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
import os
import glob
import numpy as np
from sklearn.metrics import confusion_matrix
import random
from skimage import io, color
import DataProcessing as load
import main as rem
from tensorflow.examples.tutorials.mnist import input_data
from xlwt import Workbook
import xlrd
import matplotlib.patches as patches
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
# Matplotlib text style for the class labels drawn onto the patch overlays.
font = {'family': 'serif',
        'color': 'blue',
        'weight': 'normal',
        'size': 10,
        }
def rgbCalculation(img):
    """Return True when every channel of *img* stays inside the hard-coded
    envelope: R in [93, 211], G in [142, 222], B in [64, 155].

    Channel layout matches the original per-pixel loop: index 0 = R,
    index 1 = G, and every index >= 2 feeds the blue min/max (an alpha
    channel, if present, is folded into the blue check). Assumes uint8
    pixel values — TODO confirm inputs never exceed 255.
    """
    arr = np.asarray(img)
    # Empty images / fewer than 3 channels cannot satisfy the envelope
    # (the loop version's untouched initial 255/0 extremes also failed
    # the comparison chain).
    if arr.size == 0 or arr.shape[2] < 3:
        return False
    # Vectorized per-channel extremes replace the O(H*W*C) Python loops.
    minR, maxR = arr[:, :, 0].min(), arr[:, :, 0].max()
    minG, maxG = arr[:, :, 1].min(), arr[:, :, 1].max()
    minB, maxB = arr[:, :, 2:].min(), arr[:, :, 2:].max()
    return (93 <= minR <= 211 and 93 <= maxR <= 211 and
            142 <= minG <= 222 and 142 <= maxG <= 222 and
            64 <= minB <= 155 and 64 <= maxB <= 155)
# Number of training samples loaded by DataProcessing.
n = len(load.XX)
# Set by featuresFunction: 3 values per selected GLCM feature bit.
Number_of_features = 0
flag = 1
# Max feature value seen during training; used to normalize both the
# training and the test feature matrices.
normalizeValue = 0.0
def featuresFunction(mask,image_list,flagvalue):
    """Build the (features x samples) matrix for *image_list*.

    *mask* selects bits out of 15 candidate GLCM features; each selected
    feature contributes 3 values. Updates the module-level
    Number_of_features as a side effect and fills the matrix via rem.GLCM.
    """
    # Count the selected feature bits in the 15-bit mask.
    selected = sum(1 for bit in range(15) if mask & (1 << bit))
    assert selected > 0
    global Number_of_features
    Number_of_features = selected * 3
    feature_matrix = np.zeros(shape=(Number_of_features, len(image_list)))
    # One column per image, populated in place by the GLCM extractor.
    for idx in range(len(image_list)):
        rem.GLCM(feature_matrix, idx, mask, flagvalue)
    return feature_matrix
#############################################################################################
## Neural Network Code

# Hidden-layer widths.
n_nodes_hl1 = 50
n_nodes_hl2 = 50
n_nodes_hl3 = 50

# Output classes: narrow brown spot, paddy blast, brown spot, other.
n_classes = 4
batch_size = 100

# Placeholders: each sample is a 15-dimensional feature vector
# (see featuresFunction), not the MNIST 28x28 the original comment implied.
x_var = tf.placeholder('float', [None, 15])
y_var = tf.placeholder('float')
def neural_network_model(data):
    """Three-hidden-layer fully connected network; returns the logits.

    Each layer computes relu(input @ weights + biases); weights and
    biases are randomly initialized tf.Variables.
    """
    h1 = {'weights': tf.Variable(tf.random_normal([15, n_nodes_hl1])),
          'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    h2 = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
          'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    h3 = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
          'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    out_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                 'biases': tf.Variable(tf.random_normal([n_classes]))}

    act1 = tf.nn.relu(tf.add(tf.matmul(data, h1['weights']), h1['biases']))
    act2 = tf.nn.relu(tf.add(tf.matmul(act1, h2['weights']), h2['biases']))
    act3 = tf.nn.relu(tf.add(tf.matmul(act2, h3['weights']), h3['biases']))

    # Raw logits (no softmax here; the loss applies it).
    return tf.add(tf.matmul(act3, out_layer['weights']), out_layer['biases'])
prediction = neural_network_model(x_var)
def estimatedPrediction(test_set_x,One_hot_matrix_test):
    """Fraction of samples whose argmax over the logits matches the
    one-hot label. Must run inside an active default TF session.
    """
    hits = tf.equal(tf.argmax(prediction, 1), tf.argmax(y_var, 1))
    accuracy_node = tf.reduce_mean(tf.cast(hits, 'float'))
    # Feature matrix is (features x samples); the placeholder wants rows.
    return accuracy_node.eval({x_var: test_set_x.transpose(), y_var: One_hot_matrix_test})
def classChecking(file,value):
    """Return True when the trained network classifies *file* as class
    *value* (0 narrow brown spot, 1 paddy blast, 2 brown spot).

    Evaluates a single-image 'accuracy': > 0.0 iff the predicted class
    equals *value*. Relies on the module-level normalizeValue having been
    set by the training pipeline first (otherwise divides by zero).
    """
    image_list1 = []
    y = []
    image_list1.append(file)
    y.append(value)
    test_set_y = np.array(y)
    rem.testImage(image_list1)
    # Same feature-bit mask as training, so the features line up.
    mask = 24604
    test_set_x_flatten = featuresFunction(mask, image_list1,1)
    test_set_x = test_set_x_flatten / normalizeValue
    ##############
    num_classes = 4
    One_hot_matrix_test = np.zeros(shape=(len(test_set_y), num_classes))
    for i in range(len(test_set_y)):
        One_hot_matrix_test[i][test_set_y[i]] = 1
    return estimatedPrediction(test_set_x,One_hot_matrix_test)>0.0
# def Testing():
# for file in glob.glob('./TestImage/*.*'):
# img = io.imread(file, as_grey=False)
# # 0 --- means Narrow Brown Spot Disease
# # 1 --- means Paddy Blast Disease
# # 2 --- means Brown Spot Disease
#
# if (classChecking(file,2)):
# plt.imshow(img)
# plt.title("Brown Spot")
# plt.show()
# elif (classChecking(file,0)):
# plt.imshow(img)
# plt.title("Narrow Brown Spot")
# plt.show()
# elif(classChecking(file,1)):
# plt.imshow(img)
# plt.title("Paddy Blast")
# plt.show()
# else:
# plt.imshow(img)
# plt.title("Other")
# plt.show()
def Testing(str):
    """Classify and display every image matched by the glob pattern *str*.

    NOTE(review): the parameter shadows the builtin ``str``; renaming it
    would change the visible signature, so it is only flagged here.
    """
    for file in glob.glob(str):
        img = io.imread(file, as_grey=False)
        # 0 --- means Narrow Brown Spot Disease
        # 1 --- means Paddy Blast Disease
        # 2 --- means Brown Spot Disease

        # Each classChecking call re-runs feature extraction and a forward
        # pass; classes are tried in order 2, 0, 1, else "Other".
        if (classChecking(file,2)):
            plt.imshow(img)
            plt.title("Brown Spot")
            plt.show()
        elif (classChecking(file,0)):
            plt.imshow(img)
            plt.title("Narrow Brown Spot")
            plt.show()
        elif(classChecking(file,1)):
            plt.imshow(img)
            plt.title("Paddy Blast")
            plt.show()
        else:
            plt.imshow(img)
            plt.title("Other")
            plt.show()
def fun():
    """Multi-scale sliding-window disease localization.

    Rescales every image in ./Picture1 to three pyramid levels (258, 129,
    64 px), slides a 64x64 window (stride 25) over each level, classifies
    windows that fail the healthy-leaf RGB test, draws labelled rectangles
    on the image, and titles it with the majority class.
    """
    # Clear previous run's window crops and pyramid levels.
    for f in glob.glob('./Testing/*.*'):
        os.remove(f)
    for f in glob.glob('./Picture/*.*'):
        os.remove(f)
    lvlIndex = 0
    # Build a 3-level image pyramid per input image (258 -> 129 -> 64).
    for file in glob.glob('./Picture1/*.*'):
        img = cv2.imread(file, 1)
        xxxx = 258
        for ii in range(3):
            lvlIndex += 1
            path = './Picture/' + str(lvlIndex) + '.jpg'
            cv2.imwrite(path, cv2.resize(img, ( int(xxxx), int(xxxx) ), interpolation=cv2.INTER_LINEAR))
            xxxx /= 2
    indd = 0
    imageNo = 0
    for file in glob.glob('./Picture/*.*'):
        img1 = io.imread(file, as_grey=False)
        im = np.array(Image.open(file), dtype=np.uint8)
        fig, ax = plt.subplots(1)
        ax.imshow(im)
        img = cv2.imread(file, 1)
        # newimg = cv2.imread('./Waste/img.jpg', 1)
        # Reusable 64x64 buffer for the current window's pixels.
        newimg = cv2.resize( img , (64, 64), interpolation=cv2.INTER_LINEAR)
        # newimg = io.imread('./Waste/img.jpg', as_grey=False)
        flag = 0
        # 0 --- means Narrow Brown Spot Disease
        # 1 --- means Paddy Blast Disease
        # 2 --- means Brown Spot Disease
        ClassZero = 0
        ClassOne = 0
        ClassTwo = 0
        index = 0
        tmpIndex = 0
        imageNo += 1
        # Slide a 64x64 window with stride 25.
        for i in range(0, img.shape[0], 25):
            for j in range(0, img.shape[1], 25):
                if (i + 64 <= img.shape[0] and j + 64 <= img.shape[1]):
                    # Copy the window into the buffer pixel by pixel.
                    for k in range(i, i + 64):
                        for l in range(j, j + 64):
                            for p in range(3):
                                newimg[k - i][l - j][p] = img[k][l][p]
                    # NOTE(review): the healthy-leaf test runs on the WHOLE
                    # image (img1), not the current window (newimg) — so it
                    # is identical for every window; confirm whether the
                    # patch was intended here.
                    if (rgbCalculation(img1) == False):
                        flag = 1
                        tmpIndex += 1
                        # Persist the crop so classChecking can re-read it.
                        path = './Testing/Level' + str(imageNo) + '_' + str(tmpIndex) + '.jpg'
                        cv2.imwrite(path, cv2.resize(newimg, (64, 64), interpolation=cv2.INTER_LINEAR) )
                        if (classChecking(path,0)):
                            rect = patches.Rectangle((i, j), 64, 64, linewidth=1, edgecolor='r', facecolor='none')
                            ax.add_patch(rect)
                            plt.text(i, j, "0", font)
                            ClassZero += 1
                        else:
                            if (classChecking(path,2)):
                                rect = patches.Rectangle((i, j), 64, 64, linewidth=1, edgecolor='r', facecolor='none')
                                ax.add_patch(rect)
                                plt.text(i, j, "2", font)
                                ClassTwo += 1
                            elif (classChecking(path,1)):
                                rect = patches.Rectangle((i, j), 64, 64, linewidth=1, edgecolor='r', facecolor='none')
                                ax.add_patch(rect)
                                plt.text(i, j, "1", font)
                                ClassOne += 1
                            else:
                                print("None")
        print(ClassZero)
        print(ClassOne)
        print(ClassTwo)
        # Title by majority vote over the flagged windows.
        if (flag == 0):
            plt.title("Normal Leaf")
        else:
            if (ClassTwo >= ClassOne and ClassTwo >= ClassZero):
                plt.title("Brown Spot")
            elif (ClassZero >= ClassTwo and ClassZero >= ClassOne):
                plt.title("Narrow Brown Spot")
            else:
                plt.title("Paddy Blast")
        plt.show()
        indd += 1
        path2 = './Full/' + str(indd) + '.jpg'
        fig.savefig(path2)
def train_neural_network(x_var, train_set_x, One_hot_matrix_train):
    """Restore the checkpointed network and print its training-set accuracy.

    NOTE(review): the original training loop is commented out upstream; this
    function only restores ``my_net/save_net.ckpt``, evaluates, and calls
    ``fun()``. Indentation reconstructed from a whitespace-mangled source.
    """
    epochs_no = 90001                   # retained from the disabled training loop
    epoch_x = train_set_x.transpose()   # features as columns (disabled loop's feed)
    epoch_y = One_hot_matrix_train
    saver = tf.train.Saver()
    rem = .98                           # best accuracy seen so far (unused while training is off)
    cst = 12629                         # cost at best accuracy (unused while training is off)
    with tf.Session() as sess:
        saver.restore(sess, "my_net/save_net.ckpt")
        print("Accuracy: {0:.1%}".format(estimatedPrediction(train_set_x, One_hot_matrix_train)))
        fun()
################################################################################
# Module-level driver: load the dataset, build normalized features and one-hot
# labels, then evaluate the restored network.
# (Indentation reconstructed from a whitespace-mangled source.)
################################################################################
X = load.XX
y = load.yy
image_list = load.Image_list
train_set_x_orig = np.array(X)
train_set_y = np.array(y)
rem.trainImage(image_list)
m_train = len(train_set_y)
mask = 24604
train_set_x_flatten = featuresFunction(mask, image_list, 0)
# NOTE(review): `normalizeValue` is presumably initialized earlier in the file
# (not visible here) — confirm it starts at 0 or a sane floor.
for row in range(train_set_x_flatten.shape[0]):
    for col in range(train_set_x_flatten.shape[1]):
        normalizeValue = max(normalizeValue, train_set_x_flatten[row][col])
train_set_x = train_set_x_flatten / normalizeValue
##############
num_classes = 4
One_hot_matrix_train = np.zeros(shape=(len(train_set_y), num_classes))
for idx in range(len(train_set_y)):
    One_hot_matrix_train[idx][train_set_y[idx]] = 1
print(train_set_x.shape)
train_neural_network(x_var, train_set_x, One_hot_matrix_train)
|
{"hexsha": "acb8e88fe3208db108ad098f1b83513f5bdbc87d", "size": 12399, "ext": "py", "lang": "Python", "max_stars_repo_path": "Project Files/Full_image_check.py", "max_stars_repo_name": "greenJIS/A-Study-on-Paddy-Disease-Detection-using-Color-Co-occurrence-Features", "max_stars_repo_head_hexsha": "18a76b043951ef1c29428e063b7e2ef5703a1862", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-27T21:42:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-02T08:21:13.000Z", "max_issues_repo_path": "Project Files/Full_image_check.py", "max_issues_repo_name": "greenJIS/A-Study-on-Paddy-Disease-Detection-using-Color-Co-occurrence-Features", "max_issues_repo_head_hexsha": "18a76b043951ef1c29428e063b7e2ef5703a1862", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Project Files/Full_image_check.py", "max_forks_repo_name": "greenJIS/A-Study-on-Paddy-Disease-Detection-using-Color-Co-occurrence-Features", "max_forks_repo_head_hexsha": "18a76b043951ef1c29428e063b7e2ef5703a1862", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-01-02T14:02:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T23:06:20.000Z", "avg_line_length": 33.4204851752, "max_line_length": 131, "alphanum_fraction": 0.5407694169, "include": true, "reason": "import numpy", "num_tokens": 3178}
|
// Copyright (c) 2014-2017 The Dash Core developers
// Copyright (c) 2017-2019 The KZCash Core developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "darksend.h"
#include "governance-vote.h"
#include "masternodeman.h"
#include "util.h"
#include <boost/lexical_cast.hpp>
// Map a vote outcome enum to its canonical string form.
// Unknown enum values yield "error", as in the original.
std::string CGovernanceVoting::ConvertOutcomeToString(vote_outcome_enum_t nOutcome)
{
    switch (nOutcome)
    {
        case VOTE_OUTCOME_NONE:    return "NONE";
        case VOTE_OUTCOME_YES:     return "YES";
        case VOTE_OUTCOME_NO:      return "NO";
        case VOTE_OUTCOME_ABSTAIN: return "ABSTAIN";
    }
    return "error";
}
// Map a vote signal enum to its canonical string form.
// Values not listed fall through to "NONE" — the same default the original
// produced by pre-initializing its local to "NONE".
std::string CGovernanceVoting::ConvertSignalToString(vote_signal_enum_t nSignal)
{
    switch (nSignal)
    {
        case VOTE_SIGNAL_NONE:     return "NONE";
        case VOTE_SIGNAL_FUNDING:  return "FUNDING";
        case VOTE_SIGNAL_VALID:    return "VALID";
        case VOTE_SIGNAL_DELETE:   return "DELETE";
        case VOTE_SIGNAL_ENDORSED: return "ENDORSED";
        case VOTE_SIGNAL_NOOP1:    return "NOOP1";
        case VOTE_SIGNAL_NOOP2:    return "NOOP2";
        case VOTE_SIGNAL_NOOP3:    return "NOOP3";
        case VOTE_SIGNAL_NOOP4:    return "NOOP4";
        case VOTE_SIGNAL_NOOP5:    return "NOOP5";
        case VOTE_SIGNAL_NOOP6:    return "NOOP6";
        case VOTE_SIGNAL_NOOP7:    return "NOOP7";
        case VOTE_SIGNAL_NOOP8:    return "NOOP8";
        case VOTE_SIGNAL_NOOP9:    return "NOOP9";
        case VOTE_SIGNAL_NOOP10:   return "NOOP10";
        case VOTE_SIGNAL_NOOP11:   return "NOOP11";
        case VOTE_SIGNAL_CUSTOM1:  return "CUSTOM1";
        case VOTE_SIGNAL_CUSTOM2:  return "CUSTOM2";
        case VOTE_SIGNAL_CUSTOM3:  return "CUSTOM3";
        case VOTE_SIGNAL_CUSTOM4:  return "CUSTOM4";
        case VOTE_SIGNAL_CUSTOM5:  return "CUSTOM5";
        case VOTE_SIGNAL_CUSTOM6:  return "CUSTOM6";
        case VOTE_SIGNAL_CUSTOM7:  return "CUSTOM7";
        case VOTE_SIGNAL_CUSTOM8:  return "CUSTOM8";
        case VOTE_SIGNAL_CUSTOM9:  return "CUSTOM9";
        case VOTE_SIGNAL_CUSTOM10: return "CUSTOM10";
        case VOTE_SIGNAL_CUSTOM11: return "CUSTOM11";
        case VOTE_SIGNAL_CUSTOM12: return "CUSTOM12";
        case VOTE_SIGNAL_CUSTOM13: return "CUSTOM13";
        case VOTE_SIGNAL_CUSTOM14: return "CUSTOM14";
        case VOTE_SIGNAL_CUSTOM15: return "CUSTOM15";
        case VOTE_SIGNAL_CUSTOM16: return "CUSTOM16";
        case VOTE_SIGNAL_CUSTOM17: return "CUSTOM17";
        case VOTE_SIGNAL_CUSTOM18: return "CUSTOM18";
        case VOTE_SIGNAL_CUSTOM19: return "CUSTOM19";
        case VOTE_SIGNAL_CUSTOM20: return "CUSTOM20";
    }
    return "NONE";
}
// Parse a lowercase outcome keyword ("yes" / "no" / "abstain").
// Anything unrecognised maps to VOTE_OUTCOME_NONE.
vote_outcome_enum_t CGovernanceVoting::ConvertVoteOutcome(std::string strVoteOutcome)
{
    if (strVoteOutcome == "yes")     return VOTE_OUTCOME_YES;
    if (strVoteOutcome == "no")      return VOTE_OUTCOME_NO;
    if (strVoteOutcome == "abstain") return VOTE_OUTCOME_ABSTAIN;
    return VOTE_OUTCOME_NONE;
}
// Parse a vote-signal keyword ("funding", "valid", "delete", "endorsed") or a
// raw integer in the sentinel custom range. Unknown input yields
// VOTE_SIGNAL_NONE.
//
// Consistency fix: the original mixed `else if` and bare `if` in the keyword
// chain. The keywords are mutually exclusive, so behavior is unchanged, but
// the chain is now uniformly `else if`.
vote_signal_enum_t CGovernanceVoting::ConvertVoteSignal(std::string strVoteSignal)
{
    vote_signal_enum_t eSignal = VOTE_SIGNAL_NONE;
    if(strVoteSignal == "funding") {
        eSignal = VOTE_SIGNAL_FUNDING;
    }
    else if(strVoteSignal == "valid") {
        eSignal = VOTE_SIGNAL_VALID;
    }
    else if(strVoteSignal == "delete") {
        eSignal = VOTE_SIGNAL_DELETE;
    }
    else if(strVoteSignal == "endorsed") {
        eSignal = VOTE_SIGNAL_ENDORSED;
    }
    if(eSignal != VOTE_SIGNAL_NONE) {
        return eSignal;
    }

    // ID FIVE THROUGH CUSTOM_START ARE TO BE USED BY GOVERNANCE ENGINE / TRIGGER SYSTEM
    // convert custom sentinel outcomes to integer and store
    try {
        int i = boost::lexical_cast<int>(strVoteSignal);
        // Only accept integers inside the custom range.
        if(i < VOTE_SIGNAL_CUSTOM1 || i > VOTE_SIGNAL_CUSTOM20) {
            eSignal = VOTE_SIGNAL_NONE;
        }
        else {
            eSignal = vote_signal_enum_t(i);
        }
    }
    catch(std::exception const & e)
    {
        // Non-numeric input: log and fall through to VOTE_SIGNAL_NONE.
        std::ostringstream ostr;
        ostr << "CGovernanceVote::ConvertVoteSignal: error : " << e.what() << std::endl;
        LogPrintf(ostr.str().c_str());
    }
    return eSignal;
}
// Default-construct an empty/unsigned vote (all fields at their "none" values).
// NOTE(review): initializer order here differs from what the member declaration
// order is likely to be — members initialize in declaration order regardless;
// confirm against the header if any initializer ever depends on another.
CGovernanceVote::CGovernanceVote()
    : fValid(true),
      fSynced(false),
      nVoteSignal(int(VOTE_SIGNAL_NONE)),
      vinMasternode(),
      nParentHash(),
      nVoteOutcome(int(VOTE_OUTCOME_NONE)),
      nTime(0),
      vchSig()
{}
// Construct an unsigned vote for a governance object: the voting masternode's
// collateral input, the governance object's hash, and the signal/outcome pair.
// nTime is stamped with network-adjusted time; the signature is added by Sign().
CGovernanceVote::CGovernanceVote(CTxIn vinMasternodeIn, uint256 nParentHashIn, vote_signal_enum_t eVoteSignalIn, vote_outcome_enum_t eVoteOutcomeIn)
    : fValid(true),
      fSynced(false),
      nVoteSignal(eVoteSignalIn),
      vinMasternode(vinMasternodeIn),
      nParentHash(nParentHashIn),
      nVoteOutcome(eVoteOutcomeIn),
      nTime(GetAdjustedTime()),
      vchSig()
{}
// Broadcast this vote to peers as a governance-vote inventory message.
void CGovernanceVote::Relay() const
{
    CInv inv(MSG_GOVERNANCE_OBJECT_VOTE, GetHash());
    RelayInv(inv, PROTOCOL_VERSION);
}
// Sign the vote message with the masternode key, then verify the signature
// round-trips against the matching public key. Returns false (and logs) on
// either failure.
//
// Cleanup: removed two unused locals (pubKeyCollateralAddress,
// keyCollateralAddress) and the stale "Choose coins to use" comment that were
// copy-pasted from elsewhere and never used here.
bool CGovernanceVote::Sign(CKey& keyMasternode, CPubKey& pubKeyMasternode)
{
    std::string strError;
    // Message layout must match IsValid(): prevout|parentHash|signal|outcome|time.
    std::string strMessage = vinMasternode.prevout.ToStringShort() + "|" + nParentHash.ToString() + "|" +
        boost::lexical_cast<std::string>(nVoteSignal) + "|" + boost::lexical_cast<std::string>(nVoteOutcome) + "|" + boost::lexical_cast<std::string>(nTime);

    if(!darkSendSigner.SignMessage(strMessage, vchSig, keyMasternode)) {
        LogPrintf("CGovernanceVote::Sign -- SignMessage() failed\n");
        return false;
    }

    if(!darkSendSigner.VerifyMessage(pubKeyMasternode, vchSig, strMessage, strError)) {
        LogPrintf("CGovernanceVote::Sign -- VerifyMessage() failed, error: %s\n", strError);
        return false;
    }
    return true;
}
// Validate a vote: timestamp window, signal/outcome ranges, known masternode,
// and (when fSignatureCheck) the masternode signature over the same message
// layout produced by Sign().
bool CGovernanceVote::IsValid(bool fSignatureCheck) const
{
    // Reject votes stamped more than one hour into the future.
    if(nTime > GetTime() + (60*60)) {
        LogPrint("gobject", "CGovernanceVote::IsValid -- vote is too far ahead of current time - %s - nTime %lli - Max Time %lli\n", GetHash().ToString(), nTime, GetTime() + (60*60));
        return false;
    }

    // support up to 50 actions (implemented in sentinel)
    if(nVoteSignal > MAX_SUPPORTED_VOTE_SIGNAL)
    {
        LogPrint("gobject", "CGovernanceVote::IsValid -- Client attempted to vote on invalid signal(%d) - %s\n", nVoteSignal, GetHash().ToString());
        return false;
    }

    // 0=none, 1=yes, 2=no, 3=abstain. Beyond that reject votes
    if(nVoteOutcome > 3)
    {
        // BUG FIX: the original logged nVoteSignal here; report the offending
        // outcome value instead.
        LogPrint("gobject", "CGovernanceVote::IsValid -- Client attempted to vote on invalid outcome(%d) - %s\n", nVoteOutcome, GetHash().ToString());
        return false;
    }

    masternode_info_t infoMn = mnodeman.GetMasternodeInfo(vinMasternode);
    if(!infoMn.fInfoValid) {
        LogPrint("gobject", "CGovernanceVote::IsValid -- Unknown Masternode - %s\n", vinMasternode.prevout.ToStringShort());
        return false;
    }

    if(!fSignatureCheck) return true;

    std::string strError;
    std::string strMessage = vinMasternode.prevout.ToStringShort() + "|" + nParentHash.ToString() + "|" +
        boost::lexical_cast<std::string>(nVoteSignal) + "|" + boost::lexical_cast<std::string>(nVoteOutcome) + "|" + boost::lexical_cast<std::string>(nTime);

    if(!darkSendSigner.VerifyMessage(infoMn.pubKeyMasternode, vchSig, strMessage, strError)) {
        LogPrintf("CGovernanceVote::IsValid -- VerifyMessage() failed, error: %s\n", strError);
        return false;
    }
    return true;
}
// Two votes are equal when every identifying field matches.
bool operator==(const CGovernanceVote& vote1, const CGovernanceVote& vote2)
{
    return (vote1.vinMasternode == vote2.vinMasternode)
        && (vote1.nParentHash == vote2.nParentHash)
        && (vote1.nVoteOutcome == vote2.nVoteOutcome)
        && (vote1.nVoteSignal == vote2.nVoteSignal)
        && (vote1.nTime == vote2.nTime);
}
// Strict weak ordering over (vinMasternode, nParentHash, nVoteOutcome,
// nVoteSignal, nTime), in that priority.
//
// BUG FIX: the original returned false for *every* pair of votes — when
// vin1 < vin2 held it fell through to `fResult = (vin1 == vin2)` (false for
// strictly smaller vins) and the subsequent `if(!fResult) return false;`
// short-circuited; when vin1 was not less it returned false immediately, with
// no tie-break on equal vins. This broke any sorted container / std::sort use.
// The rewrite implements the clearly intended lexicographic comparison using
// only the member operators the original relied on (== and <).
bool operator<(const CGovernanceVote& vote1, const CGovernanceVote& vote2)
{
    if(!(vote1.vinMasternode == vote2.vinMasternode)) {
        return vote1.vinMasternode < vote2.vinMasternode;
    }
    if(!(vote1.nParentHash == vote2.nParentHash)) {
        return vote1.nParentHash < vote2.nParentHash;
    }
    if(vote1.nVoteOutcome != vote2.nVoteOutcome) {
        return vote1.nVoteOutcome < vote2.nVoteOutcome;
    }
    if(vote1.nVoteSignal != vote2.nVoteSignal) {
        return vote1.nVoteSignal < vote2.nVoteSignal;
    }
    return vote1.nTime < vote2.nTime;
}
|
{"hexsha": "8b2b8b9fcd79f06f2e3fd00c4968c8507d38177c", "size": 10292, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/governance-vote.cpp", "max_stars_repo_name": "unitedcryptocommunity/kzcash", "max_stars_repo_head_hexsha": "763bc86787079356f9f0b60b6256a522979caf4a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2017-11-29T12:03:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-10T01:03:25.000Z", "max_issues_repo_path": "src/governance-vote.cpp", "max_issues_repo_name": "unitedcryptocommunity/kzcash", "max_issues_repo_head_hexsha": "763bc86787079356f9f0b60b6256a522979caf4a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2017-11-05T14:34:15.000Z", "max_issues_repo_issues_event_max_datetime": "2017-11-12T00:11:00.000Z", "max_forks_repo_path": "src/governance-vote.cpp", "max_forks_repo_name": "unitedcryptocommunity/kzcash", "max_forks_repo_head_hexsha": "763bc86787079356f9f0b60b6256a522979caf4a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2017-11-19T15:04:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T22:38:23.000Z", "avg_line_length": 30.5400593472, "max_line_length": 183, "alphanum_fraction": 0.6060046638, "num_tokens": 2669}
|
[STATEMENT]
lemma lift\<^sub>c_Throw:
"(lift\<^sub>c prj inject c = Throw) = (c = Throw)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (lift\<^sub>c prj inject c = Throw) = (c = Throw)
[PROOF STEP]
by (cases c) auto
|
{"llama_tokens": 99, "file": "Simpl_ex_Compose", "length": 1}
|
# code from https://github.com/xmu-xiaoma666/External-Attention-pytorch/blob/master/attention/BAM.py
import numpy as np
import torch
from torch import nn
from torch.nn import init
class Flatten(nn.Module):
    """Collapse every dimension after the batch axis into a single one."""

    def forward(self, x):
        batch = x.shape[0]
        return x.view(batch, -1)
class ChannelAttention(nn.Module):
    """Channel branch of BAM: global average pool followed by a bottleneck MLP.

    Produces a per-channel attention map broadcast back to the input's shape.
    Submodule names ('flatten', 'fc%d', 'bn%d', 'relu%d', 'last_fc') are kept
    identical to the original so state_dicts remain compatible.
    """

    def __init__(self, channel, reduction=16, num_layers=3):
        super().__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # Layer widths: channel -> (channel // reduction) x num_layers -> channel
        widths = [channel] + [channel // reduction] * num_layers + [channel]
        self.ca = nn.Sequential()
        self.ca.add_module('flatten', Flatten())
        for idx in range(len(widths) - 2):
            self.ca.add_module('fc%d' % idx, nn.Linear(widths[idx], widths[idx + 1]))
            self.ca.add_module('bn%d' % idx, nn.BatchNorm1d(widths[idx + 1]))
            self.ca.add_module('relu%d' % idx, nn.ReLU())
        self.ca.add_module('last_fc', nn.Linear(widths[-2], widths[-1]))

    def forward(self, x):
        pooled = self.avgpool(x)                      # (B, C, 1, 1)
        attn = self.ca(pooled)                        # (B, C)
        return attn.unsqueeze(-1).unsqueeze(-1).expand_as(x)
class SpatialAttention(nn.Module):
    """Spatial branch of BAM: 1x1 channel reduction, dilated 3x3 convs, 1x1 to
    a single-channel map expanded back over the input.

    NOTE(review): each 3x3 conv uses padding=1 with dilation=dia_val, so for
    dia_val=2 every conv shrinks H and W by 2; the final expand_as only
    succeeds when the shrunken dims become broadcastable (e.g. 7x7 inputs
    collapse to 1x1) — confirm this asymmetry is intended upstream.
    """

    def __init__(self, channel, reduction=16, num_layers=3, dia_val=2):
        super().__init__()
        reduced = channel // reduction
        self.sa = nn.Sequential()
        self.sa.add_module('conv_reduce1',
                           nn.Conv2d(kernel_size=1, in_channels=channel, out_channels=reduced))
        self.sa.add_module('bn_reduce1', nn.BatchNorm2d(reduced))
        self.sa.add_module('relu_reduce1', nn.ReLU())
        for idx in range(num_layers):
            self.sa.add_module('conv_%d' % idx,
                               nn.Conv2d(kernel_size=3, in_channels=reduced,
                                         out_channels=reduced, padding=1, dilation=dia_val))
            self.sa.add_module('bn_%d' % idx, nn.BatchNorm2d(reduced))
            self.sa.add_module('relu_%d' % idx, nn.ReLU())
        self.sa.add_module('last_conv', nn.Conv2d(reduced, 1, kernel_size=1))

    def forward(self, x):
        attn = self.sa(x)
        return attn.expand_as(x)
class BAMBlock(nn.Module):
    """Bottleneck Attention Module (BAM).

    Combines the channel and spatial attention branches as
    ``out = (1 + sigmoid(sa(x) + ca(x))) * x``.

    Cleanup: removed the original's unused ``b, c, _, _ = x.size()`` unpack in
    forward(). Note that ``init_weights()`` is defined but never invoked by
    ``__init__`` (matching the original) — call it explicitly if desired.
    """

    def __init__(self, channel=512, reduction=16, dia_val=2):
        super().__init__()
        self.ca = ChannelAttention(channel=channel, reduction=reduction)
        self.sa = SpatialAttention(channel=channel, reduction=reduction, dia_val=dia_val)
        self.sigmoid = nn.Sigmoid()

    def init_weights(self):
        """Kaiming init for convs, constant init for BN, small-normal for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)

    def forward(self, x):
        sa_out = self.sa(x)
        ca_out = self.ca(x)
        weight = self.sigmoid(sa_out + ca_out)
        out = (1 + weight) * x
        return out
if __name__ == '__main__':
    # Smoke test: run a random batch through BAM and report the output shape.
    sample = torch.randn(50, 512, 7, 7)
    block = BAMBlock(channel=512, reduction=16, dia_val=2)
    print(block(sample).shape)
|
{"hexsha": "25e5185865498434e488402d5510ecf495447807", "size": 3498, "ext": "py", "lang": "Python", "max_stars_repo_path": "external_attention_block/BAM.py", "max_stars_repo_name": "Roypic/Attention_Code", "max_stars_repo_head_hexsha": "5b6cbfc36e49101567d19d65894641550917a66e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-07-05T08:31:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T02:42:29.000Z", "max_issues_repo_path": "external_attention_block/BAM.py", "max_issues_repo_name": "Roypic/Attention_Code", "max_issues_repo_head_hexsha": "5b6cbfc36e49101567d19d65894641550917a66e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "external_attention_block/BAM.py", "max_forks_repo_name": "Roypic/Attention_Code", "max_forks_repo_head_hexsha": "5b6cbfc36e49101567d19d65894641550917a66e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-05T08:31:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-28T10:57:02.000Z", "avg_line_length": 38.0217391304, "max_line_length": 120, "alphanum_fraction": 0.5969125214, "include": true, "reason": "import numpy", "num_tokens": 860}
|
#include "rpcConnection.h"

#include "collector/statCollectorManager.h"

#include <boost/bind.hpp>
#include <jansson.h>

#include <memory>
// Takes ownership of `socket` (released in finish()) and immediately starts an
// asynchronous read of one newline-terminated JSON request.
RPCConnection::RPCConnection(tcp::socket* socket) : m_socket(socket), m_buffer(SOCK_BUFFER_SIZE)
{
    async_read_until(*m_socket, m_buffer,
        '\n', boost::bind(&RPCConnection::handle_read, this,
        boost::asio::placeholders::error,
        boost::asio::placeholders::bytes_transferred));
}
// Nothing to release here: the socket (and the object itself) are torn down
// in finish().
RPCConnection::~RPCConnection()
{
}
// Parse one JSON request from the socket, dispatch the named method against
// the StatCollectorManager, and asynchronously send back a JSON response of
// the form {"success": bool, "result": ..., "error": "..."}.
//
// BUG FIX: the original passed the malloc'd `json_dumps` buffer to
// async_write and free()d it immediately afterwards. boost::asio::buffer does
// NOT copy the data — the bytes must remain valid until the completion
// handler runs, so this was a use-after-free. The response is now copied into
// a shared_ptr<std::string> which the completion handler keeps alive.
void RPCConnection::handle_read(const boost::system::error_code& ec, size_t bytes)
{
    if (ec.value() != 0) {
        finish();
        return;
    }

    // Read request:
    m_buffer.commit(bytes); // NOTE(review): async_read_until already commits the
                            // data it reads; this extra commit appears to be a
                            // harmless no-op — confirm before removing.
    std::ostringstream ss;
    ss << &m_buffer;
    std::string data = ss.str();

    json_error_t error;
    json_t* args = json_loads(data.c_str(), 0, &error);
    if (!args) {
        std::cerr << "failed to load message " << data
                  << ": json_loads returned NULL: error on line "
                  << error.line << ": " << error.text << std::endl;
        finish();
        return;
    }

    if (!json_is_object(args)) {
        std::cerr << "failed to load message " << data
                  << ": args is not an object" << std::endl;
        json_decref(args);
        finish();
        return;
    }

    json_t* j_method = json_object_get(args, "method");
    if (!j_method || !json_is_string(j_method)) {
        std::cerr << "failed to load message " << data \
                  << ": invalid or missing method" << std::endl;
        json_decref(args);
        finish();
        return;
    }

    json_t* resp = json_object();
    json_t* result = nullptr;
    bool success = false;
    std::string error_str = "";
    std::string method = std::string(json_string_value(j_method));

    // Process request:
    if (method == "list") {
        success = true;
        StatCollectorManager::get_global_ptr()->write_json(&result);
    }
    else if (method == "remove") {
        auto j_name = json_object_get(args, "name");
        if (!j_name || !json_is_string(j_name)) {
            error_str = "invalid or missing 'name' param";
        } else {
            std::string name = json_string_value(j_name);
            success = StatCollectorManager::get_global_ptr()->remove_collector(name);
            if (!success)
                error_str = "no such collector";
        }
    }
    else if (method == "add_incremental") {
        auto j_name = json_object_get(args, "name");
        auto j_event = json_object_get(args, "event");
        if (!j_name || !json_is_string(j_name)) {
            error_str = "invalid or missing 'name' param";
        }
        else if (!j_event || !json_is_string(j_event)) {
            error_str = "invalid or missing 'event' param";
        } else {
            std::string name = json_string_value(j_name);
            std::string event = json_string_value(j_event);
            success = StatCollectorManager::get_global_ptr()->add_incremental_collector(name, event);
            if (!success)
                error_str = name + " already exists";
        }
    }
    else if (method == "add_periodic") {
        auto j_name = json_object_get(args, "name");
        auto j_event = json_object_get(args, "event");
        auto j_period = json_object_get(args, "period");
        if (!j_name || !json_is_string(j_name)) {
            error_str = "invalid or missing 'name' param";
        }
        else if (!j_event || !json_is_string(j_event)) {
            error_str = "invalid or missing 'event' param";
        }
        else if (!j_period || !json_is_integer(j_period)) {
            error_str = "invalid or missing 'period' param";
        }
        else {
            std::string name = json_string_value(j_name);
            std::string event = json_string_value(j_event);
            unsigned int period = json_integer_value(j_period);
            success = StatCollectorManager::get_global_ptr()->add_periodic_collector(name, event, period);
            if (!success)
                error_str = name + " already exists";
        }
    }
    else if (method == "add_highscore") {
        auto j_name = json_object_get(args, "name");
        auto j_event = json_object_get(args, "event");
        auto j_reversed = json_object_get(args, "reversed");
        if (!j_name || !json_is_string(j_name)) {
            error_str = "invalid or missing 'name' param";
        }
        else if (!j_event || !json_is_string(j_event)) {
            error_str = "invalid or missing 'event' param";
        }
        else if (!j_reversed || !json_is_boolean(j_reversed)) {
            error_str = "invalid or missing 'reversed' param";
        }
        else {
            std::string name = json_string_value(j_name);
            std::string event = json_string_value(j_event);
            bool reversed = json_is_true(j_reversed);
            success = StatCollectorManager::get_global_ptr()->add_highscore_collector(name, event, reversed);
            if (!success)
                error_str = name + " already exists";
        }
    }
    else if (method == "ban") {
        auto j_id = json_object_get(args, "id");
        if (!j_id || !json_is_integer(j_id)) {
            error_str = "invalid or missing 'id' param";
        } else {
            doid_t id = json_integer_value(j_id);
            StatCollectorManager::get_global_ptr()->add_to_ban_list(id);
            success = true;
        }
    }
    else {
        error_str = "unknown method";
    }

    // Send response:
    json_object_set_new(resp, "success", json_boolean(success));
    if (result) {
        json_object_set_new(resp, "result", result);
    }
    else if (error_str.size()) {
        json_object_set_new(resp, "error", json_string(error_str.c_str()));
    }

    char* dump = json_dumps(resp, 0);
    // Copy the response so it outlives this function; the lambda holds the
    // shared_ptr until the write completes (see BUG FIX note above).
    std::shared_ptr<std::string> response = std::make_shared<std::string>(dump);
    free(dump);
    async_write(*m_socket, boost::asio::buffer(*response),
                [this, response](const boost::system::error_code& werr, size_t wbytes) {
                    handle_write(werr, wbytes);
                });

    // Cleanup:
    json_decref(resp);
    json_decref(args);
}
// Write completion: the connection serves exactly one request, so tear down
// regardless of the write's error status.
void RPCConnection::handle_write(const boost::system::error_code&, size_t)
{
    finish();
}
// Tear down the connection. The object owns both the socket and itself
// ("delete this" idiom): no member may be touched after this call, and
// callers must drop their pointer immediately.
void RPCConnection::finish()
{
    delete m_socket;
    delete this;
}
|
{"hexsha": "f688a6a678b8adfe0011d1f758e9a941a6bbf962", "size": 6422, "ext": "cxx", "lang": "C++", "max_stars_repo_path": "src/net/rpcConnection.cxx", "max_stars_repo_name": "C0MPU73R/tlopo-stats", "max_stars_repo_head_hexsha": "7a7c2bfb5c2a1b9888e94ac611ad76da193f9405", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-11-08T03:44:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T03:44:13.000Z", "max_issues_repo_path": "src/net/rpcConnection.cxx", "max_issues_repo_name": "C0MPU73R/tlopo-stats", "max_issues_repo_head_hexsha": "7a7c2bfb5c2a1b9888e94ac611ad76da193f9405", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/net/rpcConnection.cxx", "max_forks_repo_name": "C0MPU73R/tlopo-stats", "max_forks_repo_head_hexsha": "7a7c2bfb5c2a1b9888e94ac611ad76da193f9405", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7272727273, "max_line_length": 109, "alphanum_fraction": 0.5688259109, "num_tokens": 1469}
|
module ExaPF
# Package entry point: wires together shared dependencies, package-wide
# constants, and the submodules (AutoDiff, LinearSolvers, PowerSystem,
# the polar formulation, and the evaluators).

# Standard library
using Printf
using LinearAlgebra
using SparseArrays

import CUDA
import CUDA.CUBLAS
import CUDA.CUSPARSE
import CUDA.CUSOLVER

import ForwardDiff
using KernelAbstractions
const KA = KernelAbstractions

import MathOptInterface
const MOI = MathOptInterface

using TimerOutputs: @timeit, TimerOutput

import Base: show, get

# Verbosity levels shared across the package's solvers and printers.
const VERBOSE_LEVEL_HIGH = 3
const VERBOSE_LEVEL_MEDIUM = 2
const VERBOSE_LEVEL_LOW = 1
const VERBOSE_LEVEL_NONE = 0

# Package-global timer used by the @timeit annotations.
const TIMER = TimerOutput()

include("utils.jl")
include("architectures.jl")

# Templates
include("models.jl")

# Import submodules
include("autodiff.jl")
using .AutoDiff
include("LinearSolvers/LinearSolvers.jl")
using .LinearSolvers
include("PowerSystem/PowerSystem.jl")
using .PowerSystem
const PS = PowerSystem

# Polar formulation
include("Polar/polar.jl")
# Evaluators
include("Evaluators/Evaluators.jl")

end
|
{"hexsha": "626c9f5a5e7c45c17adffa6908b9746799c5c998", "size": 886, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ExaPF.jl", "max_stars_repo_name": "exanauts/ExaPF.jl", "max_stars_repo_head_hexsha": "cd1bcb8a0782fe448d46a10816f82c5d28c3854e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2020-07-15T16:01:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T21:28:25.000Z", "max_issues_repo_path": "src/ExaPF.jl", "max_issues_repo_name": "exanauts/ExaPF.jl", "max_issues_repo_head_hexsha": "cd1bcb8a0782fe448d46a10816f82c5d28c3854e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 129, "max_issues_repo_issues_event_min_datetime": "2020-07-02T11:59:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T20:10:54.000Z", "max_forks_repo_path": "src/ExaPF.jl", "max_forks_repo_name": "exanauts/ExaPF.jl", "max_forks_repo_head_hexsha": "cd1bcb8a0782fe448d46a10816f82c5d28c3854e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-15T18:49:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-29T20:12:19.000Z", "avg_line_length": 17.72, "max_line_length": 41, "alphanum_fraction": 0.8069977427, "num_tokens": 238}
|
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function
import math
from collections import OrderedDict
from functools import partial
import numpy as np
from ..common import CaffeGraph, CaffeOp, CaffeDN, caffe_factory
from ..common import EXTRA_WEIGHTS
from ...common import dog
from ...common.converter_base import ConverterBase
from ...common.nnef_dog_types import NnefOp, NnefDN, nnef_factory
from ...common.types import *
class Converter(ConverterBase):
    """Drives conversion of a Caffe graph into NNEF operations.

    Each Caffe op is dispatched through a converter table built from
    DefaultConverters, optionally overridden/extended by `custom_converters`.
    """

    def __init__(self, caffedog, custom_converters=None):
        # type: (CaffeGraph, Optional[Dict[str, Callable[(CaffeOp, Converter), None]]])->None
        converters = dict(DefaultConverters)
        if custom_converters:
            converters.update(custom_converters)
        super(Converter, self).__init__(sourcedog=caffedog,
                                        source_factory=caffe_factory,
                                        target_factory=nnef_factory,
                                        converters=converters)

    def make_variable(self, caffeop, discriminator, value):
        # type: (CaffeOp, str, np.ndarray)->NnefDN
        """Emit an NNEF `variable` op holding `value` and return its result DN.

        The variable is labelled "<op name>/<discriminator>"; the raw ndarray
        is attached to the op via EXTRA_WEIGHTS for the weight writer.
        """
        nnefop = NnefOp("variable")
        nnefop.add_arg("shape", list(value.shape))
        nnefop.add_arg("label", "{}/{}".format(caffeop.args["name"], discriminator))
        nnefop.add_result("output", self.make_targetdn(name=caffeop.args["name"], discriminator=discriminator))
        self.add_targetop(nnefop)
        nnefop.extra[EXTRA_WEIGHTS] = value
        return nnefop.result

    @staticmethod
    def nnef_padding(pad):
        # type: (List[int])->List[Tuple[int, int]]
        """Expand per-axis symmetric padding into NNEF (front, back) pairs."""
        return list(zip(pad, pad))

    @staticmethod
    def nnef_axis(axis, rank):
        # type: (int, int)->int
        """Normalize a possibly-negative axis index to a non-negative one."""
        return axis if axis >= 0 else rank + axis

    @staticmethod
    def get_pooling_right_padding(h, k, p, q, s):
        # type: (int, int, int, int, int)->int
        """Right padding that makes Caffe's ceil-mode pooling output exact.

        h: input extent, k: kernel size, p: left pad, q: right pad, s: stride.
        """
        a = int(math.ceil(float(h + p + q - k) / s))
        return s * a + k - h - p
def generic_convert_input(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert a Caffe Input op into an NNEF `external` declaration."""
    shapes = caffeop.args["shapes"]
    if len(shapes) != 1:
        converter.print_error(caffeop, "Input operation only supported with 1 result")
    arguments = OrderedDict([("shape", shapes[0])])
    results = OrderedDict([("output", caffeop.result)])
    converter.add_targetop_ex(caffeop, "external", arguments, results)
def convert_scaled_batch_norm(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe BatchNorm(+Scale) into NNEF batch_normalization.

    Caffe stores running mean/variance unnormalized together with a
    scale_factor; both stats are multiplied by 1/scale_factor before export
    (scale_factor == 0 is treated as "no normalization stored").  All vectors
    get a leading singleton batch axis so they broadcast over NCHW data.
    """
    weights = caffeop.extra[EXTRA_WEIGHTS]
    if weights["scale_factor"].shape != (1,):
        converter.print_error(caffeop, "scale_factor.shape must be [1]")
    scale_factor = weights["scale_factor"][0]
    # Guard against divide-by-zero: zero scale_factor means unnormalized stats.
    norm = 0.0 if scale_factor == 0.0 else 1.0 / scale_factor
    converter.add_targetop_ex(caffeop, "batch_normalization",
                              OrderedDict([
                                  ("input", caffeop.args[dog.gen_arg_name(0)]),
                                  ("mean", converter.make_variable(caffeop, "mean",
                                                                  np.expand_dims(weights["mean"] * norm, 0))),
                                  ("variance", converter.make_variable(caffeop, "variance",
                                                                       np.expand_dims(weights["variance"] * norm, 0))),
                                  ("offset", converter.make_variable(caffeop, "offset",
                                                                     np.expand_dims(weights["bias"], 0))),
                                  ("scale", converter.make_variable(caffeop, "scale",
                                                                    np.expand_dims(weights["weight"], 0))),
                                  ("epsilon", caffeop.args["eps"]),
                              ]),
                              OrderedDict([
                                  ("output", caffeop.result)
                              ]))
def convert_scale(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe Scale into an NNEF mul (plus add when bias_term is set).

    Weight/bias vectors get a leading singleton batch axis so they broadcast
    over NCHW activations.
    """
    weights = caffeop.extra[EXTRA_WEIGHTS]
    mul = NnefOp("mul")
    mul.add_arg("x", converter.get_targetdn(caffeop.args[dog.gen_arg_name(0)]))
    mul.add_arg("y", converter.make_variable(caffeop, "weight", np.expand_dims(weights["weight"], 0)))
    if caffeop.args["bias_term"]:
        # Intermediate node "<name>/scale"; the final result is produced by the add.
        mul.add_result("z", converter.make_targetdn(name=caffeop.args["name"], discriminator="scale"))
    else:
        mul.add_result("z", converter.make_targetdn(sourcedn=caffeop.result_node))
    converter.add_targetop(mul, caffeop)
    if caffeop.args["bias_term"]:
        add = NnefOp("add")
        add.add_arg("x", mul.result_node)
        add.add_arg("y", converter.make_variable(caffeop, "bias", np.expand_dims(weights["bias"], 0)))
        add.add_result("z", converter.make_targetdn(sourcedn=caffeop.result_node))
        converter.add_targetop(add, caffeop)
def generic_convert_unary(caffeop, converter, target_name):
    # type: (CaffeOp, Converter, str)->None
    """Map a single-input Caffe op onto the NNEF unary op `target_name`."""
    inputs = OrderedDict([("x", caffeop.args[dog.gen_arg_name(0)])])
    outputs = OrderedDict([("y", caffeop.result)])
    converter.add_targetop_ex(caffeop, target_name, inputs, outputs)
def convert_power(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe Power, y = (scale*x + shift)^power, into mul/add/pow ops.

    Stages that equal identity (scale==1, shift==0, power==1) are skipped; if
    all would be skipped, a single pow with exponent 1 is still emitted so the
    result node is produced.  Each non-final stage writes into an intermediate
    "<name>/scale" or "<name>/shift" node; the final stage writes the result.
    """
    input_ = converter.get_targetdn(caffeop.args[dog.gen_arg_name(0)])  # type: NnefDN
    ops = []
    if caffeop.args["scale"] != 1:
        mul = NnefOp("mul")
        mul.add_arg("x", input_)
        mul.add_arg("y", caffeop.args["scale"])
        output_ = (converter.make_targetdn(sourcedn=caffeop.result)
                   if caffeop.args["shift"] == 0 and caffeop.args["power"] == 1
                   else converter.make_targetdn(name=caffeop.args["name"], discriminator="scale"))
        mul.add_result("z", output_)
        ops.append(mul)
        input_ = output_
    if caffeop.args["shift"] != 0:
        add = NnefOp("add")
        add.add_arg("x", input_)
        add.add_arg("y", caffeop.args["shift"])
        output_ = (converter.make_targetdn(sourcedn=caffeop.result)
                   if caffeop.args["power"] == 1
                   else converter.make_targetdn(name=caffeop.args["name"], discriminator="shift"))
        add.add_result("z", output_)
        ops.append(add)
        input_ = output_
    if caffeop.args["power"] != 1 or not ops:
        pow_ = NnefOp("pow")
        pow_.add_arg("x", input_)
        pow_.add_arg("y", caffeop.args["power"])
        pow_.add_result("z", converter.make_targetdn(sourcedn=caffeop.result))
        ops.append(pow_)
    for op in ops:
        converter.add_targetop(op, caffeop)
def convert_relu(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe ReLU to NNEF 'relu', or 'leaky_relu' when a slope is set."""
    x = caffeop.args[dog.gen_arg_name(0)]
    slope = caffeop.args["negative_slope"]
    outputs = OrderedDict([("y", caffeop.result)])
    if slope == 0:
        converter.add_targetop_ex(caffeop, "relu",
                                  OrderedDict([("x", x)]),
                                  outputs)
    else:
        converter.add_targetop_ex(caffeop, "leaky_relu",
                                  OrderedDict([("x", x), ("alpha", slope)]),
                                  outputs)
def convert_prelu(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe PReLU to NNEF 'prelu' with a learned alpha variable."""
    weights = caffeop.extra[EXTRA_WEIGHTS]
    alpha_value = weights["alpha"]
    if not caffeop.args["channel_shared"]:
        # per-channel alpha gets a leading axis prepended, mirroring how the
        # other converters shape their per-channel parameters
        alpha_value = np.expand_dims(alpha_value, 0)
    alpha_var = converter.make_variable(caffeop, "alpha", alpha_value)
    converter.add_targetop_ex(
        caffeop, "prelu",
        OrderedDict([("x", caffeop.args[dog.gen_arg_name(0)]),
                     ("alpha", alpha_var)]),
        OrderedDict([("y", caffeop.result)]))
def convert_elu(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe ELU to NNEF 'elu'; only the default alpha=1 is mappable."""
    if caffeop.args["alpha"] != 1:
        converter.print_error(caffeop, "only alpha=1 is supported")
    inputs = OrderedDict()
    inputs["x"] = caffeop.args[dog.gen_arg_name(0)]
    outputs = OrderedDict()
    outputs["y"] = caffeop.result
    converter.add_targetop_ex(caffeop, "elu", inputs, outputs)
def generic_convert_convolution(caffeop, converter, target_name):
    # type: (CaffeOp, Converter, str)->None
    """Convert Caffe (De)convolution to NNEF 'conv'/'deconv'.

    A Deconvolution whose parameters exactly match Caffe's bilinear
    upsampling idiom (bilinear weight filler, depthwise grouping, no bias,
    kernel/stride/pad all derived from one integer factor) is recognized
    and emitted as NNEF 'multilinear_upsample' instead of a deconvolution.
    """
    factor = caffeop.args["stride"][0]
    # pattern-match Caffe's fixed-weight bilinear upsampling construction
    if (caffeop.name == "Deconvolution"
            and caffeop.args["weight_filler_type"] == "bilinear"
            and caffeop.args["num_output"] == caffeop.args[dog.gen_arg_name(0)].shape[1]
            and not caffeop.args["bias_term"]
            and caffeop.args["kernel_size"] == 2 * [2 * factor - factor % 2]
            and caffeop.args["stride"] == 2 * [factor]
            and caffeop.args["pad"] == 2 * [int(math.ceil((factor - 1) / 2.0))]
            and caffeop.args["group"] == dog.get_shape_safe(caffeop.args[dog.gen_arg_name(0)])[1]):
        converter.add_targetop_ex(caffeop, "multilinear_upsample",
                                  OrderedDict([
                                      ("input", caffeop.args[dog.gen_arg_name(0)]),
                                      ("factor", 2 * [factor]),
                                      ("method", "symmetric"),
                                      ("border", "constant")
                                  ]),
                                  OrderedDict([
                                      ("output", caffeop.result)
                                  ]))
    else:
        weights = caffeop.extra[EXTRA_WEIGHTS]
        converter.add_targetop_ex(caffeop, target_name,
                                  OrderedDict([
                                      ("input", caffeop.args[dog.gen_arg_name(0)]),
                                      ("filter", converter.make_variable(caffeop, "filter", weights["weight"])),
                                      # bias is a variable when present, otherwise the scalar 0.0
                                      ("bias",
                                       (converter.make_variable(caffeop, "bias", np.expand_dims(weights["bias"], 0))
                                        if caffeop.args["bias_term"]
                                        else 0.0)),
                                      ("border", "constant"),
                                      ("padding", converter.nnef_padding(caffeop.args["pad"])),
                                      ("stride", caffeop.args["stride"]),
                                      ("dilation", caffeop.args["dilation"]),
                                      ("groups", caffeop.args["group"]),
                                  ]),
                                  OrderedDict([
                                      ("output", caffeop.result)
                                  ]))
def convert_pooling(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe Pooling to NNEF pooling, or a reduce for global pooling."""
    rank = len(caffeop.args[dog.gen_arg_name(0)].shape)
    # Caffe PoolingParameter method codes: 0 = MAX, 1 = AVE
    pool_name_by_pool = {
        0: "max_pool",
        1: "avg_pool"
    }
    reduce_name_by_pool = {
        0: "max_reduce",
        1: "mean_reduce"
    }
    if caffeop.args["pool"] not in reduce_name_by_pool:
        converter.print_error(caffeop, "unsupported pool method {}".format(caffeop.args["pool"]))
        # fall back to MAX so conversion can proceed after reporting the error
        caffeop.set_arg("pool", 0)

    if caffeop.args["global_pooling"]:
        # global pooling reduces over all non-batch, non-channel axes
        converter.add_targetop_ex(caffeop, reduce_name_by_pool[caffeop.args["pool"]],
                                  OrderedDict([
                                      ("input", caffeop.args[dog.gen_arg_name(0)]),
                                      ("axes", list(range(2, rank)))
                                  ]),
                                  OrderedDict([
                                      ("output", caffeop.result)
                                  ]))
    else:
        input_size = caffeop.args[dog.gen_arg_name(0)].shape
        # batch and channel axes get identity size/stride/padding prepended
        padding = converter.nnef_padding([0, 0] + caffeop.args["pad"])
        stride = [1, 1] + caffeop.args["stride"]
        kernel_size = [1, 1] + caffeop.args["kernel_size"]

        # compensate for caffe's pooling output size calculation (ceil rather
        # than floor division) by enlarging the right-side padding:
        # https://github.com/BVLC/caffe/issues/1318#issuecomment-59594323
        old_padding = padding
        padding = [(p, converter.get_pooling_right_padding(h, k, p, q, s))
                   for h, k, (p, q), s in zip(input_size, kernel_size, old_padding, stride)]

        converter.add_targetop_ex(caffeop, pool_name_by_pool[caffeop.args["pool"]],
                                  OrderedDict([
                                      ("input", caffeop.args[dog.gen_arg_name(0)]),
                                      ("size", kernel_size),
                                      ("border", "constant"),
                                      ("padding", padding),
                                      ("stride", stride)
                                  ]),
                                  OrderedDict([
                                      ("output", caffeop.result)
                                  ]))
def convert_softmax(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe Softmax, mapping its single axis to an NNEF axes list."""
    input_ = caffeop.args[dog.gen_arg_name(0)]
    axis = converter.nnef_axis(caffeop.args["axis"], len(input_.shape))
    converter.add_targetop_ex(caffeop, "softmax",
                              OrderedDict([("x", input_),
                                           ("axes", [axis])]),
                              OrderedDict([("y", caffeop.result)]))
def convert_inner_product(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe InnerProduct to (reshape +) matmul (+ add for the bias).

    Caffe flattens everything from 'axis' onward before the matrix product,
    so a reshape is emitted first whenever 'axis' is not the last axis.
    Intermediate ops write to discriminated nodes; only the final op in the
    chain writes to the layer's result node.
    """
    weights = caffeop.extra[EXTRA_WEIGHTS]

    input_ = caffeop.args[dog.gen_arg_name(0)]
    rank = len(input_.shape)
    axis = converter.nnef_axis(caffeop.args["axis"], rank)

    if axis != rank - 1:
        # collapse axes [axis, rank) into one so matmul sees a 2D-style shape
        reshape = converter.add_targetop_ex(caffeop, "reshape",
                                            OrderedDict([
                                                ("input", input_),
                                                ("shape", input_.shape[:axis] + [-1])
                                            ]),
                                            OrderedDict([
                                                ("output", converter.make_targetdn(name=caffeop.args["name"],
                                                                                   discriminator="flatten"))
                                            ]))
        input_ = reshape.result_node

    # Caffe stores the weight transposed relative to NNEF unless 'transpose' is set
    matmul = converter.add_targetop_ex(caffeop, "matmul",
                                       OrderedDict([
                                           ("A", input_),
                                           ("B",
                                            converter.make_variable(caffeop,
                                                                    discriminator="weight",
                                                                    value=weights["weight"])),
                                           ("transposeA", False),
                                           ("transposeB", not caffeop.args["transpose"])
                                       ]),
                                       OrderedDict([
                                           ("C",
                                            (converter.make_targetdn(name=caffeop.args["name"], discriminator="matmul")
                                             if caffeop.args["bias_term"]
                                             else caffeop.result))
                                       ]))
    input_ = matmul.result_node

    if caffeop.args["bias_term"]:
        converter.add_targetop_ex(caffeop, "add",
                                  OrderedDict([
                                      ("x", input_),
                                      ("y", converter.make_variable(caffeop,
                                                                    discriminator="bias",
                                                                    value=np.expand_dims(weights["bias"], 0)))
                                  ]),
                                  OrderedDict([
                                      ("z", caffeop.result)
                                  ]))
def convert_reshape(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe Reshape by splicing the requested shape into the old one."""
    src = caffeop.args[dog.gen_arg_name(0)]
    old_shape = src.shape
    rank = len(old_shape)
    start = converter.nnef_axis(caffeop.args["axis"], rank)
    num_axes = caffeop.args["num_axes"]
    if num_axes == -1:
        num_axes = rank
    spliced = old_shape[:start] + caffeop.args["shape"] + old_shape[start + num_axes:]
    converter.add_targetop_ex(caffeop, "reshape",
                              OrderedDict([("input", src),
                                           ("shape", spliced)]),
                              OrderedDict([("output", caffeop.result)]))
def convert_flatten(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe Flatten: collapse axes [axis, end_axis] into a single -1."""
    src = caffeop.args[dog.gen_arg_name(0)]
    rank = len(src.shape)
    first = converter.nnef_axis(caffeop.args["axis"], rank)
    last = converter.nnef_axis(caffeop.args["end_axis"], rank)
    flattened = src.shape[:first] + [-1] + src.shape[last + 1:]
    converter.add_targetop_ex(caffeop, "reshape",
                              OrderedDict([("input", src),
                                           ("shape", flattened)]),
                              OrderedDict([("output", caffeop.result)]))
def convert_eltwise(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe Eltwise (PROD/SUM/MAX) to the matching NNEF binary op."""
    # Caffe EltwiseParameter operation codes: 0 = PROD, 1 = SUM, 2 = MAX
    target_name_by_operation = {0: "mul", 1: "add", 2: "max"}
    if caffeop.num_gen_args() != 2:
        converter.print_error(caffeop, "Eltwise only supported with 2 inputs")
    target = target_name_by_operation[caffeop.args["operation"]]
    converter.add_targetop_ex(caffeop, target,
                              OrderedDict([("x", caffeop.args[dog.gen_arg_name(0)]),
                                           ("y", caffeop.args[dog.gen_arg_name(1)])]),
                              OrderedDict([("z", caffeop.result_node)]))
def convert_lrn(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe LRN (across-channel only) to local_response_normalization."""
    input_ = caffeop.args[dog.gen_arg_name(0)]
    rank = len(input_.shape)
    if caffeop.args["norm_region"] != 0:
        converter.print_error(caffeop, "Only ACROSS_CHANNELS is supported")
    # the normalization window spans only the channel axis (axis 1)
    size = [1] * rank
    if rank > 1:
        size[1] = caffeop.args["local_size"]
    converter.add_targetop_ex(caffeop, "local_response_normalization",
                              OrderedDict([("input", input_),
                                           ("size", size),
                                           ("alpha", caffeop.args["alpha"]),
                                           ("beta", caffeop.args["beta"])]),
                              OrderedDict([("output", caffeop.result_node)]))
def convert_concat(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe Concat, joining all inputs along the resolved axis."""
    rank = len(caffeop.args[dog.gen_arg_name(0)].shape)
    nnef_axis = converter.nnef_axis(caffeop.args["axis"], rank)
    converter.add_targetop_ex(caffeop, "concat",
                              OrderedDict([("values", caffeop.get_arg_nodes()),
                                           ("axis", nnef_axis)]),
                              OrderedDict([("value", caffeop.result_node)]))
def convert_argmax(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe ArgMax (top_k=1, indices only) to NNEF argmax_reduce."""
    rank = len(caffeop.args[dog.gen_arg_name(0)].shape)
    if caffeop.args["top_k"] != 1 or caffeop.args["out_max_val"]:
        converter.print_error(caffeop, "Argmax params not supported yet")
    axis = caffeop.args["axis"]
    # with no axis given, Caffe reduces over all non-batch axes
    axes = list(range(1, rank)) if axis is None else [converter.nnef_axis(axis, rank)]
    converter.add_targetop_ex(caffeop, "argmax_reduce",
                              OrderedDict([("input", caffeop.args[dog.gen_arg_name(0)]),
                                           ("axes", axes)]),
                              OrderedDict([("output", caffeop.result_node)]))
def convert_crop(caffeop, converter):
    # type: (CaffeOp, Converter)->None
    """Convert Caffe Crop to an NNEF 'slice' sized by the reference input."""
    input_dn = caffeop.args[dog.gen_arg_name(0)]  # type: CaffeDN
    reference_dn = caffeop.args[dog.gen_arg_name(1)]  # type: CaffeDN
    rank = len(input_dn.shape)
    axis = caffeop.args["axis"]
    offset = caffeop.args["offset"]
    if len(offset) == 1:
        # a single scalar offset applies to every cropped axis
        offset = offset * (rank - axis)
    axes = list(range(axis, rank))
    begin = list(offset)
    # each cropped axis keeps exactly the reference input's extent
    end = [o + s for o, s in zip(offset, reference_dn.shape[axis:])]
    converter.add_targetop_ex(caffeop, "slice",
                              OrderedDict([("input", input_dn),
                                           ("axes", axes),
                                           ("begin", begin),
                                           ("end", end)]),
                              OrderedDict([("output", caffeop.result_node)]))
# dispatch table from Caffe layer type name to its converter function;
# single-argument entries are produced by binding the target NNEF op name
DefaultConverters = {
    "BNLL": partial(generic_convert_unary, target_name="softplus"),
    "TanH": partial(generic_convert_unary, target_name="tanh"),
    "AbsVal": partial(generic_convert_unary, target_name="abs"),
    "Sigmoid": partial(generic_convert_unary, target_name="sigmoid"),
    "Convolution": partial(generic_convert_convolution, target_name="conv"),
    "Deconvolution": partial(generic_convert_convolution, target_name="deconv"),
    "Input": generic_convert_input,
    "Python": generic_convert_input,
    "Data": generic_convert_input,
    "_ScaledBatchNorm": convert_scaled_batch_norm,
    "Scale": convert_scale,
    "Power": convert_power,
    "Softmax": convert_softmax,
    "Reshape": convert_reshape,
    "Flatten": convert_flatten,
    "ArgMax": convert_argmax,
    "Pooling": convert_pooling,
    "ELU": convert_elu,
    "ReLU": convert_relu,
    "PReLU": convert_prelu,
    "Concat": convert_concat,
    "Eltwise": convert_eltwise,
    "InnerProduct": convert_inner_product,
    "LRN": convert_lrn,
    "Crop": convert_crop
}  # type: Dict[str, Callable[[CaffeOp, Converter], None]]
|
{"hexsha": "a1dd30f3e0dfd46e88af9b13128a10508c82f885", "size": 24454, "ext": "py", "lang": "Python", "max_stars_repo_path": "legacy/caffe/nnef_converters/caffe_converters/caffe_to_nnef/converters.py", "max_stars_repo_name": "jnorwood/NNEF-Tools", "max_stars_repo_head_hexsha": "5eb3755b5322040d42893e41b15093337abe04ce", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "legacy/caffe/nnef_converters/caffe_converters/caffe_to_nnef/converters.py", "max_issues_repo_name": "jnorwood/NNEF-Tools", "max_issues_repo_head_hexsha": "5eb3755b5322040d42893e41b15093337abe04ce", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "legacy/caffe/nnef_converters/caffe_converters/caffe_to_nnef/converters.py", "max_forks_repo_name": "jnorwood/NNEF-Tools", "max_forks_repo_head_hexsha": "5eb3755b5322040d42893e41b15093337abe04ce", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7303754266, "max_line_length": 119, "alphanum_fraction": 0.4812709577, "include": true, "reason": "import numpy", "num_tokens": 4907}
|
import tensorflow as tf
import numpy as np

# Teach a single-layer RNN to predict the next character of "hello"
# (classic TF1 character-RNN example).
char_rdic = ['h', 'e', 'l', 'o']                    # index -> char
char_dic = {w: i for i, w in enumerate(char_rdic)}  # char -> index

# one-hot rows for the input sequence 'h', 'e', 'l', 'l'
x_data = np.array([[1.0, 0, 0, 0],  # h
                   [0, 1, 0, 0],    # e
                   [0, 0, 1, 0],    # l
                   [0, 0, 1, 0]],   # l
                  dtype='f')
sample = [char_dic[c] for c in "hello"]  # "hello" as indices

# configuration
char_vocab_size = len(char_dic)
rnn_size = char_vocab_size  # 1 hot coding (one of 4)
time_step_size = 4          # 'hell' -> predict 'ello'
batch_size = 1              # one sample

# RNN model
rnn_cell = tf.contrib.rnn.BasicRNNCell(rnn_size)
state = tf.zeros([batch_size, rnn_cell.state_size])
# tf.split signature is tf.split(value, num_or_size_splits, axis):
# one [1, vocab] tensor per time step
X_split = tf.split(x_data, time_step_size, axis=0)
outputs, state = tf.contrib.rnn.static_rnn(rnn_cell, X_split, state)

# logits: [time_step_size * batch_size, rnn_size]
# (tf >= 1.0 argument order is tf.concat(values, axis))
logits = tf.reshape(tf.concat(outputs, 1), [-1, rnn_size])
targets = tf.reshape(sample[1:], [-1])  # expected next characters: 'ello'
weights = tf.ones([time_step_size * batch_size])

loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example([logits], [targets], [weights])
cost = tf.reduce_sum(loss) / batch_size
train_op = tf.train.RMSPropOptimizer(0.01, 0.9).minimize(cost)

# Build the prediction op ONCE, outside the training loop: the original
# called tf.arg_max inside the loop, adding a new node to the graph on
# every iteration.  tf.arg_max is also deprecated in favor of tf.argmax.
prediction = tf.argmax(logits, 1)

# Launch the graph in a session
with tf.Session() as sess:
    # tf.initialize_all_variables() is deprecated since TF 0.12;
    # tf.global_variables_initializer() is the supported replacement
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        sess.run(train_op)
        result = sess.run(prediction)
        print(result, [char_rdic[t] for t in result])
|
{"hexsha": "178b3449c727ca604d0f2abd7a942232ab980f03", "size": 2351, "ext": "py", "lang": "Python", "max_stars_repo_path": "hunkim/ml_lab_12.py", "max_stars_repo_name": "juvenilehex/ml2", "max_stars_repo_head_hexsha": "57fa64660a87b2e432872c06414d1a86846ce380", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2017-02-20T09:26:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-17T15:15:21.000Z", "max_issues_repo_path": "hunkim/ml_lab_12.py", "max_issues_repo_name": "angelkim88/ml", "max_issues_repo_head_hexsha": "57fa64660a87b2e432872c06414d1a86846ce380", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hunkim/ml_lab_12.py", "max_forks_repo_name": "angelkim88/ml", "max_forks_repo_head_hexsha": "57fa64660a87b2e432872c06414d1a86846ce380", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 58, "max_forks_repo_forks_event_min_datetime": "2017-02-22T05:18:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-08T08:41:17.000Z", "avg_line_length": 35.6212121212, "max_line_length": 123, "alphanum_fraction": 0.6877924288, "include": true, "reason": "import numpy", "num_tokens": 684}
|
#include "vr/market/sources/mock/mock_ouch_server.h"
#include "vr/fields.h"
#include "vr/io/net/socket_factory.h"
#include "vr/io/pcap/pcap_reader.h"
#include "vr/io/net/utility.h" // min_size_or_zero, make_group_range_filter
#include "vr/io/stream_factory.h"
#include "vr/market/defs.h"
#include "vr/market/sources/mock/asx/mock_ouch_handlers.h"
#include "vr/market/sources/mock/mock_market_event_context.h"
#include "vr/market/sources/mock/mock_response.h"
#include "vr/mc/spinflag.h"
#include "vr/mc/spinlock.h"
#include "vr/rt/cfg/app_cfg.h"
#include "vr/settings.h"
#include "vr/sys/cpu.h"
#include "vr/sys/os.h"
#include "vr/util/logging.h"
#include "vr/util/ops_int.h"
#include <boost/thread/thread.hpp>
#include <bitset>
#include <mutex>
//----------------------------------------------------------------------------
namespace vr
{
namespace market
{
using namespace io;
//............................................................................
// file-local aliases used throughout the pimpl below:

using spin_lock         = mc::spinlock<>;
using int_ops_checked   = util::ops_int<util::arg_policy<util::zero_arg_policy::ignore, 0>, true>;
//............................................................................

using data_link         = client_connection::data_link;

// the link type must expose recv/send timestamps (heartbeat logic relies on them):
vr_static_assert (data_link::has_ts_last_recv ());
vr_static_assert (data_link::has_ts_last_send ());

constexpr int32_t data_link_capacity ()     { return (256 * 1024); }; // replicates 'market::ouch_link_capacity ()'
//............................................................................
// heartbeat deadlines (client = when we drop an idle client, server = when we emit one):

constexpr timestamp_t client_heartbeat_timeout ()   { return (15 * _1_second ()); } // ASX-specific?
constexpr timestamp_t server_heartbeat_timeout ()   { return (1 * _1_second ()); } // ASX-specific?
//............................................................................
/*
 * private implementation of 'mock_ouch_server': owns per-partition listening
 * sockets and client sessions, and drives a non-blocking 'step()' loop that
 * services incoming requests, timed mock responses, and heartbeats
 */
struct mock_ouch_server::pimpl final
{
    // select request policy impl:

    using request_handler_ctx   = mock_market_event_context<_ts_origin_, _partition_, _ts_local_, _mock_scenario_>;
    using request_handler       = ASX::scripted_handler<mock_ouch_server::pimpl, request_handler_ctx>;

    // smallest byte count that guarantees a complete message header has been received:
    static constexpr int32_t min_available ()           { return net::min_size_or_zero<request_handler>::value (); }

    // accept-polling cadence: poll for new connections every (mask + 1) steps,
    // staggered per partition so partitions don't all poll on the same step:
    static constexpr int32_t new_cc_check_mask ()       { return 0xFFFF; }
    static constexpr int32_t new_cc_check_stagger ()    { return 100009; }

    VR_ENUM (state,
        (
            created,
            running,
            stopped
        ),
        printable

    ); // end of enum

    VR_ASSUME_COLD pimpl (mock_ouch_server & parent, scope_path const & cfg_path) :
        m_parent { parent },
        m_cfg_path { cfg_path }
    {
        LOG_trace1 << "configured with scope path " << print (cfg_path);
    }

    /*
     * reads config, opens one listening socket (+ thread) per active partition
     */
    VR_ASSUME_COLD void start ()
    {
        rt::app_cfg const & config = (* m_parent.m_config);

        m_session_date = config.start_date ();

        settings const & cfg = config.scope (m_cfg_path);
        LOG_trace1 << "using cfg:\n" << print (cfg);
        check_condition (cfg.is_object (), cfg.type ());

        LOG_trace1 << "session date: " << m_session_date;

        m_rng_seed = config.rng_seed ();
        LOG_trace1 << "[seed = " << m_rng_seed << ']';
        check_nonzero (m_rng_seed);

        m_request_handler_args ["seed"] = m_rng_seed;

        // partition 'pix' listens on (port_base + pix):
        int32_t const port_base = cfg.at ("server").at ("port");
        check_within (port_base, 64 * 1024);

        auto const & partitions = cfg.at ("partitions");

        bitset32_t pix_mask { };
        for (auto const & v : partitions)
        {
            int32_t const pix = v;
            check_within (pix, part_count ());

            pix_mask |= (1 << pix);
        }
        check_nonzero (pix_mask, partitions);
        m_active_partitions = pix_mask;

        LOG_trace1 << "active partition(s): " << std::bitset<part_count ()> (pix_mask);

        // open listening sockets for all active partitions:
        // TODO use 'thread_pool' instead of creating ad hoc threads here

        while (pix_mask > 0) // iterate over set bits, lowest first
        {
            int32_t const pix_bit = (pix_mask & - pix_mask); // isolate lowest set bit
            int32_t const pix = int_ops_checked::log2_floor (pix_bit);
            pix_mask ^= pix_bit;

            partition & p = m_partitions [pix];

            p.m_listener.listen (port_base + pix);
            {
                // [don't make the listening thread inherit the current thread's affinity set]

                sys::affinity::scoped_thread_sched _ { make_bit_set (sys::cpu_info::instance ().PU_count ()).set () };

                p.m_listen_thread = boost::thread { std::ref (p.m_listener) };
            }
            p.m_state = state::running;
        }

        LOG_trace1 << "listening socket thread(s) started";
    }

    /*
     * stops listeners, joins their threads, then closes all client sessions
     */
    VR_ASSUME_COLD void stop ()
    {
        // TODO drain outgoing I/O buffers while not receiving any new requests (for more graceful client handling)

        bitset32_t pix_mask { m_active_partitions };

        while (pix_mask > 0) // same set-bit iteration as in 'start()'
        {
            int32_t const pix_bit = (pix_mask & - pix_mask);
            int32_t const pix = int_ops_checked::log2_floor (pix_bit);
            pix_mask ^= pix_bit;

            partition & p = m_partitions [pix];

            if (p.m_state == state::running)
            {
                // shut down partition listener and clean up pending accepts:

                LOG_trace1 << 'P' << pix << " requesting listener stop ...";
                p.m_listener.request_stop ();

                // now should be able to join the listener thread

                LOG_trace1 << 'P' << pix << " joining listener thread ...";
                p.m_listen_thread.join ();

                // with no new connections possible, disconnect all current client sessions:

                LOG_trace1 << 'P' << pix << " closing sessions ...";
                for (auto & kv : p.m_sessions)
                {
                    client_session & cs = kv.second;
                    cs.close ();
                }
                p.m_sessions.clear ();

                p.m_state = state::stopped;
            }
        }
    }

    // core step logic:

    VR_FORCEINLINE void step () // note: force-inlined
    {
        ++ m_step_counter;

        bitset32_t pix_mask { m_active_partitions };
        timestamp_t const now_utc = sys::realtime_utc (); // one timestamp shared by all partitions this step

        // iterate over all active partitions:

        while (pix_mask > 0)
        {
            int32_t const pix_bit = (pix_mask & - pix_mask);
            int32_t const pix = int_ops_checked::log2_floor (pix_bit);
            pix_mask ^= pix_bit;

            step (pix, now_utc);
        }
    }

    // working a given partition:

    VR_FORCEINLINE void step (int32_t const pix, timestamp_t const now_utc)
    {
        partition & p = m_partitions [pix];

        // handle pending actions (outgoing requests):

        time_action_queue & taq = p.m_action_queue;

        while (true)
        {
            auto * const e = taq.front ();
            if (e == nullptr)
                break; // queue empty

            mock_response & r = * e->value ();

            bool const done = r.evaluate (now_utc); // support "multi-step" actions (e.g. fractional response writes)
            if (! done)
                break; // stop processing events at this time without dequeueing 'e': this is correct as long as no new events get scheduled ahead of it

            taq.dequeue ();
        }

        // handle I/O in all current connections:
        // TODO modify logic to enforce 'm_action_pending_limit' per partition queue

        for (auto & kv : p.m_sessions)
        {
            client_session & cs = kv.second;
            if (VR_UNLIKELY (cs.m_state == io::client_connection::state::closed))
                continue;

            try
            {
                // incoming requests:

                assert_nonnull (cs.m_data_link);
                std::pair<addr_const_t, capacity_t> const rc = cs.m_data_link->recv_poll (); // non-blocking read

                auto available = rc.second;

                if (available >= min_available ()) // a message header is guaranteed to have been read entirely
                {
                    // populate only the context fields the handler's policy actually selects:

                    request_handler_ctx ctx { };

                    if (has_field<_partition_, request_handler_ctx> ())
                    {
                        field<_partition_> (ctx) = pix;
                    }
                    if (has_field<_ts_local_, request_handler_ctx> ())
                    {
                        // no matter when the request arrived to the link, 'now_utc' is the
                        // time when this action is considered for the first time:

                        field<_ts_local_> (ctx) = now_utc;
                    }
                    if (has_field<_ts_origin_, request_handler_ctx> ())
                    {
                        timestamp_t const ts_recv = cs.m_data_link->ts_last_recv ();
                        LOG_trace2 << '[' << m_step_counter << ", session " << kv.first << "]: ts_last_recv = " << print_timestamp (ts_recv);

                        // note that the recv timestamp as reported by the link is
                        // not the same as 'now_utc', hence we report the former through '_ts_origin_'
                        // (although it probably shouldn't factor into the simulated actions)

                        field<_ts_origin_> (ctx) = ts_recv;
                    }

                    LOG_trace2 << '[' << m_step_counter << ", " << print_timestamp (now_utc) << ", session " << kv.first << "] available " << available;

                    assert_condition (cs.m_request_handler); // has been set

                    int32_t consumed { };
                    do // loop over all full messages in the buffer
                    {
                        int32_t const rrc = cs.m_request_handler->consume (ctx, addr_plus (rc.first, consumed), available);
                        if (rrc < 0)
                            break; // partial message

                        // [reactions to client requests are in the event timer queue]

                        available -= rrc;
                        consumed += rrc;

                        assert_nonnegative (available);
                        assert_le (consumed, rc.second);
                    }
                    while (available >= min_available ());

                    if (consumed) cs.m_data_link->recv_flush (consumed); // no actual I/O, just buffer memory release, so never partial
                }

                // pending outgoing bytes (due to partial sends, actual or simulated):

                auto const len = (cs.m_send_committed - cs.m_send_flushed);
                if (len > 0)
                {
                    auto const rc = cs.m_data_link->send_flush (len);
                    cs.m_send_flushed += rc;
                }
                else if (VR_UNLIKELY (cs.m_state == io::client_connection::state::closing))
                {
                    assert_eq (cs.m_send_committed, cs.m_send_flushed); // don't close on a partial send

                    p.close_client_session (kv.first, cs);
                }

                // heartbeat housekeeping:

                if (VR_LIKELY (cs.m_state == io::client_connection::state::serving))
                {
                    if (VR_UNLIKELY (now_utc >= cs.m_data_link->ts_last_recv () + client_heartbeat_timeout ()))
                    {
                        LOG_warn << "timing out client session " << kv.first;

                        cs.m_state = io::client_connection::state::closing; // disable any enqueued responses
                    }
                    else if (VR_UNLIKELY (now_utc >= cs.m_data_link->ts_last_send () + server_heartbeat_timeout ()))
                    {
                        enqueue_heartbeat (pix, cs, now_utc); // note: can't send it here because a "partial" response write may be in progress
                    }
                }
            }
            catch (eof_exception const & eofe)
            {
                LOG_warn << "closing client session " << kv.first << ": " << eofe.what ();
                p.close_client_session (kv.first, cs);
            }

            // TODO GC closed sessions that have no pending requests in the queue
        }

        // periodically check for new connections:

        if (! ((m_step_counter + (new_cc_check_stagger () << pix)) & new_cc_check_mask ()))
        {
            std::lock_guard<spin_lock> _ { p.m_listener.m_lock };

            // adopt sessions accepted by the listener thread since the last poll:

            for (client_session & cs : p.m_listener.m_accepted)
            {
                int32_t const cs_ID = m_session_ID_counter ++;

                auto const i = p.m_sessions.emplace (cs_ID, std::move (cs)).first;

                // vary rng seed for different sessions:
                {
                    m_request_handler_args ["seed"] = ++ m_rng_seed;
                }
                i->second.configure_handler (* this);
            }
            p.m_listener.m_accepted.clear ();
        }
    }

    /*
     * 'client_connection' augmented with OUCH request handler
     */
    struct client_session final: public client_connection
    {
        client_session (std::unique_ptr<data_link> && link) :
            client_connection (std::move (link))
        {
        }

        // creates the scripted request handler for this session; called once,
        // after the session has been adopted by its partition:
        void configure_handler (mock_ouch_server::pimpl & parent)
        {
            assert_condition (! m_request_handler);
            m_request_handler = std::make_unique<request_handler> (parent, * this, parent.m_request_handler_args);
        }

        VR_ASSUME_COLD void close ()
        {
            client_connection::close (); // [chain]
        }

        std::unique_ptr<request_handler> m_request_handler { };

    }; // end of class

    /*
     * (per-partition) callable for threads that listen for new client connections
     */
    struct connection_listener final
    {
        connection_listener () = default;

        /*
         * must be called prior to creating the executing thread
         */
        void listen (int32_t const port)
        {
            check_condition (! m_lsh); // called only once
            m_lsh.reset (new net::socket_handle { net::socket_factory::create_TCP_server (string_cast (port)) });
        }

        VR_ASSUME_COLD void request_stop ()
        {
            m_stop_requested.raise ();
        }

        // callable: accept loop run by the listener thread until stop is requested

        void operator() ()
        {
            assert_nonnull (m_lsh);

            try
            {
                while (true)
                {
                    net::socket_handle csh { m_lsh->accept (m_stop_requested, 200 * _1_millisecond ()) }; // "interruptible" accept
                    timestamp_t const now_utc = sys::realtime_utc ();

                    auto const pn = csh.peer_name ();
                    LOG_info << '[' << print_timestamp (now_utc) << "] accepted new connection from " << std::get<0> (pn) << ':' << std::get<1> (pn);

                    {
                        std::lock_guard<spin_lock> _ { m_lock };

                        m_accepted.emplace_back (accept_client_connection (std::move (csh), data_link_capacity ())); // note: last use of 'csh'
                    }
                }
            }
            catch (stop_iteration const &) { } // raised out of 'accept()' when stop is requested

            LOG_trace1 << '[' << print_timestamp (sys::realtime_utc ()) << "] shutting down connection listener...";
            {
                // first, make sure no new connections can be accepted:

                m_lsh.reset ();

                // next, stop all session in 'm_accepted' that haven't been picked up:
                {
                    std::lock_guard<spin_lock> _ { m_lock };

                    for (client_session & cs : m_accepted)
                    {
                        try
                        {
                            cs.close ();
                        }
                        catch (...) { } // best-effort cleanup during shutdown
                    }
                    m_accepted.clear ();
                }
            }
            LOG_trace1 << '[' << print_timestamp (sys::realtime_utc ()) << "] listener DONE";
        }

        std::unique_ptr<net::socket_handle> m_lsh { };  // created in 'listen()'
        std::vector<client_session> m_accepted { };     // protected by 'm_lock'
        spin_lock m_lock { };
        mc::spinflag<> m_stop_requested { };

    }; // end of nested class

    struct partition final
    {
        partition () = default;

        // note: 'cs_ID' is currently unused here; the entry stays in 'm_sessions' until GC'd
        void close_client_session (int32_t const cs_ID, client_session & cs)
        {
            cs.m_data_link.reset ();
            cs.m_state = io::client_connection::state::closed;
        }

        // TODO method to GC (erase from 'm_sessions')

        connection_listener m_listener { };             // created in 'start()'
        boost::thread m_listen_thread { };              // created as not-a-thread, move-assigned in 'start()'
        boost::unordered_map<int32_t, client_session> m_sessions { };
        time_action_queue m_action_queue { };           // queue pending actions per partition, to sim partition concurrency
        state::enum_t m_state { state::created };

    }; // end of nested class

    util::date_t const & session_date () const
    {
        return m_session_date;
    }

    time_action_queue & action_queue (int32_t const pix)
    {
        assert_within (pix, part_count ());

        return m_partitions [pix].m_action_queue;
    }

    // schedules a mock response to fire at its own 'm_ts_start':
    void enqueue_action (int32_t const pix, std::unique_ptr<mock_response> && action)
    {
        timestamp_t const ts_start = action->m_ts_start;
        action_queue (pix).enqueue (ts_start, std::move (action)); // note: last use of 'action'
    }

    void enqueue_heartbeat (int32_t const pix, client_connection & cc, timestamp_t const ts_start)
    {
        std::unique_ptr<mock_response> action { new ouch_heartbeat { cc, ts_start } };
        action_queue (pix).enqueue (ts_start, std::move (action)); // note: last use of 'action'
    }

    /*
     * note: returned client session does not have a handler set
     */
    static client_session accept_client_connection (net::socket_handle && accepted, int32_t const capacity)
    {
        recv_arg_map recv_args { { "capacity", capacity } };
        send_arg_map send_args { { "capacity", capacity } };

        return { std::make_unique<data_link> (std::move (accepted), recv_args, send_args) }; // TODO use 'tcp_link_factory'
    }


    mock_ouch_server & m_parent;
    scope_path const m_cfg_path;
    uint64_t m_rng_seed { };                            // set in 'start()'
    arg_map m_request_handler_args { };                 // TODO fill in in start
    util::date_t m_session_date { };                    // set in 'start()'
    std::array<partition, part_count ()> m_partitions { };
    int64_t m_step_counter { };                         // incremented once per 'step()'
    int32_t m_action_pending_limit { };                 // set in 'start()'
    bitset32_t m_active_partitions { };                 // set in 'start()'
    int32_t m_session_ID_counter { };

}; // end of nested class
//............................................................................
//............................................................................
mock_ouch_server::mock_ouch_server (scope_path const & cfg_path) :
    m_impl { std::make_unique<pimpl> (* this, cfg_path) }
{
    dep (m_config) = "config"; // declare dependency on the app config component
}

mock_ouch_server::~mock_ouch_server () = default; // pimpl
//............................................................................
// lifecycle: thin forwarders into the pimpl

void
mock_ouch_server::start ()
{
    m_impl->start ();
}

void
mock_ouch_server::stop ()
{
    m_impl->stop ();
}
//............................................................................
// single non-blocking service pass over all active partitions; forwards to the pimpl
void
mock_ouch_server::step ()
{
    m_impl->step ();
}
} // end of 'market'
} // end of namespace
//----------------------------------------------------------------------------
|
{"hexsha": "10878a6521ca8ca5dfc8b71d0a7d7fdefc98a93d", "size": 19855, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "vr/vr_rt/src/vr/market/sources/mock/mock_ouch_server.cpp", "max_stars_repo_name": "vladium/vrt", "max_stars_repo_head_hexsha": "57394a630c306b7529dbe4574036ea71420d00cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2019-09-09T22:08:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-17T13:43:31.000Z", "max_issues_repo_path": "vr/vr_rt/src/vr/market/sources/mock/mock_ouch_server.cpp", "max_issues_repo_name": "vladium/vrt", "max_issues_repo_head_hexsha": "57394a630c306b7529dbe4574036ea71420d00cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vr/vr_rt/src/vr/market/sources/mock/mock_ouch_server.cpp", "max_forks_repo_name": "vladium/vrt", "max_forks_repo_head_hexsha": "57394a630c306b7529dbe4574036ea71420d00cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-09-09T15:46:20.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-09T15:46:20.000Z", "avg_line_length": 33.8822525597, "max_line_length": 152, "alphanum_fraction": 0.5318055905, "num_tokens": 4161}
|
"""
    deserialize_image_summary(summary)

Decode the PNG payload stored in an image `Summary` entry and return the
decoded image (via `load` over an in-memory PNG stream).
"""
function deserialize_image_summary(summary)
    img = summary.image
    value = load(_format_stream(format"PNG", IOBuffer(img.encoded_image_string)))
    return value
end
"""
    lookahead_deserialize_image_summary(old_tag, old_val, evs, state_old)

Scan forward through the event stream `evs` collecting consecutive image
summaries that carry the same tag as `old_tag`, and stack them together with
`old_val` along an extra trailing dimension. Returns `(tag, value, state)`;
when no matching follow-up image is found the inputs are returned unchanged.
"""
function lookahead_deserialize_image_summary(old_tag, old_val, evs::Summary,
                            state_old)
    # prepare the default output (when no action is taken)
    result = old_tag, old_val, state_old
    combined_imgs = [old_val]
    state = state_old + 1
    iter_result = iterate(evs, state)
    while iter_result !== nothing
        ((tag, summary), state) = iter_result
        # stop collecting at the first non-image summary or at a different tag
        typ = summary_type(summary)
        typ !== :image && break
        tag != old_tag && break
        push!(combined_imgs, deserialize_image_summary(summary))
        # advance the lookahead
        iter_result = iterate(evs, state)
    end
    if length(combined_imgs) > 1
        # stack everything into one array with an extra last axis
        # NOTE(review): indexing below assumes 2-D images -- confirm.
        new_val = similar(old_val, size(old_val)..., length(combined_imgs))
        for (i, img)=enumerate(combined_imgs)
            new_val[:, :, i] .= img
        end
        # skip the stream position past the summaries that were absorbed
        result = old_tag, new_val, state_old + length(combined_imgs)
    end
    return result
end
|
{"hexsha": "2b147e6573d61fdc501bf5890c2ccc22d2b8f0bd", "size": 1155, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Deserialization/images.jl", "max_stars_repo_name": "JJMinton/TensorBoardLogger.jl", "max_stars_repo_head_hexsha": "25d8db22c5082d029ff1ec876512633b2b24dbc8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2019-01-29T07:00:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-18T16:03:10.000Z", "max_issues_repo_path": "src/Deserialization/images.jl", "max_issues_repo_name": "JJMinton/TensorBoardLogger.jl", "max_issues_repo_head_hexsha": "25d8db22c5082d029ff1ec876512633b2b24dbc8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 89, "max_issues_repo_issues_event_min_datetime": "2019-02-25T08:23:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-18T10:45:08.000Z", "max_forks_repo_path": "src/Deserialization/images.jl", "max_forks_repo_name": "JJMinton/TensorBoardLogger.jl", "max_forks_repo_head_hexsha": "25d8db22c5082d029ff1ec876512633b2b24dbc8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2019-02-23T23:25:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-17T21:12:42.000Z", "avg_line_length": 28.875, "max_line_length": 81, "alphanum_fraction": 0.6251082251, "num_tokens": 267}
|
# import time
from copy import deepcopy
import random
from timeit import default_timer
from numpy import mean, median, arange, zeros, float64, log, power, argsort, array, newaxis, \
abs, full, empty
from numpy.random import choice, uniform
from sklearn.utils.extmath import stable_cumsum
from algorithms.common.metric import WeightedRootMeanSquaredError # , RootMeanSquaredError
# from data.extract import generate_sub_training_set
# from utils.useful_methods import generate_random_weight_vector, generate_weight_vector
# from threading import Thread
# from multiprocessing import Process
class Ensemble:
    """Ensemble learning technique: a meta learner combines the predictions
    of ``number_learners`` trained copies of a base learner.

    Attributes:
        base_learner: Base learner algorithm that supplies the meta learner.
        number_learners: Number of base learners.
        meta_learner: Callable applied along axis 1 of the prediction matrix
            (e.g. ``numpy.mean``) to produce the final predictions.
        learners: List containing the trained base learners.
        deep_copy: When True each learner is trained on a deep copy of
            ``base_learner``; otherwise the same instance is re-fitted.

    Notes:
        base_learner needs to support fit() and predict() functions.
        meta_learner needs to support a numpy ndarray as input.
    """

    def __init__(self, base_learner, number_learners, meta_learner=mean, deep_copy=True):
        self.base_learner = base_learner
        self.number_learners = number_learners
        self.meta_learner = meta_learner
        self.learners = list()
        self.deep_copy = deep_copy

    def _copy_base_learner(self):
        """Returns the learner instance to train: a deep copy when
        ``self.deep_copy`` is set, otherwise the shared base learner."""
        if not self.deep_copy:
            return self.base_learner
        start_time = default_timer()
        learner = deepcopy(self.base_learner)
        deep_copy_time = default_timer() - start_time
        if deep_copy_time >= 1:
            print('\t\t\tdeep_copy_time:', deep_copy_time)
        return learner

    def _fit_learner(self, i, input_matrix, target_vector, metric, verbose):
        """Trains and returns the i-th base learner on the full training set."""
        learner = self._copy_base_learner()
        # sklearn's MLP estimators do not accept the project-specific
        # (metric, verbose) arguments, hence the special case.
        if learner.__class__.__name__ in ('MLPClassifier', 'MLPRegressor'):
            learner.fit(input_matrix, target_vector)
        else:
            learner.fit(input_matrix, target_vector, metric, verbose)
        return learner

    def fit(self, input_matrix, target_vector, metric, verbose=False):
        """Trains all learners to approach the target vector, given an input
        matrix, based on the supplied metric."""
        start_time = default_timer()
        self.learners = [self._fit_learner(i, input_matrix, target_vector, metric, verbose)
                         for i in range(self.number_learners)]
        fit_time = default_timer() - start_time
        print('\t\t\tfit_time:', fit_time)

    def predict(self, input_matrix):
        """Predicts the target vector for ``input_matrix`` with the trained
        ensemble: one column of predictions per learner, combined by the
        meta learner along axis 1."""
        start_time = default_timer()
        predictions = zeros([input_matrix.shape[0], self.number_learners])
        for i, learner in enumerate(self.learners):
            predictions[:, i] = learner.predict(input_matrix)
        final_predictions = self.meta_learner(predictions, axis=1)
        predict_time = default_timer() - start_time
        print('\t\t\tpredict_time:', predict_time)
        return final_predictions
class EnsembleBagging(Ensemble):
    """Bagging ensemble: every base learner is trained on a bootstrap sample
    (drawn with replacement) of the training set."""

    def __init__(self, base_learner, number_learners, meta_learner=mean, deep_copy=True):
        # Forward deep_copy too: _fit_learner reads self.deep_copy, but the
        # flag used to be silently dropped (always defaulting to True).
        Ensemble.__init__(self, base_learner, number_learners, meta_learner, deep_copy)

    def _fit_learner(self, i, input_matrix, target_vector, metric, verbose):
        """Trains the i-th learner on a bootstrap resample of the data."""
        size = input_matrix.shape[0]
        # Creates deepcopy of base learner.
        if self.deep_copy:
            start_time = default_timer()
            learner = deepcopy(self.base_learner)
            deep_copy_time = default_timer() - start_time
            if deep_copy_time >= 1:
                print('\t\t\tdeep_copy_time:', deep_copy_time)
        else:
            learner = self.base_learner
        # Bootstrap: sample row indices with replacement.
        idx = choice(arange(size), size, replace=True)
        # sklearn's MLP estimators do not accept (metric, verbose).
        if learner.__class__.__name__ in ('MLPClassifier', 'MLPRegressor'):
            learner.fit(input_matrix[idx], target_vector[idx])
        else:
            learner.fit(input_matrix[idx], target_vector[idx], metric, verbose)
        return learner

    def fit(self, input_matrix, target_vector, metric, verbose=False):
        """Trains all bagged learners and records the wall-clock fit time."""
        start_time = default_timer()
        self.learners = [self._fit_learner(i, input_matrix, target_vector, metric, verbose)
                         for i in range(self.number_learners)]
        fit_time = default_timer() - start_time
        print('\t\t\tfit_time:', fit_time)
class EnsembleRandomIndependentWeighting(Ensemble):
    """Ensemble in which every learner minimises a weighted RMSE with its own
    randomly drawn, independent sample-weight vector in [0, weight_range)."""

    def __init__(self, base_learner, number_learners, meta_learner=mean, weight_range=1, deep_copy=True):
        # Forward deep_copy too: _fit_learner reads self.deep_copy, but the
        # flag used to be silently dropped (always defaulting to True).
        Ensemble.__init__(self, base_learner, number_learners, meta_learner, deep_copy)
        self.weight_range = weight_range

    def _fit_learner(self, i, input_matrix, target_vector, metric, verbose):
        """Trains the i-th learner; the supplied metric is deliberately
        replaced by a WeightedRootMeanSquaredError built from a fresh
        uniform random weight vector."""
        # Creates deepcopy of base learner.
        if self.deep_copy:
            start_time = default_timer()
            learner = deepcopy(self.base_learner)
            deep_copy_time = default_timer() - start_time
            if deep_copy_time >= 1:
                print('\t\t\tdeep_copy_time:', deep_copy_time)
        else:
            learner = self.base_learner
        weight_vector = uniform(0, self.weight_range, input_matrix.shape[0])
        # Instantiates the WeightedRootMeanSquaredError with the weight vector,
        # overriding the caller-supplied metric on purpose.
        metric = WeightedRootMeanSquaredError(weight_vector)
        learner.fit(input_matrix, target_vector, metric, verbose)
        return learner

    def fit(self, input_matrix, target_vector, metric, verbose=False):
        """Trains all learners and records the wall-clock fit time."""
        start_time = default_timer()
        self.learners = [self._fit_learner(i, input_matrix, target_vector, metric, verbose)
                         for i in range(self.number_learners)]
        fit_time = default_timer() - start_time
        print('\t\t\tfit_time:', fit_time)
class EnsembleBoosting(Ensemble):
    """AdaBoost.R2-style boosting ensemble for regression.

    Each round trains a learner on a weighted resample of the data, measures
    its weighted squared error on the *original* training set, derives beta,
    and re-weights the samples for the next round.

    Attributes:
        learning_rate: Either a number scaling the weight updates or the
            string 'random' for a fresh uniform(0, 1) rate each round.
        estimator_weights: Per-learner weights used by the weighted-median
            combination in predict().
    """

    def __init__(self, base_learner, number_learners, meta_learner=mean, learning_rate=1):
        Ensemble.__init__(self, base_learner, number_learners, meta_learner)
        self.learning_rate = learning_rate
        self.estimator_weights = zeros(self.number_learners, dtype=float64)

    def _get_learning_rate(self, learning_rate):
        """Returns the learning rate to use for the current boosting round."""
        if (self.learning_rate == 'random'):
            # return random generated learning rate between 0 and 1
            return random.uniform(0, 1)
        # Bug fix: a configured numeric rate used to be ignored (the method
        # always returned 1). The default (1) behaves exactly as before.
        return self.learning_rate

    def fit(self, input_matrix, target_vector, metric, verbose=False):
        """Boosted training loop (AdaBoost.R2 with a squared loss)."""
        start_time = default_timer()
        # Initialize the weights with 1/n where n is the size of the input matrix.
        size = input_matrix.shape[0]
        weight_vector = empty(size)
        weight_vector.fill(1 / size)
        original_input_matrix = input_matrix
        original_target_vector = target_vector
        for i in range(self.number_learners):
            # Creates deepcopy of base learner.
            if self.deep_copy:
                deep_copy_start_time = default_timer()
                learner = deepcopy(self.base_learner)
                deep_copy_time = default_timer() - deep_copy_start_time
                if deep_copy_time >= 1:
                    print('\t\t\tdeep_copy_time:', deep_copy_time)
            else:
                learner = self.base_learner
            # Draw this round's training sample according to the weights.
            idx = choice(arange(size), size, p=weight_vector)
            input_matrix = original_input_matrix[idx]
            target_vector = original_target_vector[idx]
            # sklearn's MLP estimators do not accept (metric, verbose).
            if learner.__class__.__name__ in ('MLPClassifier', 'MLPRegressor'):
                learner.fit(input_matrix, target_vector)
            else:
                learner.fit(input_matrix, target_vector, metric, verbose)
            # Evaluate on every ORIGINAL instance, even ones not sampled.
            y_predict = learner.predict(original_input_matrix)
            # Bug fix: compare against the ORIGINAL targets -- y_predict is
            # aligned with original_input_matrix, not with the resample.
            error_vector = abs(original_target_vector - y_predict)
            max_abs_error = error_vector.max()
            # Normalized error vector (values in [0, 1]): ENi = Ei / max error.
            if max_abs_error != 0:
                error_vector = error_vector / max_abs_error
            # Take the loss function into account -- square in this case.
            error_vector **= 2
            # Weighted error of this learner: EEk = sum(wi * Ei).
            learner_error = (weight_vector * error_vector).sum()
            # Adds base learner to list.
            self.learners.append(learner)
            if learner_error <= 0:
                # Perfect fit: beta would be 0 and log(1/beta) undefined
                # (previously a divide-by-zero). Keep this learner with full
                # weight and stop boosting early.
                self.estimator_weights[i] = 1.0
                break
            # beta used in the weight update: beta = EEk / (1 - EEk).
            beta = learner_error / (1. - learner_error)
            learning_rate = self._get_learning_rate(self.learning_rate)
            # Weight of this learner in the final (median) combination.
            self.estimator_weights[i] = learning_rate * log(1. / beta)
            # Down-weight well-predicted samples for the next iteration.
            weight_vector *= power(beta, (1. - error_vector) * learning_rate)
            # Renormalize so the weights stay a probability distribution.
            weight_vector /= weight_vector.sum()
        fit_time = default_timer() - start_time
        print('\t\t\tfit_time:', fit_time)

    def _get_median_predict(self, input_matrix, limit):
        """Weighted-median combination of the first ``limit`` learners."""
        # Evaluate predictions of all estimators.
        predictions = array([
            learner.predict(input_matrix) for learner in self.learners[:limit]]).T
        # Sort the predictions per sample.
        sorted_idx = argsort(predictions, axis=1)
        # Cumulative estimator weights in per-sample sorted order; the median
        # is the first estimator whose cumulative weight reaches half.
        weight_cdf = stable_cumsum(self.estimator_weights[sorted_idx], axis=1)
        median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, newaxis]
        median_idx = median_or_above.argmax(axis=1)
        median_estimators = sorted_idx[arange(input_matrix.shape[0]), median_idx]
        # Return median predictions.
        return predictions[arange(input_matrix.shape[0]), median_estimators]

    def _get_mean_predict(self, input_matrix):
        """Meta-learner (e.g. mean) combination over all trained learners."""
        # Robustness fix: size by len(self.learners), not number_learners --
        # boosting may stop early, and zero-padded columns would skew the mean.
        predictions = zeros([input_matrix.shape[0], len(self.learners)])
        for i, learner in enumerate(self.learners):
            predictions[:, i] = learner.predict(input_matrix)
        return self.meta_learner(predictions, axis=1)

    def predict(self, input_matrix):
        """Predicts via the weighted median when meta_learner is numpy.median,
        otherwise via the meta learner over all learner predictions."""
        start_time = default_timer()
        if self.meta_learner == median:
            final_predictions = self._get_median_predict(input_matrix, self.number_learners)
        else:
            final_predictions = self._get_mean_predict(input_matrix)
        predict_time = default_timer() - start_time
        print('\t\t\tpredict_time:', predict_time)
        return final_predictions
|
{"hexsha": "fdac17957adae8748c721102b6701dda7d23bdca", "size": 16291, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/algorithms/common/ensemble.py", "max_stars_repo_name": "martasls/pythonic-learning-machine", "max_stars_repo_head_hexsha": "330d1d5320adc8667bc7ce527808ec7a9c2271d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/algorithms/common/ensemble.py", "max_issues_repo_name": "martasls/pythonic-learning-machine", "max_issues_repo_head_hexsha": "330d1d5320adc8667bc7ce527808ec7a9c2271d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/algorithms/common/ensemble.py", "max_forks_repo_name": "martasls/pythonic-learning-machine", "max_forks_repo_head_hexsha": "330d1d5320adc8667bc7ce527808ec7a9c2271d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.0027624309, "max_line_length": 129, "alphanum_fraction": 0.6035234178, "include": true, "reason": "from numpy", "num_tokens": 3467}
|
import sys
import numpy
import matplotlib.pyplot as plt
try:
from scipy import stats
except: # pragma: no cover
stats = None
import pytest
import probscale
from probscale.probscale import _minimal_norm
# Legacy-Python detection; a looser image-comparison tolerance is used on 2.7
# (presumably due to rendering differences between interpreters -- confirm).
PY27 = sys.version_info.major == 2
if PY27:
    TOLERANCE = 25
else:
    TOLERANCE = 22
@pytest.fixture
def mn():
    """Fixture: a fresh ``_minimal_norm`` distribution object."""
    return _minimal_norm()
@pytest.fixture
def mn_input():
    """Fixture: a dozen probabilities in (0, 1) used as shared test input."""
    x = numpy.array([
        0.331, 0.742, 0.067, 0.826, 0.357, 0.089,
        0.754, 0.342, 0.762, 0.658, 0.239, 0.910,
    ])
    return x
def test_minimal_norm_A(mn):
    """The _A constant of the erf approximation matches its known value."""
    known__A = 0.1400122886866665
    assert abs(mn._A - known__A) < 0.0000001
def test_minimal_norm__approx_erf(mn, mn_input):
    """_approx_erf agrees with reference erf values to within 1e-3."""
    known_erf = numpy.array([
        0.36029027, 0.70598131, 0.07548843, 0.75724986,
        0.38635283, 0.10016122, 0.71371964, 0.37137355,
        0.71880142, 0.64791492, 0.26463458, 0.80188283,
    ])
    diff = mn._approx_erf(mn_input) - known_erf
    assert numpy.all(numpy.abs(diff) < 0.001)
def test_minimal_norm__approx_inv_erf(mn, mn_input):
diff = mn._approx_inv_erf(mn._approx_erf(mn_input)) - mn_input
assert numpy.all(numpy.abs(diff) < 0.00001)
def test_minimal_norm_ppf(mn, mn_input):
    """ppf (inverse CDF) matches reference values to within 1e-3."""
    known_ppf = numpy.array([
        -0.43715354, 0.64952360, -1.49851307, 0.93847570,
        -0.36648929, -1.34693863, 0.68713129, -0.40701088,
        +0.71275076, 0.40701088, -0.70952297, 1.34075503,
    ])
    diff = mn.ppf(mn_input) - known_ppf
    assert numpy.all(numpy.abs(diff) < 0.001)
def test_minimal_norm_cdf(mn, mn_input):
    """cdf matches reference values to within 1e-3."""
    known_cdf = numpy.array([
        0.62967776, 0.77095633, 0.52670915, 0.79559795,
        0.63945410, 0.53545904, 0.77457539, 0.63382455,
        0.77697000, 0.74473093, 0.59444721, 0.81858875
    ])
    diff = mn.cdf(mn_input) - known_cdf
    assert numpy.all(numpy.abs(diff) < 0.001)
@pytest.mark.mpl_image_compare(
    baseline_dir='baseline_images/test_probscale',
    tolerance=TOLERANCE
)
@pytest.mark.skipif(PY27, reason="legacy python")
def test_the_scale_default():
    """Image comparison: default 'prob' scale (percent) on the y-axis."""
    fig, ax = plt.subplots(figsize=(4, 8))
    ax.set_yscale('prob')
    ax.set_ylim(0.01, 99.99)
    fig.tight_layout()
    return fig
@pytest.mark.mpl_image_compare(
    baseline_dir='baseline_images/test_probscale',
    tolerance=TOLERANCE
)
def test_the_scale_not_as_pct():
    """Image comparison: 'prob' scale with raw probabilities (not percent)."""
    fig, ax = plt.subplots(figsize=(4, 8))
    ax.set_yscale('prob', as_pct=False)
    ax.set_ylim(0.02, 0.98)
    return fig
@pytest.mark.mpl_image_compare(
    baseline_dir='baseline_images/test_probscale',
    tolerance=TOLERANCE
)
@pytest.mark.skipif(stats is None, reason="scipy not installed")
def test_the_scale_beta():
    """Image comparison: 'prob' scale backed by a scipy beta distribution."""
    fig, ax = plt.subplots(figsize=(4, 8))
    ax.set_yscale('prob', as_pct=True, dist=stats.beta(3, 2))
    ax.set_ylim(1, 99)
    fig.tight_layout()
    return fig
|
{"hexsha": "7c2ba067ccdd06d6fad9d5fb7cad07cfdbfceed3", "size": 2824, "ext": "py", "lang": "Python", "max_stars_repo_path": "probscale/tests/test_probscale.py", "max_stars_repo_name": "SamsadSajid/mpl-probscale", "max_stars_repo_head_hexsha": "c65feeddbd8eb6ccca897b27dcd6738d72bd9fb2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "probscale/tests/test_probscale.py", "max_issues_repo_name": "SamsadSajid/mpl-probscale", "max_issues_repo_head_hexsha": "c65feeddbd8eb6ccca897b27dcd6738d72bd9fb2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "probscale/tests/test_probscale.py", "max_forks_repo_name": "SamsadSajid/mpl-probscale", "max_forks_repo_head_hexsha": "c65feeddbd8eb6ccca897b27dcd6738d72bd9fb2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7719298246, "max_line_length": 66, "alphanum_fraction": 0.6749291785, "include": true, "reason": "import numpy,from scipy", "num_tokens": 978}
|
# Build artifacts to log train set distribution
import pandas as pd
import numpy as np
import seaborn as sn
import textwrap as twp
import matplotlib.pyplot as pl
import matplotlib as mat
# Non-interactive mode: figures are written to files, never shown.
pl.ioff()
# (mappable, bounds, [xname, yname]) triples appended by off_plot() and
# consumed/cleared per page by training_set_feature_distributions().
mappables = []
# def time_transform(x):
# return pd.to_datetime(x).apply( lambda t : (t.hour*60+t.minute)//10)
def time_transform(x):
    """Maps a series of timestamps to the hour of day (0-23)."""
    def _hour_bucket(t):
        return (t.hour * 60 + t.minute) // 60
    return pd.to_datetime(x).apply(_hour_bucket)
def calculate_mappable_bounds(heatmap, cmap=mat.cm.viridis) :
    '''
    Create a scale with 5 discrete intervals instead of a continuous
    color scale, from the minimum to the maximum of a heatmap.
    Returns [mappable, bounds, norm]: the ScalarMappable for a colorbar,
    the (integer) interval edges, and the BoundaryNorm.
    '''
    vmin, vmax = heatmap.min().min(), heatmap.max().max()
    # Edges at 2%, 5%, 20% and 50% of the value range -- finer resolution
    # near the minimum where most counts live.
    bounds = [vmin,(vmax-vmin)/50+vmin,(vmax-vmin)/20+vmin,(vmax-vmin)/5+vmin,(vmax-vmin)/2+vmin,vmax]
    bounds = [int(x) for x in bounds]
    norm = mat.colors.BoundaryNorm(bounds, cmap.N)
    mappable = mat.cm.ScalarMappable(norm = norm, cmap = cmap)
    return [mappable, bounds, norm]
def remove_low_num_and_nan(dataframe) :
    """For every column, drops rows whose value occurs less than 1% as often
    as that column's most frequent value (NaN rows fall out as well, since
    NaN is never in a kept index). Returns the filtered frame plus, per
    column, the kept value index and a dict of the removed value counts."""
    removed = {}
    kept = {}
    for column in dataframe.columns :
        counts = dataframe[column].value_counts()
        threshold = counts.max() / 100.0
        removed[column] = counts[counts < threshold].to_dict()
        kept[column] = counts[counts >= threshold].index
    for column, keep_index in kept.items() :
        dataframe = dataframe[dataframe[column].isin(keep_index)]
    return dataframe, kept, removed
def diag_plot_gen(max_val) :
    """Builds the diagonal-cell plotting callback for a seaborn PairGrid.

    The returned function keeps its own call counter (a function attribute)
    so the last of the ``max_val`` diagonal cells can be drawn without the
    bar offset.
    """
    def diag_plot(xarray, **kwargs):
        # Histogram of the feature's values, in value order.
        histv = xarray.value_counts().sort_index()
        diag_plot.call_ct += 1
        if (diag_plot.call_ct < max_val) :
            # NOTE(review): shifts the bars for all but the last diagonal
            # cell -- presumably to align them with the heatmap cells below;
            # confirm against the rendered output.
            kwargs.update({'position': -0.5})
        histv.plot(kind='bar', **kwargs)
        ax = pl.gca()
        ax.tick_params(direction='in', labelsize=7, which = 'both')
    diag_plot.call_ct = 0
    return diag_plot
def off_plot(xarray, yarray, **kwargs):
    """Draws an off-diagonal PairGrid cell: a 2D count heatmap of the two
    features. Also records the cell's colour mapping in the module-level
    ``mappables`` list so shared colorbars can be added later."""
    dfcomb = pd.concat((yarray, xarray),axis=1)
    # 2D histogram: count of every (y, x) value combination.
    hist2d = dfcomb.groupby(dfcomb.columns.tolist()).size().unstack()
    cmap = mat.cm.viridis
    mappable, bounds, norm = calculate_mappable_bounds(hist2d, cmap=cmap)
    mappables.append([mappable, bounds,[xarray.name,yarray.name]])
    sn.heatmap(hist2d,cbar=False,cmap=cmap,norm=norm, **kwargs, xticklabels=True)
    ax = pl.gca()
    # Draw the cell frame (seaborn hides the spines by default).
    for _, spine in ax.spines.items():
        spine.set_visible(True)
    ax.tick_params(direction='in',labelsize=7, which='both')
def add_zero_infront_ofgate(dataframe, col) :
    """Zero-pads single-digit gate labels in ``col`` (e.g. 'A1' -> 'A01');
    labels with two or more digits, and NaN entries, are left untouched."""
    single_digit = dataframe[col].str.match('^[A-K][0-9][A-Z]*$').fillna(False)
    padded = dataframe.loc[single_digit, col].apply(lambda gate : gate[0]+'0'+gate[1:])
    dataframe.loc[single_digit, col] = padded
    return dataframe
def training_set_feature_distributions(dataframe, model) :
    '''
    Create a triangular distribution plot of the feature values, one figure
    per terminal with enough flights.
    (For now only works up to 6 features, after that there is an issue with
    the colorbar placement.)
    Parameters
    ----------
    dataframe : dataframe containing the data of the pipeline
    model : model dictionary containing the feature names
    Returns
    -------
    filenames of the plots (one png per terminal)
    '''
    df = dataframe.copy()
    # impeded pipelines push the gufi column to index, so I need to push it back
    if (df.index.name == 'gufi') :
        df = df.reset_index()
    features = model['features']
    nfeat = len(features)
    # Terminal letter is the leading character of the stand/gate label.
    df['terminal'] = df['departure_stand_actual'].str.extract('^([A-K])[0-9]+')
    df = add_zero_infront_ofgate(df, 'departure_stand_actual')
    terminal_names = df['terminal'].value_counts()
    # need terminals with enough flights
    terminal_names = terminal_names[terminal_names > 50].index.to_numpy()
    outfiles = []
    # switch time to binned values :
    if (("Binned_features" in model) and isinstance(model["Binned_features"],list)) :
        for featuri in model["Binned_features"] :
            df[featuri] = time_transform(df[featuri])
    for terminal_name in terminal_names :
        dfs = df[(df.terminal == terminal_name)].copy()
        # Restrict to the training subset (and to unimpeded flights when the
        # flag column is present).
        if ('unimpeded_AMA' in dfs) :
            dfa = dfs[dfs.unimpeded_AMA & (dfs.group == 'train')]
        else :
            dfa = dfs[dfs.group == 'train']
        dfa, kept, removed = remove_low_num_and_nan(dfa[features])
        # Lower-triangle grid: bar histograms on the diagonal (diag_plot_gen),
        # 2D count heatmaps below it (off_plot, which also fills 'mappables').
        graph = sn.PairGrid(dfa, vars=features, corner=True,\
                            diag_sharey= False, despine=False, aspect = 1.5)
        graph.map_diag(diag_plot_gen(nfeat))
        graph.map_lower(off_plot)
        # rotate the bottom right xlabel
        axc = graph.axes[nfeat-1][nfeat-1]
        if (len(axc.get_xticks()) > 5) :
            axc.set_xticklabels(axc.get_xticklabels(), rotation = 90)
        # Missing/low feature info: annotate which feature values are absent
        # from the training subset and which were dropped for low counts.
        dl = 0.015*6/nfeat
        miss_info_string = []
        low_info_string = []
        for k in range(nfeat) :
            feat = features[k]
            missing_feat = set(dfa[feat].unique()) - set(dfs[feat].unique())
            miss_info_string.append('missing '+feat+' :'+str(missing_feat))
            low_feat = removed[feat]
            lines = twp.wrap('low count '+feat+' :'+str(low_feat))
            low_info_string += lines
        pl.annotate("\n".join(miss_info_string), (0.4, 0.9),
                    xycoords='figure fraction', color='r')
        pl.annotate("\n".join(low_info_string),(0.4,0.9-dl*len(low_info_string)),\
                    xycoords="figure fraction",color='Orange')
        # add some colorbars
        # NOTE(review): the k/l bookkeeping below places three colorbars per
        # (empty) grid cell, walking cells in steps of nfeat; the 'fracs'
        # constants are layout-tuned. Inferred from the indexing -- confirm
        # against rendered output before changing.
        pos_col = nfeat*nfeat-nfeat
        pl.subplot(nfeat,nfeat,pos_col)
        k,l=2,0
        pl.axis('off')
        fracs = [0.3, 0.45, 0.9] # moving colorbar away from each other
        for map_ct in range(len(mappables)):
            if (k == -1) :
                pos_col = pos_col - nfeat
                pl.subplot(nfeat,nfeat,pos_col)
                k = 2
                l = l+1
                pl.axis('off')
            mappable = mappables[k+l*3][0]
            bounds = mappables[k+l*3][1]
            label = "\n".join(mappables[k+l*3][2])
            cb=pl.colorbar(mappable, label=label, aspect = 10, fraction = fracs[2-k], ticks= bounds)
            cb.ax.tick_params(labelsize=7)
            cb.ax.yaxis.set_ticks_position('left')
            cb.set_label(cb._label,size=7,labelpad=0)
            k = k-1
        fign = pl.get_fignums()[-1]
        fig = pl.figure(fign)
        fig.suptitle('Terminal '+terminal_name)
        outfile = 'terminal_'+terminal_name+'_training_distributions.png'
        pl.savefig(outfile,bbox_inches='tight')
        pl.close()
        outfiles.append(outfile)
        # Reset the module-level collection for the next terminal's figure.
        mappables.clear()
    return outfiles
|
{"hexsha": "84a876db8f1d341623951bb749dc52ee5b310046", "size": 6635, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/taxi_out/pipelines/data_science/training_set_distribution.py", "max_stars_repo_name": "nasa/ML-airport-taxi-out", "max_stars_repo_head_hexsha": "0b153c6527c4a6f4fac31ec83bf4f10835e89276", "max_stars_repo_licenses": ["NASA-1.3"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2021-09-10T13:55:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T08:41:17.000Z", "max_issues_repo_path": "src/taxi_out/pipelines/data_science/training_set_distribution.py", "max_issues_repo_name": "nasa/ML-airport-taxi-out", "max_issues_repo_head_hexsha": "0b153c6527c4a6f4fac31ec83bf4f10835e89276", "max_issues_repo_licenses": ["NASA-1.3"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/taxi_out/pipelines/data_science/training_set_distribution.py", "max_forks_repo_name": "nasa/ML-airport-taxi-out", "max_forks_repo_head_hexsha": "0b153c6527c4a6f4fac31ec83bf4f10835e89276", "max_forks_repo_licenses": ["NASA-1.3"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-09-13T03:23:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-15T07:59:30.000Z", "avg_line_length": 34.9210526316, "max_line_length": 102, "alphanum_fraction": 0.6110022607, "include": true, "reason": "import numpy", "num_tokens": 1736}
|
# Advent of Code 2016
#
# From https://adventofcode.com/2016/day/21
#
from itertools import product, permutations
import networkx as nx
import numpy as np
from more_itertools import pairwise
# Extract inputs
# Extract inputs: the maze as a 2D character array.
data = np.array([list(x.strip()) for x in open('../inputs/Advent2016_24.txt', 'r')])
G = nx.Graph()
numbers = {}
# Build the corridor graph: nodes are encoded as complex numbers x + y*1j,
# edges join horizontally/vertically adjacent non-wall ('#') cells.
# Digit cells are the numbered targets and are recorded in 'numbers'.
for y, x in product(range(data.shape[1] - 1), range(data.shape[0] - 1)):
    if data[x, y] == '#':
        continue
    if data[x + 1, y] != '#':
        G.add_edge(x + 1j * y, x + 1 + 1j * y)
    if data[x, y + 1] != '#':
        G.add_edge(x + 1j * y, x + 1j * (y + 1))
    if data[x, y].isdigit():
        numbers[data[x, y]] = x + 1j * y
def find_shortest(G, part=1):
    """Length of the shortest route that starts at cell '0' and visits every
    numbered cell; for part 2 the route must also return to '0'."""
    best = 999999999999
    targets = list(numbers.values())
    start = targets.pop(targets.index(numbers['0']))
    for perm in permutations(targets):
        if part == 1:
            route = [start] + list(perm)
        else:
            route = [start] + list(perm) + [start]
        total = 0
        for src, dst in pairwise(route):
            total += nx.shortest_path_length(G, src, dst)
        best = min(best, total)
    return best
# Part 1: open tour over all targets; part 2: closed tour back to '0'.
print(f"AoC 2016 Day 24, Part 1 answer is {find_shortest(G, 1)}")
print(f"AoC 2016 Day 24, Part 2 answer is {find_shortest(G, 2)}")
|
{"hexsha": "f0816dd1f9b2099a1ed59027ea1ac28b1856cbee", "size": 1324, "ext": "py", "lang": "Python", "max_stars_repo_path": "2016/src/Advent2016_24.py", "max_stars_repo_name": "davidxbuck/advent2018", "max_stars_repo_head_hexsha": "eed5424a8008b9c0829f5872ad6cd469ce9f70b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-11T02:19:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-11T02:19:28.000Z", "max_issues_repo_path": "2016/src/Advent2016_24.py", "max_issues_repo_name": "davidxbuck/advent2018", "max_issues_repo_head_hexsha": "eed5424a8008b9c0829f5872ad6cd469ce9f70b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2016/src/Advent2016_24.py", "max_forks_repo_name": "davidxbuck/advent2018", "max_forks_repo_head_hexsha": "eed5424a8008b9c0829f5872ad6cd469ce9f70b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-08T04:31:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-08T04:31:46.000Z", "avg_line_length": 28.7826086957, "max_line_length": 84, "alphanum_fraction": 0.5936555891, "include": true, "reason": "import numpy,import networkx", "num_tokens": 409}
|
import glob
import logging
from pathlib import Path
from datetime import datetime
from typing import List, Tuple
from lxml import etree
import cv2
import numpy as np
class Line2Page:
"""Object, which stores meta data
source, image_folder, gt_folder, dest_folder are Path objects
"""
def __init__(self,
creator: str,
source: str,
image_folder: str,
gt_folder: str,
destination_folder: str,
ext: str,
pred: str,
lines: int,
spacing: int,
border: Tuple[int],
debug: bool,
threads: int,
xml_schema: str):
logging.basicConfig(level=int(debug))
self.log = logging.getLogger(__name__)
self.creator = creator
self.source = self.check_dest(Path(source).resolve())
if image_folder == source or image_folder:
self.image_folder = self.source
else:
self.image_folder = self.check_dest(Path(image_folder).resolve())
if gt_folder == str(self.source) or gt_folder:
self.gt_folder = self.source
else:
self.gt_folder = self.check_dest(Path(gt_folder).resolve())
self.dest_folder = self.check_dest(Path(destination_folder).resolve(), True)
self.ext = ext
self.pred = pred
self.lines = lines
self.line_spacing = spacing
self.threads = threads
# List of all images in the folder with the desired extension
self.imgList = [f for f in sorted(glob.glob(f"{str(self.image_folder)}/*{self.ext}"))]
self.gtList = []
self.nameList = []
self.matches = []
# Extension strings used
self.gt_suffix = ".gt.txt"
self.pred_suffix = ".pred.txt"
self.img_suffix = '.nrm.png'
self.background_colour = (0, 0, 0)
self.colour_channels = 3
if border[1] > lines:
footer_size = border[1] - lines
else:
footer_size = 0
self.border = (border[0], footer_size, border[2], border[3])
self.nsmap = f'http://schema.primaresearch.org/PAGE/gts/pagecontent/{xml_schema}-07-15'
self.xsi = 'http://www.w3.org/2001/XMLSchema-instance'
self.xmlSchemaLocation = \
f'http://schema.primaresearch.org/PAGE/gts/pagecontent/{xml_schema}-07-15 ' \
f'http://schema.primaresearch.org/PAGE/gts/pagecontent/{xml_schema}-07-15/pagecontent.xsd'
self.log.debug(f"Attributes: \nCreator: {self.creator}\nSource Folder: {self.source}\n"
f"Image Folder: {self.image_folder}\nGT Folder: {self.gt_folder}\n"
f"Destination Folder: {self.dest_folder}\nImage Extension: {self.ext}\n"
f"Predecessor: {self.pred}\nNumber of lines per image: {self.lines}\n"
f"Line Spacing: {self.line_spacing}\nBorder: (head:{self.border[0]}, footer: {self.border[1]}, "
f"left: {self.border[2]}, right:{self.border[3]})\nThreads: {self.threads}\n"
f"XML Schema: {self.xmlSchemaLocation}")
@staticmethod
def get_text(file: str) -> str:
"""Extracts the text from inside the file"""
with Path(file).open('r') as read_file:
return read_file.read().rstrip()
@staticmethod
def chunks(lst: list, n: int):
"""Yields successive n-sized chunks from lst"""
for i in range(0, len(lst), n):
yield lst[i: i + n]
@staticmethod
def name_pages(pages: List[str]) -> List[str]:
"""Returns a list of all objects in pages with pagename followed by a 4-digit pagenumber"""
page_with_name = []
pages_with_name = []
page_iterator = 0
for page in pages:
page_iterator += 1
name = str(page_iterator).zfill(4)
page_with_name.append(page)
page_with_name.append(name)
pages_with_name.append(page_with_name.copy())
page_with_name.clear()
return pages_with_name
def check_dest(self, dest: Path, create_folder=False):
"""Checks if the destination is legitimate and creates directory, if create is True"""
if not dest.is_dir():
if create_folder:
dest.expanduser()
Path.mkdir(dest, parents=True, exist_ok=True)
self.log.info(f" {str(dest)} directory created")
else:
error_msg = f" {str(dest)} does not exist"
self.log.error(error_msg)
raise NameError(error_msg)
return dest
def make_page(self, page_with_name, semaphore):
"""Creates img and corresponding xml of a page"""
merged = self.merge_images(page_with_name[0])
cv2.imwrite(str(self.dest_folder.joinpath(Path(page_with_name[1]).name)) + self.img_suffix, merged)
xml_tree = self.build_xml(page_with_name[0], page_with_name[1] + self.img_suffix, merged.shape[0],
merged.shape[1])
self.log.debug(etree.tostring(xml_tree, encoding='unicode', pretty_print=True))
xml = etree.tostring(xml_tree, encoding='utf-8', xml_declaration='xml')
xml_tree.clear()
myfile = open(str(self.dest_folder.joinpath(Path(page_with_name[1]).name)) + ".xml", "wb")
myfile.write(xml)
myfile.close()
semaphore.release()
def match_files(self):
"""Pairs image with gt-Text and adds the pairing to matches"""
for img in self.imgList:
pairing = []
img_name = Path(img.split('.')[0]).name
self.gtList = [f for f in glob.glob(str(self.gt_folder.joinpath(img_name)) + self.gt_suffix)]
if len(self.gtList) > 0:
self.nameList.append(img_name)
pairing.append(img)
gt_filename = self.gtList[0]
pairing.append(gt_filename)
pairing.append(self.get_text(gt_filename))
if self.pred:
pred_filelist = [f for f in glob.glob(str(self.gt_folder.joinpath(img_name)) + self.pred_suffix)]
if len(pred_filelist) > 0:
pred_filename = pred_filelist[0]
pairing.append(pred_filename)
pairing.append(self.get_text(pred_filename))
else:
self.log.warning(f" The File {self.gt_folder.joinpath(img_name)}{self.pred_suffix} could not be"
f" found! Omitting line from page")
self.matches.append(pairing.copy())
else:
self.log.warning(
f" The File {str(self.gt_folder.joinpath(img_name))}{self.gt_suffix} could not be found! "
f"Omitting line from page")
    def merge_images(self, page):
        """
        Merge list of images into one, displayed on top of each other
        :param page: list of line matches; each entry's first element is a line-image path
        :return: the merged Image object (numpy array, BGR as read by cv2)
        """
        img_list = []
        img_width = 0
        # find max-width of all images
        for line in page:
            image_data = cv2.imread(line[0])
            image = image_data.copy()
            width = image.shape[1]
            img_width = max(img_width, width)
            img_list.append(image)
        # Header strip: full page width = widest line plus left/right borders.
        result = np.full((self.border[0], img_width + self.border[2] + self.border[3], self.colour_channels),
                         self.background_colour, np.uint8)
        # All images need the same width for np.concatenate to work -> padding on the image at its right side
        for img in img_list:
            padding = img_width - img.shape[1]
            # Add left border, right border (plus width padding) and the
            # configured line spacing below each line.
            img = cv2.copyMakeBorder(img, 0, self.line_spacing, self.border[2], padding + self.border[3],
                                     cv2.BORDER_CONSTANT, None, self.background_colour)
            result = np.concatenate((result, img), axis=0)
        # Footer strip appended below the last line (border[1] rows).
        footer = np.full((self.border[1], img_width + self.border[2] + self.border[3], self.colour_channels),
                         self.background_colour, np.uint8)
        result = np.concatenate((result, footer), axis=0)
        return result
    def build_xml(self, line_list, img_name, img_height, img_width):
        """
        Builds PageXML from list of images, with corresponding text
        :param line_list: line matches (img path, gt path, gt text[, pred path, pred text])
        :param img_name: file name of the merged page image
        :param img_height: merged page height in pixels
        :param img_width: merged page width in pixels
        :return: the built PageXml[.xml] file
        """
        attribute_schema_location = etree.QName("http://www.w3.org/2001/XMLSchema-instance", "schemaLocation")
        # NOTE(review): this literal duplicates self.xsi — presumably kept
        # inline on purpose; confirm before deduplicating.
        NSMAP = {None: self.nsmap,
                 'xsi': 'http://www.w3.org/2001/XMLSchema-instance'}
        pcgts = etree.Element('PcGts', {attribute_schema_location: self.xmlSchemaLocation}, nsmap=NSMAP)
        metadata = etree.SubElement(pcgts, 'Metadata')
        creator = etree.SubElement(metadata, 'Creator')
        creator.text = self.creator
        created = etree.SubElement(metadata, 'Created')
        # Created and LastChange share one timestamp: the file is generated once.
        generated_on = datetime.now().isoformat()
        created.text = generated_on
        last_change = etree.SubElement(metadata, 'LastChange')
        last_change.text = generated_on
        page = etree.SubElement(pcgts, 'Page')
        page.set('imageFilename', img_name)
        page.set('imageHeight', str(img_height))
        page.set('imageWidth', str(img_width))
        text_region = etree.SubElement(page, 'TextRegion')
        text_region.set('id', 'r0')
        text_region.set('type', 'paragraph')
        region_coords = etree.SubElement(text_region, 'Coords')
        # Region rectangle = page minus configured borders
        # (border = (head, footer, left, right)).
        min_x = self.border[2]
        max_x = img_width - self.border[3]
        min_y = self.border[0]
        max_y = img_height - self.border[1]
        coord_string = f'{min_x},{min_y} {max_x},{min_y} {max_x},{max_y} {min_x},{max_y}'
        region_coords.set('points', coord_string)
        last_bottom = min_y
        for line in line_list:
            text_line = etree.SubElement(text_region, 'TextLine')
            text_line.set('id', 'r0_l' + str(Path(line[0]).name.split('.')[0].zfill(3)))
            line_coords = etree.SubElement(text_line, 'Coords')
            # Re-read the line image to recover its pixel dimensions.
            image = cv2.imread(line[0])
            height = image.shape[0]
            width = image.shape[1]
            line_coords.set('points', self.make_coord_string(last_bottom, width, height))
            last_bottom += (height + self.line_spacing)
            # index 0 holds the ground truth, index 1 (below) the prediction.
            line_gt_text = etree.SubElement(text_line, 'TextEquiv')
            line_gt_text.set('index', str(0))
            unicode_gt = etree.SubElement(line_gt_text, 'Unicode')
            unicode_gt.text = line[2]
            if self.pred:
                line_prediction_text = etree.SubElement(text_line, 'TextEquiv')
                line_prediction_text.set('index', str(1))
                unicode_prediction = etree.SubElement(line_prediction_text, 'Unicode')
                unicode_prediction.text = line[4]
        return pcgts
def make_coord_string(self, previous_lower_left, line_width, line_height):
"""Builds value string, to be incorporated into the xml"""
x_min = self.border[0]
x_max = x_min + line_width
y_min = previous_lower_left
y_max = y_min + line_height
return f'{x_min},{y_min} {x_max},{y_min} {x_max},{y_max} {x_min},{y_max}'
|
{"hexsha": "f1316033c2baa9f5b3c256dd4fe91455069b3ffc", "size": 11376, "ext": "py", "lang": "Python", "max_stars_repo_path": "pagetools/src/line2page/Line2Page.py", "max_stars_repo_name": "ThisTunaCanFly/PAGETools", "max_stars_repo_head_hexsha": "aa74cbae347132611c761abd5661a284327d7fac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pagetools/src/line2page/Line2Page.py", "max_issues_repo_name": "ThisTunaCanFly/PAGETools", "max_issues_repo_head_hexsha": "aa74cbae347132611c761abd5661a284327d7fac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pagetools/src/line2page/Line2Page.py", "max_forks_repo_name": "ThisTunaCanFly/PAGETools", "max_forks_repo_head_hexsha": "aa74cbae347132611c761abd5661a284327d7fac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.0930232558, "max_line_length": 120, "alphanum_fraction": 0.5864978903, "include": true, "reason": "import numpy", "num_tokens": 2532}
|
from __future__ import print_function
from builtins import range
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
import numpy as np
from sklearn.cluster import KMeans
from sklearn.impute import SimpleImputer
from h2o.estimators.kmeans import H2OKMeansEstimator
def get_model_kmeans():
    """Train H2O k-means for k=2..6 on benign.csv and print scikit-learn centers for comparison."""
    print("Importing benign.csv data...")
    benign_h2o = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/benign.csv"))
    benign_sci = np.genfromtxt(pyunit_utils.locate("smalldata/logreg/benign.csv"), delimiter=",")
    # Impute missing values with column mean
    mean_imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
    benign_sci = mean_imputer.fit_transform(benign_sci)
    for k in range(2, 7):
        km_h2o = H2OKMeansEstimator(k=k)
        km_h2o.train(x=list(range(benign_h2o.ncol)), training_frame=benign_h2o)
        km_h2o.show()
        model = h2o.get_model(km_h2o._id)
        model.show()
        km_sci = KMeans(n_clusters=k, init='k-means++', n_init=1)
        km_sci.fit(benign_sci)
        print("sckit centers")
        print(km_sci.cluster_centers_)
if __name__ == "__main__":
    # Run under the pyunit harness when executed as a script.
    pyunit_utils.standalone_test(get_model_kmeans)
else:
    # NOTE(review): also runs the test at *import* time — this matches the
    # h2o pyunit convention, but confirm it is intended before importing
    # this module from elsewhere.
    get_model_kmeans()
|
{"hexsha": "b6da4c264ea4411d0afbe972e5b6fd382d3b3751", "size": 1221, "ext": "py", "lang": "Python", "max_stars_repo_path": "h2o-py/tests/testdir_algos/kmeans/pyunit_get_modelKmeans.py", "max_stars_repo_name": "vishalbelsare/h2o-3", "max_stars_repo_head_hexsha": "9322fb0f4c0e2358449e339a434f607d524c69fa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6098, "max_stars_repo_stars_event_min_datetime": "2015-05-22T02:46:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:54:51.000Z", "max_issues_repo_path": "h2o-py/tests/testdir_algos/kmeans/pyunit_get_modelKmeans.py", "max_issues_repo_name": "vishalbelsare/h2o-3", "max_issues_repo_head_hexsha": "9322fb0f4c0e2358449e339a434f607d524c69fa", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2517, "max_issues_repo_issues_event_min_datetime": "2015-05-23T02:10:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T17:03:39.000Z", "max_forks_repo_path": "h2o-py/tests/testdir_algos/kmeans/pyunit_get_modelKmeans.py", "max_forks_repo_name": "vishalbelsare/h2o-3", "max_forks_repo_head_hexsha": "9322fb0f4c0e2358449e339a434f607d524c69fa", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2199, "max_forks_repo_forks_event_min_datetime": "2015-05-22T04:09:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T22:20:45.000Z", "avg_line_length": 30.525, "max_line_length": 97, "alphanum_fraction": 0.7174447174, "include": true, "reason": "import numpy", "num_tokens": 345}
|
"""
"""
import sys

import h5py
import numpy as np
class RawH5Parser:
    """Reader for procmon raw h5 files.

    Concatenates the per-host datasets (procdata/procstat/procfd/procobs)
    found across multiple h5 files into contiguous numpy structured arrays,
    keeping per-host offset/count bookkeeping for later slicing.
    """

    def __init__(self):
        self.base_hostlist = []  # fallback host list when a file's keys can't be read
        self.read_datasets = ['procdata', 'procstat', 'procfd', 'procobs']
        ## fields filled out by the parse() function
        self.filenames = []    # files read so far
        self.hosts = []        # sorted host names found across all files
        self.datasets = {}     # dataset name -> concatenated numpy array
        self.host_offsets = {} # dataset name -> per-host start index into the array
        self.host_counts = {}  # dataset name -> per-host record count written so far
        self.dset_types = {}   # dataset name -> numpy dtype (with host/sortidx columns)

    def __repr__(self):
        return "RawH5Parser:ReadFiles_%d;Hosts_%d" % (len(self.filenames), len(self.hosts))

    def __get_h5_host_counts(self, filename, host_counts, ref_hosts=None):
        """Accumulate per-host, per-dataset record counts from one h5 file into *host_counts*."""
        hostnames = []
        fd = h5py.File(filename, 'r')
        # Narrowed from a bare except: only real failures should trigger the fallback.
        try:
            hostnames = fd.keys()
        except Exception:
            sys.stderr.write("Couldn't get hostnames from %s, problem with "
                             "file?; falling back on hard-coded host list." % filename)
            hostnames = self.base_hostlist
        for host in hostnames:
            if host not in fd:
                continue
            if ref_hosts is not None and host not in ref_hosts:
                continue
            hostgroup = fd[host]
            if host not in host_counts:
                host_counts[host] = {x: 0 for x in self.read_datasets}
            for dset in self.read_datasets:
                if dset not in hostgroup:
                    continue
                # nRecords attribute is authoritative; the dataset may be over-allocated.
                nRec = hostgroup[dset].attrs['nRecords']
                host_counts[host][dset] += nRec  # hostgroup[dset].len()
        fd.close()

    def __detectParents(self, data):
        """Return an int32 mask: 1 where a record's pid appears as some record's ppid."""
        parentPids = []
        if data.size > 0:
            # Bugfix: the original wrote np.intersect1d(data['pid', data['ppid']]),
            # a malformed multi-field index; the intent is the intersection of
            # the pid and ppid columns (pids that are parents of something).
            parentPids = np.unique(np.intersect1d(data['pid'], data['ppid']))
        isParent = np.zeros(shape=data.size, dtype=np.int32)
        for pid in parentPids:
            mask = data['pid'] == pid
            isParent[mask] = 1
        return isParent

    def parse(self, filenames, ref_hosts=None):
        """Read all the data from the input h5 files."""
        ## step 1: get the list of all the hosts and how many records of each
        ## dataset are stored across all the files for which we are responsible
        host_counts = {}
        for filename in filenames:
            self.__get_h5_host_counts(filename, host_counts, ref_hosts)
        ## step 2: for each dataset, allocate sufficient space and start reading
        ## the data
        self.datasets = {}
        self.dset_types = {}
        self.hosts = sorted(host_counts.keys())
        self.host_offsets = {}
        self.host_counts = {}
        for dset in self.read_datasets:
            self.host_counts[dset] = np.zeros(shape=len(self.hosts), dtype=np.uint64)
            self.host_offsets[dset] = np.zeros(shape=len(self.hosts), dtype=np.uint64)
            # Exclusive prefix sum of the per-host totals gives each host's
            # start index in the concatenated array.
            self.host_offsets[dset][:] = np.hstack(
                (0, np.cumsum([host_counts[x][dset] for x in self.hosts])[0:-1])
            )
        for filename in filenames:
            self.filenames.append(filename)
            fd = h5py.File(filename, 'r')
            for (h_idx, host) in enumerate(self.hosts):
                if host not in fd:
                    continue
                hostgroup = fd[host]
                for dset in self.read_datasets:
                    # starting position to write is the offset, plus however much has been written for this host
                    offset = self.host_offsets[dset][h_idx]
                    offset += self.host_counts[dset][h_idx]
                    if dset not in hostgroup or hostgroup[dset].len() == 0:
                        continue
                    if dset not in self.datasets:
                        if dset not in self.dset_types:
                            ## get type by reading first entry for dset we can find
                            ltype = hostgroup[dset][0].dtype
                            # Keep fields ordered by their byte offset in the source dtype.
                            newtype = sorted([(x, ltype.fields[x][0]) for x in ltype.fields],
                                             key=lambda y: ltype.fields[y[0]][1])
                            newtype.append(('host', '|S36',))
                            newtype.append(('sortidx', np.uint32,))
                            if dset == "procdata":
                                newtype.append(('isParent', np.int32))
                            self.dset_types[dset] = np.dtype(newtype)
                        total = sum([host_counts[x][dset] for x in host_counts])
                        self.datasets[dset] = np.zeros(total, dtype=self.dset_types[dset])
                    nRec = hostgroup[dset].attrs['nRecords']
                    limit = offset + nRec
                    ldata = fd[host][dset][0:nRec]
                    ltype = ldata.dtype
                    if dset == "procdata":
                        self.datasets[dset][offset:limit]['isParent'] = self.__detectParents(ldata)
                    for col in ltype.names:
                        self.datasets[dset][offset:limit][col] = ldata[col]
                    self.datasets[dset][offset:limit]['host'] = host
                    self.host_counts[dset][h_idx] += nRec
            fd.close()

    def get(self, host, dataset):
        """Return the records of *dataset* belonging to *host*.

        Bugfix: the original signature lacked ``self`` (making the method
        uncallable on an instance) and the function never returned the data
        on the success path; list.index also raises instead of going negative,
        so the ``h_idx < 0`` check was dead.

        :return: the host's slice of the concatenated array, or an empty array
            when the host is unknown
        :raises ValueError: when *dataset* is not a known dataset identifier
        """
        if dataset not in self.dset_types:
            raise ValueError("%s is unknown datset identifier" % dataset)
        try:
            h_idx = self.hosts.index(host)
        except ValueError:
            return np.empty(shape=0, dtype=self.dset_types[dataset])
        offset = int(self.host_offsets[dataset][h_idx])
        limit = offset + int(self.host_counts[dataset][h_idx])
        return self.datasets[dataset][offset:limit]

    def free(self, dset):
        """Drop the (possibly large) concatenated array for *dset*."""
        if dset in self.datasets and self.datasets[dset] is not None:
            del self.datasets[dset]

    def count_processes(self, host):
        """Count distinct (pid, startTime) processes recorded for *host* (0 if unknown)."""
        try:
            h_idx = self.hosts.index(host)
        except ValueError:
            # Bugfix: list.index raises for a missing host; the original's
            # ``h_idx < 0`` check could never fire.
            return 0
        host_offset = self.host_offsets['procdata'][h_idx]
        host_count = self.host_counts['procdata'][h_idx]
        if host_count == 0:
            return 0
        limit = host_offset + host_count
        unique_processes = np.unique(self.datasets['procdata'][host_offset:limit][['pid', 'startTime']])
        return unique_processes.size
|
{"hexsha": "6515bfd7618667e17df72c9b282c18bdb0c18c86", "size": 5952, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/analysis/python/procmon/H5Parser.py", "max_stars_repo_name": "glennklockwood/procmon", "max_stars_repo_head_hexsha": "c6e67d63e7c9c24f85a46b6d8965b8c615097edc", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/analysis/python/procmon/H5Parser.py", "max_issues_repo_name": "glennklockwood/procmon", "max_issues_repo_head_hexsha": "c6e67d63e7c9c24f85a46b6d8965b8c615097edc", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/analysis/python/procmon/H5Parser.py", "max_forks_repo_name": "glennklockwood/procmon", "max_forks_repo_head_hexsha": "c6e67d63e7c9c24f85a46b6d8965b8c615097edc", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9019607843, "max_line_length": 131, "alphanum_fraction": 0.5376344086, "include": true, "reason": "import numpy", "num_tokens": 1340}
|
"""
Data loader for the Healing MNIST data set (c.f. https://arxiv.org/abs/1511.05121)
Adapted from https://github.com/Nikita6000/deep_kalman_filter_for_BM/blob/master/healing_mnist.py
"""
import numpy as np
import scipy.ndimage
from tensorflow.keras.datasets import mnist
def apply_square(img, square_size):
    """Return a copy of *img* with a white (255) square of side *square_size* in the top-left corner."""
    out = np.array(img)
    out[:square_size, :square_size] = 255
    return out
def apply_noise(img, bit_flip_ratio):
    """Invert each pixel independently with probability *bit_flip_ratio* (expects a 28x28 image)."""
    noisy = np.array(img)
    flip = np.random.random(size=(28, 28)) < bit_flip_ratio
    noisy[flip] = 255 - noisy[flip]
    return noisy
def get_rotations(img, rotation_steps):
    """Yield the image after each successive (cumulative) rotation in degrees."""
    current = img
    for angle in rotation_steps:
        current = scipy.ndimage.rotate(current, angle, reshape=False)
        yield current
def binarize(img):
    """Threshold at 127: pixels > 127 become 1, the rest 0.

    Bugfix: ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is the
    exact type that alias referred to, so behavior is unchanged.
    """
    return (img > 127).astype(int)
def heal_image(img, seq_len, square_count, square_size, noise_ratio, max_angle):
    """Build a sequence of *seq_len* binarized, randomly rotated views of *img*.

    Returns the frame list and the per-step rotation angles. Square occlusion
    and bit-flip noise are currently disabled (see the commented-out calls),
    but their RNG draws are kept so the random stream matches the original.
    """
    # Window where the occluding squares would be placed (currently unused,
    # but the randint call must stay to preserve the RNG sequence).
    squares_begin = np.random.randint(0, seq_len - square_count)
    squares_end = squares_begin + square_count
    rotation_steps = np.random.normal(size=seq_len, scale=max_angle)
    frames = []
    for idx, rotated in enumerate(get_rotations(img, rotation_steps)):
        # Don't add the squares right now
        # if squares_begin <= idx < squares_end:
        #     rotated = apply_square(rotated, square_size)
        # Don't add noise for now
        # rotated = apply_noise(rotated, noise_ratio)
        frames.append(binarize(rotated))
    return frames, rotation_steps
class HealingMNIST():
    """Healing-MNIST dataset: per-digit sequences of rotated, binarized MNIST frames."""

    def __init__(self, seq_len=5, square_count=3, square_size=5, noise_ratio=0.15, digits=range(10), max_angle=180):
        (x_train, y_train), (x_test, y_test) = mnist.load_data()

        def build(images, targets):
            """Heal every image whose label is in *digits*; keep order."""
            seqs, rots, labs = [], [], []
            for img, label in zip(images, targets):
                if label not in digits:
                    continue
                frames, angles = heal_image(img, seq_len, square_count,
                                            square_size, noise_ratio, max_angle)
                seqs.append(frames)
                rots.append(angles)
                labs.append(label)
            return np.array(seqs), np.array(rots), np.array(labs)

        self.train_images, self.train_rotations, self.train_labels = build(x_train, y_train)
        self.test_images, self.test_rotations, self.test_labels = build(x_test, y_test)
|
{"hexsha": "cafbf7c419b52327ada84bdf631fbcdcb7f95ca0", "size": 3052, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/healing_mnist.py", "max_stars_repo_name": "siddharthchaini/GP-VAE", "max_stars_repo_head_hexsha": "440b5875bf0f95fb2fd551c8d02a494494cda511", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/healing_mnist.py", "max_issues_repo_name": "siddharthchaini/GP-VAE", "max_issues_repo_head_hexsha": "440b5875bf0f95fb2fd551c8d02a494494cda511", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/healing_mnist.py", "max_forks_repo_name": "siddharthchaini/GP-VAE", "max_forks_repo_head_hexsha": "440b5875bf0f95fb2fd551c8d02a494494cda511", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0804597701, "max_line_length": 117, "alphanum_fraction": 0.6536697248, "include": true, "reason": "import numpy,import scipy", "num_tokens": 706}
|
import argparse
import io
import time
import numpy as np
from PIL import Image, ImageColor, ImageDraw, ImageFont, ImageOps
from tflite_runtime.interpreter import Interpreter
# Module level vars
# Both are populated by initialize(); the detection helpers below assume
# initialize() has already been called (obj_detect_from_pil calls it lazily).
interpreter = None
labels = None
def load_labels(path):
    """Parse a label file of "<id> <name>" lines into an {id: name} dict."""
    mapping = {}
    with open(path) as handle:
        for raw in handle.readlines():
            parts = raw.split(" ")
            mapping[int(parts[0])] = parts[1].strip()
    return mapping
def initialize(model_file='detect.tflite', labels_file='coco_labels.txt'):
    """Load the TFLite model and label map into the module-level globals.

    :param model_file: path to the .tflite detection model
    :param labels_file: path to the "<id> <name>" label file
    """
    global interpreter, labels
    interpreter = Interpreter(model_file)
    # The original also unpacked the input shape here into unused locals;
    # that dead read has been removed.
    interpreter.allocate_tensors()
    labels = load_labels(labels_file)
def set_input_tensor(image):
    """Copy *image* into the interpreter's (single) input tensor."""
    input_index = interpreter.get_input_details()[0]['index']
    tensor = interpreter.tensor(input_index)()[0]
    tensor[:, :] = image
def get_output_tensor(index):
    """Return the output tensor at *index*, with size-1 axes squeezed away."""
    details = interpreter.get_output_details()[index]
    return np.squeeze(interpreter.get_tensor(details['index']))
def detect_objects(image, threshold, classes_incl=None):
    """Run the interpreter on *image* and collect detections.

    Keeps detections scoring at least *threshold*; when *classes_incl* is a
    non-empty collection, only those class ids are kept (None/empty = all).
    """
    set_input_tensor(image)
    interpreter.invoke()
    # Output layout of the SSD-style detection model: boxes, classes, scores, count.
    boxes = get_output_tensor(0)
    classes = get_output_tensor(1)
    scores = get_output_tensor(2)
    count = int(get_output_tensor(3))
    detections = []
    for i in range(count):
        if scores[i] < threshold:
            continue
        if classes_incl and classes[i] not in classes_incl:
            continue
        detections.append({
            'bounding_box': boxes[i],
            'class_id': int(classes[i]),
            'score': scores[i],
        })
    return detections
def obj_detect_from_pil(img, threshold=0.3, classes_incl=None):
    """Run object detection on a PIL image, lazily initializing the model.

    :param img: PIL.Image in any mode; converted to RGB and resized to the
        model's input size
    :param threshold: minimum score for a detection to be kept
    :param classes_incl: optional collection of class ids to keep (None = all)
    :return: list of detection dicts (bounding_box, class_id, score)
    """
    if not interpreter:
        initialize()
    _, height, width, _ = interpreter.get_input_details()[0]['shape']
    # Bugfix: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # long-standing name for the same resampling filter.
    image = img.convert('RGB').resize((width, height), Image.LANCZOS)
    return detect_objects(image, threshold, classes_incl)
def draw_bounding_box_on_image(image, ymin, xmin, ymax, xmax, color, font, thickness=2, display_str_list=()):
    """Adds a bounding box to an image.

    Box coordinates are normalized (0..1) and scaled to the image size.
    NOTE(review): font.getsize was removed in Pillow 10 — this function
    requires an older Pillow (or porting to font.getbbox); confirm the
    pinned Pillow version.
    """
    draw = ImageDraw.Draw(image)
    im_width, im_height = image.size
    # Scale normalized coordinates to pixel positions.
    (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
                                  ymin * im_height, ymax * im_height)
    draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
               (left, top)],
              width=thickness,
              fill=color)
    # If the total height of the display strings added to the top of the bounding
    # box exceeds the top of the image, stack the strings below the bounding box
    # instead of above.
    display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
    # Each display_str has a top and bottom margin of 0.05x.
    total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
    if top > total_display_str_height:
        text_bottom = top
    else:
        text_bottom = top + total_display_str_height
    # Reverse list and print from bottom to top.
    for display_str in display_str_list[::-1]:
        text_width, text_height = font.getsize(display_str)
        margin = np.ceil(0.05 * text_height)
        # Filled background rectangle behind the caption text.
        draw.rectangle([(left, text_bottom - text_height - 2 * margin),
                        (left + text_width, text_bottom)],
                       fill=color)
        draw.text((left + margin, text_bottom - text_height - margin),
                  display_str,
                  fill="black",
                  font=font)
        text_bottom -= text_height - 2 * margin
def draw_box(image, boxes, class_name, score, max_boxes=10, min_score=0.1):
    """Simplified from obj detection full TF example"""
    # max_boxes / min_score are accepted for interface compatibility but the
    # filtering happens in draw_boxes, not here.
    palette = list(ImageColor.colormap.values())
    font = ImageFont.load_default()
    ymin, xmin, ymax, xmax = tuple(boxes)
    caption = "{}: {}%".format(class_name, int(100 * score))
    # Deterministic per-class colour choice.
    box_colour = palette[hash(class_name) % len(palette)]
    draw_bounding_box_on_image(image, ymin, xmin, ymax, xmax, box_colour, font,
                               display_str_list=[caption])
def draw_boxes(image, results, min_score=0.2, max_boxes=10):
    """Draw up to *max_boxes* of the highest-scoring results onto *image*.

    Bugfix: the original sorted ascending and took the first ``max_boxes``
    entries, which kept the *lowest*-scoring detections; sort descending so
    the best detections are the ones drawn.
    """
    results = sorted(results, key=lambda r: r['score'], reverse=True)
    for r in results[:max_boxes]:
        if r['score'] < min_score:
            continue
        draw_box(image, r['bounding_box'], labels[r['class_id']], r['score'])
|
{"hexsha": "fddec48923511802c50e873c8cccec75b547a348", "size": 4776, "ext": "py", "lang": "Python", "max_stars_repo_path": "detect_tflite.py", "max_stars_repo_name": "atomic77/opilite-object-detect", "max_stars_repo_head_hexsha": "6190034386293dd2eca199ac3bffdf765fd77425", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-30T16:57:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-15T10:16:03.000Z", "max_issues_repo_path": "detect_tflite.py", "max_issues_repo_name": "atomic77/opilite-object-detect", "max_issues_repo_head_hexsha": "6190034386293dd2eca199ac3bffdf765fd77425", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "detect_tflite.py", "max_forks_repo_name": "atomic77/opilite-object-detect", "max_forks_repo_head_hexsha": "6190034386293dd2eca199ac3bffdf765fd77425", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9097744361, "max_line_length": 111, "alphanum_fraction": 0.6423785595, "include": true, "reason": "import numpy", "num_tokens": 1105}
|
! Driver: build the Gauss-Turan quadrature tables.
! Initialises the rule array, runs the assembly, then frees the resources.
program main
use module_gauss_turan_quadrature
implicit none
! One rule slot per supported order (maxngt comes from the module).
type(GaussTuranQuadrature) :: gt(maxngt)
call init_gauss_turan(gt)
call assemble_gauss_turan
call uninit_gauss_turan(gt)
end program main
|
{"hexsha": "e74564debfbd00d9c98de71cdcca40d68d9b3301", "size": 216, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "gtq_quadp/src/main.f90", "max_stars_repo_name": "isakari/gtq_quadp", "max_stars_repo_head_hexsha": "b6b6c7ade6c96cc1f07230dc7d15b3b49fd2a313", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gtq_quadp/src/main.f90", "max_issues_repo_name": "isakari/gtq_quadp", "max_issues_repo_head_hexsha": "b6b6c7ade6c96cc1f07230dc7d15b3b49fd2a313", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gtq_quadp/src/main.f90", "max_forks_repo_name": "isakari/gtq_quadp", "max_forks_repo_head_hexsha": "b6b6c7ade6c96cc1f07230dc7d15b3b49fd2a313", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.0, "max_line_length": 42, "alphanum_fraction": 0.7916666667, "num_tokens": 66}
|
-- Suma_de_los_primeros_cubos.lean
-- Suma de los primeros cubos
-- José A. Alonso Jiménez
-- Sevilla, 22 de septiembre de 2021
-- ---------------------------------------------------------------------
-- ---------------------------------------------------------------------
-- Demostrar que la suma de los primeros cubos
-- 0³ + 1³ + 2³ + 3³ + ··· + n³
-- es (n(n+1)/2)²
-- ---------------------------------------------------------------------
import data.nat.basic
import tactic
open nat
variable (n : ℕ)
set_option pp.structure_projections false
@[simp]
-- Sum of the first cubes: sumaCubos n = 0³ + 1³ + ... + n³.
def sumaCubos : ℕ → ℕ
| 0 := 0
| (n+1) := sumaCubos n + (n+1)^3
-- 1ª demostración
-- First proof: induction on n; the step is a detailed calc chain showing
-- 4·Σ = (n(n+1))² implies 4·Σ' = ((n+1)(n+2))².
example :
  4 * sumaCubos n = (n*(n+1))^2 :=
begin
  induction n with n HI,
  { simp, },
  { calc 4 * sumaCubos (succ n)
        = 4 * (sumaCubos n + (n+1)^3)
          : by simp
    ... = 4 * sumaCubos n + 4*(n+1)^3
          : by ring
    ... = (n*(n+1))^2 + 4*(n+1)^3
          : by {congr; rw HI}
    ... = (n+1)^2*(n^2+4*n+4)
          : by ring
    ... = (n+1)^2*(n+2)^2
          : by ring
    ... = ((n+1)*(n+2))^2
          : by ring
    ... = (succ n * (succ n + 1)) ^ 2
          : by simp, },
end
-- 2ª demostración
-- Second proof: same induction, with the middle algebraic steps collapsed
-- into single ring/simp invocations.
example :
  4 * sumaCubos n = (n*(n+1))^2 :=
begin
  induction n with n HI,
  { simp, },
  { calc 4 * sumaCubos (succ n)
        = 4 * sumaCubos n + 4*(n+1)^3
          : by {simp ; ring}
    ... = (n*(n+1))^2 + 4*(n+1)^3
          : by {congr; rw HI}
    ... = ((n+1)*(n+2))^2
          : by ring
    ... = (succ n * (succ n + 1)) ^ 2
          : by simp, },
end
-- Referencia:
-- ¿Qué es la Matemática? https://bit.ly/3lrPKAp de R. Courant y
-- H. Robbins p. 22
|
{"author": "jaalonso", "repo": "Calculemus", "sha": "0fb664ab298c0e90b4b8034729a2cdad20503e18", "save_path": "github-repos/lean/jaalonso-Calculemus", "path": "github-repos/lean/jaalonso-Calculemus/Calculemus-0fb664ab298c0e90b4b8034729a2cdad20503e18/src/Suma_de_los_primeros_cubos.lean"}
|
(* Title: HOL/Auth/n_flash_lemma_on_inv__83.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_flash Protocol Case Study*}
theory n_flash_lemma_on_inv__83 imports n_flash_base
begin
section{*All lemmas on causal relation between inv__83 and some rule r*}
(* Each lemma below follows the same generated pattern: a rule instance r and
   the invariant inv__83 are obtained from the premises, then a case split on
   whether the rule's node equals the invariant's node discharges the goal. *)
lemma n_PI_Remote_GetVsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_Get src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_Get src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Same scheme for the GetX variant of the remote PI rule. *)
lemma n_PI_Remote_GetXVsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_GetX src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_GetX src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* NI_Nak is parameterised by the destination node rather than the source. *)
lemma n_NI_NakVsinv__83:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Nak dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Nak dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* The three __part__ variants of Local_Get_Nak share the identical proof. *)
lemma n_NI_Local_Get_Nak__part__0Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__1Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__2Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__0Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Local_Get_Get__part__1 src vs inv__83.  Unlike the sibling
   lemmas, the src = p__Inv4 case cannot be closed with ?P1/?P2: it is
   discharged via ?P3 by exhibiting an auxiliary (strengthening) formula —
   the witness passed to exI below — over Sta.HomeUniMsg.Cmd and
   Sta.Dir.Pending.  The src ~= p__Inv4 case is the usual ?P2. *)
lemma n_NI_Local_Get_Get__part__1Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P3 s"
(* explicit witness formula for the existential in ?P3 *)
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeUniMsg'') ''Cmd'')) (Const UNI_PutX)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Local_Get_Put_Head N src vs inv__83: ?P1 / ?P2 by case on src = p__Inv4. *)
lemma n_NI_Local_Get_Put_HeadVsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Local_Get_Put src vs inv__83: ?P1 / ?P2 scheme. *)
lemma n_NI_Local_Get_PutVsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Local_Get_Put_Dirty src vs inv__83: ?P1 / ?P2 scheme. *)
lemma n_NI_Local_Get_Put_DirtyVsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Remote_Get_Nak src dst (src ~= dst) vs inv__83: three-way case
   split on which of src/dst equals p__Inv4; ?P1 when src = p__Inv4,
   otherwise ?P2. *)
lemma n_NI_Remote_Get_NakVsinv__83:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Remote_Get_Nak_Home dst vs inv__83: both cases close with ?P1. *)
lemma n_NI_Remote_Get_Nak_HomeVsinv__83:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Remote_Get_Put src dst (src ~= dst) vs inv__83: three-way split;
   ?P1 when src = p__Inv4, otherwise ?P2. *)
lemma n_NI_Remote_Get_PutVsinv__83:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Remote_Get_Put_Home dst vs inv__83: both cases close with ?P1. *)
lemma n_NI_Remote_Get_Put_HomeVsinv__83:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Local_GetX_Nak__part__0 src vs inv__83: ?P1 / ?P2 by case on src = p__Inv4. *)
lemma n_NI_Local_GetX_Nak__part__0Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Local_GetX_Nak__part__1 src vs inv__83: same ?P1/?P2 scheme. *)
lemma n_NI_Local_GetX_Nak__part__1Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Local_GetX_Nak__part__2 src vs inv__83: same ?P1/?P2 scheme. *)
lemma n_NI_Local_GetX_Nak__part__2Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Local_GetX_GetX__part__0 src vs inv__83: same ?P1/?P2 scheme. *)
lemma n_NI_Local_GetX_GetX__part__0Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Local_GetX_GetX__part__1 src vs inv__83: same ?P1/?P2 scheme. *)
lemma n_NI_Local_GetX_GetX__part__1Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rules n_NI_Local_GetX_PutX_1 .. _6 (each parameterized by N and src) vs
   inv__83: all six follow the standard ?P1 (src = p__Inv4) / ?P2 (otherwise)
   scheme. *)
lemma n_NI_Local_GetX_PutX_1Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_2Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_3Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_4Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_5Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_6Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rules n_NI_Local_GetX_PutX_7 variants and _8_Home variants (N src) vs
   inv__83: all follow the standard ?P1 (src = p__Inv4) / ?P2 (otherwise)
   scheme. *)
lemma n_NI_Local_GetX_PutX_7__part__0Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__1Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__0Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__1Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_HomeVsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_Home_NODE_GetVsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rules n_NI_Local_GetX_PutX_8 and _8_NODE_Get take two distinct node
   parameters (src, pp); the proof does a three-way case split on which of
   them equals p__Inv4 — ?P1 when src = p__Inv4, ?P2 otherwise. *)
lemma n_NI_Local_GetX_PutX_8Vsinv__83:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_NODE_GetVsinv__83:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__0Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__1Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* The lemmas below appear machine-generated (paraVerifier style): each shows
   that one protocol rule preserves invariant inv__83, by case analysis on
   whether the rule's node parameters coincide with the invariant's node
   p__Inv4, discharging every case with auto and combining them via satx. *)

(* Rule n_NI_Local_GetX_PutX_10_Home vs inv__83: two cases on src = p__Inv4. *)
lemma n_NI_Local_GetX_PutX_10_HomeVsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed

(* Rule n_NI_Local_GetX_PutX_10 has two node parameters (src, pp), so the
   split produces three cases against p__Inv4. *)
lemma n_NI_Local_GetX_PutX_10Vsinv__83:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Local_GetX_PutX_11 vs inv__83: two cases on src = p__Inv4. *)
lemma n_NI_Local_GetX_PutX_11Vsinv__83:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed

(* Rule n_NI_Remote_GetX_Nak vs inv__83: three cases over (src, dst). *)
lemma n_NI_Remote_GetX_NakVsinv__83:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed

(* Rule n_NI_Remote_GetX_Nak_Home vs inv__83: both cases satisfy ?P1. *)
lemma n_NI_Remote_GetX_Nak_HomeVsinv__83:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* Rule n_NI_Remote_GetX_PutX vs inv__83: three cases over (src, dst). *)
lemma n_NI_Remote_GetX_PutXVsinv__83:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed

(* Rule n_NI_Remote_GetX_PutX_Home vs inv__83: the only lemma here proved via
   ?P3 — each case supplies an explicit auxiliary formula as the exI witness. *)
lemma n_NI_Remote_GetX_PutX_HomeVsinv__83:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''CacheState'')) (Const CACHE_E))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeUniMsg'') ''Cmd'')) (Const UNI_GetX)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed

(* Rule n_NI_Remote_Put vs inv__83: two cases on dst = p__Inv4. *)
lemma n_NI_Remote_PutVsinv__83:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Put dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Put dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed

(* Rule n_NI_Remote_PutX vs inv__83: two cases on dst = p__Inv4. *)
lemma n_NI_Remote_PutXVsinv__83:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_PutX dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_PutX dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
(* The next six rules are parameterless (local/home rules), so no case split
   on p__Inv4 is needed: ?P1 holds directly in each proof. *)
lemma n_PI_Local_Get_GetVsinv__83:
assumes a1: "(r=n_PI_Local_Get_Get )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_GetX__part__0Vsinv__83:
assumes a1: "(r=n_PI_Local_GetX_GetX__part__0 )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_GetX__part__1Vsinv__83:
assumes a1: "(r=n_PI_Local_GetX_GetX__part__1 )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Nak_HomeVsinv__83:
assumes a1: "(r=n_NI_Nak_Home )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Local_PutVsinv__83:
assumes a1: "(r=n_NI_Local_Put )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Local_PutXAcksDoneVsinv__83:
assumes a1: "(r=n_NI_Local_PutXAcksDone )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__83 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
(* The remaining rules do not touch any state mentioned by inv__83, so each
   lemma is discharged uniformly with the noEffectOnRule introduction rule. *)
lemma n_PI_Local_GetX_PutX__part__0Vsinv__83:
assumes a1: "r=n_PI_Local_GetX_PutX__part__0 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_WbVsinv__83:
assumes a1: "r=n_NI_Wb " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_StoreVsinv__83:
assumes a1: "\<exists> src data. src\<le>N\<and>data\<le>N\<and>r=n_Store src data" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_3Vsinv__83:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_3 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_1Vsinv__83:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_1 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_ReplaceVsinv__83:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_PI_Remote_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_Store_HomeVsinv__83:
assumes a1: "\<exists> data. data\<le>N\<and>r=n_Store_Home data" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_ReplaceVsinv__83:
assumes a1: "r=n_PI_Local_Replace " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_existsVsinv__83:
assumes a1: "\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_InvAck_exists src pp" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_PutXVsinv__83:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_PI_Remote_PutX dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvVsinv__83:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Inv dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_PutXVsinv__83:
assumes a1: "r=n_PI_Local_PutX " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_Get_PutVsinv__83:
assumes a1: "r=n_PI_Local_Get_Put " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ShWbVsinv__83:
assumes a1: "r=n_NI_ShWb N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX_HeadVld__part__0Vsinv__83:
assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__0 N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ReplaceVsinv__83:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX__part__1Vsinv__83:
assumes a1: "r=n_PI_Local_GetX_PutX__part__1 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_exists_HomeVsinv__83:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_exists_Home src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Replace_HomeVsinv__83:
assumes a1: "r=n_NI_Replace_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Nak_ClearVsinv__83:
assumes a1: "r=n_NI_Nak_Clear " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_2Vsinv__83:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_2 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX_HeadVld__part__1Vsinv__83:
assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__1 N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_FAckVsinv__83:
assumes a1: "r=n_NI_FAck " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__83 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
{"author": "paraVerifier", "repo": "paraVerifier", "sha": "5de62b433a160bf8d714e0a5085a38b9dd8899f0", "save_path": "github-repos/isabelle/paraVerifier-paraVerifier", "path": "github-repos/isabelle/paraVerifier-paraVerifier/paraVerifier-5de62b433a160bf8d714e0a5085a38b9dd8899f0/proof_scripts/flash/n_flash_lemma_on_inv__83.thy"}
|
# Smoke tests for the default Taylor-model algorithm on the van der Pol system:
# algorithm selection, option forwarding, solution shifting, and split initial
# conditions with/without threading.
@testset "TMJets algorithm (TMJets21b)" begin
    prob, tspan = vanderpol()
    # default algorithm for nonlinear systems
    sol = solve(prob, tspan=tspan)
    @test sol.alg isa TMJets
    # pass the algorithm explicitly
    sol = solve(prob, tspan=tspan, TMJets())
    @test sol.alg isa TMJets
    # pass options outside algorithm
    sol = solve(prob, T=0.2, maxsteps=2100)
    @test sol.alg isa TMJets && sol.alg.maxsteps == 2100
    # NOTE(review): identical to the previous call despite the "alias" comment —
    # presumably it was meant to exercise an alternative keyword spelling; confirm.
    sol = solve(prob, T=0.2, maxsteps=2100) # alias
    @test sol.alg isa TMJets && sol.alg.maxsteps == 2100
    sol = solve(prob, T=0.2, orderT=7, orderQ=1)
    @test sol.alg isa TMJets && sol.alg.orderT == 7 && sol.alg.orderQ == 1
    sol = solve(prob, T=0.2, abstol=1e-11)
    @test sol.alg isa TMJets && sol.alg.abstol == 1e-11
    # NOTE(review): duplicated call, same remark as above.
    sol = solve(prob, T=0.2, abstol=1e-11)
    @test sol.alg isa TMJets && sol.alg.abstol == 1e-11
    # shifting a solution in time shifts its time span by the same amount
    @test ReachabilityAnalysis.tspan(shift(sol,1.0))==ReachabilityAnalysis.tspan(sol)+1.0
    # split initial conditions
    X0, S = initial_state(prob), system(prob)
    X0s = split(X0, [2, 1]) # split along direction x
    sols = solve(IVP(S, X0s), T=0.1, threading=false) # no threading
    @test flowpipe(sols) isa MixedFlowpipe
    sols = solve(IVP(S, X0s), T=0.1, threading=true) # with threading (default)
    @test flowpipe(sols) isa MixedFlowpipe
end
# Taylor-model algorithm on a 1D linear IVP: reach-set getters and
# intersection with a state invariant.
@testset "TMJets algorithm (TMJets21b): linear IVPs" begin
    prob, dt = exponential_1d()
    sol = solve(prob, tspan=dt, TMJets())
    @test sol.alg isa TMJets
    # getter functions for a taylor model reach-set
    R = sol[1]
    @test domain(R) == tspan(R)
    @test diam(remainder(R)[1]) < 1e-9
    @test get_order(R) == [8]
    @test polynomial(R) isa Vector{Taylor1{TaylorN{Float64}}}
    @test expansion_point(R) ≈ [IA.Interval(0.0)]
    # test intersection with invariant
    prob, dt = exponential_1d(invariant=HalfSpace([-1.0], -0.3)) # x >= 0.3
    sol_inv = solve(prob, tspan=dt, TMJets())
    @test [0.3] ∈ overapproximate(sol_inv[end], Zonotope)
    m = length(sol_inv)
    # check that the following reach-set escapes the invariant
    # (sol is the unconstrained run, so its step m+1 exists)
    @test [0.3] ∉ overapproximate(sol[m+1], Zonotope)
    # TODO test higher order system
    # prob, tspan = linear5D_homog()
    # sol = solve(prob, tspan=tspan, TMJets())
    # @test sol.alg isa TMJets
    # TODO test linear system with input
    # prob, tspan = linear5D()
    # sol = solve(prob, tspan=tspan, TMJets())
    # @test sol.alg isa TMJets
end
# Same linear-IVP checks as above, but for the explicit TMJets20 algorithm
# (note the tighter remainder bound 1e-13 vs 1e-9).
@testset "TMJets algorithm (TMJets20): linear IVPs" begin
    prob, dt = exponential_1d()
    sol = solve(prob, tspan=dt, TMJets20())
    @test sol.alg isa TMJets20
    # getter functions for a taylor model reach-set
    R = sol[1]
    @test domain(R) == tspan(R)
    @test diam(remainder(R)[1]) < 1e-13
    @test get_order(R) == [8]
    @test polynomial(R) isa Vector{Taylor1{TaylorN{Float64}}}
    @test expansion_point(R) ≈ [IA.Interval(0.0)]
    # test intersection with invariant
    prob, dt = exponential_1d(invariant=HalfSpace([-1.0], -0.3)) # x >= 0.3
    sol_inv = solve(prob, tspan=dt, TMJets20())
    @test [0.3] ∈ overapproximate(sol_inv[end], Zonotope)
    m = length(sol_inv)
    # check that the following reach-set escapes the invariant
    @test [0.3] ∉ overapproximate(sol[m+1], Zonotope)
end
# Discretized 1D Burgers equation (4 grid points): check that the solver
# preserves the system dimension.
@testset "1D Burgers equation (TMJets21b)" begin
    L0 = 1. # domain length
    U0 = 1. # Re = 20.
    x = range(-0.5*L0, 0.5*L0, length=4)
    # Initial velocity
    X0 = Singleton(-U0*sin.(2*π/L0*x))
    # IVP definition
    prob = @ivp(x' = burgers!(x), dim=4, x(0) ∈ X0)
    sol = solve(prob, tspan=(0.0, 1.0), alg=TMJets());
    @test dim(sol) == 4
end
#=
alg = TMJets(abstol=1e-10, orderT=10, orderQ=2)
# reach mode
sol = solve(P, T=7.0, alg)
@test set(sol[1]) isa Hyperrectangle # check default set representation
=#
|
{"hexsha": "1861ae5c5906ffe3aec36be93af931eeacf26807", "size": 3780, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/algorithms/TMJets.jl", "max_stars_repo_name": "lyg1597/ReachabilityAnalysis.jl", "max_stars_repo_head_hexsha": "2fdd273e895166dc1bec727bb2cfa209d198927f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 138, "max_stars_repo_stars_event_min_datetime": "2020-03-30T16:14:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T08:26:48.000Z", "max_issues_repo_path": "test/algorithms/TMJets.jl", "max_issues_repo_name": "lyg1597/ReachabilityAnalysis.jl", "max_issues_repo_head_hexsha": "2fdd273e895166dc1bec727bb2cfa209d198927f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 258, "max_issues_repo_issues_event_min_datetime": "2020-03-30T14:13:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T17:07:04.000Z", "max_forks_repo_path": "test/algorithms/TMJets.jl", "max_forks_repo_name": "lyg1597/ReachabilityAnalysis.jl", "max_forks_repo_head_hexsha": "2fdd273e895166dc1bec727bb2cfa209d198927f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-04-23T03:15:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T23:11:32.000Z", "avg_line_length": 34.6788990826, "max_line_length": 89, "alphanum_fraction": 0.6428571429, "num_tokens": 1288}
|
import numpy as np
from trust_based_filterer import TrustBasedFilterer
from surprise import Dataset, AlgoBase, PredictionImpossible
from surprise.model_selection import cross_validate
class Inverse_distance_weighted_tbr(AlgoBase):
    """Inverse-distance-weighted, trust-based recommender built on Surprise.

    The rating of user ``u`` for item ``i`` is estimated as a weighted mean of
    the ratings given to ``i`` by the (up to) 10 raters with the highest trust
    weight toward ``u``, the weights coming from :class:`TrustBasedFilterer`.
    """

    def __init__(self, sim_options=None):
        # Bug fix: the default used to be a mutable ``{}``, which is shared
        # across all calls/instances.  ``None`` is the sentinel now; passing a
        # dict (or nothing) behaves exactly as before.
        AlgoBase.__init__(self, sim_options={} if sim_options is None else sim_options)

    def fit(self, trainset):
        """Compute similarities on ``trainset`` and build the trust filterer."""
        AlgoBase.fit(self, trainset)
        self.sim = self.compute_similarities()
        self._filterer = TrustBasedFilterer(list(trainset.all_ratings()), self.sim)
        return self

    def estimate(self, u, i):
        """Estimate the rating of (inner) user ``u`` for (inner) item ``i``.

        :raises PredictionImpossible: if ``u`` or ``i`` is not in the trainset.
        :return: the trust-weighted mean rating, or 0 if no rater carries weight.
        """
        if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):
            raise PredictionImpossible('User and/or item is unknown.')
        # Users with a non-zero entry for item i, their ratings, and u's
        # trust weight for each of them.
        indexes = np.nonzero(self._filterer._customers_versus_products_table[:, i])[0]
        rate_array = self._filterer._customers_versus_products_table[indexes, i]
        weight_array = self._filterer._weight_matrix[u, indexes]
        # Keep the (up to) 10 highest-weight raters.  argpartition is O(n)
        # where a full argsort would be O(n log n); the kth index is clamped
        # with max(-10, 1 - size) so it stays valid for short arrays.
        indexes = np.argpartition(weight_array, max(-10, 1 - weight_array.size))[-10:]
        rate_array = rate_array[indexes]
        weight_array = weight_array[indexes]
        # Weighted mean with a 0 fallback when all selected weights are zero.
        numerator = np.sum(rate_array * weight_array)
        denominator = np.sum(weight_array)
        return 0 if denominator == 0 else float(numerator) / denominator
if __name__ == '__main__':
    # Smoke-test: 5-fold cross-validation of the recommender on the
    # MovieLens-100k builtin dataset (downloaded on first use) with cosine
    # similarities.
    data = Dataset.load_builtin('ml-100k')
    sim_options = {'name': 'cosine',}
    algo = Inverse_distance_weighted_tbr(sim_options=sim_options)
    cross_validate(algo, data, cv=5, verbose=True)
|
{"hexsha": "39afa3b6b6517966c64b2b1c6b198dcff773ac55", "size": 1501, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/test_module/test.py", "max_stars_repo_name": "oonat/inverse-distance-weighted-trust-based-recommender", "max_stars_repo_head_hexsha": "3f559f3e7dbc565da373f6297362ddf307b2d0ec", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/test_module/test.py", "max_issues_repo_name": "oonat/inverse-distance-weighted-trust-based-recommender", "max_issues_repo_head_hexsha": "3f559f3e7dbc565da373f6297362ddf307b2d0ec", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/test_module/test.py", "max_forks_repo_name": "oonat/inverse-distance-weighted-trust-based-recommender", "max_forks_repo_head_hexsha": "3f559f3e7dbc565da373f6297362ddf307b2d0ec", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8653846154, "max_line_length": 80, "alphanum_fraction": 0.7628247835, "include": true, "reason": "import numpy", "num_tokens": 398}
|
import logging
from math import log
import numpy as np
# Positional indexes into the compact per-report list built by
# BM25F_EXT.fit_transform: [id, DID, unigram token ids, unigram summary tfs,
# unigram summary length, unigram description tfs, unigram description length,
# the same five bigram entries, then the categorical fields].
SUN_REPORT_ID_INDEX = 0
SUN_REPORT_DID_INDEX = 1
# Unigram channel: token ids plus per-field term frequencies and lengths.
SUN_REPORT_UNIGRAM_TOKEN = 2
SUN_REPORT_UNIGRAM_SUM = 3
SUN_REPORT_UNIGRAM_SUM_LEN = 4
SUN_REPORT_UNIGRAM_DESC = 5
SUN_REPORT_UNIGRAM_DESC_LEN = 6
# Bigram channel, same layout as the unigram one.
SUN_REPORT_BIGRAM_TOKEN = 7
SUN_REPORT_BIGRAM_SUM = 8
SUN_REPORT_BIGRAM_SUM_LEN = 9
SUN_REPORT_BIGRAM_DESC = 10
SUN_REPORT_BIGRAM_DESC_LEN = 11
# Categorical report fields.
SUN_REPORT_VERSION = 12
SUN_REPORT_COMPONENT = 13
SUN_REPORT_SUB_COMPONENT = 14
SUN_REPORT_TYPE = 15
SUN_REPORT_PRIORITY = 16
def calculate_length(tokens):
    """Return the total term frequency of a tokenized field.

    :param tokens: iterable of ``(token_id, frequency)`` pairs.
    :return: sum of the frequency components (0 for an empty iterable).
    """
    # The original accumulated into a local named ``len``, shadowing the
    # builtin; a generator expression with sum() avoids that and the loop.
    return sum(freq for _, freq in tokens)
def score_bm25f(query_tk, query_sum, query_desc, cand_tk, cand_sum, cand_sum_len,
                cand_desc, cand_desc_len, k1, k3, wf_list, bf_list, avg_lengths, IDF):
    """BM25F score of a candidate report against a query report.

    Both token-id sequences must be sorted ascending; the function walks them
    with a sorted merge and scores only the shared tokens.  ``wf_list``,
    ``bf_list`` and ``avg_lengths`` are per-field (summary, description)
    weights, length-normalization factors and average lengths; ``IDF`` maps a
    token id to its inverse document frequency.
    """
    field_lengths = (cand_sum_len, cand_desc_len)
    score = 0
    qi, ci = 0, 0
    q_total, c_total = len(query_tk), len(cand_tk)
    while qi < q_total and ci < c_total:
        q_tok = query_tk[qi]
        c_tok = cand_tk[ci]
        # Advance whichever side holds the smaller token id.
        if q_tok < c_tok:
            qi += 1
            continue
        if c_tok < q_tok:
            ci += 1
            continue
        # Shared token: accumulate field-weighted term and query weights.
        term_w = 0
        query_w = 0
        q_tfs = (float(query_sum[qi]), float(query_desc[qi]))
        c_tfs = (float(cand_sum[ci]), float(cand_desc[ci]))
        for qtf, ctf, wf, bf, length, avg_len in zip(q_tfs, c_tfs, wf_list, bf_list,
                                                     field_lengths, avg_lengths):
            if ctf != 0:
                term_w += (wf * ctf) / ((1 - bf) + bf * length / avg_len)
            if qtf != 0:
                query_w += wf * qtf
        # Saturated term part times the query part, scaled by the token's IDF.
        score += IDF[q_tok] * (term_w / (k1 + term_w)) * (((k3 + 1) * query_w) / (k3 + query_w))
        qi += 1
        ci += 1
    return score
class BM25F_EXT:
    """BM25F ranker over bug reports with unigram and bigram channels.

    :meth:`fit_transform` computes IDF tables and per-field average lengths
    and compacts every report dict into a flat list (indexed by the
    ``SUN_REPORT_*`` constants); :meth:`similarity` then combines a unigram
    and a bigram BM25F score with the two channel weights.
    """

    def __init__(self, total_weight_unigram, total_weight_bigram, k1_unigram, k3_unigram, w_unigram_sum,
                 w_unigram_desc, bf_unigram_sum, bf_unigram_desc, k1_bigram, k3_bigram, w_bigram_sum,
                 w_bigram_desc, bf_bigram_sum, bf_bigram_desc):
        """Store the BM25F hyper-parameters.

        Field-specific weights are kept as (summary, description) tuples so
        they can be zipped against per-field statistics in ``score_bm25f``.
        """
        self.k1_unigram = k1_unigram
        self.k3_unigram = k3_unigram
        self.w_unigram = (w_unigram_sum, w_unigram_desc)
        self.bf_unigram = (bf_unigram_sum, bf_unigram_desc)
        self.total_weight_unigram = total_weight_unigram
        self.k1_bigram = k1_bigram
        self.k3_bigram = k3_bigram
        self.w_bigram = (w_bigram_sum, w_bigram_desc)
        self.bf_bigram = (bf_bigram_sum, bf_bigram_desc)
        self.total_weight_bigram = total_weight_bigram
        logging.getLogger().info("BM25F weights: {}".format(self.__dict__))
        # Populated by fit_transform (IDF becomes a list indexed by token id).
        self.IDF = {}
        self.unigram_average = [0., 0.]
        self.bigram_average = [0., 0.]

    @staticmethod
    def aggregate_field_tf(summary, description):
        """Merge two sorted ``(token_id, tf)`` lists into aligned arrays.

        :param summary: sorted (token_id, tf) pairs of the summary field.
        :param description: sorted (token_id, tf) pairs of the description field.
        :return: ``(token_ids, summary_tfs, summary_length, description_tfs,
            description_length)`` where the tf arrays carry 0 for tokens
            absent from that field.
        """
        sum_idx = 0
        desc_idx = 0
        sum_len = len(summary)
        desc_len = len(description)
        union_tk = []
        union_sum = []
        union_desc = []
        # Sorted merge: advance the side holding the smaller token id.
        while sum_idx < sum_len and desc_idx < desc_len:
            sum_token_id, sum_token_tf = summary[sum_idx]
            desc_token_id, desc_token_tf = description[desc_idx]
            if sum_token_id > desc_token_id:
                union_tk.append(desc_token_id)
                union_sum.append(0)
                union_desc.append(desc_token_tf)
                desc_idx += 1
            elif desc_token_id > sum_token_id:
                union_tk.append(sum_token_id)
                union_sum.append(sum_token_tf)
                union_desc.append(0)
                sum_idx += 1
            else:
                union_tk.append(sum_token_id)
                union_sum.append(sum_token_tf)
                union_desc.append(desc_token_tf)
                desc_idx += 1
                sum_idx += 1
        # Drain whichever field still has tokens left.
        while sum_idx < sum_len:
            sum_token_id, sum_token_tf = summary[sum_idx]
            union_tk.append(sum_token_id)
            union_sum.append(sum_token_tf)
            union_desc.append(0)
            sum_idx += 1
        while desc_idx < desc_len:
            desc_token_id, desc_token_tf = description[desc_idx]
            union_tk.append(desc_token_id)
            union_sum.append(0)
            union_desc.append(desc_token_tf)
            desc_idx += 1
        # NOTE(review): uint8 wraps silently for tf > 255 — confirm upstream
        # frequencies stay below that bound.
        return np.asarray(union_tk, dtype=np.uint32), np.asarray(union_sum, dtype=np.uint8), sum(union_sum), \
            np.asarray(union_desc, dtype=np.uint8), sum(union_desc)

    def fit_transform(self, reports, max_token_id, replace_reports=True):
        """Compute IDF / average field lengths and compact every report.

        :param reports: report dicts with 'S-U'/'D-U'/'A-U' (unigram summary,
            description and their union) and the 'S-B'/'D-B'/'A-B' bigram
            counterparts, plus id and categorical fields.
        :param max_token_id: size of the IDF table (token ids are < this).
        :param replace_reports: overwrite ``reports`` in place when True,
            otherwise return a fresh list.
        :return: list of compact reports, indexable via ``SUN_REPORT_*``.
        """
        n_reports = 0
        self.IDF = [0.0] * max_token_id
        new_reports = reports if replace_reports else [None] * len(reports)
        for idx, report in enumerate(reports):
            # Document frequencies from the unified ("A-") token lists.
            for token_id, tf in report['A-U']:
                self.IDF[token_id] += 1.0
            new = [report['id'], report['DID']]
            # (removed leftover debug code: `import sys; sys.getsizeof(report['id'])`
            # had no effect on the result)
            self.unigram_average[0] += calculate_length(report['S-U'])
            self.unigram_average[1] += calculate_length(report['D-U'])
            new.extend(self.aggregate_field_tf(report['S-U'], report['D-U']))
            for token_id, tf in report['A-B']:
                self.IDF[token_id] += 1.0
            self.bigram_average[0] += calculate_length(report['S-B'])
            self.bigram_average[1] += calculate_length(report['D-B'])
            new.extend(self.aggregate_field_tf(report['S-B'], report['D-B']))
            new.extend(
                [report['VERSION'], report['COMPONENT'], report['SUB-COMPONENT'], report['TYPE'], report['PRIORITY']])
            new_reports[idx] = new
            n_reports += 1
        # Per-field average lengths over all reports.
        self.unigram_average[0] /= n_reports
        self.unigram_average[1] /= n_reports
        self.bigram_average[0] /= n_reports
        self.bigram_average[1] /= n_reports
        # IDF = log2(N / df); unseen tokens keep 0.
        for token_id in range(len(self.IDF)):
            if self.IDF[token_id] == 0:
                continue
            self.IDF[token_id] = log(n_reports / self.IDF[token_id], 2)
        logging.getLogger().info(
            "Reports: {}, Unigram Average: {}, Bigram Average: {}".format(len(reports), self.unigram_average,
                                                                          self.bigram_average))
        return new_reports

    def similarity(self, query, candidate):
        """Weighted sum of unigram and bigram BM25F scores of two compact reports."""
        # Unpack the unigram and bigram channels of both reports.
        query_unigram = query[SUN_REPORT_UNIGRAM_TOKEN]
        query_unigram_sum = query[SUN_REPORT_UNIGRAM_SUM]
        query_unigram_sum_len = query[SUN_REPORT_UNIGRAM_SUM_LEN]
        query_unigram_desc = query[SUN_REPORT_UNIGRAM_DESC]
        query_unigram_desc_len = query[SUN_REPORT_UNIGRAM_DESC_LEN]
        query_bigram = query[SUN_REPORT_BIGRAM_TOKEN]
        query_bigram_sum = query[SUN_REPORT_BIGRAM_SUM]
        query_bigram_sum_len = query[SUN_REPORT_BIGRAM_SUM_LEN]
        query_bigram_desc = query[SUN_REPORT_BIGRAM_DESC]
        query_bigram_desc_len = query[SUN_REPORT_BIGRAM_DESC_LEN]
        candidate_unigram = candidate[SUN_REPORT_UNIGRAM_TOKEN]
        candidate_unigram_sum = candidate[SUN_REPORT_UNIGRAM_SUM]
        candidate_unigram_sum_len = candidate[SUN_REPORT_UNIGRAM_SUM_LEN]
        candidate_unigram_desc = candidate[SUN_REPORT_UNIGRAM_DESC]
        candidate_unigram_desc_len = candidate[SUN_REPORT_UNIGRAM_DESC_LEN]
        candidate_bigram = candidate[SUN_REPORT_BIGRAM_TOKEN]
        candidate_bigram_sum = candidate[SUN_REPORT_BIGRAM_SUM]
        candidate_bigram_sum_len = candidate[SUN_REPORT_BIGRAM_SUM_LEN]
        candidate_bigram_desc = candidate[SUN_REPORT_BIGRAM_DESC]
        candidate_bigram_desc_len = candidate[SUN_REPORT_BIGRAM_DESC_LEN]
        unigram_score = score_bm25f(query_unigram, query_unigram_sum, query_unigram_desc, candidate_unigram,
                                    candidate_unigram_sum, candidate_unigram_sum_len, candidate_unigram_desc,
                                    candidate_unigram_desc_len, self.k1_unigram, self.k3_unigram, self.w_unigram,
                                    self.bf_unigram, self.unigram_average, self.IDF)
        bi_score = score_bm25f(query_bigram, query_bigram_sum, query_bigram_desc, candidate_bigram,
                               candidate_bigram_sum, candidate_bigram_sum_len, candidate_bigram_desc,
                               candidate_bigram_desc_len, self.k1_bigram, self.k3_bigram, self.w_bigram, self.bf_bigram,
                               self.bigram_average, self.IDF)
        return self.total_weight_unigram * unigram_score + self.total_weight_bigram * bi_score
|
{"hexsha": "8c486bbc08afdb9b2bd832b824be8e4d00e4a00c", "size": 9022, "ext": "py", "lang": "Python", "max_stars_repo_path": "classical_approach/bm25f.py", "max_stars_repo_name": "happygirlzt/soft_alignment_model_bug_deduplication", "max_stars_repo_head_hexsha": "9c529542749a52e377baeb99d1782920bc72df49", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "classical_approach/bm25f.py", "max_issues_repo_name": "happygirlzt/soft_alignment_model_bug_deduplication", "max_issues_repo_head_hexsha": "9c529542749a52e377baeb99d1782920bc72df49", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classical_approach/bm25f.py", "max_forks_repo_name": "happygirlzt/soft_alignment_model_bug_deduplication", "max_forks_repo_head_hexsha": "9c529542749a52e377baeb99d1782920bc72df49", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-04T07:36:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-04T07:36:24.000Z", "avg_line_length": 36.8244897959, "max_line_length": 120, "alphanum_fraction": 0.6300155176, "include": true, "reason": "import numpy", "num_tokens": 2215}
|
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
from bisect import bisect_right
from logging import warning
from typing import Union
import torch
from numpy import asarray, where
from sklearn.metrics import accuracy_score, roc_curve, auc, average_precision_score, f1_score
def _maybe_cast_torch_objects_to_numpy(logits, labels):
"""
Casts objects to Numpy array
:param logits: ood or classification logits
:param labels: ood or classification labels
:return: casted logits and labels
"""
if isinstance(logits, torch.Tensor):
warning("Better not to pass torch tensors for logits. Too much copyting from GPU")
logits = logits.detach().cpu().numpy()
return asarray(logits), asarray(labels)
def _validate_ood_labels(labels):
"""Ensures that labels are either 0 or 1. Accepts lists and numpy arrays"""
labels = asarray(labels)
if not ((labels == 0) | (labels == 1)).all():
raise RuntimeError("OOD labels can only be 0 or 1")
def _validate_sizes(logits, labels, only_batch_size=False):
"""Checks if sizes are same, if `only_batch_size` is True checks only first dimension"""
if not logits.size or not labels.size:
raise RuntimeError("Passed empty array to metric")
if not only_batch_size:
if logits.shape != labels.shape:
raise RuntimeError("Predictions and labels should have same shape")
else:
if logits.shape[0] != labels.shape[0]:
raise RuntimeError("Predictions and labels should have same batch size")
def classification_accuracy(predictions, labels):
    """
    Classification accuracy metric

    :param predictions: classification predictions: batch_size X 1
    :param labels: classification labels: batch_size X 1
    :return: accuracy score
    """
    predictions, labels = _maybe_cast_torch_objects_to_numpy(predictions, labels)
    _validate_sizes(predictions, labels)
    # labels passed first (sklearn's y_true-first convention, consistent with
    # the f1 metrics below); accuracy is symmetric so the value is unchanged
    return accuracy_score(labels.flatten(), predictions.flatten())
def classification_f1_macro_score(predictions, labels):
    """Macro-averaged F1 score over classification predictions and labels."""
    preds, gold = _maybe_cast_torch_objects_to_numpy(predictions, labels)
    _validate_sizes(preds, gold)
    return f1_score(gold, preds, average='macro')
def classification_f1_micro_score(predictions, labels):
    """Micro-averaged F1 score over classification predictions and labels."""
    preds, gold = _maybe_cast_torch_objects_to_numpy(predictions, labels)
    _validate_sizes(preds, gold)
    return f1_score(gold, preds, average='micro')
def _cast_and_validate_ood(ood_scores, labels):
    """Combine validation helpers for OOD metrics"""
    scores, gold = _maybe_cast_torch_objects_to_numpy(ood_scores, labels)
    _validate_ood_labels(gold)
    _validate_sizes(scores, gold)
    return scores, gold
def ood_classification_accuracy(ood_scores, labels, threshold):
    """
    Classification accuracy metric for OOD task

    :param ood_scores: OOD certainty scores: batch_size X 1
    :param labels: OOD labels, 1 for OOD, 0 for in-domain: batch_size X 1
    :param threshold: decision rule for `ood_scores`
    :return: OOD classification accuracy
    """
    scores, gold = _cast_and_validate_ood(ood_scores, labels)
    # scores at or above the threshold are flagged as OOD
    flagged = scores >= threshold
    return accuracy_score(flagged, gold)
def roc_auc(ood_scores, labels, swap_labels: bool = False):
    """
    Area under ROC curve for OOD task

    :param ood_scores: OOD certainty scores: batch_size X 1
    :param labels: OOD labels, 1 for OOD, 0 for in-domain: batch_size X 1
    :param swap_labels: whether to swap labels, i.e. positive class would be a negative and vice versa.
    :return: AUROC
    """
    scores, gold = _cast_and_validate_ood(ood_scores, labels)
    if swap_labels:
        scores, gold = swap_labels_scores(scores, gold)
    false_pos, true_pos, _ = roc_curve(gold, scores)
    return auc(false_pos, true_pos)
def roc_aupr(ood_scores, labels, swap_labels: bool = False):
    """
    Area under PR curve for OOD task

    :param ood_scores: OOD certainty scores: batch_size X 1
    :param labels: OOD labels, 1 for OOD, 0 for in-domain: batch_size X 1
    :param swap_labels: whether to swap labels, i.e. positive class would be a negative and vice versa.
    :return: AUPR
    """
    scores, gold = _cast_and_validate_ood(ood_scores, labels)
    if swap_labels:
        scores, gold = swap_labels_scores(scores, gold)
    return average_precision_score(gold, scores)
def _custom_bisect(tpr, tpr_level):
idx = bisect_right(tpr, tpr_level)
while idx > -1 and tpr[idx - 1] >= tpr_level:
idx -= 1
return idx
def fpr_at_x_tpr(ood_scores, labels, tpr_level: Union[int, float], swap_labels: bool = False):
    """
    Compute False Positive rate (1 - in-domain recall) at a fixed True Positive rate (OOD recall)

    :param ood_scores: OOD certainty scores: batch_size X 1
    :param labels: OOD labels, 1 for OOD, 0 for in-domain: batch_size X 1
    :param tpr_level: OOD recall, 0-100 for int arg, 0.0-1.0 for float arg
    :param swap_labels: whether to swap labels, i.e. positive class would be a negative and vice versa.
    :return: FPR@{tpr_level}TPR
    """
    assert isinstance(tpr_level, (int, float))
    if isinstance(tpr_level, int):
        # integer levels are given as percentages
        assert 0 <= tpr_level <= 100
        tpr_level = tpr_level / 100
    assert 0 <= tpr_level <= 1
    scores, gold = _cast_and_validate_ood(ood_scores, labels)
    if swap_labels:
        scores, gold = swap_labels_scores(scores, gold)
    fpr, tpr, _ = roc_curve(gold, scores, drop_intermediate=False)
    # clamp the bisected index into the valid range of the curve
    idx = min(max(_custom_bisect(tpr, tpr_level), 0), len(fpr) - 1)
    return fpr[idx]
def swap_labels_scores(scores, labels):
    """
    Swaps positive class with negative one, revert scores order.

    :param scores: certainty scores
    :param labels: binary labels, 1 for positive class, 0 for negative class
    :return: (negated scores, flipped labels)
    """
    flipped = where(labels, 0, 1)
    return -scores, flipped
|
{"hexsha": "b70f60088dbc8cef5fae49b4e52d56c39b0c5ccb", "size": 6434, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/metrics.py", "max_stars_repo_name": "alabrashJr/Maha-Odd", "max_stars_repo_head_hexsha": "cce4bab1f30589cf3d52636fe511c0269058679e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/metrics.py", "max_issues_repo_name": "alabrashJr/Maha-Odd", "max_issues_repo_head_hexsha": "cce4bab1f30589cf3d52636fe511c0269058679e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/metrics.py", "max_forks_repo_name": "alabrashJr/Maha-Odd", "max_forks_repo_head_hexsha": "cce4bab1f30589cf3d52636fe511c0269058679e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9939393939, "max_line_length": 103, "alphanum_fraction": 0.7228784582, "include": true, "reason": "from numpy", "num_tokens": 1612}
|
#!/usr/bin/python
# Dead-reckoning plot: integrate heading samples from a rotation-sensor CSV log
# into an (x, y) track and display it with matplotlib.
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# columns are accessed by name below, so the CSV must have 'time' and
# 'Z_value' header fields
data = np.genfromtxt('kv-gh-rot.csv', delimiter=',', names=True)
# print "2"
print(type(data))
# NOTE(review): matplotlib.backends may not expose `backend` until a backend
# module is loaded -- confirm this attribute access works on the target setup
print(matplotlib.backends.backend)
# plt.plot(data['time'], data['Z_value'])
# plt.show()
locX = []
locY = []
curX = 0.0
curY = 0.0
# integrate a unit-speed walk: each sample contributes a step of length dt in
# the direction of heading Z_value + 14.5 degrees (constant offset applied);
# note the loop skips the very last sample (range stops at len-1)
for i in range(1, min(len(data['time']), len(data['Z_value'])) - 1):
    # print data['time'][i], data['Z_value'][i]
    curX += (data['time'][i] - data['time'][i-1]) * np.sin(np.deg2rad(data['Z_value'][i]+14.5))
    curY += (data['time'][i] - data['time'][i-1]) * np.cos(np.deg2rad(data['Z_value'][i]+14.5))
    # print curLoc
    locX.append(curX)
    locY.append(curY)
# print loc[0,:]
# draw the integrated track as disconnected red dots, equal-aspect axes
plt.plot(locX, locY, marker='.', color='r', ls='')
# plt.xlim(-200, 200)
# plt.ylim(-200, 200)
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
|
{"hexsha": "e072e9c080810dee050f9ea760fc3ae6d5e3cdfd", "size": 847, "ext": "py", "lang": "Python", "max_stars_repo_path": "graph.py", "max_stars_repo_name": "kunal15595/inertial-nav", "max_stars_repo_head_hexsha": "d5ab9c16e3befd5a1e2bd9b06255c88c114040bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-12-19T06:45:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-16T02:55:46.000Z", "max_issues_repo_path": "graph.py", "max_issues_repo_name": "kunal15595/inertial-nav", "max_issues_repo_head_hexsha": "d5ab9c16e3befd5a1e2bd9b06255c88c114040bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graph.py", "max_forks_repo_name": "kunal15595/inertial-nav", "max_forks_repo_head_hexsha": "d5ab9c16e3befd5a1e2bd9b06255c88c114040bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9117647059, "max_line_length": 93, "alphanum_fraction": 0.626918536, "include": true, "reason": "import numpy", "num_tokens": 284}
|
#This software is Copyright 2012 The Regents of the University of California. All Rights Reserved.
#Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes for non-profit institutions, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following three paragraphs appear in all copies.
#Permission to make commercial use of this software may be obtained by contacting:
#Technology Transfer Office
#9500 Gilman Drive, Mail Code 0910
#University of California
#La Jolla, CA 92093-0910
#(858) 534-5815
#invent@ucsd.edu
#This software program and documentation are copyrighted by The Regents of the University of California. The software program and documentation are supplied "as is", without any accompanying services from The Regents. The Regents does not warrant that the operation of the program will be uninterrupted or error-free. The end-user understands that the program was developed for research purposes and is advised not to rely exclusively on the program for any reason.
#IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO
#ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR
#CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING
#OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
#EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF
#THE POSSIBILITY OF SUCH DAMAGE. THE UNIVERSITY OF
#CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
#INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
#MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
#PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
#MODIFICATIONS.
# Functions for comparing contours
from numpy import *
from geometry import *
from point_set import *
def biggestGap(contour1, contour2):
    """See Hausdorff distance: the largest nearest-neighbour distance from
    contour1's points into contour2's point set."""
    locations1 = contour1.locations()
    locations2 = contour2.locations()
    biggest = 0
    for coordList1 in locations1:
        point1 = array(coordList1)
        # nearest-neighbour distance from point1 into contour2, seeded from
        # the first point (so an empty contour2 still raises, as before)
        gap = distance(point1, array(locations2[0]))
        for coordList2 in locations2:
            d = distance(point1, array(coordList2))
            if d < gap:
                gap = d
        # keep the largest per-point gap seen so far
        if gap > biggest:
            biggest = gap
    return biggest
def overlap_old(contour1, contour2):
    """Deprecated: Jaccard overlap computed on fixed 1000x1000 canvases."""
    canvas1 = zeros((1000, 1000), dtype=int8)
    canvas2 = zeros((1000, 1000), dtype=int8)
    bb1 = contour1.get2DBoundingBox()
    bb2 = contour2.get2DBoundingBox()
    # paste each binary image at its absolute bounding-box position
    canvas1[bb1[0][0]:bb1[1][0] + 1, bb1[0][1]:bb1[1][1] + 1] = contour1.binaryImage
    canvas2[bb2[0][0]:bb2[1][0] + 1, bb2[0][1]:bb2[1][1] + 1] = contour2.binaryImage
    intersection = logical_and(canvas1, canvas2) * 1
    union = logical_or(canvas1, canvas2) * 1
    return float(sum(intersection)) / float(sum(union))
def overlap(contour1, contour2):
    """Contour overlap: intersection-over-union of the two binary images,
    computed on a canvas just large enough for both bounding boxes."""
    bb1 = contour1.get2DBoundingBox()
    bb2 = contour2.get2DBoundingBox()
    # joint bounding box of the two contours
    x0 = min(bb1[0][0], bb2[0][0])
    y0 = min(bb1[0][1], bb2[0][1])
    x1 = max(bb1[1][0], bb2[1][0])
    y1 = max(bb1[1][1], bb2[1][1])
    canvas1 = zeros((x1 - x0 + 1, y1 - y0 + 1))
    canvas2 = zeros((x1 - x0 + 1, y1 - y0 + 1))
    # paste each image at its position relative to the joint box origin
    canvas1[bb1[0][0]-x0:bb1[1][0]-x0+1, bb1[0][1]-y0:bb1[1][1]-y0+1] = contour1.binaryImage
    canvas2[bb2[0][0]-x0:bb2[1][0]-x0+1, bb2[0][1]-y0:bb2[1][1]-y0+1] = contour2.binaryImage
    intersection = logical_and(canvas1, canvas2)
    union = logical_or(canvas1, canvas2)
    return float(sum(intersection)) / float(sum(union))
|
{"hexsha": "1878a2f37a446dcf00ff094d3e8e657c1ec62cb0", "size": 4573, "ext": "py", "lang": "Python", "max_stars_repo_path": "cytoseg/contour_comparison.py", "max_stars_repo_name": "slash-segmentation/DP2", "max_stars_repo_head_hexsha": "6f768e4b8a75a3ab2bf1359ae94704332426a4d6", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cytoseg/contour_comparison.py", "max_issues_repo_name": "slash-segmentation/DP2", "max_issues_repo_head_hexsha": "6f768e4b8a75a3ab2bf1359ae94704332426a4d6", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cytoseg/contour_comparison.py", "max_forks_repo_name": "slash-segmentation/DP2", "max_forks_repo_head_hexsha": "6f768e4b8a75a3ab2bf1359ae94704332426a4d6", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1083333333, "max_line_length": 465, "alphanum_fraction": 0.714848021, "include": true, "reason": "from numpy", "num_tokens": 1223}
|
[STATEMENT]
lemma (in nf_invar) CVdI: "\<lbrakk>u\<in>C\<rbrakk> \<Longrightarrow> u\<in>Vd d"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. u \<in> C \<Longrightarrow> u \<in> Vd d
[PROOF STEP]
using C_ss
[PROOF STATE]
proof (prove)
using this:
C \<subseteq> Vd d
goal (1 subgoal):
1. u \<in> C \<Longrightarrow> u \<in> Vd d
[PROOF STEP]
by (auto)
|
{"llama_tokens": 161, "file": "EdmondsKarp_Maxflow_Augmenting_Path_BFS", "length": 2}
|
#pragma once
#include "lue/py/framework/type_traits.hpp"
#include "lue/framework/core/shape.hpp"
// TODO Refactor with similar blocks in other stream.hpp headers.
#include <boost/predef.h>
#if BOOST_COMP_MSVC
# include <boost/io/ostream_joiner.hpp>
# define lue_make_ostream_joiner boost::io::make_ostream_joiner
#else
# include <experimental/iterator>
# define lue_make_ostream_joiner std::experimental::make_ostream_joiner
#endif
#include <sstream>
#include <tuple>
namespace lue::framework {
template<
typename OutputStream,
typename... Type>
OutputStream& operator<<(
OutputStream& stream,
std::tuple<Type...> const& tuple)
{
stream << '(';
std::apply(
[&stream](
auto&&... ts)
{
((stream << ts << ", "), ...);
},
tuple);
stream << ')';
return stream;
}
template<
typename OutputStream,
typename Count,
Rank rank>
OutputStream& operator<<(
OutputStream& stream,
Shape<Count, rank> const& shape)
{
stream << '(';
std::copy(
std::begin(shape), std::end(shape),
lue_make_ostream_joiner(stream, ", "));
stream << ')';
return stream;
}
template<
typename Count,
Rank rank>
std::string as_string(
Shape<Count, rank> const& shape)
{
std::ostringstream stream;
stream << shape;
return stream.str();
}
} // lue::framework
|
{"hexsha": "de4ec4a59bd0d6c12b09ee673c724b7bb7b8eeae", "size": 1594, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "source/framework/python/include/lue/py/framework/stream.hpp", "max_stars_repo_name": "pcraster/lue", "max_stars_repo_head_hexsha": "e64c18f78a8b6d8a602b7578a2572e9740969202", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-04-14T15:51:12.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-14T15:51:12.000Z", "max_issues_repo_path": "source/framework/python/include/lue/py/framework/stream.hpp", "max_issues_repo_name": "pcraster/lue", "max_issues_repo_head_hexsha": "e64c18f78a8b6d8a602b7578a2572e9740969202", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 262.0, "max_issues_repo_issues_event_min_datetime": "2016-08-11T10:12:02.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-13T18:09:16.000Z", "max_forks_repo_path": "source/framework/python/include/lue/py/framework/stream.hpp", "max_forks_repo_name": "pcraster/lue", "max_forks_repo_head_hexsha": "e64c18f78a8b6d8a602b7578a2572e9740969202", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-03-11T09:49:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-11T09:49:41.000Z", "avg_line_length": 22.1388888889, "max_line_length": 73, "alphanum_fraction": 0.5514429109, "num_tokens": 351}
|
/*
* ====================================================================
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
* ====================================================================
*/
#include <boost/test/unit_test.hpp>
#include "../src/private/depth_private.hpp"
namespace svn = ::apache::subversion::svnxx;
namespace impl = ::apache::subversion::svnxx::impl;
// Unit tests for the svn::depth <-> svn_depth_t conversions and the
// to_*string() name formatters, over every enumerator.
BOOST_AUTO_TEST_SUITE(depth);

// C++ enum -> C API constant
BOOST_AUTO_TEST_CASE(convert_to)
{
  BOOST_TEST((impl::convert(svn::depth::unknown) == svn_depth_unknown));
  BOOST_TEST((impl::convert(svn::depth::exclude) == svn_depth_exclude));
  BOOST_TEST((impl::convert(svn::depth::empty) == svn_depth_empty));
  BOOST_TEST((impl::convert(svn::depth::files) == svn_depth_files));
  BOOST_TEST((impl::convert(svn::depth::immediates) == svn_depth_immediates));
  BOOST_TEST((impl::convert(svn::depth::infinity) == svn_depth_infinity));
}

// C API constant -> C++ enum (round trip of the above)
BOOST_AUTO_TEST_CASE(convert_from)
{
  BOOST_TEST((impl::convert(svn_depth_unknown) == svn::depth::unknown));
  BOOST_TEST((impl::convert(svn_depth_exclude) == svn::depth::exclude));
  BOOST_TEST((impl::convert(svn_depth_empty) == svn::depth::empty));
  BOOST_TEST((impl::convert(svn_depth_files) == svn::depth::files));
  BOOST_TEST((impl::convert(svn_depth_immediates) == svn::depth::immediates));
  BOOST_TEST((impl::convert(svn_depth_infinity) == svn::depth::infinity));
}

// narrow-character names
BOOST_AUTO_TEST_CASE(char_names)
{
  BOOST_TEST((to_string(svn::depth::unknown) == "unknown"));
  BOOST_TEST((to_string(svn::depth::exclude) == "exclude"));
  BOOST_TEST((to_string(svn::depth::empty) == "empty"));
  BOOST_TEST((to_string(svn::depth::files) == "files"));
  BOOST_TEST((to_string(svn::depth::immediates) == "immediates"));
  BOOST_TEST((to_string(svn::depth::infinity) == "infinity"));
}

// wide-character names
BOOST_AUTO_TEST_CASE(wchar_names)
{
  BOOST_TEST((to_wstring(svn::depth::unknown) == L"unknown"));
  BOOST_TEST((to_wstring(svn::depth::exclude) == L"exclude"));
  BOOST_TEST((to_wstring(svn::depth::empty) == L"empty"));
  BOOST_TEST((to_wstring(svn::depth::files) == L"files"));
  BOOST_TEST((to_wstring(svn::depth::immediates) == L"immediates"));
  BOOST_TEST((to_wstring(svn::depth::infinity) == L"infinity"));
}

// UTF-16 names
BOOST_AUTO_TEST_CASE(char16_names)
{
  BOOST_TEST((to_u16string(svn::depth::unknown) == u"unknown"));
  BOOST_TEST((to_u16string(svn::depth::exclude) == u"exclude"));
  BOOST_TEST((to_u16string(svn::depth::empty) == u"empty"));
  BOOST_TEST((to_u16string(svn::depth::files) == u"files"));
  BOOST_TEST((to_u16string(svn::depth::immediates) == u"immediates"));
  BOOST_TEST((to_u16string(svn::depth::infinity) == u"infinity"));
}

// UTF-32 names
BOOST_AUTO_TEST_CASE(char32_names)
{
  BOOST_TEST((to_u32string(svn::depth::unknown) == U"unknown"));
  BOOST_TEST((to_u32string(svn::depth::exclude) == U"exclude"));
  BOOST_TEST((to_u32string(svn::depth::empty) == U"empty"));
  BOOST_TEST((to_u32string(svn::depth::files) == U"files"));
  BOOST_TEST((to_u32string(svn::depth::immediates) == U"immediates"));
  BOOST_TEST((to_u32string(svn::depth::infinity) == U"infinity"));
}

BOOST_AUTO_TEST_SUITE_END();
|
{"hexsha": "151c2d9f92fc5d2765c47f7449f8ea6be7ede961", "size": 3978, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "subversion/bindings/cxx/tests/test_depth.cpp", "max_stars_repo_name": "timgates42/subversion", "max_stars_repo_head_hexsha": "0f088f530747140c6783c2eeb77ceff8e8613c42", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2017-01-03T03:20:56.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-24T22:05:09.000Z", "max_issues_repo_path": "subversion/bindings/cxx/tests/test_depth.cpp", "max_issues_repo_name": "timgates42/subversion", "max_issues_repo_head_hexsha": "0f088f530747140c6783c2eeb77ceff8e8613c42", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2016-06-12T17:02:25.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-03T11:08:18.000Z", "max_forks_repo_path": "subversion/bindings/cxx/tests/test_depth.cpp", "max_forks_repo_name": "timgates42/subversion", "max_forks_repo_head_hexsha": "0f088f530747140c6783c2eeb77ceff8e8613c42", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2017-01-21T00:15:13.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-04T07:23:50.000Z", "avg_line_length": 43.2391304348, "max_line_length": 78, "alphanum_fraction": 0.6679235797, "num_tokens": 1047}
|
# -*- coding: utf-8 -*-
"""
Simple Power Plant Economic Dispatch using Linear Programming in Python
Setup: Power Co. operates a 200MW power plant that consists of four
gas-fired turbines. The cost to operate each generator/turbine (in $/hr) is a
quadratic function of the power generation (MW). To solve with an LP, we
will linearize the quadratic cost function. Each generator has a min
and max generation capacity (MW) limits.
Question: What is optimal generation level for each generator/turbine?
The goal of this code to find the optimal generation level for each turbine
so as to minimize costs while satisfying demand.
LP problem statement:
min SUM_ij (cost_coeff(i,j) * x(i,j))
s.t. SUM_ij (x(i,j)) = demand - SUM_i (p_tot_min_i)
x_lb(i,j) <= x(i,j) <= x_ub(i,j) for every i,j
notes:
generator i, production interval j
x(i,j) is choice variable- generator i's incremental production within
production interval j. Example: if production interval j=1 is
(20MW,40MW) and generator i produces 35MW, x(i,j)=15MW
cost_coeff(i,j): vector of coefficients for lineraized cost function
these params are the slopes of the linearized cost function
demand: total demand requirement (MW)
p_tot_min_i: min power that generator i must produce. note that x is
incremental production above the minimum amount of power a generator must
produce
x_lb(i,j): lower bound power generation for generator i and in production
interval j (MW)
x_ub(i,j): upper bound power generation for generator i and in production
interval j (MW)
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import linprog
# number of line segments to make when linearizing quadratic cost functions
n_seg = 4


class generator:
    """A gas-fired turbine/generator with a quadratic operating-cost curve.

    Cost ($/hr) is a + b*x + c*x**2 for power generation x (MW), restricted
    to the operating range [p_min, p_max].  For the LP, the curve is
    linearized into `n_seg` equal-width segments; each LP choice variable is
    the incremental MW produced within one segment.
    """

    def __init__(self, name, quad_cost, p_min, p_max):
        self.name = name
        # parameters [a, b, c] of the quadratic cost function a + b*x + c*x**2
        # cost in $/hr and x = MW power generation
        self.quad_cost = quad_cost
        self.p_max = p_max  # max power level (MW) -- original comments were swapped
        self.p_min = p_min  # min power level (MW)
        # bounds for each incremental choice variable: every segment spans an
        # equal share of the (p_max - p_min) operating range
        self.p_lb_ub = [(0, (self.p_max - self.p_min) / n_seg)] * n_seg
        # cost to run the generator at its minimum power level
        self.p_min_cost = np.dot(self.quad_cost, np.array([1, p_min, p_min**2]))

    def lin_cost(self):
        """Return the marginal cost ($/MW per hr) of each linearized segment.

        Fixed two defects in the original linearization:
        * segment endpoints are absolute power levels starting at p_min
          (the original sampled the curve starting at 0), and
        * each coefficient is the secant slope (c_end - c_strt) / seg_width
          (the original *multiplied* by the segment width).
        """
        seg_w = (self.p_max - self.p_min) / n_seg
        c_params = []
        for i in range(1, n_seg + 1):
            # absolute power at the start/end of segment i
            p_strt = self.p_min + seg_w * (i - 1)
            p_end = self.p_min + seg_w * i
            # cost at the segment endpoints
            c_strt = np.dot(self.quad_cost, np.array([1, p_strt, p_strt**2]))
            c_end = np.dot(self.quad_cost, np.array([1, p_end, p_end**2]))
            # slope of the cost secant over this segment ($/MW)
            c_params.append((c_end - c_strt) / seg_w)
        return c_params

    def plot_cost(self):
        """Plot this generator's quadratic cost curve over its operating range."""
        power = np.linspace(self.p_min, self.p_max, 50)
        plt.plot(power,
                 self.quad_cost[0] + self.quad_cost[1]*power + self.quad_cost[2]*power**2,
                 label=self.name)
# number of generators
n_gen = 4

# instances of the generator class: name, [a, b, c] cost params, p_min, p_max
gen_a = generator('gen_a', [30, 0.5, 0.05], 10, 45)
gen_b = generator('gen_b', [40, 0.4, 0.04], 15, 60)
gen_c = generator('gen_c', [50, 0.3, 0.03], 20, 55)
gen_d = generator('gen_d', [60, 0.2, 0.02], 15, 40)

# list of generators
gen_lst = [gen_a, gen_b, gen_c, gen_d]

# cost parameters from linearized cost functions
cost_params = []
# upper and lower bounds for each choice variable
p_bounds = []
# total min power generation summed across all generators
p_tot_min = 0
# total min cost summed across all generators
p_tot_min_cost = 0
for gen in gen_lst:
    cost_params += gen.lin_cost()
    p_bounds += gen.p_lb_ub
    p_tot_min += gen.p_min
    p_tot_min_cost += gen.p_min_cost
    # plot the cost functions
    gen.plot_cost()

# label and legend for plot
plt.ylabel('cost ($ per hour)')
plt.xlabel('power (MW)')
plt.legend()  # fixed: was `plt.legend` (bare attribute access, a no-op)
plt.show()

# total demand requirement is 150 MW
demand = 150

# equality constraint: total incremental generation across all segment
# variables must equal demand minus the mandatory minimum output
A = np.ones((1, n_gen * n_seg))  # fixed: was hard-coded 16

# solve the linear program
econ_dis = linprog(cost_params, A_eq=A, b_eq=demand-p_tot_min,
                   bounds=p_bounds, options={"disp": True})
print(econ_dis)

min_cost = econ_dis.fun + p_tot_min_cost
print('The minimum cost to meet %.0f MW demand is $%.0f' % (demand, min_cost))
print('Optimal power generation:')
j = 0
for gen in gen_lst:
    # sum this generator's n_seg incremental variables and add its minimum
    gen_x = econ_dis.x[j:j+n_seg].sum(axis=0) + gen.p_min
    print('Generator %s: %.1f MW' % (gen.name, gen_x))
    j += n_seg  # fixed: was a hard-coded 4
|
{"hexsha": "b3b2d96fc97e0db64c3c4400821e8f5feab74ebd", "size": 5304, "ext": "py", "lang": "Python", "max_stars_repo_path": "econ_dispatch_LP.py", "max_stars_repo_name": "redlinger/Simple-Econ-Dispatch-LP-Python", "max_stars_repo_head_hexsha": "6f29d76ab83ef8d41cda25e17164b9020bc33b74", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-25T05:46:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-25T05:46:20.000Z", "max_issues_repo_path": "econ_dispatch_LP.py", "max_issues_repo_name": "redlinger/Simple-Econ-Dispatch-LP-Python", "max_issues_repo_head_hexsha": "6f29d76ab83ef8d41cda25e17164b9020bc33b74", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "econ_dispatch_LP.py", "max_forks_repo_name": "redlinger/Simple-Econ-Dispatch-LP-Python", "max_forks_repo_head_hexsha": "6f29d76ab83ef8d41cda25e17164b9020bc33b74", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1582733813, "max_line_length": 98, "alphanum_fraction": 0.649321267, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1395}
|
def make_supercell(cell, diff_species):
    """Append all sites in a unit cell to a structure - must have cubic lattice.

    diff_species: if True, append per-atom species objects (colour by
    species); if False, append `atom.specie` per basis group instead.
    """
    basis = cell.copy()
    superCell = cell.copy()
    # lattice points of the cubic unit cell (all corners), dropping the
    # origin [0, 0, 0] where the basis already sits
    corners = [[i, j, k] for i in range(2) for j in range(2) for k in range(2)][1:]
    if diff_species:
        for atom in basis:
            for corner in corners:
                superCell.append(atom.species, atom.frac_coords + corner)
    else:
        for corner in corners:
            for atom in basis:
                superCell.append(atom.specie, atom.frac_coords + corner)
    return superCell
def cubicCell(cell, a3):
    """Append all sites in a unit cell to a structure.

    Tiles copies of the basis over every lattice-vector combination
    i, j, k in [-3, 3), which covers (and overshoots) a cubic cell of edge
    ``a3``.  Note that the (0, 0, 0) offset is included, so the original
    basis sites are duplicated on top of themselves, exactly as before.

    Cleaned up: removed the large commented-out while-loop experiments,
    the unused locals (thr_a, coord_base, pre-initialized i/j/k) and the
    unused local imports (Structure, Lattice, nglview).

    :param cell: pymatgen Structure defining the basis (left unmodified)
    :param a3: cubic cell edge length; currently unused -- the tiling range
        is fixed at [-3, 3).  Kept for backward compatibility.
    :return: new Structure containing all translated copies of the basis
    """
    import numpy as np

    basis = cell.copy()
    superCell = cell.copy()
    # rows of the primitive lattice-vector matrix
    prim_vec = np.asarray(cell.lattice.as_dict().get('matrix'))
    for i in range(-3, 3):
        for j in range(-3, 3):
            for k in range(-3, 3):
                offset = prim_vec[0]*i + prim_vec[1]*j + prim_vec[2]*k
                for atom in basis:
                    superCell.append(atom.species, atom.coords + offset,
                                     coords_are_cartesian=True)
    return superCell
def visLattice(lattice):
    """Render an empty unit-cell outline for the given lattice in nglview."""
    from pymatgen import Structure, Lattice
    import nglview as ngl
    # a single placeholder site is required to build a Structure
    placeholder = Structure(lattice, ['Cl'], [[0, 0, 0]],
                            to_unit_cell=True, coords_are_cartesian=False)
    viewer = ngl.show_pymatgen(placeholder)
    viewer.clear_representations()
    viewer.add_unitcell()
    return viewer
def visUC(SC, a3):
    """Show only the sites of SC lying inside a padded cubic cell of edge a3.

    Sites with every Cartesian coordinate in [-0.15*a3, 1.15*a3] get a
    ball+stick representation; everything else stays hidden.
    """
    from pymatgen import Structure, Lattice
    import nglview as ngl
    inside = []
    for ind, site in enumerate(SC.sites):
        within_lo = all(site.coords >= -a3 * .15)
        within_hi = all(site.coords <= a3 * 1.15)
        if within_lo and within_hi:
            inside.append(ind)
    viewer = ngl.show_pymatgen(SC)
    viewer.clear_representations()
    for i in inside:
        viewer.add_representation('ball+stick', aspectRatio=5, selection=[i])
    return viewer
|
{"hexsha": "dd2c94cddb41c0f750f8c5386511357d90f02000", "size": 3044, "ext": "py", "lang": "Python", "max_stars_repo_path": "MSE430Funcs/CrysStrucFuncs.py", "max_stars_repo_name": "KCMak653/MSE430Notebooks", "max_stars_repo_head_hexsha": "4f2ecfff557447de141121bbafbe5aa6bd60753b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MSE430Funcs/CrysStrucFuncs.py", "max_issues_repo_name": "KCMak653/MSE430Notebooks", "max_issues_repo_head_hexsha": "4f2ecfff557447de141121bbafbe5aa6bd60753b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MSE430Funcs/CrysStrucFuncs.py", "max_forks_repo_name": "KCMak653/MSE430Notebooks", "max_forks_repo_head_hexsha": "4f2ecfff557447de141121bbafbe5aa6bd60753b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1219512195, "max_line_length": 118, "alphanum_fraction": 0.6284494087, "include": true, "reason": "import numpy", "num_tokens": 864}
|
############################################################################################
# INFEASIBLE MODELS #
############################################################################################
# Augmented dynamics model for generic (vector-space) models: wraps `model`
# and appends N slack ("infeasible") controls. See the `InfeasibleModel`
# docstring below for the dynamics convention.
struct Infeasible{N,M,D<:AbstractModel} <: AbstractModel
    model::D            # the wrapped dynamics model
    _u::SVector{M,Int}  # inds to original controls
    _ui::SVector{N,Int} # inds to infeasible controls
end
# Infeasible-model counterpart for models whose state evolves on a Lie group;
# identical field layout to `Infeasible` but subtypes `LieGroupModel` so the
# state-difference machinery dispatches correctly.
struct InfeasibleLie{N,M,D<:AbstractModel} <: RobotDynamics.LieGroupModel
    model::D            # the wrapped dynamics model
    _u::SVector{M,Int}  # inds to original controls
    _ui::SVector{N,Int} # inds to infeasible controls
end
""" $(TYPEDEF)
An infeasible model is an augmented dynamics model that makes the system artifically fully
actuated by augmenting the control vector with `n` additional controls. The dynamics are
handled explicitly in discrete time:
``x_{k+1} = f(x_k,u_k,dt) + w_k``
where ``w_k`` are the additional `n`-dimensional controls. In practice, these are constrained
to be zero by the end of the solve.
# Constructors
```julia
InfeasibleModel(model::AbstractModel)
```
"""
const InfeasibleModel{N,M,D} = Union{Infeasible{N,M,D},InfeasibleLie{N,M,D}} where {N,M,D}
# Wrap a generic model: the first `m` indices address the original controls
# and the next `n` address the slack (infeasible) controls.
function InfeasibleModel(model::AbstractModel)
    n, m = size(model)
    inds_u = SVector{m}(1:m)
    inds_ui = SVector{n}(m .+ (1:n))
    return Infeasible(model, inds_u, inds_ui)
end
# Lie-group variant: identical index layout, but builds an `InfeasibleLie` so
# the Lie-group state interface is preserved.
function InfeasibleModel(model::RobotDynamics.LieGroupModel)
    n, m = size(model)
    inds_u = SVector{m}(1:m)
    inds_ui = SVector{n}(m .+ (1:n))
    return InfeasibleLie(model, inds_u, inds_ui)
end
# Lie-group state descriptor passes straight through to the wrapped model.
RobotDynamics.LieState(model::InfeasibleLie) = RobotDynamics.LieState(model.model)

# Generic Infeasible Methods
# State dimension is unchanged; the control vector gains N slack inputs.
RobotDynamics.state_dim(model::InfeasibleModel{n}) where n = n
RobotDynamics.control_dim(model::InfeasibleModel{n,m}) where {n,m} = n+m
# Infeasible models are defined in discrete time only.
RobotDynamics.dynamics(::InfeasibleModel, x, u) =
    throw(ErrorException("Cannot evaluate continuous dynamics on an infeasible model"))
# Discrete dynamics of the augmented model: evaluate the wrapped model with
# the first M controls, then add the N slack controls directly to the
# propagated state. Generated so the index ranges are compile-time constants.
@generated function RobotDynamics.discrete_dynamics(::Type{Q}, model::InfeasibleModel{N,M},
        z::AbstractKnotPoint{T,N}) where {T,N,M,Q<:Explicit}
    _u = SVector{M}((1:M) .+ N)        # original controls follow the N states in z.z
    _ui = SVector{N}((1:N) .+ (N+M))   # slack controls come last
    quote
        x = state(z)
        dt = z.dt
        u0 = z.z[$_u]
        ui = z.z[$_ui]
        # Propagate the nominal dynamics, then add the additive slack term.
        RobotDynamics.discrete_dynamics($Q, model.model, x, u0, z.t, dt) + ui
    end
end
# Rotation type passes through to the wrapped model. The original definition
# carried a stray, unused `where D` clause, removed here.
@inline RobotDynamics.rotation_type(model::InfeasibleModel) = rotation_type(model.model)
# Discrete Jacobian of the augmented model, generated so index ranges are
# compile-time constants. The state/original-control block is delegated to the
# wrapped model; the slack-control columns are an identity block because the
# slacks enter the dynamics additively.
@generated function RobotDynamics.discrete_jacobian!(::Type{Q}, ∇f, model::InfeasibleModel{N,M},
        z::AbstractKnotPoint{T,N}, cache=nothing) where {T,N,M,Q<:Explicit}
    ∇ui = [(@SMatrix zeros(N,N+M)) Diagonal(@SVector ones(N)) @SVector zeros(N)]
    _x = SVector{N}(1:N)
    _u = SVector{M}((1:M) .+ N)
    _z = SVector{N+M}(1:N+M)
    _ui = SVector{N}((1:N) .+ (N+M))
    zi = [:(z.z[$i]) for i = 1:N+M]
    NM1 = N+M+1
    NM = N+M
    ∇u0 = @SMatrix zeros(N,N)
    quote
        # Build KnotPoint for original model
        s0 = SVector{$NM1}($(zi...), z.dt)
        u0 = z.z[$_u]
        ui = z.z[$_ui]
        z_ = StaticKnotPoint(z.z[$_z], $_x, $_u, z.dt, z.t)
        # Fill the state/original-control block from the wrapped model.
        ∇f_ = uview(∇f, 1:N, 1:$NM)
        discrete_jacobian!($Q, ∇f_, model.model, z_)
        # ∇f[$_x, N+NM] .= ∇f_[$_x, N+M] # ∇dt
        # Identity block for the additive slack controls.
        ∇f[$_x, $_ui] .= Diagonal(@SVector ones(N))
        return
        # ∇f[$_x,$_ui]
        # [∇f[$_x, $_z] $∇u0 ∇dt] + $∇ui
    end
end
# ForwardDiff entry point: defer to the hand-written `discrete_jacobian!`
# specialization above, which already accounts for the slack-control structure.
function RD._discrete_jacobian!(::RD.ForwardAD, ::Type{Q}, ∇f, model::InfeasibleModel{N,M},
        z::AbstractKnotPoint{T,N}, cache=nothing) where {T,N,M,Q<:Explicit}
    return RD.discrete_jacobian!(Q, ∇f, model, z, cache)
end
# State-difference and geometry queries pass through to the wrapped model:
# the augmentation only touches the control vector, not the state space.
function RobotDynamics.state_diff(model::InfeasibleModel, x::SVector, x0::SVector)
    RobotDynamics.state_diff(model.model, x, x0)
end

function RobotDynamics.state_diff_jacobian!(G, model::InfeasibleModel, Z::Traj)
    RobotDynamics.state_diff_jacobian!(G, model.model, Z)
end

function RobotDynamics.∇²differential!(∇G, model::InfeasibleModel, x::SVector, dx::SVector)
    return ∇²differential!(∇G, model.model, x, dx)
end

RobotDynamics.state_diff_size(model::InfeasibleModel) = RobotDynamics.state_diff_size(model.model)

Base.position(model::InfeasibleModel, x::SVector) = position(model.model, x)

RobotDynamics.orientation(model::InfeasibleModel, x::SVector) = orientation(model.model, x)
"Calculate a dynamically feasible initial trajectory for an infeasible problem, given a
desired trajectory"
function infeasible_trajectory(model::InfeasibleModel{n,m}, Z0::Traj) where {T,n,m}
x,u = zeros(model)
ui = @SVector zeros(n)
Z = [KnotPoint(state(z), [control(z); ui], z.dt, z.t) for z in Z0]
N = length(Z0)
for k = 1:N-1
RobotDynamics.propagate_dynamics(RobotDynamics.RK4, model, Z[k+1], Z[k])
x′ = state(Z[k+1])
u_slack = state(Z0[k+1]) - x′
u = [control(Z0[k]); u_slack]
RobotDynamics.set_control!(Z[k], u)
RobotDynamics.set_state!(Z[k+1], x′ + u_slack)
end
return Traj(Z)
end
############################################################################################
# INFEASIBLE CONSTRAINT #
############################################################################################
""" $(TYPEDEF) Constraints additional ``infeasible'' controls to be zero.
Constructors: ```julia
InfeasibleConstraint(model::InfeasibleModel)
InfeasibleConstraint(n,m)
```
"""
struct InfeasibleConstraint{n} <: TO.ControlConstraint
ui::SVector{n,Int}
m::Int
function InfeasibleConstraint(n::Int, m::Int)
ui = SVector{n}((1:n) .+ m)
new{n}(ui, m)
end
end
# Build the constraint directly from the model's type parameters.
InfeasibleConstraint(model::InfeasibleModel{n,m}) where {n,m} = InfeasibleConstraint(n,m)
# Control dimension of the augmented model (n slacks plus m originals).
RobotDynamics.control_dim(con::InfeasibleConstraint{n}) where n = n + con.m
# Slack controls must equal zero exactly.
@inline TO.sense(::InfeasibleConstraint) = TO.Equality()
# One constraint row per slack control.
@inline Base.length(::InfeasibleConstraint{n}) where n = n
# Constraint value: the slice of `u` holding the slack ("infeasible")
# controls, which the solver drives to zero.
function TO.evaluate(con::InfeasibleConstraint, u::SVector)
    return u[con.ui]
end
# Constraint Jacobian: identity entries in the slack-control columns.
# Returns `true` to flag that the Jacobian is constant.
function TO.jacobian!(∇c, con::InfeasibleConstraint{n}, u::SVector) where n
    for (row, col) in enumerate(con.ui)
        ∇c[row, col] = 1
    end
    return true
end
|
{"hexsha": "930cfffde6955101424cdea5a9f96a426594238b", "size": 6092, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/infeasible_model.jl", "max_stars_repo_name": "tpr0p/Altro.jl", "max_stars_repo_head_hexsha": "cfe5f79fe64b454919d3edc26ad2ff2bb6cfe793", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2020-09-21T20:49:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T13:45:20.000Z", "max_issues_repo_path": "src/infeasible_model.jl", "max_issues_repo_name": "tpr0p/Altro.jl", "max_issues_repo_head_hexsha": "cfe5f79fe64b454919d3edc26ad2ff2bb6cfe793", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2020-07-11T00:04:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-02T16:36:08.000Z", "max_forks_repo_path": "src/infeasible_model.jl", "max_forks_repo_name": "tpr0p/Altro.jl", "max_forks_repo_head_hexsha": "cfe5f79fe64b454919d3edc26ad2ff2bb6cfe793", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-08-07T06:16:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-28T10:06:15.000Z", "avg_line_length": 34.418079096, "max_line_length": 98, "alphanum_fraction": 0.6229481287, "num_tokens": 1888}
|
################################################################################
# CORE DISPATCHVARIABLEREF METHOD EXTENSIONS
################################################################################
# Extend dispatch_variable_ref for dependent infinite parameters.
function dispatch_variable_ref(m::InfiniteModel,
                               idx::DependentParameterIndex
                               )::DependentParameterRef
    return DependentParameterRef(m, idx)
end
# Extend _add_data_object: register `object` in the model's dependent-parameter
# dictionary and record its index for ordered iteration over parameter objects.
function _add_data_object(m::InfiniteModel,
                          object::MultiParameterData
                          )::DependentParametersIndex
    idx = MOIUC.add_item(m.dependent_params, object)
    push!(m.param_object_indices, idx)
    return idx
end
# Extend _data_dictionary (type based): the dictionary backing all
# DependentParameters objects of the model.
function _data_dictionary(model::InfiniteModel,
    ::Type{DependentParameters})::MOIUC.CleverDict
    return model.dependent_params
end

# Extend _data_dictionary (ref based): same dictionary, reached via the
# owning model of the reference.
function _data_dictionary(pref::DependentParameterRef)::MOIUC.CleverDict
    return JuMP.owner_model(pref).dependent_params
end
# Extend _data_object: look up the backing data for `pref`, erroring if the
# parameter has been deleted from the model.
function _data_object(pref::DependentParameterRef)::MultiParameterData
    data = get(_data_dictionary(pref), JuMP.index(pref).object_index, nothing)
    if data === nothing
        error("Invalid dependent parameter reference, cannot find " *
              "corresponding parameter in the model. This is likely " *
              "caused by using the reference of a deleted parameter.")
    end
    return data
end
# Extend _core_variable_object: the DependentParameters object shared by the
# whole dependent group.
function _core_variable_object(pref::DependentParameterRef)::DependentParameters
    return _data_object(pref).parameters
end

# Return the number of dependent parameters involved (one name per parameter).
function _num_parameters(pref::DependentParameterRef)::Int
    return length(_data_object(pref).names)
end

# Extend _delete_data_object: removes the whole dependent group's data.
function _delete_data_object(vref::DependentParameterRef)::Nothing
    delete!(_data_dictionary(vref), JuMP.index(vref).object_index)
    return
end
################################################################################
# PARAMETER DEFINITION
################################################################################
# Store partially processed individual dependent parameters
struct _DependentParameter{S <: AbstractInfiniteSet, M <: AbstractDerivativeMethod}
    set::S
    supports::Vector{Float64}
    name::String
    deriv_method::M
    function _DependentParameter(set::S,
        supports::Union{Vector{<:Real}, Real},
        name::String,
        method::M
        )::_DependentParameter{S, M} where {S <: AbstractInfiniteSet, M <: AbstractDerivativeMethod}
        # Promote a scalar support to a one-element vector so the stored
        # field is always a Vector{Float64}.
        supp_vect = supports isa Real ? [supports] : supports
        return new{S, M}(set, supp_vect, name, method)
    end
end
## Use type dispatch to efficiently check the set(s)
# All InfiniteScalarSets: each parameter carries its own scalar set, so no
# cross-parameter consistency check is needed.
function _check_param_sets(_error::Function,
    params::AbstractArray{<:_DependentParameter{<:InfiniteScalarSet}}
    )::Nothing
    return
end
# All MultiDistributionSet with non SparseAxisArray: the parameter container
# must match the distribution's shape exactly.
# Fixes: removed unused `set_type` local; completed the previously truncated
# error message (it ended at "distribution $dist." with no verb).
function _check_param_sets(_error::Function,
    params::AbstractArray{<:_DependentParameter{<:MultiDistributionSet}}
    )::Nothing
    dist = first(params).set.distribution
    if size(dist) != size(params)
        _error("The dimensions of the parameters and the multi-dimensional " *
               "distribution $dist do not match.")
    end
    return
end
# All MultiDistributionSet with SparseAxisArray: unconditionally rejected,
# since a sparse container has no canonical shape to match the distribution.
function _check_param_sets(_error::Function,
    params::JuMPC.SparseAxisArray{<:_DependentParameter{<:MultiDistributionSet}}
    )
    _error("Cannot specify multiple-dimensional distribution set with a " *
           "`SparseAxisArray` of dependent infinite parameters.")
end
# All CollectionSet: one scalar set is required per parameter.
# Fix: removed the unused `set_type` local from the original.
function _check_param_sets(_error::Function,
    params::AbstractArray{<:_DependentParameter{<:CollectionSet}}
    )::Nothing
    sets = collection_sets(first(params).set)
    if length(sets) != length(params)
        _error("The dimensions of the parameters and the specified CollectionSet " *
               "do not match.")
    elseif params isa JuMPC.SparseAxisArray
        # Sparse containers have no canonical ordering, so the correspondence
        # between collection sets and parameters may be scrambled.
        @warn("CollectionSet order may not match the given `SparseAxisArray` " *
              "of specified dependent infinite parameters, consider instead " *
              "specifying the `InfiniteScalarSet` for each parameter using " *
              "the `set` keyword and the appropriate indices.")
    end
    return
end
# All some InfiniteArraySet (for extensions): user-defined array sets are
# accepted as-is; extensions are responsible for their own validation.
function _check_param_sets(_error::Function,
    params::AbstractArray{<:_DependentParameter{<:InfiniteArraySet}}
    )::Nothing
    return
end
# Mixed sets: a heterogeneous container is only allowed when every element
# uses its own scalar set (these are later gathered into a CollectionSet).
# Fix: removed the unused `set_type` local from the original.
function _check_param_sets(_error::Function,
    params::AbstractArray{<:_DependentParameter}
    )::Nothing
    if !all(param.set isa InfiniteScalarSet for param in params)
        _error("Cannot specify multiple `InfiniteScalarSets` for one container " *
               "of infinite dependent parameters.")
    end
    return
end
# Fallback: anything else is not a recognized infinite set specification.
function _check_param_sets(_error::Function, params)
    _error("Unrecognized input for infinite set.")
end
## Helpers for validating the derivative evaluation methods
# Expected format: every parameter must use a non-generative method, since
# dependent parameters cannot accommodate generative supports.
function _check_derivative_methods(_error::Function,
    params::AbstractArray{<:_DependentParameter})::Nothing
    all_ok = all(p.deriv_method isa NonGenerativeDerivativeMethod for p in params)
    if !all_ok
        _error("Cannot use generative derivative evaluation methods with dependent " *
               "infinite parameters. Only subtypes of `NonGenerativeDerivativeMethod` " *
               "can be used.")
    end
    return
end
# Fallback: reject anything that is not an array of _DependentParameters.
function _check_derivative_methods(_error::Function, params)
    _error("Unrecognized input for derivative method.")
end
## Use set type dispatch to make the proper InfiniteArraySet
# InfiniteArraySet: the group already shares one multi-dimensional set.
function _make_array_set(params::Vector{<:_DependentParameter{T}}
    )::T where {T <: InfiniteArraySet}
    return first(params).set
end

# InfiniteScalarSets: gather the individual scalar sets into a CollectionSet.
function _make_array_set(params::Vector{<:_DependentParameter{T}}
    )::CollectionSet{T} where {T <: InfiniteScalarSet}
    return CollectionSet([p.set for p in params])
end
# Build a DependentParameters object given an array of _DependentParameters.
# Validates the sets/methods, vectorizes the container, assembles the array
# set, and builds the support dictionary (user-given, generated, or empty).
# Returns `(parameters, names, indices)` for use by `add_parameters`.
function _build_parameters(_error::Function,
    params::AbstractArray{<:_DependentParameter};
    num_supports::Int = 0,
    sig_digits::Int = DefaultSigDigits,
    extra_kw_args...)
    # error with extra keywords
    for (kwarg, _) in extra_kw_args
        _error("Unrecognized keyword argument $kwarg")
    end
    # check the formatting
    _check_param_sets(_error, params)
    _check_derivative_methods(_error, params)
    # vectorize the parameter array
    indices = Collections._get_indices(params)
    ordered_params = Collections._make_ordered(params, indices)
    vector_params = _make_vector(ordered_params)
    # make the set
    set = _make_array_set(vector_params)
    # make the supports and labels (every parameter must supply the same
    # number of supports)
    lens = map(p -> length(p.supports), vector_params)
    _allequal(lens) || _error("Inconsistent support dimensions.")
    # we have supports
    if first(lens) != 0
        # build the support array transpose to fill in column order (leverage locality)
        trans_supps = Array{Float64}(undef, first(lens), length(vector_params))
        for i in 1:size(trans_supps, 2)
            trans_supps[:, i] = vector_params[i].supports
        end
        supps = permutedims(trans_supps)
        supports_in_set(supps, set) || _error("Supports violate infinite set domain.")
        supps = round.(supps, sigdigits = sig_digits)
        label = UserDefined
        supp_dict = Dict{Vector{Float64}, Set{DataType}}(@views supps[:, i] =>
                                            Set([label]) for i in 1:size(supps, 2))
    # we want to generate supports
    elseif num_supports != 0
        supps, label = generate_support_values(set, num_supports = num_supports,
                                               sig_digits = sig_digits)
        supp_dict = Dict{Vector{Float64}, Set{DataType}}(@views supps[:, i] =>
                                             Set([label]) for i in 1:size(supps, 2))
    # no supports are specified
    else
        supp_dict = Dict{Vector{Float64}, Set{DataType}}()
    end
    # make the parameter object
    names = map(p -> p.name, vector_params)
    methods = map(p -> p.deriv_method, vector_params)
    return DependentParameters(set, supp_dict, sig_digits, methods), names, indices
end
"""
add_parameters(model::InfiniteModel,
params::DependentParameters,
names::Vector{String} = ["noname", "noname", ...],
indices = nothing
)::AbstractArray{<:GeneralVariableRef}
Add `params` to `model` and return an appropriate container of the dependent
infinite parameter references. This is intended as an internal method for use
with [`@dependent_parameters`](@ref). However, if desired users can use this
to create a container of infinite dependent parameter without the use of a
macro. `names` denote the name of each parameter and `indices` denote the
indices of the expected container as used by `Containers._make_array`
(implemented by `VectorTuple`s), by default a `Vector` is returned.
**Example**
```julia-repl
julia> using Distributions
julia> dist = MvNormal(ones(3)); # 3 dimensional
julia> set = MultiDistributionSet(dist); # 3 dimensional
julia> params = DependentParameters(set, Dict{Vector{Float64}, Set{DatatType}}(), 10);
julia> prefs = add_parameters(model, params, ["par1", "par2", "par3"])
3-element Array{GeneralVariableRef,1}:
par1
par2
par3
```
"""
function add_parameters(model::InfiniteModel,
params::DependentParameters,
names::Vector{String} = String[],
indices = nothing
)::AbstractArray{<:GeneralVariableRef}
# get the number of parameters
num_params = length(params.set)
# process the names
if isempty(names)
names = ["noname" for i in 1:num_params]
end
# process the indices
if indices === nothing
indices = CartesianIndices(1:num_params)
end
# make the parameter model object
obj_num = length(_param_object_indices(model)) + 1
first_param_num = model.last_param_num + 1
last_param_num = model.last_param_num += num_params
param_nums = first_param_num:last_param_num
data_object = MultiParameterData(params, obj_num, param_nums, names)
# add the data object to the model and make the references
obj_index = _add_data_object(model, data_object)
prefs = [GeneralVariableRef(model, obj_index.value, DependentParameterIndex, i)
for i in 1:num_params]
return Collections._make_array(prefs, indices)
end
# Construct an expression to build an infinite set(s) (use with @dependent_parameters).
# Returns a quoted expression that, when evaluated in the macro caller's scope,
# validates the user's bounds/distribution/set input and produces the set.
function _construct_array_set(_error::Function, info::_ParameterInfoExpr)
    if (info.has_lb || info.has_ub) && !(info.has_lb && info.has_ub)
        _error("Must specify both an upper bound and a lower bound")
    elseif info.has_lb
        # Both bounds present: build an IntervalSet after a runtime Real check.
        check = :(isa($(info.lower_bound), Real))
        return :($(check) ? IntervalSet($(info.lower_bound), $(info.upper_bound)) : error("Bounds must be a real number."))
    elseif info.has_dist
        # Univariate distributions become scalar sets; everything else is
        # treated as a multivariate distribution set.
        check = :(isa($(info.distribution), Distributions.UnivariateDistribution))
        return :($(check) ? UniDistributionSet($(info.distribution)) : MultiDistributionSet($(info.distribution)))
    elseif info.has_set
        # Accept a ready-made set, or wrap a raw distribution appropriately.
        check1 = :(isa($(info.set), AbstractInfiniteSet))
        check2 = :(isa($(info.set), Distributions.UnivariateDistribution))
        return :($(check1) ? $(info.set) : ($(check2) ? UniDistributionSet($(info.set)) : MultiDistributionSet($(info.set))))
    else
        _error("Must specify upper/lower bounds, a distribution, or a set")
    end
end
################################################################################
# NAMING
################################################################################
# Get the parameter index in the DependentParameters object (i.e., this
# parameter's position within its dependent group).
_param_index(pref::DependentParameterRef)::Int = JuMP.index(pref).param_index
"""
JuMP.name(pref::DependentParameterRef)::String
Extend [`JuMP.name`](@ref JuMP.name(::JuMP.VariableRef)) to return the names of
infinite dependent parameters.
**Example**
```julia-repl
julia> name(pref)
"par_name"
```
"""
function JuMP.name(pref::DependentParameterRef)::String
object = get(_data_dictionary(pref), JuMP.index(pref).object_index, nothing)
return object === nothing ? "" : object.names[_param_index(pref)]
end
"""
JuMP.set_name(pref::DependentParameterRef, name::String)::Nothing
Extend [`JuMP.set_name`](@ref JuMP.set_name(::JuMP.VariableRef, ::String)) to set
names of dependent infinite parameters.
**Example**
```julia-repl
julia> set_name(vref, "par_name")
julia> name(vref)
"para_name"
```
"""
function JuMP.set_name(pref::DependentParameterRef, name::String)::Nothing
_data_object(pref).names[_param_index(pref)] = name
JuMP.owner_model(pref).name_to_param = nothing
return
end
################################################################################
# PARAMETER DEPENDENCIES
################################################################################
# NOTE: variable/function dependencies are stored once per dependent *group*,
# while measure/constraint/derivative dependencies are stored per individual
# parameter (indexed by `_param_index`).

# Extend _infinite_variable_dependencies
function _infinite_variable_dependencies(pref::DependentParameterRef
    )::Vector{InfiniteVariableIndex}
    return _data_object(pref).infinite_var_indices
end

# Extend _parameter_function_dependencies
function _parameter_function_dependencies(pref::DependentParameterRef
    )::Vector{ParameterFunctionIndex}
    return _data_object(pref).parameter_func_indices
end

# Extend _measure_dependencies (per-parameter)
function _measure_dependencies(pref::DependentParameterRef
    )::Vector{MeasureIndex}
    return _data_object(pref).measure_indices[_param_index(pref)]
end

# Extend _constraint_dependencies (per-parameter)
function _constraint_dependencies(pref::DependentParameterRef
    )::Vector{ConstraintIndex}
    return _data_object(pref).constraint_indices[_param_index(pref)]
end

# Extend _derivative_dependencies (per-parameter)
function _derivative_dependencies(pref::DependentParameterRef
    )::Vector{DerivativeIndex}
    return _data_object(pref).derivative_indices[_param_index(pref)]
end
"""
used_by_infinite_variable(pref::DependentParameterRef)::Bool
Return a `Bool` indicating if the dependent infinite parameter `pref` is used by
an infinite variable.
**Example**
```julia-repl
julia> used_by_infinite_variable(pref)
true
```
"""
function used_by_infinite_variable(pref::DependentParameterRef)::Bool
return !isempty(_infinite_variable_dependencies(pref))
end
"""
used_by_parameter_function(pref::DependentParameterRef)::Bool
Return a `Bool` indicating if the dependent infinite parameter `pref` is used by
an infinite parameter function.
**Example**
```julia-repl
julia> used_by_parameter_function(pref)
true
```
"""
function used_by_parameter_function(pref::DependentParameterRef)::Bool
return !isempty(_parameter_function_dependencies(pref))
end
"""
used_by_measure(pref::DependentParameterRef)::Bool
Return a `Bool` indicating if the dependent infinite parameter `pref` is used
by a measure.
**Example**
```julia-repl
julia> used_by_measure(pref)
true
```
"""
function used_by_measure(pref::DependentParameterRef)::Bool
return !isempty(_measure_dependencies(pref))
end
"""
used_by_constraint(pref::DependentParameterRef)::Bool
Return a `Bool` indicating if the dependent infinite parameter `pref` is used by
a constraint.
**Example**
```julia-repl
julia> used_by_constraint(pref)
false
```
"""
function used_by_constraint(pref::DependentParameterRef)::Bool
return !isempty(_constraint_dependencies(pref))
end
"""
used_by_derivative(pref::DependentParameterRef)::Bool
Return a `Bool` indicating if the dependent infinite parameter `pref` is used by
a derivative.
**Example**
```julia-repl
julia> used_by_derivative(pref)
false
```
"""
function used_by_derivative(pref::DependentParameterRef)::Bool
return !isempty(_derivative_dependencies(pref))
end
# Extend used by objective
used_by_objective(pref::DependentParameterRef)::Bool = false
"""
is_used(pref::DependentParameterRef)::Bool
Return a `Bool` indicating if the dependent infinite parameter `pref` is used in
the model.
**Example**
```julia-repl
julia> is_used(pref)
true
```
"""
function is_used(pref::DependentParameterRef)::Bool
return used_by_measure(pref) || used_by_constraint(pref) ||
used_by_infinite_variable(pref) || used_by_derivative(pref) ||
used_by_parameter_function(pref)
end
################################################################################
# PARAMETER OBJECT METHODS
################################################################################
# Extend _parameter_number: this parameter's global number within the model.
function _parameter_number(pref::DependentParameterRef)::Int
    return _data_object(pref).parameter_nums[_param_index(pref)]
end

# Extend _parameter_numbers: a single dependent parameter contributes one number.
function _parameter_numbers(pref::DependentParameterRef)::Vector{Int}
    return [_parameter_number(pref)]
end

# Extend _object_number: the number of the whole dependent group.
function _object_number(pref::DependentParameterRef)::Int
    return _data_object(pref).object_num
end

# Extend _object_numbers
function _object_numbers(pref::DependentParameterRef)::Vector{Int}
    return [_object_number(pref)]
end
## Set helper methods for adapting data_objects with parametric changes
# No change needed: the new parameters have the same concrete type, so the
# existing data object can be mutated in place.
function _adaptive_data_update(pref::DependentParameterRef, params::P,
    data::MultiParameterData{P})::Nothing where {P <: DependentParameters}
    data.parameters = params
    return
end

# Reconstruction is necessary: the parameter type changed, so a new (correctly
# parameterized) MultiParameterData must be built, carrying over all bookkeeping.
function _adaptive_data_update(pref::DependentParameterRef, params::P1,
    data::MultiParameterData{P2})::Nothing where {P1, P2}
    new_data = MultiParameterData(params, data.object_num, data.parameter_nums,
                                  data.names, data.parameter_func_indices,
                                  data.infinite_var_indices,
                                  data.derivative_indices, data.measure_indices,
                                  data.constraint_indices,
                                  data.has_internal_supports,
                                  data.has_deriv_constrs)
    _data_dictionary(pref)[JuMP.index(pref).object_index] = new_data
    return
end

# Extend _set_core_variable_object: dispatches to one of the helpers above.
function _set_core_variable_object(pref::DependentParameterRef,
    params::DependentParameters)::Nothing
    _adaptive_data_update(pref, params, _data_object(pref))
    return
end
################################################################################
# DERIVATIVE METHOD FUNCTIONS
################################################################################
# Extend fallback for dependent parameters: dependent parameters only allow
# non-generative methods, so they never carry derivative supports.
function has_derivative_supports(pref::DependentParameterRef)::Bool
    return false
end

# Extend fallback for dependent parameters (no-op for the same reason).
function _set_has_derivative_supports(pref::DependentParameterRef,
    status::Bool)::Nothing
    return
end

# Extend has derivative constraints (tracked per individual parameter).
function has_derivative_constraints(pref::DependentParameterRef)::Bool
    return _data_object(pref).has_deriv_constrs[_param_index(pref)]
end

# Extend setting if has derivative constraints
function _set_has_derivative_constraints(pref::DependentParameterRef,
    status::Bool)::Nothing
    _data_object(pref).has_deriv_constrs[_param_index(pref)] = status
    return
end

# Get the raw derivative method vector (one method per parameter in the group).
function _derivative_methods(pref::DependentParameterRef)
    return _core_variable_object(pref).derivative_methods
end
"""
derivative_method(pref::DependentParameterRef)::NonGenerativeDerivativeMethod
Returns the numerical derivative evaluation method employed with `pref` when it
is used as an operator parameter in a derivative.
**Example**
```julia-repl
julia> derivative_method(pref)
FiniteDifference
```
"""
function derivative_method(pref::DependentParameterRef)::NonGenerativeDerivativeMethod
return _derivative_methods(pref)[_param_index(pref)]
end
## Define helper methods for setting the derivative method efficiently
# Compatible with vector type: the new method is a subtype of the stored
# vector's element type, so the vector can be mutated in place.
function _adaptive_method_update(pref,
    p::DependentParameters{S, M1},
    method::M2
    )::Nothing where {S, M1 <: NonGenerativeDerivativeMethod, M2 <: M1}
    p.derivative_methods[_param_index(pref)] = method
    return
end

# Not compatible: rebuild the method vector (and thus the DependentParameters
# object) so its element type can accommodate the new method.
function _adaptive_method_update(pref,
    p::DependentParameters{S, M1},
    method::M2
    )::Nothing where {S, M1, M2}
    methods = p.derivative_methods
    new_methods = [i == _param_index(pref) ? method : m
                   for (i, m) in enumerate(methods)]
    new_params = DependentParameters(p.set, p.supports, p.sig_digits, new_methods)
    _set_core_variable_object(pref, new_params)
    return
end
"""
set_derivative_method(pref::DependentParameterRef,
method::NonGenerativeDerivativeMethod)::Nothing
Specfies the desired derivative evaluation method `method` for derivatives that are
taken with respect to `pref`. Errors if `method` is generative (i.e., it requires
the definition of additional supports)
**Example**
```julia-repl
julia> set_derivative_method(d, FiniteDifference())
```
"""
function set_derivative_method(pref::DependentParameterRef,
method::AbstractDerivativeMethod
)::Nothing
if !(method isa NonGenerativeDerivativeMethod)
error("Must specify a subtype of `NonGenerativeDerivativeMethod` for " *
"for a dependent parameter.")
end
_adaptive_method_update(pref, _core_variable_object(pref), method)
_reset_derivative_evaluations(pref)
if is_used(pref)
set_optimizer_model_ready(JuMP.owner_model(pref), false)
end
return
end
"""
set_all_derivative_methods(model::InfiniteModel,
method::AbstractDerivativeMethod)::Nothing
Sets the desired evaluation method `method` for all the derivatives currently added
to `model`. Note that this is done with respect to the infinite parameters. Errors
if a generative method is specified and the model contains dependent parameters.
**Example**
```julia-repl
julia> set_all_derivative_methods(model, OrthogonalCollocation(2))
```
"""
function set_all_derivative_methods(model::InfiniteModel,
method::AbstractDerivativeMethod
)::Nothing
for pref in all_parameters(model, InfiniteParameter)
set_derivative_method(pref, method)
end
return
end
################################################################################
# INFINITE SET METHODS
################################################################################
## Get the individual infinite set if possible
# raw_set: the multi-dimensional set shared by the whole dependent group.
function _parameter_set(pref::DependentParameterRef)::InfiniteArraySet
    return _core_variable_object(pref).set
end

# CollectionSet: individual scalar sets are well-defined, so extract this
# parameter's slice.
function _parameter_set(set::CollectionSet{S},
    pref::DependentParameterRef
    )::S where {S <: InfiniteScalarSet}
    return collection_sets(set)[_param_index(pref)]
end

# InfiniteArraySet (Fallback): other array sets have no per-parameter slice.
function _parameter_set(set::InfiniteArraySet, pref::DependentParameterRef)
    error("An individual infinite set is not well-defined for $pref which " *
          "is part of a group of dependent infinite parameters that correspond " *
          "to an multi-dimensional infinite set of type `$(typeof(set))`.")
end
"""
infinite_set(pref::DependentParameterRef)::InfiniteScalarSet
Return the infinite set associated with the particular infinite dependent
parameter `pref` if valid. Errors if the underlying [`DependentParameters`](@ref)
object does not use a [`CollectionSet`](@ref).
**Example**
```julia-repl
julia> infinite_set(x[1])
[-1, 1]
```
"""
function infinite_set(pref::DependentParameterRef)::InfiniteScalarSet
return _parameter_set(_parameter_set(pref), pref)
end
# Check that prefs are complete: the container must include every parameter
# of the dependent group so group-level operations are well-defined.
function _check_complete_param_array(
    prefs::AbstractArray{<:DependentParameterRef}
    )::Nothing
    expected = _num_parameters(first(prefs))
    if length(prefs) != expected
        error("Dimensions of parameter container and the infinite set do not " *
              "match, ensure all related dependent parameters are included.")
    end
    return
end
"""
infinite_set(prefs::AbstractArray{<:DependentParameterRef})::InfiniteArraySet
Return the infinite set associated with the container of infinite dependent
parameters `prefs`. Errors if the container `prefs` is incomplete.
**Example**
```julia-repl
julia> infinite_set(x)
ZeroMeanDiagNormal(
dim: 2
μ: [0.0, 0.0]
Σ: [1.0 0.0; 0.0 1.0]
)
```
"""
function infinite_set(prefs::AbstractArray{<:DependentParameterRef}
)::InfiniteArraySet
_check_complete_param_array(prefs)
return _parameter_set(first(prefs))
end
# Update the underlying set and delete the supports. Rebuilds the group's
# DependentParameters with an empty support dictionary, resets derivative
# evaluations for every parameter in the group, and invalidates the
# optimizer model if anything depends on the group.
function _update_parameter_set(pref::DependentParameterRef,
    new_set::InfiniteArraySet)::Nothing
    old_params = _core_variable_object(pref)
    new_supports = Dict{Vector{Float64}, Set{DataType}}()
    sig_figs = significant_digits(pref)
    methods = _derivative_methods(pref)
    new_params = DependentParameters(new_set, new_supports, sig_figs, methods)
    _set_core_variable_object(pref, new_params)
    # Reset derivative evaluations for every member of the dependent group.
    for i in 1:length(new_set)
        idx = DependentParameterIndex(JuMP.index(pref).object_index, i)
        p = DependentParameterRef(JuMP.owner_model(pref), idx)
        _reset_derivative_evaluations(p)
    end
    _set_has_internal_supports(pref, false)
    if is_used(pref)
        set_optimizer_model_ready(JuMP.owner_model(pref), false)
    end
    return
end
"""
set_infinite_set(pref::DependentParameterRef,
set::InfiniteScalarSet)::Nothing
Specify the scalar infinite set of the dependent infinite parameter `pref` to
`set` if `pref` is part of a [`CollectionSet`](@ref), otherwise an error is
thrown. Note this will reset/delete all the supports contained in the
underlying [`DependentParameters`](@ref) object. Also, errors if `pref` is used
by a measure.
**Example**
```julia-repl
julia> set_infinite_set(x[1], IntervalSet(0, 2))
julia> infinite_set(x[1])
[0, 2]
```
"""
function set_infinite_set(pref::DependentParameterRef,
set::InfiniteScalarSet)::Nothing
old_set = _parameter_set(pref)
if !(old_set isa CollectionSet)
error("Cannot set the individual infinite set of $pref if the " *
"underlying set is not a CollectionSet.")
elseif used_by_measure(pref)
error("Cannot override the infinite set of $pref since it is used by " *
"a measure.")
end
param_idx = _param_index(pref)
new_set = CollectionSet([i != param_idx ? collection_sets(old_set)[i] : set
for i in eachindex(collection_sets(old_set))])
_update_parameter_set(pref, new_set)
return
end
"""
set_infinite_set(prefs::AbstractArray{<:DependentParameterRef},
set::InfiniteArraySet)::Nothing
Specify the multi-dimensional infinite set of the dependent infinite parameters
`prefs` to `set`. Note this will reset/delete all the supports contained in the
underlying [`DependentParameters`](@ref) object. This will error if the not all
of the dependent infinite parameters are included, if any of them are used by
measures.
**Example**
```julia-repl
julia> set_infinite_set(x, CollectionSet([IntervalSet(0, 1), IntervalSet(0, 2)]))
```
"""
function set_infinite_set(prefs::AbstractArray{<:DependentParameterRef},
set::InfiniteArraySet)::Nothing
if any(used_by_measure(pref) for pref in prefs)
error("Cannot override the infinite set of $prefs since it is used by " *
"a measure.")
end
_check_complete_param_array(prefs)
_update_parameter_set(first(prefs), set)
return
end
"""
JuMP.has_lower_bound(pref::DependentParameterRef)::Bool
Extend the `JuMP.has_lower_bound` function to accomodate a single dependent
infinite parameter.
Return true if the set associated with `pref` has a defined lower bound or if a
lower bound can be found. Extensions with user-defined scalar infinite set types
should extend `JuMP.has_lower_bound(set::NewType)`.
**Example**
```julia-repl
julia> has_lower_bound(x[1])
true
```
"""
function JuMP.has_lower_bound(pref::DependentParameterRef)::Bool
set = _parameter_set(pref)
if set isa CollectionSet
return JuMP.has_lower_bound(collection_sets(set)[_param_index(pref)])
else
return false
end
end
"""
JuMP.lower_bound(pref::DependentParameterRef)::Number
Extend the `JuMP.lower_bound` function to accomodate a single dependent infinite
parameter. Returns the lower bound associated with the infinite set. Errors if
such a bound is not well-defined.
**Example**
```julia-repl
julia> lower_bound(x[1])
0.0
```
"""
function JuMP.lower_bound(pref::DependentParameterRef)::Number
if !JuMP.has_lower_bound(pref)
error("Parameter $(pref) does not have a lower bound.")
end
return JuMP.lower_bound(infinite_set(pref))
end
"""
JuMP.set_lower_bound(pref::DependentParameterRef, lower::Real)::Nothing
Extend the `JuMP.set_lower_bound` function to accomodate a single dependent
infinite parameter. Updates the infinite set lower bound if such an operation
is supported. Infinite scalar set extensions that seek to employ this should extend
`JuMP.set_lower_bound(set::NewType, lower::Number)`. This will call
[`set_infinite_set`](@ref) and will error if this is not well-defined. Note
that existing supports will be deleted.
**Example**
```julia-repl
julia> set_lower_bound(t, -1)
julia> lower_bound(t)
-1.0
```
"""
function JuMP.set_lower_bound(pref::DependentParameterRef, lower::Real)::Nothing
set = infinite_set(pref)
new_set = JuMP.set_lower_bound(set, lower)
set_infinite_set(pref, new_set)
return
end
"""
JuMP.has_upper_bound(pref::DependentParameterRef)::Bool
Extend the `JuMP.has_upper_bound` function to accomodate a single dependent
infinite parameter.
Return true if the set associated with `pref` has a defined upper bound or if a
upper bound can be found. Extensions with user-defined scalar infinite set types
should extend `JuMP.has_upper_bound(set::NewType)`.
**Example**
```julia-repl
julia> has_upper_bound(x[1])
true
```
"""
function JuMP.has_upper_bound(pref::DependentParameterRef)::Bool
set = _core_variable_object(pref).set
if set isa CollectionSet
return JuMP.has_upper_bound(collection_sets(set)[_param_index(pref)])
else
return false
end
end
"""
JuMP.upper_bound(pref::DependentParameterRef)::Number
Extend the `JuMP.upper_bound` function to accomodate a single dependent infinite
parameter. Returns the upper bound associated with the infinite set. Errors if
such a bound is not well-defined.
**Example**
```julia-repl
julia> upper_bound(x[1])
0.0
```
"""
function JuMP.upper_bound(pref::DependentParameterRef)::Number
if !JuMP.has_upper_bound(pref)
error("Parameter $(pref) does not have a upper bound.")
end
return JuMP.upper_bound(infinite_set(pref))
end
"""
JuMP.set_upper_bound(pref::DependentParameterRef, upper::Real)::Nothing
Extend the `JuMP.set_upper_bound` function to accomodate a single dependent
infinite parameter. Updates the infinite set upper bound if such an operation
is supported. Infinite scalar set extensions that seek to employ this should extend
`JuMP.set_upper_bound(set::NewType, upper::Number)`. This will call
[`set_infinite_set`](@ref) and will error if this is not well-defined. Note
that existing supports will be deleted.
**Example**
```julia-repl
julia> set_upper_bound(t, -1)
julia> upper_bound(t)
-1.0
```
"""
function JuMP.set_upper_bound(pref::DependentParameterRef, upper::Real)::Nothing
set = infinite_set(pref)
new_set = JuMP.set_upper_bound(set, upper)
set_infinite_set(pref, new_set)
return
end
################################################################################
# SUPPORT METHODS
################################################################################
# Accessor for the raw support dictionary (support vector -> label set)
# stored on the underlying DependentParameters object
function _parameter_supports(pref::DependentParameterRef
    )::Dict{Vector{Float64}, Set{DataType}}
    return _core_variable_object(pref).supports
end
"""
significant_digits(pref::DependentParameterRef)::Int
Return the number of significant digits enforced on the supports of `pref`.
**Example**
```julia-repl
julia> significant_digits(x[1])
12
```
"""
function significant_digits(pref::DependentParameterRef)::Int
return _core_variable_object(pref).sig_digits
end
"""
num_supports(pref::DependentParameterRef;
[label::Type{<:AbstractSupportLabel} = PublicLabel])::Int
Return the number of support points associated with a single dependent infinite
parameter `pref`. Specify a subset of supports via `label` to only count the
supports with `label`. By default only the amount of public supports are given, but
the full amount is obtained via `label == All`.
**Example**
```julia-repl
julia> num_supports(x[1])
2
julia> num_supports(x[1], label = MCSample)
0
```
"""
function num_supports(pref::DependentParameterRef;
label::Type{<:AbstractSupportLabel} = PublicLabel)::Int
supp_dict = _parameter_supports(pref)
if label == All || (!has_internal_supports(pref) && label == PublicLabel)
return length(supp_dict)
else
return count(p -> any(v -> v <: label, p[2]), supp_dict)
end
end
"""
num_supports(prefs::AbstractArray{<:DependentParameterRef};
[label::Type{<:AbstractSupportLabel} = PublicLabel])::Int
Return the number of support points associated with dependent infinite
parameters `prefs`. Errors if not all from the same underlying object.
Specify a subset of supports via `label` to only count the supports with `label`.
By default only the amount of public supports are given, but the full amount is
obtained via `label == All`.
**Example**
```julia-repl
julia> num_supports(x)
2
```
"""
function num_supports(prefs::AbstractArray{<:DependentParameterRef};
label::Type{<:AbstractSupportLabel} = PublicLabel
)::Int
_check_complete_param_array(prefs)
return num_supports(first(prefs), label = label)
end
"""
has_supports(pref::DependentParameterRef)::Bool
Return true if `pref` has supports or false otherwise.
**Example**
```julia-repl
julia> has_supports(x[1])
true
```
"""
has_supports(pref::DependentParameterRef)::Bool = !isempty(_parameter_supports(pref))
"""
has_supports(prefs::AbstractArray{<:DependentParameterRef})::Bool
Return true if `prefs` have supports or false otherwise. Errors if not all of the
infinite dependent parameters are from the same object.
**Example**
```julia-repl
julia> has_supports(x)
true
```
"""
function has_supports(prefs::AbstractArray{<:DependentParameterRef})::Bool
_check_complete_param_array(prefs)
return has_supports(first(prefs))
end
"""
supports(pref::DependentParameterRef;
[label::Type{<:AbstractSupportLabel} = PublicLabel])::Vector{Float64}
Return the support points associated with `pref`. A subset of supports can be
returned via `label` to return just the supports associated with `label`. By
default only the public supports are given, but the full set is
obtained via `label == All`.
**Example**
```julia-repl
julia> supports(x[1])
2-element Array{Float64,1}:
0.0
1.0
```
"""
function supports(pref::DependentParameterRef;
label::Type{<:AbstractSupportLabel} = PublicLabel)::Vector{Float64}
supp_dict = _parameter_supports(pref)
pindex = _param_index(pref)
if label == All || (!has_internal_supports(pref) && label == PublicLabel)
return Float64[supp[pindex] for supp in keys(supp_dict)]
else
reduced_supps = findall(e -> any(v -> v <: label, e), supp_dict)
return Float64[supp[pindex] for supp in reduced_supps]
end
end
"""
supports(prefs::AbstractArray{<:DependentParameterRef};
[label::Type{<:AbstractSupportLabel} = PublicLabel]
)::Union{AbstractArray{<:Vector{<:Float64}}, Array{Float64, 2}}
Return the support points associated with `prefs`. Errors if not all of the
infinite dependent parameters are from the same object. This will return a
matrix if `prefs` is `Vector`, otherwise an array of vectors is returned by
calling `supports.(prefs)`. A subset of supports can be
returned via `label` to return just the supports associated with `label`. By
default only the public supports are given, but the full set is obtained via
`label == All`.
**Example**
```julia-repl
julia> supports(x) # columns are supports
2×2 Array{Float64,2}:
0.0 1.0
0.0 1.0
```
"""
function supports(prefs::AbstractArray{<:DependentParameterRef};
label::Type{<:AbstractSupportLabel} = PublicLabel
)::AbstractArray{<:Vector{<:Float64}}
_check_complete_param_array(prefs)
return supports.(prefs, label = label) # TODO make more efficient
end
# More efficient dispatch for Vectors
# Returns a matrix whose columns are the stored support points (rows follow
# the parameter ordering within the object).
function supports(prefs::Vector{DependentParameterRef};
                  label::Type{<:AbstractSupportLabel} = PublicLabel
                  )::Array{Float64, 2}
    if !has_supports(prefs)
        # no supports stored: a 0-column matrix with one row per parameter
        return zeros(Float64, _num_parameters(first(prefs)), 0)
    elseif label == All || (!has_internal_supports(first(prefs)) && label == PublicLabel)
        raw_supps = keys(_parameter_supports(first(prefs)))
        if length(raw_supps) == 1
            # with a single key, `reduce(hcat, ...)` would simply return the
            # key (a Vector); collecting into a Vector{Vector} triggers Base's
            # specialized reduce(hcat, ::AbstractVector) which yields a
            # 1-column matrix instead
            return reduce(hcat, collect(raw_supps))
        else
            return reduce(hcat, raw_supps)
        end
    else
        # restrict to supports carrying at least one label of the given type
        raw_supps = findall(e -> any(v -> v <: label, e),
                            _parameter_supports(first(prefs)))
        if isempty(raw_supps)
            return zeros(Float64, _num_parameters(first(prefs)), 0)
        else
            # findall on a Dict returns a Vector of keys, so reduce(hcat, ...)
            # produces a matrix even for a single element
            return reduce(hcat, raw_supps)
        end
    end
end
# Define method for overriding the current supports
# Replaces the stored support dictionary wholesale: each column of `supports`
# becomes a key (tagged with `label`) in a fresh DependentParameters object.
function _update_parameter_supports(prefs::AbstractArray{<:DependentParameterRef},
                                    supports::Array{<:Real, 2},
                                    label::Type{<:AbstractSupportLabel})::Nothing
    set = _parameter_set(first(prefs))
    # column views are converted to Vector{Float64} keys by the Dict constructor
    new_supps = Dict{Vector{Float64}, Set{DataType}}(@views supports[:, i] =>
                                              Set([label]) for i in 1:size(supports, 2))
    sig_figs = significant_digits(first(prefs))
    methods = _derivative_methods(first(prefs))
    new_params = DependentParameters(set, new_supps, sig_figs, methods)
    # installing the new object discards all previously stored supports
    _set_core_variable_object(first(prefs), new_params)
    _set_has_internal_supports(first(prefs), label <: InternalLabel)
    # support changes invalidate any cached derivative evaluations
    for pref in prefs
        _reset_derivative_evaluations(pref)
    end
    # flag the optimizer model as stale if these parameters appear in it
    if any(is_used(pref) for pref in prefs)
        set_optimizer_model_ready(JuMP.owner_model(first(prefs)), false)
    end
    return
end
# Process an array of vectors into a support matrix (columns = supports)
function _make_support_matrix(prefs::AbstractArray{<:DependentParameterRef},
                              supports::AbstractArray{<:Vector{<:Real}}
                              )::Array{<:Real, 2}
    _keys(supports) == _keys(prefs) || error("Inconsistent support indices")
    lengths = [length(s) for s in supports]
    _allequal(lengths) || error("Inconsistent support dimensions.")
    # fill a (num_supports x num_params) buffer ordered by parameter index,
    # then transpose so columns correspond to supports
    buffer = Array{Float64}(undef, first(lengths), length(prefs))
    for k in eachindex(prefs)
        buffer[:, _param_index(prefs[k])] = supports[k]
    end
    return permutedims(buffer)
end
"""
set_supports(prefs::AbstractArray{<:DependentParameterRef},
supports::AbstractArray{<:Vector{<:Real}};
[force::Bool = false,
label::Type{<:AbstractSupportLabel} = UserDefined])::Nothing
Specify the support points for `prefs`. Errors if the supports violate the domain
of the infinite set, if the dimensions don't match up properly,
if `prefs` and `supports` have different indices, not all of the `prefs` are
from the same dependent infinite parameter container, there are existing
supports and `force = false`. Note that it is strongly preferred to use
`add_supports` if possible to avoid destroying measure dependencies.
```julia
set_supports(prefs::Vector{DependentParameterRef},
supports::Array{<:Real, 2};
[force::Bool = false,
label::Type{<:AbstractSupportLabel} = UserDefined])::Nothing
```
Specify the supports for a vector `prefs` of dependent infinite parameters.
Here rows of `supports` correspond to `prefs` and the columns correspond to the
supports. This is more efficient than the above method and will error for the
same reasons.
**Example**
```julia-repl
julia> set_supports(y, [[0, 1], [0, 1]])
julia> set_supports(x, [0 1; 0 1])
julia> supports(x)
2×2 Array{Float64,2}:
0.0 1.0
0.0 1.0
```
"""
function set_supports(prefs::AbstractArray{<:DependentParameterRef},
supports::AbstractArray{<:Vector{<:Real}};
force::Bool = false,
label::Type{<:AbstractSupportLabel} = UserDefined
)::Nothing
supps = _make_support_matrix(prefs, supports)
set_supports(_make_vector(prefs), supps, force = force, label = label)
return
end
# Efficient method for vector prefs and matrix of supports
# Overwrites the stored supports with the matrix columns (after rounding to
# the enforced precision); refuses to clobber existing supports unless forced.
function set_supports(prefs::Vector{DependentParameterRef},
                      supports::Array{<:Real, 2};
                      force::Bool = false,
                      label::Type{<:AbstractSupportLabel} = UserDefined
                      )::Nothing
    set = infinite_set(prefs) # this does a check on prefs
    if has_supports(prefs) && !force
        # fixed error-message grammar ("Unable set" -> "Unable to set",
        # "or use set `force = true`" -> "or set `force = true`")
        error("Unable to set supports for $prefs since they already have " *
              "supports. Consider using `add_supports` or set `force = true` " *
              "to overwrite the existing supports.")
    elseif !supports_in_set(supports, set)
        error("Supports violate the domain of the infinite set.")
    end
    supports = round.(supports, sigdigits = significant_digits(first(prefs)))
    _update_parameter_supports(prefs, supports, label)
    return
end
# Fallback: supports of a single dependent parameter cannot be modified in
# isolation since they are stored jointly on the containing object
function set_supports(pref::DependentParameterRef, supports; kwargs...)
    return error("Cannot modify the supports of a single dependent infinite parameter.")
end
"""
add_supports(prefs::AbstractArray{<:DependentParameterRef},
supports::AbstractArray{<:Vector{<:Real}};
[label::Type{<:AbstractSupportLabel} = UserDefined])::Nothing
Add additional support points for `prefs`. Errors if the supports violate the domain
of the infinite set, if the dimensions don't match up properly,
if `prefs` and `supports` have different indices, or not all of the `prefs` are
from the same dependent infinite parameter container.
```julia
add_supports(prefs::Vector{DependentParameterRef},
supports::Array{<:Real, 2};
[label::Type{<:AbstractSupportLabel} = UserDefined])::Nothing
```
Specify the supports for a vector `prefs` of dependent infinite parameters.
Here rows of `supports` correspond to `prefs` and the columns correspond to the
supports. This is more efficient than the above method and will error for the
same reasons.
**Example**
```julia-repl
julia> add_supports(x, [[1], [1]])
julia> supports(x)
2×2 Array{Float64,2}:
0.0 1.0
0.0 1.0
julia> add_supports(x, ones(2, 1) * 0.5)
julia> supports(t)
2×3 Array{Float64,2}:
0.0 1.0 0.5
0.0 1.0 0.5
```
"""
function add_supports(prefs::AbstractArray{<:DependentParameterRef},
supports::AbstractArray{<:Vector{<:Real}};
label::Type{<:AbstractSupportLabel} = UserDefined, # interal keyword args
check::Bool = true)::Nothing
supps = _make_support_matrix(prefs, supports)
add_supports(_make_vector(prefs), supps, label = label, check = check)
return
end
# More efficient version for supports in the correct format
# Merges the matrix columns into the stored support dictionary, tagging each
# with `label` (existing supports simply gain the label).
function add_supports(prefs::Vector{DependentParameterRef},
                      supports::Array{<:Real, 2};
                      label::Type{<:AbstractSupportLabel} = UserDefined, # internal keyword args
                      check::Bool = true)::Nothing
    set = infinite_set(prefs) # this does a check on prefs
    if check && !supports_in_set(supports, set)
        error("Supports violate the domain of the infinite set.")
    end
    rounded = round.(supports, sigdigits = significant_digits(first(prefs)))
    support_dict = _parameter_supports(first(prefs))
    for j in 1:size(rounded, 2)
        column = @view(rounded[:, j])
        # insert an empty label set for new supports, then tag with `label`
        labels = get!(() -> Set{DataType}(), support_dict, column)
        push!(labels, label)
    end
    if label <: InternalLabel
        _set_has_internal_supports(first(prefs), true)
    end
    # new supports invalidate any cached derivative evaluations
    foreach(_reset_derivative_evaluations, prefs)
    # flag the optimizer model as stale if these parameters appear in it
    if any(is_used(pref) for pref in prefs)
        set_optimizer_model_ready(JuMP.owner_model(first(prefs)), false)
    end
    return
end
# Fallback: supports of a single dependent parameter cannot be modified in
# isolation since they are stored jointly on the containing object
function add_supports(pref::DependentParameterRef, supports; kwargs...)
    return error("Cannot modify the supports of a single dependent infinite parameter.")
end
"""
delete_supports(prefs::AbstractArray{<:DependentParameterRef};
[label::Type{<:AbstractSupportLabel} = All])::Nothing
Delete the support points for `prefs`. Errors if any of the parameters are
used by a measure or if not all belong to the same set of dependent parameters.
If `label != All` then that label is removed along with any supports that solely
contain that label.
**Example**
```julia-repl
julia> delete_supports(w)
```
"""
function delete_supports(prefs::AbstractArray{<:DependentParameterRef};
label::Type{<:AbstractSupportLabel} = All)::Nothing
_check_complete_param_array(prefs)
supp_dict = _parameter_supports(first(prefs))
for pref in prefs
_reset_derivative_evaluations(pref)
end
if label == All
if any(used_by_measure(pref) for pref in prefs)
error("Cannot delete supports with measure dependencies.")
end
empty!(supp_dict)
_set_has_internal_supports(first(prefs), false)
else
filter!(p -> !all(v -> v <: label, p[2]), supp_dict)
for (k, v) in supp_dict
filter!(l -> !(l <: label), v)
end
pref1 = first(prefs)
if has_internal_supports(pref1) && num_supports(pref1, label = InternalLabel) == 0
_set_has_internal_supports(pref1, false)
end
end
if any(is_used(pref) for pref in prefs)
set_optimizer_model_ready(JuMP.owner_model(first(prefs)), false)
end
return
end
# Fallback: supports of a single dependent parameter cannot be deleted in
# isolation since they are stored jointly on the containing object
function delete_supports(pref::DependentParameterRef)
    return error("Cannot delete the supports of a single dependent infinite parameter.")
end
# TODO resolve case that there are existing UniformGrid supports
"""
    generate_and_add_supports!(prefs::AbstractArray{<:DependentParameterRef},
                               set::InfiniteArraySet,
                               [method::Type{<:AbstractSupportLabel}];
                               [num_supports::Int = DefaultNumSupports])::Nothing

Generate supports for `prefs` via [`generate_support_values`](@ref) and add
them to `pref`. This is intended as an extendable internal method for
[`fill_in_supports!`](@ref fill_in_supports!(::AbstractArray{<:DependentParameterRef})).
Most extensions that employ user-defined infinite sets can typically enable
this by extending [`generate_support_values`](@ref). However, in some cases
it may be necessary to extend this when more complex operations need to take
place than just adding supports to a set of infinite parameters. Errors if
the infinite set type is not recognized.
"""
function generate_and_add_supports!(prefs::AbstractArray{<:DependentParameterRef},
                                    set::InfiniteArraySet;
                                    num_supports::Int = DefaultNumSupports)::Nothing
    sig_digits = significant_digits(first(prefs))
    # let the set type decide how to sample and which label to attach
    new_supps, label = generate_supports(set, num_supports = num_supports,
                                         sig_digits = sig_digits)
    # skip the domain check since the values were generated from the set itself
    add_supports(_make_vector(prefs), new_supps, check = false, label = label)
    return
end
# Method dispatch variant: force a particular generation method/label
function generate_and_add_supports!(prefs::AbstractArray{<:DependentParameterRef},
                                    set::InfiniteArraySet,
                                    method::Type{<:AbstractSupportLabel};
                                    num_supports::Int = DefaultNumSupports)::Nothing
    sig_digits = significant_digits(first(prefs))
    new_supps, label = generate_supports(set, method, num_supports = num_supports,
                                         sig_digits = sig_digits)
    # skip the domain check since the values were generated from the set itself
    add_supports(_make_vector(prefs), new_supps, check = false, label = label)
    return
end
"""
fill_in_supports!(prefs::AbstractArray{<:DependentParameterRef};
[num_supports::Int = DefaultNumSupports,
modify::Bool = true])::Nothing
Automatically generate support points for a container of dependent infinite
parameters `prefs`. Generating up to `num_supports` for the parameters in accordance
with `generate_and_add_supports!`. Will add nothing if there are supports and
`modify = false`. Extensions that use user defined
set types should extend [`generate_and_add_supports!`](@ref) and/or
[`generate_support_values`](@ref) as needed. Errors if the infinite set type is
not recognized.
**Example**
```julia-repl
julia> fill_in_supports!(x, num_supports = 4)
julia> supports(x)
2×4 Array{Float64,2}:
0.0 0.333 0.667 1.0
0.0 0.333 0.667 1.0
```
"""
function fill_in_supports!(prefs::AbstractArray{<:DependentParameterRef};
num_supports::Int = DefaultNumSupports,
modify::Bool = true)::Nothing
set = infinite_set(prefs) # does check for bad container
current_amount = InfiniteOpt.num_supports(first(prefs))
if (modify || current_amount == 0) && (current_amount < num_supports)
generate_and_add_supports!(prefs, set,
num_supports = num_supports - current_amount)
end
return
end
# Fallback: supports of a single dependent parameter cannot be modified in
# isolation since they are stored jointly on the containing object
function fill_in_supports!(pref::DependentParameterRef; kwargs...)
    return error("Cannot modify the supports of a single dependent infinite parameter.")
end
"""
fill_in_supports!(model::InfiniteModel; [num_supports::Int = DefaultNumSupports,
modify::Bool = true])::Nothing
Automatically generate support points for all infinite parameters in model.
This calls `fill_in_supports!` for each parameter in the model.
See [`fill_in_supports!`](@ref)
for more information. Errors if one of the infinite set types is unrecognized.
Note that no supports will be added to a particular parameter if it already has
some and `modify = false`.
**Example**
```julia-repl
julia> fill_in_supports!(model, num_supports = 4)
julia> supports(t)
4-element Array{Float64,1}:
0.0
0.333
0.667
1.0
```
"""
function fill_in_supports!(model::InfiniteModel; num_supports::Int = DefaultNumSupports,
modify::Bool = true)::Nothing
# fill in the the supports of each independent parameter
for (key, data_object) in model.independent_params
pref = dispatch_variable_ref(model, key)
fill_in_supports!(pref, num_supports = num_supports, modify = modify)
end
# fill in the supports of each dependent parameter set
for (key, data_object) in model.dependent_params
prefs = [dispatch_variable_ref(model, DependentParameterIndex(key, i))
for i in 1:length(data_object.names)]
fill_in_supports!(prefs, num_supports = num_supports, modify = modify)
end
return
end
################################################################################
# MODEL PARAMETER QUERIES
################################################################################
"""
num_parameters(model::InfiniteModel,
[type::Type{InfOptParameter} = InfOptParameter])::Int
Return the number of `InfiniteOpt` parameters assigned to `model`. By default,
the total number of infinite and finite parameters is returned. The amount
of a particular type is obtained by specifying the concrete parameter type
of [`InfOptParameter`](@ref) via `type`. Type options include:
- `InfOptParameter`: all parameters
- `ScalarParameter`: all scalar parameters
- `InfiniteParameter`: all infinite parameters
- `FiniteParameter`: all finite parameters
- `IndependentParameter`: all independent infinite parameters
- `DependentParameters`: all dependent infinite parameters
**Example**
```julia-repl
julia> num_parameters(model)
3
julia> num_parameters(model, IndependentParameter)
2
```
"""
function num_parameters(model::InfiniteModel,
type::Type{InfOptParameter} = InfOptParameter
)::Int
num_pars = num_parameters(model, IndependentParameter)
num_pars += num_parameters(model, FiniteParameter)
num_pars += num_parameters(model, DependentParameters)
return num_pars
end
# Particular scalar parameter types: one data-dictionary entry per parameter
function num_parameters(model::InfiniteModel,
                        type::Type{C})::Int where {C <: ScalarParameter}
    return length(_data_dictionary(model, type))
end
# ScalarParameter: combine the finite and independent counts
function num_parameters(model::InfiniteModel,
                        type::Type{ScalarParameter})::Int
    return num_parameters(model, FiniteParameter) +
           num_parameters(model, IndependentParameter)
end
# DependentParameters: each data object stores a whole group, so count the
# individual names within every group
function num_parameters(model::InfiniteModel,
                        type::Type{DependentParameters})::Int
    total = 0
    for (_, data_object) in _data_dictionary(model, type)
        total += length(data_object.names)
    end
    return total
end
# InfiniteParameter: combine the independent and dependent counts
function num_parameters(model::InfiniteModel,
                        type::Type{InfiniteParameter})::Int
    return num_parameters(model, IndependentParameter) +
           num_parameters(model, DependentParameters)
end
"""
all_parameters(model::InfiniteModel,
type::Type{InfOptParameter} = InfOptParameter
)::Vector{GeneralVariableRef}
Return a list of all the `InfiniteOpt` parameters assigned to `model`. By default,
all of the infinite and finite parameters is returned. The search is reduced to
a particular type is obtained by specifying the concrete parameter type
of [`InfOptParameter`](@ref) via `type`. Type options include:
- `InfOptParameter`: all parameters
- `ScalarParameter`: all scalar parameters
- `InfiniteParameter`: all infinite parameters
- `FiniteParameter`: all finite parameters
- `IndependentParameter`: all independent infinite parameters
- `DependentParameters`: all dependent infinite parameters
**Examples**
```julia-repl
julia> all_parameters(model)
4-element Array{GeneralVariableRef,1}:
t
x[1]
x[2]
alpha
julia> all_parameters(model, FiniteParameter)
1-element Array{GeneralVariableRef,1}:
alpha
```
"""
function all_parameters(model::InfiniteModel,
type::Type{InfOptParameter} = InfOptParameter
)::Vector{GeneralVariableRef}
prefs_list = all_parameters(model, IndependentParameter)
append!(prefs_list, all_parameters(model, DependentParameters))
append!(prefs_list, all_parameters(model, FiniteParameter))
return prefs_list
end
# Particular scalar parameter types: one reference per data-dictionary entry
function all_parameters(model::InfiniteModel,
                        type::Type{C}
                        )::Vector{GeneralVariableRef} where {C <: InfOptParameter}
    return GeneralVariableRef[_make_parameter_ref(model, index)
                              for (index, _) in _data_dictionary(model, type)]
end
# ScalarParameter: independent parameters first, then finite ones
function all_parameters(model::InfiniteModel,
                        type::Type{ScalarParameter})::Vector{GeneralVariableRef}
    return vcat(all_parameters(model, IndependentParameter),
                all_parameters(model, FiniteParameter))
end
# DependentParameters: expand every stored group into its individual members
function all_parameters(model::InfiniteModel,
                        type::Type{DependentParameters}
                        )::Vector{GeneralVariableRef}
    refs = GeneralVariableRef[]
    sizehint!(refs, num_parameters(model, type))
    for (index, object) in _data_dictionary(model, type)
        for i in eachindex(object.names)
            push!(refs, _make_parameter_ref(model, DependentParameterIndex(index, i)))
        end
    end
    return refs
end
# InfiniteParameter: dependent parameters first, then independent ones
function all_parameters(model::InfiniteModel,
                        type::Type{InfiniteParameter}
                        )::Vector{GeneralVariableRef}
    return vcat(all_parameters(model, DependentParameters),
                all_parameters(model, IndependentParameter))
end
################################################################################
# DELETION
################################################################################
"""
JuMP.delete(model::InfiniteModel,
prefs::AbstractArray{<:DependentParameterRef})::Nothing
Extend `JuMP.delete` to delete
dependent infinite parameters and their dependencies. All variables, constraints, and
measure functions that depend on `prefs` are updated to exclude them. Errors if the
parameters are contained in an `AbstractMeasureData` datatype that is employed by
a measure since the measure becomes invalid otherwise. Thus, measures that
contain this dependency must be deleted first. Note that
[`parameter_refs`](@ref parameter_refs(::AbstractMeasureData)) needs to be
extended to allow deletion of parameters when custom `AbstractMeasureData`
datatypes are used. Note that any dependent infinite variables will have their
start values reset to the default via [`reset_start_value_function`](@ref).
**Example**
```julia-repl
julia> print(model)
Min measure(g(t, x)*t + x) + z
Subject to
z ≥ 0.0
g(t, x) + z ≥ 42.0, ∀ t ∈ [0, 6], x[1] ∈ [-1, 1], x[2] ∈ [-1, 1]
g(0.5, x) = 0, x[1] ∈ [-1, 1], x[2] ∈ [-1, 1]
julia> delete(model, x)
julia> print(model)
Min measure(g(t)*t) + z
Subject to
g(t) + z ≥ 42.0, ∀ t ∈ [0, 6]
g(0.5) = 0
```
"""
function JuMP.delete(model::InfiniteModel,
prefs::AbstractArray{<:DependentParameterRef})::Nothing
@assert JuMP.is_valid(model, first(prefs)) "Parameter references are invalid."
_check_complete_param_array(prefs)
gvrefs = [_make_parameter_ref(model, JuMP.index(pref)) for pref in prefs]
# ensure deletion is okay (prefs are not used by measure data)
for pref in gvrefs
for mindex in _measure_dependencies(pref)
data = measure_data(dispatch_variable_ref(model, mindex))
_check_param_in_data(pref, data)
end
end
# make sure it isn't used by parameter function
if used_by_parameter_function(first(prefs))
error("Cannot delete `$prefs` since they are used by an infinite " *
"parameter function.")
end
# update optimizer model status
if any(is_used(pref) for pref in prefs)
set_optimizer_model_ready(model, false)
end
# delete dependence of measures and constraints on prefs
for pref in gvrefs
_update_measures(model, pref)
_update_constraints(model, pref)
end
# get the object and parameter numbers
obj_num = _object_number(first(prefs))
param_nums = collect(_data_object(first(prefs)).parameter_nums)
# update infinite variables that depend on pref
for vindex in _infinite_variable_dependencies(first(prefs))
# remove the parameter dependences
vref = InfiniteVariableRef(model, vindex)
vprefs = raw_parameter_refs(vref)
tup_index = findfirst(isequal(obj_num), _object_number.(vprefs[:, 1]))
delete_indices = vprefs.ranges[tup_index]
deleteat!(vprefs, tup_index, tuple_index = true)
reset_start_value_function(vref)
# update any point variables that depend on vref accordingly
for pindex in _point_variable_dependencies(vref)
pvref = PointVariableRef(model, pindex)
deleteat!(raw_parameter_values(pvref), delete_indices)
end
# update any reduced variables that depend on vref accordingly
for rindex in _reduced_variable_dependencies(vref)
rvref = ReducedVariableRef(model, rindex)
_update_reduced_variable(rvref, delete_indices)
end
end
# delete derivatives that depend on any of these parameters
for pref in gvrefs
for index in _derivative_dependencies(pref)
JuMP.delete(model, dispatch_variable_ref(model, index))
end
end
# delete parameter information stored in model
_delete_data_object(first(prefs))
# update the object numbers and parameter numbers
_update_model_numbers(model, obj_num, param_nums)
return
end
|
{"hexsha": "5e40a2c151fd2e45e4be57c39d4f4a02ea2af990", "size": 62518, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/array_parameters.jl", "max_stars_repo_name": "dlcole3/InfiniteOpt.jl", "max_stars_repo_head_hexsha": "8e5f86fc15343153c59a10a4361f1c722d795775", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/array_parameters.jl", "max_issues_repo_name": "dlcole3/InfiniteOpt.jl", "max_issues_repo_head_hexsha": "8e5f86fc15343153c59a10a4361f1c722d795775", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/array_parameters.jl", "max_forks_repo_name": "dlcole3/InfiniteOpt.jl", "max_forks_repo_head_hexsha": "8e5f86fc15343153c59a10a4361f1c722d795775", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9298850575, "max_line_length": 125, "alphanum_fraction": 0.6808599123, "num_tokens": 14283}
|
#!/usr/bin/env python
u"""
read_GRACE_harmonics.py
Written by Tyler Sutterley (09/2021)
Contributions by Hugo Lecomte
Reads GRACE files and extracts spherical harmonic data and drift rates (RL04)
Adds drift rates to clm and slm for release 4 harmonics
Correct GSM data for drift in pole tide following Wahr et al. (2015)
Parses date of GRACE/GRACE-FO data from filename
INPUTS:
input_file: GRACE/GRACE-FO Level-2 spherical harmonic data file
LMAX: Maximum degree of spherical harmonics (degree of truncation)
OPTIONS:
MMAX: Maximum order of spherical harmonics (order of truncation)
default is the maximum spherical harmonic degree
POLE_TIDE: correct GSM data for pole tide drift following Wahr et al. (2015)
OUTPUTS:
time: mid-month date in year-decimal
start: start date of range as Julian day
end: end date of range as Julian day
l: spherical harmonic degree to LMAX
m: spherical harmonic order to MMAX
clm: cosine spherical harmonics of input data
slm: sine spherical harmonics of input data
eclm: cosine spherical harmonic uncalibrated standard deviations
eslm: sine spherical harmonic uncalibrated standard deviations
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
dateutil: powerful extensions to datetime
https://dateutil.readthedocs.io/en/stable/
PyYAML: YAML parser and emitter for Python
https://github.com/yaml/pyyaml
PROGRAM DEPENDENCIES:
time.py: utilities for calculating time operations
UPDATE HISTORY:
Updated 09/2021: added COST-G combined solutions from the GFZ ICGEM
output spherical harmonic degree and order in dict
Updated 05/2021: define int/float precision to prevent deprecation warning
Updated 12/2020: using utilities from time module
Updated 10/2020: Change parse function to work with GRGS data
Updated 08/2020: flake8 compatible regular expression strings
input file can be a "diskless" io.BytesIO object
Updated 07/2020: added function docstrings
Updated 08/2019: specify yaml loader (PyYAML yaml.load(input) Deprecation)
Updated 07/2019: replace colons in yaml header if within quotations
Updated 11/2018: decode gzip read with ISO-8859-1 for python3 compatibility
Updated 05/2018: updates to file name structure with release 6 and GRACE-FO
output file headers and parse new YAML headers for RL06 and GRACE-FO
Written 10/2017 for public release
"""
import os
import re
import io
import gzip
import yaml
import numpy as np
import gravity_toolkit.time
#-- PURPOSE: read Level-2 GRACE and GRACE-FO spherical harmonic files
def read_GRACE_harmonics(input_file, LMAX, MMAX=None, POLE_TIDE=False):
    """
    Extracts spherical harmonic coefficients from GRACE/GRACE-FO files
    Adds drift rates to spherical harmonics for Release 4 data
    Correct data prior to Release 6 for pole tide drift
    Parses date of GRACE/GRACE-FO data from filename

    Arguments
    ---------
    input_file: GRACE/GRACE-FO Level-2 spherical harmonic data file
        can be a file path or a "diskless" io.IOBase object
    LMAX: Maximum degree of spherical harmonics (degree of truncation)

    Keyword arguments
    -----------------
    MMAX: Maximum order of spherical harmonics
        default: LMAX
    POLE_TIDE: correct for pole tide drift following Wahr et al. (2015)
        only applied to GSM data products

    Returns
    -------
    time: mid-month date in year-decimal
    start: start date of range as Julian day
    end: end date of range as Julian day
    l: spherical harmonic degree to LMAX
    m: spherical harmonic order to MMAX
    clm: cosine spherical harmonics coefficients
    slm: sine spherical harmonics coefficients
    eclm: cosine spherical harmonic uncalibrated standard deviations
    eslm: sine spherical harmonic uncalibrated standard deviations
    """
    #-- parse filename for processing center, dates, release, etc.
    PFX,SY,SD,EY,ED,N,PRC,F1,DRL,F2,SFX = parse_file(input_file)
    #-- read file contents (decompressing gzip files as needed)
    file_contents = extract_file(input_file, (SFX=='.gz'))
    #-- determine dataset (DSET), release number (DREL) and the regex
    #-- FLAG that marks coefficient data lines for each file flavor
    #-- JPL Mascon solutions
    if PRC in ('JPLMSC',):
        DSET = 'GSM'
        DREL = np.int64(DRL)
        FLAG = r'GRCOF2'
    #-- Kusche et al. (2009) DDK filtered solutions
    #-- https://doi.org/10.1007/s00190-009-0308-3
    elif PFX.startswith('kfilter_DDK'):
        DSET = 'GSM'
        DREL = np.int64(DRL)
        FLAG = r'gfc'
    #-- COST-G unfiltered combination solutions
    #-- https://doi.org/10.5880/ICGEM.COST-G.001
    elif PRC in ('COSTG',):
        #-- trailing comma unpacks the single regex match (raises if absent)
        DSET, = re.findall('GSM|GAC',PFX)
        DREL = np.int64(DRL)
        FLAG = r'gfc'
    #-- Standard GRACE solutions
    else:
        DSET = PFX
        DREL = np.int64(DRL)
        FLAG = r'GRCOF2'
    #-- output python dictionary with GRACE data and date information
    grace_L2_input = {}
    #-- extract GRACE date information from input file name
    #-- SY/SD and EY/ED are start/end year and day-of-year strings
    start_yr = np.float64(SY)
    end_yr = np.float64(EY)
    start_day = np.float64(SD)
    end_day = np.float64(ED)
    #-- calculate mid-month date taking into account if measurements are
    #-- on different years
    dpy = gravity_toolkit.time.calendar_days(start_yr).sum()
    #-- For data that crosses years (end_yr - start_yr should be at most 1)
    end_cyclic = ((end_yr - start_yr)*dpy+end_day)
    #-- Calculate mid-month value
    mid_day = np.mean([start_day, end_cyclic])
    #-- Calculating the mid-month date in decimal form
    grace_L2_input['time'] = start_yr + mid_day/dpy
    #-- Calculating the Julian dates of the start and end date
    #-- 2400000.5 converts from Modified Julian Day (MJD epoch 1858-11-17)
    grace_L2_input['start'] = 2400000.5 + \
        gravity_toolkit.time.convert_calendar_dates(start_yr,1.0,start_day,
        epoch=(1858,11,17,0,0,0))
    grace_L2_input['end'] = 2400000.5 + \
        gravity_toolkit.time.convert_calendar_dates(end_yr,1.0,end_day,
        epoch=(1858,11,17,0,0,0))
    #-- set maximum spherical harmonic order
    MMAX = np.copy(LMAX) if (MMAX is None) else MMAX
    #-- output dimensions
    grace_L2_input['l'] = np.arange(LMAX+1)
    grace_L2_input['m'] = np.arange(MMAX+1)
    #-- Spherical harmonic coefficient matrices to be filled from data file
    grace_L2_input['clm'] = np.zeros((LMAX+1,MMAX+1))
    grace_L2_input['slm'] = np.zeros((LMAX+1,MMAX+1))
    #-- spherical harmonic uncalibrated standard deviations
    grace_L2_input['eclm'] = np.zeros((LMAX+1,MMAX+1))
    grace_L2_input['eslm'] = np.zeros((LMAX+1,MMAX+1))
    if ((DREL == 4) and (DSET == 'GSM')):
        #-- clm and slm drift rates for RL04
        #-- only allocated for RL04 GSM; GRDOTA lines in other products
        #-- would raise a NameError below
        drift_c = np.zeros((LMAX+1,MMAX+1))
        drift_s = np.zeros((LMAX+1,MMAX+1))
    #-- extract GRACE and GRACE-FO file headers
    #-- replace colons in header if within quotations
    #-- (colons inside quoted values would otherwise break YAML parsing)
    head = [re.sub(r'\"(.*?)\:\s(.*?)\"',r'"\1, \2"',l) for l in file_contents
        if not re.match(r'{0}|GRDOTA'.format(FLAG),l)]
    if SFX in ('.gfc',):
        #-- extract parameters from header (ICGEM gfc format keywords)
        header_parameters = ['modelname','earth_gravity_constant','radius',
            'max_degree','errors','norm','tide_system']
        header_regex = re.compile(r'(' + r'|'.join(header_parameters) + r')')
        grace_L2_input['header'] = [l for l in head if header_regex.match(l)]
    elif ((N == 'GRAC') and (DREL >= 6)) or (N == 'GRFO'):
        #-- parse the YAML header for RL06 or GRACE-FO (specifying yaml loader)
        grace_L2_input.update(yaml.load('\n'.join(head),Loader=yaml.BaseLoader))
    else:
        #-- save lines of the GRACE file header removing empty lines
        grace_L2_input['header'] = [l.rstrip() for l in head if l]
    #-- for each line in the GRACE/GRACE-FO file
    for line in file_contents:
        #-- find if line starts with data marker flag (e.g. GRCOF2)
        if bool(re.match(FLAG,line)):
            #-- split the line into individual components
            #-- columns: flag, degree, order, clm, slm, eclm, eslm
            line_contents = line.split()
            #-- degree and order for the line
            l1 = np.int64(line_contents[1])
            m1 = np.int64(line_contents[2])
            #-- if degree and order are below the truncation limits
            if ((l1 <= LMAX) and (m1 <= MMAX)):
                grace_L2_input['clm'][l1,m1] = np.float64(line_contents[3])
                grace_L2_input['slm'][l1,m1] = np.float64(line_contents[4])
                grace_L2_input['eclm'][l1,m1] = np.float64(line_contents[5])
                grace_L2_input['eslm'][l1,m1] = np.float64(line_contents[6])
        #-- find if line starts with drift rate flag
        elif bool(re.match(r'GRDOTA',line)):
            #-- split the line into individual components
            line_contents = line.split()
            l1 = np.int64(line_contents[1])
            m1 = np.int64(line_contents[2])
            #-- Reading Drift rates for low degree harmonics
            #-- NOTE(review): no truncation check here — assumes GRDOTA
            #-- degrees/orders are within (LMAX,MMAX); confirm for RL04 files
            drift_c[l1,m1] = np.float64(line_contents[3])
            drift_s[l1,m1] = np.float64(line_contents[4])
    #-- Adding drift rates to clm and slm for RL04
    #-- if drift rates exist at any time, will add to harmonics
    #-- Will convert the secular rates into a stokes contribution
    #-- Currently removes 2003.3 to get the temporal average close to 0.
    #-- note: += means grace_xlm = grace_xlm + drift_x
    if ((DREL == 4) and (DSET == 'GSM')):
        #-- time since 2003.3
        dt = (grace_L2_input['time']-2003.3)
        grace_L2_input['clm'][:,:] += dt*drift_c[:,:]
        grace_L2_input['slm'][:,:] += dt*drift_s[:,:]
    #-- Correct Pole Tide following Wahr et al. (2015) 10.1002/2015JB011986
    if POLE_TIDE and (DSET == 'GSM'):
        #-- time since 2000.0
        dt = (grace_L2_input['time']-2000.0)
        #-- CSR and JPL Pole Tide Correction
        if PRC in ('UTCSR','JPLEM','JPLMSC'):
            #-- values for IERS mean pole [2010]
            #-- a/b are cubic polynomial coefficients for the mean pole
            #-- position; the model changes at epoch 2010.0
            if (grace_L2_input['time'] < 2010.0):
                a = np.array([0.055974,1.8243e-3,1.8413e-4,7.024e-6])
                b = np.array([-0.346346,-1.7896e-3,1.0729e-4,0.908e-6])
            elif (grace_L2_input['time'] >= 2010.0):
                a = np.array([0.023513,7.6141e-3,0.0,0.0])
                b = np.array([-0.358891,0.6287e-3,0.0,0.0])
            #-- calculate m1 and m2 values
            m1 = np.copy(a[0])
            m2 = np.copy(b[0])
            for x in range(1,4):
                m1 += a[x]*dt**x
                m2 += b[x]*dt**x
            #-- pole tide values for CSR and JPL
            #-- CSR and JPL both remove the IERS mean pole from m1 and m2
            #-- before computing their harmonic solutions
            C21_PT = -1.551e-9*(m1 - 0.62e-3*dt) - 0.012e-9*(m2 + 3.48e-3*dt)
            S21_PT = 0.021e-9*(m1 - 0.62e-3*dt) - 1.505e-9*(m2 + 3.48e-3*dt)
            #-- correct GRACE spherical harmonics for pole tide
            #-- note: -= means grace_xlm = grace_xlm - PT
            grace_L2_input['clm'][2,1] -= C21_PT
            grace_L2_input['slm'][2,1] -= S21_PT
        #-- GFZ Pole Tide Correction
        elif PRC in ('EIGEN','GFZOP'):
            #-- pole tide values for GFZ
            #-- GFZ removes only a constant pole position
            C21_PT = -1.551e-9*(-0.62e-3*dt) - 0.012e-9*(3.48e-3*dt)
            S21_PT = 0.021e-9*(-0.62e-3*dt) - 1.505e-9*(3.48e-3*dt)
            #-- correct GRACE spherical harmonics for pole tide
            #-- note: -= means grace_xlm = grace_xlm - PT
            grace_L2_input['clm'][2,1] -= C21_PT
            grace_L2_input['slm'][2,1] -= S21_PT
    #-- return the GRACE data, GRACE date (mid-month in decimal), and the
    #-- start and end days as Julian dates
    return grace_L2_input
#-- PURPOSE: extract parameters from filename
def parse_file(input_file):
    """
    Extract parameters from a GRACE/GRACE-FO Level-2 filename

    Arguments
    ---------
    input_file: GRACE/GRACE-FO Level-2 spherical harmonic data file
        either a path string or a file-like (io.IOBase) object

    Returns the tuple of filename fields:
        (PFX, start_year, start_day, end_year, end_day,
         mission, center, F1, release, F2, suffix)
    """
    #-- data processing centers encoded in the filename:
    #-- UTCSR: The University of Texas at Austin Center for Space Research
    #-- EIGEN: GFZ German Research Center for Geosciences (RL01-RL05)
    #-- GFZOP: GFZ German Research Center for Geosciences (RL06+GRACE-FO)
    #-- JPLEM: NASA Jet Propulsion Laboratory (harmonic solutions)
    #-- JPLMSC: NASA Jet Propulsion Laboratory (mascon solutions)
    #-- GRGS: French Centre National D'Etudes Spatiales (CNES)
    #-- COSTG: International Combined Time-variable Gravity Fields
    centers = r'UTCSR|EIGEN|GFZOP|JPLEM|JPLMSC|GRGS|COSTG'
    #-- doubled braces survive str.format to become regex repetition counts
    pattern = r'(.*?)-2_(\d{{4}})(\d{{3}})-(\d{{4}})(\d{{3}})_'
    pattern += r'(.*?)_({0})_(.*?)_(\d+)(.*?)(\.gz|\.gfc)?$'
    rx = re.compile(pattern.format(centers), re.VERBOSE)
    #-- file-like objects are expected to carry a filename attribute
    #-- (set by the caller; io objects have .name by default — TODO confirm)
    name = input_file.filename if isinstance(input_file, io.IOBase) \
        else os.path.basename(input_file)
    #-- pop the single match as a tuple of the eleven filename fields
    return rx.findall(name).pop()
#-- PURPOSE: read input file and extract contents
def extract_file(input_file, compressed):
    """
    Read an input file and return its contents as a list of lines

    Arguments
    ---------
    input_file: GRACE/GRACE-FO Level-2 spherical harmonic data file
        either a path on disk or an open file-like (io.IOBase) object
    compressed: denotes if the file is gzip compressed
    """
    is_fileobj = isinstance(input_file, io.IOBase)
    #-- uncompressed file-like objects can be read directly
    if is_fileobj and not compressed:
        return input_file.read().decode('ISO-8859-1').splitlines()
    #-- tilde-expand paths given as strings
    if not is_fileobj:
        input_file = os.path.expanduser(input_file)
    #-- gzip.open accepts both paths and file-like objects
    opener = gzip.open if compressed else open
    with opener(input_file, 'rb') as fileID:
        raw = fileID.read()
    #-- decode with ISO-8859-1 for python3 compatibility
    return raw.decode('ISO-8859-1').splitlines()
|
{"hexsha": "e1e87be0fc8878986950aace72ae5be1f0f612d1", "size": 13705, "ext": "py", "lang": "Python", "max_stars_repo_path": "gravity_toolkit/read_GRACE_harmonics.py", "max_stars_repo_name": "richannan/read-GRACE-harmonics", "max_stars_repo_head_hexsha": "321254fc3caf08220a32733bd3e79aca433b61af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-07-25T00:32:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T13:37:30.000Z", "max_issues_repo_path": "gravity_toolkit/read_GRACE_harmonics.py", "max_issues_repo_name": "richannan/read-GRACE-harmonics", "max_issues_repo_head_hexsha": "321254fc3caf08220a32733bd3e79aca433b61af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-08-15T02:28:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T17:59:50.000Z", "max_forks_repo_path": "gravity_toolkit/read_GRACE_harmonics.py", "max_forks_repo_name": "richannan/read-GRACE-harmonics", "max_forks_repo_head_hexsha": "321254fc3caf08220a32733bd3e79aca433b61af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2018-08-01T04:37:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T07:36:43.000Z", "avg_line_length": 43.785942492, "max_line_length": 80, "alphanum_fraction": 0.645384896, "include": true, "reason": "import numpy", "num_tokens": 3943}
|
# VinDsl.jl: Fast and furious statistical modeling
<br>
John Pearson
P[λ]ab
Duke Institute for Brain Sciences
# Following along
VinDsl currently makes use of some features of Distributions.jl that are not yet available on master, as well as the latest release of PDMats.jl. You will need to checkout the `jp/autodiff` branch of Distributions:
```julia
Pkg.clone("https://github.com/jmxpearson/VinDsl.jl")
Pkg.update() # if you don't have latest Distributions
Pkg.checkout("Distributions", "jp/autodiff")
```
# About me
- computational neuroscience lab at Duke
- using Julia for about a year
- member of JuliaStats organization
# Our problem
<figure style="display:inline-block;font-size:50%;float:left">
<figcaption>By AKS.9955, via Wikimedia Commons</figcaption>
</figure>
<figure style="display:inline-block;font-size:50%;float:right;text-align:center">
<figcaption>https://praneethnamburi.wordpress.com/2015/02/05/simulating-neural-spike-trains/</figcaption>
</figure>
# Our problem
- model responses to known features (like GLM)
- infer latent features
- do this scalably for large populations of neurons
- use (generative) Bayesian models that account for uncertainty
# One solution: Sampling
- Markov Chain Monte Carlo (MCMC) methods come with guarantees about correctness
- lots of packages (Lora.jl, Mamba.jl, Stan.jl)
- **But** sampling does not scale well to millions of observations and parameters
# Another solution: The Max Power Way
# Variational Bayesian Inference (VB)
Generative model for data: $p(y|\theta)p(\theta)$
Actual posterior: $p(\theta|y) = p(y|\theta)p(\theta)/p(y)$
Approximate posterior: $q(\theta)$
Maximize **E**vidence **L**ower **Bo**und (ELBO) wrt $\theta$:
$$
\log p(y) \ge \mathcal{L} = \log p(y) - KL\left(q \middle\| p(\theta|y)\right) = \mathbb{E}_q[\log p(y|\theta)p(\theta)] + \mathcal{H}[q]
$$
- $\mathbb{E}_q[\log p(y|\theta)]$ measures goodness of fit (data likely under approximate posterior)
- $\mathcal{H}[q]$ is the entropy (favors less certain models)
# Why VB?
- Scales well
- Can use well-studied optimization techniques
# Drawbacks:
- !@$*&# hard to code
- Can't quickly spec out a model like with Stan or JAGS/BUGS
- Traditionally, requires that distributions be conjugate, requires doing lots of algebra
# But VB is exploding!
- stochastic variational inference (SVI): [Hoffman et al.](http://dl.acm.org/citation.cfm?id=2502622)
- black box variational inference (BBVI): [Ranganath et al.](http://arxiv.org/abs/1401.0118)
- control variates: [Paisley et al.](http://arxiv.org/abs/1206.6430)
- local expectation gradients (LEG): [Titsias and Lázaro-Gredilla](http://papers.nips.cc/paper/5678-local-expectation-gradients-for-black-box-variational-inference)
- neural variational inference (NVIL): [Mnih and Gregor](http://arxiv.org/abs/1402.0030)
- variational autoencoders [Kingma and Welling](http://arxiv.org/abs/1312.6114), [Rezende et al.](http://arxiv.org/abs/1401.4082)
# What's out there
- research code: individual algorithms, proof of concept
- [Stan](http://mc-stan.org/)
- **Pros**: Robust, actively developed, good with stats, accessible from Julia
- **Cons**: variational methods still experimental, C++
- [Edward](https://github.com/blei-lab/edward)
- **Pros**: Python, multiple backends, from Stan and VB core developers
- **Cons**: Python, very new
# What's out there
- [Theano](http://deeplearning.net/software/theano/) & [TensorFlow](https://www.tensorflow.org/)
- **Pros**: well-tested, stable, well-engineered backends
- **Cons**: complex, C++, not very stats-aware
# What do we want?
- write math, get code — a domain-specific language (DSL)
- easily generalize to different numbers of indices, structures
- only weakly opinionated about model structure or inference
- model code should be *hackable*
- easy to use prefab pieces
- not hard to write custom VB tricks
- fast prototyping
- no (or minimal) algebra
- automatic gradients
Introducing...
## VinDsl.jl: Fast and furious variational inference
# What's the goal?
- quick prototyping
- targeted at researchers
- "internal" DSL
- loosely-coupled parts
- "consenting adults" philosophy
# Today: two prototypes
- "Classical" models (conjugate + some optimization)
- ADVI "Black Box" models
# Model 1
### Finally, some code!
```julia
using Distributions
using VinDsl
```
WARNING: Method definition logpdf(Distributions.Gamma, Real) in module Distributions at /Users/jmxp/.julia/v0.4/Distributions/src/univariates.jl:321 overwritten in module VinDsl at /Users/jmxp/.julia/v0.4/VinDsl/src/distributions/expfam.jl:42.
WARNING: Method definition logpdf(Distributions.Poisson, Int64) in module Distributions at /Users/jmxp/.julia/v0.4/Distributions/src/univariates.jl:321 overwritten in module VinDsl at /Users/jmxp/.julia/v0.4/VinDsl/src/distributions/expfam.jl:64.
# Model structure:
### Main idea: Factor graphs
- idea from Dahua Lin in [this talk](http://people.csail.mit.edu/dhlin/jubayes/julia_bayes_inference.pdf)
- Nodes: arrays of distributions
- Factors $\leftrightarrow$ terms in variational objective
- but not locked in to graphical model structure!
```julia
dims = (20, 6)
μ[j] ~ Normal(zeros(dims[2]), ones(dims[2]))
τ[j] ~ Gamma(1.1 * ones(dims[2]), ones(dims[2]))
μ0[j] ~ Const(zeros(dims[2]))
y[i, j] ~ Const(rand(dims));
```
Nodes: under the hood
nodes define the q/approximate posterior/recognition model
~ defines a node
can use any distribution defined in the Distributions package
code parses the left and right-hand sides
indices on left get tracked and assigned to dimensions of parameter arrays
code is rewritten as a call to a node constructor
```julia
f = @factor LogNormalFactor y μ τ;
```
or...
```julia
@pmodel begin
y ~ Normal(μ, τ)
end
```
1-element Array{VinDsl.Factor{N},1}:
VinDsl.LogNormalFactor{2}
x: VinDsl.ConstantNode{Float64} y[i,j]
μ: VinDsl.RandomNode{Normal} μ[j]
τ: VinDsl.RandomNode{Gamma} τ[j]
New factor types can be defined with yet another macro:
```julia
@deffactor LogNormalFactor [x, μ, τ] begin
-(1/2) * ((E(τ) * ( V(x) + V(μ) + (E(x) - E(μ))^2 ) + log(2π) + Elog(τ)))
end
@deffactor LogGammaFactor [x, α, β] begin
(E(α) - 1) * Elog(x) - E(β) * E(x) + E(α) * E(β) - Eloggamma(α)
end
```
- Uses a "mini-language" with `E(x)` $\equiv \mathbb{E}[X]$, `V(x)` $\equiv \textrm{cov}[X]$, etc.
- Again, no need to track indices
- multivariate distributions (Dirichlet, MvNormal) are automatically multivariate in these expressions
- `VinDsl` generates a `value(f)` function that handles indices appropriately and sums over the dimensions of the array
# Models are just factor graphs
Let's do a simple conjugate model:
p-model:
$$
\begin{align}
\mu_j &\sim \mathcal{N}(0, 1) \\
\tau_j &\sim \mathrm{Gamma}(1.1, 1) \\
y_{ij} &\sim \mathcal{N}(\mu_j, \tau_j)
\end{align}
$$
q-model:
$$
\begin{align}
\mu &\sim \mathcal{N}(m, t) \\
\tau &\sim \mathrm{Gamma}(\alpha, \beta) \\
\end{align}
$$
```julia
dims = (20, 6)
# note: it won't matter how we initialize here
μ[j] ~ Normal(zeros(dims[2]), ones(dims[2]))
τ[j] ~ Gamma(1.1 * ones(dims[2]), ones(dims[2]))
μ0[j] ~ Const(zeros(dims[2]))
τ0[j] ~ Const(2 * ones(dims[2]))
a0[j] ~ Const(1.1 * ones(dims[2]))
b0[j] ~ Const(ones(dims[2]))
y[i, j] ~ Const(rand(dims))
@pmodel begin
y ~ Normal(μ, τ)
μ ~ Normal(μ0, τ0)
τ ~ Gamma(a0, b0)
end
m = VBModel([μ, τ, μ0, τ0, a0, b0, y], pmodel_factors);
```
# What's going on here?
```julia
m.update_strategy
```
Dict{VinDsl.Node,Symbol} with 7 entries:
VinDsl.RandomNode{Gamma… => :conjugate
VinDsl.ConstantNode{Flo… => :constant
VinDsl.ConstantNode{Flo… => :constant
VinDsl.ConstantNode{Flo… => :constant
VinDsl.ConstantNode{Flo… => :constant
VinDsl.RandomNode{Norma… => :conjugate
VinDsl.ConstantNode{Flo… => :constant
```julia
update!(m)
```
# Conjugacy
- right now `VinDsl` goes out of its way to handle conjugacy between nodes
- conjugate relationships not automatically detected, but easy to define
- `@defnaturals` returns expected sufficient statistics from a factor for a given target distribution
```julia
@defnaturals LogNormalFactor μ Normal begin
Ex, Eτ = E(x), E(τ)
(Ex * Eτ, -Eτ/2)
end
```
# Index Bookkeeping
- nodes have associated indices
- factors know which indices go with which nodes, which indices to sum over
- inner indices belong to, e.g., elements of a multivariate normal (should not be separated)
- outer indices correspond to replicates of "atomic" variables
So this is easy: `i` is inner:
```julia
d = 5
μ[i] ~ MvNormalCanon(zeros(d), diagm(ones(d)))
Λ[i, i] ~ Wishart(float(d), diagm(ones(d)));
```
But here, `i` is inner for $\mu$ but not for $\tau$. In any factor combining these two, $\tau$ will be treated like a vector because it matches an inner index for some node:
```julia
μ[i] ~ MvNormalCanon(zeros(d), diagm(ones(d)))
τ[i] ~ Gamma(1.1 * ones(d), ones(d));
```
# Model 2: ADVI
[(Automatic Differentiation Variational Inference)](http://arxiv.org/abs/1603.00788)
# Two major ideas
1. We just need to define an ELBO (or *approximate* an ELBO)
1. Any unimodal distribution is *approximately* a normal transformed to the constrained support
# Implementation
- Approximate ELBO by sampling from normal, transforming to constrained variables
- Let automatic differentiation handle the gradient calculation
- Do gradient ascent
# Hey, we have autodiff!
# But there's always a problem:
- ForwardDiff defines `Dual <: Real`
- But Distributions.jl doesn't allow, e.g., `Dual` $\mu$ and $\sigma$ for `Normal`
- Then a lot of work...
# Today:
- Some distributions allow arbitrary parameter types:
- using PDMats:
- MvNormal, MvNormalCanon, GenericMvTDist
- Wishart, InverseWishart
- other:
- Dirichlet, Normal, NormalCanon, Gamma, InverseGamma, Poisson
- for examples see [here](https://github.com/jmxpearson/distribution_diff_tests)
# This month:
- ~40 more distributions — almost all univariates
- PR in progress from @Halmoni100 in my lab
- some special functions in StatsFuns and Base still assume Float64
# Defining a simple model
$$\begin{align}
\sigma_\eta &\sim \mathrm{Gamma}(a_\eta, b_\eta) \\
a_u &\sim \mathcal{N}(\mu_a, \sigma_a) \\
\eta_{tu} &\sim \mathcal{N}(a_u, \sigma_\eta) \\
N_{tu} &\sim \mathrm{Poisson}(e^{\eta_{tu}}) \\
\end{align}$$
```julia
@ELBO begin
@advi_declarations begin
a::Real()[U]
σ::Positive()
η::Real()[T, U]
end
@advi_model begin
for u in 1:U
a[u] ~ Normal(log(15.), 0.1)
end
σ ~ Gamma(1, 1)
for t in 1:T
for u in 1:U
η[t, u] ~ Normal(a[u], σ)
spikes[t, u] ~ Poisson(exp(η[t, u]))
end
end
end
end
```
ELBO (generic function with 1 method)
```julia
using PyPlot
T = 100 # number of time steps
U = 10 # units
baseline = 10.
baseline_sd = 0.10
log_bl = log(baseline)
unit_bl = log_bl + baseline_sd * randn(U);
fr_log = unit_bl' .+ zeros(T, U) # firing rate
σ_η = 0.05
eta = fr_log + σ_η * randn(T, U)
fr = exp(eta);
spikes = Array{Int}(size(fr)...)
for i in eachindex(fr)
spikes[i] = rand(Poisson(fr[i]))
end
```
/Users/jmxp/anaconda/lib/python2.7/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.
warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')
Add some data...
```julia
matshow(spikes', aspect="auto", cmap="gray");
```
Number of parameters
- $a$: 2$U$
- $\sigma$: 2
- $\eta$: 2$TU$
```julia
npars = U * VinDsl.num_pars_advi(RReal()) + VinDsl.num_pars_advi(RPositive()) +
T * U * VinDsl.num_pars_advi(RReal())
xx = 0.1 * randn(npars)
ELBO(xx)
```
-21631.47778954906
```julia
∇L = (storage, x) -> ForwardDiff.gradient!(storage, ELBO, x)
stor = similar(xx)
∇L(stor, xx)
@time ∇L(stor, xx)
```
1.562912 seconds (8.04 M allocations: 679.587 MB, 6.21% gc time)
2022-element Array{Float64,1}:
96.2983
38.2569
2615.62
-7682.52
-417.951
-374.225
-142.068
-58.1995
942.229
-725.058
273.02
-36.452
356.29
⋮
17.0867
-16.259
20.1646
-3.70256
4.73903
3.36335
5.88467
7.44689
13.5717
-6.19642
8.93425
-12.4798
```julia
yy = copy(xx)
gg = similar(yy)
avg_sq_grad = similar(gg)
firstpass = true
elbo = Float64[];
```
```julia
step_size = 0.1
decay = 0.9
eps = 1e-8
nsteps = 160
for jj in 1:nsteps
∇L(gg, yy)
nn = norm(gg)
if nn/length(gg) > 10.
gg /= norm(gg) # crude gradient clipping
end
if firstpass == false
avg_sq_grad = avg_sq_grad * decay + gg.^2 * (1 - decay)
else
avg_sq_grad = gg.^2
firstpass = false
end
yy += step_size * gg ./(sqrt(avg_sq_grad) + eps)
push!(elbo, ELBO(yy))
display("Iteration $(length(elbo)) $(elbo[end])")
end
```
"Iteration 1 -21901.643192506774"
"Iteration 2 -21784.408566088663"
"Iteration 3 -22761.50851929847"
"Iteration 4 -20361.289769864685"
"Iteration 5 -21527.27940310702"
"Iteration 6 -17984.190834400826"
"Iteration 7 -17219.647018742507"
"Iteration 8 -20096.171998652517"
"Iteration 9 -15719.133230555724"
"Iteration 10 -14807.092746386164"
"Iteration 11 -14563.375832332598"
"Iteration 12 -14043.741827632271"
"Iteration 13 -13715.407754031177"
"Iteration 14 -12958.378054523191"
"Iteration 15 -11893.664828330544"
"Iteration 16 -11416.167999849515"
"Iteration 17 -10754.339545075985"
"Iteration 18 -10049.352149340006"
"Iteration 19 -9402.409352119079"
"Iteration 20 -9658.607301884787"
"Iteration 21 -8327.828020622168"
"Iteration 22 -8908.349523085642"
"Iteration 23 -7730.0682606714045"
"Iteration 24 -6830.5324054293405"
"Iteration 25 -7717.520227847391"
"Iteration 26 -6949.580812127499"
"Iteration 27 -6652.553936474353"
"Iteration 28 -5976.654400816284"
"Iteration 29 -6357.3658596257765"
"Iteration 30 -5812.548575407791"
"Iteration 31 -5490.162040191422"
"Iteration 32 -5059.303556650625"
"Iteration 33 -4781.426677334765"
"Iteration 34 -4551.261856632452"
"Iteration 35 -4308.270095528986"
"Iteration 36 -4292.079328215761"
"Iteration 37 -3994.083445482836"
"Iteration 38 -4023.492487795364"
"Iteration 39 -3941.4627487592675"
"Iteration 40 -3773.625767885166"
"Iteration 41 -3554.285420824762"
"Iteration 42 -3471.2177896320422"
"Iteration 43 -3606.801729149118"
"Iteration 44 -3458.755837296366"
"Iteration 45 -3312.558257314446"
"Iteration 46 -3422.1017210636055"
"Iteration 47 -3486.693965133956"
"Iteration 48 -3212.4499594516324"
"Iteration 49 -3295.4129262072634"
"Iteration 50 -2894.734068845817"
"Iteration 51 -3044.5778606475983"
"Iteration 52 -3043.3890236880393"
"Iteration 53 -3096.705300079607"
"Iteration 54 -2800.2168429386597"
"Iteration 55 -2823.6872893306668"
"Iteration 56 -2724.815827991035"
"Iteration 57 -2536.2819942592146"
"Iteration 58 -2813.8252996675283"
"Iteration 59 -2622.910766909394"
"Iteration 60 -2545.5242115244478"
"Iteration 61 -2716.8188182666395"
"Iteration 62 -2547.8211304211463"
"Iteration 63 -2487.1965455038285"
"Iteration 64 -2595.838009852493"
"Iteration 65 -2555.6287417482945"
"Iteration 66 -2496.6940447238594"
"Iteration 67 -2440.753239815258"
"Iteration 68 -2529.6677493249813"
"Iteration 69 -2342.0300345334454"
"Iteration 70 -2422.372643545843"
"Iteration 71 -2468.877089588612"
"Iteration 72 -2275.624680316577"
"Iteration 73 -2340.410675965846"
"Iteration 74 -2363.202887175471"
"Iteration 75 -2298.6586223903637"
"Iteration 76 -2424.8088286744824"
"Iteration 77 -2254.9648424563484"
"Iteration 78 -2256.767659962991"
"Iteration 79 -2268.489729925305"
"Iteration 80 -2321.730033369412"
"Iteration 81 -2263.3048735619204"
"Iteration 82 -2247.089332315065"
"Iteration 83 -2239.594407428411"
"Iteration 84 -2157.729730733962"
"Iteration 85 -2320.5130380270866"
"Iteration 86 -2181.973441053312"
"Iteration 87 -2151.7475185448898"
"Iteration 88 -2136.0583167507343"
"Iteration 89 -2207.967863877849"
"Iteration 90 -2213.9749887090247"
"Iteration 91 -2239.828309206509"
"Iteration 92 -2176.8503933645948"
"Iteration 93 -2127.587617212332"
"Iteration 94 -2206.123110053107"
"Iteration 95 -2096.1898543678385"
"Iteration 96 -2110.604683041632"
"Iteration 97 -2142.6319010119396"
"Iteration 98 -2072.0808398411173"
"Iteration 99 -2082.9017019162893"
"Iteration 100 -2067.3815735554995"
"Iteration 101 -2038.6749828045126"
"Iteration 102 -2008.7349778361252"
"Iteration 103 -2175.2734896075244"
"Iteration 104 -2163.3208904355224"
"Iteration 105 -2062.4252397849423"
"Iteration 106 -2027.6384133912975"
"Iteration 107 -2028.4687089222305"
"Iteration 108 -2092.8963261403974"
"Iteration 109 -2044.4750625303006"
"Iteration 110 -1980.050522057777"
"Iteration 111 -2059.126407304515"
"Iteration 112 -2016.3900277082657"
"Iteration 113 -1961.0534237732666"
"Iteration 114 -2076.216579276364"
"Iteration 115 -1965.2993420464118"
"Iteration 116 -2048.9245640823733"
"Iteration 117 -2052.929140493427"
"Iteration 118 -2007.0027668345772"
"Iteration 119 -1930.4442542333984"
"Iteration 120 -2060.5535192709685"
"Iteration 121 -1949.518181457218"
"Iteration 122 -1952.78904923031"
"Iteration 123 -2000.9738932013327"
"Iteration 124 -2099.798320407929"
"Iteration 125 -2093.411012505818"
"Iteration 126 -1966.694448941153"
"Iteration 127 -1943.7472773324391"
"Iteration 128 -2001.3031484555793"
"Iteration 129 -2032.0800170265766"
"Iteration 130 -1923.241793347899"
"Iteration 131 -1910.3122495794478"
"Iteration 132 -1996.2578334156574"
"Iteration 133 -1957.6409762797805"
"Iteration 134 -1951.2053297682116"
"Iteration 135 -1976.5174852613877"
"Iteration 136 -1908.6343265168282"
"Iteration 137 -1925.3980986570036"
"Iteration 138 -1887.3830020508765"
"Iteration 139 -1918.5944250428877"
"Iteration 140 -1991.5230883643699"
"Iteration 141 -2025.4096647444849"
"Iteration 142 -1925.7028873309512"
"Iteration 143 -1950.1036384208073"
"Iteration 144 -2057.729579572335"
"Iteration 145 -2017.467800560341"
"Iteration 146 -1859.708833399075"
"Iteration 147 -1888.4565974368297"
"Iteration 148 -1939.0910823624286"
"Iteration 149 -1948.19136384097"
"Iteration 150 -1919.7321499975917"
"Iteration 151 -1950.7692580396708"
"Iteration 152 -1890.3043823100202"
"Iteration 153 -1905.7101972997893"
"Iteration 154 -1888.98786569496"
"Iteration 155 -1905.1120590604025"
"Iteration 156 -1940.6845582157573"
"Iteration 157 -2026.209087976532"
"Iteration 158 -1931.0174026672742"
"Iteration 159 -1857.8731177192637"
"Iteration 160 -1905.7424392670107"
```julia
plot(elbo)
```
# Moving forward with ReverseDiff
- forward mode autodiff for $f: \mathbb{R}^n \rightarrow \mathbb{R}$ is $\mathcal{O}(n)$
- reverse mode is only $\mathcal{O}(1)$
- reverse mode lies behind Theano, TensorFlow, Stan
Once distributions are parametric, defining a new type that **just works** becomes easy
- same(-ish) idea as ForwardDiff.jl
- caveat: egregious memory usage
```julia
include("/Users/jmxp/code/rdiff/prototype6.jl")
```
WARNING: using DataStructures.update! in module Main conflicts with an existing identifier.
backprop (generic function with 3 methods)
```julia
@time ∇L(stor, xx)
∇L_r = grad(ELBO, length(xx))
rstor = similar(xx)
∇L_r(xx, rstor);
@time ∇L_r(xx, rstor);
```
1.447133 seconds (8.04 M allocations: 679.577 MB, 6.46% gc time)
0.025645 seconds (155.60 k allocations: 5.186 MB)
# VinDsl needs your help!
Still finalizing API
- docs
- tests
- better ideas!
- Reverse mode!
|
{"hexsha": "b12ac41b1ef9b215c91a8bdc755705c62ec158ee", "size": 102873, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "slides.ipynb", "max_stars_repo_name": "jmxpearson/juliacon-2016-talk", "max_stars_repo_head_hexsha": "0d706c7caec4f32d5f76ee4f4381e593f0fce3f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-06-30T18:09:18.000Z", "max_stars_repo_stars_event_max_datetime": "2016-06-30T18:09:18.000Z", "max_issues_repo_path": "slides.ipynb", "max_issues_repo_name": "jmxpearson/juliacon-2016-talk", "max_issues_repo_head_hexsha": "0d706c7caec4f32d5f76ee4f4381e593f0fce3f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slides.ipynb", "max_forks_repo_name": "jmxpearson/juliacon-2016-talk", "max_forks_repo_head_hexsha": "0d706c7caec4f32d5f76ee4f4381e593f0fce3f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4859708193, "max_line_length": 31870, "alphanum_fraction": 0.6928154132, "converted": true, "num_tokens": 6734}
|
# coding:utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import os
import time
import sys
import datetime
import ctypes
import json
import numpy as np
from sklearn.metrics import roc_auc_score
import copy
from tqdm import tqdm
from openke.config import Tester
class Validator(Tester):
    """
    Validation driver built on top of the OpenKE ``Tester``.

    Feeds validation batches through the wrapped model, hands the raw
    score buffers to the C extension library (Base.so) which accumulates
    ranking statistics, and reads back the hit@10 metric.
    """

    def __init__(self, model = None, data_loader = None):
        #-- GPU use mirrors availability; data_loader is shared with Tester
        super(Validator, self).__init__(data_loader=data_loader,
            use_gpu=torch.cuda.is_available())
        #-- load the shared C library that keeps the validation statistics
        lib_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "../release/Base.so"))
        self.lib = ctypes.cdll.LoadLibrary(lib_path)
        #-- declare C function signatures: (score buffer pointer, batch index)
        self.lib.validHead.argtypes = [ctypes.c_void_p, ctypes.c_int64]
        self.lib.validTail.argtypes = [ctypes.c_void_p, ctypes.c_int64]
        self.lib.getValidHit10.restype = ctypes.c_float
        self.model = model
        self.valid_dataloader = data_loader
        #-- early-stopping bookkeeping (updated by the training loop)
        self.early_stopping_patience = 10
        self.bad_counts = 0
        self.best_hit10 = 0

    def valid(self):
        """Run one full validation pass and return the hit@10 score."""
        self.lib.validInit()
        progress = tqdm(self.valid_dataloader)
        for batch_index, (head_batch, tail_batch) in enumerate(progress):
            #-- score head- and tail-replacement candidates, passing the
            #-- underlying array data pointers to the C library
            head_scores = self.valid_one_step(head_batch)
            self.lib.validHead(head_scores.__array_interface__["data"][0], batch_index)
            tail_scores = self.valid_one_step(tail_batch)
            self.lib.validTail(tail_scores.__array_interface__["data"][0], batch_index)
        return self.lib.getValidHit10()

    def valid_one_step(self, data):
        """Score a single validation batch with the wrapped model."""
        batch = {
            'batch_h': self.to_var(data['batch_h'], self.use_gpu),
            'batch_t': self.to_var(data['batch_t'], self.use_gpu),
            'batch_r': self.to_var(data['batch_r'], self.use_gpu),
            'mode': data['mode']
        }
        return self.model.predict(batch)
|
{"hexsha": "1d96036180ac1e88c535e64d70827cc34bb2fb76", "size": 1888, "ext": "py", "lang": "Python", "max_stars_repo_path": "openke/config/Validator.py", "max_stars_repo_name": "luofeisg/OpenKE-PuTransE", "max_stars_repo_head_hexsha": "0bfefb3917e7479520917febd91a9f4d7353c7fc", "max_stars_repo_licenses": ["CC-BY-4.0", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openke/config/Validator.py", "max_issues_repo_name": "luofeisg/OpenKE-PuTransE", "max_issues_repo_head_hexsha": "0bfefb3917e7479520917febd91a9f4d7353c7fc", "max_issues_repo_licenses": ["CC-BY-4.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openke/config/Validator.py", "max_forks_repo_name": "luofeisg/OpenKE-PuTransE", "max_forks_repo_head_hexsha": "0bfefb3917e7479520917febd91a9f4d7353c7fc", "max_forks_repo_licenses": ["CC-BY-4.0", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.962962963, "max_line_length": 99, "alphanum_fraction": 0.6822033898, "include": true, "reason": "import numpy", "num_tokens": 434}
|
### A Pluto.jl notebook ###
# v0.15.1
using Markdown
using InteractiveUtils
# ╔═╡ 3244173c-e227-11eb-39eb-93a74dce1c9e
using PlutoUI, PDFIO, Taro, WordTokenizers, DeepDiffs
# ╔═╡ 0f5786cb-6430-4a14-bc05-bb127f8b73df
md"""
This notebook does not work!
"""
# ╔═╡ c3353ab9-0ae1-4785-b561-631945710a35
#Taro.init()
# ╔═╡ 74f3335a-db62-49ba-8a62-84b6eebed5a2
function getPDFText(src)
    # Extract all text from the PDF at `src`.
    # Returns (docinfo, buffer): the document's metadata dictionary and an
    # IOBuffer holding the concatenated text of every page.
    handle = pdDocOpen(src)
    info = pdDocGetInfo(handle)
    buf = IOBuffer()
    for pageno in 1:pdDocGetPageCount(handle)
        pdPageExtractText(buf, pdDocGetPage(handle, pageno))
    end
    pdDocClose(handle)
    return info, buf
end
# ╔═╡ 9b83224c-1529-4ab7-9094-ba3b7b3fe3ba
# Path of the first PDF to compare — placeholder; point at a real file.
document1 = "some.pdf"

# ╔═╡ 9318c6dc-dded-403e-8e03-f994c86027c9
# Path of the second PDF to compare — placeholder; point at a real file.
document2 = "other.pdf"
# ╔═╡ cf87add1-239a-4917-b721-8482fae365f2
function readpdf(name)
    # Open the PDF at `name` and return (metadata, text::String).
    meta, buf = getPDFText(name)
    return (meta, String(take!(buf)))
end
# ╔═╡ c71c6c25-c00a-42fc-9df7-5a28984bb69f
# Metadata and raw extracted text of each document.
info1, text1 = readpdf(document1)

# ╔═╡ cb3b3dca-4712-4292-8e54-0f23dc934e38
info2, text2 = readpdf(document2)

# ╔═╡ e4055352-93a6-4b3c-9485-9869800a2ea6
# Split into sentences, then tokenize each sentence (broadcast of
# `tokenize` over the sentence vector).
tokenized_text1 = tokenize.(split_sentences(text1));

# ╔═╡ 0870f966-1242-47b0-aba9-e4f6a1f67727
tokenized_text1

# ╔═╡ eb80a7b3-de56-463b-b889-844417aefba1
tokenized_text2 = tokenize.(split_sentences(text2));

# ╔═╡ f992f24e-b6ce-43bd-a4c3-bf83dba86649
tokenized_text2

# ╔═╡ a2a5d048-6440-4d50-b81b-4a98cf08989d
# Token-level diff of the two documents.
deepdiff(tokenized_text1, tokenized_text2)

# ╔═╡ 71165030-3fe6-4879-89f7-d3f1ce1904b7

# ╔═╡ c102e8e6-2d8f-4582-b85c-30fdd7029be6
# Dump the raw extracted text for manual inspection.
with_terminal() do
	print(text1)
end

# ╔═╡ 50f96bd0-21c9-4f7b-8b84-4b1425648395
with_terminal() do
	print(text2)
end
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
DeepDiffs = "ab62b9b5-e342-54a8-a765-a90f495de1a6"
PDFIO = "4d0d745f-9d9a-592e-8d18-1ad8a0f42b92"
PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
Taro = "61d0e4fa-4e73-5030-88a9-ae4c27b203dd"
WordTokenizers = "796a5d58-b03d-544a-977e-18100b691f6e"
[compat]
DeepDiffs = "~1.2.0"
PDFIO = "~0.1.12"
PlutoUI = "~0.7.9"
Taro = "~0.8.3"
WordTokenizers = "~0.5.6"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
[[AbstractTrees]]
deps = ["Markdown", "Test"]
git-tree-sha1 = "6621d9645702c1c4e6970cc6a3eae440c768000b"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.2.1"
[[AdobeGlyphList]]
deps = ["DelimitedFiles", "Pkg", "Test"]
git-tree-sha1 = "dce62e14d1eb385ef241a05094a7bc9511a276dc"
uuid = "715cd884-8f22-55db-b077-449d1f7eef73"
version = "0.1.1"
[[ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[BinDeps]]
deps = ["Compat", "Libdl", "SHA", "URIParser"]
git-tree-sha1 = "12093ca6cdd0ee547c39b1870e0c9c3f154d9ca9"
uuid = "9e28174c-4ba2-5203-b857-d8d62c4213ee"
version = "0.8.10"
[[BinaryProvider]]
deps = ["Libdl", "Logging", "SHA"]
git-tree-sha1 = "ecdec412a9abc8db54c0efc5548c64dfce072058"
uuid = "b99e7846-7c00-51b0-8f62-c81ae34c0232"
version = "0.5.10"
[[Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "b0b7e8a0d054fada22b64095b46469627a138943"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "2.2.1"
[[DataAPI]]
git-tree-sha1 = "ee400abb2298bd13bfc3df1c412ed228061a2385"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.7.0"
[[DataDeps]]
deps = ["BinaryProvider", "HTTP", "Libdl", "Reexport", "SHA", "p7zip_jll"]
git-tree-sha1 = "4f0e41ff461d42cfc62ff0de4f1cd44c6e6b3771"
uuid = "124859b0-ceae-595e-8997-d05f6a7a8dfe"
version = "0.7.7"
[[DataStructures]]
deps = ["InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "88d48e133e6d3dd68183309877eac74393daa7eb"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.17.20"
[[DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[DeepDiffs]]
git-tree-sha1 = "9824894295b62a6a4ab6adf1c7bf337b3a9ca34c"
uuid = "ab62b9b5-e342-54a8-a765-a90f495de1a6"
version = "1.2.0"
[[DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[HTML_Entities]]
deps = ["StrTables"]
git-tree-sha1 = "c4144ed3bc5f67f595622ad03c0e39fa6c70ccc7"
uuid = "7693890a-d069-55fe-a829-b4a6d304f0ee"
version = "1.0.1"
[[HTTP]]
deps = ["Base64", "Dates", "IniFile", "Logging", "MbedTLS", "NetworkOptions", "Sockets", "URIs"]
git-tree-sha1 = "c6a1fff2fd4b1da29d3dccaffb1e1001244d844e"
uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3"
version = "0.9.12"
[[IniFile]]
deps = ["Test"]
git-tree-sha1 = "098e4d2c533924c921f9f9847274f2ad89e018b8"
uuid = "83e8ac13-25f8-5344-8a64-a9f2b223428f"
version = "0.5.0"
[[InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
version = "1.0.0"
[[JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.3.0"
[[JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "81690084b6198a2e1da36fcfda16eeca9f9f24e4"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.1"
[[JavaCall]]
deps = ["DataStructures", "Dates", "Libdl", "WinReg"]
git-tree-sha1 = "75ec422f88cca591b379a68f85dd24dc5c037291"
uuid = "494afd89-becb-516b-aafa-70d2670c0337"
version = "0.7.7"
[[LabelNumerals]]
deps = ["RomanNumerals", "Test"]
git-tree-sha1 = "03f5757e29f13d76a93416c0b913c19bf8f9baac"
uuid = "2e475f56-4567-5c41-94ff-3f69a29378f9"
version = "0.1.0"
[[LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[LinearAlgebra]]
deps = ["Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[MbedTLS]]
deps = ["Dates", "MbedTLS_jll", "Random", "Sockets"]
git-tree-sha1 = "1c38e51c3d08ef2278062ebceade0e46cefc96fe"
uuid = "739be429-bea8-5141-9913-cc70e7f3736d"
version = "1.0.3"
[[MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[OpenSSL_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "15003dcb7d8db3c6c857fda14891a539a8f2705a"
uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95"
version = "1.1.10+0"
[[OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[PDFIO]]
deps = ["AbstractTrees", "AdobeGlyphList", "BinDeps", "Dates", "DelimitedFiles", "LabelNumerals", "Libdl", "LinearAlgebra", "OpenSSL_jll", "Pkg", "Printf", "Rectangle", "RomanNumerals", "Zlib_jll"]
git-tree-sha1 = "9e793da6e602ba4440caf087adcf890485a4706f"
uuid = "4d0d745f-9d9a-592e-8d18-1ad8a0f42b92"
version = "0.1.12"
[[Parsers]]
deps = ["Dates"]
git-tree-sha1 = "c8abc88faa3f7a3950832ac5d6e690881590d6dc"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "1.1.0"
[[Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[PlutoUI]]
deps = ["Base64", "Dates", "InteractiveUtils", "JSON", "Logging", "Markdown", "Random", "Reexport", "Suppressor"]
git-tree-sha1 = "44e225d5837e2a2345e69a1d1e01ac2443ff9fcb"
uuid = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
version = "0.7.9"
[[Preferences]]
deps = ["TOML"]
git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.2.2"
[[Primes]]
git-tree-sha1 = "afccf037da52fa596223e5a0e331ff752e0e845c"
uuid = "27ebfcd6-29c5-5fa9-bf4b-fb8fc14df3ae"
version = "0.5.0"
[[Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[Rectangle]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "2bf927306e130e4718449a8b2c72a6b71afe96e0"
uuid = "9a9db56c-8f71-5460-add5-cb450131785e"
version = "0.1.2"
[[Reexport]]
git-tree-sha1 = "5f6c21241f0f655da3952fd60aa18477cf96c220"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.1.0"
[[RomanNumerals]]
deps = ["Compat", "Primes"]
git-tree-sha1 = "06e621d4a9eda8d09eab8aade41e85cb93cadae9"
uuid = "37834d88-8936-577c-80c9-1066ecf66832"
version = "0.3.1"
[[SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[StrTables]]
deps = ["Dates"]
git-tree-sha1 = "5998faae8c6308acc25c25896562a1e66a3bb038"
uuid = "9700d1a9-a7c8-5760-9816-a99fda30bb8f"
version = "1.0.1"
[[Suppressor]]
git-tree-sha1 = "a819d77f31f83e5792a76081eee1ea6342ab8787"
uuid = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
version = "0.2.0"
[[TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
version = "1.0.1"
[[Tables]]
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "TableTraits", "Test"]
git-tree-sha1 = "8ed4a3ea724dac32670b062be3ef1c1de6773ae8"
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
version = "1.4.4"
[[Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[Taro]]
deps = ["Dates", "JavaCall", "Tables"]
git-tree-sha1 = "fad39fbbe8a3eb412c87d9ff85e24392b694aabc"
uuid = "61d0e4fa-4e73-5030-88a9-ae4c27b203dd"
version = "0.8.3"
[[Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[URIParser]]
deps = ["Unicode"]
git-tree-sha1 = "53a9f49546b8d2dd2e688d216421d050c9a31d0d"
uuid = "30578b45-9adc-5946-b283-645ec420af67"
version = "0.4.1"
[[URIs]]
git-tree-sha1 = "97bbe755a53fe859669cd907f2d96aee8d2c1355"
uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4"
version = "1.3.0"
[[UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[WinReg]]
deps = ["Test"]
git-tree-sha1 = "808380e0a0483e134081cc54150be4177959b5f4"
uuid = "1b915085-20d7-51cf-bf83-8f477d6f5128"
version = "0.3.1"
[[WordTokenizers]]
deps = ["DataDeps", "HTML_Entities", "StrTables", "Unicode"]
git-tree-sha1 = "01dd4068c638da2431269f49a5964bf42ff6c9d2"
uuid = "796a5d58-b03d-544a-977e-18100b691f6e"
version = "0.5.6"
[[Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
"""
# ╔═╡ Cell order:
# ╟─0f5786cb-6430-4a14-bc05-bb127f8b73df
# ╠═3244173c-e227-11eb-39eb-93a74dce1c9e
# ╠═c3353ab9-0ae1-4785-b561-631945710a35
# ╠═74f3335a-db62-49ba-8a62-84b6eebed5a2
# ╠═9b83224c-1529-4ab7-9094-ba3b7b3fe3ba
# ╠═9318c6dc-dded-403e-8e03-f994c86027c9
# ╠═cf87add1-239a-4917-b721-8482fae365f2
# ╠═c71c6c25-c00a-42fc-9df7-5a28984bb69f
# ╠═cb3b3dca-4712-4292-8e54-0f23dc934e38
# ╠═e4055352-93a6-4b3c-9485-9869800a2ea6
# ╠═0870f966-1242-47b0-aba9-e4f6a1f67727
# ╠═eb80a7b3-de56-463b-b889-844417aefba1
# ╠═f992f24e-b6ce-43bd-a4c3-bf83dba86649
# ╠═a2a5d048-6440-4d50-b81b-4a98cf08989d
# ╠═71165030-3fe6-4879-89f7-d3f1ce1904b7
# ╠═c102e8e6-2d8f-4582-b85c-30fdd7029be6
# ╠═50f96bd0-21c9-4f7b-8b84-4b1425648395
# ╠═79538987-e2bc-442c-bc75-4e9576c0ef73
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
|
{"hexsha": "89f6b3f754fc7f15781d6f539eceb6490f7ab15e", "size": 13463, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "file-compare.jl", "max_stars_repo_name": "StatisticalMice/julia-tutorials", "max_stars_repo_head_hexsha": "73cd9706c75d9544209f2e9321ab6f60d3ae235c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "file-compare.jl", "max_issues_repo_name": "StatisticalMice/julia-tutorials", "max_issues_repo_head_hexsha": "73cd9706c75d9544209f2e9321ab6f60d3ae235c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "file-compare.jl", "max_forks_repo_name": "StatisticalMice/julia-tutorials", "max_forks_repo_head_hexsha": "73cd9706c75d9544209f2e9321ab6f60d3ae235c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8161157025, "max_line_length": 273, "alphanum_fraction": 0.7325261829, "num_tokens": 6400}
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)
import numpy as np
import scipp as sc
import pytest
import ess.choppers as ch
@pytest.fixture
def params():
    """Baseline keyword arguments for ``ch.make_chopper``: a 56 Hz disk
    with six cutout windows given as centers + widths."""
    dim = 'frame'
    cfg = {}
    cfg['frequency'] = sc.scalar(56.0, unit="Hz")
    cfg['phase'] = sc.scalar(0.5, unit='rad')
    cfg['position'] = sc.vector(value=[0., 0., 5.], unit='m')
    cfg['cutout_angles_center'] = sc.linspace(dim=dim,
                                              start=0.25,
                                              stop=2.0 * np.pi,
                                              num=6,
                                              unit='rad')
    cfg['cutout_angles_width'] = sc.linspace(dim=dim,
                                             start=0.1,
                                             stop=0.6,
                                             num=6,
                                             unit='rad')
    cfg['kind'] = sc.scalar('wfm')
    return cfg
def test_make_chopper_bad_widths(params):
    """A negative cutout width must be rejected by make_chopper."""
    params['cutout_angles_width'].values[1] = -3.0
    with pytest.raises(ValueError) as excinfo:
        ch.make_chopper(**params)
    assert str(excinfo.value) == "Negative window width found in chopper cutout angles."
def test_make_chopper_bad_centers(params):
    """Non-monotonic cutout center angles must be rejected."""
    centers = params['cutout_angles_center']
    # Swap the first two entries to break monotonicity.
    centers.values = centers.values[[1, 0, 2, 3, 4, 5]]
    with pytest.raises(ValueError) as excinfo:
        ch.make_chopper(**params)
    assert str(excinfo.value) == "Chopper begin cutout angles are not monotonic."
def test_make_chopper_bad_begin_angles(params):
    """Scrambled begin angles: the width validation fires first."""
    half_width = 0.5 * params['cutout_angles_width']
    begin = params['cutout_angles_center'] - half_width
    end = params['cutout_angles_center'] + half_width
    # Swap the first two begin angles to break monotonicity.
    begin.values = begin.values[[1, 0, 2, 3, 4, 5]]
    with pytest.raises(ValueError) as excinfo:
        ch.make_chopper(frequency=params['frequency'],
                        phase=params['phase'],
                        position=params['position'],
                        cutout_angles_begin=begin,
                        cutout_angles_end=end,
                        kind=params['kind'])
    # The width error is raised before the monotonicity check is reached.
    assert str(excinfo.value) == "Negative window width found in chopper cutout angles."
def test_make_chopper_bad_close_angles(params):
    """Non-monotonic end (closing) cutout angles must be rejected."""
    dim = 'frame'
    begin = sc.array(dims=[dim], values=[0.0, 1.0, 2.0], unit='rad')
    # 4.0 -> 3.0 breaks monotonicity of the closing angles.
    end = sc.array(dims=[dim], values=[4.0, 3.0, 5.0], unit='rad')
    with pytest.raises(ValueError) as excinfo:
        ch.make_chopper(frequency=params['frequency'],
                        phase=params['phase'],
                        position=params['position'],
                        cutout_angles_begin=begin,
                        cutout_angles_end=end,
                        kind=params['kind'])
    assert str(excinfo.value) == "Chopper end cutout angles are not monotonic."
def test_angular_frequency(params):
    """angular_frequency is 2*pi*frequency, in rad times the Hz unit."""
    chopper = ch.make_chopper(**params)
    expected = (2.0 * np.pi * sc.units.rad) * params['frequency']
    assert sc.identical(ch.angular_frequency(chopper), expected)
def test_cutout_angles_from_centers_widths(params):
    """begin/end accessors derive from center -/+ half the width."""
    chopper = ch.make_chopper(**params)
    center = params["cutout_angles_center"]
    half_width = 0.5 * params["cutout_angles_width"]
    assert sc.allclose(ch.cutout_angles_begin(chopper), center - half_width)
    assert sc.allclose(ch.cutout_angles_end(chopper), center + half_width)
def test_cutout_angles_from_begin_end(params):
    """width/center accessors derive from explicit begin/end angles."""
    dim = 'frame'
    # Replace the center+width parametrization with begin+end.
    del params['cutout_angles_center']
    del params['cutout_angles_width']
    begin = sc.linspace(dim=dim, start=0.0, stop=1.5 * np.pi, num=6, unit='rad')
    end = sc.linspace(dim=dim, start=0.1, stop=2.0 * np.pi, num=6, unit='rad')
    params["cutout_angles_begin"] = begin
    params["cutout_angles_end"] = end
    chopper = ch.make_chopper(**params)
    assert sc.allclose(ch.cutout_angles_width(chopper), end - begin)
    assert sc.allclose(ch.cutout_angles_center(chopper), 0.5 * (begin + end))
def test_time_open_closed(params):
    """Window open/close times follow from angles, rotation speed and phase.

    With frequency 0.5/s and begin/end angles at multiples of pi/2, the
    asserted open times are [0.0, 0.5, 1.0] s and close times
    [0.5, 1.0, 1.5] s (converted to microseconds).  Adding a phase of
    2*pi/3 rad shifts every time by 2/3 s, as the second set of
    assertions checks.
    """
    dim = 'frame'
    chopper = ch.make_chopper(
        frequency=sc.scalar(0.5, unit=sc.units.one / sc.units.s),
        phase=sc.scalar(0., unit='rad'),
        position=params['position'],
        cutout_angles_begin=sc.array(dims=[dim],
                                     values=np.pi * np.array([0.0, 0.5, 1.0]),
                                     unit='rad'),
        cutout_angles_end=sc.array(dims=[dim],
                                   values=np.pi * np.array([0.5, 1.0, 1.5]),
                                   unit='rad'),
        kind=params['kind'])
    # Zero phase: times depend only on angles and rotation speed.
    assert sc.allclose(
        ch.time_open(chopper),
        sc.to_unit(sc.array(dims=[dim], values=[0.0, 0.5, 1.0], unit='s'), 'us'))
    assert sc.allclose(
        ch.time_closed(chopper),
        sc.to_unit(sc.array(dims=[dim], values=[0.5, 1.0, 1.5], unit='s'), 'us'))
    # Mutate the phase in place; all times shift by 2/3 s.
    chopper["phase"] = sc.scalar(2.0 * np.pi / 3.0, unit='rad')
    assert sc.allclose(
        ch.time_open(chopper),
        sc.to_unit(
            sc.array(dims=[dim], values=np.array([0.0, 0.5, 1.0]) + 2.0 / 3.0,
                     unit='s'), 'us'))
    assert sc.allclose(
        ch.time_closed(chopper),
        sc.to_unit(
            sc.array(dims=[dim], values=np.array([0.5, 1.0, 1.5]) + 2.0 / 3.0,
                     unit='s'), 'us'))
def test_find_chopper_keys():
    """Coords whose name contains 'chopper' (case-insensitively, per the
    expected 'Chopper_wfm' match) are returned in insertion order."""
    coord_names = [
        'chopper3', 'abc', 'chopper_1', 'sample', 'source', 'Chopper_wfm',
        'chopper0', 'chopper5', 'monitor'
    ]
    da = sc.DataArray(data=sc.scalar('dummy'),
                      coords={name: sc.scalar(0) for name in coord_names})
    expected = ['chopper3', 'chopper_1', 'Chopper_wfm', 'chopper0', 'chopper5']
    assert ch.find_chopper_keys(da) == expected
|
{"hexsha": "8817826e03151b105ba767f1ad36906b5e03a2cc", "size": 6730, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/choppers/chopper_test.py", "max_stars_repo_name": "scipp/ess", "max_stars_repo_head_hexsha": "078e10c53cacf849103f9df0c16c61628e4c65ee", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-18T09:39:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T09:39:19.000Z", "max_issues_repo_path": "tests/choppers/chopper_test.py", "max_issues_repo_name": "scipp/ess", "max_issues_repo_head_hexsha": "078e10c53cacf849103f9df0c16c61628e4c65ee", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 59, "max_issues_repo_issues_event_min_datetime": "2019-08-23T07:31:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T08:52:19.000Z", "max_forks_repo_path": "tests/choppers/chopper_test.py", "max_forks_repo_name": "scipp/ess", "max_forks_repo_head_hexsha": "078e10c53cacf849103f9df0c16c61628e4c65ee", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-08-21T07:25:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T12:07:53.000Z", "avg_line_length": 41.0365853659, "max_line_length": 87, "alphanum_fraction": 0.5215453195, "include": true, "reason": "import numpy", "num_tokens": 1586}
|
import vtk
from numpy import zeros
import matplotlib.pyplot as plt
# Read a legacy-format VTK unstructured grid and pull point coordinates,
# velocity and pressure arrays into flat numpy arrays for plotting.
filename = 'test.vtk'
reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName(filename)
reader.Update()
# Optional cutting-plane extraction, currently disabled:
# plane = vtk.vtkPlane()
# plane.SetOrigin(0, 0, 0.5)
# plane.SetNormal(0, 0, 1)
# cutter = vtk.vtkFiltersCorePython.vtkCutter()
# cutter.SetCutFunction(plane)
# cutter.SetInputConnection(reader.GetOutputPort())
# cutter.Update()
# data = cutter.GetOutput()
data = reader.GetOutput()
# triangles = data.GetPolys().GetData()
points = data.GetPoints()
# Interpolate cell-centred data onto the points so per-point arrays exist.
mapper = vtk.vtkCellDataToPointData()
mapper.AddInputData(data)
mapper.Update()
# NOTE(review): arrays are fetched by index — assumes array 1 is velocity
# and array 2 is pressure in this file; confirm against the .vtk contents.
vels = mapper.GetOutput().GetPointData().GetArray(1)
press = mapper.GetOutput().GetPointData().GetArray(2)
# ntri = triangles.GetNumberOfTuples() / 4
npts = points.GetNumberOfPoints()
nvls = vels.GetNumberOfTuples()
npress = press.GetNumberOfTuples()
# tri = zeros((ntri, 3))
x = zeros(npts)
y = zeros(npts)
ux = zeros(nvls)
uy = zeros(nvls)
px = zeros(npress)
py = zeros(npress)
# for i in range(0, ntri):
#     tri[i, 0] = triangles.GetTuple(4 * i + 1)[0]
#     tri[i, 1] = triangles.GetTuple(4 * i + 2)[0]
#     tri[i, 2] = triangles.GetTuple(4 * i + 3)[0]
# Fix: xrange is Python 2 only (NameError on Python 3) — use range.
for i in range(npts):
    pt = points.GetPoint(i)
    x[i] = pt[0]
    y[i] = pt[1]
for i in range(nvls):
    U = vels.GetTuple(i)
    ux[i] = U[0]
    uy[i] = U[1]
for i in range(npress):
    # Fix: this loop previously read vels.GetTuple(i) (copy-paste bug),
    # so px/py held velocity components instead of pressure values.
    p = press.GetTuple(i)
    px[i] = p[0]
    # Guard: a scalar pressure array has only one component per tuple.
    py[i] = p[1] if len(p) > 1 else 0.0
print('test')
# # Mesh
# plt.figure(figsize=(8, 8))
# plt.triplot(x, y, tri)
# plt.gca().set_aspect('equal')
# # Velocity x-component
# plt.figure(figsize=(8, 8))
# plt.tricontourf(x, y, tri, ux, 16)
# plt.tricontour(x, y, tri, ux, 16)
|
{"hexsha": "f91845dc64dbe1ed309f2068a8752be86217c64e", "size": 1676, "ext": "py", "lang": "Python", "max_stars_repo_path": "readVTK.py", "max_stars_repo_name": "hdillinger/parasnip", "max_stars_repo_head_hexsha": "05307140cc21e2de9a472da799fc0983f5fd3d28", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "readVTK.py", "max_issues_repo_name": "hdillinger/parasnip", "max_issues_repo_head_hexsha": "05307140cc21e2de9a472da799fc0983f5fd3d28", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "readVTK.py", "max_forks_repo_name": "hdillinger/parasnip", "max_forks_repo_head_hexsha": "05307140cc21e2de9a472da799fc0983f5fd3d28", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7662337662, "max_line_length": 53, "alphanum_fraction": 0.6587112172, "include": true, "reason": "from numpy", "num_tokens": 550}
|
#!/usr/bin/env python
# coding: utf-8
# <h1> <i> <u> Student Perception Analysis using Multiple Linear Regression
# ## Importing libaries and understanding the data
# In[1]:
import numpy as np
import pandas as pd
from pandas.plotting import table
import matplotlib.pyplot as plt
import matplotlib.colors as pltcol
import matplotlib.ticker as ticker
import seaborn as sns
# get_ipython().run_line_magic('matplotlib', 'inline')
import math
import statsmodels.api as sm
from statsmodels.formula.api import ols
import statsmodels.tools.eval_measures as ev
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from patsy import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.metrics import mean_absolute_error, make_scorer
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler,MinMaxScaler
# In[2]:
# Load the survey responses (one row per student) and report the count.
data = pd.read_csv('StudentData.csv')
num_records = len(data)
print(num_records)

# In[3]:

# Column names, dtypes and non-null counts.
print(data.info())
# Data is processed through feature engineering techniques using bivariable analysis
# <class 'pandas.core.frame.DataFrame'>
# RangeIndex: 119 entries, 0 to 118
# Data columns (total 17 columns):
# # Column Non-Null Count Dtype
# --- ------ -------------- -----
# 0 Gender 119 non-null object
# 1 Age 119 non-null int64
# 2 Education 119 non-null object
# 3 ss 119 non-null float64
# 4 ocd 119 non-null float64
# 5 eocd 119 non-null int64
# 6 tdu 119 non-null float64
# 7 doc 119 non-null float64
# 8 ic 119 non-null object
# 9 ac 119 non-null object
# 10 buc 119 non-null object
# 11 poc 119 non-null object
# 12 ata 119 non-null object
# 13 smu 119 non-null object
# 14 bc 119 non-null object
# 15 ce 119 non-null object
# 16 Marks 119 non-null float64
# dtypes: float64(5), int64(2), object(10)
# memory usage: 15.9+ KB
# In[4]:
# Summary statistics of the numeric columns (mean/std/quartiles/min/max).
summary_stats = data.describe()
print(summary_stats)
# comparing median, max and min, there may be outliers in Age, tdu, doc and marks
# Age ss ocd eocd tdu doc Marks
# count 119.000000 119.000000 119.000000 119.000000 119.000000 119.000000 119.000000
# mean 20.756303 2.882353 4.764706 4.529412 2.321849 1.044538 81.468067
# std 2.266059 1.407766 1.555231 1.604145 1.031139 0.515513 9.391765
# min 17.000000 1.000000 1.000000 1.000000 0.500000 0.000000 54.000000
# 25% 19.000000 2.000000 4.000000 4.000000 1.500000 0.725000 78.000000
# 50% 21.000000 3.000000 5.000000 4.000000 2.000000 1.000000 81.000000
# 75% 22.000000 4.000000 6.000000 6.000000 3.000000 1.000000 89.500000
# max 29.000000 6.000000 8.000000 8.000000 6.000000 3.000000 96.000000
# #### ocd = Online class duration (H0)
# #### eocd = expected online class duration
# #### tdu = Total data usage
# #### ss = self study
# #### doc = Data online classes (H0)
# #### ac = Academic Outcome (H0)
# #### is = Internet speed (H0)
# <!-- (5 point likert scale data) to measure satisfaction-->
# #### buc = beter in understanding the concept (H0)
# <!-- (ordinal scale) to measure degree fo occurence-->
# #### poc = Participation in online classes (H0)
# #### ata = availability of teacher's assistance (H0)
# #### smu = social media usage (H0)
# #### bc = bored in class (H0)
# #### ce = chear in exams (H0)
#
# after testing different models:
# buc variable has no impact on response variable
# doc has many outliers and also not impacting the variable
# In[5]:
# Print the value distribution of every column, separated by rules.
for col in data.columns:
    print(col)
    print(data[col].value_counts())
    print('------------------------------')
# Gender
# Male 75
# Female 44
# Name: Gender, dtype: int64
# ------------------------------
# Age
# 20 21
# 19 20
# 21 19
# 22 16
# 23 14
# 18 12
# 17 6
# 24 5
# 26 3
# 29 1
# 27 1
# 25 1
# Name: Age, dtype: int64
# ------------------------------
# Education
# ug 71
# pg 41
# phd 7
# Name: Education, dtype: int64
# ------------------------------
# ss
# 2.0 34
# 3.0 32
# 1.0 18
# 4.0 14
# 5.0 12
# 6.0 7
# 1.5 2
# Name: ss, dtype: int64
# ------------------------------
# ocd
# 5.0 37
# 6.0 28
# 4.0 25
# 8.0 6
# 2.0 6
# 1.0 5
# 3.0 5
# 7.0 3
# 4.5 2
# 1.5 1
# 2.5 1
# Name: ocd, dtype: int64
# ------------------------------
# eocd
# 4 38
# 6 28
# 5 23
# 3 10
# 1 7
# 8 6
# 2 6
# 7 1
# Name: eocd, dtype: int64
# ------------------------------
# tdu
# 2.0 34
# 3.0 27
# 1.5 20
# 1.0 12
# 4.0 10
# 2.5 7
# 5.0 3
# 0.5 3
# 1.8 1
# 3.5 1
# 6.0 1
# Name: tdu, dtype: int64
# ------------------------------
# doc
# 1.00 56
# 1.50 14
# 0.50 12
# 0.70 10
# 2.00 7
# 0.80 4
# 3.00 3
# 1.20 2
# 0.10 2
# 0.90 1
# 1.70 1
# 0.40 1
# 0.00 1
# 0.30 1
# 0.75 1
# 0.65 1
# 0.60 1
# 0.20 1
# Name: doc, dtype: int64
# ------------------------------
# ic
# g 57
# tb 27
# no 13
# b 13
# tw 9
# Name: ic, dtype: int64
# ------------------------------
# ac
# No 61
# Yes 58
# Name: ac, dtype: int64
# ------------------------------
# buc
# d 68
# n 36
# a 15
# Name: buc, dtype: int64
# ------------------------------
# poc
# a1 76
# st1 27
# n1 16
# Name: poc, dtype: int64
# ------------------------------
# ata
# a2 48
# st2 39
# n2 32
# Name: ata, dtype: int64
# ------------------------------
# smu
# a3 52
# st3 38
# n3 29
# Name: smu, dtype: int64
# ------------------------------
# bc
# st4 55
# a4 39
# n4 25
# Name: bc, dtype: int64
# ------------------------------
# ce
# a5 53
# n5 41
# st5 25
# Name: ce, dtype: int64
# ------------------------------
# Marks
# 80.0 17
# 90.0 11
# 85.0 10
# 70.0 7
# 81.0 6
# 92.0 5
# 82.0 5
# 95.0 5
# 79.0 4
# 78.0 4
# 86.0 4
# 60.0 4
# 89.0 3
# 96.0 3
# 87.0 3
# 94.0 3
# 68.0 3
# 88.0 2
# 65.0 2
# 91.0 2
# 67.0 2
# 72.0 2
# 76.0 2
# 75.0 1
# 83.7 1
# 73.0 1
# 62.0 1
# 54.0 1
# 77.0 1
# 71.0 1
# 69.0 1
# 84.0 1
# 93.0 1
# Name: Marks, dtype: int64
# ------------------------------
# ## Exploratory Data Analysis
# In[6]:
# Correlation heat map of the numeric columns, saved for the report.
corr_matrix = data.corr()
plt.figure(figsize=(12, 10))
sns.heatmap(data=corr_matrix, annot=True, vmin=-1, cmap='winter')
plt.savefig('edaImages/HeatMap.jpeg')
# ss, ocd, eocd, doc has cosiderable correlation
# selected these variables and validating using exploratory data analysis considering ocd and eocd has significant correlation, colleniearity must be removed
# ### EDA / Univariate
# To detect outliers or anomolies in the data to manipulate accordingly by comparing using bivariate data analysis
# In[7]:
# Univariate EDA: line plot + histogram per predictor, each saved as JPEG.
plt.figure(figsize=(10, 10))
data.Age.plot()
plt.savefig('edaImages/AgeLinePlot.jpeg')
# Age is fairly consistent; ages 27-30 may be outliers
# (few responses from PhD students in that range).

# In[8]:

plt.figure(figsize=(10, 10))
data['ss'].plot()
plt.savefig("edaImages/SelfStudyLinePlot.jpeg")
# Consistent, no obvious outliers; possibly right skewed.

# In[9]:

plt.figure(figsize=(10, 10))
sns.histplot(data=data, x='ss', binwidth=0.9)
plt.savefig("edaImages/SelfStudyHistPlot.jpeg")

# In[10]:

plt.figure(figsize=(10, 10))
data['ocd'].plot()
plt.savefig("edaImages/ocdLinePlot.jpeg")
# Consistent; possible outliers around 1.

# In[11]:

plt.figure(figsize=(10, 10))
sns.histplot(data=data, x='ocd', binwidth=1.4)
plt.savefig("edaImages/ocdHistPlot.jpeg")
# Left skewed.

# In[12]:

plt.figure(figsize=(10, 10))
data['eocd'].plot()
plt.savefig("edaImages/eocdLinePlot.jpeg")
# Consistent; possible outliers at 0.

# In[13]:

plt.figure(figsize=(10, 10))
sns.histplot(data=data, x='eocd', binwidth=0.9, kde=True)
plt.savefig("edaImages/eocdHistPlot.jpeg")
# Possibly left skewed, with most mass on the right of the distribution.

# In[14]:

plt.figure(figsize=(10, 10))
data['tdu'].plot()
plt.savefig("edaImages/tduLinePlot.jpeg")
# Outlier at 6; possibly right skewed.

# In[15]:

plt.figure(figsize=(10, 10))
sns.histplot(data=data, x='tdu', binwidth=1)
plt.savefig("edaImages/tduHistPlot.jpeg")
# Right skewed with outliers at the right tail.

# In[16]:

plt.figure(figsize=(10, 10))
data['doc'].plot()
plt.savefig("edaImages/docLinePlot.jpeg")
# Possible outliers at 0 and 3; possibly right skewed.

# In[17]:

plt.figure(figsize=(10, 10))
sns.histplot(data=data, x='doc', binwidth=0.5)
plt.savefig("edaImages/docHistPlot.jpeg")
# In[18]:
plt.figure(figsize=(12, 10)) # code
data.drop('Marks', axis=1).boxplot(grid = False)
plt.xticks(size=11);
plt.yticks(size=13);
plt.xlabel('Predictor variables')
plt.title('Box plot for outlier analysis', size=20)
plt.savefig("edaImages/MarksdBoxPlot.jpeg")
# Inter quartile range
# ### EDA / Bivariate data analysis
# Compare the response variable with avialable ordianal variables to hypothesise the impact and to select the variable for predicting the response variable.
# Marks distribution stacked by Gender.
plt.figure(figsize=(10, 10)) # code
sns.histplot(x=data['Marks'], hue=data['Gender'], multiple='stack', binwidth=5)
plt.savefig("edaImages/GenderBiHistPlot.jpeg")
# The Gender ordinal variable has no significant factoring impact on the
# response variable. Variable not selected.
# In[19]:
plt.figure(figsize=(10, 10))
sns.histplot(data=data, x = 'Marks', bins=10, hue= 'Education', multiple='stack');
plt.savefig("edaImages/EducationBiHistPlot.jpeg")
# Due to less availability of data from PhD students and no significant
# difference in impacting the response variable, the variable is not selected.
# While testing models, PG students had lower marks compared to other grads;
# even though it is not significant, it helped increase accuracy by 2% more.
# In[20]:
plt.figure(figsize=(10, 10))
sns.histplot(data=data, x='Marks', hue='ic', multiple='stack');
plt.legend( fontsize='x-large', title = "Internet speed", loc='upper left')
plt.savefig("edaImages/InternetSpeedBiHistPlot.jpeg")
# Internet speed has an impact on the response variable: people with the best
# and good internet connections are more likely to get good marks and agree
# that online classes are better. Variable selected.
# In[21]:
plt.figure(figsize=(10, 10))
sns.histplot(data=data, x='Marks', hue='ac', multiple='stack', hue_order=['Yes', 'No'])
plt.savefig("edaImages/acBiHistPlot.jpeg")
# Academic outcome has a significant impact on the response variable.
# Variable selected.
# In[22]:
plt.figure(figsize=(10, 10))
sns.histplot(data=data, x='Marks', hue='buc', multiple='stack')
plt.savefig("edaImages/bucBiHistPlot.jpeg")
# Even though there is no significant difference of impact, most of the
# students with marks above 80 agreed that online learning is better than
# offline learning. Variable selected.
# In[23]:
# Marks distribution stacked by poc.
plt.figure(figsize=(10, 10))
sns.histplot(data=data, x='Marks', hue='poc', multiple='stack');
# BUG FIX: the output path previously ended with ';' instead of the '.jpeg'
# extension used by every other EDA figure in this script.
plt.savefig("edaImages/pocBiHistPlot.jpeg")
# No impact on the response variable. Variable not selected.
# In[24]:
plt.figure(figsize=(10, 10))
sns.histplot(data=data, x='Marks', hue='ata', multiple='stack')
plt.savefig("edaImages/ataBiHistPlot.jpeg")
# The higher the marks, the more people agreed they are getting teacher
# assistance. Even though there is no significant impact, the difference in
# acceptance in the good-marks region can impact the response moderately.
# Variable selected.
# In[25]:
plt.figure(figsize=(10, 10))
sns.histplot(data=data, x='Marks', hue='smu', multiple='stack')
plt.savefig("edaImages/smuBiHistPlot.jpeg")
# Some people with marks above 75 have not used social media.
# Variable selected.
# In[26]:
plt.figure(figsize=(10, 10))
sns.histplot(data=data, x='Marks', hue= 'bc', multiple='stack')
plt.savefig("edaImages/bcBiHistPlot.jpeg")
# Some people with marks above 75 have never got bored in online classes.
# Variable selected.
# In[27]:
plt.figure(figsize=(10, 10))
sns.histplot(data=data, x='Marks', hue='ce', multiple='stack')
plt.savefig("edaImages/ceBiHistPlot.jpeg")
# Some students with more than 75 marks say they never cheated in exams.
# ## Feature Engineering
# Missing values and anomalies were already processed and manipulated successfully.
# In[29]:
# ## Dummy-variable encoding
# One-hot encode each categorical predictor with the first level dropped as
# the baseline, appending the indicator columns onto `data`. The encoding
# order matches the original cell-by-cell encoding (Gender ... ce); Gender and
# poc indicators are generated for completeness even though those variables
# were not selected during the EDA above.
print(data.info())
for categorical_column in ['Gender', 'Education', 'ic', 'ac', 'buc',
                           'poc', 'ata', 'smu', 'bc', 'ce']:
    temp = pd.get_dummies(data[categorical_column], drop_first=True)
    data = pd.concat([data, temp], axis=1)
# # MLR model
# In[39]:
# Fit the final multiple linear regression on the selected numeric predictors
# plus the dummy-encoded categorical indicators created above.
model = ols('Marks ~ ss + doc + d + n + phd + ug + eocd + Yes + g + no + tw + tb + n2 + n3 + n4 + n5 + st2 + st3 + st4 + st5', data).fit();
print(model.params)
# 5th model selected:
# 13 dependent variables
# 4 numeric variables
# In[40]:
# Full regression summary for the fitted model.
print(model.summary2())
# Accuracy (R-squared) per model iteration:
# model 1 64.7%
# model 2 68.2%
# model 3 62.0%
# model 4 78.6%
# model 5 81.2%
# In[41]:
def evaluateModel(model):
    """Print the residual sum of squares and R-squared of a fitted OLS model.

    Relies on the module-level ``data`` DataFrame for the observed Marks.
    """
    residuals = data.Marks - model.predict()
    print("RSS = ", (residuals ** 2).sum())
    print("R2 = ", model.rsquared)
# In[42]:
# Report fit statistics for the final model.
evaluateModel(model);
# Our model is 81.2% accurate.
# In[ ]:
# # MLR model and Variable selection (stepwise method)
# The numeric variables are selected using the forward variable selection
# method, and the categorical variables are removed after building the model
# with all the categorical variables available. The ordinal-scale variables
# are removed based on the significance values from the summary table, AIC,
# BIC and adjusted R-squared values.
# In[33]:
# Model 1: all 16 predictor variables.
temp = ols('Marks ~ Gender + Age + Education + ss + ocd + eocd + tdu + doc + ic + ac + buc + poc + ata + smu+ bc + ce', data);
model1= temp.fit()
print(model1.params)
# 16 predictor variables
# Intercept 63.627567
# Gender[T.Male] -0.416923
# Education[T.phd] -8.763488
# Education[T.ug] 1.472859
# ic[T.g] 1.584379
# ic[T.no] -0.855523
# ic[T.tb] 1.994448
# ic[T.tw] -10.418056
# ac[T.Yes] 2.604965
# buc[T.d] 0.456602
# buc[T.n] 0.541268
# poc[T.n1] -0.189289
# poc[T.st1] -1.335949
# ata[T.n2] -1.364864
# ata[T.st2] -0.760046
# smu[T.n3] -1.098206
# smu[T.st3] 0.361447
# bc[T.n4] 0.791058
# bc[T.st4] 0.331018
# ce[T.n5] 1.008146
# ce[T.st5] -0.819092
# Age 0.372708
# ss 3.708051
# ocd -0.592550
# eocd 0.079573
# tdu 0.090725
# doc 0.140157
# dtype: float64
# In[34]:
# FIX: a bare `model1.summary2()` expression only renders inside a notebook;
# in a plain script it is a no-op, so print it explicitly.
print(model1.summary2())
# Model: OLS Adj. R-squared: 0.770
# Dependent Variable: Marks AIC: 719.2200
# Date: 2021-12-03 19:43 BIC: 794.2564
# No. Observations: 119 Log-Likelihood: -332.61
# Df Model: 26 F-statistic: 16.20
# Df Residuals: 92 Prob (F-statistic): 4.02e-24
# R-squared: 0.821 Scale: 20.278
# Coef. Std.Err. t P>|t| [0.025 0.975]
# Intercept 63.6276 8.5121 7.4749 0.0000 46.7218 80.5334
# Gender[T.Male] -0.4169 0.9992 -0.4172 0.6775 -2.4015 1.5676
# Education[T.phd] -8.7635 2.5792 -3.3978 0.0010 -13.8860 -3.6410
# Education[T.ug] 1.4729 1.3424 1.0972 0.2754 -1.1932 4.1389
# ic[T.g] 1.5844 1.4713 1.0769 0.2844 -1.3377 4.5065
# ic[T.no] -0.8555 2.0057 -0.4265 0.6707 -4.8391 3.1281
# ic[T.tb] 1.9944 1.6636 1.1989 0.2337 -1.3096 5.2985
# ic[T.tw] -10.4181 2.1230 -4.9071 0.0000 -14.6346 -6.2015
# ac[T.Yes] 2.6050 1.2372 2.1056 0.0380 0.1479 5.0621
# buc[T.d] 0.4566 1.6145 0.2828 0.7780 -2.7499 3.6631
# buc[T.n] 0.5413 1.5452 0.3503 0.7269 -2.5276 3.6102
# poc[T.n1] -0.1893 1.5374 -0.1231 0.9023 -3.2426 2.8641
# poc[T.st1] -1.3359 1.1268 -1.1857 0.2388 -3.5738 0.9019
# ata[T.n2] -1.3649 1.3655 -0.9995 0.3202 -4.0770 1.3472
# ata[T.st2] -0.7600 1.1327 -0.6710 0.5039 -3.0097 1.4896
# smu[T.n3] -1.0982 1.4230 -0.7718 0.4422 -3.9244 1.7280
# smu[T.st3] 0.3614 1.2063 0.2996 0.7651 -2.0343 2.7572
# bc[T.n4] 0.7911 1.7399 0.4547 0.6504 -2.6646 4.2467
# bc[T.st4] 0.3310 1.4556 0.2274 0.8206 -2.5599 3.2220
# ce[T.n5] 1.0081 1.2228 0.8245 0.4118 -1.4204 3.4367
# ce[T.st5] -0.8191 1.3487 -0.6073 0.5451 -3.4978 1.8596
# Age 0.3727 0.3638 1.0244 0.3083 -0.3499 1.0953
# ss 3.7081 0.4473 8.2908 0.0000 2.8198 4.5963
# ocd -0.5925 0.4459 -1.3289 0.1872 -1.4781 0.2930
# eocd 0.0796 0.4573 0.1740 0.8622 -0.8287 0.9878
# tdu 0.0907 0.5010 0.1811 0.8567 -0.9042 1.0857
# doc 0.1402 1.1346 0.1235 0.9020 -2.1132 2.3936
# Omnibus: 0.217 Durbin-Watson: 1.243
# Prob(Omnibus): 0.897 Jarque-Bera (JB): 0.369
# Skew: -0.076 Prob(JB): 0.831
# Kurtosis: 2.773 Condition No.: 464
#
# In[35]:
# Model 2: drop eocd and tdu.
temp = ols('Marks ~ Gender + Age + Education + ss + ocd + doc + ic + ac + buc + poc + ata + smu+ bc + ce', data);
model2= temp.fit()
print(model2.params)
# The ocd and tdu variables have high correlation with eocd, which may cause
# the model to overfit; ocd has higher significance than the other two, and
# eocd and tdu have coefficients near 0, so eocd and tdu are removed.
# No change in accuracy.
# Intercept 64.332753
# Gender[T.Male] -0.433479
# Education[T.phd] -8.761563
# Education[T.ug] 1.480654
# ic[T.g] 1.576066
# ic[T.no] -0.886577
# ic[T.tb] 2.002950
# ic[T.tw] -10.388077
# ac[T.Yes] 2.592704
# buc[T.d] 0.493687
# buc[T.n] 0.563937
# poc[T.n1] -0.276198
# poc[T.st1] -1.358164
# ata[T.n2] -1.310903
# ata[T.st2] -0.732104
# smu[T.n3] -1.068532
# smu[T.st3] 0.338264
# bc[T.n4] 0.898165
# bc[T.st4] 0.434297
# ce[T.n5] 0.907229
# ce[T.st5] -0.864698
# Age 0.351614
# ss 3.712318
# ocd -0.555764
# doc 0.207421
# dtype: float64
# In[36]:
# FIX: a bare `model2.summary2()` expression only renders inside a notebook;
# in a plain script it is a no-op, so print it explicitly.
print(model2.summary2())
# Model: OLS Adj. R-squared: 0.775
# Dependent Variable: Marks AIC: 715.3344
# Date: 2021-12-03 19:43 BIC: 784.8125
# No. Observations: 119 Log-Likelihood: -332.67
# Df Model: 24 F-statistic: 17.91
# Df Residuals: 94 Prob (F-statistic): 2.32e-25
# R-squared: 0.821 Scale: 19.866
# Coef. Std.Err. t P>|t| [0.025 0.975]
# Intercept 64.3328 7.9473 8.0950 0.0000 48.5533 80.1122
# Gender[T.Male] -0.4335 0.9845 -0.4403 0.6607 -2.3883 1.5213
# Education[T.phd] -8.7616 2.5178 -3.4799 0.0008 -13.7607 -3.7624
# Education[T.ug] 1.4807 1.3258 1.1168 0.2669 -1.1518 4.1131
# ic[T.g] 1.5761 1.4537 1.0842 0.2811 -1.3102 4.4624
# ic[T.no] -0.8866 1.9819 -0.4473 0.6557 -4.8216 3.0485
# ic[T.tb] 2.0029 1.6414 1.2203 0.2254 -1.2561 5.2620
# ic[T.tw] -10.3881 2.0986 -4.9499 0.0000 -14.5550 -6.2212
# ac[T.Yes] 2.5927 1.2004 2.1599 0.0333 0.2093 4.9761
# buc[T.d] 0.4937 1.5820 0.3121 0.7557 -2.6474 3.6347
# buc[T.n] 0.5639 1.5243 0.3700 0.7122 -2.4626 3.5905
# poc[T.n1] -0.2762 1.4916 -0.1852 0.8535 -3.2378 2.6854
# poc[T.st1] -1.3582 1.1114 -1.2221 0.2247 -3.5648 0.8484
# ata[T.n2] -1.3109 1.3392 -0.9789 0.3302 -3.9700 1.3481
# ata[T.st2] -0.7321 1.1168 -0.6555 0.5137 -2.9495 1.4853
# smu[T.n3] -1.0685 1.4050 -0.7605 0.4488 -3.8582 1.7211
# smu[T.st3] 0.3383 1.1807 0.2865 0.7751 -2.0061 2.6826
# bc[T.n4] 0.8982 1.6532 0.5433 0.5882 -2.3842 4.1806
# bc[T.st4] 0.4343 1.3738 0.3161 0.7526 -2.2933 3.1619
# ce[T.n5] 0.9072 1.1611 0.7813 0.4366 -1.3982 3.2127
# ce[T.st5] -0.8647 1.3263 -0.6520 0.5160 -3.4980 1.7686
# Age 0.3516 0.3466 1.0144 0.3130 -0.3366 1.0398
# ss 3.7123 0.4404 8.4302 0.0000 2.8380 4.5867
# ocd -0.5558 0.3740 -1.4860 0.1406 -1.2984 0.1868
# doc 0.2074 1.0257 0.2022 0.8402 -1.8292 2.2440
# Omnibus: 0.253 Durbin-Watson: 1.230
# Prob(Omnibus): 0.881 Jarque-Bera (JB): 0.398
# Skew: -0.089 Prob(JB): 0.820
# Kurtosis: 2.779 Condition No.: 428
#
# In[37]:
# Model 3: additionally drop doc.
temp = ols('Marks ~ Education + Age + Gender + ss + poc + ocd + ic + ac + buc + ata + smu+ bc + ce', data);
model3= temp.fit()
print(model3.params)
# The doc variable has low significance, so it is removed.
# Intercept 64.394123
# Education[T.phd] -8.775505
# Education[T.ug] 1.522060
# Gender[T.Male] -0.472347
# poc[T.n1] -0.226998
# poc[T.st1] -1.309413
# ic[T.g] 1.540754
# ic[T.no] -0.933221
# ic[T.tb] 1.988846
# ic[T.tw] -10.441992
# ac[T.Yes] 2.603730
# buc[T.d] 0.500343
# buc[T.n] 0.580247
# ata[T.n2] -1.296481
# ata[T.st2] -0.711714
# smu[T.n3] -1.093295
# smu[T.st3] 0.304921
# bc[T.n4] 0.877783
# bc[T.st4] 0.458352
# ce[T.n5] 0.936234
# ce[T.st5] -0.808492
# Age 0.350700
# ss 3.725790
# ocd -0.532463
# dtype: float64
# In[38]:
# FIX: a bare `model3.summary2()` expression only renders inside a notebook;
# in a plain script it is a no-op, so print it explicitly.
print(model3.summary2())
# Model: OLS Adj. R-squared: 0.777
# Dependent Variable: Marks AIC: 713.3862
# Date: 2021-12-03 19:43 BIC: 780.0851
# No. Observations: 119 Log-Likelihood: -332.69
# Df Model: 23 F-statistic: 18.88
# Df Residuals: 95 Prob (F-statistic): 5.36e-26
# R-squared: 0.821 Scale: 19.665
# Coef. Std.Err. t P>|t| [0.025 0.975]
# Intercept 64.3941 7.9013 8.1498 0.0000 48.7081 80.0801
# Education[T.phd] -8.7755 2.5041 -3.5044 0.0007 -13.7468 -3.8042
# Education[T.ug] 1.5221 1.3033 1.1678 0.2458 -1.0653 4.1094
# Gender[T.Male] -0.4723 0.9607 -0.4917 0.6241 -2.3796 1.4349
# poc[T.n1] -0.2270 1.4642 -0.1550 0.8771 -3.1337 2.6797
# poc[T.st1] -1.3094 1.0794 -1.2131 0.2281 -3.4523 0.8335
# ic[T.g] 1.5408 1.4358 1.0731 0.2860 -1.3097 4.3913
# ic[T.no] -0.9332 1.9584 -0.4765 0.6348 -4.8212 2.9548
# ic[T.tb] 1.9888 1.6316 1.2189 0.2259 -1.2503 5.2280
# ic[T.tw] -10.4420 2.0711 -5.0418 0.0000 -14.5536 -6.3303
# ac[T.Yes] 2.6037 1.1931 2.1824 0.0315 0.2352 4.9723
# buc[T.d] 0.5003 1.5736 0.3180 0.7512 -2.6237 3.6244
# buc[T.n] 0.5802 1.5145 0.3831 0.7025 -2.4264 3.5869
# ata[T.n2] -1.2965 1.3305 -0.9744 0.3323 -3.9380 1.3450
# ata[T.st2] -0.7117 1.1066 -0.6432 0.5217 -2.9086 1.4852
# smu[T.n3] -1.0933 1.3926 -0.7851 0.4343 -3.8579 1.6713
# smu[T.st3] 0.3049 1.1632 0.2621 0.7938 -2.0044 2.6143
# bc[T.n4] 0.8778 1.6417 0.5347 0.5941 -2.3815 4.1371
# bc[T.st4] 0.4584 1.3617 0.3366 0.7372 -2.2449 3.1616
# ce[T.n5] 0.9362 1.1464 0.8167 0.4162 -1.3397 3.2122
# ce[T.st5] -0.8085 1.2903 -0.6266 0.5324 -3.3700 1.7530
# Age 0.3507 0.3448 1.0170 0.3117 -0.3339 1.0353
# ss 3.7258 0.4331 8.6028 0.0000 2.8660 4.5856
# ocd -0.5325 0.3540 -1.5041 0.1359 -1.2353 0.1703
# Omnibus: 0.239 Durbin-Watson: 1.231
# Prob(Omnibus): 0.887 Jarque-Bera (JB): 0.381
# Skew: -0.087 Prob(JB): 0.827
# Kurtosis: 2.785 Condition No.: 427
#
# In[39]:
# Model 4: additionally drop poc, Age and Gender.
temp = ols('Marks ~ Education + ss + ocd + ic + ac + buc + ata + smu+ bc + ce', data);
model4= temp.fit()
print(model4.params)
# poc, Age and Gender are removed since they are not significant.
# The AIC after including the Age variable differs by less than 2, but there
# is no significant change in accuracy, so it is removed to reduce the number
# of predictor variables.
# Intercept 71.049442
# Education[T.phd] -7.504945
# Education[T.ug] 0.805427
# ic[T.g] 1.473644
# ic[T.no] -0.797408
# ic[T.tb] 1.876522
# ic[T.tw] -10.367596
# ac[T.Yes] 2.547195
# buc[T.d] 0.582288
# buc[T.n] 0.588619
# ata[T.n2] -1.527142
# ata[T.st2] -0.872390
# smu[T.n3] -1.057230
# smu[T.st3] 0.358491
# bc[T.n4] 0.915006
# bc[T.st4] 0.669938
# ce[T.n5] 0.889707
# ce[T.st5] -0.771634
# ss 3.933295
# ocd -0.582798
# dtype: float64
# In[40]:
# FIX: a bare `model4.summary2()` expression only renders inside a notebook;
# in a plain script it is a no-op, so print it explicitly.
print(model4.summary2())
# Model: OLS Adj. R-squared: 0.780
# Dependent Variable: Marks AIC: 708.6169
# Date: 2021-12-03 19:43 BIC: 764.1994
# No. Observations: 119 Log-Likelihood: -334.31
# Df Model: 19 F-statistic: 23.04
# Df Residuals: 99 Prob (F-statistic): 3.85e-28
# R-squared: 0.816 Scale: 19.390
# Coef. Std.Err. t P>|t| [0.025 0.975]
# Intercept 71.0494 2.8582 24.8583 0.0000 65.3782 76.7207
# Education[T.phd] -7.5049 2.2003 -3.4108 0.0009 -11.8709 -3.1390
# Education[T.ug] 0.8054 0.9506 0.8473 0.3989 -1.0808 2.6917
# ic[T.g] 1.4736 1.4038 1.0498 0.2964 -1.3118 4.2591
# ic[T.no] -0.7974 1.8544 -0.4300 0.6681 -4.4769 2.8820
# ic[T.tb] 1.8765 1.6131 1.1633 0.2475 -1.3242 5.0773
# ic[T.tw] -10.3676 2.0208 -5.1304 0.0000 -14.3773 -6.3579
# ac[T.Yes] 2.5472 1.1597 2.1963 0.0304 0.2460 4.8484
# buc[T.d] 0.5823 1.5508 0.3755 0.7081 -2.4948 3.6594
# buc[T.n] 0.5886 1.4965 0.3933 0.6949 -2.3808 3.5580
# ata[T.n2] -1.5271 1.2119 -1.2601 0.2106 -3.9318 0.8776
# ata[T.st2] -0.8724 1.0655 -0.8188 0.4149 -2.9865 1.2417
# smu[T.n3] -1.0572 1.3602 -0.7773 0.4389 -3.7562 1.6417
# smu[T.st3] 0.3585 1.1294 0.3174 0.7516 -1.8825 2.5995
# bc[T.n4] 0.9150 1.5985 0.5724 0.5683 -2.2568 4.0868
# bc[T.st4] 0.6699 1.3401 0.4999 0.6182 -1.9890 3.3289
# ce[T.n5] 0.8897 1.1281 0.7886 0.4322 -1.3488 3.1282
# ce[T.st5] -0.7716 1.2508 -0.6169 0.5387 -3.2534 1.7102
# ss 3.9333 0.3941 9.9817 0.0000 3.1514 4.7152
# ocd -0.5828 0.3414 -1.7072 0.0909 -1.2602 0.0946
# Omnibus: 0.172 Durbin-Watson: 1.204
# Prob(Omnibus): 0.918 Jarque-Bera (JB): 0.270
# Skew: -0.085 Prob(JB): 0.874
# Kurtosis: 2.841 Condition No.: 53
#
# In[41]:
# Model 5 (final): additionally drop the remaining categorical predictors.
temp = ols('Marks ~ Education + ss + ocd + ic + ac', data);
model5= temp.fit();
print(model5.params);
# bc, buc, smu, ata and ce variables are removed.
# Intercept 70.818127
# Education[T.phd] -7.637045
# Education[T.ug] 0.780476
# ic[T.g] 1.845596
# ic[T.no] -0.272558
# ic[T.tb] 1.873452
# ic[T.tw] -10.383679
# ac[T.Yes] 2.957437
# ss 4.076030
# ocd -0.640224
# dtype: float64
# In[42]:
# model5.summary2()
# Model: OLS Adj. R-squared: 0.790
# Dependent Variable: Marks AIC: 694.4447
# Date: 2021-12-03 20:20 BIC: 722.2359
# No. Observations: 119 Log-Likelihood: -337.22
# Df Model: 9 F-statistic: 50.42
# Df Residuals: 109 Prob (F-statistic): 7.94e-35
# R-squared: 0.806 Scale: 18.495
# Coef. Std.Err. t P>|t| [0.025 0.975]
# Intercept 70.8181 2.1925 32.3002 0.0000 66.4727 75.1636
# Education[T.phd] -7.6370 2.0915 -3.6515 0.0004 -11.7823 -3.4918
# Education[T.ug] 0.7805 0.9053 0.8621 0.3905 -1.0139 2.5748
# ic[T.g] 1.8456 1.3340 1.3835 0.1693 -0.7983 4.4895
# ic[T.no] -0.2726 1.7107 -0.1593 0.8737 -3.6632 3.1180
# ic[T.tb] 1.8735 1.5379 1.2182 0.2258 -1.1746 4.9215
# ic[T.tw] -10.3837 1.9355 -5.3648 0.0000 -14.2198 -6.5475
# ac[T.Yes] 2.9574 0.9384 3.1515 0.0021 1.0975 4.8173
# ss 4.0760 0.3481 11.7092 0.0000 3.3861 4.7660
# ocd -0.6402 0.3269 -1.9585 0.0527 -1.2881 0.0077
# Omnibus: 0.160 Durbin-Watson: 1.114
# Prob(Omnibus): 0.923 Jarque-Bera (JB): 0.328
# Skew: -0.043 Prob(JB): 0.849
# Kurtosis: 2.758 Condition No.: 47
# In[43]:
# Example prediction for a single hypothetical student.
print(model5.predict({'Education': 'ug', 'ss': 4, 'ocd': 3, 'ic': 'g', 'ac': 'No'}))
# In[44]:
def evaluateModel(model):
    """Report goodness of fit (RSS and R-squared) of a fitted OLS model.

    Reads the observed Marks from the module-level ``data`` DataFrame.
    """
    rss = ((data.Marks - model.predict()) ** 2).sum()
    print("RSS = ", rss)
    print("R2 = ", model.rsquared)
# In[45]:
evaluateModel(model5);
# Our model is about 80.6% accurate (R-squared below).
# RSS = 2015.955913489736
# R2 = 0.8063111488888859
# In[46]:
# Variance inflation factors for the final model's design matrix, used to
# check for multicollinearity among the selected predictors.
y, X = dmatrices('Marks ~ Education + ss + ocd + ic + ac', data, return_type='dataframe')
vif = pd.DataFrame()
vif["Feature"] = X.columns
vif["VIF Factor"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
print(" VARIANCE INFLATION FACTOR")
print('=======================================')
print(vif)
# VARIANCE INFLATION FACTOR
# =======================================
# Feature VIF Factor
# 0 Intercept 30.929261
# 1 Education[T.phd] 1.558215
# 2 Education[T.ug] 1.269146
# 3 ic[T.g] 2.857405
# 4 ic[T.no] 1.832353
# 5 ic[T.tb] 2.669301
# 6 ic[T.tw] 1.685108
# 7 ac[T.Yes] 1.415617
# 8 ss 1.532167
# 9 ocd 1.649013
# In[ ]:
# ==================================================================================================================
|
{"hexsha": "60be4d16d244c9877116741429f19ca9632212b2", "size": 29496, "ext": "py", "lang": "Python", "max_stars_repo_path": "spa.py", "max_stars_repo_name": "BALAJI24092001/Student-Perception-Analysis", "max_stars_repo_head_hexsha": "8b21da576fbae1678918be02e5e91615818da4be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "spa.py", "max_issues_repo_name": "BALAJI24092001/Student-Perception-Analysis", "max_issues_repo_head_hexsha": "8b21da576fbae1678918be02e5e91615818da4be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-03T09:44:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-03T09:44:47.000Z", "max_forks_repo_path": "spa.py", "max_forks_repo_name": "BALAJI24092001/Student-Perception-Analysis", "max_forks_repo_head_hexsha": "8b21da576fbae1678918be02e5e91615818da4be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9582938389, "max_line_length": 327, "alphanum_fraction": 0.6070992677, "include": true, "reason": "import numpy,import statsmodels,from statsmodels", "num_tokens": 12190}
|
import warnings
import numpy as np
import pandas as pd
import networkx as nx
import statsmodels.api as sm
def probability_to_odds(prob):
    """Convert a probability (proportion) into the corresponding odds.

    Parameters
    ----------
    prob : float, array
        Probability or array of probabilities to convert to odds
    """
    complement = 1 - prob
    return prob / complement
def odds_to_probability(odds):
    """Convert odds into the corresponding probability.

    Parameters
    ----------
    odds : float, array
        Odds or array of odds to convert to probabilities
    """
    denominator = 1 + odds
    return odds / denominator
def exp_map(graph, var):
    """Slow implementation of the exposure mapping functionality. Only supports the sum summary measure.
    Still used by the dgm files.
    Note
    ----
    Deprecated and no longer actively used by any functions; prefer
    ``fast_exp_map`` in new code.
    Parameters
    ----------
    graph : networkx.Graph
        Network to calculate the summary measure for.
    var : str
        Variable in the graph to calculate the summary measure for
    Returns
    -------
    array
        One dimensional array of calculated summary measure
    """
    # get (unweighted) adjacency matrix; row order follows graph.nodes
    matrix = nx.adjacency_matrix(graph, weight=None)
    # get node attributes as a vector in the same node order
    y_vector = np.array(list(nx.get_node_attributes(graph, name=var).values()))
    # sparse matrix product: each entry is the sum of the attribute over the
    # node's neighbors (NaNs zeroed first)
    wy_matrix = np.nan_to_num(matrix * y_vector.reshape((matrix.shape[0]), 1)).flatten()
    return np.asarray(wy_matrix).flatten()  # convert the np.matrix result back to a 1-D array
def fast_exp_map(matrix, y_vector, measure):
    r"""Improved (computation-speed-wise) implementation of the exposure mapping functionality. Further supports a
    variety of summary measures. This is accomplished by using the adjacency matrix and vectors to efficiently
    calculate the summary measures (hence the function name). This is an improvement on previous iterations of this
    function.
    Available summary measures are
    Sum (``'sum'``) :
    .. math::
        X_i^s = \sum_{j=1}^n X_j \mathcal{G}_{ij}
    Mean (``'mean'``) :
    .. math::
        X_i^s = \sum_{j=1}^n X_j \mathcal{G}_{ij} / \sum_{j=1}^n \mathcal{G}_{ij}
    Variance (``'var'``):
    .. math::
        \bar{X}_j = \sum_{j=1}^n X_j \mathcal{G}_{ij} \\
        X_i^s = \sum_{j=1}^n (X_j - \bar{X}_j)^2 \mathcal{G}_{ij} / \sum_{j=1}^n \mathcal{G}_{ij}
    Mean distance (``'mean_dist'``) :
    .. math::
        X_i^s = \sum_{j=1}^n (X_i - X_j) \mathcal{G}_{ij} / \sum_{j=1}^n \mathcal{G}_{ij}
    Variance distance (``'var_dist'``) :
    .. math::
        \bar{X}_{ij} = \sum_{j=1}^n (X_i - X_j) \mathcal{G}_{ij} \\
        X_i^s = \sum_{j=1}^n ((X_j - X_j) - \bar{X}_{ij})^2 \mathcal{G}_{ij} / \sum_{j=1}^n \mathcal{G}_{ij}
    Note
    ----
    If you would like other summary measures to be added or made available, please reach out via GitHub.
    Parameters
    ----------
    matrix : array
        Adjacency matrix. Should be extracted from a ``networkx.Graph`` via ``nx.adjacency_matrix(...)``
    y_vector : array
        Array of the variable to calculate the summary measure for. Should be in same order as ``matrix`` for
        calculation to work as intended.
    measure : str
        Summary measure to calculate. Options are provided above.
    Returns
    -------
    array
        One dimensional array of calculated summary measure

    Raises
    ------
    ValueError
        If ``measure`` is not one of the supported summary measures.
    """
    if measure.lower() == 'sum':
        # sparse matrix product: neighbor-sum of the node attributes
        wy_matrix = np.nan_to_num(matrix * y_vector.reshape((matrix.shape[0]), 1)).flatten()
        return np.asarray(wy_matrix).flatten()  # converting between arrays and matrices...
    elif measure.lower() == 'mean':
        rowsum_vector = np.sum(matrix, axis=1)  # calculate row-sum (denominator / degree)
        with warnings.catch_warnings():  # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)
            warnings.simplefilter('ignore', RuntimeWarning)
            weight_matrix = matrix / rowsum_vector.reshape((matrix.shape[0]), 1)  # calculate each node's weight
        wy_matrix = weight_matrix * y_vector.reshape((matrix.shape[0]), 1)  # multiply matrix by node attributes
        return np.asarray(wy_matrix).flatten()  # converting between arrays and matrices...
    elif measure.lower() == 'var':
        a = matrix.toarray()  # Convert matrix to array
        a = np.where(a == 0, np.nan, a)  # filling non-edges with NaN's so they are ignored by nanvar
        with warnings.catch_warnings():  # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)
            warnings.simplefilter('ignore', RuntimeWarning)
            return np.nanvar(a * y_vector, axis=1)
    elif measure.lower() == 'mean_dist':
        a = matrix.toarray()  # Convert matrix to array
        a = np.where(a == 0, np.nan, a)  # filling non-edges with NaN's
        c = (a * y_vector).transpose() - y_vector  # Calculates the distance metric (needs transpose)
        with warnings.catch_warnings():  # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)
            warnings.simplefilter('ignore', RuntimeWarning)
            return np.nanmean(c.transpose(),  # back-transpose
                              axis=1)
    elif measure.lower() == 'var_dist':
        a = matrix.toarray()  # Convert matrix to array
        a = np.where(a == 0, np.nan, a)  # filling non-edges with NaN's
        c = (a * y_vector).transpose() - y_vector  # Calculates the distance metric (needs transpose)
        with warnings.catch_warnings():  # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)
            warnings.simplefilter('ignore', RuntimeWarning)
            return np.nanvar(c.transpose(),  # back-transpose
                             axis=1)
    else:
        # BUG FIX: original message lacked spaces around the measure name,
        # producing e.g. "mappingXis not available"
        raise ValueError("The summary measure mapping " + str(measure) + " is not available")
def exp_map_individual(network, variable, max_degree):
    """Non-parametric exposure mapping from Sofrygin & van der Laan (2017).

    Works best for networks with fairly uniform degree distributions. Produces
    ``max_degree`` columns, one indicator column per possible neighbor slot;
    units with fewer neighbors than ``max_degree`` get missing values in the
    unused slots (padded by the DataFrame constructor).

    Parameters
    ----------
    network : networkx.Graph
        The NetworkX graph object to calculate the summary measure for.
    variable : str
        Variable to calculate the summary measure for (this will always be the exposure variable internally).
    max_degree : int
        Maximum degree in the network (defines the number of columns to generate).

    Returns
    -------
    dataframe
        Data set containing all generated columns
    """
    column_labels = [variable + '_map' + str(k) for k in range(1, max_degree + 1)]
    rows = []
    for node in network.nodes:
        neighbor_values = [network.nodes[neighbor][variable]
                           for neighbor in network.neighbors(node)]
        rows.append(neighbor_values[:max_degree])  # truncate to the column count
    return pd.DataFrame(rows, columns=column_labels)
def network_to_df(graph):
    """Convert all node attributes of a network into a pandas DataFrame.

    The resulting data set (indexed by node label) is what ``NetworkTMLE``
    works with internally.

    Parameters
    ----------
    graph : networkx.Graph
        Graph with node attributes to transform into data set

    Returns
    -------
    dataframe
        Data set containing all node attributes
    """
    node_attributes = dict(graph.nodes(data=True))
    return pd.DataFrame.from_dict(node_attributes, orient='index')
def bounding(ipw, bound):
    """Internal function to bound or truncate estimated inverse probability weights.

    A single numeric bound is applied symmetrically (weights clipped to
    ``[1/bound, bound]``); a collection is treated as explicit
    ``[lower, upper]`` thresholds.

    Parameters
    ----------
    ipw : array
        Estimated inverse probability weights to truncate.
    bound : list, float, int, set, array
        Bounds to truncate weights by.

    Returns
    -------
    array
        Truncated inverse probability weights.

    Raises
    ------
    ValueError
        For non-positive, string, or mis-ordered bounds.
    """
    if type(bound) in (float, int):  # symmetric bounding from a single number
        if bound > 1:
            lower, upper = 1 / bound, bound
        elif 0 < bound < 1:
            lower, upper = bound, 1 / bound
        else:
            raise ValueError('Bound must be a positive value')
        return np.clip(ipw, lower, upper)

    if type(bound) is str:  # catching string inputs
        raise ValueError('Bounds must either be a float or integer, or a collection')

    # asymmetric bounds given as a collection
    if bound[0] > bound[1]:
        raise ValueError('Bound thresholds must be listed in ascending order')
    if len(bound) > 2:  # only the first two entries are meaningful
        warnings.warn('It looks like your specified bounds is more than two floats. Only the first two '
                      'specified bounds are used by the bound statement. So only ' +
                      str(bound[0:2]) + ' will be used', UserWarning)
    if type(bound[0]) is str or type(bound[1]) is str:
        raise ValueError('Bounds must be floats or integers')
    if bound[0] < 0 or bound[1] < 0:
        raise ValueError('Both bound values must be positive values')
    return np.clip(ipw, bound[0], bound[1])
def outcome_learner_fitting(ml_model, xdata, ydata):
    """Fit a user-specified (custom_model) learner for the outcome nuisance model.

    Parameters
    ----------
    ml_model :
        Unfitted model to be fit.
    xdata : array
        Covariate data to fit the model with
    ydata : array
        Outcome data to fit the model with

    Returns
    -------
    Fitted user-specified model
    """
    try:
        fitted_model = ml_model.fit(X=xdata, y=ydata)
    except TypeError:
        # learner does not accept the sklearn/supylearner style keyword call
        raise TypeError("Currently custom_model must have the 'fit' function with arguments 'X', 'y'. This "
                        "covers both sklearn and supylearner. If there is a predictive model you would "
                        "like to use, please open an issue at https://github.com/pzivich/zepid and I "
                        "can work on adding support")
    return fitted_model
def outcome_learner_predict(ml_model_fit, xdata):
    """Generate predictions from a fitted custom_model outcome nuisance model.

    Parameters
    ----------
    ml_model_fit :
        Fitted user-specified model
    xdata : array
        Covariate data to generate the predictions with.

    Returns
    -------
    array
        Predicted values for the outcome (probability if binary, and expected value otherwise)
    """
    if hasattr(ml_model_fit, 'predict_proba'):
        probabilities = ml_model_fit.predict_proba(xdata)
        # pygam.LogisticGAM returns a 1-D array of P(Y=1); sklearn-style
        # learners return an (n, 2) array whose second column is P(Y=1)
        return probabilities if probabilities.ndim == 1 else probabilities[:, 1]
    if hasattr(ml_model_fit, 'predict'):
        return ml_model_fit.predict(xdata)
    raise ValueError("Currently custom_model must have 'predict' or 'predict_proba' attribute")
def exposure_machine_learner(ml_model, xdata, ydata, pdata):
    """Internal function to fit custom_models for the exposure nuisance model and generate the predictions.

    Parameters
    ----------
    ml_model :
        Unfitted model to be fit. Must expose a ``fit`` method accepting the
        keyword arguments ``X`` and ``y`` (the sklearn convention).
    xdata : array
        Covariate data to fit the model with
    ydata : array
        Outcome data to fit the model with
    pdata : array
        Covariate data to generate the predictions with.

    Returns
    -------
    array
        Predicted values for the outcome (probability if binary, and expected value otherwise)

    Raises
    ------
    TypeError
        If the supplied model's ``fit`` does not accept ``X`` and ``y`` keywords.
    ValueError
        If the fitted model offers neither ``predict`` nor ``predict_proba``.
    """
    # Fitting model
    try:
        fm = ml_model.fit(X=xdata, y=ydata)
    except TypeError as exc:
        # Chain the original error so the user can see exactly why `fit` rejected the call
        raise TypeError("Currently custom_model must have the 'fit' function with arguments 'X', 'y'. This "
                        "covers both sklearn and supylearner. If there is a predictive model you would "
                        "like to use, please open an issue at https://github.com/pzivich/zepid and I "
                        "can work on adding support") from exc
    # Generating predictions
    if hasattr(fm, 'predict_proba'):
        g = fm.predict_proba(pdata)
        if g.ndim == 1:  # allows support for pygam.LogisticGAM
            return g
        else:
            return g[:, 1]
    elif hasattr(fm, 'predict'):
        g = fm.predict(pdata)
        return g
    else:
        raise ValueError("Currently custom_model must have 'predict' or 'predict_proba' attribute")
def targeting_step(y, q_init, ipw, verbose):
    r"""Estimate :math:`\eta` via the targeting model.

    Fits an intercept-only weighted logistic regression of the observed
    outcomes, offset by the logit of the initial outcome predictions. The
    single fitted coefficient is the TMLE fluctuation (epsilon) parameter.

    Parameters
    ----------
    y : array
        Observed outcome values.
    q_init : array
        Predicted outcome values under the observed values of exposure.
    ipw : array
        Estimated inverse probability weights.
    verbose : bool
        Whether to print the summary details of the targeting model.

    Returns
    -------
    float
        Estimated value to use to target the outcome model predictions
    """
    f = sm.families.family.Binomial()
    # NOTE(review): q_init must lie strictly in (0, 1) -- probability_to_odds of
    # 0 or 1 makes the log offset infinite. Confirm callers bound q_init first.
    log = sm.GLM(y,  # Outcome / dependent variable
                 np.repeat(1, y.shape[0]),  # Generating intercept only model
                 offset=np.log(probability_to_odds(q_init)),  # Offset by g-formula predictions
                 freq_weights=ipw,  # Weighted by calculated IPW
                 family=f).fit(maxiter=500)
    if verbose:  # Optional argument to print each intermediary result
        print('==============================================================================')
        print('Targeting Model')
        print(log.summary())
    return log.params[0]  # Returns single-step estimated Epsilon term
def tmle_unit_bounds(y, mini, maxi):
    """Bounding for continuous outcomes for TMLE.

    Linearly rescales the outcomes onto the unit interval, mapping ``mini``
    to 0 and ``maxi`` to 1.

    Parameters
    ----------
    y : array
        Observed outcome values
    mini : float
        Lower bound to apply
    maxi : float
        Upper bound to apply

    Returns
    -------
    array
        Bounded outcomes
    """
    spread = maxi - mini
    return (y - mini) / spread
def tmle_unit_unbound(ystar, mini, maxi):
    """Unbound the bounded continuous outcomes for presentation of results.

    Inverse of :func:`tmle_unit_bounds`: maps values on the unit interval
    back to the original ``[mini, maxi]`` scale.

    Parameters
    ----------
    ystar : array
        Bounded outcome values
    mini : float
        Lower bound to apply
    maxi : float
        Upper bound to apply

    Returns
    -------
    array
        Unbounded outcomes
    """
    spread = maxi - mini
    return ystar * spread + mini
def create_threshold(data, variables, thresholds):
    """Internal function to create threshold variables given setup information.

    For each (variable, threshold) pair, adds an indicator column named
    ``<variable>_t<threshold>`` which is 1 where the variable exceeds the
    threshold and 0 otherwise. Float thresholds are encoded as percentages
    in the column name (e.g. 0.5 -> ``_t50``).

    Parameters
    ----------
    data : dataframe
        Data set to calculate the measure for
    variables : list, set
        List of variable names to create the threshold variables for
    thresholds : list, set
        List of values (float or int) to create the thresholds at.

    Returns
    -------
    None
    """
    for var, cut in zip(variables, thresholds):
        suffix = str(int(cut * 100)) if type(cut) is float else str(cut)
        new_col = var + '_t' + suffix
        data[new_col] = np.where(data[var] > cut, 1, 0)
def create_categorical(data, variables, bins, labels, verbose=False):
    """Internal function to create categorical variables from bin specifications.

    For each variable, adds a column named ``<variable>_c`` holding the
    ``pandas.cut`` category (converted to float) for each observation.

    Parameters
    ----------
    data : dataframe
        Data set to calculate the measure for
    variables : list, set
        List of variable names to create the threshold variables for
    bins : list, set
        List of lists of values (float or int) to create bins at.
    labels : list, set
        List of lists of labels (str) to apply as the new column names
    verbose : bool, optional
        Whether to warn the user if any NaN values occur (a result of bad or incompletely specified bins).

    Returns
    -------
    None
    """
    for var, var_bins, var_labels in zip(variables, bins, labels):
        new_col = var + '_c'
        categorized = pd.cut(data[var],
                             bins=var_bins,
                             labels=var_labels,
                             include_lowest=True)
        data[new_col] = categorized.astype(float)
        # NaNs appear when an observation falls outside every supplied bin.
        if verbose and np.any(data[new_col].isna()):
            warnings.warn("It looks like some of your categories have missing values when being generated on the "
                          "input data. Please check pandas.cut to make sure the `bins` and `labels` arguments are "
                          "being used correctly.", UserWarning)
|
{"hexsha": "1ca8511c0b94911c5b563576565fc5684866e3ca", "size": 16881, "ext": "py", "lang": "Python", "max_stars_repo_path": "mossspider/estimators/utils.py", "max_stars_repo_name": "pzivich/MossSpider", "max_stars_repo_head_hexsha": "43cb6d22959afb47a9862f73754965473f42ddc1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-26T18:49:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T18:49:26.000Z", "max_issues_repo_path": "mossspider/estimators/utils.py", "max_issues_repo_name": "pzivich/MossSpider", "max_issues_repo_head_hexsha": "43cb6d22959afb47a9862f73754965473f42ddc1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mossspider/estimators/utils.py", "max_forks_repo_name": "pzivich/MossSpider", "max_forks_repo_head_hexsha": "43cb6d22959afb47a9862f73754965473f42ddc1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9170212766, "max_line_length": 120, "alphanum_fraction": 0.6106865707, "include": true, "reason": "import numpy,import statsmodels,import networkx", "num_tokens": 3871}
|
import os
import numpy as np
import pytest
from jina.executors.decorators import as_update_method, as_train_method, as_ndarray, batching, \
require_train, store_init_kwargs
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_as_update_method():
    class Dummy:
        def __init__(self):
            self.is_updated = False

        @as_update_method
        def f(self):
            pass

    obj = Dummy()
    # The decorator flips `is_updated` once the method has been invoked.
    assert not obj.is_updated
    obj.f()
    assert obj.is_updated
def test_as_train_method():
    class Dummy:
        def __init__(self):
            self.is_trained = False

        @as_train_method
        def f(self):
            pass

    obj = Dummy()
    # The decorator flips `is_trained` once the method has been invoked.
    assert not obj.is_trained
    obj.f()
    assert obj.is_trained
def test_as_ndarray():
    class Dummy:
        @as_ndarray
        def f_list(self, *args, **kwargs):
            return [0]

        @as_ndarray
        def f_int(self, *args, **kwargs):
            return 0

    obj = Dummy()
    # A list result is converted to a numpy array; a bare scalar is rejected.
    assert isinstance(obj.f_list(), np.ndarray)
    with pytest.raises(TypeError):
        obj.f_int()
def test_require_train():
    class Dummy:
        def __init__(self):
            self.is_trained = False

        @require_train
        def f(self):
            pass

    obj = Dummy()
    # Invoking before training must raise.
    obj.is_trained = False
    with pytest.raises(RuntimeError):
        obj.f()
    # Once marked trained, the call is permitted.
    obj.is_trained = True
    obj.f()
def test_store_init_kwargs():
    class Dummy:
        @store_init_kwargs
        def __init__(self, a, b, c, *args, **kwargs):
            pass

        @store_init_kwargs
        def f(self, a, b, *args, **kwargs):
            pass

    obj = Dummy('a', 'b', c=5, d='d')
    # Only the explicitly named parameters are recorded; extra kwargs (d) are not.
    assert obj._init_kwargs_dict
    assert obj._init_kwargs_dict == {'a': 'a', 'b': 'b', 'c': 5}
    # Calling the decorated non-__init__ method raises TypeError.
    with pytest.raises(TypeError):
        obj.f('a', 'b', c='c')
def test_batching():
    class Dummy:
        def __init__(self, batch_size):
            # `batch_size` is the attribute the @batching decorator reads.
            self.batch_size = batch_size
            self.batch_sizes = []

        @batching
        def f(self, data):
            self.batch_sizes.append(len(data))
            return data

    # Batch size 1: four singleton batches.
    obj = Dummy(1)
    out = obj.f([1, 1, 1, 1])
    assert out == [[1], [1], [1], [1]]
    assert len(obj.batch_sizes) == 4
    assert all(size == 1 for size in obj.batch_sizes)

    # Batch size 3: one full batch plus a remainder of one.
    obj = Dummy(3)
    out = obj.f([1, 1, 1, 1])
    assert out == [[1, 1, 1], [1]]
    assert obj.batch_sizes == [3, 1]

    # Batch size larger than the data: a single batch with everything.
    obj = Dummy(5)
    out = obj.f([1, 1, 1, 1])
    assert out == [1, 1, 1, 1]
    assert obj.batch_sizes == [4]
|
{"hexsha": "d74a502918ffb8e507252ec7f61a615e49d2fdfc", "size": 2670, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/executors/test_decorators.py", "max_stars_repo_name": "DavidSanwald/jina", "max_stars_repo_head_hexsha": "fa2bd79c30c586928a0a77a44b32c5a99d7932bc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/unit/executors/test_decorators.py", "max_issues_repo_name": "DavidSanwald/jina", "max_issues_repo_head_hexsha": "fa2bd79c30c586928a0a77a44b32c5a99d7932bc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/unit/executors/test_decorators.py", "max_forks_repo_name": "DavidSanwald/jina", "max_forks_repo_head_hexsha": "fa2bd79c30c586928a0a77a44b32c5a99d7932bc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7073170732, "max_line_length": 96, "alphanum_fraction": 0.565917603, "include": true, "reason": "import numpy", "num_tokens": 728}
|
#!/usr/bin/env python
# stdlib imports
import os.path
import tempfile
import shutil
from datetime import datetime
# third party imports
import numpy as np
from mapio.shake import getHeaderData
# local imports
from losspager.io.pagerdata import PagerData
from losspager.models.emploss import EmpiricalLoss
from losspager.models.exposure import Exposure
from losspager.models.econexposure import EconExposure
from losspager.models.growth import PopulationGrowth
from losspager.models.semimodel import SemiEmpiricalFatality
from losspager.vis.contourmap import draw_contour
from mapio.city import Cities
# Timestamp format used when parsing PAGER processing times (see tdoc below).
DATETIMEFMT = '%Y-%m-%d %H:%M:%S'
# Magnitude at or above which an event is flagged as tsunami-capable.
TSUNAMI_MAG_THRESH = 7.3
def tdoc(doc, shakegrid, impact1, impact2, expdict, struct_comment, hist_comment):
    """Assert that a PagerData instance round-trips the supplied inputs.

    Shared checker used both for the freshly built document and for the
    copy re-loaded from JSON in test() below.
    """
    eventinfo = doc.getEventInfo()
    assert eventinfo['mag'] == shakegrid.getEventDict()['magnitude']
    imp1, imp2 = doc.getImpactComments()
    assert imp1 == impact1 and imp2 == impact2
    # NOTE(review): version/elapsed are fetched but never asserted --
    # presumably this just exercises the getters; confirm intent.
    version = doc.getSoftwareVersion()
    elapsed = doc.getElapsed()
    exp = doc.getTotalExposure()
    assert np.isclose(np.array(exp), expdict['TotalExposure']).all()
    hist_table = doc.getHistoricalTable()
    assert hist_table[0]['EventID'] == '198411261621'
    scomm = doc.getStructureComment()
    assert scomm == struct_comment
    hcomm = doc.getHistoricalComment()
    assert hcomm == hist_comment
    citytable = doc.getCityTable()
    assert citytable.iloc[0]['name'] == 'Santa Clarita'
    summary = doc.getSummaryAlert()
    assert summary == 'yellow'
    # test property methods
    assert doc.magnitude == shakegrid.getEventDict()['magnitude']
    assert doc.time == shakegrid.getEventDict()['event_timestamp']
    assert doc.summary_alert == 'yellow'
    assert doc.processing_time == datetime.strptime(
        doc._pagerdict['pager']['processing_time'], DATETIMEFMT)
    assert doc.version == doc._pagerdict['pager']['version_number']
def test():
    """End-to-end PagerData test: build a document from the Northridge
    event data, validate it, and round-trip it through JSON and legacy XML."""
    homedir = os.path.dirname(os.path.abspath(
        __file__))  # where is this script?
    fatfile = os.path.join(homedir, '..', 'data', 'fatality.xml')
    ecofile = os.path.join(homedir, '..', 'data', 'economy.xml')
    cityfile = os.path.join(homedir, '..', 'data', 'cities1000.txt')
    event = 'northridge'
    shakefile = os.path.join(homedir, '..', 'data',
                             'eventdata', event, '%s_grid.xml' % event)
    popfile = os.path.join(homedir, '..', 'data',
                           'eventdata', event, '%s_gpw.flt' % event)
    isofile = os.path.join(homedir, '..', 'data',
                           'eventdata', event, '%s_isogrid.bil' % event)
    urbanfile = os.path.join(homedir, '..', 'data',
                             'eventdata', 'northridge', 'northridge_urban.bil')
    oceanfile = os.path.join(
        homedir, '..', 'data', 'eventdata', 'northridge', 'northridge_ocean.json')
    oceangridfile = os.path.join(
        homedir, '..', 'data', 'eventdata', 'northridge', 'northridge_ocean.bil')
    timezonefile = os.path.join(
        homedir, '..', 'data', 'eventdata', 'northridge', 'northridge_timezone.shp')
    invfile = os.path.join(homedir, '..', 'data', 'semi_inventory.hdf')
    colfile = os.path.join(homedir, '..', 'data', 'semi_collapse_mmi.hdf')
    casfile = os.path.join(homedir, '..', 'data', 'semi_casualty.hdf')
    workfile = os.path.join(homedir, '..', 'data', 'semi_workforce.hdf')
    # Render the contour map into a throwaway directory.
    tdir = tempfile.mkdtemp()
    basename = os.path.join(tdir, 'output')
    exp = Exposure(popfile, 2012, isofile)
    results = exp.calcExposure(shakefile)
    shakegrid = exp.getShakeGrid()
    popgrid = exp.getPopulationGrid()
    pdffile, pngfile, mapcities = draw_contour(
        shakegrid, popgrid, oceanfile, oceangridfile, cityfile, basename)
    shutil.rmtree(tdir)
    # Compute the loss models the document needs.
    popyear = 2012
    shake_tuple = getHeaderData(shakefile)
    tsunami = shake_tuple[1]['magnitude'] >= TSUNAMI_MAG_THRESH
    semi = SemiEmpiricalFatality.fromDefault()
    semi.setGlobalFiles(popfile, popyear, urbanfile, isofile)
    semiloss, resfat, nonresfat = semi.getLosses(shakefile)
    popgrowth = PopulationGrowth.fromDefault()
    econexp = EconExposure(popfile, 2012, isofile)
    fatmodel = EmpiricalLoss.fromDefaultFatality()
    expobject = Exposure(popfile, 2012, isofile, popgrowth)
    expdict = expobject.calcExposure(shakefile)
    fatdict = fatmodel.getLosses(expdict)
    econexpdict = econexp.calcExposure(shakefile)
    ecomodel = EmpiricalLoss.fromDefaultEconomic()
    ecodict = ecomodel.getLosses(expdict)
    shakegrid = econexp.getShakeGrid()
    pagerversion = 1
    cities = Cities.loadFromGeoNames(cityfile)
    impact1 = '''Red alert level for economic losses. Extensive damage is probable
and the disaster is likely widespread. Estimated economic losses are less
than 1% of GDP of Italy. Past events with this alert level have required
a national or international level response.'''
    impact2 = '''Orange alert level for shaking-related fatalities. Significant
casualties are likely.'''
    structcomment = '''Overall, the population in this region resides in structures
that are a mix of vulnerable and earthquake resistant construction. The predominant
vulnerable building types are unreinforced brick with mud and mid-rise nonductile
concrete frame with infill construction.'''
    histeq = [1, 2, 3]
    struct_comment = '''Overall, the population in this region resides
in structures that are resistant to earthquake
shaking, though some vulnerable structures
exist.'''
    secondary_comment = '''Recent earthquakes in this area have caused secondary hazards
such as landslides that might have contributed to losses.'''
    hist_comment = ''''A magnitude 7.1 earthquake 240 km east of this event struck Reventador: Ecuador
on March 6, 1987 (UTC), with estimated population exposures of 14,000 at intensity VIII and 2,000
at intensity IX or greater, resulting in a reported 5,000 fatalities.'''.replace('\n', '')
    location = 'At the top of the world.'
    is_released = True
    doc = PagerData()
    eventcode = shakegrid.getEventDict()['event_id']
    versioncode = eventcode
    doc.setInputs(shakegrid, timezonefile, pagerversion,
                  versioncode, eventcode, tsunami, location, is_released)
    doc.setExposure(expdict, econexpdict)
    doc.setModelResults(fatmodel, ecomodel,
                        fatdict, ecodict,
                        semiloss, resfat, nonresfat)
    doc.setComments(impact1, impact2, struct_comment,
                    hist_comment, secondary_comment)
    doc.setMapInfo(cityfile, mapcities)
    doc.validate()
    # let's test the property methods
    tdoc(doc, shakegrid, impact1, impact2,
         expdict, struct_comment, hist_comment)
    # see if we can save this to a bunch of files then read them back in
    # BUGFIX: the original `except Exception as e: assert 1 == 2` converted any
    # failure into a bare AssertionError, discarding the real traceback. Letting
    # the exception propagate still fails the test but keeps the diagnostics;
    # the finally clause still guarantees cleanup of the temp directory.
    try:
        tdir = tempfile.mkdtemp()
        doc.saveToJSON(tdir)
        newdoc = PagerData()
        newdoc.loadFromJSON(tdir)
        tdoc(newdoc, shakegrid, impact1, impact2,
             expdict, struct_comment, hist_comment)
        # test the xml saving method
        xmlfile = doc.saveToLegacyXML(tdir)
    finally:
        shutil.rmtree(tdir)
# Allow running this test module directly as a script (outside of pytest).
if __name__ == '__main__':
    test()
|
{"hexsha": "68f2a55ba11b5be72e48e16ae87f72a1debf7ef2", "size": 7283, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/io/pagerdata_test.py", "max_stars_repo_name": "usgs/pager", "max_stars_repo_head_hexsha": "0728e4bfb491343cda744d66304a5b3b14d33f5a", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2017-04-05T20:44:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T19:17:07.000Z", "max_issues_repo_path": "test/io/pagerdata_test.py", "max_issues_repo_name": "usgs/pager", "max_issues_repo_head_hexsha": "0728e4bfb491343cda744d66304a5b3b14d33f5a", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 231, "max_issues_repo_issues_event_min_datetime": "2016-08-22T19:08:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T02:52:50.000Z", "max_forks_repo_path": "test/io/pagerdata_test.py", "max_forks_repo_name": "usgs/pager", "max_forks_repo_head_hexsha": "0728e4bfb491343cda744d66304a5b3b14d33f5a", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2016-08-22T14:34:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T15:42:44.000Z", "avg_line_length": 38.9465240642, "max_line_length": 103, "alphanum_fraction": 0.6844706852, "include": true, "reason": "import numpy", "num_tokens": 1844}
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implements the Hager-Zhang inexact line search algorithm.
Line searches are a central component for many optimization algorithms (e.g.
BFGS, conjugate gradient etc). Most of the sophisticated line search methods
aim to find a step length in a given search direction so that the step length
satisfies the
[Wolfe conditions](https://en.wikipedia.org/wiki/Wolfe_conditions).
[Hager-Zhang 2006](https://epubs.siam.org/doi/abs/10.1137/030601880)
algorithm is a refinement of the commonly used
[More-Thuente](https://dl.acm.org/citation.cfm?id=192132) algorithm.
This module implements the Hager-Zhang algorithm.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.optimizer.linesearch.internal import hager_zhang_lib as hzl
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
__all__ = [
'hager_zhang',
]
def _machine_eps(dtype):
  """Returns the machine epsilon for the supplied dtype."""
  # Resolve the (possibly TF) dtype to its numpy equivalent without
  # shadowing the parameter name.
  numpy_dtype = dtype_util.as_numpy_dtype(tf.as_dtype(dtype))
  return np.finfo(numpy_dtype).eps
# Result container returned by `hager_zhang` (and by each internal stage).
HagerZhangLineSearchResult = collections.namedtuple(
    'HagerZhangLineSearchResults', [
        'converged',  # Whether a point satisfying Wolfe/Approx wolfe was found.
        'failed',  # Whether the line search failed. It can fail if either the
                   # objective function or the gradient are not finite at
                   # an evaluation point.
        'func_evals',  # Number of function evaluations made.
        'iterations',  # Number of line search iterations made.
        'left',  # The left end point of the final bracketing interval.
                 # If converged is True, it is equal to `right`.
                 # Otherwise, it corresponds to the last interval computed.
        'right'  # The right end point of the final bracketing interval.
                 # If converged is True, it is equal to `left`.
                 # Otherwise, it corresponds to the last interval computed.
    ])
@deprecation.deprecated_args(
    '2020-10-13',
    '`step_size_shrink_param` is ignored, and will stop being accepted.',
    'step_size_shrink_param')
def hager_zhang(value_and_gradients_function,
                initial_step_size=None,
                value_at_initial_step=None,
                value_at_zero=None,
                converged=None,
                threshold_use_approximate_wolfe_condition=1e-6,
                shrinkage_param=0.66,
                expansion_param=5.0,
                sufficient_decrease_param=0.1,
                curvature_param=0.9,
                step_size_shrink_param=0.1,
                max_iterations=50,
                name=None):
  """The Hager Zhang line search algorithm.
  Performs an inexact line search based on the algorithm of
  [Hager and Zhang (2006)][2].
  The univariate objective function `value_and_gradients_function` is typically
  generated by projecting a multivariate objective function along a search
  direction. Suppose the multivariate function to be minimized is
  `g(x1,x2, .. xn)`. Let (d1, d2, ..., dn) be the direction along which we wish
  to perform a line search. Then the projected univariate function to be used
  for line search is
  ```None
    f(a) = g(x1 + d1 * a, x2 + d2 * a, ..., xn + dn * a)
  ```
  The directional derivative along (d1, d2, ..., dn) is needed for this
  procedure. This also corresponds to the derivative of the projected function
  `f(a)` with respect to `a`. Note that this derivative must be negative for
  `a = 0` if the direction is a descent direction.
  The usual stopping criteria for the line search is the satisfaction of the
  (weak) Wolfe conditions. For details of the Wolfe conditions, see
  ref. [3]. On a finite precision machine, the exact Wolfe conditions can
  be difficult to satisfy when one is very close to the minimum and as argued
  by [Hager and Zhang (2005)][1], one can only expect the minimum to be
  determined within square root of machine precision. To improve the situation,
  they propose to replace the Wolfe conditions with an approximate version
  depending on the derivative of the function which is applied only when one
  is very close to the minimum. The following algorithm implements this
  enhanced scheme.
  ### Usage:
  Primary use of line search methods is as an internal component of a class of
  optimization algorithms (called line search based methods as opposed to
  trust region methods). Hence, the end user will typically not want to access
  line search directly. In particular, inexact line search should not be
  confused with a univariate minimization method. The stopping criteria of line
  search is the satisfaction of Wolfe conditions and not the discovery of the
  minimum of the function.
  With this caveat in mind, the following example illustrates the standalone
  usage of the line search.
  ```python
    # Define value and gradient namedtuple
    ValueAndGradient = namedtuple('ValueAndGradient', ['x', 'f', 'df'])
    # Define a quadratic target with minimum at 1.3.
    def value_and_gradients_function(x):
      return ValueAndGradient(x=x, f=(x - 1.3) ** 2, df=2 * (x-1.3))
    # Set initial step size.
    step_size = tf.constant(0.1)
    ls_result = tfp.optimizer.linesearch.hager_zhang(
        value_and_gradients_function, initial_step_size=step_size)
    # Evaluate the results.
    with tf.Session() as session:
      results = session.run(ls_result)
      # Ensure convergence.
      assert results.converged
      # If the line search converged, the left and the right ends of the
      # bracketing interval are identical.
      assert results.left.x == result.right.x
      # Print the number of evaluations and the final step size.
      print ("Final Step Size: %f, Evaluations: %d" % (results.left.x,
                                                       results.func_evals))
  ```
  ### References:
  [1]: William Hager, Hongchao Zhang. A new conjugate gradient method with
    guaranteed descent and an efficient line search. SIAM J. Optim., Vol 16. 1,
    pp. 170-172. 2005.
    https://www.math.lsu.edu/~hozhang/papers/cg_descent.pdf
  [2]: William Hager, Hongchao Zhang. Algorithm 851: CG_DESCENT, a conjugate
    gradient method with guaranteed descent. ACM Transactions on Mathematical
    Software, Vol 32., 1, pp. 113-137. 2006.
    http://users.clas.ufl.edu/hager/papers/CG/cg_compare.pdf
  [3]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in
    Operations Research. pp 33-36. 2006
  Args:
    value_and_gradients_function: A Python callable that accepts a real scalar
      tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that
      correspond to scalar tensors of real dtype containing the point at which
      the function was evaluated, the value of the function, and its
      derivative at that point. The other namedtuple fields, if present,
      should be tensors or sequences (possibly nested) of tensors.
      In usual optimization application, this function would be generated by
      projecting the multivariate objective function along some specific
      direction. The direction is determined by some other procedure but should
      be a descent direction (i.e. the derivative of the projected univariate
      function must be negative at 0.).
      Alternatively, the function may represent the batching of `n` such line
      functions (e.g. projecting a single multivariate objective function along
      `n` distinct directions at once) accepting n points as input, i.e. a
      tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned
      namedtuple should each be a tensor of shape [n], with the corresponding
      input points, function values, and derivatives at those input points.
    initial_step_size: (Optional) Scalar positive `Tensor` of real dtype, or
      a tensor of shape [n] in batching mode. The initial value (or values) to
      try to bracket the minimum. Default is `1.` as a float32.
      Note that this point need not necessarily bracket the minimum for the line
      search to work correctly but the supplied value must be greater than 0.
      A good initial value will make the search converge faster.
    value_at_initial_step: (Optional) The full return value of evaluating
      value_and_gradients_function at initial_step_size, i.e. a namedtuple with
      'x', 'f', 'df', if already known by the caller. If supplied the value of
      `initial_step_size` will be ignored, otherwise the tuple will be computed
      by evaluating value_and_gradients_function.
    value_at_zero: (Optional) The full return value of
      value_and_gradients_function at `0.`, i.e. a namedtuple with
      'x', 'f', 'df', if already known by the caller. If not supplied the tuple
      will be computed by evaluating value_and_gradients_function.
    converged: (Optional) In batching mode a tensor of shape [n], indicating
      batch members which have already converged and no further search should
      be performed. These batch members are also reported as converged in the
      output, and both their `left` and `right` are set to the
      `value_at_initial_step`.
    threshold_use_approximate_wolfe_condition: Scalar positive `Tensor`
      of real dtype. Corresponds to the parameter 'epsilon' in
      [Hager and Zhang (2006)][2]. Used to estimate the
      threshold at which the line search switches to approximate Wolfe
      conditions.
    shrinkage_param: Scalar positive Tensor of real dtype. Must be less than
      `1.`. Corresponds to the parameter `gamma` in
      [Hager and Zhang (2006)][2].
      If the secant**2 step does not shrink the bracketing interval by this
      proportion, a bisection step is performed to reduce the interval width.
    expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
      than `1.`. Used to expand the initial interval in case it does not bracket
      a minimum. Corresponds to `rho` in [Hager and Zhang (2006)][2].
    sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
      Bounded above by the curvature param. Corresponds to `delta` in the
      terminology of [Hager and Zhang (2006)][2].
    curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
      by `1.`. Corresponds to 'sigma' in the terminology of
      [Hager and Zhang (2006)][2].
    step_size_shrink_param: Ignored; deprecated; retained for backward
      compatibility.
    max_iterations: Positive scalar `Tensor` of integral dtype or None. The
      maximum number of iterations to perform in the line search. The number of
      iterations used to bracket the minimum are also counted against this
      parameter.
    name: (Optional) Python str. The name prefixed to the ops created by this
      function. If not supplied, the default name 'hager_zhang' is used.
  Returns:
    results: A namedtuple containing the following attributes.
      converged: Boolean `Tensor` of shape [n]. Whether a point satisfying
        Wolfe/Approx wolfe was found.
      failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g.
        if either the objective function or the gradient are not finite at
        an evaluation point.
      iterations: Scalar int32 `Tensor`. Number of line search iterations made.
      func_evals: Scalar int32 `Tensor`. Number of function evaluations made.
      left: A namedtuple, as returned by value_and_gradients_function,
        of the left end point of the final bracketing interval. Values are
        equal to those of `right` on batch members where converged is True.
        Otherwise, it corresponds to the last interval computed.
      right: A namedtuple, as returned by value_and_gradients_function,
        of the right end point of the final bracketing interval. Values are
        equal to those of `left` on batch members where converged is True.
        Otherwise, it corresponds to the last interval computed.
  """
  del step_size_shrink_param  # Deprecated and unused (see decorator above).
  with tf.name_scope(name or 'hager_zhang'):
    val_0, val_initial, f_lim, prepare_evals = _prepare_args(
        value_and_gradients_function,
        initial_step_size,
        value_at_initial_step,
        value_at_zero,
        threshold_use_approximate_wolfe_condition)
    # A batch member has usable inputs iff its values at 0 are finite, the
    # direction is a descent direction (df < 0 at 0), and the initial step
    # is positive and finite.
    valid_inputs = (hzl.is_finite(val_0) & (val_0.df < 0) &
                    tf.math.is_finite(val_initial.x) & (val_initial.x > 0))
    if converged is None:
      init_converged = tf.zeros_like(valid_inputs)  # i.e. all false.
    else:
      init_converged = tf.convert_to_tensor(converged)
    # Members that were not pre-converged and have invalid inputs are failures.
    failed = ~init_converged & ~valid_inputs
    init_interval = HagerZhangLineSearchResult(
        converged=init_converged,
        failed=failed,
        func_evals=prepare_evals,
        iterations=tf.convert_to_tensor(0),
        # Pre-converged members report [val_0, val_0]; the rest start with
        # the interval [0, initial_step] to be bracketed/searched.
        left=val_0,
        right=hzl.val_where(init_converged, val_0, val_initial))
    def _apply_bracket_and_search():
      """Bracketing and searching to do for valid inputs."""
      return _bracket_and_search(
          value_and_gradients_function, init_interval, f_lim, max_iterations,
          shrinkage_param, expansion_param, sufficient_decrease_param,
          curvature_param)
    # Run the search only if at least one batch member still needs it.
    init_active = ~init_interval.failed & ~init_interval.converged
    return prefer_static.cond(
        tf.reduce_any(init_active),
        _apply_bracket_and_search,
        lambda: init_interval)
# Loop state threaded through the inner line-search iteration.
_LineSearchInnerResult = collections.namedtuple('_LineSearchInnerResult', [
    'iteration',
    'found_wolfe',
    'failed',
    'num_evals',
    'left',
    'right'])
def _bracket_and_search(
    value_and_gradients_function,
    init_interval,
    f_lim,
    max_iterations,
    shrinkage_param,
    expansion_param,
    sufficient_decrease_param,
    curvature_param):
  """Brackets the minimum and then runs the main line-search loop.

  Args:
    value_and_gradients_function: A Python callable that accepts a real scalar
      (or, in batching mode, a shape `[n]`) tensor and returns a namedtuple
      with fields 'x', 'f' and 'df' holding the evaluation point(s), function
      value(s) and derivative(s). The derivative at `0.` must be negative
      (a descent direction). Other namedtuple fields, if present, should be
      tensors or (possibly nested) sequences of tensors.
    init_interval: Instance of `HagerZhangLineSearchResults` containing the
      initial line search interval. The gradient of init_interval.left must
      be negative (a descent direction), while init_interval.right must be
      positive and finite.
    f_lim: Scalar `Tensor` of float dtype. Threshold for the approximate
      Wolfe conditions.
    max_iterations: Positive scalar `Tensor` of integral dtype. Maximum
      number of line search iterations; bracketing iterations count against
      it too.
    shrinkage_param: Scalar positive Tensor of real dtype, less than `1.`;
      `gamma` in [Hager and Zhang (2006)][2].
    expansion_param: Scalar positive `Tensor` of real dtype, greater than
      `1.`; `rho` in [Hager and Zhang (2006)][2]. Used to expand the initial
      interval when it does not bracket a minimum.
    sufficient_decrease_param: Positive scalar `Tensor` of real dtype,
      bounded above by the curvature param; `delta` in
      [Hager and Zhang (2006)][2].
    curvature_param: Positive scalar `Tensor` of real dtype, bounded above
      by `1.`; `sigma` in [Hager and Zhang (2006)][2].

  Returns:
    A namedtuple with fields:
      converged: Boolean `Tensor` of shape [n]; whether a Wolfe/approx-Wolfe
        point was found.
      failed: Boolean `Tensor` of shape [n]; whether the search failed, e.g.
        because the objective or gradient was not finite at an evaluation
        point.
      iterations: Scalar int32 `Tensor`; number of iterations made.
      func_evals: Scalar int32 `Tensor`; number of function evaluations made.
      left: Namedtuple (as returned by value_and_gradients_function) for the
        left end point of the updated bracketing interval.
      right: Same, for the right end point.
  """
  bracketed = hzl.bracket(value_and_gradients_function, init_interval,
                          f_lim, max_iterations, expansion_param)
  # Bracketing may collapse the interval to (numerically) a single point;
  # that already counts as convergence.
  converged = init_interval.converged | _very_close(
      bracketed.left.x, bracketed.right.x)
  # Not converged but the iteration budget is spent: mark as failed.
  out_of_iterations = ~converged & (bracketed.iteration >= max_iterations)
  search_interval = HagerZhangLineSearchResult(
      converged=converged,
      failed=bracketed.failed | out_of_iterations,
      iterations=bracketed.iteration,
      func_evals=bracketed.num_evals,
      left=bracketed.left,
      right=bracketed.right)
  return _line_search_after_bracketing(
      value_and_gradients_function, search_interval, init_interval.left,
      f_lim, max_iterations, sufficient_decrease_param, curvature_param,
      shrinkage_param)
def _line_search_after_bracketing(
    value_and_gradients_function,
    search_interval,
    val_0,
    f_lim,
    max_iterations,
    sufficient_decrease_param,
    curvature_param,
    shrinkage_param):
  """The main loop of line search after the minimum has been bracketed.

  Args:
    value_and_gradients_function: A Python callable that accepts a real scalar
      tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that
      correspond to scalar tensors of real dtype containing the point at which
      the function was evaluated, the value of the function, and its
      derivative at that point. The other namedtuple fields, if present,
      should be tensors or sequences (possibly nested) of tensors.
      In usual optimization application, this function would be generated by
      projecting the multivariate objective function along some specific
      direction. The direction is determined by some other procedure but
      should be a descent direction (i.e. the derivative of the projected
      univariate function must be negative at 0.).
      Alternatively, the function may represent the batching of `n` such line
      functions (e.g. projecting a single multivariate objective function
      along `n` distinct directions at once) accepting n points as input,
      i.e. a tensor of shape [n], and the fields 'x', 'f' and 'df' in the
      returned namedtuple should each be a tensor of shape [n], with the
      corresponding input points, function values, and derivatives at those
      input points.
    search_interval: Instance of `HagerZhangLineSearchResults` containing
      the current line search interval.
    val_0: A namedtuple as returned by value_and_gradients_function evaluated
      at `0.`. The gradient must be negative (i.e. must be a descent
      direction).
    f_lim: Scalar `Tensor` of float dtype.
    max_iterations: Positive scalar `Tensor` of integral dtype. The maximum
      number of iterations to perform in the line search. The number of
      iterations used to bracket the minimum are also counted against this
      parameter.
    sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
      Bounded above by the curvature param. Corresponds to `delta` in the
      terminology of [Hager and Zhang (2006)][2].
    curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
      by `1.`. Corresponds to 'sigma' in the terminology of
      [Hager and Zhang (2006)][2].
    shrinkage_param: Scalar positive Tensor of real dtype. Must be less than
      `1.`. Corresponds to the parameter `gamma` in
      [Hager and Zhang (2006)][2].

  Returns:
    A namedtuple containing the following fields.
      converged: Boolean `Tensor` of shape [n]. Whether a point satisfying
        Wolfe/Approx wolfe was found.
      failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g.
        if either the objective function or the gradient are not finite at
        an evaluation point.
      iterations: Scalar int32 `Tensor`. Number of line search iterations
        made.
      func_evals: Scalar int32 `Tensor`. Number of function evaluations made.
      left: A namedtuple, as returned by value_and_gradients_function,
        of the left end point of the updated bracketing interval.
      right: A namedtuple, as returned by value_and_gradients_function,
        of the right end point of the updated bracketing interval.
  """

  def _loop_cond(curr_interval):
    """Loop condition: some batch member still active and budget remains."""
    active = ~(curr_interval.converged | curr_interval.failed)
    return (curr_interval.iterations <
            max_iterations) & tf.reduce_any(active)

  def _loop_body(curr_interval):
    """The loop body: one secant2 step, then optional shrinkage handling."""
    secant2_raw_result = hzl.secant2(
        value_and_gradients_function, val_0, curr_interval, f_lim,
        sufficient_decrease_param, curvature_param)
    secant2_result = HagerZhangLineSearchResult(
        converged=secant2_raw_result.converged,
        failed=secant2_raw_result.failed,
        iterations=curr_interval.iterations + 1,
        func_evals=secant2_raw_result.num_evals,
        left=secant2_raw_result.left,
        right=secant2_raw_result.right)

    # Only members that neither converged nor failed in the secant2 step
    # need the interval-shrinkage check below.
    should_check_shrinkage = ~(secant2_result.converged | secant2_result.failed)

    def _do_check_shrinkage():
      """Check if the interval has shrunk enough; bisect where it has not."""
      old_width = curr_interval.right.x - curr_interval.left.x
      new_width = secant2_result.right.x - secant2_result.left.x
      sufficient_shrinkage = new_width < old_width * shrinkage_param
      # A flat function over a sufficiently shrunk interval counts as
      # converged (no further progress is possible numerically).
      func_is_flat = (
          _very_close(curr_interval.left.f, curr_interval.right.f) &
          _very_close(secant2_result.left.f, secant2_result.right.f))
      new_converged = (
          should_check_shrinkage & sufficient_shrinkage & func_is_flat)
      needs_inner_bisect = should_check_shrinkage & ~sufficient_shrinkage
      inner_bisect_args = secant2_result._replace(
          converged=secant2_result.converged | new_converged)

      def _apply_inner_bisect():
        return _line_search_inner_bisection(
            value_and_gradients_function, inner_bisect_args,
            needs_inner_bisect, f_lim)

      return prefer_static.cond(
          tf.reduce_any(needs_inner_bisect),
          _apply_inner_bisect,
          lambda: inner_bisect_args)

    next_args = prefer_static.cond(
        tf.reduce_any(should_check_shrinkage),
        _do_check_shrinkage,
        lambda: secant2_result)

    # An interval collapsed to a single float also counts as converged.
    interval_shrunk = (
        ~next_args.failed & _very_close(next_args.left.x, next_args.right.x))
    return [next_args._replace(converged=next_args.converged | interval_shrunk)]

  return tf.while_loop(
      cond=_loop_cond,
      body=_loop_body,
      loop_vars=[search_interval],
      parallel_iterations=1)[0]
def _line_search_inner_bisection(
    value_and_gradients_function,
    search_interval,
    active,
    f_lim):
  """Bisects the current interval and updates it with the midpoint value."""
  mid_x = (search_interval.left.x + search_interval.right.x) / 2
  val_mid = value_and_gradients_function(mid_x)
  mid_is_finite = hzl.is_finite(val_mid)
  # Members whose midpoint evaluation produced non-finite values fail here;
  # the rest stay active for the interval update.
  still_active = active & mid_is_finite
  next_interval = search_interval._replace(
      failed=search_interval.failed | (active & ~mid_is_finite),
      func_evals=search_interval.func_evals + 1)

  def _apply_update():
    update_result = hzl.update(
        value_and_gradients_function, next_interval.left, next_interval.right,
        val_mid, f_lim, active=still_active)
    return HagerZhangLineSearchResult(
        converged=next_interval.converged,
        failed=next_interval.failed | update_result.failed,
        iterations=next_interval.iterations + update_result.iteration,
        func_evals=next_interval.func_evals + update_result.num_evals,
        left=update_result.left,
        right=update_result.right)

  return prefer_static.cond(
      tf.reduce_any(still_active), _apply_update, lambda: next_interval)
def _prepare_args(value_and_gradients_function,
                  initial_step_size,
                  val_initial,
                  val_0,
                  approximate_wolfe_threshold):
  """Prepares the arguments for the line search initialization.

  Evaluates the objective at `0.` and at the initial step size whenever the
  caller has not already supplied those values, and computes the function
  value threshold used by the approximate Wolfe conditions.

  Args:
    value_and_gradients_function: A Python callable that accepts a real
      scalar (or, in batching mode, a shape `[n]`) tensor and returns a
      namedtuple with fields 'x', 'f' and 'df' holding the evaluation
      point(s), function value(s) and derivative(s). Other namedtuple
      fields, if present, should be tensors or (possibly nested) sequences
      of tensors.
    initial_step_size: Scalar positive `Tensor` of real dtype, or a tensor
      of shape [n] in batching mode. The initial value (or values) to try
      to bracket the minimum; must be greater than 0. Defaults to `1.` as
      a float32 when None. Ignored when `val_initial` is supplied.
    val_initial: The full return value of value_and_gradients_function at
      `initial_step_size`, if already known by the caller; None otherwise.
    val_0: The full return value of value_and_gradients_function at `0.`,
      if already known by the caller; None otherwise.
    approximate_wolfe_threshold: Scalar positive `Tensor` of real dtype;
      the parameter 'epsilon' of [Hager and Zhang (2006)][2] used to
      estimate the threshold at which the search switches to approximate
      Wolfe conditions.

  Returns:
    val_0: A namedtuple, as returned by value_and_gradients_function,
      with the value and derivative of the function at `0.`.
    val_initial: A namedtuple, as returned by value_and_gradients_function,
      with the value and derivative at `initial_step_size`.
    f_lim: Real `Tensor` of shape [n]. The function value threshold for
      the approximate Wolfe conditions to be checked.
    eval_count: Scalar int32 `Tensor`. The number of target function
      evaluations made by this function.
  """
  eval_count = 0
  if val_initial is None:
    # Default the trial step to float32 1. when nothing was supplied.
    if initial_step_size is None:
      step = np.float32(1.)
    else:
      step = tf.convert_to_tensor(initial_step_size)
    val_initial = value_and_gradients_function(step)
    eval_count += 1
  if val_0 is None:
    val_0 = value_and_gradients_function(tf.zeros_like(val_initial.x))
    eval_count += 1
  f_lim = val_0.f + (approximate_wolfe_threshold * tf.math.abs(val_0.f))
  return val_0, val_initial, f_lim, tf.convert_to_tensor(eval_count)
def _very_close(x, y):
  # True where `x` and `y` are at most one ULP apart: stepping one
  # representable float from `x` toward `y` reaches or passes `y`.
  return tf.math.nextafter(x, y) >= y
def _to_str(x):
  """Converts a bool tensor to a tensor of 'True'/'False' strings.

  Non-bool tensors are returned unchanged.
  """
  x = tf.convert_to_tensor(x)
  if x.dtype != tf.bool:
    return x
  return tf.where(x, 'True', 'False')
# A convenience function useful while debugging in the graph mode.
def _print(pass_through_tensor, values):
  """Wrapper for tf.Print which supports lists and namedtuples for printing.

  NOTE(review): `tf.Print` is TF1-era (removed in TF2) -- this helper is a
  graph-mode debugging aid only.
  """
  flat_values = []
  for value in values:
    if hasattr(value, '_fields'):
      # Namedtuple: print each field name followed by its value.
      for field in value._fields:
        flat_values.extend([field, _to_str(getattr(value, field))])
    elif isinstance(value, (list, tuple)):
      flat_values.extend(_to_str(v) for v in value)
    else:
      flat_values.append(_to_str(value))
  return tf.Print(pass_through_tensor, flat_values)
|
{"hexsha": "c7e0d6622553f78f88722e440ea0849b9fc34486", "size": 30788, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow_probability/python/optimizer/linesearch/hager_zhang.py", "max_stars_repo_name": "chrism0dwk/probability", "max_stars_repo_head_hexsha": "ab260f15cae94c6802c2f2769fb448ad213b79cd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-02-21T06:30:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-08T19:29:15.000Z", "max_issues_repo_path": "tensorflow_probability/python/optimizer/linesearch/hager_zhang.py", "max_issues_repo_name": "chrism0dwk/probability", "max_issues_repo_head_hexsha": "ab260f15cae94c6802c2f2769fb448ad213b79cd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-08-25T16:14:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T04:47:11.000Z", "max_forks_repo_path": "tensorflow_probability/python/optimizer/linesearch/hager_zhang.py", "max_forks_repo_name": "chrism0dwk/probability", "max_forks_repo_head_hexsha": "ab260f15cae94c6802c2f2769fb448ad213b79cd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-31T13:08:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-31T13:08:33.000Z", "avg_line_length": 47.4391371341, "max_line_length": 94, "alphanum_fraction": 0.7183318176, "include": true, "reason": "import numpy", "num_tokens": 6983}
|
#!/usr/bin/env python
# coding: utf-8

# # Convert CIF to JCPDS

# In[1]:
# Notebook magics: inline plots at retina resolution.
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")

# * This notebook shows how to make an XRD plot using `pymatgen`.
#
# * This also aims to show how to read `CIF` files, convert them to `JCPDS`.
#
# * Note that `ds_jcpds` is different from that in `PeakPo`, but it produces readable jcpds for PeakPo.

# In[2]:
import pymatgen as mg
from pymatgen import Lattice, Structure
from pymatgen.analysis.diffraction.xrd import XRDCalculator
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# In[3]:
mg.__version__

# This works with `pymatgen` version `2019.4.11`.

# In[4]:
# import sys
# sys.path.insert(0, '../peakpo')
import ds_jcpds

# ## Input parameters

# In[5]:
# Input CIF, output JCPDS path, and the comment label written into it.
fn_cif = "./MgSiO3_bm.cif"
fn_jcpds = './MgSiO3-bm.jcpds'
comments_jcpds = "Bridgmanite"

# In[6]:
# Equation-of-state parameters: bulk modulus (GPa), its pressure
# derivative, and thermal expansion -- presumably volumetric, in 1/K
# (TODO confirm against ds_jcpds conventions).
k0 = 260.
k0p = 4.00
alpha = 3.16e-5

# In[7]:
# X-ray wavelength and two-theta range for the calculated pattern.
wl_xray = 0.3344
xrange = (0, 40)

# ## Read CIF

# The `cif` file below was downloaded from American mineralogist crystal structure database.

# In[8]:
material = mg.Structure.from_file(fn_cif)

# ## Get some parameters in CIF

# In[9]:
print('Unit-cell volume = ', material.volume)
print('Density = ', material.density)
print('Chemical formula = ', material.formula)

# ## Get lattice parameters from CIF

# In[10]:
lattice = material.lattice
print('Lattice parameters = ', lattice.a, lattice.b, lattice.c, lattice.alpha, lattice.beta, lattice.gamma)
crystal_system = SpacegroupAnalyzer(material).get_crystal_system()
print(crystal_system)

# ## Get diffraction pattern

# In[11]:
c = XRDCalculator(wavelength=wl_xray)

# In[12]:
pattern = c.get_pattern(material, two_theta_range = xrange)

# ## Extract twotheta, d-sp, int, hkl

# In[13]:
pattern.hkls[0][0]['hkl']

# In[14]:
pattern.hkls.__len__()

# In[15]:
# Collect the Miller indices of the first hkl entry of each reflection.
h = []; k = []; l = []
for i in range(pattern.hkls.__len__()):
    h.append(pattern.hkls[i][0]['hkl'][0])
    k.append(pattern.hkls[i][0]['hkl'][1])
    l.append(pattern.hkls[i][0]['hkl'][2])

# In[16]:
# One row per reflection: two-theta, d-spacing, intensity, h, k, l.
d_lines = [pattern.x, pattern.d_hkls, pattern.y, h, k, l ]
diff_lines = np.transpose(np.asarray(d_lines))
print(diff_lines[1,:])

# ## Table output

# In[17]:
table = pd.DataFrame(data = diff_lines,  # values
                     columns=['Two Theta', 'd-spacing', 'intensity', 'h', 'k', 'l'])  # 1st row as the column names
table.head()

# ## Plot peak positions generated from pymatgen

# In[18]:
f = plt.figure(figsize=(10,3))
plt.vlines(diff_lines[:,0], 0., diff_lines[:,2], color='b');

# ## Convert to JCPDS

# Setup an `jcpds` object from a `cif` file

# In[19]:
material_jcpds = ds_jcpds.JCPDS()
material_jcpds.set_from_cif(fn_cif, k0, k0p, thermal_expansion=alpha, two_theta_range=xrange)

# Calculate diffraction pattern at a pressure.

# In[20]:
material_jcpds.cal_dsp(pressure = 100.)
dl = material_jcpds.get_DiffractionLines()
tth, inten = material_jcpds.get_tthVSint(wl_xray)

# In[21]:
# Compare pymatgen pattern (ambient, blue) with jcpds at 100 GPa (red).
f, ax = plt.subplots(2, 1, figsize=(10,3), sharex=True)
ax[0].vlines(diff_lines[:,0], 0., diff_lines[:,2], color='b')
ax[1].vlines(tth, 0., inten, color = 'r')
ax[0].set_xlim(7.5,9)

# ## Save to a JCPDS file

# In[22]:
material_jcpds.write_to_file(fn_jcpds, comments=comments_jcpds)

# ## Read back the written JCPDS for test

# In[23]:
material_test = ds_jcpds.JCPDS(filename = fn_jcpds)

# Calculate a pattern at a pressure

# In[24]:
material_test.cal_dsp(pressure = 100.)
material_test.get_DiffractionLines()
tth, inten = material_test.get_tthVSint(wl_xray)

# In[25]:
# Overlay original (blue) and round-tripped (red) peak positions.
f = plt.figure(figsize=(10,3))
plt.vlines(diff_lines[:,0], 0., diff_lines[:,2], color='b')
plt.vlines(tth, 0., inten, color = 'r');

# In[ ]:

# In[ ]:

# In[ ]:
|
{"hexsha": "17c53815950c6f44339a75ad1e8676e212140e84", "size": 3904, "ext": "py", "lang": "Python", "max_stars_repo_path": "Util_cif_to_jcpds/Convert_CIF_to_JCPDS.py", "max_stars_repo_name": "SHDShim/PMatRes", "max_stars_repo_head_hexsha": "92440c11f2723861dbb82cecdc321fcef9de4443", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Util_cif_to_jcpds/Convert_CIF_to_JCPDS.py", "max_issues_repo_name": "SHDShim/PMatRes", "max_issues_repo_head_hexsha": "92440c11f2723861dbb82cecdc321fcef9de4443", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Util_cif_to_jcpds/Convert_CIF_to_JCPDS.py", "max_forks_repo_name": "SHDShim/PMatRes", "max_forks_repo_head_hexsha": "92440c11f2723861dbb82cecdc321fcef9de4443", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.6787148594, "max_line_length": 115, "alphanum_fraction": 0.671875, "include": true, "reason": "import numpy", "num_tokens": 1231}
|
{-# LANGUAGE FlexibleContexts #-}
-- |
-- Module    : Statistics.Sample.Internal
-- Copyright : (c) 2013 Bryan O'Sullivan
-- License   : BSD3
--
-- Maintainer  : bos@serpentine.com
-- Stability   : experimental
-- Portability : portable
--
-- Internal functions for computing over samples.
module Statistics.Sample.Internal
    (
      robustSumVar
    , sum
    ) where

import Numeric.Sum (kbn, sumVector)
import Prelude hiding (sum)
import Statistics.Function (square)
import qualified Data.Vector.Generic as G

-- | Sum of squared deviations of the sample elements from the given
-- value @m@ (typically the sample mean), accumulated with a
-- compensated (Kahan-Babuska-Neumaier) sum for numerical robustness.
robustSumVar :: (G.Vector v Double) => Double -> v Double -> Double
robustSumVar m = sum . G.map (square . subtract m)
{-# INLINE robustSumVar #-}

-- | Compensated (Kahan-Babuska-Neumaier) sum of a vector of doubles;
-- shadows 'Prelude.sum' within this package's internals.
sum :: (G.Vector v Double) => v Double -> Double
sum = sumVector kbn
{-# INLINE sum #-}
|
{"hexsha": "53c9a97a337e19c563c27c7bfbdc7bb600264de7", "size": 752, "ext": "hs", "lang": "Haskell", "max_stars_repo_path": "Statistics/Sample/Internal.hs", "max_stars_repo_name": "StefanHubner/statistics", "max_stars_repo_head_hexsha": "e98af025ef4aa0bc31a5b1fcf88bb80295aac956", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 182, "max_stars_repo_stars_event_min_datetime": "2015-01-04T04:34:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-28T18:39:40.000Z", "max_issues_repo_path": "Statistics/Sample/Internal.hs", "max_issues_repo_name": "StefanHubner/statistics", "max_issues_repo_head_hexsha": "e98af025ef4aa0bc31a5b1fcf88bb80295aac956", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 105, "max_issues_repo_issues_event_min_datetime": "2015-01-07T07:49:28.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-26T14:21:32.000Z", "max_forks_repo_path": "Statistics/Sample/Internal.hs", "max_forks_repo_name": "StefanHubner/statistics", "max_forks_repo_head_hexsha": "e98af025ef4aa0bc31a5b1fcf88bb80295aac956", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 46, "max_forks_repo_forks_event_min_datetime": "2015-02-13T00:40:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-15T11:19:16.000Z", "avg_line_length": 24.2580645161, "max_line_length": 67, "alphanum_fraction": 0.6861702128, "num_tokens": 187}
|
# encoding=utf8
"""Implementation of Cosine mixture benchmark."""
from numpy import cos, pi
from NiaPy.benchmarks.benchmark import Benchmark
__all__ = ["CosineMixture"]
class CosineMixture(Benchmark):
    r"""Implementation of the Cosine mixture function.

    Date: 2018

    Author: Klemen Berkovič

    License: MIT

    Function:
        **Cosine Mixture Function**

        :math:`f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2`

        **Input domain:**
        The function can be defined on any input domain but it is usually
        evaluated on the hypercube :math:`x_i ∈ [-1, 1]`, for all
        :math:`i = 1, 2,..., D`.

        **Global maximum:**
        :math:`f(x^*) = -0.1 D`, at :math:`x^* = (0.0,...,0.0)`

    LaTeX formats:
        Inline:
            $f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2$

        Equation:
            \begin{equation} f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2 \end{equation}

        Domain:
            $-1 \leq x_i \leq 1$

    Reference:
        http://infinity77.net/global_optimization/test_functions_nd_C.html#go_benchmark.CosineMixture
    """
    Name = ["CosineMixture"]

    def __init__(self, Lower=-1.0, Upper=1.0):
        r"""Initialize Cosine mixture benchmark.

        Args:
            Lower (Optional[float]): Lower bound of problem.
            Upper (Optional[float]): Upper bound of problem.

        See Also:
            :func:`NiaPy.benchmarks.Benchmark.__init__`
        """
        Benchmark.__init__(self, Lower, Upper)

    @staticmethod
    def latex_code():
        """Return the latex code of the problem.

        Returns:
            [str] -- latex code.
        """
        return r"""$f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2$"""

    @classmethod
    def function(cls):
        """Return benchmark evaluation function.

        Returns:
            [fun] -- Evaluation function.
        """
        def evaluate(D, sol):
            # Accumulate the oscillatory and quadratic terms separately.
            cos_term = sum(cos(5 * pi * sol[i]) for i in range(D))
            quad_term = sum(sol[i] ** 2 for i in range(D))
            return -0.1 * cos_term - quad_term
        return evaluate
|
{"hexsha": "6ea4ea749783792ec13ec93ce2b9da24e71507a0", "size": 2243, "ext": "py", "lang": "Python", "max_stars_repo_path": "NiaPy/benchmarks/cosinemixture.py", "max_stars_repo_name": "lukapecnik/NiaPy", "max_stars_repo_head_hexsha": "a40ac08a4c06a13019ec5e39cc137461884928b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-16T11:15:43.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-16T11:15:43.000Z", "max_issues_repo_path": "NiaPy/benchmarks/cosinemixture.py", "max_issues_repo_name": "lukapecnik/NiaPy", "max_issues_repo_head_hexsha": "a40ac08a4c06a13019ec5e39cc137461884928b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NiaPy/benchmarks/cosinemixture.py", "max_forks_repo_name": "lukapecnik/NiaPy", "max_forks_repo_head_hexsha": "a40ac08a4c06a13019ec5e39cc137461884928b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-25T16:20:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-25T16:20:36.000Z", "avg_line_length": 24.3804347826, "max_line_length": 124, "alphanum_fraction": 0.5358894338, "include": true, "reason": "from numpy", "num_tokens": 692}
|
import unittest
import numpy as np
from pandas import Index
import pandas.util.testing as common
import pandas._tseries as tseries
class TestTseriesUtil(unittest.TestCase):
    """Tests for the low-level fill-vector helpers in pandas._tseries."""

    def test_combineFunc(self):
        pass

    def test_reindex(self):
        pass

    def test_isnull(self):
        pass

    def test_groupby(self):
        pass

    def test_groupby_withnull(self):
        pass

    def _check_fill(self, method, old, new, expect_filler, expect_mask):
        # Compute the fill vector/mask for reindexing `old` onto `new`
        # and compare against the expected values.
        filler, mask = tseries.getFillVec(old, new, old.indexMap,
                                          new.indexMap, method)
        # `assertTrue` replaces the long-deprecated `assert_` alias.
        self.assertTrue(np.array_equal(filler, expect_filler))
        self.assertTrue(np.array_equal(mask, expect_mask))

    def test_getMergeVec(self):
        """No-fill reindexing: only exact index matches map through."""
        expect_mask = np.zeros(12, dtype=bool)
        expect_mask[[1, 5, 10]] = True
        self._check_fill(None, Index([1, 5, 10]), Index(range(12)),
                         [-1, 0, -1, -1, -1, 1, -1, -1, -1, -1, 2, -1],
                         expect_mask)
        # corner case: no overlap between old and new index
        self._check_fill(None, Index([1, 4]), Index(range(5, 10)),
                         [-1, -1, -1, -1, -1], np.zeros(5, dtype=bool))

    def test_backfill(self):
        """BACKFILL propagates the next valid observation backward."""
        expect_mask = np.ones(12, dtype=bool)
        expect_mask[-1] = False  # nothing after position 10 to fill from
        self._check_fill('BACKFILL', Index([1, 5, 10]), Index(range(12)),
                         [0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1],
                         expect_mask)
        # corner case: old index entirely before new index
        self._check_fill('BACKFILL', Index([1, 4]), Index(range(5, 10)),
                         [-1, -1, -1, -1, -1], np.zeros(5, dtype=bool))

    def test_pad(self):
        """PAD propagates the last valid observation forward."""
        expect_mask = np.ones(12, dtype=bool)
        expect_mask[0] = False  # nothing before position 1 to fill from
        self._check_fill('PAD', Index([1, 5, 10]), Index(range(12)),
                         [-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2],
                         expect_mask)
        # corner case: old index entirely after new index
        self._check_fill('PAD', Index([5, 10]), Index(range(5)),
                         [-1, -1, -1, -1, -1], np.zeros(5, dtype=bool))
class TestMoments(unittest.TestCase):
    # Placeholder: moment-computation tests not yet implemented.
    pass
|
{"hexsha": "d2cb8119ac566bf8514f4b589cffff5f9935a66c", "size": 3172, "ext": "py", "lang": "Python", "max_stars_repo_path": "pandas/tests/test_tseries.py", "max_stars_repo_name": "timClicks/pandas", "max_stars_repo_head_hexsha": "83b216c9efb439c1d19690feff1dcba58c6a2f88", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-17T11:33:26.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-17T11:33:26.000Z", "max_issues_repo_path": "pandas/tests/test_tseries.py", "max_issues_repo_name": "timClicks/pandas", "max_issues_repo_head_hexsha": "83b216c9efb439c1d19690feff1dcba58c6a2f88", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pandas/tests/test_tseries.py", "max_forks_repo_name": "timClicks/pandas", "max_forks_repo_head_hexsha": "83b216c9efb439c1d19690feff1dcba58c6a2f88", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0980392157, "max_line_length": 69, "alphanum_fraction": 0.5542244641, "include": true, "reason": "import numpy", "num_tokens": 860}
|
import logging
import math
import os
import random
from datetime import datetime
from scipy import ndimage
import numpy as np
import cv2
import torch
from torchvision.utils import make_grid
###########
# visdom
###########
def create_vis_plot(vis, xlabel, ylabel, title, legend):
    """Create an empty visdom line-plot window and return its handle.

    Args:
        vis: a visdom connection object exposing ``line``.
        xlabel, ylabel, title: plot labels.
        legend: list of series names; one zero-initialised Y column is
            created per entry.

    Returns:
        The window handle returned by ``vis.line``.
    """
    plot_opts = dict(xlabel=xlabel,
                     ylabel=ylabel,
                     title=title,
                     legend=legend)
    x_init = torch.zeros((1, )).cpu()
    y_init = torch.zeros((1, len(legend))).cpu()
    return vis.line(X=x_init, Y=y_init, opts=plot_opts)
def update_vis(vis, window, xaxis, *args):
    """Append one data point per series to an existing visdom window.

    Args:
        vis: a visdom connection object exposing ``line``.
        window: handle of the window to update.
        xaxis: x coordinate of the new point.
        *args: one y value per plotted series.
    """
    vis.line(X=torch.Tensor([xaxis]).cpu(),
             Y=torch.Tensor([args]).cpu(),
             win=window,
             update='append')
    return
####################
# miscellaneous
####################
def get_timestamp():
    """Return the current local time as a 'yymmdd-HHMMSS' string."""
    return '{:%y%m%d-%H%M%S}'.format(datetime.now())
def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of a separate ``os.path.exists`` check,
    which eliminates the check-then-create race (another process creating
    the path between the test and ``os.makedirs`` would previously raise).
    Note: still raises FileExistsError if *path* exists but is a file.
    """
    os.makedirs(path, exist_ok=True)
def mkdirs(paths):
    """Create each directory in *paths* (a single str or an iterable of str)."""
    if isinstance(paths, str):
        paths = [paths]
    for p in paths:
        mkdir(p)
def mkdir_and_rename(path):
    """Create *path*, archiving any existing directory of that name first.

    An existing *path* is renamed to '<path>_archived_<timestamp>' (logged
    and printed) before a fresh directory is created in its place.
    """
    if os.path.exists(path):
        archived = path + '_archived_' + get_timestamp()
        print('Path already exists. Rename it to [{:s}]'.format(archived))
        logging.getLogger('base').info(
            'Path already exists. Rename it to [{:s}]'.format(archived))
        os.rename(path, archived)
    os.makedirs(path)
def set_random_seed(seed):
    """Seed the python, numpy and torch (CPU and CUDA) RNGs for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
def setup_logger(logger_name, root, phase, level=logging.INFO, screen=False):
    """Set up a named logger writing to '<root>/<phase>_<timestamp>.log'.

    Args:
        logger_name: name passed to ``logging.getLogger``.
        root: directory in which the log file is created.
        phase: prefix for the log file name (e.g. 'train', 'val').
        level: logging level for the logger.
        screen: if True, also echo records to the console.
    """
    logger_obj = logging.getLogger(logger_name)
    if logger_obj.hasHandlers():
        # Avoid duplicate output when the logger is configured repeatedly.
        logger_obj.handlers.clear()
    fmt = logging.Formatter(
        '%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s',
        datefmt='%y-%m-%d %H:%M:%S')
    logger_obj.setLevel(level)
    log_path = os.path.join(root, phase + '_{}.log'.format(get_timestamp()))
    file_handler = logging.FileHandler(log_path, mode='w')
    file_handler.setFormatter(fmt)
    logger_obj.addHandler(file_handler)
    if screen:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(fmt)
        logger_obj.addHandler(stream_handler)
####################
# image convert
####################
def tensor2img(tensor, dynamic_range=255, min_max=(0, 1)):
'''
Converts a torch Tensor into an image Numpy array
Input: 4D(B,(8/4/3/1),H,W), 3D(C,H,W), or 2D(H,W), any range
Output: 3D(H,W,C) or 2D(H,W), [0,255], (uint8 as default)
'''
assert dynamic_range in (255, 2047), 'Only 255 and 2047 are accepted!'
tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # clamp
tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])
n_dim = tensor.dim()
if n_dim == 4:
n_img = len(tensor)
img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)),
normalize=False).numpy()
img_np = np.transpose(img_np, (1, 2, 0)) # HWC
elif n_dim == 3:
img_np = tensor.numpy()
img_np = np.transpose(img_np, (1, 2, 0)) # HWC
elif n_dim == 2:
img_np = tensor.numpy()
else:
raise TypeError(
'Epxect 4D, 3D or 2D tensor, but received {:d}D'.format(n_dim))
img_np = (img_np * dynamic_range).round()
if dynamic_range == 255:
return img_np.astype(np.uint8)
elif dynamic_range == 2047:
return img_np.astype(np.uint16)
def save_img(img, img_path):
    """Persist ``img`` to disk as a raw ``.npy`` numpy array (not an encoded image)."""
    # cv2.imwrite(img_path, img)
    np.save(img_path, img)
####################
# observation model
####################
def gaussian2d(N, std):
    """Return an N x N separable Gaussian window (unnormalized, peak 1 at center)."""
    coords = np.arange(-(N - 1) // 2, (N + 2) // 2)
    xx, yy = np.meshgrid(coords, coords)
    sigma = np.double(std)
    return np.exp(-0.5 * (xx / sigma)**2) * np.exp(-0.5 * (yy / sigma)**2)
def kaiser2d(N, beta):
    """Return an N x N rotationally-symmetric Kaiser window, zero outside the 1-D support."""
    axis = np.arange(-(N - 1) // 2, (N + 2) // 2) / np.double(N - 1)
    xx, yy = np.meshgrid(axis, axis)
    radius = np.sqrt(xx * xx + yy * yy)
    profile = np.kaiser(N, beta)
    win = np.interp(radius, axis, profile)
    # Samples whose radius falls outside the 1-D profile contribute nothing.
    win[radius > axis[-1]] = 0
    win[radius < axis[0]] = 0
    return win
def fir_filter_wind(Hd, w):
    """
    Window-method FIR design.

    Hd: desired 2-D frequency response; w: 2-D window. Returns the windowed
    impulse response normalized to unit DC gain (sum == 1). Result is complex.
    """
    freq = np.rot90(np.fft.fftshift(np.rot90(Hd, 2)), 2)
    impulse = np.rot90(np.fft.fftshift(np.fft.ifft2(freq)), 2)
    tapered = impulse * w
    return tapered / np.sum(tapered)
def GNyq2win(GNyq, downscale=4, N=41):
    """Build an N x N lowpass kernel matched to a sensor's MTF gain at Nyquist.

    GNyq: amplitude at the Nyquist frequency.
    downscale: spatial size of PAN / spatial size of MS.
    Uses a Gaussian prototype shaped so it reaches GNyq at the cutoff,
    windowed with a Kaiser taper (window FIR method); real part returned.
    """
    cutoff = 1 / downscale
    # Solve for the Gaussian std that yields amplitude GNyq at the cutoff.
    sigma = np.sqrt(((N - 1) * (cutoff / 2))**2 / (-2 * np.log(GNyq)))
    prototype = gaussian2d(N, sigma)
    desired = prototype / np.max(prototype)
    taper = kaiser2d(N, 0.5)
    kernel = fir_filter_wind(desired, taper)
    return np.real(kernel)
def img_resize(img, satellite='QuickBird', downscale=4):
    """Simulate sensor-degraded downsampling of an MS or PAN image.

    The image is lowpass-filtered with the satellite's MTF-matched kernel
    (per-band GNyq for multispectral, GNyqPan for panchromatic) and then
    nearest-neighbor decimated by ``downscale``.

    img: (H, W) PAN or (H, W, C) MS array; singleton dims are squeezed away.
    satellite: 'QuickBird' or 'IKONOS' (raises NotImplementedError otherwise).
    Returns a float64 array of spatial size (H // downscale, W // downscale).
    """
    downscale = int(downscale)
    # Sensor MTF gains at Nyquist; MS band order: B, G, R, NIR.
    if satellite == 'QuickBird':
        GNyq = [0.34, 0.32, 0.30, 0.22]
        GNyqPan = 0.15
    elif satellite == 'IKONOS':
        GNyq = [0.26, 0.28, 0.29, 0.28]
        GNyqPan = 0.17
    else:
        raise NotImplementedError('satellite: QuickBird or IKONOS')
    # lowpass
    img_ = img.squeeze()
    img_ = img_.astype(np.float64)
    if img_.ndim == 2:
        H, W = img_.shape
        lowpass = GNyq2win(GNyqPan, downscale, N=41)
        # NOTE(review): the PAN correlate call is commented out in the
        # original, so the PAN branch only decimates; kept as-is.
        # img_ = ndimage.filters.correlate(img_, lowpass, mode='nearest')
    elif img_.ndim == 3:
        # BUGFIX: was ``img.shape`` (the un-squeezed input), which breaks
        # when the caller passes e.g. a (H, W, 1)-padded or batched array;
        # use the squeezed array's shape instead.
        H, W, _ = img_.shape
        lowpass = [GNyq2win(gnyq, downscale, N=41) for gnyq in GNyq]
        lowpass = np.stack(lowpass, axis=-1)
        img_ = ndimage.filters.correlate(img_, lowpass, mode='nearest')
    # downsampling
    # BUGFIX: cv2.resize takes dsize as (width, height); passing
    # (H//downscale, W//downscale) transposed the output dimensions for
    # non-square images.
    img_ = cv2.resize(img_, dsize=(W // downscale, H // downscale),
                      interpolation=cv2.INTER_NEAREST)
    return img_
####################
# metric
####################
# full reference
def calculate_sam(img1, img2):
    """Spectral Angle Mapper for a 3D image (H, W, C); uint or float [0, 1].

    Returns the mean per-pixel angle (radians) between the two spectral
    vectors; 0 means spectrally identical directions.
    """
    if img1.shape != img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    assert img1.ndim == 3 and img1.shape[
        2] > 1, "image n_channels should be greater than 1"
    a = img1.astype(np.float64)
    b = img2.astype(np.float64)
    dot = np.sum(a * b, axis=2)
    norm_a = np.sqrt(np.sum(a * a, axis=2))
    norm_b = np.sqrt(np.sum(b * b, axis=2))
    # eps keeps the division finite; clipping guards arccos against rounding.
    cos_theta = np.clip(dot / (norm_a * norm_b + np.finfo(np.float64).eps),
                        0, 1)
    return np.mean(np.arccos(cos_theta))
def calculate_psnr(img1, img2, dynamic_range=255):
    """PSNR in dB; img uint8 if dynamic_range 255, uint11 if 2047.

    Returns np.inf for (numerically) identical inputs (MSE below 1e-6).
    """
    if img1.shape != img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    diff = img1.astype(np.float64) - img2.astype(np.float64)
    mse = np.mean(diff * diff)
    if mse < 1e-6:
        return np.inf
    rmse = np.sqrt(mse) + np.finfo(np.float64).eps
    return 20 * np.log10(dynamic_range / rmse)
def calculate_scc(img1, img2):
    """SCC (spatial correlation coefficient) for 2D (H, W) or 3D (H, W, C)
    image; uint or float [0, 1].
    The value is in [-1., 1.] due to the appearance of covariance.

    3D inputs are scored per band (Pearson correlation) and averaged.
    """
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    img1_ = img1.astype(np.float64)
    img2_ = img2.astype(np.float64)
    if img1_.ndim == 2:
        # BUGFIX: was ``img2_.rehshape`` (typo) -> AttributeError; the 2D
        # path could never run before.
        return np.corrcoef(img1_.reshape(1, -1), img2_.reshape(1, -1))[0, 1]
    elif img1_.ndim == 3:
        ccs = [
            np.corrcoef(img1_[..., i].reshape(1, -1),
                        img2_[..., i].reshape(1, -1))[0, 1]
            for i in range(img1_.shape[2])
        ]
        return np.mean(ccs)
    else:
        raise ValueError('Wrong input image dimensions.')
def qindex(img1, img2, block_size=8):
    """Q-index for 2D (one-band) image, shape (H, W); uint or float [0, 1]
    This value is in [-1, 1] due to the appearance of covariance.

    Universal Image Quality Index (Wang & Bovik): local statistics are taken
    with a uniform block_size x block_size box filter over the "valid" region,
    and the degenerate denominators (zero variance and/or zero mean) are
    patched case by case before averaging the per-pixel map.
    """
    assert block_size > 1, 'block_size shold be greater than 1!'
    img1_ = img1.astype(np.float64)
    img2_ = img2.astype(np.float64)
    # Uniform averaging kernel; dividing by block_size**2 makes it a mean.
    window = np.ones((block_size, block_size)) / (block_size**2)
    # window_size = block_size**2
    # filter, valid
    # Crop borders so only fully-covered ("valid") filter responses remain.
    pad_topleft = int(np.floor(block_size / 2))
    pad_bottomright = block_size - 1 - pad_topleft
    # Local means of each image.
    mu1 = cv2.filter2D(
        img1_, -1,
        window)[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright]
    mu2 = cv2.filter2D(
        img2_, -1,
        window)[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    # Local variances and covariance via E[x^2] - E[x]^2.
    sigma1_sq = cv2.filter2D(
        img1_**2, -1, window
    )[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright] - mu1_sq
    sigma2_sq = cv2.filter2D(
        img2_**2, -1, window
    )[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright] - mu2_sq
    sigma12 = cv2.filter2D(
        img1_ * img2_, -1, window
    )[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright] - mu1_mu2
    # all = 1, include the case of simga == mu == 0
    # The three masks below are mutually exclusive, so assignment order
    # does not matter; pixels matching none keep the default value 1.
    qindex_map = np.ones(sigma12.shape)
    # sigma == 0 and mu != 0
    idx = ((sigma1_sq + sigma2_sq) <= 1e-6) * ((mu1_sq + mu2_sq) > 1e-6)
    qindex_map[idx] = 2 * mu1_mu2[idx] / (mu1_sq + mu2_sq)[idx]
    # sigma !=0 and mu == 0
    idx = ((sigma1_sq + sigma2_sq) > 1e-6) * ((mu1_sq + mu2_sq) <= 1e-6)
    qindex_map[idx] = 2 * sigma12[idx] / (sigma1_sq + sigma2_sq)[idx]
    # sigma != 0 and mu != 0
    # General case: luminance term x contrast/structure term.
    idx = ((sigma1_sq + sigma2_sq) > 1e-6) * ((mu1_sq + mu2_sq) > 1e-6)
    qindex_map[idx] = ((2 * mu1_mu2[idx]) *
                       (2 * sigma12[idx])) / ((mu1_sq + mu2_sq)[idx] *
                                              (sigma1_sq + sigma2_sq)[idx])
    return np.mean(qindex_map)
def calculate_qindex(img1, img2, block_size=8):
    """Q-index for 2D (H, W) or 3D (H, W, C) image; uint or float [0, 1].

    3D inputs are scored band by band and the scores averaged.
    """
    if img1.shape != img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    if img1.ndim == 2:
        return qindex(img1, img2, block_size)
    if img1.ndim == 3:
        per_band = [
            qindex(img1[..., band], img2[..., band], block_size)
            for band in range(img1.shape[2])
        ]
        return np.array(per_band).mean()
    raise ValueError('Wrong input image dimensions.')
def ssim(img1, img2, dynamic_range=255):
    """Structural similarity for a single-band 2D image (H, W).

    uint8 data for dynamic_range 255, uint16 for 2047. Local statistics use
    an 11x11 Gaussian window (sigma 1.5) over the valid region only; the
    returned mean may be below zero.
    """
    c1 = (0.01 * dynamic_range)**2
    c2 = (0.03 * dynamic_range)**2
    a = img1.astype(np.float64)
    b = img2.astype(np.float64)
    gauss_1d = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(gauss_1d, gauss_1d.transpose())
    crop = (slice(5, -5), slice(5, -5))  # valid region of the 11x11 filter
    mean_a = cv2.filter2D(a, -1, window)[crop]
    mean_b = cv2.filter2D(b, -1, window)[crop]
    mean_aa = mean_a**2
    mean_bb = mean_b**2
    mean_ab = mean_a * mean_b
    var_a = cv2.filter2D(a**2, -1, window)[crop] - mean_aa
    var_b = cv2.filter2D(b**2, -1, window)[crop] - mean_bb
    cov_ab = cv2.filter2D(a * b, -1, window)[crop] - mean_ab
    ssim_map = ((2 * mean_ab + c1) * (2 * cov_ab + c2)) / (
        (mean_aa + mean_bb + c1) * (var_a + var_b + c2))
    return ssim_map.mean()
def calculate_ssim(img1, img2, dynamic_range=255):
    '''SSIM for 2D (H, W) or 3D (H, W, C) image; uint8 for dynamic_range
    255, uint16 for 2047. 3D inputs are scored per band and averaged.'''
    if img1.shape != img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    if img1.ndim == 2:
        return ssim(img1, img2, dynamic_range)
    if img1.ndim == 3:
        scores = [
            ssim(img1[..., band], img2[..., band], dynamic_range)
            for band in range(img1.shape[2])
        ]
        return np.array(scores).mean()
    raise ValueError('Wrong input image dimensions.')
def calculate_ergas(img_fake, img_real, scale=4):
    """ERGAS for 2D (H, W) or 3D (H, W, C) image; uint or float [0, 1].

    scale: spatial resolution ratio of PAN and MUL, default 4.
    Lower is better; identical images score 0.
    """
    if img_fake.shape != img_real.shape:
        raise ValueError('Input images must have the same dimensions.')
    fake = img_fake.astype(np.float64)
    real = img_real.astype(np.float64)
    eps = np.finfo(np.float64).eps  # avoids division by a zero mean
    if fake.ndim == 2:
        mse = np.mean((fake - real)**2)
        return 100 / scale * np.sqrt(mse / (real.mean()**2 + eps))
    elif fake.ndim == 3:
        band_means = real.reshape(-1, real.shape[2]).mean(axis=0)
        band_mses = ((fake - real)**2).reshape(-1,
                                               fake.shape[2]).mean(axis=0)
        return 100 / scale * np.sqrt(
            (band_mses / (band_means**2 + eps)).mean())
    else:
        raise ValueError('Wrong input image dimensions.')
# No reference
##################
# No reference IQA
##################
def calculate_D_lambda(img_fake, img_lm, scale=4, block_size=32, p=1):
    """Spectral distortion D_lambda, clipped to [0, 1].

    Compares inter-band Q-indices of the generated HRMS (img_fake) with
    those of the reference LRMS (img_lm); a ``scale``-wide border is
    excluded from every Q-index computation.
    """
    assert img_fake.ndim == img_lm.ndim == 3, 'Images must be 3D!'
    n_bands = img_fake.shape[2]
    assert n_bands == img_lm.shape[
        2], 'Fake and lm should have the same number of bands!'
    fake_core = img_fake[scale:-scale, scale:-scale]
    lm_core = img_lm[scale:-scale, scale:-scale]
    q_fake, q_lm = [], []
    # Q-index over every unordered band pair, for each image separately.
    for i in range(n_bands):
        for j in range(i + 1, n_bands):
            q_fake.append(qindex(fake_core[..., i], fake_core[..., j],
                                 block_size=block_size))
            q_lm.append(qindex(lm_core[..., i], lm_core[..., j],
                               block_size=block_size))
    diff = np.abs(np.array(q_fake) - np.array(q_lm))**p
    return np.clip(diff.mean(), 0, 1)**(1 / p)
def calculate_D_s(img_fake,
                  img_lm,
                  pan_hp,
                  satellite='QuickBird',
                  scale=4,
                  block_size=32,
                  q=1):
    """Spatial distortion D_s, clipped to [0, 1].

    Each HRMS band is compared (Q-index) against the HR panchromatic image,
    and each LRMS band against a sensor-degraded LR panchromatic image.
    img_fake: generated HRMS. img_lm: LRMS. pan_hp: HR PAN.
    """
    # --- validate the MS pair ---
    if img_fake.ndim != 3 or img_lm.ndim != 3:
        raise ValueError('Real or fake MS images must be 3D!')
    H_f, W_f, C_f = img_fake.shape
    H_r, W_r, C_r = img_lm.shape
    if H_f // H_r != scale or W_f // W_r != scale:
        raise ValueError('Spatial resolution should be compatible with scale')
    if C_f != C_r:
        raise ValueError('Fake and lm should have the same number of bands!')
    # --- validate PAN, forcing a trailing singleton channel ---
    if pan_hp.ndim == 2:
        pan = np.expand_dims(pan_hp, axis=-1)
    elif pan_hp.ndim == 3:
        pan = pan_hp
    else:
        raise ValueError('Panchromatic image must be 2D or 3D!')
    H_p, W_p, C_p = pan.shape
    if C_p != 1:
        raise ValueError('size of 3rd dim of Panchromatic image must be 1')
    if H_f != H_p or W_f != W_p:
        raise ValueError(
            "Pan's and fake's spatial resolution should be the same")
    # Degrade PAN to low resolution with the sensor MTF model (2D result).
    pan_lr = img_resize(pan, satellite=satellite, downscale=scale)
    # Per-band Q-index against PAN at both resolutions, border excluded.
    Q_hr, Q_lr = [], []
    pan_core = pan[scale:-scale, scale:-scale, 0]
    pan_lr_core = pan_lr[scale:-scale, scale:-scale]
    for band in range(C_f):
        Q_hr.append(qindex(img_fake[scale:-scale, scale:-scale, band],
                           pan_core, block_size=block_size))
        Q_lr.append(qindex(img_lm[scale:-scale, scale:-scale, band],
                           pan_lr_core, block_size=block_size))
    D_s_index = (np.abs(np.array(Q_hr) - np.array(Q_lr))**q).mean()
    return np.clip(D_s_index, 0, 1)**(1 / q)
def calculate_qnr(img_fake,
                  img_lm,
                  pan,
                  satellite='QuickBird',
                  scale=4,
                  block_size=32,
                  p=1,
                  q=1,
                  alpha=1,
                  beta=1):
    """QNR no-reference IQA: (1 - D_lambda)**alpha * (1 - D_s)**beta.

    Returns the tuple (QNR, D_lambda, D_s); higher QNR is better.
    """
    d_lambda = calculate_D_lambda(img_fake, img_lm, scale, block_size, p)
    d_s = calculate_D_s(img_fake, img_lm, pan, satellite, scale,
                        block_size, q)
    qnr = (1 - d_lambda)**alpha * (1 - d_s)**beta
    return qnr, d_lambda, d_s
if __name__ == '__main__':
    # Smoke test: degrade a random 4-band image and report the output size.
    sample = np.random.random((32, 32, 4))
    reduced = img_resize(sample, satellite='QuickBird', downscale=4)
    print(reduced.shape)
|
{"hexsha": "386b1cc7112f509f8e61f5df599ae02cf32ea323", "size": 17350, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/util.py", "max_stars_repo_name": "wasaCheney/PercepPan", "max_stars_repo_head_hexsha": "37d4683e97410846744f8f2e7d9733f551a8771c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-12T13:51:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-06T16:56:12.000Z", "max_issues_repo_path": "utils/util.py", "max_issues_repo_name": "wasaCheney/PercepPan", "max_issues_repo_head_hexsha": "37d4683e97410846744f8f2e7d9733f551a8771c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-28T08:42:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-29T08:18:30.000Z", "max_forks_repo_path": "utils/util.py", "max_forks_repo_name": "wasaCheney/PercepPan", "max_forks_repo_head_hexsha": "37d4683e97410846744f8f2e7d9733f551a8771c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-30T02:52:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-19T09:13:15.000Z", "avg_line_length": 33.3653846154, "max_line_length": 79, "alphanum_fraction": 0.5788472622, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5283}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.