text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
import numpy as np
import os
import random
import sys
import math
import cv2
import pickle as pkl
from pykalman import KalmanFilter
from copy import deepcopy
import matplotlib.pyplot as pl
import matplotlib.animation as animation
from time import sleep
import random
from scipy.stats import multivariate_normal
import json
from matplotlib.patches import Ellipse
from tqdm import tqdm
import glob
from gym_collision_avoidance.envs.config import Config
from gym_collision_avoidance.envs.utils.Trajectory import *
from gym_collision_avoidance.envs.utils.AgentContainer import AgentContainer as ped_cont
from gym_collision_avoidance.envs.utils import Support as sup
class DataHandlerLSTM():
"""
Data handler for training an LSTM pedestrian prediction model
"""
    def __init__(self,scenario):
        """
        Data handler for one named dataset scenario.

        scenario: sub-directory name under ../datasets/ that holds the raw data
        (and, after the first run, the cached <scenario>.pickle).

        Immediately loads or processes the data via processData().
        """
        self.data_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'datasets/'+scenario))
        self.scenario = scenario
        self.dt = Config.DT
        # Trajectories with fewer samples than this are dropped.
        self.min_length_trajectory = 4
        # List of (agent_id, Trajectory) tuples filled during processing.
        self.trajectory_set = []
        # Normalization constants
        self.norm_const_x = 1.0
        self.norm_const_y = 1.0
        self.norm_const_heading = 1.0
        self.norm_const_vx = 1.0
        self.norm_const_vy = 1.0
        self.norm_const_omega = 1.0
        # Running position/velocity bounds: seeded with +-1000 sentinels and
        # tightened by compute_min_max_values().
        self.min_pos_x = 1000
        self.min_pos_y = 1000
        self.max_pos_x = -1000
        self.max_pos_y = -1000
        self.min_vel_x = 1000
        self.min_vel_y = 1000
        self.max_vel_x = -1000
        self.max_vel_y = -1000
        self.avg_traj_length = 0
        # Data structure containing all the information about agents
        self.agent_container = ped_cont()
        self.processData()
def processData(self, **kwargs):
"""
Processes the simulation or real-world data, depending on the usage.
"""
data_pickle = self.data_path + "/" + self.scenario + ".pickle"
if os.path.isfile(data_pickle):
self.loadTrajectoryData(data_pickle)
else:
print("Processing real-world data.")
self._process_real_data_()
self.saveTrajectoryData(data_pickle)
    def _process_gym_data_(self, **kwargs):
        """
        Process data generated with gym-collision-avoidance simulator.

        Fills self.trajectory_set with (traj_id, Trajectory) tuples, keeping
        only trajectories long enough (after subsampling) for a full training
        window, then prints dataset statistics and updates min/max bounds.

        NOTE(review): relies on self.args (data_path, dataset, dt,
        truncated_backprop_length, prediction_horizon, prev_horizon), which
        __init__ does not set -- presumably assigned externally; confirm
        against callers.
        """
        print("Loading data from: '{}'".format(self.args.data_path + self.args.dataset))
        self.load_map(**kwargs)
        self.trajectory_set = []
        file_list = glob.glob(self.args.data_path + self.args.dataset)
        for file in file_list:
            print("Loading: " + file)
            self.file = open(file, 'rb')
            tmp_self = pkl.load(self.file , encoding='latin1')
            # Iterate through the data and fill the register
            # (only processed while trajectory_set is still empty, i.e. first file)
            if not self.trajectory_set:
                # Subsample factor: raw logs are recorded at dt = 0.1 s.
                step = int(self.args.dt / 0.1)
                for traj_id in tqdm(range(len(tmp_self))):
                    traj = tmp_self[traj_id]
                    # Keep only trajectories long enough (after subsampling) for one
                    # full training window: backprop length + prediction + history.
                    if len(traj)/step > self.args.truncated_backprop_length + self.args.prediction_horizon + self.args.prev_horizon:
                        self.trajectory_set.append(
                            (traj_id, Trajectory.Trajectory(goal=np.asarray(traj[0]["pedestrian_goal_position"]))))
                        for t_id in range(0,len(traj),step):
                            timestamp = traj[t_id]["time"]
                            pose = np.zeros([1, 3])
                            vel = np.zeros([1, 3])
                            pose[:, 0:2] = traj[t_id]["pedestrian_state"]["position"]
                            vel[:, 0:2] = traj[t_id]["pedestrian_state"]["velocity"]
                            self.trajectory_set[-1][1].time_vec = np.insert(self.trajectory_set[-1][1].time_vec, int(t_id/step), timestamp)
                            self.trajectory_set[-1][1].pose_vec = np.insert(self.trajectory_set[-1][1].pose_vec, int(t_id/step), pose, axis=0)
                            self.trajectory_set[-1][1].vel_vec = np.insert(self.trajectory_set[-1][1].vel_vec, int(t_id/step), vel, axis=0)
                            other_agents_pos = np.asarray(traj[t_id]["other_agents_pos"])
                            other_agents_vel = np.asarray(traj[t_id]["other_agents_vel"])
                            self.trajectory_set[-1][1].other_agents_positions.append(other_agents_pos)
                            self.trajectory_set[-1][1].other_agents_velocities.append(other_agents_vel)
        # Dataset Statistics
        cnt = 0
        avg_len = 0
        for traj_id in tqdm(range(len(self.trajectory_set))):
            # NOTE(review): cnt is never incremented, so avg_len ends up as the
            # LAST trajectory's length, not the running average.
            avg_len = (avg_len*cnt+self.trajectory_set[traj_id][1].pose_vec.shape[0])/(cnt+1)
        print("Avg. Trajectory Length: " + str(avg_len))
        print("Total number of trajectories: " + str(len(self.trajectory_set)))
        self.compute_min_max_values()
    def _process_simulation_data_(self, **kwargs):
        """
        Import the data from the log file stored in the directory of data_path.
        This method brings all the data into a suitable format for training.

        Reads total_log.csv, feeds each row into the agent container, then
        subsamples, re-interpolates, and moves qualifying trajectories into
        self.trajectory_set.

        NOTE(review): uses self.args.scenario / self.args.dt, which __init__
        does not set -- presumably assigned externally; confirm.
        """
        self.load_map(**kwargs)
        # Pedestrian data
        # [id, timestep (s), timestep (ns), pos x, pos y, yaw, vel x, vel y, omega, goal x, goal y]
        pedestrian_data = np.genfromtxt(os.path.join(self.data_path+self.args.scenario, 'total_log.csv'), delimiter=",")[1:, :]
        # Iterate through the data and fill the register
        for sample_idx in range(pedestrian_data.shape[0]):
            #if pedestrian_data[sample_idx, 0] != -1:
            # NOTE(review): `id` shadows the builtin within this loop.
            id = pedestrian_data[sample_idx, 0]
            timestamp = np.round(pedestrian_data[sample_idx, 1],1)# + pedestrian_data[sample_idx, 2] * 1e-9 # time in seconds
            pose = np.zeros([1,3])
            vel = np.zeros([1,3])
            # NOTE(review): columns 3:5 / 5:7 / 7:9 are read as pos/vel/goal,
            # which does not match the 11-column header comment above -- confirm
            # against the actual total_log.csv layout.
            pose[:,0:2] = np.true_divide(pedestrian_data[sample_idx, 3:5], np.array([self.norm_const_x, self.norm_const_y]))
            vel[:,0:2] = np.true_divide(pedestrian_data[sample_idx, 5:7], np.array([self.norm_const_vx, self.norm_const_vy]))
            goal = np.true_divide(pedestrian_data[sample_idx, 7:9], np.array([self.norm_const_x, self.norm_const_y]))
            self.agent_container.addDataSample(id, timestamp, pose, vel, goal)
        # Set the initial indices for agent trajectories (which trajectory will be returned when queried)
        self.agent_traj_idx = [0] * self.agent_container.getNumberOfAgents()
        # for id in self.agent_container.getAgentIDs():
        #     for traj in self.agent_container.getAgentTrajectories(id):
        #         if len(traj) > self.min_length_trajectory:
        #             traj.smoothenTrajectory(dt=self.dt)
        # Subsample trajectories (longer discretization time) from dt=0.1 to dt=0.3
        for id in self.agent_container.getAgentIDs():
            for traj in self.agent_container.getAgentTrajectories(id):
                # Subsample factor assumes logs recorded at 0.1 s resolution.
                traj.subsample(int(self.args.dt*10))
        # Reconstruct interpolators since they were not pickled with the rest of the trajectory
        for id in self.agent_container.getAgentIDs():
            for traj_idx, traj in enumerate(self.agent_container.getAgentTrajectories(id)):
                if len(traj) > self.min_length_trajectory:
                    traj.updateInterpolators()
        # Put all the trajectories in the trajectory set and randomize
        for id in self.agent_container.getAgentIDs():
            print("Processing agent {} / {}".format(id, self.agent_container.getNumberOfAgents()))
            # Adds trajectory if bigger than a minimum length and maximum size
            self.addAgentTrajectoriesToSet(self.agent_container,self.trajectory_set,id)
        self.compute_min_max_values()
def shift_data(self):
for traj_id in range(len(self.trajectory_set)):
for t_id in range(1, self.trajectory_set[traj_id][1].pose_vec.shape[0]):
self.trajectory_set[traj_id][1].pose_vec[t_id,0] -= (self.max_pos_x-self.min_pos_y)/2
self.trajectory_set[traj_id][1].pose_vec[t_id, 1] -= (self.max_pos_y-self.min_pos_y)/2
    def compute_min_max_values(self):
        """
        Scan all trajectories, update the position/velocity bounds and the mean
        position, then refresh the normalization scales via calc_scale().

        NOTE(review): the min/max members are not reset here -- they only
        tighten the sentinel values seeded in __init__ (or loaded from a
        pickle), so repeated calls on changed data can keep stale extrema.
        The scan also starts at t_id = 1, skipping each trajectory's first
        sample -- confirm row 0 is a placeholder.
        """
        self.mean_pos_x = 0
        self.mean_pos_y = 0
        for traj_id in range(len(self.trajectory_set)):
            for t_id in range(1, self.trajectory_set[traj_id][1].pose_vec.shape[0]):
                self.min_pos_x = min(self.min_pos_x,self.trajectory_set[traj_id][1].pose_vec[t_id,0])
                self.min_pos_y = min(self.min_pos_y, self.trajectory_set[traj_id][1].pose_vec[t_id, 1])
                self.max_pos_x = max(self.max_pos_x, self.trajectory_set[traj_id][1].pose_vec[t_id, 0])
                self.max_pos_y = max(self.max_pos_y, self.trajectory_set[traj_id][1].pose_vec[t_id, 1])
                self.min_vel_x = min(self.min_vel_x,self.trajectory_set[traj_id][1].vel_vec[t_id,0])
                self.min_vel_y = min(self.min_vel_y, self.trajectory_set[traj_id][1].vel_vec[t_id, 1])
                self.max_vel_x = max(self.max_vel_x, self.trajectory_set[traj_id][1].vel_vec[t_id, 0])
                self.max_vel_y = max(self.max_vel_y, self.trajectory_set[traj_id][1].vel_vec[t_id, 1])
            # Incremental mean over trajectories (each trajectory weighted equally).
            self.mean_pos_x += np.mean(self.trajectory_set[traj_id][1].pose_vec[:, 0], axis=0)/len(self.trajectory_set)
            self.mean_pos_y += np.mean(self.trajectory_set[traj_id][1].pose_vec[:, 1], axis=0)/len(self.trajectory_set)
        self.calc_scale()
def _process_real_data_(self):
"""
Import the real-world data from the log file stored in the directory of data_path.
This method brings all the data into a suitable format for training.
"""
print("Extracting the occupancy grid ...")
# Occupancy grid data
self.agent_container.occupancy_grid.resolution = 0.1 # map resolution in [m / cell]
self.agent_container.occupancy_grid.map_size = np.array([50., 50.]) # map size in [m]
self.agent_container.occupancy_grid.gridmap = np.zeros([int(self.agent_container.occupancy_grid.map_size[0] / self.agent_container.occupancy_grid.resolution),
int(self.agent_container.occupancy_grid.map_size[1] / self.agent_container.occupancy_grid.resolution)]) # occupancy values of cells
self.agent_container.occupancy_grid.center = self.agent_container.occupancy_grid.map_size / 2.0
# Extract grid from real data
# Homography matrix to transform from image to world coordinates
H = np.genfromtxt(os.path.join(self.data_path, 'H.txt'), delimiter=' ', unpack=True).transpose()
# Extract static obstacles
obst_threshold = 200
static_obst_img = cv2.imread(os.path.join(self.data_path, 'map.png'), 0)
obstacles = np.zeros([0, 3])
# pixel coordinates do cartesian coordinates
for xx in range(static_obst_img.shape[0]):
for yy in range(static_obst_img.shape[1]):
if static_obst_img[xx, yy] > obst_threshold:
obstacles = np.append(obstacles, np.dot(H, np.array([[xx], [yy], [1]])).transpose(), axis=0)
# Compute obstacles in 2D
self.obstacles_2d = np.zeros([obstacles.shape[0], 2])
self.obstacles_2d[:, 0] = obstacles[:, 0] / obstacles[:, 2]
self.obstacles_2d[:, 1] = obstacles[:, 1] / obstacles[:, 2]
for obst_ii in range(self.obstacles_2d.shape[0]):
obst_idx = self.agent_container.occupancy_grid.getIdx(self.obstacles_2d[obst_ii,0], self.obstacles_2d[obst_ii,1])
self.agent_container.occupancy_grid.gridmap[obst_idx] = 1.0
print("Extracting the pedestrian data ...")
# Pedestrian data
# [id, timestep (s), timestep (ns), pos x, pos y, yaw, vel x, vel y, omega, goal x, goal y]
if os.path.exists(self.data_path +'/obsmat.txt'):
pedestrian_data = np.genfromtxt(os.path.join(self.data_path , 'obsmat.txt'), delimiter=" ")[1:, :]
pixel_data = False
elif os.path.exists(self.data_path +'/obsmat_px.txt'):
pedestrian_data = np.genfromtxt(os.path.join(self.data_path, 'obsmat_px.txt'), delimiter=" ")[1:, :]
pixel_data = True
else:
print("Could not find obsmat.txt or obsmat_px.txt")
idx_frame = 0
idx_id = 1
idx_posx = 2
idx_posy = 4
idx_posz = 3
idx_vx = 5
idx_vy = 7
idx_vz = 6
dt = 0.4 # seconds (equivalent to 2.5 fps)
if os.path.split(self.data_path)[-1] == 'seq_eth':
frames_between_annotation = 6.0
else:
frames_between_annotation = 10.0
# Iterate through the data and fill the register
for sample_idx in range(pedestrian_data.shape[0]):
id = pedestrian_data[sample_idx, idx_id]
timestamp = pedestrian_data[sample_idx, idx_frame] * dt / frames_between_annotation # time in seconds
pose = np.zeros([1,3])
vel = np.zeros([1,3])
pose[:,0] = pedestrian_data[sample_idx, idx_posx]
if self.scenario == "zara_02":
pose[:, 1] = pedestrian_data[sample_idx, idx_posy] + 14
else:
pose[:,1] = pedestrian_data[sample_idx, idx_posy]
vel[:, 0] = pedestrian_data[sample_idx, idx_vx]
vel[:, 1] = pedestrian_data[sample_idx, idx_vy]
if pixel_data:
converted_pose = sup.to_pos_frame(H, np.expand_dims(np.array((pedestrian_data[sample_idx, idx_posx], pedestrian_data[sample_idx, idx_posy])), axis=0).astype(float))
pose[:, 0] = converted_pose[0,0]
pose[:, 1] = converted_pose[0,1]
goal = np.zeros([2])
self.agent_container.addDataSample(id, timestamp, pose, vel, goal)
# Set the initial indices for agent trajectories (which trajectory will be returned when queried)
self.agent_traj_idx = [0] * self.agent_container.getNumberOfAgents()
# Subsample trajectories (longer discretization time)
if dt != self.dt:
for id in self.agent_container.getAgentIDs():
for traj in self.agent_container.getAgentTrajectories(id):
if len(traj) > self.min_length_trajectory:
traj.smoothenTrajectory(dt=self.dt) # before was 0.3
traj.goal = np.expand_dims(traj.pose_vec[-1, :2], axis=0)
else:
self.agent_container.removeAgent(id)
# Put all the trajectories in the trajectory set and randomize
for cnt, id in enumerate(self.agent_container.getAgentIDs()):
self.addAgentTrajectoriesToSet(self.agent_container,self.trajectory_set,id)
#self.compute_min_max_values()
def calc_scale(self, keep_ratio=False):
self.sx_vel = 1 / (self.max_vel_x - self.min_vel_x)
self.sy_vel = 1 / (self.max_vel_y - self.min_vel_y)
if keep_ratio:
if self.sx_vel > self.sy_vel:
self.sx_vel = self.sy_vel
else:
self.sy_vel = self.sx_vel
self.sx_pos = 1 / (self.max_pos_x - self.min_pos_x)
self.sy_pos = 1 / (self.max_pos_y - self.min_pos_y)
if keep_ratio:
if self.sx_pos > self.sy_pos:
self.sx_pos = self.sy_pos
else:
self.sy_pos = self.sx_pos
    def addAgentTrajectoriesToSet(self,agent_container,trajectory_set, id):
        """
        Goes through all trajectories of agent and adds them to the member set if they fulfill the criteria.
        For all the time steps within the trajectory it also computes the positions of the other agents at that
        timestep in order to make training more efficient.
        """
        for traj_idx, traj in enumerate(agent_container.getAgentTrajectories(id)):
            traj_with_collision = False  # NOTE(review): assigned but never used
            # Only keep trajectories longer than the configured minimum.
            if len(traj) > self.min_length_trajectory:
                #if traj.getMinTime() < 100:
                traj.updateInterpolators()
                # Find other agent's trajectories which overlap with each time step
                for time_idx in range(traj.time_vec.shape[0]):
                    query_time = traj.time_vec[time_idx]
                    other_agents_positions = agent_container.getAgentPositionsForTimeExclude(query_time, id)
                    other_agents_velocities = agent_container.getAgentVelocitiesForTimeExclude(query_time, id)
                    # Remove ego agent
                    traj.other_agents_positions.append(other_agents_positions)
                    traj.other_agents_velocities.append(other_agents_velocities)
                trajectory_set.append((id, traj))
def saveTrajectoryData(self, save_path):
print("Saving data to: '{}'".format(save_path))
if not os.path.isdir(self.data_path ):
os.makedirs(self.args.data_path )
# Reconstruct interpolators since they were not pickled with the rest of the trajectory
for id, traj in self.trajectory_set:
traj.updateInterpolators()
#if "test" not in self.args.scenario:
random.shuffle(self.trajectory_set)
self.compute_min_max_values()
self.shift_data()
data = {
"trajectories" : self.trajectory_set,
"agent_container" : self.agent_container,
"min_pos_x" : self.min_pos_x,
"min_pos_y" : self.min_pos_y,
"max_pos_x" : self.max_pos_x,
"max_pos_y" : self.max_pos_y,
"min_vel_x" : self.min_vel_x,
"min_vel_y" : self.min_vel_y,
"max_vel_x" : self.max_vel_x,
"max_vel_y" : self.max_vel_y,
"mean_pos_x" : self.mean_pos_x,
"mean_pos_y" : self.mean_pos_y,
}
pkl.dump(data, open(save_path, 'wb'),protocol=2)
def loadTrajectoryData(self, load_path):
print("Loading data from: '{}'".format(load_path))
self.file = open(load_path, 'rb')
if sys.version_info[0] < 3:
tmp_self = pkl.loads(self.file,encoding='latin1')
else:
tmp_self = pkl.load(self.file , encoding='latin1')
self.trajectory_set = tmp_self["trajectories"]
self.agent_container = tmp_self["agent_container"]
#self.compute_min_max_values()
self.min_pos_x = tmp_self["min_pos_x"]
self.min_pos_y = tmp_self["min_pos_y"]
self.max_pos_x = tmp_self["max_pos_x"]
self.max_pos_y = tmp_self["max_pos_y"]
self.min_vel_x = tmp_self["min_vel_x"]
self.min_vel_y = tmp_self["min_vel_y"]
self.max_vel_x = tmp_self["max_vel_x"]
self.max_vel_y = tmp_self["max_vel_y"]
self.mean_pos_x = tmp_self["mean_pos_x"]
self.mean_pos_y =tmp_self["mean_pos_y"]
# Dataset Statistics
cnt = 0
avg_len = 0
for traj_id in tqdm(range(len(self.trajectory_set))):
avg_len = (avg_len*cnt+self.trajectory_set[traj_id][1].pose_vec.shape[0])/(cnt+1)
print("Avg. Trajectory Length: " + str(avg_len))
print("Total number of trajectories: " + str(len(self.trajectory_set)))
# Reconstruct interpolators since they were not pickled with the rest of the trajectory
for id, traj in self.trajectory_set:
traj.updateInterpolators()
def getAgentTrajectory(self, agent_id):
"""
Return the next agent trajectory in the queue for the agent with id agent_id.
"""
trajectory = self.agent_container.agent_data[agent_id].trajectories[self.agent_traj_idx[agent_id]]
self.agent_traj_idx[agent_id] = (self.agent_traj_idx[agent_id] + 1) % self.agent_container.getNumberOfTrajectoriesForAgent(agent_id)
return trajectory
def getRandomAgentTrajectory(self, agent_id):
"""
Return a totally random trajectory for the agent with id agent_id.
"""
random_traj_idx = np.random.randint(0, len(self.agent_container.agent_data[agent_id].trajectories))
return self.agent_container.agent_data[agent_id].trajectories[random_traj_idx]
def getRandomTrajectory(self):
"""
Return a totally random trajectory.
"""
random_traj_idx = np.random.randint(0, len(self.trajectory_set))
return self.trajectory_set[random_traj_idx]
|
{"hexsha": "e713302b1caadef1c4b96657d3832b9fca4b4721", "size": 17550, "ext": "py", "lang": "Python", "max_stars_repo_path": "gym_collision_avoidance/envs/utils/DataHandlerLSTM.py", "max_stars_repo_name": "mlodel/gym-exploration-2d", "max_stars_repo_head_hexsha": "57a54e3d68d2722cda13e9defd4bd81171e9a621", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gym_collision_avoidance/envs/utils/DataHandlerLSTM.py", "max_issues_repo_name": "mlodel/gym-exploration-2d", "max_issues_repo_head_hexsha": "57a54e3d68d2722cda13e9defd4bd81171e9a621", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gym_collision_avoidance/envs/utils/DataHandlerLSTM.py", "max_forks_repo_name": "mlodel/gym-exploration-2d", "max_forks_repo_head_hexsha": "57a54e3d68d2722cda13e9defd4bd81171e9a621", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0046728972, "max_line_length": 168, "alphanum_fraction": 0.729002849, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4948}
|
import boto3
import itertools
import numpy as np
from .matrix import BigMatrix
from .matrix_utils import load_mmap, chunk, generate_key_name_uop, constant_zeros
from .matrix_init import local_numpy_init
import concurrent.futures as fs
import math
import os
import pywren
from pywren.executor import Executor
import pywren
from scipy.linalg import cholesky, solve
import time
from . import lambdapack as lp
# this one is hard
def reshard(pwex, X, new_shard_sizes, out_bucket=None, tasks_per_job=1):
    """Re-shard BigMatrix X into new_shard_sizes. Placeholder; not implemented."""
    raise NotImplementedError
# These have some dependencies
def _argmin_remote(X, block_idxs):
    """Per-block argmin worker.

    Returns (block column index, global row indices of the per-column minima
    within this block, the per-column minimum values).
    """
    X_block = X.get_block(*block_idxs)
    # Offset converts block-local row indices into global row indices.
    offset = block_idxs[0]*X.shard_sizes[0]
    return (block_idxs[1], offset + np.argmin(X_block, axis=0), np.min(X_block, axis=0))
def argmin(pwex, X, out_bucket=None, tasks_per_job=1):
    """Distributed argmin over BigMatrix X.

    NOTE(review): this function cannot run as written -- `axis` is never
    defined (NameError on the first branch) and `itemgetter` is not imported
    in this module; it also sorts by itemgetter(axis) but groups by
    itemgetter(0). Needs an `axis` parameter and
    `from operator import itemgetter` before it can work.
    """
    futures = pwex.map(lambda x: _argmin_remote(x, X), X.block_idxs)
    pywren.wait(futures)
    results = [f.result() for f in futures]
    if (axis == None):
        groups = [(None, results)]
    else:
        groups = itertools.groupby(sorted(results, key=itemgetter(axis)), key=itemgetter(0))
    results = []
    for _, group in groups:
        group = list(group)
        argmins = np.concatenate([g[1] for g in group], axis=axis)
        argminmin = np.argmin(np.vstack([g[2] for g in group]), axis=axis)
        results.append(argmins[argminmin, :])
    return np.hstack(results)
def argmax(pwex, X, out_bucket=None, tasks_per_job=1):
    """Distributed argmax over BigMatrix X.

    NOTE(review): cannot run as written -- `results` is never defined (there is
    no map phase like argmin's), `itemgetter` is not imported, and despite the
    name it aggregates with np.argmin. Appears to be an unfinished copy of
    argmin.
    """
    mins = []
    for _, group in itertools.groupby(sorted(results, key=itemgetter(0)), key=itemgetter(0)):
        group = list(group)
        argmins = np.vstack([g[1] for g in group])
        argminmin = np.argmin(np.vstack([g[2] for g in group]), axis=0)
        mins.append(argmins[argminmin, np.arange(argmins.shape[1])])
    return np.hstack(mins)
def min(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented. NOTE(review): shadows builtins.min in this module."""
    raise NotImplementedError
def max(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented. NOTE(review): shadows builtins.max in this module."""
    raise NotImplementedError
def norm(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented."""
    raise NotImplementedError
def sum(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented. NOTE(review): shadows builtins.sum in this module."""
    raise NotImplementedError
def prod(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented."""
    raise NotImplementedError
# these have no dependencies
def abs(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented. NOTE(review): shadows builtins.abs in this module."""
    raise NotImplementedError
def neg(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented."""
    raise NotImplementedError
def square(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented."""
    raise NotImplementedError
def sqrt(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented."""
    raise NotImplementedError
def sin(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented."""
    raise NotImplementedError
def cos(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented."""
    raise NotImplementedError
def tan(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented."""
    raise NotImplementedError
def exp(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented."""
    raise NotImplementedError
def sign(pwex, X, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented."""
    raise NotImplementedError
def elemwise_uop_func(pwex, X, f, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented."""
    raise NotImplementedError
def power(pwex, X, k, out_bucket=None, tasks_per_job=1):
    """Placeholder; not implemented."""
    raise NotImplementedError
def chol(pwex, X, out_bucket=None, tasks_per_job=1):
    """Cholesky factorization of BigMatrix X executed as a LambdaPack program.

    Returns the sharded factor produced by lp._chol. Raises Exception if the
    program does not finish with status lp.PS.SUCCESS.
    """
    instructions,L_sharded,trailing = lp._chol(X)
    config = pwex.config
    # Standalone workers for SQS-based invokers; AWS Lambda otherwise.
    if (isinstance(pwex.invoker, pywren.queues.SQSInvoker)):
        executor = pywren.standalone_executor
    else:
        executor = pywren.lambda_executor
    program = lp.LambdaPackProgram(instructions, executor=executor, pywren_config=config)
    futures = program.start()
    # Block until all launch futures resolve, then wait for program completion.
    [f.result() for f in futures]
    program.wait()
    if (program.program_status() != lp.PS.SUCCESS):
        program.unwind()
        raise Exception("Lambdapack Exception : {0}".format(program.program_status()))
    # delete all intermediate information
    [t.free() for t in trailing]
    return L_sharded
|
{"hexsha": "e2973b1db8b9898d7e48ae6a25d86611aa805910", "size": 3937, "ext": "py", "lang": "Python", "max_stars_repo_path": "numpywren/uops.py", "max_stars_repo_name": "cloudbutton/lithops-array", "max_stars_repo_head_hexsha": "5e74b881c7db95eccdccf986f1e3b0dc44603889", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "numpywren/uops.py", "max_issues_repo_name": "cloudbutton/lithops-array", "max_issues_repo_head_hexsha": "5e74b881c7db95eccdccf986f1e3b0dc44603889", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "numpywren/uops.py", "max_forks_repo_name": "cloudbutton/lithops-array", "max_forks_repo_head_hexsha": "5e74b881c7db95eccdccf986f1e3b0dc44603889", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0081300813, "max_line_length": 93, "alphanum_fraction": 0.7200914402, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1044}
|
[STATEMENT]
lemma (in domain) pirreducible_roots:
assumes "p \<in> carrier (poly_ring R)" and "pirreducible (carrier R) p" and "degree p \<noteq> 1"
shows "roots p = {#}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. roots p = {#}
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. roots p \<noteq> {#} \<Longrightarrow> False
[PROOF STEP]
assume "roots p \<noteq> {#}"
[PROOF STATE]
proof (state)
this:
roots p \<noteq> {#}
goal (1 subgoal):
1. roots p \<noteq> {#} \<Longrightarrow> False
[PROOF STEP]
with \<open>p \<in> carrier (poly_ring R)\<close>
[PROOF STATE]
proof (chain)
picking this:
p \<in> carrier (poly_ring R)
roots p \<noteq> {#}
[PROOF STEP]
obtain a where a: "a \<in> carrier R" and "a \<in># roots p" and "[ \<one>, \<ominus> a ] pdivides p"
and in_carrier: "[ \<one>, \<ominus> a ] \<in> carrier (poly_ring R)"
[PROOF STATE]
proof (prove)
using this:
p \<in> carrier (poly_ring R)
roots p \<noteq> {#}
goal (1 subgoal):
1. (\<And>a. \<lbrakk>a \<in> carrier R; a \<in># roots p; [\<one>, \<ominus> a] pdivides p; [\<one>, \<ominus> a] \<in> carrier (poly_ring R)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
a \<in> carrier R
a \<in># roots p
[\<one>, \<ominus> a] pdivides p
[\<one>, \<ominus> a] \<in> carrier (poly_ring R)
goal (1 subgoal):
1. roots p \<noteq> {#} \<Longrightarrow> False
[PROOF STEP]
hence "[ \<one>, \<ominus> a ] \<sim>\<^bsub>poly_ring R\<^esub> p"
[PROOF STATE]
proof (prove)
using this:
a \<in> carrier R
a \<in># roots p
[\<one>, \<ominus> a] pdivides p
[\<one>, \<ominus> a] \<in> carrier (poly_ring R)
goal (1 subgoal):
1. [\<one>, \<ominus> a] \<sim>\<^bsub>poly_ring R\<^esub> p
[PROOF STEP]
using divides_pirreducible_condition[OF assms(2) in_carrier]
univ_poly_units_incl[OF carrier_is_subring]
[PROOF STATE]
proof (prove)
using this:
a \<in> carrier R
a \<in># roots p
[\<one>, \<ominus> a] pdivides p
[\<one>, \<ominus> a] \<in> carrier (poly_ring R)
[\<one>, \<ominus> a] divides\<^bsub>poly_ring R\<^esub> p \<Longrightarrow> [\<one>, \<ominus> a] \<in> Units (poly_ring R) \<or> [\<one>, \<ominus> a] \<sim>\<^bsub>poly_ring R\<^esub> p
Units (poly_ring R) \<subseteq> {[k] |k. k \<in> carrier R - {\<zero>}}
goal (1 subgoal):
1. [\<one>, \<ominus> a] \<sim>\<^bsub>poly_ring R\<^esub> p
[PROOF STEP]
unfolding pdivides_def
[PROOF STATE]
proof (prove)
using this:
a \<in> carrier R
a \<in># roots p
[\<one>, \<ominus> a] divides\<^bsub>poly_ring R\<^esub> p
[\<one>, \<ominus> a] \<in> carrier (poly_ring R)
[\<one>, \<ominus> a] divides\<^bsub>poly_ring R\<^esub> p \<Longrightarrow> [\<one>, \<ominus> a] \<in> Units (poly_ring R) \<or> [\<one>, \<ominus> a] \<sim>\<^bsub>poly_ring R\<^esub> p
Units (poly_ring R) \<subseteq> {[k] |k. k \<in> carrier R - {\<zero>}}
goal (1 subgoal):
1. [\<one>, \<ominus> a] \<sim>\<^bsub>poly_ring R\<^esub> p
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
[\<one>, \<ominus> a] \<sim>\<^bsub>poly_ring R\<^esub> p
goal (1 subgoal):
1. roots p \<noteq> {#} \<Longrightarrow> False
[PROOF STEP]
hence "degree p = 1"
[PROOF STATE]
proof (prove)
using this:
[\<one>, \<ominus> a] \<sim>\<^bsub>poly_ring R\<^esub> p
goal (1 subgoal):
1. degree p = 1
[PROOF STEP]
using associated_polynomials_imp_same_length[OF carrier_is_subring in_carrier assms(1)]
[PROOF STATE]
proof (prove)
using this:
[\<one>, \<ominus> a] \<sim>\<^bsub>poly_ring R\<^esub> p
[\<one>, \<ominus> a] \<sim>\<^bsub>poly_ring R\<^esub> p \<Longrightarrow> length [\<one>, \<ominus> a] = length p
goal (1 subgoal):
1. degree p = 1
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
degree p = 1
goal (1 subgoal):
1. roots p \<noteq> {#} \<Longrightarrow> False
[PROOF STEP]
with \<open>degree p \<noteq> 1\<close>
[PROOF STATE]
proof (chain)
picking this:
degree p \<noteq> 1
degree p = 1
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
degree p \<noteq> 1
degree p = 1
goal (1 subgoal):
1. False
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1782, "file": null, "length": 16}
|
import numpy as np
import tensorflow as tf
def get_distribution_strategy(num_gpus, all_reduce_alg=None):
    """Return a DistributionStrategy for running the model.

    Args:
      num_gpus: Number of GPUs to run this model.
      all_reduce_alg: Which all-reduce algorithm to use; see
        tf.contrib.distribute.AllReduceCrossTowerOps for the available choices.
        If None, DistributionStrategy picks one based on device topology.

    Returns:
      A tf.contrib.distribute.DistributionStrategy instance.
    """
    if num_gpus == 0:
        return tf.contrib.distribute.OneDeviceStrategy("device:CPU:0")
    if num_gpus == 1:
        return tf.contrib.distribute.OneDeviceStrategy("device:GPU:0")
    # Multi-GPU: mirror variables across devices.
    if not all_reduce_alg:
        return tf.contrib.distribute.MirroredStrategy(num_gpus=num_gpus)
    cross_tower_ops = tf.contrib.distribute.AllReduceCrossTowerOps(
        all_reduce_alg, num_packs=num_gpus)
    return tf.contrib.distribute.MirroredStrategy(
        num_gpus=num_gpus, cross_tower_ops=cross_tower_ops)
def get_eval_metric(iou):
    """Build streaming IoU metrics at thresholds 0.1 .. 0.9.

    Returns a dict mapping 'IoU/<threshold>' to tf.metrics.mean of the
    indicator (iou > threshold). 'IoU/mean' averages the indicators of
    vals[4:], i.e. the thresholds from 0.5 upward.
    """
    th = np.arange(0.1, 1.0, 0.1)
    vals = [tf.to_float(iou > i) for i in th]
    metrics = dict([('IoU/%g' % i, tf.metrics.mean(j)) for i, j in zip(th, vals)])
    metrics['IoU/mean'] = tf.metrics.mean(vals[4:])
    return metrics
def get_iou(pred, label):
    """IoU between predicted and ground-truth 1-D [left, right] intervals.

    pred: tensor whose axis 1 holds the (left, right) boundaries.
    label: tensor of (left, right) boundaries; singleton axes are appended so
      it broadcasts against pred's trailing dimensions.
    Returns the elementwise inter/union tensor, named 'iou'.
    NOTE(review): no epsilon in the division -- produces NaN/Inf when the
    union is zero.
    """
    pred_l, pred_r = tf.unstack(pred, axis=1)
    # Append singleton axes so label broadcasts against pred's extra dims.
    for i in range(2, len(pred.shape)):
        label = tf.expand_dims(label, axis=i)
    label_l, label_r = tf.unstack(label, axis=1)
    inter_l = tf.maximum(pred_l, label_l)
    inter_r = tf.minimum(pred_r, label_r)
    # Clamp at 0 for disjoint intervals.
    inter = tf.maximum(inter_r - inter_l, 0)
    union = pred_r - pred_l + label_r - label_l - inter
    return tf.divide(inter, union, name='iou')
|
{"hexsha": "73d43f06360bb385a88e3234b3b212396917d7ff", "size": 1684, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "Sy-Zhang/video_reloc", "max_stars_repo_head_hexsha": "1632fe0928f929622c4a32b9c331b911af72592d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 81, "max_stars_repo_stars_event_min_datetime": "2018-08-07T20:54:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T12:21:45.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "Sy-Zhang/video_reloc", "max_issues_repo_head_hexsha": "1632fe0928f929622c4a32b9c331b911af72592d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2018-08-20T09:04:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-23T15:09:51.000Z", "max_forks_repo_path": "utils.py", "max_forks_repo_name": "Sy-Zhang/video_reloc", "max_forks_repo_head_hexsha": "1632fe0928f929622c4a32b9c331b911af72592d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2018-09-05T08:29:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T12:44:36.000Z", "avg_line_length": 35.829787234, "max_line_length": 80, "alphanum_fraction": 0.7149643705, "include": true, "reason": "import numpy", "num_tokens": 458}
|
# QuEST_jl/src/base/calculations.jl
#
# Real-valued inner product of two density-matrix quregs
# (thin wrapper over the C function :calcDensityInnerProduct).
function calcDensityInnerProduct(ϱ1 ::QuEST_Types.Qureg, ϱ2 ::QuEST_Types.Qureg) ::Qreal
    w = ccall(:calcDensityInnerProduct,
              Qreal,
              (QuEST_Types.Qureg, QuEST_Types.Qureg),
              ϱ1,
              ϱ2)
    return Qreal(w)
end
# Expected value of a diagonal operator under `qureg`; returns QuEST's C
# complex struct unconverted (wrapper over :calcExpecDiagonalOp).
function calcExpecDiagonalOp(qureg ::QuEST_Types.Qureg, op ::QuEST_Types.DiagonalOp) ::QuEST_Types.Complex
    ret= ccall(:calcExpecDiagonalOp,
               QuEST_Types.Complex,
               (QuEST_Types.Qureg, QuEST_Types.DiagonalOp),
               qureg,
               op)
    return ret
end
# Expected value of a PauliHamil under `qureg`; `workspace` is an auxiliary
# qureg required by the C implementation (wrapper over :calcExpecPauliHamil).
function calcExpecPauliHamil(qureg ::QuEST_Types.Qureg,
                             hamil ::QuEST_Types.PauliHamil,
                             workspace ::QuEST_Types.Qureg) ::Qreal
    # The signature previously read `) Qreal` (missing `::`): Julia parses that
    # as a stray first expression in the body, silently dropping the intended
    # return-type annotation. Restored `::Qreal`, matching the sibling wrappers.
    ret = ccall(:calcExpecPauliHamil, Qreal, (QuEST_Types.Qureg, QuEST_Types.PauliHamil, QuEST_Types.Qureg), qureg, hamil, workspace)
    return ret
end
# Expected value of a product of Pauli operators: `pauliCodes[i]` acts on
# `targetQubits[i]`; `workspace` is an auxiliary qureg required by the C
# implementation (wrapper over :calcExpecPauliProd).
function calcExpecPauliProd(qureg ::QuEST_Types.Qureg,
                            targetQubits ::Vector{QubitIdx},
                            pauliCodes ::Vector{QuEST_Types.pauliOpType},
                            workspace ::QuEST_Types.Qureg) ::Qreal
    @assert length(targetQubits) == length(pauliCodes)
    #@assert all( σ -> 0 ≤ σ ≤ 3, pauliCodes )
    expval = ccall(:calcExpecPauliProd,
                   Qreal,
                   (QuEST_Types.Qureg, Ptr{QubitIdx}, Ptr{QuEST_Types.pauliOpType}, Cint, QuEST_Types.Qureg),
                   qureg, targetQubits, pauliCodes, length(targetQubits), workspace)
    return expval
end
# Expected value of a weighted sum of Pauli products under `qureg`.
# `allPauliCodes` holds one code per qubit per term (hence the length assert);
# `workspace` is an auxiliary qureg required by the C implementation.
function calcExpecPauliSum(qureg ::QuEST_Types.Qureg,
                           allPauliCodes ::Vector{QuEST_Types.pauliOpType},
                           termCoeffs ::Vector{Qreal},
                           workspace ::QuEST_Types.Qureg) ::Qreal
    # Return type was `::Float64`, inconsistent with every sibling wrapper and
    # with the ccall, which already yields a Qreal; use `::Qreal` (identical
    # behavior on double-precision builds of QuEST).
    @assert length(allPauliCodes) == length(termCoeffs) * getNumQubits(qureg)
    #@assert all( σ -> 0 ≤ σ ≤ 3, allPauliCodes )
    ex = ccall(:calcExpecPauliSum,
               Qreal,
               (QuEST_Types.Qureg, Ptr{QuEST_Types.pauliOpType}, Ptr{Qreal}, Cint, QuEST_Types.Qureg),
               qureg,
               allPauliCodes,
               termCoeffs,
               Cint(length(termCoeffs)),
               workspace)
    return ex
end
function calcFidelity(qureg ::QuEST_Types.Qureg, pureState ::QuEST_Types.Qureg) ::Qreal
fi = ccall(:calcFidelity, Qreal, (QuEST_Types.Qureg, QuEST_Types.Qureg), qureg, pureState)
return fi
end
"""
    calcHilbertSchmidtDistance(a, b) :: Qreal

Wrap the QuEST C function `calcHilbertSchmidtDistance` for two quregs.
"""
function calcHilbertSchmidtDistance(a ::QuEST_Types.Qureg, b ::QuEST_Types.Qureg) ::Qreal
    return ccall(:calcHilbertSchmidtDistance, Qreal, (QuEST_Types.Qureg, QuEST_Types.Qureg), a, b)
end
"""
    calcInnerProduct(bra, ket) :: Complex{Qreal}

Wrap the QuEST C function `calcInnerProduct`, converting the C complex
struct it returns into a native Julia `Complex{Qreal}`.
"""
function calcInnerProduct(bra ::QuEST_Types.Qureg, ket ::QuEST_Types.Qureg) ::Complex{Qreal}
    c = ccall(:calcInnerProduct, QuEST_Types.Complex, (QuEST_Types.Qureg, QuEST_Types.Qureg), bra, ket)
    return Complex{Qreal}(c.real, c.imag)
end
"""
    calcProbOfOutcome(qureg, measureQubit, outcome) :: Qreal

Wrap the QuEST C function `calcProbOfOutcome` for qubit `measureQubit`
and measurement result `outcome`; both integers are forwarded to the C
library unchanged (ccall converts them to QubitIdx/Cint).
"""
function calcProbOfOutcome(qureg ::QuEST_Types.Qureg,
                           measureQubit ::Integer,
                           outcome ::Integer) ::Qreal
    return ccall(:calcProbOfOutcome,
                 Qreal,
                 (QuEST_Types.Qureg, QubitIdx, Cint),
                 qureg, measureQubit, outcome)
end
"""
    calcPurity(qureg) :: Qreal

Wrap the QuEST C function `calcPurity`.
"""
function calcPurity(qureg ::QuEST_Types.Qureg) ::Qreal
    return ccall(:calcPurity, Qreal, (QuEST_Types.Qureg,), qureg)
end
"""
    calcTotalProb(qureg) :: Qreal

Wrap the QuEST C function `calcTotalProb`.
"""
function calcTotalProb(qureg ::QuEST_Types.Qureg) ::Qreal
    # Local renamed from `one` to avoid shadowing `Base.one`.
    total = ccall(:calcTotalProb, Qreal, (QuEST_Types.Qureg,), qureg)
    return total
end
"""
    getAmp(qureg, idx) :: Complex{Qreal}

Wrap the QuEST C function `getAmp`, returning the amplitude at `idx` as a
Julia complex number. `idx` is forwarded to the C library unchanged, i.e.
using the C side's indexing convention — confirm callers pass 0-based
indices.
"""
function getAmp(qureg ::QuEST_Types.Qureg, idx ::Integer) ::Complex{Qreal}
    a = ccall(:getAmp, QuEST_Types.Complex,
              (QuEST_Types.Qureg, Clonglong),
              qureg, idx)
    return Complex{Qreal}(a.real, a.imag)
end
"""
    getDensityAmp(qureg, row, col) :: Complex{Qreal}

Wrap the QuEST C function `getDensityAmp`, returning the density-matrix
entry at (`row`, `col`) as a Julia complex number. Both indices are
forwarded to the C library unchanged (C-side indexing convention).
"""
function getDensityAmp(qureg ::QuEST_Types.Qureg, row ::Integer, col ::Integer) ::Complex{Qreal}
    elt = ccall(:getDensityAmp,
                QuEST_Types.Complex,
                (QuEST_Types.Qureg, Clonglong, Clonglong),
                qureg, Clonglong(row), Clonglong(col))
    return Complex{Qreal}(elt.real, elt.imag)
end
"""
    getImagAmp(qureg, index) :: Qreal

Wrap the QuEST C function `getImagAmp`: the imaginary part of the
amplitude at `index` (forwarded unchanged to the C library).
"""
function getImagAmp(qureg ::QuEST_Types.Qureg,
                    index ::Integer) ::Qreal
    return ccall(:getImagAmp, Qreal, (QuEST_Types.Qureg, Clonglong), qureg, Clonglong(index))
end
"""
    getNumAmps(qureg) :: Clonglong

Wrap the QuEST C function `getNumAmps`.
"""
function getNumAmps(qureg ::QuEST_Types.Qureg) ::Clonglong
    n = ccall(:getNumAmps, Clonglong, (QuEST_Types.Qureg,), qureg)
    return n
end
"""
    getNumQubits(qureg) :: QubitIdx

Wrap the QuEST C function `getNumQubits`; the C `Cint` result is converted
to `QubitIdx` via the declared return type.
"""
function getNumQubits(qureg ::QuEST_Types.Qureg) ::QubitIdx
    nq = ccall(:getNumQubits, Cint, (QuEST_Types.Qureg,), qureg)
    return nq
end
"""
    getProbAmp(qureg, idx) :: Qreal

Wrap the QuEST C function `getProbAmp` for the amplitude at `idx`
(forwarded unchanged to the C library).
"""
function getProbAmp(qureg ::QuEST_Types.Qureg,
                    idx ::Integer) :: Qreal
    return ccall(:getProbAmp, Qreal, (QuEST_Types.Qureg, Clonglong), qureg, Clonglong(idx))
end
"""
    getRealAmp(qureg, idx) :: Qreal

Wrap the QuEST C function `getRealAmp`: the real part of the amplitude at
`idx` (forwarded unchanged to the C library).
"""
function getRealAmp(qureg ::QuEST_Types.Qureg,
                    idx ::Integer) :: Qreal
    return ccall(:getRealAmp, Qreal, (QuEST_Types.Qureg, Clonglong), qureg, Clonglong(idx))
end
#EOF
|
{"hexsha": "296e049dcb3fca7eca245bfe75af8832b77fbaf1", "size": 5260, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/calculations.jl", "max_stars_repo_name": "ketitalabs/QuEST.jl", "max_stars_repo_head_hexsha": "c657c5c78aebbd62fff52b73bf4db7ecd5f7f8e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-12T11:36:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-26T12:19:01.000Z", "max_issues_repo_path": "src/calculations.jl", "max_issues_repo_name": "ketitalabs/QuEST.jl", "max_issues_repo_head_hexsha": "c657c5c78aebbd62fff52b73bf4db7ecd5f7f8e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-04-16T09:25:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-13T10:36:16.000Z", "max_forks_repo_path": "src/calculations.jl", "max_forks_repo_name": "ketitalabs/QuEST.jl", "max_forks_repo_head_hexsha": "c657c5c78aebbd62fff52b73bf4db7ecd5f7f8e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-07-15T11:36:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-12T11:43:22.000Z", "avg_line_length": 29.5505617978, "max_line_length": 133, "alphanum_fraction": 0.6057034221, "num_tokens": 1582}
|
! ##################################################################################################################################
! Begin MIT license text.
! _______________________________________________________________________________________________________
! Copyright 2019 Dr William R Case, Jr (dbcase29@gmail.com)
! Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
! associated documentation files (the "Software"), to deal in the Software without restriction, including
! without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
! copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
! the following conditions:
! The above copyright notice and this permission notice shall be included in all copies or substantial
! portions of the Software and documentation.
! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
! OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
! OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
! THE SOFTWARE.
! _______________________________________________________________________________________________________
! End MIT license text.
SUBROUTINE READ_DOF_TABLES
! Reads DOF table data (TSET, TDOF, TDOFI) from file LINK1C
! Each table on the unformatted file L1C is laid out as: a name record,
! one or two size records, then one table entry per record. The name and
! sizes are validated (READ_CHK / DATA_SET_NAME_ERROR / DATA_SET_SIZE_ERROR)
! before the entries are read, and REC_NO tracks the record count for
! error reporting within each data set.
USE PENTIUM_II_KIND, ONLY : BYTE, LONG, DOUBLE
USE IOUNT1, ONLY : FILE_NAM_MAXLEN, WRT_ERR, WRT_LOG, ERR, F04, F06, L1C, LINK1C, L1C_MSG
USE SCONTR, ONLY : BLNK_SUB_NAM, DATA_NAM_LEN, MTDOF, NDOFG, NGRID
USE TIMDAT, ONLY : TSEC
USE SUBR_BEGEND_LEVELS, ONLY : READ_DOF_TABLES_BEGEND
USE DOF_TABLES, ONLY : TDOFI, TDOF, TSET
USE READ_DOF_TABLES_USE_IFs
IMPLICIT NONE
CHARACTER(LEN=LEN(BLNK_SUB_NAM)):: SUBR_NAME = 'READ_DOF_TABLES'
CHARACTER(LEN=DATA_NAM_LEN) :: NAME_Is ! Name of data actually read from file
CHARACTER(LEN=DATA_NAM_LEN) :: NAME_ShouldBe ! Name of data that should be read from file
INTEGER(LONG) :: I,J ! DO loop indices or counters
INTEGER(LONG) :: INT2 ! Integer value read from file
INTEGER(LONG) :: IOCHK ! IOSTAT error number when opening or reading a file
INTEGER(LONG) :: OUNT(2) ! File units to write messages to. Input to subr UNFORMATTED_OPEN
INTEGER(LONG) :: REC_NO ! Record number when reading a file
INTEGER(LONG), PARAMETER :: SUBR_BEGEND = READ_DOF_TABLES_BEGEND
! **********************************************************************************************************************************
! Optional subroutine entry/exit logging, controlled by WRT_LOG.
IF (WRT_LOG >= SUBR_BEGEND) THEN
CALL OURTIM
WRITE(F04,9001) SUBR_NAME,TSEC
9001 FORMAT(1X,A,' BEGN ',F10.3)
ENDIF
! **********************************************************************************************************************************
! Make units for writing errors the error file and output file
OUNT(1) = ERR
OUNT(2) = F06
! Open L1C and read data. Skip data sets we don't need by reading them but not saving them
! NOTE(review): the calling-subroutine tag passed to FILE_OPEN is 'READ_STIME',
! not SUBR_NAME ('READ_DOF_TABLES') -- confirm this is intended.
CALL FILE_OPEN ( L1C, LINK1C, OUNT, 'OLD', L1C_MSG, 'READ_STIME', 'UNFORMATTED', 'READ', 'REWIND', 'Y', 'N', 'Y' )
! Read TSET array
! TSET is dimensioned NGRID x 6 (6 DOF components per grid point); the size
! record must match NGRID.
REC_NO = 0
NAME_ShouldBe = 'TSET'
READ(L1C,IOSTAT=IOCHK) NAME_Is ; REC_NO = REC_NO + 1
CALL READ_CHK ( IOCHK, LINK1C, NAME_ShouldBe, REC_NO, OUNT )
IF (NAME_Is /= NAME_ShouldBe) CALL DATA_SET_NAME_ERROR ( NAME_ShouldBe, LINK1C, NAME_Is )
READ(L1C,IOSTAT=IOCHK) INT2 ; REC_NO = REC_NO + 1
CALL READ_CHK ( IOCHK, LINK1C, NAME_ShouldBe, REC_NO, OUNT )
IF (INT2 /= NGRID) CALL DATA_SET_SIZE_ERROR ( LINK1C, NAME_Is, 'NGRID', NGRID, INT2 )
DO I=1,NGRID
DO J=1,6
READ(L1C,IOSTAT=IOCHK) TSET(I,J) ; REC_NO = REC_NO + 1
CALL READ_CHK ( IOCHK, LINK1C, NAME_ShouldBe, REC_NO, OUNT )
ENDDO
ENDDO
! Read TDOFI array
! TDOFI is dimensioned NDOFG x MTDOF; both sizes are validated before reading.
REC_NO = 0
NAME_ShouldBe = 'TDOFI'
READ(L1C,IOSTAT=IOCHK) NAME_Is ; REC_NO = REC_NO + 1
CALL READ_CHK ( IOCHK, LINK1C, NAME_ShouldBe, REC_NO, OUNT )
IF (NAME_Is /= NAME_ShouldBe) CALL DATA_SET_NAME_ERROR ( NAME_ShouldBe, LINK1C, NAME_Is )
READ(L1C,IOSTAT=IOCHK) INT2 ; REC_NO = REC_NO + 1
CALL READ_CHK ( IOCHK, LINK1C, NAME_ShouldBe, REC_NO, OUNT )
IF (INT2 /= NDOFG) CALL DATA_SET_SIZE_ERROR ( LINK1C, NAME_Is, 'NDOFG', NDOFG, INT2 )
READ(L1C,IOSTAT=IOCHK) INT2 ; REC_NO = REC_NO + 1
CALL READ_CHK ( IOCHK, LINK1C, NAME_ShouldBe, REC_NO, OUNT )
IF (INT2 /= MTDOF ) CALL DATA_SET_SIZE_ERROR ( LINK1C, NAME_Is, 'MTDOF', MTDOF, INT2 )
DO I=1,NDOFG
DO J=1,MTDOF
READ(L1C,IOSTAT=IOCHK) TDOFI(I,J) ; REC_NO = REC_NO + 1
CALL READ_CHK ( IOCHK, LINK1C, NAME_ShouldBe, REC_NO, OUNT )
ENDDO
ENDDO
! Read TDOF array
! TDOF has the same NDOFG x MTDOF layout and validation as TDOFI.
REC_NO = 0
NAME_ShouldBe = 'TDOF'
READ(L1C,IOSTAT=IOCHK) NAME_Is ; REC_NO = REC_NO + 1
CALL READ_CHK ( IOCHK, LINK1C, NAME_ShouldBe, REC_NO, OUNT )
IF (NAME_Is /= NAME_ShouldBe) CALL DATA_SET_NAME_ERROR ( NAME_ShouldBe, LINK1C, NAME_Is )
READ(L1C,IOSTAT=IOCHK) INT2 ; REC_NO = REC_NO + 1
CALL READ_CHK ( IOCHK, LINK1C, NAME_ShouldBe, REC_NO, OUNT )
IF (INT2 /= NDOFG) CALL DATA_SET_SIZE_ERROR ( LINK1C, NAME_Is, 'NDOFG', NDOFG, INT2 )
READ(L1C,IOSTAT=IOCHK) INT2 ; REC_NO = REC_NO + 1
CALL READ_CHK ( IOCHK, LINK1C, NAME_ShouldBe, REC_NO, OUNT )
IF (INT2 /= MTDOF ) CALL DATA_SET_SIZE_ERROR ( LINK1C, NAME_Is, 'MTDOF', MTDOF, INT2 )
DO I=1,NDOFG
DO J=1,MTDOF
READ(L1C,IOSTAT=IOCHK) TDOF(I,J) ; REC_NO = REC_NO + 1
CALL READ_CHK ( IOCHK, LINK1C, NAME_ShouldBe, REC_NO, OUNT )
ENDDO
ENDDO
CALL FILE_CLOSE ( L1C, LINK1C, 'KEEP', 'Y' )
! **********************************************************************************************************************************
IF (WRT_LOG >= SUBR_BEGEND) THEN
CALL OURTIM
WRITE(F04,9002) SUBR_NAME,TSEC
9002 FORMAT(1X,A,' END ',F10.3)
ENDIF
RETURN
! **********************************************************************************************************************************
END SUBROUTINE READ_DOF_TABLES
|
{"hexsha": "14d9e0a45ca74d9443d1793a960270c5397c89df", "size": 8441, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Source/UTIL/READ_DOF_TABLES.f90", "max_stars_repo_name": "dr-bill-c/MYSTRAN-general-purpose-finite-element-computer-program", "max_stars_repo_head_hexsha": "307d62953924e3945b22b2fcdb7963a8be330a0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2020-04-04T15:33:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T08:46:05.000Z", "max_issues_repo_path": "Source/UTIL/READ_DOF_TABLES.f90", "max_issues_repo_name": "dr-bill-c/MYSTRAN-general-purpose-finite-element-computer-program", "max_issues_repo_head_hexsha": "307d62953924e3945b22b2fcdb7963a8be330a0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2020-07-06T21:15:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-06T19:54:09.000Z", "max_forks_repo_path": "Source/UTIL/READ_DOF_TABLES.f90", "max_forks_repo_name": "dr-bill-c/MYSTRAN-general-purpose-finite-element-computer-program", "max_forks_repo_head_hexsha": "307d62953924e3945b22b2fcdb7963a8be330a0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2020-05-28T17:16:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-06T18:08:31.000Z", "avg_line_length": 55.5328947368, "max_line_length": 132, "alphanum_fraction": 0.4764838289, "num_tokens": 1932}
|
[STATEMENT]
lemma auxm2_lm0246:
assumes "(\<forall>r< n.(nth_bit a r + nth_bit b r \<le> 1))"
shows "(nth_bit (a+b) n) = (nth_bit a n + nth_bit b n) mod 2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (a + b) \<exclamdown> n = (a \<exclamdown> n + b \<exclamdown> n) mod 2
[PROOF STEP]
using assms no_carry
[PROOF STATE]
proof (prove)
using this:
\<forall>r<n. a \<exclamdown> r + b \<exclamdown> r \<le> 1
\<forall>r<?n. ?a \<exclamdown> r + ?b \<exclamdown> r \<le> 1 \<Longrightarrow> (?a + ?b) \<exclamdown> ?n = (?a \<exclamdown> ?n + ?b \<exclamdown> ?n) mod 2
goal (1 subgoal):
1. (a + b) \<exclamdown> n = (a \<exclamdown> n + b \<exclamdown> n) mod 2
[PROOF STEP]
by auto
|
{"llama_tokens": 323, "file": "DPRM_Theorem_Diophantine_Binary_And", "length": 2}
|
# Run line by line in the REPL, or ctrl + enter in VSCode
# Demo script for MechanicalUnits.jl: unit-aware arithmetic, conversion with
# `|>`, broadcasting over arrays of quantities, and parsing quantities from
# text. Lines are order-dependent (units are imported as the script proceeds),
# so run it top to bottom.
# First code section
using MechanicalUnits
# Heat example: energy to raise 1000 kg of air from T1 to T2, shown in kW*h.
m_air = 1000kg; c_p = 1.00kJ/(kg*K)
@import_expand ~W # Watt = Joule / Second is not exported by default. Several: (u1, u2,..)
Q_cp(T1, T2) = m_air * c_p * (T2 - T1) |> (kW*h)
Q_cp(20°C, 985°C)
dm |> upreferred
preferunits(m) # No effect, since upreferred was called once this session
# Mixed metric/imperial quantities broadcast together.
m_s = [30kg/m 28.8lb/ft]
l_s = 93ft*[3 4]m/s
m_s.*l_s .|> (kg*m)
# Cantilever deflection example (E: modulus, I: second moment of area).
E=206GPa; h_y = 100mm; b = 30mm; I = 1/12 * b * h_y^3
L = 2m; F=100kg*g |> N
F*L^3/(3E*I) |> upreferred
# Wire stiffness k(d) and deflection δ(d) as functions of wire diameter d.
l_wire = 20m
k(d) = E * 0.691 * π/4 * d^2 / l_wire |> N/mm
k.([5 6 8]mm)
δ(d)= F / k(d) |> mm
δ.([5, 6, 8]mm)
d = 6mm
dimension(d)
1d |> s
@import_expand ~V ~W ~A G
sqrt(1G²)
[1V*12.0A 2W 1kg*g*1m/2s]*30minute .|> kJ
# Electrical example: complex voltage samples at angular frequency ω.
ω = 50*2π*rad/s
t = (0:0.006:0.02)s
u = 220V*exp.(im∙(ω∙t))
u*1.5A .|> J
# Second code section
import MechanicalUnits: @import_expand, ∙
@import_expand ~m dyn # ~ : also import SI prefixes for metre
(1.0cm², 2.0mm∙m, 3.0dm⁴/m² ) .|> mm²
typeof(dyn)
1dyn |> μm
# Third code section
# Parse a tab-separated row of quantities (units in square brackets)
# directly into named variables.
strinp = "2 [s]\t11364.56982421875 [N]\t-44553.50244140625 [N]\t-26.586366176605225 [N]\t0.0[N mm]\t0.0[N mm]\t0.0[N mm]\t1561.00350618362 [mm]\t-6072.3729133606 [mm]\t2825.15907287598 [mm]";
time, Fx, Fy, Fz, Mx, My, Mz, px, py, pz = parse.(Quantity{Float64}, split(strinp, '\t'))
# Fourth code section
# Dimensionless strain: several equivalent ways to strip or convert units.
strain = 10.6μm/m
strain |> upreferred
strain *m/μm
strain |> NoUnits
|
{"hexsha": "e635bcd37df384c4c5074c71fe403bbca89ee3a4", "size": 1439, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "example/readme_example.jl", "max_stars_repo_name": "hustf/MechanicalUnits", "max_stars_repo_head_hexsha": "52660a9dc49d0dc7c42a2fc17b25b303ada67a4d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-02-02T08:39:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-27T19:39:55.000Z", "max_issues_repo_path": "example/readme_example.jl", "max_issues_repo_name": "hustf/MechanicalUnits", "max_issues_repo_head_hexsha": "52660a9dc49d0dc7c42a2fc17b25b303ada67a4d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-05-07T09:09:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-26T08:48:16.000Z", "max_forks_repo_path": "example/readme_example.jl", "max_forks_repo_name": "hustf/MechanicalUnits.jl", "max_forks_repo_head_hexsha": "52660a9dc49d0dc7c42a2fc17b25b303ada67a4d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.78, "max_line_length": 191, "alphanum_fraction": 0.6323835997, "num_tokens": 672}
|
\documentclass{article}
\let\ifluatex\relax
\usepackage{todonotes}
\usepackage{xspace}
\usepackage{url}
\usepackage{listings}
\usepackage{geometry}
\usepackage{enumitem}
\geometry{a4paper, margin=1in}
\usepackage{inconsolata}
\usepackage{hyperref}
\usepackage[tight]{minitoc}
\input{solidity-latex-highlighting}
\input{dea-latex-highlighting}
\lstset{
basicstyle=\ttfamily%, columns=fullflexible
}
\usepackage{fancyvrb}
\newcommand{\contractlarva}{\textsc{contractLarva}\xspace}
\newcommand{\keyword}[1]{\textit{$\langle$#1$\rangle$}}
\newcommand{\tildearrow}{{\raise.37ex\hbox{$\scriptstyle\mathtt{\sim}$}}\hspace{-0.08cm}>\xspace}
\newcommand{\placeholder}[1]{\mbox{$\langle$\textit{#1}$\rangle$}}
\begin{document}
\title{\contractlarva v0.2$\alpha$\\Tutorial}
\author{Shaun Azzopardi\\\texttt{shaun.azzopardi@um.edu.mt}}
\date{19 November 2019}
\maketitle
%\begin{center}
% \begin{tabular}{|lll|}\hline\qquad&&\qquad\\
% &\begin{minipage}{0.8\textwidth}
% \emph{This is an alpha version of \contractlarva. If you are using the tool on smart contracts which will be deployed on Ethereum, it is recommended that you inspect the code created by the tool before deployment. The authors accept no responsibility of any losses, direct or otherwise, incurred due to the use of the tool.}
% \end{minipage}&\\&&\\
% \hline\end{tabular}
%\end{center}
\begin{abstract}
This document illustrates \contractlarva through several use-cases. %\todo{maybe \contractlarva as a tool should include an option that gives certain guarantees, e.g. in verification mode we are guaranteed the smart contract behaviour is not changed, in enforcement mode we are guaranteed that only bad behaviour is prevented, while in adaptivity mode we have no guarantees.}
\end{abstract}
\tableofcontents
\section{Overview}
Smart contracts are programs, and like all programs they have a propensity for bugs and unintended behaviour, motivating the need for methods to detect and handle such behaviour. \contractlarva is one such approach that uses runtime monitors to detect and deal with these bugs. Moreover, \contractlarva can be used to adapt the behaviour of a smart contract, allowing a developer to develop smart contracts, or at least parts of a smart contract, using an event- and automata-based specification language. In this document we instead discuss several use cases, to illustrate the use and power of \contractlarva.
This document is not intended to be an introduction to \contractlarva and its syntax, which we assume the reader is familiar with. For that purpose read the \contractlarva documentation at \url{https://www.github.com/gordonpace/contractlarva/docs/main.pdf}.
% We consider three types of uses for \contractlarva: (i) as a \emph{runtime verifier}, where bad behaviour is detected at runtime; (ii) as a \emph{runtime enforcer} where bad behaviour at runtime is detected and voided, ensuring all the successful behaviour of a smart contract is always compliant; and (iii) as a \emph{runtime adaptor} that can modify the behaviour of the smart contract.
\subsection{Background}
Here we discuss and motivate several general areas of applicability \contractlarva, including runtime verification, enforcement, and behavioural model synthesis. The use cases considered fall to different degrees under these areas.
% \subsubsection{Runtime Verification -- {\large Verifying Compliance with Specifications}}
% \label{s:verif}
The primary motivation for \contractlarva is \textbf{runtime verification}. Verification is an approach to ensure well-behaved programs, by checking that the program is compliant with a specification. \contractlarva is a tool for runtime verification of smart contracts, allowing behaviour recorded on the blockchain, relative to one smart contract, to be given a verdict (satisfying or violating) at runtime.
Ideally smart contracts are verified correct before they are deployed to the blockchain. However, this may not always be possible, either because the problem is too hard or because it involves interaction with other systems. Consider that a smart contract may be used to record real-life events, e.g. a courier service may allow a delivery person to signal delivery though an appropriate smart contract function. A specification can specify bad traces of these real-life events, e.g. we may specify that an item should not be delivered if it was not ordered. Such a smart contract can easily be designed to be compliant with this specification, however then it is no longer in sync with the real-world if a violation occurs, e.g. if the smart contract simply does not allow a delivery to be recorded (i.e. \texttt{revert} is called) without a prior order then it may not maintain correct stock records. This may affect compliance with other aspects of the desired behaviour of the smart contract.
Then, a specification may not necessarily simply be about a smart contract on its own, but also about its interaction with an outside system (e.g. the real-world). For these kinds of specifications, methods based on \textbf{static code analysis should fail} (i.e. they should identify that violations can occur): static compliance with such a specification would in fact be evidence in favor of buggy behaviour. On the other hand, methods based on runtime verification allow us to monitor for and deal with these violations, without changing the behaviour of the smart contract. This is essential for smart contracts that need to synchronize with other uncontrollable systems. \contractlarva is an ideal tool for this, allowing us to detect these violations at runtime.
% \subsubsection{Runtime Enforcement -- {\large Enforcing Compliance with Specifications}}
% \label{s:enforce}
Not all smart contracts require synchronicity with other systems, but may instead be standalone systems. In this case we may be able to check for compliance with a specification using static code analysis. Runtime verification is also an option, however gas concerns come into play. Consider a smart contract that implements a certain specification: if we verify this at runtime by interleaving the same smart contract with the specification, then we are replicating work. This replication may allow us to identify certain bugs in the implementation, which is useful, and replication of the same logic makes the system more robust and trustworthy. However, replication increases the amount of gas used by transactions. Using a smart contract instrumented with a specification for \textbf{testing} purposes can also be useful and is a situation where gas concerns do not come into play.
Another approach is to use \contractlarva as part of the software engineering process, in a \textbf{model-driven development} manner. In other words, a developer can use \contractlarva to synthesize automatically certain business logic from a specification. This ensures that the specification is being \textbf{enforced at runtime}, that is the instrumented smart contract is correct\footnote{Modulo the correctness of the implementation of \contractlarva and of the used Solidity compiler.}.
Another interesting use case involves \textbf{proxy smart contracts}. Consider that a deployed smart contract is immutable, however mutability can be simulated by introducing a proxy smart contract that maintains a record of the current implementation address and passes any function call to that address. In this way the behaviour of the proxy smart contract wholly depends on which address its implementation variable points to at runtime, which can be changed. \contractlarva can be used to enforce a certain specification on the proxy smart contract, ensuring that although the implementation changes at runtime its behaviour still remains within certain boundaries.
% \todo{put reference to use cases in background, according to topic. (e.g. courier service is associated with verification)}
\section{Use Cases}
\subsection{Use Case Repository}
The use cases we consider can all be found at \url{https://github.com/gordonpace/contractLarva/tree/master/use-cases}. They all follow the following file structure:
\begin{enumerate}
\item \texttt{<name>/<name>.sol}: The smart contract implementing some business logic.
\item \texttt{<name>/<name>Spec.dea}: A specification of an aspect of the business logic, possibly including some business logic transforming the behaviour of the smart contract.
\item \texttt{<name>/<name>Monitored.sol}: The smart contract after being instrumented/wrapped with the specification.
\end{enumerate}
% One of the benefits of a distributed ledger technology is that it provides for immutable transactions that once recorded to the blockchain cannot be modified.
% We discuss two use-cases falling under this motivation: (i) a courier service contract; and a (ii) procurement contract.
%use cases with real world event that cannot be prevented
% \subsection{Checking Compliance with a Behavioural Contract}
\subsection{Monitoring for Real-World Violations in a Courier Service}
% One of the benefits of a distributed ledger technology is that it provides for immutable transactions that once recorded to the blockchain cannot be modified. This is the ideal ecosystem on which to implement a system that relies
\texttt{<name>:} \verb+CourierService+\\
Given the immutable nature of blockchain transactions, a smart contract can be used to allow for dependable record-keeping. This can be to keep track of orders delivered by a courier service. Listing.~\ref{code:courierservice} illustrates such a smart contract, where an order and delivery can be recorded to the blockchain using appropriate functions.
\small\begin{lstlisting}[language=Solidity,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,label={code:courierservice}, caption={Courier Service smart contract.}]
contract CourierService{
bool ordered;
bool delivered;
function order(uint _eta, address _buyer, string memory _address) public{
require(!ordered);
ordered = true;
}
function deliver(address _signer, string memory _address) public{
require(!delivered);
delivered = true;
}
}
\end{lstlisting}\normalsize
This smart contract only does basic validation, allowing an order to be placed only once, and a delivery to be recorded only once.
\contractlarva can be used to specify more sophisticated business logic, e.g. in Listing.~\ref{dea:courierservice} we specify that an order cannot be delivered before being ordered (see lines 17 and 23), and that an order should be delivered within the expected estimated time of arrival, and at the appropriate address (see lines 20 and 25).
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={dea:courierservice},caption={Monitor that only ordered items are delivered.}]
monitor CourierService{
declarations {
string orderAddress;
uint orderETA;
function stringEquality (string memory a, string memory b) public view
returns (bool) {
return (keccak256(abi.encodePacked((a))) == keccak256(abi.encodePacked((b))) );
}
}
DEA NoDeliveryBeforeOrder {
states {
Start: initial;
Ordered;
Bad: bad;
Good: accept;
}
transitions {
Start -[after(order(_eta, _buyer, _address))
%$\tildearrow$% orderAddress = _address; orderETA = _eta;]-> Ordered;
Ordered -[after(deliver(_signer, _address))
| orderETA <= now && stringEquality(_address, orderAddress)]-> Good;
Start -[after(deliver(_signer, _address))]-> Bad;
Ordered -[after(deliver(_signer, _address))
| orderETA > now || !stringEquality(_address, orderAddress)]-> Bad;
}
}
}
\end{lstlisting}\normalsize
In this use case we are allowing for violating behaviour to be written to the blockchain, instead of failing with a \texttt{require}, since we want the smart contract to reflect the state of the real world. Instead of reverting a violation a user may query the monitored smart contract to check whether it is in a bad state.
A limitation of this use case is that we are limiting ourselves to only one order, rather than multiple orders. DEAs currently lack the power to specify the behaviour of each order in a multi-order smart contract. We intend to extend DEAs with this notion of \emph{typestate} in the future.
\subsection{Enforcing a Real-World Procurement Contract}
\texttt{<name>:} \verb+Procurement+\\
A use case for blockchains and smart contracts is as the theatre wherein parties to a real-world contract interact. This allows us to enforce some aspects of the real-world contract.
In this use-case we consider a procurement contract, i.e. a contract wherein a buyer binds themselves to buy some amount of goods from a seller, at a certain price and by a certain time.
Consider that the seller has presented a legal contract to the buyer, along with a smart contract which the buyer claims only allows the behaviour specified in the real-world contract. Figure.~\ref{f:legal-contract-procurement} specifies such a legal contract, while Listing.~\ref{lt:courierservice} is an extract from such a smart contract.
\begin{figure}[t]
\setlength{\belowcaptionskip}{-20pt}
\scriptsize %footnotesize
{\itshape
\begin{enumerate}
%\setcounter{enumi}{-1}
\item \label{cc:termination-clause} This contract is between \placeholder{buyer-name}, henceforth referred to as `the buyer' and \placeholder{seller-name}, henceforth referred to as `the seller'. The contract will hold until either party requests its termination.
\item \label{cc:contract-parameters} The buyer is obliged to order at least \placeholder{minimum-items}, but no more than \placeholder{maximum-items} items for a fixed price \placeholder{price} before the termination of this contract.
% \item $[\neg requestEnd]O(order(items) \mid max >= items >= \placeholder{minimum-items})$
\item \label{cc:escrow-payment-for-contract} Notwithstanding clause~\ref{cc:termination-clause}, no request for termination will be accepted before \placeholder{contract-end-date}. Furthermore, the seller may not terminate the contract as long as there are pending orders.
% \item $[requestEnd \mid now < \placeholder{contract-end-date}]\bot$
% \item $[requestEnd_{seller} \mid orders.size() > 0]\bot$
\item Upon enactment of this contract, the buyer is obliged to place the cost of the minimum number of items to be ordered in escrow.
% \item $O(escrow(costOf(min))_{buyer});\placeholder{rest-of-contract}$
\item \label{cc:performance-guarantee-escrow} Upon accepting this contract, the seller is obliged to place the amount of \placeholder{performance-guarantee} in escrow, otherwise, if only a partial amount is placed, the seller is obliged to place the rest by a time period at the buyer's discretion.%the contract is terminated and the buyer's and seller's respective escrow is returned.
% \item $O(escrow(\placeholder{performance-guarantee}))$
%guaranteed by "Upon delivery, the seller receives payment of the order"
% \item \label{cc:payment} Upon termination of the contract, the seller is guaranteed to have received payment covering the cost of the minimum number of items to be ordered unless less than this amount is delivered, in which case the cost of the undelivered items is not guaranteed.
% \item $[requestEnd \mid (deliveredItems >= \placeholder{minimum-items} \wedge \neg(paidToSeller >= (costOf(\placeholder{minimum-items})))] \bot$
% \item $[requestEnd \mid (deliveredItems < \placeholder{minimum-items} \wedge \neg(paidToSeller >= (costOf(\placeholder{minimum-items} - deliveredItems)))] \bot$
\item \label{cc:right-to-order} While the contract has not been terminated, the buyer has the right to place an order for an amount of items and a specified time-frame as long as (i) the running number of items ordered does not exceed the maximum stipulated in clause~\ref{cc:contract-parameters}; and (ii) the time-frame must be of at least 24 hours, but may not extend beyond the contract end date specified in clause~\ref{cc:contract-parameters}.
% \item $P(order_{buyer}(items, inTime) \mid items.size() <= \placeholder{maximum-items} \wedge inTime < 24 hours)$
\item \label{cc:escrow-payment-for-order} Upon placing an order, the buyer is obliged to ensure that there is enough money in escrow to cover payment of all pending orders.
% \item $[order \mid escrow_{buyer} < costOf(orders)]\bot$
\item Before termination of the contract, upon delivery the seller must receive payment of the order.
% \item $[deliveryMadeWithPayment^N]\bot$
% \item Upon termination of the contract, any undelivered orders are automatically cancelled, and the seller loses the right to receive payment for these orders.
% \item $[requestEnd \mid pendingOrderCount != 0]\bot$
\item Upon termination of the contract, if either any orders were undelivered or more than 25\% of the orders were delivered late, the buyer has the right to receive the performance guarantee placed in escrow according to clause~\ref{cc:performance-guarantee-escrow}. %Otherwise, it is released back to the seller if the buyer does not claim it in 10 days from termination.
% \item $[requestEnd \mid pendingOrderCount != 0 ]$
\end{enumerate}
}\normalsize
\vspace{-2ex}
\caption{A legal contract regulating a procurement process.}
\label{f:legal-contract-procurement}
\end{figure}
\small\begin{lstlisting}[language=Solidity,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,label={lt:courierservice},caption={Excerpt from procurement smart contract.}]
pragma solidity ^0.4.15;
contract Procurement {
enum ContractStatus { Proposed, Open, Closed }
enum OrderStatus { Ordered, Delivered }
struct Order {
bool exists;
uint cost;
OrderStatus status;
uint deliveryTimeDue;
}
...
function acceptContract() public payable bySeller {
require(msg.value >= performanceGuarantee);
contractStatus = ContractStatus.Open;
}
function createOrder(
uint8 _orderNumber,
uint8 _orderSize,
uint _orderDeliveryTimeLeft
) public payable byBuyer
{
// Order does not already exist
require(!orders[_orderNumber].exists);
// Number of items ordered does not exceed maximum
require(itemsOrderedCount + _orderSize <= maximumItemsToBeOrdered);
// Order delivery deadline will not be too late
require(now + _orderDeliveryTimeLeft <= endOfContractTimestamp);
// Ensure there is enough money left in the contract to pay for the order
uint orderCost = _orderSize * costPerUnit;
moneyLeftInContract += msg.value;
require(orderCost <= moneyLeftInContract);
moneyLeftInContract -= orderCost;
// Update number of items ordered
itemsOrderedCount += _orderSize;
// Update contract status
pendingOrderCount++;
pendingOrderCost += orderCost;
// Record the order
orders[_orderNumber] = Order(true, orderCost, OrderStatus.Ordered, now+_orderDeliveryTimeLeft);
}
}
\end{lstlisting}\normalsize
Checking that the smart contract is always compliant with the required behaviour is a hard problem. Instead, \contractlarva can be used to enforce the behaviour on the execution trace at runtime, simply through building a formal representation of the legal contract.
For example, Listing.~\ref{dea:procurement} specifies formally the second clause of Figure.~\ref{f:legal-contract-procurement}. Consider that with Line 20 we are keeping count of the number of orders, while in Line 21 (Line 22) we are ensuring that the minimum (maximum) items to be ordered is constant and is not modified while the contract is running. With Lines 23 and 24 we ensure that just before the contract is terminated enough items have been ordered, otherwise termination is reverted (note Lines 6-8).
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={dea:procurement},caption={Monitor specification that checks whether the number of items ordered is within the minimum and maximum required.}]
monitor Procurement{
declarations {
uint orderCount;
}
reparation {
revert();
}
//The buyer is obliged to order at least <minimum-items>, but no more than <maximum-items> items for a fixed price <price> before the termination of this contract.
DEA EnoughItemsOrdered{
states{
DuringContract: initial;
RangesChanged: bad;
OutsideOfRange: bad;
EnoughItems: accept;
}
transitions{
DuringContract -[after(createOrder(_orderNumber, _orderSize, _orderDeliveryTimeLeft)) | %$\tildearrow$% orderCount += _orderSize;]-> DuringContract;
DuringContract -[minimumItemsToBeOrdered@(LARVA_previous_minimumItemsToBeOrdered != minimumItemsToBeOrdered)]-> RangesChanged;
DuringContract -[maximumItemsToBeOrdered@(LARVA_previous_maximumItemsToBeOrdered != maximumItemsToBeOrdered)]-> RangesChanged;
DuringContract -[after(terminateContract()) | orderCount < minimumItemsToBeOrdered || orderCount > maximumItemsToBeOrdered]-> OutsideOfRange;
DuringContract -[after(terminateContract()) | orderCount >= minimumItemsToBeOrdered && orderCount <= maximumItemsToBeOrdered]-> EnoughItems;
}
}
}
\end{lstlisting}\normalsize
The buyer can request the smart contract be instrumented with such a DEA to ensure they can trust the smart contract at runtime.
Moreover, DEAs can be used to deal with possible edge cases. For example Listing.~\ref{dea:courierservice2} specifies that any ether left in the contract after termination should be transferred to the contract's deployer. Note how Lines 8-10 ensure that when a contract is initialised we keep track of the contract's deployer in the \texttt{mediator} variable. Line 25 specifies that if the contract ends with some balance then we can transition to a bad state, which activates the reparation specified by Lines 12-14, i.e. that the mediator is transferred the remaining balance.
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={dea:courierservice2},caption={Monitor that checks that when the procurement contract terminates no ether is left in the smart contract.}]
monitor Procurement{
declarations {
address mediator;
uint orderCount;
bool noEtherAfterTerminationBadStateReached;
}
initialisation {
mediator = address(uint160(msg.sender));
}
reparation {
noEtherAfterTerminationBadStateReached ? mediator.transfer(this.balance) : ();
}
//When a contract terminates there should not be any ether left in its balance.
DEA NoEtherAfterTermination{
states{
DuringContract: initial;
EndedWithBalance: bad;
EndedWithoutBalance: accept;
}
transitions{
DuringContract -[after(terminateContract) | this.balance != 0 %$\tildearrow$% noEtherAfterTerminationBadStateReached = true;]-> EndedWithBalance;
DuringContract -[after(terminateContract) | this.balance == 0]-> EndedWithoutBalance;
}
}
\end{lstlisting}\normalsize
\subsection{Enforcing an ERC20 Specification}
\texttt{<name>:} \verb+FixedSupplyToken+\\
A common use for blockchains is to manage cryptocurrencies. In fact smart contracts are used to implement wallets (usually following some conventional interface such as ERC20 in Listing.~\ref{code:erc20interface}), allowing users to own a certain amount of tokens and to use them. The design and implementation of these wallets is critical, in fact in the past bugs in wallet implementations have led to a significant amount of tokens with real-world value being lost (e.g. the Parity bug). \contractlarva monitors implementing aspects of the expected behaviour of a wallet can serve as a further defense against such bugs and other attacks.
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={code:erc20interface},caption={ERC20 interface.}]
// ----------------------------------------------------------------------------
// ERC Token Standard #20 Interface
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md
// ----------------------------------------------------------------------------
interface ERC20TokenImplementation {
function totalSupply () external constant returns (uint);
function balanceOf (address tokenOwner) external constant returns (uint balance);
function allowance (address tokenOwner, address spender) external constant returns (uint remaining);
function transfer (address caller, address to, uint tokens) external returns (bool success);
function approve (address caller, address spender, uint tokens) external returns (bool success);
function transferFrom (address caller, address from, address to, uint tokens) external returns (bool success);
event Transfer (address indexed from, address indexed to, uint tokens);
event Approval (address indexed tokenOwner, address indexed spender, uint tokens);
}
\end{lstlisting}\normalsize
Here we consider a fixed supply token wallet, where the wallet is initialised with a certain amount of tokens that should not change at runtime. Looking at the implementation of the \texttt{transfer} function in Listing.~\ref{code:transferfunc}, we can see that the balance of the function caller is reduced on Line 2, and the balance of the intended recipient is increased on Line 3. Note that here addition and subtraction are encoded in appropriate functions, so that \contractlarva is able to instrument them.
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={code:transferfunc},caption={\texttt{transfer} function.}]
function transfer(address to, uint tokens) onlyOwner public returns (bool success) {
balances[msg.sender] = sub(balances[msg.sender],tokens);
balances[to] = add(balances[to],tokens);
emit Transfer(msg.sender, to, tokens);
return true;
}
\end{lstlisting}\normalsize
One way to ensure there is a fixed supply of tokens is to simply check that any addition to any user's balance is paired with a subtraction from another user's balance. Listing.~\ref{dea:fixedsupplytoken} encodes this logic.
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={dea:fixedsupplytoken},caption={Monitor that checks that an addition in tokens is accompanied with an equal subtraction.}]
monitor FixedSupplyToken{
declarations{
uint currentTokens;
}
reparation{
revert();
}
//This property checks that any addition to the balance must be coupled immediately with a subtraction,
//ensuring tokens are only moved around, while ensuring that any change is immediately (modulo one step) reflected in the total sum.
DEA AdditionOfBalanceMustBeAccompaniedBySubtraction{
states{
Before: initial;
StartTransfer;
SubAfterAdd;
AddAfterSub;
EndTransfer;
UnMatchedModification: bad;
}
transitions{
Before -[before(transfer(caller, to, tokens)) | %$\tildearrow$% currentTokens = tokens;]-> StartTransfer;
StartTransfer -[after(add(a, tokens)) | currentTokens == tokens]-> SubAfterAdd;
StartTransfer -[after(add(a, tokens)) | currentTokens != tokens]-> UnMatchedModification;
StartTransfer -[after(sub(a, tokens)) | currentTokens == tokens]-> AddAfterSub;
StartTransfer -[after(sub(a, tokens)) | currentTokens != tokens]-> UnMatchedModification;
SubAfterAdd -[after(sub(a, tokens)) | currentTokens == tokens]-> EndTransfer;
SubAfterAdd -[after(sub(a, tokens)) | currentTokens != tokens]-> UnMatchedModification;
SubAfterAdd -[after(add(a, tokens))]-> UnMatchedModification;
AddAfterSub -[after(add(a, tokens)) | currentTokens == tokens]-> EndTransfer;
AddAfterSub -[after(add(a, tokens)) | currentTokens != tokens]-> UnMatchedModification;
AddAfterSub -[after(sub(a, tokens))]-> UnMatchedModification;
EndTransfer -[after(transfer(caller, to, tokens)) | %$\tildearrow$% currentTokens = 0;]-> Before;
SubAfterAdd -[after(transfer(caller, to, tokens))]-> UnMatchedModification;
AddAfterSub -[after(transfer(caller, to, tokens))]-> UnMatchedModification;
}
}
}
\end{lstlisting}\normalsize
Consider that the DEA is a functional specification for the \texttt{transfer} function. That is, it is activated when a transfer starts (Line 24), and should give a verdict or restart after the transfer ends (Line 39). Consider also that an appropriate implementation may either first reduce the balance of the sender and then increase the balance of the recipient, or vice-versa. Similarly the specification is symmetric (see states on Line 17 and Line 18, and transitions on Lines 26-37). The specification first keeps track of the number of tokens to be transferred (see the variable \texttt{currentTokens} declared on Line 4 and the action part of the transition on Line 24). Then, any addition or subtraction must be equivalent to this value (see Line 26), while any divergence leads to a violation (see Line 27). If the transfer ends before a pair of addition and subtraction operations are performed then the monitor ends in a bad state (see Lines 40 and 41).
\subsection{Enforcing a Casino Specification}
\texttt{<name>:} \verb+Casino+\\
% \subsection{Enforcing Call Well-Ordering}
Smart contracts have been used to allow for people to bet a certain amount of crypto-currency tokens, which allows for users who guessed the correct outcome to be automatically rewarded. This use case considers such a smart contract, which can be found in Listing.~\ref{code:casino}.
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={code:casino},caption={Casino smart contract.}]
pragma solidity ^0.5.11;
contract Casino{
mapping(uint => mapping (uint => address payable[])) placedBets;
mapping(uint => mapping(address => uint)) potShare;
uint[] numbersGuessed;
uint pot;
uint tableID;
uint tableOpenTime;
address owner;
constructor() public{
owner = msg.sender;
}
function openTable() public{
require(msg.sender == owner);
require(tableOpenTime == 0);
tableOpenTime = now;
tableID++;
}
function closeTable() public{
require(msg.sender == owner);
require(pot == 0);
delete numbersGuessed;
}
function timeoutBet() public{
require(msg.sender == owner);
require(now - tableOpenTime > 60 minutes);
require(pot != 0);
for (uint i = 0; i < numbersGuessed.length; i++) {
uint l = placedBets[tableID][numbersGuessed[i]].length;
for (uint j = 0; j < l; j++) {
address payable better = placedBets[tableID][numbersGuessed[i]][l];
better.transfer(potShare[tableID][better]);
delete placedBets[tableID][numbersGuessed[i]];
}
}
closeTable();
}
function placeBet(uint guessNo) payable public{
require(msg.value > 1 ether);
potShare[tableID][msg.sender] += msg.value;
placedBets[tableID][guessNo].push(msg.sender);
numbersGuessed.push(guessNo);
pot += msg.value;
}
//we assume owner is trusted
function resolveBet(uint _secretNumber) public{
require(msg.sender == owner);
uint l = placedBets[tableID][_secretNumber].length;
if(l != 0){
for (uint i = 0; i < l; i++) {
placedBets[tableID][_secretNumber][i].transfer(pot/l);
}
}
pot = 0;
closeTable();
}
}
\end{lstlisting}\normalsize
A user may not trust such a smart contract enough to handle their money. The owner may increase such confidence by instrumenting the smart contract with appropriate specifications, while the user may also instrument the smart contract and test the resulting specification on a testnet.
In Listing.~\ref{dea:casino} we consider two properties the casino should have: (i) when there is an ongoing bet (i.e. the table is open) the running pot should not be reduced but only increase; and (ii) the table cannot be closed during a bet.
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={dea:casino},caption={Monitors that check: (i) that a game's \texttt{pot} is increasing; and (ii) a game is open until it is resolved.}]
monitor Casino{
declarations{
uint total;
}
DEA NoReduction {
states {
TableOpen: initial;
TableClosed: accept;
BetPlaced;
PotReduced: bad;
}
transitions {
TableOpen -[after(closeTable) | pot == 0 ]-> TableClosed;
TableOpen -[after(placeBet(_value)) | _value <= pot %$\tildearrow$% total += _value;]-> BetPlaced;
BetPlaced -[after(timeoutBet)]-> TableOpen;
BetPlaced -[after(resolveBet)]-> TableOpen;
BetPlaced -[pot@(LARVA_previous_pot > pot)]-> PotReduced;
}
}
DEA OpenUntilResolution {
states {
TableClosed: initial;
TableOpen;
BetPlaced;
TableCloseDuringBet: bad;
}
transitions {
TableClosed -[after(openTable)]-> TableOpen;
TableOpen -[after(closeTable)]-> TableClosed;
TableOpen -[after(placeBet)]-> BetPlaced;
BetPlaced -[after(resolveBet)]-> TableOpen;
BetPlaced -[after(timeoutBet)]-> TableOpen;
BetPlaced -[after(closeTable)]-> TableCloseDuringBet;
}
}
}
\end{lstlisting}\normalsize
\subsection{Safe Mutability of an ERC20 Wallet}
\texttt{<name>:} \verb+ERC20Interface+\\
Smart contracts are immutable, however a proxy design pattern can be used to simulate mutability, by having a proxy smart contract act as the main entry-point, which passes on function calls to different versions of the smart contract implementing the main business logic. This can be unsafe, since the business logic may mutate without any notice. A use-case for \contractlarva is to limit the mutability of the business logic by enforcing a certain specification on the proxy smart contract.
Consider an ERC20 wallet, which has the interface specified in Listing.~\ref{code:erc20interface}. A proxy smart contract would be similar to the smart contract extract in Listing.~\ref{code:erc20behavinteface}.
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={code:erc20behavinteface},caption={Extract from ERC20 proxy interface.}]
contract ERC20Interface{
ERC20TokenImplementation impl;
address owner;
constructor(ERC20TokenImplementation _impl, address _owner) public{
impl = _impl;
owner = _owner;
}
function updateImplementation(address newImpl) public{
require(msg.sender == owner);
impl = ERC20TokenImplementation(newImpl);
}
function transfer(address to, uint tokens) public returns (bool success){
return impl.transfer(msg.sender, to, tokens);
}
...
}
\end{lstlisting}\normalsize
The DEA in Listing.~\ref{dea:erc20}, when woven into Listing.~\ref{code:erc20behavinteface}, enforces the well-behaviour of the \texttt{transfer} function. In effect this sets pre- and post-conditions for calls of the function, and disallows re-entrancy into the proxy smart contract from the concrete implementation. We can create similar specifications for the other non-pure functions.
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={dea:erc20},caption={Monitor that ensures well-behaviour of \texttt{transfer} function with appropriate pre- and post-conditions.}]
monitor ERC20Interface{
declarations {
uint transferPreFrom;
uint transferPreTo;
uint transferFromPreFrom;
uint transferFromPreTo;
uint preAllowance;
}
reparation {
revert();
}
DEA TransferWellBehaviour {
states {
Before: initial;
After;
Bad: bad;
}
transitions {
Before -[before(transfer(to, tokens)) | %$\tildearrow$% {transferPreFrom = balanceOf(msg.sender); transferPreTo = balanceOf(to);}]-> After;
After -[before(transfer(to, tokens))]-> Bad;
After -[after(transfer(to, tokens)) | transferPreFrom < tokens && (balanceOf(msg.sender) != transferPreFrom || balanceOf(to) != transferPreTo)]-> Bad;
After -[after(transfer(to, tokens)) | transferPreFrom < tokens && (balanceOf(msg.sender) == transferPreFrom && balanceOf(to) == transferPreTo)]-> Before;
After -[after(transfer(to, tokens)) | transferPreFrom >= tokens && (balanceOf(msg.sender) != (transferPreFrom - tokens) || balanceOf(to) != (transferPreTo - tokens))]-> Bad;
After -[after(transfer(to, tokens)) | transferPreFrom >= tokens && (balanceOf(msg.sender) == (transferPreFrom - tokens) && balanceOf(to) == (transferPreTo - tokens))]-> Before;
}
}
}
\end{lstlisting}\normalsize
\subsection{Adding Insurance Logic to a Courier Service}
\texttt{<name>:} \verb+InsuredCourierService+\\
We have used \contractlarva to enforce specifications by reverting bad behaviour, however we can go a step further by using it to add more sophisticated logic.
A particular use-case is the addition of insurance logic. Consider the courier service contract in Listing.~\ref{code:courierservice1}, extended with a function \texttt{complain}, wherein the buyer can complain if the order has not yet been delivered.
\small\begin{lstlisting}[language=Solidity,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,label={code:courierservice1},caption={Courier service smart contract extended with a \texttt{complain} function.}]
contract CourierService{
bool ordered;
bool delivered;
uint value = 1 ether;
address buyer;
function order(uint _eta, address _buyer, string memory _address) public{
require(!ordered && msg.value == value);
ordered = true;
buyer = _buyer;
}
function deliver(address _signer, string memory _address) public{
require(!delivered);
delivered = true;
}
function complain() public{
require(msg.sender == buyer && !delivered);
}
}
\end{lstlisting}\normalsize
In this form, the smart contract does not give any assurances about when the order will be delivered. \contractlarva can be used to encode such assurances.
The DEA in Listing.~\ref{dea:insurancecourier} inserts some logic before the smart contract is initialised, where it requires the \texttt{payStake} function to be called before the smart contract is enabled. Consider how if the order is not delivered on time this logic pays the customer an amount of ether (specified on Line 8) to make up for the inconvenience.
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={dea:insurancecourier},caption={Monitor that implements state-based insurance logic.}]
monitor CourierService {
declarations {
uint orderedTime;
uint minimumInsuredDeliveryTime = 24*30 hours;
address payable private insurer_address;
function getStake() private returns(uint) { return value; }
function getInsurer() private returns(address payable) { return insurer_address; }
function getInsured() private returns(address payable) { return customer; }
enum STAKE_STATUS { UNPAID, PAID }
STAKE_STATUS private stake_status = STAKE_STATUS.UNPAID;
function payStake() payable public{
require (stake_status == STAKE_STATUS.UNPAID);
require (msg.value == getStake());
require (msg.sender == getInsurer());
stake_status = STAKE_STATUS.PAID;
LARVA_EnableContract();
}
}
initialisation {
insurer_address = msg.sender;
}
reparation {
getInsured().transfer(getStake());
LARVA_DisableContract();
}
satisfaction {
getInsurer().transfer(getStake());
}
DEA UnDelivered{
states{
Start: initial;
Ordered;
Delivered: accept;
Undelivered: bad;
}
transitions{
Start -[after(order) | %$\tildearrow$% orderedTime = now;]-> Ordered;
Ordered -[after(deliver) | now - orderedTime <= minimumInsuredDeliveryTime]-> Delivered;
Ordered -[after(deliver) | now - orderedTime > minimumInsuredDeliveryTime]-> Undelivered;
}
}
}
\end{lstlisting}\normalsize
\subsection{Adding Fail-safe logic to a Multi-Owner Wallet}
\texttt{<name>:} \verb+MultiOwnersWallet+\\
A wallet may be owned by multiple entities rather than by a single owner, requiring sophisticated logic to orchestrate the voting process between the multiple owners.
Listing.~\ref{lt:multi-owner} is an extract of a multi-owner wallet, including a modifier that ensures all owners have signed off on a certain action (Line 6), a function to propose a transaction (Line 18), a transfer function that performs a transaction if all owners sign off on it (Line 24), and a function which owners can use to vote to remove owners (Line 28).
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={lt:multi-owner},caption={A wallet smart contract allowing for multiple owners.}]
pragma solidity ^0.5.11;
contract MultiOwners {
...
modifier allOwners (uint _id){
require(owners[msg.sender]);
actionSignOffs[_id][msg.sender] = true;
for(uint i = 0 ; i < ownerList.length; i++){
if(ownerList[i] != address(0) && !actionSignOffs[_id][ownerList[i]]){
return;
}
}
_;
}
function proposeTransaction(address payable _to, uint _val) public anyOwner{
idTo[id] = _to;
idVal[id] = _val;
id++;
}
function transfer(uint _id) allOwners(_id) public{
idTo[_id].transfer(idVal[_id]);
}
function removeOwner(address _address) anyOwner public{
votesToRemove[_address][msg.sender] = true;
votesToRemoveKeys[_address].push(msg.sender);
uint countInFavour = 0;
uint totalCount = ownerCount;
for(uint i = 0; i < totalCount; i++){
if(votesToRemove[_address][votesToRemoveKeys[_address][i]]){
countInFavour++;
}
}
uint limit = 2*totalCount/3;
if(countInFavour >= limit){
owners[_address] = false;
ownerCount--;
for(uint i = 0; i < ownerList.length; i++){
if(ownerList[i] != address(0) && ownerList[i] == _address){
ownerList[i] = address(0);
}
}
}
}
}
\end{lstlisting}\normalsize
This smart contract represents the base functionality of the wallet, with little validation. Instead the well-behaviour of the smart contract can be specified separately using DEAs.
In Listing.~\ref{dea:multi-owner} we specify this well-behaviour, in terms of two DEAs. First we ensure that transactions started by ex-owners are not carried out, and secondly we ensure that an owner can only vote once in removing an owner, since the current implementation allows that.
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={dea:multi-owner},caption={A monitor that checks that transactions started by ex-owners are not fulfilled, and that owners can only vote once.}]
monitor MultiOwners {
declarations{
mapping(uint => address) idRequestedBy;
mapping(address => mapping(address => bool)) votes;
}
reparation {
revert();
}
DEA IgnoreTransactionsStartedByExOwners{
states{
BeforeTransfer: initial;
BadTransfer: bad;
}
transitions{
BeforeTransfer -[after(proposeTransaction(_to,_val)) | %$\tildearrow$% idRequestedBy[id - 1] = msg.sender;]-> BeforeTransfer;
BeforeTransfer -[before(transfer(_id)) | !owners[idRequestedBy[_id]]]-> BadTransfer;
}
}
DEA NeutraliseDoubleVote{
states{
BeforeVote: initial;
DoubleVote: bad;
}
transitions{
BeforeVote -[after(removeOwner(_address)) | %$\tildearrow$% votes[_address][msg.sender] = true;]-> BeforeVote;
BeforeVote -[before(removeOwner(_address)) | votes[_address][msg.sender]]-> DoubleVote;
}
}
}
\end{lstlisting}\normalsize
\subsection{Adding Logic to an Auction House}
\texttt{<name>:} \verb+SmartAuctionHouse+\\
Smart contracts are used to automate some part of a real-world process, ensuring that part of the process is carried out according to a strict set of rules. An auction is an example of such a process that can be carried out using a smart contract and that we consider in this use case.
Listing.~\ref{lt:auctionhouse} is an implementation of an auction house, allowing auctions to be started (Line 33), allowing people to make offers (Line 48), the auctioneer to start calling the auction (Line 40), and to declare a winning offer (Line 56). A winner can then fulfill their offer (Line 63).
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={lt:auctionhouse},caption={An auction house smart contract.}]
pragma solidity ^0.4.24;
contract SmartAuctionHouse{
uint currentItem;
uint startingOffer;
uint currentOffer;
address currentWinner;
uint ticks;
mapping(uint => address) winners;
mapping(uint => uint) winningOffer;
mapping(uint => bool) fulfilled;
address owner;
function SmartAuctionHouse(){
owner = msg.sender;
}
modifier onlyOwner(){
require(msg.sender == owner);
_;
}
modifier onlyOwnerOrInternal(){
require(msg.sender == owner || msg.sender == address(this));
_;
}
function auctionOffItem(uint _offerID, uint _startingOffer) public {
require(!ongoingAuction());
currentItem = _offerID;
startingOffer = _startingOffer;
}
function tick() public onlyOwner{
require(ongoingAuction());
ticks++;
if(ticks > 2) declareWinningOffer();
}
function makeOffer(uint _offer) public {
require(_offer > currentOffer);
currentOffer = _offer;
currentWinner = msg.sender;
}
function declareWinningOffer() public onlyOwnerOrInternal{
require(ticks > 2);
winners[currentItem] = currentWinner;
winningOffer[currentItem] = currentOffer;
reset();
}
function fulfillOffer(uint _id) payable public {
require(winners[_id] == msg.sender && winningOffer[_id] == msg.value && !fulfilled[_id]);
fulfilled[_id] = true;
}
function ongoingAuction() internal returns(bool){
return startingOffer == 0 && currentOffer == 0;
}
function reset() internal {
currentItem = 0;
startingOffer = 0;
currentOffer = 0;
currentWinner = address(0);
ticks = 0;
}
function getItemWinningOffer(uint _id) public returns(address,uint){
return (winners[_id], winningOffer[_id]);
}
function getItemWinningBidder(uint _id) public returns(address){
(address bidder, ) = getItemWinningOffer(_id);
return bidder;
}
function getItemWinningOffer(uint _id) public returns(uint){
(, uint winningOffer) = getItemWinningOffer(_id);
return winningOffer;
}
}
\end{lstlisting}\normalsize
In Listing.~\ref{dea:auctionhouse} we define monitor specifications that check: (i) that only one auction is held at a time, while any auction is automatically ended if fifteen minutes have passed since the last offer; and (ii) that any winning bidder is obliged to fulfill their offer, with their unfulfilled offers being cancelled if they attempt to bid more than three times before paying.
For the first property (Lines 42-56), we transition to the \texttt{AuctionStart} state when an auction starts (Line 50), while anytime an offer is made the last offer time variable (declared on Line 9) is updated (Line 51). When a winner is declared the monitor transitions back to the initial state (Line 52). Any attempt to start an auction while another is running and the time since last offer is less than fifteen minutes causes a transition to a bad state (Line 53), which means the call fails (Lines 36-38). If however the time since the last offer is more than or equal to fifteen minutes the contract is forced to declare a winner (see Lines 54 and Lines 11-15).
For the second property (Lines 61-75), we only have an initial and bad state. For this property we require a mapping to keep track of unfulfilled offers (defined on Line 4). When a winning offer is declared the offer is marked as unfulfilled (Line 68), while upon fulfillment it is marked as fulfilled (Line 69). When a bidder makes an offer and has no unfulfilled bids (checked with the function defined on Lines 25-30) then the offer is treated normally (Line 70), while if there are unfulfilled offers and the bid attempt counter for the bidder is less than three, the bid attempt counter for the bidder is increased (Line 71). If it is equal to or more than three then any unfulfilled bids are canceled and the bid is canceled (see Line 72, using the function defined on Lines 17-22). If the user attempts to fulfill a bid that has been canceled the property marks a violation (see Line 73) and cancels the transaction (Lines 36-38).
\small\begin{lstlisting}[language=DEA,basicstyle=\scriptsize,numbers=left,numbersep=2pt,xleftmargin=0.3cm,escapechar=\%,label={dea:auctionhouse},caption={A monitor that checks that only one auction is ongoing at any point in time and setting time limits on when a winning bid needs to be fulfilled.}]
monitor SmartAuctionHouse{
declarations{
mapping(address => uint) attemptsBeforeFullfillment;
mapping(uint => bool) cancelledItems;
mapping(uint => bool) unfulfilled;
mapping(address => uint[]) wonBids;
uint timeSinceLastOffer;
function forceDeclareWinner() private{
ticks = 3;
declareWinningOffer();
timeSinceLastOffer = 0;
}
function cancelAnyUnfulfilledBids(address _bidder) private{
for(uint i = wonBids[_bidder].length - 1; i >= 0; i--){
if(unfulfilled[wonBids[_bidder][i]]){
cancelledItems[wonBids[_bidder][i]] = true;
}
}
}
function areAnyUnfullfilled(address _bidder) private returns(bool){
for(uint i = wonBids[_bidder].length - 1; i >= 0; i--){
if(unfulfilled[wonBids[_bidder][i]]){
return true;
}
}
return false;
}
}
reparation{
revert();
}
//auctionOffItem cannot occur subsequently without declareWinningOffer in between
//if 15 minutes have passed since the last offer then automatically declare the winner
DEA OneAuctionAtATime{
states{
NoOngoingAuction: initial;
AuctionStart;
AuctionAttempted: bad;
}
transitions{
NoOngoingAuction -[after(auctionOffItem(_offerID, _startingOffer))]-> AuctionStart;
AuctionStart -[after(makeOffer(_offer)) | %$\tildearrow$% timeSinceLastOffer = now;]-> AuctionStart;
AuctionStart -[after(declareWinningOffer())]-> NoOngoingAuction;
AuctionStart -[before(auctionOffItem(_offerID, _startingOffer)) | now - timeSinceLastOffer < 15 minutes]-> AuctionAttempted;
AuctionStart -[before(auctionOffItem(_offerID, _startingOffer)) | now - timeSinceLastOffer >= 15 minutes %$\tildearrow$% forceDeclareWinner();]-> AuctionStart;
}
}
//if winning bid is not fulfilled within a day's time
// then the bidder is not allowed to bid on other items
// and any attempt to bid more than 3 times before paying then the winning bid is cancelled
DEA WinningBidsMustBeFulfilledOrCancelled{
states{
Initial: initial;
UnfulfilledBids: bad;
}
transitions{
Initial -[before(declareWinningOffer()) | %$\tildearrow$% unfulfilled[currentItem] = true;]-> Initial;
Initial -[after(fulfillOffer(_id)) | %$\tildearrow$% unfulfilled[_id] = false;]-> Initial;
Initial -[before(makeOffer(_offer)) | !areAnyUnfullfilled(msg.sender) %$\tildearrow$% attemptsBeforeFullfillment[msg.sender] = 0;]-> Initial;
Initial -[before(makeOffer(_offer)) | areAnyUnfullfilled(msg.sender) && attemptsBeforeFullfillment[msg.sender] < 3 %$\tildearrow$% attemptsBeforeFullfillment[msg.sender]++;]-> Initial;
Initial -[before(makeOffer(_offer)) | areAnyUnfullfilled(msg.sender) && attemptsBeforeFullfillment[msg.sender] >= 3 %$\tildearrow$% cancelAnyUnfulfilledBids(msg.sender); return;]-> UnfulfilledBids;
Initial -[before(fulfillOffer(_id)) | cancelledItems[_id]]-> UnfulfilledBids;
}
}
}
\end{lstlisting}\normalsize
\end{document}
|
{"hexsha": "e354e1a9a5e32719e19275382be2563c381abb1c", "size": 55138, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "use-cases/tutorial/main.tex", "max_stars_repo_name": "gordonpace/contractLarva", "max_stars_repo_head_hexsha": "80e4f5e6c7b1aa782fb04072bf11b265b9c327e7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2017-12-14T19:36:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T10:51:08.000Z", "max_issues_repo_path": "use-cases/tutorial/main.tex", "max_issues_repo_name": "gordonpace/contractLarva", "max_issues_repo_head_hexsha": "80e4f5e6c7b1aa782fb04072bf11b265b9c327e7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2017-12-18T19:45:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-24T16:38:58.000Z", "max_forks_repo_path": "use-cases/tutorial/main.tex", "max_forks_repo_name": "gordonpace/contractLarva", "max_forks_repo_head_hexsha": "80e4f5e6c7b1aa782fb04072bf11b265b9c327e7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-02-18T14:52:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-23T08:39:54.000Z", "avg_line_length": 53.3765730881, "max_line_length": 999, "alphanum_fraction": 0.7074431427, "num_tokens": 12850}
|
-----------------------------------------------------------------------------
-- |
-- Module : Data.Packed.Vector
-- Copyright : (c) Alberto Ruiz 2009
-- License : GPL
--
-- Maintainer : Alberto Ruiz <aruiz@um.es>
-- Stability : provisional
--
-- Random vectors and matrices.
--
-----------------------------------------------------------------------------
module Data.Packed.Random (
Seed,
RandDist(..),
randomVector,
gaussianSample,
uniformSample
) where
import Numeric.GSL.Vector
import Data.Packed
import Numeric.ContainerBoot
import Numeric.LinearAlgebra.Algorithms
-- | Seed for the pseudorandom number generator.
type Seed = Int
-- | Obtains a matrix whose rows are pseudorandom samples from a multivariate
-- Gaussian distribution with the given mean vector and covariance matrix.
gaussianSample :: Seed
               -> Int              -- ^ number of rows
               -> Vector Double    -- ^ mean vector
               -> Matrix Double    -- ^ covariance matrix
               -> Matrix Double    -- ^ result
gaussianSample seed n med cov =
    let dims    = dim med
        -- replicate the mean vector across all n rows
        centers = konst 1 n `outer` med
        -- n x dims matrix of standard-normal draws
        noise   = reshape dims (randomVector seed Gaussian (dims * n))
        -- colour the noise with the Cholesky factor and shift by the mean
    in  (noise `mXm` cholSH cov) `add` centers
-- | Obtains a matrix whose rows are pseudorandom samples from a multivariate
-- uniform distribution, with an independent range for each column.
uniformSample :: Seed
              -> Int                -- ^ number of rows
              -> [(Double,Double)]  -- ^ ranges for each column
              -> Matrix Double      -- ^ result
uniformSample seed n rgs =
    let (lows, highs) = unzip rgs
        lo      = fromList lows
        -- width of each column's interval
        spans   = zipWith (-) highs lows
        ncols   = dim lo
        -- one column of uniform [0,1) draws per range
        columns = toRows (reshape n (randomVector seed Uniform (n * ncols)))
        -- replicate the lower bounds across all n rows
        offsets = konst 1 n `outer` lo
    in  fromColumns (zipWith scale spans columns) `add` offsets
|
{"hexsha": "dabb17d72b859b4059537317683806d2e9328426", "size": 1678, "ext": "hs", "lang": "Haskell", "max_stars_repo_path": "benchmarks/hmatrix-0.15.0.1/lib/Data/Packed/Random.hs", "max_stars_repo_name": "curiousleo/liquidhaskell", "max_stars_repo_head_hexsha": "a265c044159480b3ddedbbf4982736a33ec8872c", "max_stars_repo_licenses": ["MIT", "BSD-3-Clause"], "max_stars_count": 941, "max_stars_repo_stars_event_min_datetime": "2015-01-13T10:51:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T10:32:32.000Z", "max_issues_repo_path": "benchmarks/hmatrix-0.15.0.1/lib/Data/Packed/Random.hs", "max_issues_repo_name": "curiousleo/liquidhaskell", "max_issues_repo_head_hexsha": "a265c044159480b3ddedbbf4982736a33ec8872c", "max_issues_repo_licenses": ["MIT", "BSD-3-Clause"], "max_issues_count": 1300, "max_issues_repo_issues_event_min_datetime": "2015-01-01T05:41:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:11:03.000Z", "max_forks_repo_path": "benchmarks/hmatrix-0.15.0.1/lib/Data/Packed/Random.hs", "max_forks_repo_name": "curiousleo/liquidhaskell", "max_forks_repo_head_hexsha": "a265c044159480b3ddedbbf4982736a33ec8872c", "max_forks_repo_licenses": ["MIT", "BSD-3-Clause"], "max_forks_count": 145, "max_forks_repo_forks_event_min_datetime": "2015-01-12T08:34:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T02:29:30.000Z", "avg_line_length": 28.9310344828, "max_line_length": 77, "alphanum_fraction": 0.5643623361, "num_tokens": 399}
|
"""KAGRA/LIGO Foton related utilities.
"""
import numpy as np
import kontrol.core.controlutils
import kontrol.logger
def tf2foton(
        tf, expression="zpk", root_location="s", decimal_places=6,
        itol=1e-25, epsilon=1e-25):
    """Convert a single transfer function to foton expression.

    Parameters
    ----------
    tf : TransferFunction
        The transfer function object.
    expression : str, optional
        Format of the foton expression.
        Choose from ["zpk", "rpoly"].
        Defaults to "zpk".
    root_location : str, optional
        Root location of the zeros and poles for expression=="zpk".
        Choose from ["s", "f", "n"].
        "s": roots in s-plane, i.e. zpk([...], [...], ..., "s").
        "f": roots in frequency plane, i.e. zpk([...], [,,,], ..., "f").
        "n": roots in frequency plane but negated and gains are normalized,
        i.e. real parts are positive zpk([...], [...], ..., "n").
        Defaults to "s".
    decimal_places : int, optional
        Number of decimal places to print out.
        Defaults to 6.
    itol : float, optional
        Treating complex roots as real roots if the ratio of
        the imaginary part and the real part is smaller than this tolerance
        Defaults to 1e-25.
    epsilon : float, optional
        Small number to add to denominator to prevent division error.
        Defaults to 1e-25.

    Returns
    -------
    foton_expression : str
        The foton expression in selected format.

    Raises
    ------
    ValueError
        If ``expression`` is not "zpk" or "rpoly".
    """
    if expression not in ["zpk", "rpoly"]:
        raise ValueError("expression {} not available."
                         "Please select expression from [\"zpk\", \"rpoly\"."
                         "".format(expression))
    ## Divide tf into tfs with less than 20 order.
    ## Do tf conversion.
    ## Stack the string
    foton_expression = ""
    tf_list = kontrol.core.controlutils.tf_order_split(tf, max_order=20)
    if len(tf_list) > 1:
        kontrol.logger.logger.warning("The transfer function has "
                                      "order higher than 20. This is not "
                                      "supported by KAGRA's Foton software. "
                                      "The Foton expression is splitted into "
                                      "multiple expressions with less order.")
    for tf_ in tf_list:
        if expression == "zpk":
            # BUG FIX: decimal_places was accepted by this function but never
            # forwarded, so callers could not control the printed precision.
            foton_expression += tf2zpk(
                tf_, root_location=root_location,
                decimal_places=decimal_places, itol=itol, epsilon=epsilon)
        elif expression == "rpoly":
            foton_expression += tf2rpoly(tf_, decimal_places=decimal_places)
        else:
            foton_expression += ""
            print("If you see this, contact maintainer.")
        foton_expression += "\n\n"
    foton_expression = foton_expression.rstrip("\n\n")
    return foton_expression
def tf2zpk(tf, root_location="s", decimal_places=6, itol=1e-25, epsilon=1e-25):
    """Convert a single transfer function to foton zpk expression.
    Parameters
    ----------
    tf : TransferFunction
        The transfer function object.
    root_location : str, optional
        Root location of the zeros and poles.
        Choose from ["s", "f", "n"].
        "s": roots in s-plane, i.e. zpk([...], [...], ..., "s").
        "f": roots in frequency plane, i.e. zpk([...], [,,,], ..., "f").
        "n": roots in frequency plane but negated and gains are normalized,
        i.e. real parts are positive zpk([...], [...], ..., "n").
        Defaults to "s".
    decimal_places : int, optional
        Number of decimal places to print out.
        Defaults to 6.
    itol : float, optional
        Treating complex roots as real roots if the ratio of
        the imaginary part and the real part is smaller than this tolerance
        Defaults to 1e-25.
    epsilon : float, optional
        Small number to add to denominator to prevent division error.
        Defaults to 1e-25.
    Returns
    -------
    str
        The foton zpk expression in selected format.
    Notes
    -----
    Only works for transfer functions with less than 20 orders.
    """
    # if _order_gt(tf, 20):
    #     raise ValueError("Order of transfer function is not less than 20")
    if root_location not in ["s", "f", "n"]:
        raise ValueError("Select root_location from [\"s\", \"f\", \"n\"]")
    zeros = tf.zero()
    poles = tf.pole()
    str_zeros = ""  # String of list of zeros (placeholder)
    str_poles = ""  # String of list of poles (placeholder)
    ## get zeros and poles.
    # For "f" and "n" the roots are expressed in Hz instead of rad/s.
    if root_location in ["f", "n"]:
        zeros /= 2*np.pi
        poles /= 2*np.pi
        # "n" additionally negates/conjugates the roots so that stable roots
        # are written with positive real parts.
        if root_location == "n":
            zeros = -zeros.conjugate()
            poles = -poles.conjugate()
    ## get zeros and poles list, and sort.
    # Sort keys are the magnitudes of the original s-plane roots;
    # z_sort_arg/p_sort_arg order the printed roots by natural frequency.
    z_wn = np.sqrt(tf.zero().real**2 + tf.zero().imag**2)
    p_wn = np.sqrt(tf.pole().real**2 + tf.pole().imag**2)
    z_sort_arg = z_wn.argsort()
    p_sort_arg = p_wn.argsort()
    z_wn.sort()
    p_wn.sort()
    ## get gain
    # Leading numerator coefficient of the minimal realization.
    gain = tf.minreal().num[0][0][0]
    # For "n", normalize the gain by the root magnitudes; roots at the origin
    # contribute a factor of 2*pi instead.
    # NOTE(review): the magnitudes used here are the s-plane ones (not divided
    # by 2*pi) — confirm this matches Foton's "n"-plane gain convention.
    if root_location in ["n"]:
        for wn in p_wn:
            if wn != 0:
                gain /= wn
            else:
                gain /= 2*np.pi
        for wn in z_wn:
            if wn != 0:
                gain *= wn
            else:
                gain *= 2*np.pi
    ## Convert to zpk expressing string
    # Roots whose |imag|/|real| ratio is below itol are printed as real.
    for zero in zeros[z_sort_arg]:
        if abs(zero.imag)/abs(zero.real+epsilon) < itol:
            str_zeros += "{:.{}f}".format(zero.real, decimal_places)
        else:
            str_zeros += "{:.{dp}f}+i*{:.{dp}f}".format(
                zero.real, zero.imag, dp=decimal_places)
        str_zeros += ";"
    for pole in poles[p_sort_arg]:
        if abs(pole.imag)/abs(pole.real+epsilon) < itol:
            str_poles += "{:.{}f}".format(pole.real, decimal_places)
        else:
            str_poles += "{:.{dp}f}+i*{:.{dp}f}".format(
                pole.real, pole.imag, dp=decimal_places)
        str_poles += ";"
    str_zeros = str_zeros.rstrip(";")
    str_poles = str_poles.rstrip(";")
    zpk_expression = "zpk([{}],[{}],{:.{}f},\"{}\")".format(
        str_zeros, str_poles, gain, decimal_places, root_location)
    return zpk_expression
def tf2rpoly(tf, decimal_places=6):
    """Convert a transfer function to foton rpoly expression.
    Parameters
    ----------
    tf : TransferFunction
        The transfer function object
    decimal_places : int, optional
        Number of decimal places to print out.
        Defaults to 6.
    Returns
    -------
    str :
        Foton express in foton rpoly expression.
    Notes
    -----
    Only works for transfer functions with less than 20 orders.
    """
    if _order_gt(tf, 20):
        raise ValueError("Order of transfer function is not less than 20")
    num = tf.minreal().num[0][0]
    den = tf.minreal().den[0][0]
    str_num = ""  ## String of numerator coefficients
    str_den = ""  ## String of denominator coefficients
    # The leading numerator coefficient is factored out as the overall gain
    # and the numerator is normalized so its leading coefficient is 1.
    # NOTE(review): the denominator is not normalized the same way — confirm
    # this matches Foton's rpoly convention.
    gain = num[0]
    num /= num[0]
    for coef in num:
        str_num += "{:.{}f}".format(coef, decimal_places)
        str_num += ";"
    for coef in den:
        str_den += "{:.{}f}".format(coef, decimal_places)
        str_den += ";"
    str_num = str_num.rstrip(";")
    str_den = str_den.rstrip(";")
    # NOTE(review): gain is printed with default float formatting, not with
    # decimal_places — confirm whether full precision is intended here.
    rpoly_expression = "rpoly([{}],[{}],{})".format(str_num, str_den, gain)
    return rpoly_expression
def _order(tf):
"""Returns the number of coefficients in numerator and denominator
Parameters
----------
tf : TransferFunction
The transfer function object
Returns
-------
nnum : int
Number of coefficients in numerator.
nden : int
Number of coefficients in denominator.
"""
nnum = len(tf.minreal().num[0][0])
nden = len(tf.minreal().den[0][0])
return nnum, nden
def _order_gt(tf, order):
    """Returns true if transfer function order is greater than the specified.

    Parameters
    ----------
    tf : TransferFunction
        The transfer function object.
    order : int
        Order threshold.

    Returns
    -------
    boolean
        True if order(tf) > order, False otherwise.
    """
    # The effective order is the larger of the numerator/denominator lengths.
    return max(_order(tf)) > order
def notch(frequency, q, depth, decimal_places=6):
    """Returns the foton expression of a notch filter.

    Parameters
    ----------
    frequency : float
        The notch frequency (Hz).
    q : float
        The quality factor.
    depth : float
        The depth of the notch filter (magnitude).
    decimal_places : int, optional
        Number of decimal places to print out.
        Defaults to 6.

    Returns
    -------
    str
        The foton representation of this notch filter.
    """
    # Foton expects the depth in decibels, not raw magnitude.
    depth_in_db = 20 * np.log10(depth)
    arguments = ",".join(
        "{:.{}f}".format(value, decimal_places)
        for value in (frequency, q, depth_in_db))
    return "notch({})".format(arguments)
|
{"hexsha": "6997930eb07d17891b25a07dc8061992f278c88d", "size": 8868, "ext": "py", "lang": "Python", "max_stars_repo_path": "kontrol/core/foton.py", "max_stars_repo_name": "terrencetec/kontrol", "max_stars_repo_head_hexsha": "ba6461784e38d01399efeb7a42911259f9254db0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-08-31T10:34:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-10T20:48:59.000Z", "max_issues_repo_path": "kontrol/core/foton.py", "max_issues_repo_name": "terrencetec/kontrol", "max_issues_repo_head_hexsha": "ba6461784e38d01399efeb7a42911259f9254db0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2020-06-16T18:38:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T00:48:55.000Z", "max_forks_repo_path": "kontrol/core/foton.py", "max_forks_repo_name": "terrencetec/kontrol", "max_forks_repo_head_hexsha": "ba6461784e38d01399efeb7a42911259f9254db0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6714285714, "max_line_length": 79, "alphanum_fraction": 0.5705908886, "include": true, "reason": "import numpy", "num_tokens": 2211}
|
/*!
* Copyright (C) tkornuta, IBM Corporation 2015-2019
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*!
* \file image_encoder_test.cpp
* \brief Program for testing ImageEncoder and Visualization.
* \author tkornuta
* \date Nov 20, 2015
*/
#include <boost/thread/thread.hpp>
#include <boost/bind.hpp>
#include <types/Color.hpp>
#include <importers/RawTextImporter.hpp>
#include <encoders/CharMatrixXfEncoder.hpp>
#include <logger/Log.hpp>
#include <logger/ConsoleOutput.hpp>
using namespace mic::logger;
#include <application/ApplicationState.hpp>
/*!
* \brief Main program function - tests character importer and encoder.
* \author tkornuta
* @param[in] argc Number of parameters - not used.
* @param[in] argv List of parameters - not used.
* @return (not used)
*/
int main(int argc, char* argv[]) {
// Set console output to logger.
LOGGER->addOutput(new ConsoleOutput());
LOG(LINFO) << "Logger initialized. Starting application";
// Manualy set sdr and batch size.
size_t sdr_size = 128;
size_t batch_size = 2;
mic::encoders::CharMatrixXfEncoder encoder(sdr_size);
// Load dataset.
mic::importers::RawTextImporter importer;
// Manually set paths. DEPRICATED! Used here only for simplification of the test.
importer.setDataFilename("/Users/tkornut/Documents/workspace/machine-intelligence-core/data/txt/pl/ep-06-01-16-003.txt");
importer.setBatchSize(batch_size);
if (!importer.importData())
return -1;
LOG(LINFO)<<"There were " << importer.classes() << " distinctive classes imported";
// Main application loop.
while (!APP_STATE->Quit()) {
// If not paused.
if (!APP_STATE->isPaused()) {
// If single step mode - pause after the step.
if (APP_STATE->isSingleStepModeOn())
APP_STATE->pressPause();
// Random select sample.
mic::types::CharSample sample = importer.getRandomSample();
// Encode the selected sample into SDR.
std::shared_ptr<mic::types::MatrixXf> sdr = encoder.encodeSample(sample.data());
// Decode SDR.
std::shared_ptr<char> dec_char = encoder.decodeSample(sdr);
// Display result.
LOG(LINFO)<<" Orig = '" << *(sample.data()) << "' decoded SDR = '" << (*dec_char) << "' label = '" << *(sample.label()) << "'";
// Get next batch.
mic::types::CharBatch batch = importer.getNextBatch();
LOG(LINFO)<<" Batch: ";
for (size_t i=0; i < batch.size(); i++ ) {
LOG(LINFO)<<" ["<<i<< "] = '" << *(batch.data(i)) <<"'";
}//: for
// Encode the whole batch.
std::shared_ptr<mic::types::MatrixXf> batch_matrix = encoder.encodeBatch(batch.data());
LOG(LDEBUG)<<" Batched matrix: ";
LOG(LDEBUG) << (*batch_matrix);
// Decode batch matrix.
std::vector<std::shared_ptr<char> > decoded_batch = encoder.decodeBatch(batch_matrix);
LOG(LINFO)<<" Decoded batch: ";
for (size_t i=0; i < decoded_batch.size(); i++ ) {
LOG(LINFO)<<" ["<<i<< "] = '" << *(decoded_batch[i]) <<"'";
}//: for
// Check if the batch was the last one.
if (importer.isLastBatch())
break;
}//: if ! paused
// Sleep.
APP_SLEEP();
}//: while
LOG(LINFO) << "Terminating application";
}//: main
|
{"hexsha": "047a8a852eee9273275079e335d7b1616751edf1", "size": 3645, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/tests/char_encoder_test.cpp", "max_stars_repo_name": "kant/mi-algorithms", "max_stars_repo_head_hexsha": "7e510577f57cb5e7d36c9d2506b61395739b0bef", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tests/char_encoder_test.cpp", "max_issues_repo_name": "kant/mi-algorithms", "max_issues_repo_head_hexsha": "7e510577f57cb5e7d36c9d2506b61395739b0bef", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tests/char_encoder_test.cpp", "max_forks_repo_name": "kant/mi-algorithms", "max_forks_repo_head_hexsha": "7e510577f57cb5e7d36c9d2506b61395739b0bef", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-07-30T09:51:14.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-30T09:51:14.000Z", "avg_line_length": 28.4765625, "max_line_length": 130, "alphanum_fraction": 0.672702332, "num_tokens": 957}
|
program nemo2d
    !! Main driver: initialize the mesh and kernel, then advance the
    !! hydrodynamics in time, writing snapshot dumps at fixed intervals.
    use globals_mod
    use setup_mod
    use mesh_mod, only: mesh_t
    use mesh_mod, only: mesh_init
    use mesh_mod, only: mesh_dump
    use kernel_mod, only: kernel_init
    use timedisc_mod, only: evolve
    use timestep_mod, only: calc_timestep
    use setup_mod, only: setup_init
# ifdef _OPENMP
    USE OMP_LIB, only: omp_get_thread_num
    USE OMP_LIB, only: omp_get_num_threads
# endif
    !! simtime: simulation clock; timestep: actual step taken;
    !! hydrotimestep: raw CFL estimate before scaling.
    real(dp) :: simtime, timestep, nextdump, hydrotimestep
    integer :: nsteps,ndumps
    logical :: dodump, doloop
    type(mesh_t) :: mesh
    character(len=*), parameter :: iohdr = "(5x,a4,5x,a5,5x,a10,8x,a10)"
    character(len=*), parameter :: iofmt = "(2(I9),2(ES18.5))"
    !! Effective CFL factor accounting for dimensionality and node count.
    real(dp), parameter :: CFL_eff = 0.5**(N_DIMS-1) / REAL(2*N_NODES-1,dp) * CFL
    write (*,*) ' . '
    write (*,*) ' ____ _ ":" '
    write (*,*) ' _ __ ___ _ __ ___ ___|___ \ __| | ___:____ |"\/"| '
    write (*,*) " | '_ \ / _ \ '_ ` _ \ / _ \ __) / _` | ,' `. \ / "
    write (*,*) " | | | | __/ | | | | | (_) / __/ (_| | | O \___/ | "
    write (*,*) " |_| |_|\___|_| |_| |_|\___/_____\__,_| ~^~^~^~^~^~^~^~^~^~^~^~^~"
    write (*,*) " ~^~^ ~^~ ^~^~^ ~^~^ "
    write (*,*)
# ifdef _OPENMP
    !$OMP Parallel
    if (omp_get_thread_num() == 0) then
        write (*,*) ' ## OPENMP IS ACTIVE: number of threads',omp_get_num_threads()
    end if
    !$OMP end Parallel
    write (*,*)
# endif
    call kernel_init()
    call mesh_init(mesh)
    call setup_init(mesh)
    simtime = inittime
    timestep = 0.0
    ! BUG FIX: hydrotimestep was previously printed below before its first
    ! assignment inside the main loop; initialize it so the first status
    ! line is well-defined.
    hydrotimestep = 0.0
    nsteps = 0
    ndumps = 0
    call mesh_dump(mesh,ndumps)
    write (*,iohdr) '#io','#steps','simtime','timestep'
    write (*,*)
    write (*,iofmt) ndumps,nsteps, simtime, hydrotimestep
    nextdump = simtime + dtdump
    ndumps = ndumps + 1
    dodump = .false.
    doloop = .true.
    do while (doloop)
        ! Raw stable step from the CFL condition, then scale down.
        hydrotimestep = calc_timestep(mesh)
        timestep = CFL_eff*hydrotimestep
        ! Clip the step so we land exactly on the stop time or the next dump.
        if (simtime + timestep > stoptime) then
            timestep = abs(stoptime - simtime)
            doloop = .false.
            dodump = .true.
        else if (simtime + timestep > nextdump) then
            timestep = abs(nextdump - simtime)
            dodump = .true.
        end if
        call evolve(mesh,simtime,timestep)
        nsteps = nsteps + 1
        simtime = simtime + timestep
        if (dodump) then
            call mesh_dump(mesh,ndumps)
            write (*,iofmt) ndumps,nsteps, simtime, hydrotimestep
            nextdump = simtime + dtdump
            ndumps = ndumps + 1
            dodump = .false.
        end if
    end do
end program
|
{"hexsha": "01094308c0e68a6bc986222f9fd766bbbe938a7d", "size": 2549, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "source/nemo2d_prog.f90", "max_stars_repo_name": "jmark/nemo2d", "max_stars_repo_head_hexsha": "a508f192d0f6da49e485ee9c8d1c049dbb81d033", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/nemo2d_prog.f90", "max_issues_repo_name": "jmark/nemo2d", "max_issues_repo_head_hexsha": "a508f192d0f6da49e485ee9c8d1c049dbb81d033", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/nemo2d_prog.f90", "max_forks_repo_name": "jmark/nemo2d", "max_forks_repo_head_hexsha": "a508f192d0f6da49e485ee9c8d1c049dbb81d033", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7475728155, "max_line_length": 81, "alphanum_fraction": 0.562965869, "num_tokens": 835}
|
#!/usr/local/bin/python3
'''
This script trains a new (predictor) model and stores result in the datadir.
Text feature extraction needs to be separately.
'''
import sys
import argparse
import os
import numpy as np
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.svm import SVC
from sklearn.externals import joblib
from sklearn.model_selection import KFold
import fasttext
# Import our custom scripts
sys.path.append('libs/')
import textfeatures
import fileio
import classification
def main(argv):
    """Train a (predictor) model on text features and store it in outputdir.

    Parameters
    ----------
    argv : list of str
        Command line arguments (excluding the program name).

    Raises
    ------
    ValueError
        If ``--classifier`` is not one of ``RF`` or ``SVM``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input', help='Input')
    parser.add_argument('--annotations', help='Directory for annotated files (extends training material)', default='')
    parser.add_argument('--outputdir', help='Output directory', required=True)
    parser.add_argument('--featurename', help='Feature extraction name', required=True)
    parser.add_argument('--featurefile', help='Feature extraction file', required=True)
    parser.add_argument('--classifier', help='Predictor file', required=True)
    # NOTE(review): --kfold is parsed but never used below — confirm whether
    # cross-validation was meant to be wired in.
    parser.add_argument('--kfold', help='Number of splits in K-Fold', required=False)
    args = parser.parse_args(argv)
    print('Inputs:')
    print(args)
    # Load training data
    # TODO: This data should come from real database
    print('Loading training data')
    y, messages, classes = fileio.read_fasttext_train_file(args.input)
    y = np.array(y)
    print(len(messages), y.shape)
    # Optionally extend training material with annotated files.
    if len(args.annotations) > 0:
        newmessages, labels = fileio.read_annotated_files(args.annotations)
        y = np.hstack((y, labels))
        messages += newmessages
        print(len(messages), len(y))
    if len(messages) != len(y):
        print("Data shape and annotations do not match!")
        return 0
    #TODO: We need to have BoW Features here too..
    # Load FastText textfeatures
    print('Loading text feature extractor')
    feature_extractor = textfeatures.FeatureExtractor(method=args.featurename,
                                                      filename=args.featurefile)
    # Extract text features from training data
    print('Extracting text features from training data')
    x = feature_extractor.extract(messages)
    print("Number of samples x features: %d x %d" % (x.shape[0], x.shape[1]))
    # Collapse labels into a binary problem (hate speech vs. not).
    y = np.array(y > 0, dtype=int)
    # Class balance diagnostics.
    # NOTE(review): the names look swapped (n_neg counts the positives) —
    # these values are only printed and do not affect training.
    n_neg = np.sum(y)
    n_pos = np.sum(y == 0)
    print(n_neg, n_pos)
    # Train the model
    # TODO: It would make sense to define training as a pipeline so that all the
    # parameters could be given in
    print('Training a new model..')
    if args.classifier.upper() == 'RF':
        clf = RF().fit(x, y)
    elif args.classifier.upper() == 'SVM':
        clf = SVC(kernel='linear', probability=True).fit(x, y)
    else:
        # BUG FIX: an unknown classifier name previously left `clf` undefined
        # and crashed later with NameError; fail fast with a clear message.
        raise ValueError("Unknown classifier %r: choose 'RF' or 'SVM'"
                         % args.classifier)
    # Save the model
    #TODO: The name of the file should be also depend on the method
    predictor_model_file = os.path.join(args.outputdir, args.featurename +
                                        '_' + args.classifier + '.pkl')
    # BUG FIX: exist_ok avoids the race between the previous `== False`
    # existence check and the directory creation.
    os.makedirs(os.path.dirname(predictor_model_file), exist_ok=True)
    print('Storing the result file in %s' % predictor_model_file)
    joblib.dump(clf, predictor_model_file)
# Script entry point: forward CLI arguments (without the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
|
{"hexsha": "b0c429e023bf66d7a17b7f541d8209d43e7419b0", "size": 3415, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "futurice/spice-hate_speech_detection", "max_stars_repo_head_hexsha": "ddaed64428a931f4f22748a611b5865ed037a52f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2017-05-03T15:08:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-25T06:49:57.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "futurice/spice-hate_speech_detection", "max_issues_repo_head_hexsha": "ddaed64428a931f4f22748a611b5865ed037a52f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-05-03T15:07:03.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-18T03:20:59.000Z", "max_forks_repo_path": "train.py", "max_forks_repo_name": "futurice/spice-hate_speech_detection", "max_forks_repo_head_hexsha": "ddaed64428a931f4f22748a611b5865ed037a52f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2017-07-14T06:32:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-28T21:53:46.000Z", "avg_line_length": 32.5238095238, "max_line_length": 118, "alphanum_fraction": 0.6799414348, "include": true, "reason": "import numpy", "num_tokens": 770}
|
[STATEMENT]
lemma find_path_fields[THEN mp]:
"find_path_ty_f P ty = Some path \<longrightarrow> (\<exists>fs. fields_in_path_f path = fs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. find_path_ty_f P ty = Some path \<longrightarrow> (\<exists>fs. fields_in_path_f path = fs)
[PROOF STEP]
by (force)
|
{"llama_tokens": 118, "file": "LightweightJava_Lightweight_Java_Proof", "length": 1}
|
# VERSION 2
# Add user id as feature
#
#
#
import tensorflow as tf
import tensorflow.contrib as tc
import numpy as np
import tensorflow.contrib.keras as keras
# import tensorflow.contrib as slim
VERSION = "v3"  # Version tag for this model script.
IS_TRAINING = True  # Build the training graph (dropout on, shuffled batches).
NUM_EPOCHS = 1000000  # Epoch limit for the input-file queue.
# IS_TRAINING = False
# NUM_EPOCHS = 1
LEARNING_RATE = 0.01  # Adam optimizer learning rate.
def read_and_decode(filename):
    """Build graph nodes that read one (feature, label) pair from a TFRecord.

    Parameters
    ----------
    filename : str
        Path to the TFRecord file to read from.

    Returns
    -------
    (Tensor, Tensor)
        The 25-element float64 feature vector and the int64 label.
    """
    # Queue the file for NUM_EPOCHS passes.
    queue = tf.train.string_input_producer([filename], num_epochs=NUM_EPOCHS)
    reader = tf.TFRecordReader()
    _, record = reader.read(queue)  # Filename
    parsed = tf.parse_single_example(
        record,
        features={
            'label': tf.FixedLenFeature([], tf.float32),
            'feature': tf.FixedLenFeature([25], tf.float32),
        })
    target = tf.cast(parsed['label'], tf.int64)
    inputs = tf.cast(parsed['feature'], tf.float64)
    # Drop user id content's
    # inputs = tf.concat([inputs[:11], inputs[12:]], axis=0)
    return inputs, target
class RDWModel(object):
    def __init__(self):
        """Construct the model and immediately build the full TF graph."""
        self._build_model()
def _build_model(self):
if IS_TRAINING:
self.dropout_prob = 0.65
self.pos_fix = "train"
else:
self.dropout_prob = 1
self.pos_fix = "test"
with tf.name_scope("Config"):
self.global_step = tf.Variable(0, name="global_step", trainable=False, dtype=tf.int32)
# Read dest embedding data
self.destination_embedding = tf.Variable(
tf.convert_to_tensor(np.load("../data/destinations.npy"), dtype=tf.float64), trainable=False,
name="des_embedding")
with tf.name_scope("Input" + self.pos_fix):
if IS_TRAINING is True:
feature, label = read_and_decode("../data/train-13-all-book-type.tfrecords")
self.feature, self.label_batch = tf.train.shuffle_batch([feature, label], batch_size=128, num_threads=3,
capacity=2000,
min_after_dequeue=1000,
allow_smaller_final_batch=False)
else:
feature, label = read_and_decode("../data/train-14.tfrecords")
self.feature, self.label_batch = tf.train.batch([feature, label], batch_size=512, num_threads=3,
capacity=2000,
allow_smaller_final_batch=True)
# Load test Data-set
#
# self.input = tf.placeholder(tf.float32, shape=[None, 24], name="user_input")
# self.target_label = tf.placeholder(tf.float32, shape=[None, 1])
with tf.name_scope("Des_Embedding"):
# Time duriation
src_ci_month = self.add_bucket_embedding(tf.cast(self.feature[:, 0], tf.int64), 12, 8, "src_ci_month")
src_ci_day = self.add_bucket_embedding(tf.cast(self.feature[:, 1], tf.int64), 31, 8, "src_ci_day")
src_co_month = self.add_bucket_embedding(tf.cast(self.feature[:, 2], tf.int64), 12, 8, "src_co_month")
src_co_day = self.add_bucket_embedding(tf.cast(self.feature[:, 3], tf.int64), 31, 8, "src_co_day")
self.time_feature = tf.concat([src_ci_month, src_ci_day, src_co_day, src_co_month], axis=1)
self.time_feature = self.add_norm(self.time_feature, 4 * 8)
self.time_feature = self.add_fc_stack_layers(self.time_feature, [64, 128, 256, 128])
# Source
is_mobile = self.add_bucket_embedding(tf.cast(self.feature[:, 12], tf.int64), 2, 8, "is_mobile")
is_package = self.add_bucket_embedding(tf.cast(self.feature[:, 13], tf.int64), 2, 8, "is_package")
channel = self.add_bucket_embedding(tf.cast(self.feature[:, 14], tf.int64), 10000, 8, "channel")
site_name = self.add_bucket_embedding(tf.cast(self.feature[:, 5], tf.int64), 1000, 8, "site_name")
posa_continent = self.add_bucket_embedding(tf.cast(self.feature[:, 6], tf.int64), 100, 8, "posa_continent")
self.source_feature = tf.concat([is_mobile, is_package, channel, site_name, posa_continent], axis=1)
self.source_feature = self.add_norm(self.source_feature, 5 * 8)
self.source_feature = self.add_fc_stack_layers(self.source_feature, [128, 256, 256, 128])
# Destination
des_embedding_feature = tf.nn.embedding_lookup(self.destination_embedding,
tf.cast(self.feature[:, 18], tf.int64))
des_type_id = self.add_bucket_embedding(tf.cast(self.feature[:, 19], tf.int64), 100000, 8, "des_type_id")
# Hotel info
h_continent = self.add_bucket_embedding(tf.cast(self.feature[:, 22], tf.int64), 100, 8, "h_continent")
h_contry = self.add_bucket_embedding(tf.cast(self.feature[:, 23], tf.int64), 1000, 8, "h_contry")
h_market = self.add_bucket_embedding(tf.cast(self.feature[:, 24], tf.int64), 100000, 8, "h_market")
self.des_feature = tf.concat([des_embedding_feature, des_type_id, h_market, h_contry, h_continent], axis=1)
self.des_feature = self.add_norm(self.des_feature, 4 * 8 + 149)
self.des_feature = self.add_fc_stack_layers(self.des_feature, [256, 512, 512, 256])
# User info
u_loc_contry = self.add_bucket_embedding(tf.cast(self.feature[:, 7], tf.int64), 1000, 8, "u_loc_contry")
u_loc_region = self.add_bucket_embedding(tf.cast(self.feature[:, 8], tf.int64), 100000, 8, "u_loc_region")
u_loc_city = self.add_bucket_embedding(tf.cast(self.feature[:, 9], tf.int64), 100000, 8, "u_loc_city")
self.user_feature = tf.concat([u_loc_city, u_loc_region, u_loc_contry, self.feature[:, 10:11]], axis=1)
self.user_feature = self.add_norm(self.user_feature, 3 * 8 + 1)
self.user_feature = self.add_fc_stack_layers(self.user_feature, [64, 128, 128])
# Query Requirements
self.query_feature = tf.concat([self.feature[:, 15:18]], axis=1)
self.query_feature = self.add_norm(self.query_feature, 3)
self.query_feature = self.add_fc_stack_layers(self.query_feature, [64, 128, 256, 128])
# other feature
tran_month = self.add_bucket_embedding(tf.cast(self.feature[:, 4], tf.int64), 12, 8, "trans_month")
booking = self.add_bucket_embedding(tf.cast(self.feature[:, 20], tf.int64), 2, 8, "is_booking")
self.other_feature = tf.concat([tran_month, booking], axis=1)
self.other_feature = self.add_norm(self.other_feature, 16)
self.other_feature = self.add_fc_stack_layers(self.other_feature, [64, 128, 128])
# user id
user_id = self.add_bucket_embedding(tf.cast(self.feature[:, 11], tf.int64), 100000, 8, "user_id")
self.user_id_feature = self.add_norm(user_id, 8)
self.stack_features = tf.concat([self.time_feature,
self.source_feature,
self.des_feature,
self.user_feature,
self.query_feature,
self.other_feature,
self.user_id_feature], axis=1)
self.feature_weight = tf.Variable(tf.ones([self.stack_features.get_shape()[-1]], dtype=tf.float64))
self.stack_features = tf.multiply(self.stack_features, self.feature_weight)
with tf.name_scope("FC"):
self.net = self.add_fc_stack_layers(self.stack_features, [1024])
self.net = self.add_fc_stack_layers(self.stack_features, [1024])
self.net = self.add_fc_stack_layers(self.stack_features, [512])
self.net = self.add_fc_stack_layers(self.stack_features, [512])
self.net = self.add_fc_stack_layers(self.stack_features, [256])
with tf.name_scope("Output"):
self.output = tc.layers.fully_connected(self.net, 100, activation_fn=None)
with tf.name_scope("Batch_eval"):
self.num_correct_prediction = tf.reduce_sum(
tf.cast(tf.equal(self.label_batch, tf.argmax(self.output, 1)), tf.float32))
self.mAP, self.mAP_update = tc.metrics.streaming_sparse_average_precision_at_k(self.output,
self.label_batch, 5)
if IS_TRAINING is False:
return
with tf.name_scope("Loss"):
# self.label_vector = tf.one_hot(self.label_batch, 100, dtype=tf.float64)
# self.s_output = tf.nn.softmax(self.output)
# self.loss = tf.reduce_mean(
# tf.reduce_sum(keras.backend.binary_crossentropy(self.s_output, self.label_vector, from_logits=False),
# axis=1))
# self.loss = tf.reduce_mean(
# keras.backend.sparse_categorical_crossentropy(output=self.output, target=self.label_batch,
# from_logits=True))
self.loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_batch, logits=self.output))
tf.summary.scalar('loss', self.loss)
with tf.name_scope("Train"):
self.train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(self.loss)
self.increase_step = self.global_step.assign_add(1)
def _add_fc_layer(self, layer_input, size, activation_fn=tf.nn.relu, dropout=True, norm=True):
    """One fully-connected layer, optionally followed by batch-norm and dropout.

    Args:
        layer_input: input tensor.
        size: number of output units.
        activation_fn: activation applied by the FC layer.
        dropout: dropout (keep prob self.dropout_prob) is applied only when
            this is exactly True.
        norm: when truthy, apply self.add_norm after the FC layer.
    """
    result = tc.layers.fully_connected(layer_input, size, activation_fn=activation_fn)
    if norm:
        result = self.add_norm(result, size=size)
    if dropout is True:
        result = tf.nn.dropout(result, self.dropout_prob)
    return result
def add_fc_stack_layers(self, inputs, layer_configure, norm=True):
    """Stack fully-connected layers with the widths listed in layer_configure.

    Dropout is enabled only while training (module-level IS_TRAINING flag).
    """
    hidden = inputs
    for width in layer_configure:
        hidden = self._add_fc_layer(hidden, width, dropout=IS_TRAINING, norm=norm)
    return hidden
@staticmethod
def add_bucket_embedding(inputs, bucket_size, dim, name):
    """Bucketize integer ids by modulo and look up a trainable float64 embedding.

    Args:
        inputs: integer id tensor; ids are folded into [0, bucket_size) via mod.
        bucket_size: number of hash buckets (rows of the embedding table).
        dim: embedding dimensionality.
        name: variable scope holding the embedding table.
    """
    with tf.variable_scope(name):
        table = tf.Variable(
            tf.random_uniform([bucket_size, dim], -1.0, 1.0, dtype=tf.float64),
            dtype=tf.float64)
        bucket_ids = tf.mod(inputs, bucket_size)
        return tf.nn.embedding_lookup(table, bucket_ids)
@staticmethod
# Batch normalization with learnable per-feature scale/shift (float64).
# NOTE(review): at training time the *current batch* statistics are used for
# normalization and the population statistics are overwritten with the raw
# batch stats; the ExponentialMovingAverage below is maintained but its
# averaged values are never read -- confirm this is intended.
def add_norm(layer_input, size):
scale = tf.Variable(tf.ones([size], dtype=tf.float64))
shift = tf.Variable(tf.zeros([size], dtype=tf.float64))
# Non-trainable population statistics, used at inference time.
pop_mean = tf.Variable(tf.zeros([layer_input.get_shape()[-1]], dtype=tf.float64), trainable=False)
pop_var = tf.Variable(tf.ones([layer_input.get_shape()[-1]], dtype=tf.float64), trainable=False)
epsilon = 0.001
if IS_TRAINING:
# batch_mean, batch_var = tf.nn.moments(layer_input, axes=[0])
# Per-feature mean/variance of the current mini-batch (axis 0 = batch).
fc_mean, fc_var = tf.nn.moments(layer_input, axes=[0])
ema = tf.train.ExponentialMovingAverage(decay=0.5)
# Returns the batch stats while, as a side effect, updating the EMA and
# copying the raw batch stats into pop_mean/pop_var for later inference.
def mean_var_with_update():
ema_apply_op = ema.apply([fc_mean, fc_var])
with tf.control_dependencies([ema_apply_op, tf.assign(pop_var, fc_var), tf.assign(pop_mean, fc_mean)]):
return tf.identity(fc_mean), tf.identity(fc_var)
mean, var = mean_var_with_update()
layer_output = tf.nn.batch_normalization(layer_input, mean, var, shift, scale, epsilon)
# decay = 0.5
# train_mean = tf.assign(pop_mean,
# pop_mean * decay + batch_mean * (1 - decay))
# train_var = tf.assign(pop_var,
# pop_var * decay + batch_var * (1 - decay))
# with tf.control_dependencies([train_mean, train_var]):
# return tf.nn.batch_normalization(layer_input,
# pop_mean, pop_var, shift, scale, epsilon)
else:
# Inference path: normalize with the stored population statistics.
layer_output = tf.nn.batch_normalization(layer_input, pop_mean, pop_var, shift, scale, epsilon)
return layer_output
# Train until the input pipeline is exhausted (the queue raises
# OutOfRangeError after NUM_EPOCHS epochs). Relies on the module-level
# globals `merged`, `writer`, `saver`, VERSION built in __main__.
def run_train(self, sess):
step = 0
try:
while True:
_, _, merged_summary, step_value, loss_value, net_output = sess.run(
[self.train_op, self.increase_step, merged, self.global_step, self.loss, self.output])
writer.add_summary(merged_summary, global_step=step_value)
# NOTE(review): `step` advances by the batch size below, so this
# condition only fires when step happens to land exactly on a
# multiple of 100; checkpoints may be saved much less often than
# every 100 iterations -- confirm intended.
if step % 100 == 0:
saver.save(sess, "model/" + VERSION + "/model.ckpt")
print ("Step %d: loss= %.4f" % (step, loss_value))
# `step` counts examples processed, not optimizer iterations.
step += len(net_output)
except tf.errors.OutOfRangeError:
print ("Done training for %d epochs, %d steps." % (NUM_EPOCHS, step))
def run_evl(self, sess):
    """Run one evaluation pass until the input queue is exhausted.

    Streams batches through the graph, accumulating the exact-match count and
    the streaming mAP@5 metric, and prints a summary when the queue raises
    OutOfRangeError. Uses the module-level NUM_EPOCHS for the summary line.
    """
    step = 0
    correct_entry = 0.0
    # Defined up front so the summary line works even if no batch is produced
    # (previously `mAP` would be unbound in the except handler).
    mAP = 0.0
    try:
        while True:
            mAP, _, net_output, feature_value, target_label, num_correct = sess.run(
                [self.mAP, self.mAP_update, self.output, self.feature, self.label_batch,
                 self.num_correct_prediction])
            correct_entry += num_correct
            # `step` counts examples processed, not iterations.
            step += len(net_output)
            # print() form works under both Python 2 and 3 (original used
            # Python-2-only `print step` statements).
            print(step)
            print(mAP)
    except tf.errors.OutOfRangeError:
        accuracy = correct_entry / step if step else 0.0  # guard empty eval set
        print("Done training for %d epochs, %d steps, %f mAP@5 %f accuracy ." % (
            NUM_EPOCHS, step, mAP, accuracy))
if __name__ == "__main__":
# RDWModel.run_test()
# Build the graph, then either train or evaluate depending on IS_TRAINING.
model = RDWModel()
with tf.Session() as session:
keras.backend.set_session(session)
saver = tf.train.Saver()
# Ensure the per-VERSION log and checkpoint directories exist.
if tf.gfile.Exists("log/" + VERSION) is False:
tf.gfile.MkDir("log/" + VERSION)
if tf.gfile.Exists("model/" + VERSION) is False:
tf.gfile.MkDir("model/" + VERSION)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("log/" + VERSION, session.graph)
ckpt = tf.train.get_checkpoint_state("model/" + VERSION)
# Initialize first; a restored checkpoint (below) then overwrites the
# global variables while local metric accumulators stay freshly zeroed.
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
if ckpt and ckpt.model_checkpoint_path:
saver.restore(session, ckpt.model_checkpoint_path)
print ("Restore ckpt")
else:
print ("No ckpt found")
if IS_TRAINING:
# Queue runners feed the input pipeline; stop and join them cleanly.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=session, coord=coord)
model.run_train(session)
coord.request_stop()
coord.join(threads)
else:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=session, coord=coord)
model.run_evl(session)
coord.request_stop()
coord.join(threads)
session.close()
|
{"hexsha": "04534b3f1e2f383b86a1f33d224bd4d62697a41a", "size": 16011, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/dl_v3.py", "max_stars_repo_name": "Lucklyric/Expedia-Recommendation", "max_stars_repo_head_hexsha": "d0496fec5305b02d4e17785e6ea5d635e51e92c1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/dl_v3.py", "max_issues_repo_name": "Lucklyric/Expedia-Recommendation", "max_issues_repo_head_hexsha": "d0496fec5305b02d4e17785e6ea5d635e51e92c1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/dl_v3.py", "max_forks_repo_name": "Lucklyric/Expedia-Recommendation", "max_forks_repo_head_hexsha": "d0496fec5305b02d4e17785e6ea5d635e51e92c1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.3490566038, "max_line_length": 120, "alphanum_fraction": 0.5863468865, "include": true, "reason": "import numpy", "num_tokens": 3540}
|
# python train_mixup.py --dataset cifar10 --model resnet18 --data_augmentation --mixup
import pdb
import argparse
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from torch.optim.lr_scheduler import MultiStepLR
from torchvision.utils import make_grid
from torchvision import datasets, transforms
from util.misc import CSVLogger
from util.cutout import Cutout
from model.resnet import ResNet18
"""
Added by yanfan.
TensorBoard visualization of the loss/accuracy curves during training,
and visualization of the results.
"""
from tensorboardX import SummaryWriter
SumWriter = SummaryWriter(log_dir = "logs/log_mixup")
# Command-line interface and input-preprocessing setup for mixup training.
model_options = ['resnet18', 'wideresnet']
dataset_options = ['cifar10', 'cifar100', 'svhn']
parser = argparse.ArgumentParser(description='CNN')
parser.add_argument('--dataset', '-d', default='cifar10',
                    choices=dataset_options)
parser.add_argument('--model', '-a', default='resnet18',
                    choices=model_options)
parser.add_argument('--batch_size', type=int, default=128,
                    help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=200,
                    help='number of epochs to train (default: 20)')
parser.add_argument('--learning_rate', type=float, default=0.1,
                    help='learning rate')
parser.add_argument('--data_augmentation', action='store_true', default=False,
                    help='augment data by flipping and cropping')
## for cutout
parser.add_argument('--cutout', action='store_true', default=False,
                    help='apply cutout')
parser.add_argument('--n_holes', type=int, default=1,
                    help='number of holes to cut out from image')
parser.add_argument('--length', type=int, default=16,
                    help='length of the holes')
## for mixup
parser.add_argument('--mixup', action='store_true', default=False,
                    help='apply mixup')
parser.add_argument('--alpha', default=1., type=float,
                    help='mixup interpolation coefficient (default: 1)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='enables CUDA training')
parser.add_argument('--seed', type=int, default=0,
                    help='random seed (default: 1)')
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
args.cuda = not args.no_cuda and torch.cuda.is_available()
cudnn.benchmark = True  # Should make training go faster for large models
# Seed both CPU and (when available) GPU RNGs for reproducibility.
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# Identifier used for log/checkpoint file names.
test_id = args.dataset + '_' + args.model + '_'+ 'mixup'
print(args)
# Image Preprocessing
# Per-channel mean/std (in 0-255 scale, divided down to 0-1) for each dataset.
if args.dataset == 'svhn':
normalize = transforms.Normalize(mean=[x / 255.0 for x in[109.9, 109.7, 113.8]],
                                 std=[x / 255.0 for x in [50.1, 50.6, 50.8]])
else:
normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                                 std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
# Build the training transform incrementally based on the CLI flags.
train_transform = transforms.Compose([])
if args.data_augmentation:
train_transform.transforms.append(transforms.RandomCrop(32, padding=4))
train_transform.transforms.append(transforms.RandomHorizontalFlip())
train_transform.transforms.append(transforms.ToTensor())
train_transform.transforms.append(normalize)
if args.cutout: ##apply cutout
train_transform.transforms.append(Cutout(n_holes=args.n_holes, length=args.length))
# mixup function
def mixup_data(x, y, alpha=1.0, use_cuda=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    # Mixing coefficient: Beta(alpha, alpha) sample, or 1 (no mixing) when alpha <= 0.
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    n = x.size()[0]
    # Random pairing of examples within the batch.
    perm = torch.randperm(n).cuda() if use_cuda else torch.randperm(n)
    mixed_x = lam * x + (1 - lam) * x[perm, :]
    return mixed_x, y, y[perm], lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Convex combination of the loss against each of the two mixed targets."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b
test_transform = transforms.Compose([
    transforms.ToTensor(),
    normalize])

# Dataset selection. 'svhn' is an accepted --dataset choice, so it must be
# handled here as well; previously it fell through and left train_dataset /
# test_dataset / num_classes undefined (NameError at DataLoader construction).
if args.dataset == 'cifar10':
    num_classes = 10
    train_dataset = datasets.CIFAR10(root='./data',
                                     train=True,
                                     transform=train_transform,
                                     download=True)
    test_dataset = datasets.CIFAR10(root='./data',
                                    train=False,
                                    transform=test_transform,
                                    download=True)
elif args.dataset == 'cifar100':
    num_classes = 100
    train_dataset = datasets.CIFAR100(root='data/',
                                      train=True,
                                      transform=train_transform,
                                      download=True)
    test_dataset = datasets.CIFAR100(root='data/',
                                     train=False,
                                     transform=test_transform,
                                     download=True)
elif args.dataset == 'svhn':
    num_classes = 10
    # torchvision's SVHN takes split= ('train'/'test') instead of train=.
    train_dataset = datasets.SVHN(root='data/',
                                  split='train',
                                  transform=train_transform,
                                  download=True)
    test_dataset = datasets.SVHN(root='data/',
                                 split='test',
                                 transform=test_transform,
                                 download=True)
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           pin_memory=True,
                                           num_workers=2)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          pin_memory=True,
                                          num_workers=2)

# Model / loss / optimizer / LR schedule.
if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes)
else:
    # 'wideresnet' is accepted by --model but no implementation is imported
    # in this script; fail fast instead of hitting a NameError on `cnn` below.
    raise ValueError('unsupported model: %s' % args.model)
cnn = cnn.cuda()
criterion = nn.CrossEntropyLoss().cuda()
cnn_optimizer = torch.optim.SGD(cnn.parameters(), lr=args.learning_rate,
                                momentum=0.9, nesterov=True, weight_decay=5e-4)
# Each dataset uses its own LR milestone schedule.
if args.dataset == 'svhn':
    scheduler = MultiStepLR(cnn_optimizer, milestones=[80, 120], gamma=0.1)
else:
    scheduler = MultiStepLR(cnn_optimizer, milestones=[60, 120, 160], gamma=0.2)
# CSV log of per-epoch train/test accuracy.
filename = 'logs/' + test_id + '.csv'
csv_logger = CSVLogger(args=args, fieldnames=['epoch', 'train_acc', 'test_acc'], filename=filename)
def test(loader):
    """Compute accuracy of the global `cnn` over `loader`.

    Switches the model to eval mode (so BN uses its stored statistics) for
    the duration of the pass, then restores training mode before returning.
    """
    cnn.eval()
    correct = 0.
    total = 0.
    for images, labels in loader:
        images, labels = images.cuda(), labels.cuda()
        with torch.no_grad():
            logits = cnn(images)
        predicted = torch.max(logits.data, 1)[1]
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    cnn.train()  # restore training mode for subsequent epochs
    return correct / total
def mixup_train(epoch):
    """Train the global `cnn` for one epoch with mixup augmentation.

    Returns (avg_train_loss, avg_reg_loss, train_accuracy). reg_loss is kept
    only for interface compatibility; no regularization term is computed here.
    """
    print('\nEpoch: %d' % epoch)
    cnn.train()
    train_loss = 0
    reg_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets_a, targets_b, lam = mixup_data(inputs, targets,
                                                       args.alpha, use_cuda)
        inputs, targets_a, targets_b = map(Variable, (inputs,
                                                      targets_a, targets_b))
        outputs = cnn(inputs)
        loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)
        train_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        # A prediction counts as lam-correct against target_a and
        # (1 - lam)-correct against target_b, mirroring the mixed loss.
        correct += (lam * predicted.eq(targets_a.data).cpu().sum().float()
                    + (1 - lam) * predicted.eq(targets_b.data).cpu().sum().float())
        correct = correct.item()
        cnn_optimizer.zero_grad()
        loss.backward()
        cnn_optimizer.step()
        if batch_idx % 100 == 0:
            print(batch_idx, len(train_loader),
                  'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                  % (train_loss/(batch_idx+1),
                     correct/total, correct, total))
    # Average over the number of batches seen: batch_idx is the LAST index,
    # so the count is batch_idx + 1. The original divided by batch_idx, which
    # was off by one and raised ZeroDivisionError for a single-batch loader.
    num_batches = batch_idx + 1
    return (train_loss/num_batches, reg_loss/num_batches, correct/total)
# Main loop: one mixup-training epoch, evaluation, LR schedule step, then
# CSV + TensorBoard logging.
for epoch in range(args.epochs):
train_loss, reg_loss, train_acc = mixup_train(epoch)
test_acc = test(test_loader)
tqdm.write('test_acc: %.3f' % (test_acc))
#scheduler.step(epoch) # Use this line for PyTorch <1.4
scheduler.step() # Use this line for PyTorch >=1.4
row = {'epoch': str(epoch), 'train_acc': str(round(train_acc, 5)), 'test_acc': str(test_acc)}
csv_logger.writerow(row)
SumWriter.add_scalar("test_acc",test_acc,global_step=epoch+1) ## log test_acc curve for TensorBoard
SumWriter.add_scalar("train_acc",train_acc,global_step=epoch+1) ## log train_acc curve for TensorBoard
SumWriter.add_scalar("train_loss",train_loss,global_step=epoch+1) ## log train_loss curve for TensorBoard
torch.save(cnn.state_dict(), 'checkpoints/' + test_id + '.pt')
csv_logger.close()
|
{"hexsha": "48da46e2da824b38794ff2f50194641e46539b18", "size": 9142, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_mixup.py", "max_stars_repo_name": "Ivanyan0516/nndl-pj", "max_stars_repo_head_hexsha": "c4f8d8e1cfaeffe01a8a7fd949e5932dc59a7c69", "max_stars_repo_licenses": ["ECL-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train_mixup.py", "max_issues_repo_name": "Ivanyan0516/nndl-pj", "max_issues_repo_head_hexsha": "c4f8d8e1cfaeffe01a8a7fd949e5932dc59a7c69", "max_issues_repo_licenses": ["ECL-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_mixup.py", "max_forks_repo_name": "Ivanyan0516/nndl-pj", "max_forks_repo_head_hexsha": "c4f8d8e1cfaeffe01a8a7fd949e5932dc59a7c69", "max_forks_repo_licenses": ["ECL-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2777777778, "max_line_length": 99, "alphanum_fraction": 0.5970247211, "include": true, "reason": "import numpy", "num_tokens": 2103}
|
%
% revised at Jan. 30th; 2009
% we have added two parts: the unitary operator and the
% project operator
%
% revised on Mar. 22th, 2009
% to use the environment of theorem and law to improve it
%
% revised at July 27th, 2009
% finish the part of project operator and the unitary operator
%
% revised at Aug. 10th, 2009
% nearly rewrite all the part in the operator.tex
%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% general discussion for the operator:
% definition and Algorithms
% linear operator
% hermite operator
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{General discussion to operator}
%
%
%
%
In the above content, we have discussed the Hilbert space. However,
how to define the Hilbert space which is correspond to a given
physical system? How to extract physical information from the
Hilbert space? In this chapter, we will give some general discussion
over this topic.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Definition of operators}
%
% 1 definition
% 2 linear operators,required by the superposition principle
%
%
%
Generally to say, mathematically the operator can be seen as the
correspondence between two different vectors in a Hilbert
space\footnote{In the following content, the Greek letters are used
to indicate the vectors in the Hilbert space.}:
\begin{equation}\label{OPERATOReq:4}
\hat{O}\ket{\Psi} = \ket{\Phi}
\end{equation}
If there's no specific instruction, the operator in the following
content will all done on the vectors from ket space.
Mathematically speaking, an operator can be seen as some kind of ``function
transformation machine'', it transforms the function from one form to
another. For example, the $\frac{d^{2}}{dx^{2}}$ is a kind of
operator. For the function of $e^{ikx}$, we have:
\begin{equation}\label{}
\frac{d^{2}}{dx^{2}} (e^{ikx}) = -k^{2}e^{ikx}
\end{equation}
So it converts the $e^{ikx}$ to $-k^{2}e^{ikx}$.
In quantum mechanics, all the physical quantity are corresponding to
some form of operators. For example, the kinetic operator is expressed
as $-\frac{\hbar^{2}}{2m}\nabla^{2}$, the momentum operator is
expressed as $-i\hbar\nabla$.
However, it's not all the operators that are physically meaningful
in quantum mechanics. Most of the operators used in quantum
mechanics, are the linear operators; which satisfy the conditions
below:
\begin{eqnarray}
% \nonumber to remove numbering (before each equation)
\hat{A}(c_{1}\ket{\Psi} + c_{2}\ket{\Phi}) &=&
c_{1}\hat{A}\ket{\Psi} + c_{2}\hat{A}\ket{\Phi} \nonumber \\
\hat{A}(\ket{\Psi}a) &=& (\hat{A}\ket{\Psi})a
\end{eqnarray}
Here it can be seen that the linear operator actually is required by
the superposition principle. Since that an arbitrary wave function
can be composed by some linear combination, thus the action of an
operator on some arbitrary wave function should also be decomposed
into the actions on its linear components.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Axioms for quantum mechanics in terms of operator}
\label{AFQMITOO_in_operator}
% 1 physical quantity, and should be hermitian
% 2 observable
% 3 operator to an arbitrary wave function; how to express it
%
%
Now let's state the axioms related to operator in quantum
mechanics. The first one is the physical meaning of operator:
\begin{axiom}\label{axiom2}
\textbf{In quantum mechanics, all the physical quantities are
corresponding to hermitian operator working on the Hilbert space.}
\end{axiom}
As what we have demonstrated, here such operators corresponding to the
physical quantity should be also the linear operator. In the following
content, we will discuss what's the hermitian character of the
operator.
Furthermore, the second axiom is related to the ``observable''
physical quantity in the quantum mechanics:
\begin{axiom}\label{axiom3}
\textbf{ In quantum mechanics, the observable physical quantity
has the eigen functions below to determine the observed value:
\begin{equation}\label{OPERATOReq:8}
\hat{A}\ket{\Psi_{i}} = a_{i}\ket{\Psi_{i}} \quad i=1, 2, \cdots
\end{equation}
Here the $\ket{\Psi_{i}}$ is called the eigen states for the
observable physical quantity of $\hat{A}$, and the real number of
$a_{i}$ is just corresponding to the observed value (on the other
words, the measured result of the physical quantity) for the physical
quantity.}
\end{axiom}
Now there comes a very important question: what is an
``observable'' physical quantity in quantum mechanics?
Here the meaning of ``observable'' is that such physical quantity can
be measured and given certain measured result. As we know, in classic
mechanics, all the physical quantities are ``observable'', the
position, the momentum, the energy etc. are all measurable and can be
given some certain observed value.
However, things in quantum mechanics are totally different. For
example, as what has been demonstrated in the chapter
\ref{basic_chapter}, the position of the quantum particles can not be
measured to give some definite value, only the probability of the
position can be obtained through the $|\Psi(\bm{r},
t)|^{2}d^{3}\bm{r}$ of wave function of $\Psi(\bm{r}, t)$; so there's
some great difference between the physical operators: some can be
observable, the other are not.
The observable physical quantity has very important meaning in quantum
mechanics, that through the (\ref{OPERATOReq:8}) we can derive the eigen
states for the physical quantity of $\hat{A}$. From the contents
below, we can see that such group of eigen states are forming some
complete sets, that means, we are able to get the Hilbert space from
solving the eigen function. Hence, we have answered the question
prompted at the beginning: \textbf{the observable physical quantities
are used to define the Hilbert space corresponding the system.}
In quantum mechanics, the Schrodinger equation is holding true for all
system; then if the Hamiltonian does not contain the time, we can
split the time part from the Schrodinger equation (see the following
chapter discussing the Schrodinger equation), hence we have the eigen
function for the Hamiltonian operator; that is to say, the energy is
some observable value:
\begin{equation}
\label{OPERATOReq:20}
\hat{H}\ket{\Psi_{i}} = E_{i} \ket{\Psi_{i}}
\end{equation}
$E_{i}$ is the energy of the system. In quantum mechanics, nearly all
the circumstances are belonging to this type of situation. To some
extent, we can say that the equation in (\ref{OPERATOReq:20}) is used
to define the Hilbert space for the given quantum system.
In such circumstance, we can prove that if the physical quantity is
commuted with the Hamiltonian operator, then such physical quantity
may have eigen function so that to give observable value. The further
information will be detailed analyzed in the following content.
On the other hand, for the physical quantity which is not
observable, we can define the expectation value over the complete
sets of $\Psi_{i}$ (in other words, the average measurable value)
for the physical quantity:
\begin{equation}\label{OPERATOReq:7}
\bra{\Psi_{i}}\hat{A}\ket{\Psi_{i}} = \langle\hat{A}\rangle
\end{equation}
For example, the mean coordinate for the position operator of
$\hat{x}$:
\begin{equation}\label{}
\langle\hat{x}\rangle = \bra{\Psi_{i}}\hat{x}\ket{\Psi_{i}}
\end{equation}
This value depicts the average position for the $\ket{\Psi_{i}}$ over
the x axis.
Now there's still a question left that for some arbitrary quantum
state of $\ket{\Phi}$, how to evaluate the physical information for
this state? This is the second question prompted in the beginning, and
we have the third axiom below to answer the question:
\begin{axiom}\label{axiom4}
\textbf{When some physical quantity of $\hat{A}$ is measured on an
arbitrary quantum state of $\ket{\Psi}$, if the $\ket{\Psi}$ is
not the eigen state for $\hat{A}$ then the measurable result can
not achieve definite value. Instead, there are a range of
measurable results can be potentially achieved, the probability
for each of measurable result is determined as:
\begin{equation}
\label{OPERATOReq:21}
\text{probability of measurable result} = |\langle \Psi_{i}|\Psi \rangle|^{2}
\end{equation}
The $\ket{\Psi_{i}}$ is some complete sets on which the
$\ket{\Psi}$ is expanding over:
\begin{equation}
\ket{\Psi} = \sum_{i}c_{i}\ket{\Psi_{i}}
\end{equation}
Hence, the measurable result for the $\hat{A}$ is just the
collection of measurements on each $\ket{\Psi_{i}}$, each of them
has some probability to appear in the final result; and the
probability is given by (\ref{OPERATOReq:21}). Since $c_{i} =
\langle \Psi_{i}|\Psi \rangle$, we can also express the above
result as:
\begin{equation}
\text{probability of measurable result} = |c_{i}|^{2}
\end{equation} }
\end{axiom}
Usually in quantum mechanics, the physical quantity of $\hat{A}$ is
chosen to be observable kind (obviously it meaningless to pursue the
measurable result for some unobservable physical quantity over an
arbitrary quantum state! ), therefore, we can have that:
\begin{eqnarray}\label{OPERATOReq:2}
% \nonumber to remove numbering (before each equation)
\hat{A}\ket{\Psi}
&=& \sum_{i}c_{i}\hat{A}\Psi_{i} \nonumber \\
&=& \sum_{i}c_{i}a_{i}\Psi_{i} \Rightarrow \nonumber \\
\bra{\Psi}\hat{A}\ket{\Psi}
&=&
\sum_{i}\sum_{j}c^{*}_{j}c_{i}a_{i}\langle\Psi_{j}|\Psi_{i}\rangle
\nonumber \\
&=& \sum_{i}|c_{i}|^{2}a_{i}
\end{eqnarray}
This is the average measurable result for the $\hat{A}$ over the
arbitrary state of $\ket{\Psi}$.
Now we have established all the axioms related to the operators, and
answered the questions that how to define the Hilbert space for a
system, and how to extract physical information from given arbitrary
quantum state. In the following content, we are going to present a
detailed analysis in term of the items above.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Algorithm of operators}
% 1 some special operators, unit operator etc.
% 2 addition, substraction, multiplication, division
% 3 commutation relationship
% 4 transposing and conjugating operators
% 5 hermite operation, related to the transposing and
% conjugating operations
%
Now we concentrate on the algorithms of the linear operators. It can
be seen that these algorithms are similar to the rules of matrix algebra.
Unit operator:
\begin{equation}\label{}
\hat{I}\ket{\Psi} = \ket{\Psi}
\end{equation}
Zero operator:
\begin{equation}\label{}
\hat{O}\ket{\Psi} = 0
\end{equation}
Two identical operators which leads to $\hat{A}=\hat{B}$:
\begin{equation}\label{}
\hat{A}\ket{\Psi} = \hat{B} \ket{\Psi} \qquad \text{for any $\ket{\Psi}$}
\end{equation}
Addition and substraction of operators:
\begin{equation}\label{}
(\hat{A} \pm \hat{B})\ket{\Psi} = \hat{A}\ket{\Psi} \pm \hat{B} \ket{\Psi}
\end{equation}
Such algorithms introduced above are simple and straightforward, yet
the multiplication for the operators is some kind of complex: if
$\hat{A}$ and $\hat{B}$ are two operators, their multiplication is
determined by their sequence:
\begin{equation}\label{}
\hat{A}\hat{B}\ket{\Psi} \quad \text{is not same to} \quad
\hat{B}\hat{A}\ket{\Psi} \nonumber
\end{equation}
This is similar to the matrix multiplication. Two operators are
commuted if $\hat{A}\hat{B} = \hat{B}\hat{A}$, but most of time they
are not commuted with each other. Commutation is some significant
relation, therefore in the following content, we use $[\hat{A},
\hat{B}]$ to represent $\hat{A}\hat{B} - \hat{B}\hat{A}$; so if
$[\hat{A}, \hat{B}] = 0$, the two operators are commuting with each
other.
The commutation relationship of the operators is vital in quantum
mechanics, so this is going to leave for a detailed discussion in
the following content.
Like the matrix, where we have inverse matrix; in quantum mechanics,
we also have the inverse operation. For any $\ket{\Psi}$ and
$\ket{\Phi}$ , if we have:
\begin{eqnarray}
% \nonumber to remove numbering (before each equation)
\hat{A}\ket{\Psi} &=& \ket{\Phi} \nonumber \\
\hat{B}\ket{\Phi} &=& \ket{\Psi}
\end{eqnarray}
We say that $\hat{B}$ is the inverse operator for the $\hat{A}$,
which is labeled as $\hat{A}^{-1}$. It's easy to see that
$\hat{A}\hat{A}^{-1} = I$. However, not every operator has its own
inverse operator.
Besides the algorithms above, the operator also has conjugating
operations and transposing operations. The conjugating operation is
defined as: to change the operator of $\hat{O}$ into its conjugated
form. For example, the conjugating operator for the operator of
$-i\hbar \frac{d}{dx}$ is $i\hbar \frac{d}{dx}$.
Transposing operation is a bit more complex, for $\langle\psi|
\hat{O}|\phi\rangle$ it's transposing defined as:
\begin{equation}\label{OPERATOReq:5}
\langle\psi|\widetilde{\hat{O}}|\phi\rangle = \langle\phi|
\hat{O}|\psi\rangle
\end{equation}
In transposing operation, it's clear to see in the integrals the bra
and ket exchanged their position.
It's convenient to take $-i\hbar\frac{d}{dx}$ operator as an example
to show how the transposing operation works. Since we have:
\begin{align}\label{OPERATOReq:3}
\int^{+\infty}_{-\infty}\phi^{*}\frac{d}{dx}\psi dx &=
\phi^{*}\psi|^{+\infty}_{-\infty} -
\int^{+\infty}_{-\infty}\psi^{*}\frac{d}{dx}\phi dx \nonumber \\
&= - \int^{+\infty}_{-\infty}\psi^{*}\frac{d}{dx}\phi dx
\end{align}
We have $\widetilde{\hat{p_{x}}} = -\hat{p_{x}}$.
Here the transposing operation seems to be a bit of obscure. Actually
we can see that the transposing operation and the conjugating
operation are closely related to the adjacent operation, which is
going to be demonstrated right now. first let's introduce the
operator on the \brat{\Psi}.
Since bra space is conjugated to the ket space, we can define the
operator working on the bra space. Following the same condition
defined in (\ref{OPERATOReq:4}), we have:
\begin{equation}\label{}
\bra{\Psi}\hat{P} = \bra{\Phi}
\end{equation}
The operator $\hat{P}$ working on the \brat{\Psi} is called
$\hat{A}$'s adjacent operator, labeled as $\hat{A}^{+}$.
It's easy to see that we have:
\begin{equation}\label{}
\hat{A} \ket{\Psi} = a\ket{\Psi}, \quad \bra{\Psi}\hat{A}^{+} =
a^{*}\bra{\Psi}
\end{equation}
To see the association between the operator of $\hat{A}$ and
$\hat{A}^{+}$, we can work through an example here. Suppose that
$\ket{\Psi_{1}}$ and $\ket{\Psi_{2}}$ constitute some complete sets
for $\hat{A}$, they are orthogonal with each other; where any other
vectors in this space can be expressed as $\ket{\Psi} =
\lambda_{1}\ket{\Psi_{1}} + \lambda_{2}\ket{\Psi_{2}}$, the
$\lambda$ is some complex number. Therefore we have:
\begin{align}\label{}
(\bra{\Psi_{1}} + \bra{\Psi_{2}})\hat{A}(\lambda_{1}\ket{\Psi_{1}} +
\lambda_{2}\ket{\Psi_{2}}) &=
\lambda_{1}\bra{\Psi_{1}}\hat{A}\ket{\Psi_{1}} +
\lambda_{2}\bra{\Psi_{2}}\hat{A}\ket{\Psi_{2}} \nonumber \\
&= \lambda_{1}a_{1} + \lambda_{2}a_{2}
\end{align}
For the $\ket{\Psi} = \lambda_{1}\ket{\Psi_{1}} +
\lambda_{2}\ket{\Psi_{2}}$, its conjugate vector is: $\bra{\Psi} =
\lambda_{1}^{*}\bra{\Psi_{1}} + \lambda_{2}^{*}\bra{\Psi_{2}}$, thus
for the $\hat{A}^{+}$; we have:
\begin{align}\label{}
(\lambda_{1}^{*}\bra{\Psi_{1}} +
\lambda_{2}^{*}\bra{\Psi_{2}})\hat{A}^{+}(\ket{\Psi_{1}} +
\ket{\Psi_{2}}) &=
\lambda_{1}^{*}\bra{\Psi_{1}}\hat{A}^{+}\ket{\Psi_{1}} +
\lambda_{2}^{*}\bra{\Psi_{2}}\hat{A}^{+}\ket{\Psi_{2}} \nonumber \\
&= \lambda_{1}^{*}a_{1}^{*} + \lambda_{2}^{*}a_{2}^{*}
\end{align}
From this example, we can see that the operation of $\hat{A}
\rightarrow \hat{A}^{+}$ is equivalent to the transposing operation
plus the conjugating operation. This specific operation is called
adjacent operation.
The adjacent operation can be finally defined as:
\begin{equation}\label{}
\bra{\Psi}\hat{A}^{+}\ket{\Phi} = \bra{\Phi}\hat{A}\ket{\Psi}^{*}
\end{equation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Hermite operators}
%
% why the hermite operator is important?
% to prove the hermite operator
%
%
In the section related to the axioms of operators, we have shown that
the operators which correspond to physical quantity should be
hermitian, so in this part let's see why it should be.
The hermite operator is defined as: $\hat{O} = \hat{O}^{+}$; that
is, such operator equals to its adjacent operator. What's the
important meaning behind hermite operators? That is, only the
hermite operators can produce the real eigen value, which is
demanded by all the physical dynamic values. Since the result of
measurement should be physically meaningful, so it must be a real
number.
Thus we prove the judgement below:
\begin{theorem}
if and only if the operator of $\hat{O}$ is hermite, the
$\langle\Psi|\hat{O}|\Psi\rangle$ equals to a real number.
\end{theorem}
\begin{proof}
If $\hat{O} = \hat{O}^{+}$, then for an arbitrary \kett{\Psi}, we
have:
\begin{equation}\label{}
\bra{\Psi}\hat{O}\ket{\Psi} = \langle\Psi|\hat{O}^{+}\ket{\Psi}^{*}
= \langle\Psi|\hat{O}\ket{\Psi}^{*}
\end{equation}
Thus the $\bra{\Psi}\hat{O}\ket{\Psi}$ is a real number.
On the other hand, if for any \kett{\Psi}, the
$\bra{\Psi}\hat{O}\ket{\Psi}$ is a real number (here the operator
can be both type, on bra or ket); we have:
\begin{equation}\label{}
\bra{\Psi}\hat{O}\ket{\Psi} =\bra{\Psi}\hat{O}\ket{\Psi}^{*} =
\langle\Psi|\hat{O}^{+}\ket{\Psi}
\end{equation}
Thus for any \kett{\Psi}, $\bra{\Psi}(\hat{O}-\hat{O}^{+})\ket{\Psi}
= 0$. Then $\hat{O} = \hat{O}^{+}$. \qedhere
\end{proof}
Finally the hermite operator can be written as:
\begin{equation}\label{OPERATOReq:6}
\bra{\psi}\hat{O}\ket{\phi} = \bra{\psi}\hat{O}^{+}\ket{\phi} =
\bra{\phi}\hat{O}\ket{\psi}^{*}
\end{equation}
Formally this theorem implies one thing: that the hermite operator
can act on both the bra space and the ket space (they give the same
result); thus for the hermite operator we need not distinguish
between the hermite operator and its adjacent operator:
\begin{equation}\label{}
\langle\Psi|\hat{O}\Phi\rangle = \langle\Psi\hat{O}|\Phi\rangle =
\langle\hat{O}\Psi|\Phi\rangle
\end{equation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{What kind of operator is hermitian?}
\label{hermitian_in_operator}
%
% What kind of operator is hermitian?
%
%
Now we come to some question that what kind of operator is
hermitian? The hermitian operator is that $\hat{A} = \hat{A}^{+}$,
potentially; to judge that whether a given operator is hermitian
needs its concrete expression. However, just as what we are going to
demonstrate, there are some general rules to determine whether a
operator is hermitian or not.
Firstly, let's prove some character related to the adjacent
operation:
\begin{theorem}
For any two arbitrary operators, it satisfy that:
\begin{equation}\label{OPERATOReq:28}
(\hat{A}\hat{B})^{+} = \hat{B}^{+}\hat{A}^{+}
\end{equation}
\end{theorem}
\begin{proof}
Suggest that $\hat{B}\ket{\Psi} = \ket{\Phi}$, $\hat{A}\ket{\Phi} =
\ket{\Omega}$. So we have:
\begin{equation}\label{}
\hat{A}\hat{B}\ket{\Psi} =\ket{\Omega}
\end{equation}
On the other hand, we have:
\begin{equation}\label{}
\bra{\Psi}\hat{B}^{+}\hat{A}^{+} = \bra{\Phi}\hat{A}^{+} =
\bra{\Omega} =\bra{\Psi}(\hat{A}\hat{B})^{+}
\end{equation}
Furthermore, it's obvious that for $\hat{A}^{+}\hat{B}^{+}$ on
$\bra{\Psi}$ we can not get the $\bra{\Omega}$. Hence according to
the one to one correspondence between the bra and ket, the
(\ref{OPERATOReq:28}) is true.
\qedhere
\end{proof}
For the adjacent operation, we have some other theorems which are
easily proved (so the proof is omitted here):
\begin{theorem}
For any arbitrary operators, it satisfy that:
\begin{equation}\label{OPERATOReq:29}
\begin{split}
(\hat{A}^{+})^{+} &= \hat{A} \\
(\lambda\hat{A})^{+} &= \lambda^{*}\hat{A}^{+} \\
(\hat{A} + \hat{B})^{+} &= \hat{A}^{+} + \hat{B}^{+}
\end{split}
\end{equation}
\end{theorem}
Then let's prove some general characters related to the
hermitian operator. Suggest that $\hat{A}$ and $\hat{B}$ are two
hermitian operators; now we can prove that the operators below are
also hermitian.
\begin{theorem}
\begin{align}\label{OPERATOReq:30}
\hat{C} &= \hat{A} \pm \hat{B} \nonumber \\
\hat{C} &= c\hat{A} \quad \text{c is a real number} \nonumber \\
\hat{C} &= \hat{A}\hat{B} \quad \text{as long as $[\hat{A}, \hat{B}] = 0$} \nonumber \\
\hat{C} &= c(\hat{A}\hat{B} + \hat{B}\hat{A}) \nonumber \\
\hat{C} &= \frac{c}{i}(\hat{A}\hat{B} - \hat{B}\hat{A})
\end{align}
\end{theorem}
\begin{proof}
The first two expressions for $\hat{C}$ are straightforward. For
$\hat{C} = \hat{A}\hat{B}$, we have that:
\begin{equation}\label{}
\begin{split}
\hat{C}^{+} &= (\hat{A}\hat{B})^{+} \\
&= \hat{B}^{+}\hat{A}^{+} \\
&= \hat{B}\hat{A} \quad
\text{$\hat{A}$ and $\hat{B}$ are hermitian} \\
&= \hat{A}\hat{B} \quad
\text{$\hat{A}$ and $\hat{B}$ are commutative} \\
&= \hat{C}
\end{split}
\end{equation}
The fourth expression for $\hat{C}$ can be also proved in the
similar way. For the fifth expression, we have:
\begin{equation}\label{OPERATOReq:24}
\begin{split}
\hat{C}^{+} &= \left(\frac{c}{i}\right)^{*}
[\hat{A}\hat{B} - \hat{B}\hat{A}]^{+}
\\
&= ic[\hat{B}\hat{A} - \hat{A}\hat{B}] \\
&=-ic[\hat{A},\hat{B}] \\
&=\frac{c}{i}[\hat{A},\hat{B}] \\
&=\hat{C}
\end{split}
\end{equation}
\qedhere
\end{proof}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Observable operator}
%
% 1 what's the physical meaning of observable operator?
% 2 the theorems related to the observable
%
%
%
In axiom \ref{axiom3}, we have introduced the observable operator;
which has some eigen function to correspond it:
\begin{equation}\label{OPERATOReq:1}
\hat{A}\ket{\Psi_{i}} = a_{i}\ket{\Psi_{i}} \quad i=1, 2, \cdots
\end{equation}
$\ket{\Psi_{i}}$ are $\hat{A}$'s eigen states, and the $a_{i}$ are
the corresponding measurable value for $\hat{A}$.
Firstly, how to physically understand the observable operator? Now
let's start from axiom \ref{axiom4}.
Suggest that $\hat{A}$ is some observable operator, from
(\ref{OPERATOReq:2}) we know that for some arbitrary state of
$\ket{\Psi}$:
\begin{equation}\label{OPERATOReq:9}
\bra{\Psi}\hat{A}\ket{\Psi} = \sum_{i}|c_{i}|^{2}a_{i} \quad
\sum_{i}|c_{i}|^{2} = 1
\end{equation}
Here each $a_{i}$ has some possibility to appear in the final
measurable result, hence the measurement for $\hat{A}$ on
$\ket{\Psi}$ is not certain; maybe the result is $a_{1}$ in this
time measurement, but change to be $a_{2}$ in the next.
Hence, the mean value of $\sum_{i}|c_{i}|^{2}a_{i}$ depicts some
``average'' situation for the measurable result; the real
measurement is fluctuating around this average value. However, if
the $\ket{\Psi}$ is the eigen state for $\hat{A}$, then we have
$\bra{\Psi}\hat{A}\ket{\Psi} = a\langle\Psi|\Psi\rangle =a$; there's
no fluctuation anymore, all the measurements will give only one
certain value, which is the $a$. That's the physical meaning behind
the (\ref{OPERATOReq:9}).
For the observable operator, since the measurable result must be
physically meaningful, it is first demanded to be a hermitian operator.
Secondly, we can have such theorem below for the observable:
\begin{theorem}\label{OPERATOR:3}
For a given observable operator, its eigen states which give
different eigen value are orthogonal with each other.
\end{theorem}
\begin{proof}
Suggest that the operator is $\hat{A}$, then it has two different
eigen states; namely $\ket{\Psi_{1}}$ and $\ket{\Psi_{2}}$, they
give different eigen value of $a_{1}$ and $a_{2}$ ($a_{1} \neq
a_{2}$). Therefore we have:
\begin{align}\label{}
\bra{\Psi_{1}}\hat{A}\ket{\Psi_{2}} &=
a_{2}\langle\Psi_{1}|\Psi_{2}\rangle \nonumber \\
\bra{\Psi_{1}}\hat{A}\ket{\Psi_{2}} &=
a_{1}\langle\Psi_{1}|\Psi_{2}\rangle \Rightarrow \nonumber \\
(a_{1}-a_{2})\langle\Psi_{1}|\Psi_{2}\rangle &= 0 \Rightarrow \nonumber \\
\langle\Psi_{1}|\Psi_{2}\rangle &= 0
\end{align}
\qedhere
\end{proof}
As a result, according to the analysis in section
\ref{LIV_in_Hilbert}, the eigen states for the $\hat{A}$ is able to
constitute into some complete sets to represent the Hilbert space.
However, there's some unproved proposition; that whether the eigen
states for a given observable operator $\hat{A}$ is complete to
describe the corresponding Hilbert space?
This question is very difficult to answer, however, in physics such
proposition is considered to hold true for any observable operator
$\hat{A}$, that the whole eigen states for the $\hat{A}$ are really
constitute some complete sets where the corresponding Hilbert space
can be expressed over this group of basis functions\footnote{Here
such detailed analysis can read the book by Xinlin
Ke\cite{XingLinKe}, PP 33-36}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Subspace for eigen states}
\label{subspace_in_operator}
%
% 1 what's the subspace for eigen states
% 2 physical meaning of subspace
% 3 consider the completeness for the Hilbert space plus the
% subspace
%
%
So far for the observable $\hat{A}$ and its eigen states of
$\ket{\Psi_{i}}$ in (\ref{OPERATOReq:1}), we have only considered
its non-degenerate cases; where its eigen values of $a_{i}$ are
different from each other. However, there are some circumstances where
the eigen values are degenerate:
\begin{equation}\label{OPERATOReq:32}
\hat{A}\ket{\Psi_{ij}} = a_{i}\ket{\Psi_{ij}} \quad j=1, 2, \cdots,
m
\end{equation}
It's easy to see that for such $m$ $\ket{\Psi_{ij}}$, their
arbitrary linear combination $\ket{\Psi}$ is also the eigen state
for $\hat{A}$ and gives the eigen value of $a_{i}$:
\begin{equation}\label{OPERATOReq:33}
\begin{split}
\hat{A}\ket{\Psi} &= \hat{A}(\sum_{j=1}^{m}c_{j}\ket{\Psi_{ij}}) \\
&= \sum_{j=1}^{m}c_{j}(\hat{A}\ket{\Psi_{ij}}) \\
&= \sum_{j=1}^{m}c_{j}a_{i}\ket{\Psi_{ij}} \\
&= a_{i}\sum_{j=1}^{m}c_{j}\ket{\Psi_{ij}} \\
&= a_{i}\ket{\Psi}
\end{split}
\end{equation}
Hence it turns out that such $m$ eigen states has been configured
into some small Hilbert space where all the other vector who gives
the eigen value of $a_{i}$ should be constructed from its linear
combination:
\begin{equation}\label{}
\ket{\Psi} = \sum_{j=1}^{m}c_{j}\ket{\Psi_{ij}} \quad \text{$c_{j}$
is some complex number}
\end{equation}
In quantum mechanics, the concept of subspace is very important.
Here we wish to present an example to demonstrate its importance
that in symmetry theory of quantum mechanics, the subspace for
Hamiltonian operator is considered to carry an irreducible
representation for the symmetry group. Later, we will discuss the
origin for the subspace.
It's only after considering the degeneracy of the eigen states then
the completeness for these eigen states to form the complete sets is
strictly established. According to the theorem (\ref{OPERATOR:3}),
it's easy to see that for any vector in the subspace it's orthogonal
with the eigen states outside this subspace. Furthermore, for such
$m$ eigen states in the subspace, we can always form some orthogonal
basis functions according to section \ref{LIV_in_Hilbert}. Hence, by
counting all the normalized basis functions in subspace, plus the
eigen states that are non-degenerate; we can form the complete sets
to represent the corresponding Hilbert space.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Origin of the degeneracy in eigen states}
\label{origin_degeneracy_in_operator}
%
% the origin for the degeneracy for the eigen states
%
%
Generally to say, the degeneracy of the eigen states is usually due
to the fact that only one physical quantity can not fix the whole
system; in other words, we need other independent physical freedom
to identify the corresponding system.
Now let's give an example. The plane wave function of $\Psi_{p}(x) =
\frac{1}{\sqrt{2\pi\hbar}}e^{\frac{ipx}{\hbar}}$ is the eigen state for the
momentum operator $\hat{p}$ (it's expressed as $\hat{p} = -i\hbar
\frac{\partial }{\partial x}$; in the following content, we will
prove this fact):
\begin{equation}\label{}
-i\hbar \frac{\partial }{\partial
x}\left(\frac{1}{\sqrt{2\pi\hbar}}e^{\frac{ipx}{\hbar}}\right)
=p\Psi_{p}(x)
\end{equation}
Now let's consider the Hamiltonian for the system, it's $\hat{H} =
\frac{p^{2}}{2m}$; it's easily seen that both of the $p$ and $-p$
will give the same energy level so it's double degenerate. Hence for
the free particle state which in energy level $\frac{p^{2}}{2m}$, it
can be $\Psi_{p}(x)$ or $\Psi_{-p}(x)$ or any linear combination
between the two components. However, these wave functions are
physically distinguished with each other. Hence, for such state with
certain energy level, what kind of wave function I can choose to
portray it?
Now let's prove that the origin in degeneracy in the eigen states,
which is the most potential reason for the degeneracy.
\begin{theorem}
Suggest we have some observable $\hat{A}$ and its eigen states
$\ket{\Psi_{i}}$. On the other hand, we have two operators $\hat{F}$
and $\hat{G}$ that both commute with $\hat{A}$, but they do not
commute with each other ($[\hat{F}, \hat{G}] \neq 0$). Then there
must be degeneracy in the eigen states of $\ket{\Psi_{i}}$.
\end{theorem}
\begin{proof}
Now we prove it. For the $\hat{F}$:
\begin{equation}\label{}
\hat{A}\hat{F}\ket{\Psi_{i}} = \hat{F}\hat{A}\ket{\Psi_{i}} =
\hat{F}a_{i}\ket{\Psi_{i}} = a_{i}\hat{F}\ket{\Psi_{i}}
\end{equation}
For $\hat{G}$:
\begin{equation}\label{}
\hat{A}\hat{G}\ket{\Psi_{i}} = \hat{G}\hat{A}\ket{\Psi_{i}} =
\hat{G}a_{i}\ket{\Psi_{i}} = a_{i}\hat{G}\ket{\Psi_{i}}
\end{equation}
However, the two vectors of $\hat{F}\ket{\Psi_{i}}$ and
$\hat{G}\ket{\Psi_{i}}$ should differentiate by more than the
constant, that is : $\hat{F}\ket{\Psi_{i}} \neq C\times
\hat{G}\ket{\Psi_{i}}$. Since $\hat{F}$ and $\hat{G}$ do not commute
with each other, so they can not share the same eigen states
simultaneously (this point will be proved in the next section). On
the other hand, it's clear that they are all the eigen states for
the operator $\hat{A}$, so both of the two eigen states take up the
same eigen value. Thus the eigen states has degeneracy. \qedhere
\end{proof}
Finally, let's give some example to illustrate the degenerate case
above. For hydrogen atom, we have its angular momentum operator of
$\hei{l}$ ($\hei{l}$ is some vector operator) commutes with the
Hamiltonian, however; the three components for the $\hei{l}$ can not
commute with each other:
\begin{equation}\label{}
[\hat{l}_{i}, \hat{l}_{j}] \neq 0 \quad i, j \in x, y, z
\end{equation}
Therefore, it can be well expected that the energy level
corresponding to different angular momentum $l=1, l=2, \cdots$ ($l$
is the eigen states for the angular momentum operator) are all
degenerate; and the calculation result for the hydrogen atom just
confirms such guess.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{The essence of commutation}
\label{essence_in_operator}
%
% derive the Heisenberg relationships
%
%
%
%
In the above section, we have just demonstrated that the commutation
relationship is the origin for the eigen states degeneracy. Thus
what's the essence of the commutation ($\hat{A}\hat{B}=
\hat{B}\hat{A}$)? Here below it can be proved that two operators are
commutative as long as they share the same eigen states.
Suppose we have two operators which correspond to dynamic quality,
so they are hermitian. Consider the $|\Phi\rangle$ below:
\begin{equation}\label{OPERATOReq:19}
|\Phi\rangle = \xi\hat{A}|\Psi\rangle+i\hat{B}|\Psi\rangle
\end{equation}
Here $|\Psi\rangle$ is an arbitrary vector in Hilbert space, and
$\xi$ is an arbitrary real number. What's more, we note that the
corresponding vector in bra is:
\begin{equation}\label{}
\langle\Phi| = \langle\Psi|\hat{A}\xi-i\langle\Psi|\hat{B}
\end{equation}
We have made use of the hermitian character of $\hat{A}$ and
$\hat{B}$.
Now consider the expression below:
\begin{align}\label{OPERATOReq:22}
I(\xi) &= \langle\Phi|\Phi\rangle \nonumber \\
&= \Big\langle \langle\Psi|\hat{A}\xi-i\langle\Psi|\hat{B}\Big|
\xi\hat{A}|\Psi\rangle+i\hat{B}|\Psi\rangle\Big\rangle \geq 0
\end{align}
To expand this expression, then we have:
\begin{multline}\label{OPERATOReq:23}
\Big\langle \bra{\Psi}\hat{A}\xi-i\bra{\Psi}\hat{B} \Big|
\xi\hat{A}\ket{\Psi}+i\hat{B}\ket{\Psi} \Big\rangle =
\\
\xi^{2}\bra{\Psi}\hat{A}^{2}\ket{\Psi} +
i\xi\bra{\Psi}\hat{A}\hat{B}\ket{\Psi} -
i\xi\bra{\Psi}\hat{B}\hat{A}\ket{\Psi}+
\bra{\Psi}\hat{B}^{2}\ket{\Psi} \\
=\xi^{2}\bra{\Psi}\hat{A}^{2}\ket{\Psi} +
\bra{\Psi}\hat{B}^{2}\ket{\Psi} +
i\xi\bra{\Psi}\hat{A}\hat{B}-\hat{B}\hat{A}\ket{\Psi} \\
=\xi^{2}\bra{\Psi}\hat{A}^{2}\ket{\Psi} +
\bra{\Psi}\hat{B}^{2}\ket{\Psi} + i\xi\bra{\Psi}[\hat{A},
\hat{B}]\ket{\Psi}
\end{multline}
Now suggest that $[\hat{A},\hat{B}] = i\hat{C}$. From
(\ref{OPERATOReq:24}) We have known that $\hat{C}$ is also some
hermitian operator.
By using the $\hat{C}$, we can drop the complex term in
(\ref{OPERATOReq:23}):
\begin{equation}\label{OPERATOReq:25}
i\xi\langle\Psi|[\hat{A},\hat{B}]|\Psi\rangle =
i\xi\langle\Psi|i\hat{C}|\Psi\rangle =
i^{2}\xi\langle\Psi|\hat{C}|\Psi\rangle =
-\xi\langle\Psi|\hat{C}|\Psi\rangle
\end{equation}
Let's express the expectation value of
$\langle\Psi|\hat{A}|\Psi\rangle = \overline{A}$, then the
(\ref{OPERATOReq:23}) can be expressed as:
\begin{eqnarray}\label{EIGENCTONSeq:4}
% \nonumber to remove numbering (before each equation)
I(\xi) &=& \xi^{2}\overline{A^{2}} - \xi\overline{C} +
\overline{B^{2}} \nonumber \\
&=& \overline{A^{2}}
\left ( \xi - \frac{\overline{C}}{2\overline{A^{2}}}\right)^{2} +
\left ( \overline{B^{2}} - \frac{\overline{C}^{2}}{4\overline{A^{2}}}
\right) \nonumber \\
&\geq& 0
\end{eqnarray}
Additionally, we mention that since $\hat{A}$ and $\hat{B}$ are
hermitian operators, then $\hat{A}^{2}$ and $\hat{B}^{2}$ is also
hermitian. So their expectation value is real.
Since that $\xi$ is an arbitrary number, we can make $\xi =
\frac{\overline{C}}{2\overline{A^{2}}}$. Thus we have
$\overline{B^{2}} - \frac{\overline{C}^{2}}{4\overline{A^{2}}} \geq
0$. This equation finally leads to:
\begin{eqnarray}\label{OPERATOReq:26}
% \nonumber to remove numbering (before each equation)
\overline{A^{2}}\cdot
\overline{B^{2}} &\geq& \frac{1}{4}\overline{C}^{2} \rightarrow \nonumber \\
\sqrt{ \overline{A^{2}}\cdot
\overline{B^{2}}} &\geq& \frac{1}{2} |\overline{C}|
\end{eqnarray}
Here $|\overline{C}|$ indicates the absolute value for the
$\overline{C}$.
Now let's make some transformation:
\begin{equation}\label{}
\begin{split}
\hat{C} &= \frac{1}{i}
[\hat{A}\hat{B} - \hat{B}\hat{A}] \Rightarrow \\
|\overline{C}| &= |\frac{1}{i}\overline{[\hat{A},\hat{B}]}|
\\
&= |\frac{1}{i}| |\overline{[\hat{A},\hat{B}]}| \\
&= |\overline{[\hat{A},\hat{B}]}|
\end{split}
\end{equation}
So the result in the (\ref{OPERATOReq:26}) finally can be:
\begin{equation}\label{OPERATOReq:27}
\sqrt{\overline{A^{2}}\cdot \overline{B^{2}}} \geq \frac{1}{2}
\left|\overline{[\hat{A}, \hat{B}]} \right|
\end{equation}
Next let's make some modification to the result to get some
conclusion. Since $\overline{A}$ is the average value of the
operator A; now in the (\ref{OPERATOReq:27}) we make that the
$\hat{A}$ and $\hat{B}$ are replaced by $\Delta\hat{A}$ and $\Delta
\hat{B}$, respectively:
\begin{equation}\label{}
\Delta \hat{A} = \hat{A} - \langle\Phi|\hat{A}|\Phi\rangle
\end{equation}
Since $\langle\Phi|\hat{A}|\Phi\rangle$ is some real number, we can
prove that $\Delta\hat{A}$ is also some hermitian operator.
Furthermore, we can show that:
\begin{equation}\label{}
\begin{split}
[\Delta \hat{A}, \Delta \hat{B}] &=
[\hat{A} - \overline{A}, \hat{B} - \overline{B}] \\
&= [\hat{A}, \hat{B}]
\end{split}
\end{equation}
After such replacement, the (\ref{OPERATOReq:27}) becomes:
\begin{equation}\label{OPERATOReq:35}
\sqrt{\overline{\Delta \hat{A}^{2}}\cdot \overline{\Delta
\hat{B}^{2}}} \geq \frac{1}{2} \left|\overline{[\Delta\hat{A},
\Delta\hat{B}]} \right| \Rightarrow \sqrt{\overline{\Delta
\hat{A}^{2}}\cdot \overline{\Delta \hat{B}^{2}}} \geq \frac{1}{2}
\left|\overline{[\hat{A}, \hat{B}]} \right|
\end{equation}
For the $\overline{\Delta \hat{A}^{2}}$, actually we can prove that
$\overline{\Delta \hat{A}^{2}}= \overline{\Delta \hat{A}}^{2}$.
However, since this result is trivial and not be used in other
place, we do not intend to give the proof; instead just to accept
this result. so the result can be finally transformed as:
\begin{equation}\label{OPERATOReq:31}
\overline{\Delta A}\cdot \overline{\Delta B} \geq \frac{1}{2}
\left|\overline{[\hat{A}, \hat{B}]} \right|
\end{equation}
This result holds true for any two hermite operators. What's more,
in this universal inequation of (\ref{OPERATOReq:31}) the
implication within it is some very important and physical
meaningful.
For two arbitrary hermite operators of $\hat{A}$ and $\hat{B}$; if
they are not commutative, $\overline{[\hat{A}, \hat{B}]} \neq 0$. Thus
$\Delta A$ and $\Delta B$ can not equal to $0$ simultaneously. As a
result, the measurement of $\hat{A}$ and the measurement of
$\hat{B}$ can not have definite measurement result at the same time;
in other words, $\hat{A}$ and $\hat{B}$ can not share the same eigen
states. By the way, what we have deducted in the above process is
the universal uncertainty principle. If we have $\hat{A} = \hat{x}$
and $\hat{B} = \hat{p}$, according to the (\ref{OPERATOReq:31}), it
leads to: $\Delta x\Delta p \geq \frac{\hbar}{2}$, here we have used
the relation that $[\hat{x}, \hat{p}] = i\hbar$ (this relation will
be revealed in the following chapter). That's the final conclusion
behind the commutation.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Complete sets of commuting observables}
\label{CSCO_in_operator}
%
% What's the complete sets of commuting observables?
% How to do that?
%
In section \ref{origin_degeneracy_in_operator}, we have put forward
plane wave function example to show that there's some uncertainty in
fixing the wave functions if there's degenerate situation. Thus, how
to eliminate such uncertainty in the description for Hilbert space?
Generally to say, while there's some degeneracy in the the space of
eigen states, we can use a group of observable operators rather than
one observable operator to determine the corresponding Hilbert
space. In this group of operators any two of them commute with each
other (physically to say, each physical quantity provides a freedom
to specify the investigated system) so that to ensure they can share
the same eigen states. It can be proved that this group of
observable operators determine some ``definite'' Hilbert space to
describe the quantum system. Therefore, this group is called
``complete set of commuting observables''; in the future, they
will be abbreviated as ``CSCO''.
Now let's going to demonstrate the validity of the above words. The
validity can be confirmed by the theorem below:
\begin{theorem}
For two commutative observable operators of $\hat{A}$ and $\hat{B}$,
there must exist a common set of eigen states for both of them.
\end{theorem}
Here for simplicity we only consider the discrete case. However, for
the continuous Hilbert space such as plane wave functions, we can
get the same result; but the mathematical demonstration is much more
complicated.
\begin{proof}
Suggest that \kett{\Psi_{i}} ($i=1, 2, \cdots$) are the complete set
of normalized eigen states for the $\hat{A}$. We can know that
$\hat{B}\ket{\Psi_{i}}$ is also $\hat{A}$'s eigen states:
\begin{equation}\label{}
\hat{A}(\hat{B}\ket{\Psi_{i}}) = \hat{B}\hat{A}\ket{\Psi_{i}}
=a_{i}(\hat{B}\ket{\Psi_{i}})
\end{equation}
The process will be classified into two parts in terms of the
presence of degeneracy in the space of $\ket{\Psi_{i}}$.
First, there's no degeneracy in the eigen states of $\hat{A}$.
Thereby, if $\hat{A}\ket{\Psi_{i}} = a_{i}\ket{\Psi_{i}}$, and
$\hat{A}(\hat{B}\ket{\Psi_{i}}) = a_{i}(\hat{B}\ket{\Psi_{i}})$; the
$\ket{\Psi_{i}}$ and $\hat{B}\ket{\Psi_{i}}$ should distinguish by
only an constant: $\hat{B}\ket{\Psi_{i}} = b_{i}\ket{\Psi_{i}}$.
Hence each of $\ket{\Psi_{i}}$ is also the eigen function for
$\hat{B}$, and the eigen value is $b_{i}$.
Second, there's degeneracy in the eigen states of $\hat{A}$.
suggest that there's a $s$ dimensional orthogonal subspace ($s$ can
be some infinite number, but the proof below is same) of
$\ket{\Psi_{mi}}$ ($i=1, 2, \cdots, s$); they all give the same
eigen value of $l$ by $\hat{A}\ket{\Psi_{mi}} = l\ket{\Psi_{mi}}$ .
Now base on this subspace we are going to construct a new set of
$\ket{\Psi^{'}_{m1}}, \cdots, \ket{\Psi^{'}_{ms}}$; each of them are
the linear combination of the original vectors:
\begin{equation}\label{}
\ket{\Psi^{'}_{mi}} = \sum^{s}_{j=1}c_{ij}\ket{\Psi_{mj}}
\end{equation}
each of the $\ket{\Psi^{'}_{mi}}$ is the eigen function for both
$\hat{A}$ and $\hat{B}$.
So we have (we use $\ket{\Psi^{'}_{m}}$ to represent an arbitrary
$\ket{\psi^{'}_{mi}}$):
\begin{eqnarray}
% \nonumber to remove numbering (before each equation)
\hat{B} \ket{\Psi^{'}_{m}} &=& b^{'}\ket{\Psi^{'}_{m}} \nonumber \\
&=&b^{'} \sum^{s}_{j=1}c_{j}\ket{\Psi_{mj}}
\end{eqnarray}
By multiplying $\bra{\Psi_{m1}}, \bra{\Psi_{m2}}, \cdots,
\bra{\Psi_{ms}}$ to the above equation, we can get $s$ equations:
\begin{eqnarray}
% \nonumber to remove numbering (before each equation)
\sum^{s}_{j=1}c_{j} \bra{\Psi_{m1}}\hat{B}\ket{\Psi_{mj}} &=& b^{'}c_{1} \nonumber \\
\sum^{s}_{j=1}c_{j} \bra{\Psi_{m2}}\hat{B}\ket{\Psi_{mj}} &=& b^{'}c_{2} \nonumber \\
\cdots &\cdots& \cdots \nonumber \\
\sum^{s}_{j=1}c_{j} \bra{\Psi_{ms}}\hat{B}\ket{\Psi_{mj}} &=& b^{'}c_{s}
\end{eqnarray}
Here we implicitly use the orthonormality of the subspace basis:
$\langle\Psi_{mi}|\Psi_{mj}\rangle = \delta _{ij}$.
If the $\bra{\Psi_{mi}}\hat{B}\ket{\Psi_{mj}}$ is abbreviated as
$B_{ij}$, it can see that the above equations can be transformed in
to a matrix:
\begin{equation}\label{SEeq:4}
\begin{bmatrix}
B_{11}-b^{'} & B_{12} & \cdots & B_{1s} \\
B_{21} & B_{22}-b^{'} & \cdots & B_{2s} \\
\cdots & \cdots & \cdots & \cdots \\
B_{s1} & B_{s2} & \cdots & B_{ss}-b^{'} \\
\end{bmatrix}
\begin{bmatrix}
c_{1} \\
c_{2} \\
\cdots \\
c_{s} \\
\end{bmatrix}
= 0
\end{equation}
Here in this matrix, $B_{ij}$ is some number we can calculate out,
the coefficient of $c_{i}$ is the solution we are pursuing, the
$b^{'}$ is the corresponding eigen values, which is also need to
know.
The presence of the solution of $c_{i}$ requires that:
\begin{equation}\label{}
\begin{vmatrix}
B_{11}-b^{'} & B_{12} & \cdots & B_{1s} \\
B_{21} & B_{22}-b^{'} & \cdots & B_{2s} \\
\cdots & \cdots & \cdots & \cdots \\
B_{s1} & B_{s2} & \cdots & B_{ss}-b^{'} \\
\end{vmatrix} = 0
\end{equation}
For this determinant, it has roots of $b^{'}_{i}$ ($i=1, 2, \cdots,
s$), some of them may get to be same. For each of $b^{'}_{i}$, the
linear equation of (\ref{SEeq:4}) can yield a group of coefficients
of $c_{ij}$ ($j=1, 2, \cdots, s$); thus the $\ket{\Psi^{'}_{mi}}$
has been fixed out.
If different $b^{'}_{i}$ takes different values, the set of Hilbert
space therefore has been determined. If there's still degeneracy in
the new space, we will take another operator of $\hat{C}$ which
commuted with the $\hat{A}$ and $\hat{B}$ to work out some new space
until the complete and definite Hilbert space has been found out.
\qedhere
\end{proof}
For the complete set of observable operators, we can use their eigen
values to label each vector in the Hilbert space. For example, for
the $\ket{\Psi_{i}}$, the $\hat{A}$, $\hat{B}$ and $\hat{C}$
correspond to the eigen values as $a_{i}$, $b_{i}$ and $c_{i}$
respectively; so this vector can be abbreviated as
$\ket{a_{i}b_{i}c_{i}}$. In the H atom schrodinger equation, we will
see how can we do this.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Further discussion to the operator}
\label{further_in_operator}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{How to bridge the physical quantities in quantum mechanics
with the ones in classic mechanics}
\label{vector_schalar_in_operator}
%
% 1 how to understand the correspondence between the physical
% quantities in quantum mechanics and the ones in classic
% 1.1 how to build the operators? from r and p
% 1.2 they are correspond to each other
% 2 axiom for the r and p
% 3 representations for the r and p
% 4 vector operator and scalar operator
%
So far we have made many discussions to the general properties of
the operator, which is concentrating on the mathematical properties
of the operators. However, how can be understand the physical
essence of the operator? How can we bridge the physical quantities
in quantum mechanics with the ones in classic mechanics?
The understanding for the physical quantity in quantum mechanics
should be based on the physical quantities in classical mechanics.
In most cases, we have physical quantities one to one mapping
between the microworld to the macrocosm, that is to say; both of
them share the same expressions. The only difference is, in classic
mechanics such physical quantities are expressed by variables who
are defining on the real number field; and in quantum mechanics the
corresponding physical quantities are expressed by operators, which
is restrict to be hermitian type. However, the expression for them
are same. For example, the kinetic energy are both expressed as
$\frac{\bm{p}^{2}}{2m}$, the angular momentum are both expressed as
$\bm{r}\times\bm{p}$ etc. In essence, perhaps such one to one
correspondence depicts some kind of ``continuity'' and
``consistency'' for our real world.
However, there's not all the physical quantities that can be found
one to one correspondence between the the microworld and the
macrocosm. For example, the spin is contained in quantum particles
but diminished in classical objects. The spin phenomenon is some
relativistic effects, so we can only ``phenomenologically''
introduce the spin operator by analogous with the angular momentum
operator; the detailed will be discussed in the following contents.
Nevertheless, for most of the physical quantities in quantum
mechanics, such one to one mapping relation exists. Hence we can
construct the operators in quantum mechanics by analogous with the
corresponding physical quantities in classic mechanics.
Additionally, there are two important illustrations for the one to
one corresponding relationship:
\begin{itemize}
\item the vector quantities in classical mechanics is still
corresponding to vector quantities, so as the scalar physical
quantities.
\item in classical mechanics, all the physical quantities can be
expressed based on some function of
position $\bm{r}$ and momentum $\bm{p}$, hence in quantum
mechanics all the physical operators can be constructed
by position operator and momentum operator.
\end{itemize}
In the following content, the vector operators will be labeled as
$\hei{A}$, and the scalar operator is labeled as $\hat{A}$.
So far it can be seen that, similar to the classical mechanics, the
position and the momentum also constitute the foundation for
quantum mechanics. However, since the methodology in classical
mechanics and the quantum mechanics are distinguished greatly, so we
can expect that there must have some ``distinctness'' between the
description for the position and the momentum, and actually we
indeed have an axiom:
\begin{axiom}\label{axiom5}
\textbf{In quantum mechanics, each particle's position operator
under Cartesian coordinate, namely the $\hat{x}_{i}$ ($\hat{x}_{1} =
\hat{x}$, $\hat{x}_{2} = \hat{y}$ and $\hat{x}_{3} = \hat{z}$); and
it's corresponding momentum operator $\hat{p}_{i}$ satisfy the
commutation rules below:
\begin{equation}\label{PRAMReq:37}
[\hat{x}_{i}, \hat{x}_{j}] = 0 \quad [\hat{p}_{i}, \hat{p}_{j}] = 0
\quad [\hat{x}_{i}, \hat{p}_{j}] = i\hbar\delta_{ij}
\end{equation}
Furthermore, the operators for different particle are all commuting
with each other.}
\end{axiom}
Now let's give some physical analysis to the axiom above. According
to the previous discussion in (\ref{CSCO_in_operator}), if two
physical operators are commuting with each other; physically such
two operators stand for two independent freedoms to describe the
corresponding system. Therefore, the relation $[\hat{x}_{i},
\hat{p}_{j}] = i\hbar\delta_{ij}$ indicates that the position
information and momentum information for the system are not
independent with each other. On the other hand, if we can obtain the
expression for $\hei{r}$, then the (\ref{PRAMReq:37}) indicates that
we can get the expression for $\hei{p}$ via $\hei{r}$. Hence from
this aspect, they are also not independent.
Here such knowledge has been already obtained through the
commutation relationship discussion in (\ref{essence_in_operator}),
there the analysis on the commutation shows a coherent interaction
between the position information and the momentum information.
What's more, in the discussion related to the basis functions (see
the \ref{sec:PWF_in_Hilbert}), we can see something interesting that
for any square-integrable wave function, from Fourier transformation
it can be decompressed into the $\Phi(x)$ (the eigen state for
$\hat{x}$) or the $\Psi(p)$ (the eigen state for $\hat{p}$). Such
relation indicates that we can express the quantum state either by
position or by momentum; they are identical with each other. This is
another character to understand the dependence between the position
information and momentum information.
Furthermore, in the following chapter we will prove such relation
that if we express the momentum operator via position operator, then
the correspondent quantum state can be expressed as some function of
position; on the other hand, if the position operator is expressed
via the momentum operator, then the quantum state can be expressed
as some function of momentum; such relation can be depicted as:
\begin{equation}\label{OPERATOReq:34}
\begin{split}
\hei{r} = \hei{r}(\bm{r}), \hei{p} = \hei{p}(\bm{r})
&\Rightarrow
\ket{\Psi} \Leftrightarrow \Psi(\bm{r}) \\
\hei{r} = \hei{r}(\bm{p}), \hei{p} =
\hei{p}(\bm{p}) &\Rightarrow \ket{\Psi} \Leftrightarrow
\Psi(\bm{p})
\end{split}
\end{equation}
The corresponding $\Psi(\bm{r})$ is the position representation for
vector of $\ket{\Psi}$ in given Hilbert space, and similarly
$\Psi(\bm{p})$ is called momentum representation.
In the derivation for such relation, we firstly introduce the eigen
states for the position and the momentum operator (From the
discussion in (\ref{ASCWFFFP_in_basic}) it can see that their eigen
states are only for free particles. The wave function in
(\ref{BASICeq:14}) is the eigen states for momentum operator, and by
Fourier transformation in (\ref{BASICeq:19}) and (\ref{BASICeq:20})
we can get the corresponding eigen state for position operator).
Here the details for the eigen functions will be omitted and they
are only expressed as:
\begin{align}
\label{PRAMReq:1}
\hat{x}\ket{x} &= x\ket{x} \nonumber \\
\hat{p}\ket{p} &= p\ket{p}
\end{align}
By investigating the changing of eigen states and eigen values, and
through the transformation between the $\ket{x}$ and $\ket{p}$; we
can finally get the expression that how to express $\hat{p}$ via
$\hat{x}$, or express $\hat{x}$ via $\hat{p}$. The details will be
given in chapter \ref{position_momentum representation}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Operator functions}
\label{operator_functions_in_operator}
%
%
%
the operators can be further organized as some function form. In the
following contents, we can see that such form is vital in some
discussions (see \ref{eigen_states_in_position_momentum}). For
example, we can have such expression for $\hat{A}$:
\begin{equation}\label{}
\hat{B}_{\xi} = e^{\frac{i}{\hbar}\xi\hat{A}}
\end{equation}
According to the results in mathematical analysis, such expression
can be expanded into series that:
\begin{equation}
\hat{B}_{\xi} = 1 + \left(\frac{i}{\hbar}\xi\hat{A}\right) +
\frac{\left(\frac{i}{\hbar}\xi\hat{A}\right)^{2}}{2!} +
\frac{\left(\frac{i}{\hbar}\xi\hat{A}\right)^{3}}{3!} + \cdots
\end{equation}
Similarly, we can also have the differential etc. operations to the
operator. The details for this part discussion can be referred to
the book by XingLin Ke\cite{XingLinKe}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Unitary operator}\label{OPERATOR:2}
%
% the definition of the unitary operator
%
%
Now we introduce another important operator in the quantum
mechanics, which is called unitary operator. It can be
mathematically defined as:
\begin{align}\label{OPERATOReq:10}
\hat{U}^{+}\hat{U} &= \hat{U}\hat{U}^{+}= I \nonumber \\
U^{+} &= U^{-1}
\end{align}
Such unitary operator has some important feature that it's able to
commute with all the other operators which represents some physical
quantity:
\begin{align}\label{OPERATOReq:14}
\hat{A}\hat{U} &= \hat{U}\hat{A} \nonumber \\
\hat{A}\hat{U}^{+} &= \hat{U}^{+}\hat{A}
\end{align}
Physically to say, the unitary operator is usually corresponding to
some transformation between two sets of basis functions in the same
Hilbert space. Such transformation is also called unitary
transformation. For example, in quantum chemistry we can get a group
of orbitals after Hatree-Fock calculation, then we can convert such
orbitals which dispersed among the whole molecule into some
localized ones, such transformation is just done by the unitary
operator (see the \ref{HFT:3} for more contents).
According to the analysis in the (\ref{LIV_in_Hilbert}), the basis
functions which depicts the same Hilbert space are actually
identical with each other. Hence here we usher in some important
question: How to describe the identity between two sets of basis
functions in the same Hilbert space?
Here we have two general rules:
\begin{itemize}\label{OPERATOReq:13}
\item Two sets of basis functions should retain the same
space structure, that is to say:
\begin{equation}\label{}
|\Psi\rangle = \hat{U}|\Phi\rangle \quad
\langle\Phi_{i}|\Phi_{j}\rangle = \delta_{ij} \Rightarrow
\langle\Psi_{i}|\Psi_{j}\rangle = \delta_{ij}
\end{equation}
\item For any arbitrary operator of $\hat{A}$ the
expectation value should be same:
\begin{equation}
\langle\Phi_{i}|\hat{A}|\Phi_{j}\rangle =
\langle\Psi_{i}|\hat{A}|\Psi_{j}\rangle
\end{equation}
\end{itemize}
Now let's prove that the (\ref{OPERATOReq:10}) satisfies the
identity requirements. Suppose that we have some discrete basis
functions of $\Phi_{i}$ ($i = 1, 2, \cdots, n, \cdots$) to depict
the Hilbert space, then some unitary transformation is used to
convert it into some new sets:
\begin{equation}\label{}
|\Psi_{i}\rangle = \hat{U}|\Phi_{i}\rangle
\end{equation}
then we can have:
\begin{align}\label{OPERATOReq:11}
\langle \Psi_{i}|\Psi_{j}\rangle & = \langle
\hat{U}\Phi_{i}|\hat{U}\Phi_{j}\rangle \nonumber \\
&=\langle
\Phi_{i}|\hat{U}^{+}\hat{U}|\Phi_{j}\rangle \nonumber \\
&=\langle \Phi_{i}|\Phi_{j}\rangle
\end{align}
Besides, since the unitary operator makes a one-to-one correspondence
between $\Phi_{i}$ and $\Psi_{i}$, the old sets of $\Phi_{i}$ and
the new sets of $\Psi_{i}$ share the same space structure. The
(\ref{OPERATOReq:13}) has been proved.
Now let's prove the (\ref{OPERATOReq:13}):
\begin{align}\label{}
\langle\Psi_{i}|\hat{A}|\Psi_{j}\rangle &=
\langle\Phi_{i}|\hat{U}^{+}\hat{A}\hat{U}|\Phi_{j}\rangle \nonumber \\
&=\langle\Phi_{i}|\hat{A}\hat{U}^{+}\hat{U}|\Phi_{j}\rangle
\underrightarrow{
\text{ from the definition in \ref{OPERATOReq:14}}}\nonumber \\
&=\langle\Phi_{i}|\hat{A}|\Phi_{j}\rangle
\end{align}
Hence we can see that the unitary transformation does not alter the
characters of Hilbert space.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Density operator}\label{OPERATOR:1}
%
%
%
%
In the above content, we have defined the inner product between two
vectors in Hilbert space as $\langle\Phi|\Psi\rangle$. Now if we
rewrite its sequence as $|\Psi\rangle\langle\Phi|$, so what is it?
Actually such type of ``product'' between $|\Psi\rangle$ and
$\langle\Phi|$ is a kind of operator. For example, suggest
$|\Omega\rangle$ is some arbitrary state, we have:
\begin{equation}\label{}
|\Psi\rangle\langle\Phi|\Omega\rangle =
(\langle\Phi|\Omega\rangle)|\Psi\rangle
\end{equation}
So it just converts the $|\Omega\rangle$ into some new state of
$(\langle\Phi|\Omega\rangle)|\Psi\rangle$.
Usually in quantum mechanics we use the operator in the form of
$\ket{\Psi}\bra{\Psi}$, here $\ket{\Psi}$ is some arbitrary vector
in the Hilbert space. For this type of operator, we call it as
``density operator''. Now let's discuss the characters related to
this operator. Now we define this operator as $\hat{\gamma}$.
\begin{theorem}\label{}
$\hat{\gamma}^{+} = \hat{\gamma}$. So $\hat{\gamma}$ is hermitian.
\end{theorem}
\begin{proof}
\begin{equation}\label{}
\hat{\gamma}^{+} = (\ket{\Psi}\bra{\Psi})^{+} = \ket{\Psi}\bra{\Psi}
= \hat{\gamma}
\end{equation}
Hence the operator of $\hat{\gamma}$ is hermitian. \qedhere
\end{proof}
\begin{theorem}\label{}
For any $\Phi$, $\langle\Phi|\hat{\gamma}|\Phi\rangle \geq 0$.
\end{theorem}
\begin{proof}
\begin{equation}\label{}
\langle\Phi|\hat{\gamma}|\Phi\rangle =
\langle\Phi|\Psi\rangle\langle\Psi|\Phi\rangle =
|\langle\Phi|\Psi\rangle|^{2} \geq 0
\end{equation}
\qedhere
\end{proof}
\begin{theorem}\label{}
$\hat{\gamma}^{2} = \hat{\gamma}$.
\end{theorem}
\begin{proof}
\begin{equation}\label{}
\hat{\gamma}^{2} = \ket{\Psi}\langle\Psi|\Psi\rangle\bra{\Psi} =
\ket{\Psi}\bra{\Psi} = \hat{\gamma}
\end{equation}
So $\hat{\gamma}$ is idempotent. \qedhere
\end{proof}
\begin{theorem}\label{}
$\hat{\gamma}\hat{B} = \hat{B}\hat{\gamma}$ if $\hat{B}$ is
hermitian.
\end{theorem}
\begin{proof}
Suggest that we have some arbitrary wave function of $\Phi$:
\begin{equation}\label{}
\langle\Phi|\hat{\gamma}\hat{B}|\Phi\rangle =
\langle\Phi\ket{\Psi}\bra{\Psi}\hat{B}|\Phi\rangle =
\bra{\Psi}\hat{B}|\Phi\rangle\langle\Phi\ket{\Psi} =
\bra{\Phi}\hat{B}|\Psi\rangle\langle\Psi\ket{\Phi} =
\langle\Phi|\hat{B}\hat{\gamma}|\Phi\rangle
\end{equation}
So $\hat{\gamma}$ is able to exchange with any hermitian operators.
\qedhere
\end{proof}
Finally, let's prove some very important equation in quantum
mechanics:
\begin{theorem}\label{}
For any arbitrary $\hat{\gamma}$, we have:
\begin{equation}\label{}
i \hbar\frac{\partial \hat{\gamma}}{\partial t} = [\hat{H},
\hat{\gamma}]
\end{equation}
\end{theorem}
\begin{proof}
Suggest that $\hat{\gamma} = \ket{\Psi}\bra{\Psi}$, now let's start
from the Schrodinger equation:
\begin{align}\label{}
\hat{H}\ket{\Psi} &= i \hbar \frac{\partial \ket{\Psi}}{\partial t}
\nonumber \\
\bra{\Psi}\hat{H} &= -i \hbar \frac{\partial \bra{\Psi}}{\partial t}
\end{align}
Hence we have:
\begin{equation}\label{}
\begin{split}
i \hbar\frac{\partial \hat{\gamma}}{\partial t} &=
i \hbar \frac{\partial \ket{\Psi}\bra{\Psi}}{\partial t}\\
&=
i \hbar \frac{\partial \ket{\Psi}}{\partial t}\bra{\Psi} +
\ket{\Psi}i \hbar \frac{\partial \bra{\Psi}}{\partial t} \\
&= (\hat{H}\ket{\Psi})\bra{\Psi} - \ket{\Psi}(\bra{\Psi}\hat{H})
\\
&= [\hat{H}, \hat{\gamma}]
\end{split}
\end{equation}
\qedhere
\end{proof}
Now let's go to see some specific kind of $\hat{\gamma}$:
\begin{equation}\label{OPERATOReq:15}
\hat{\gamma}_{i} =|\Psi_{i}\rangle\langle\Psi_{i}|
\end{equation}
Here $|\Psi_{i}\rangle$ designates the $i$th component in the basis
functions of $\Psi_{i}$ ($i = 1, 2, \cdots$). It can see that for
some arbitrary state of $|\Psi\rangle$, we have:
\begin{equation}\label{}
\hat{\gamma}_{i}|\Psi\rangle =
|\Psi_{i}\rangle\langle\Psi_{i}|\Psi\rangle = c_{i}|\Psi_{i}\rangle
\end{equation}
$c_{i}$ characterizes the weight of the component $i$ in
$|\Psi\rangle$, so the effect of the operator is just to project out
the $i$th component of $|\Psi\rangle$. Hence, this kind of
operator is also called the ``projection operator''.
Finally, we note that if we add all the $\hat{\gamma}_{i}$ across
the whole Hilbert space:
\begin{equation}\label{}
\hat{\Gamma} = \sum_{i} \hat{\gamma}_{i}
\end{equation}
We can see that for some arbitrary state of $|\Psi\rangle$:
\begin{align}\label{OPERATOReq:18}
\hat{\Gamma}|\Psi\rangle &=
\sum_{i}|\Psi_{i}\rangle\langle\Psi_{i}|\Psi\rangle \nonumber \\
&=\sum_{i}(\langle\Psi_{i}|\Psi\rangle)\,|\Psi_{i}\rangle
\underrightarrow{
\text{ according to \ref{Hilbert:1}}}\nonumber \\
&=|\Psi\rangle
\end{align}
Hence we have $\hat{\Gamma} \equiv I$. If we compare the
(\ref{OPERATOReq:18}) with the (\ref{Hilberteq:15}), we can see that
they are two faces of the same coin. So the
(\ref{OPERATOReq:18}) is another expression for the closure relation
(more details see \ref{sec:CR_in_Hilbert}).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "../../main"
%%% End:
|
{"hexsha": "d6b73dea86f65da4c1c21c43357b3b43f19df458", "size": 61264, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "theory/physics/operator.tex", "max_stars_repo_name": "murfreesboro/fenglai-note", "max_stars_repo_head_hexsha": "7bdf943f681e54948cd68775a31e4c93a53a13f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-16T07:23:48.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-16T07:23:48.000Z", "max_issues_repo_path": "theory/physics/operator.tex", "max_issues_repo_name": "murfreesboro/fenglai-note", "max_issues_repo_head_hexsha": "7bdf943f681e54948cd68775a31e4c93a53a13f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "theory/physics/operator.tex", "max_forks_repo_name": "murfreesboro/fenglai-note", "max_forks_repo_head_hexsha": "7bdf943f681e54948cd68775a31e4c93a53a13f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6799468792, "max_line_length": 87, "alphanum_fraction": 0.6944861583, "num_tokens": 18666}
|
SUBROUTINE scrv1(x1,y1,x2,y2,a,b)
!
! author : amare retta
! + + + purpose + + +
! Compute the parameters a and b of an s-curve from two sample
! points (x1,y1) and (x2,y2). Both relations below are solved
! simultaneously for a and b:
!   log(|x1|/y1 - |x1|) = a - b*|x1|
!   log(|x2|/y2 - |x2|) = a - b*|x2|
! NOTE(review): the log argument groups as (|x|/y - |x|), not
! |x|/(y - |x|); confirm the intended s-curve form against callers.
!
IMPLICIT NONE
!
! Subroutine arguments
!
REAL, INTENT(IN) :: x1, y1, x2, y2   ! two sample points on the curve
REAL, INTENT(OUT) :: a, b            ! fitted s-curve parameters
!
! Local variables
!
REAL :: xx, xx1, xx2
!
! Signs of the abscissae are ignored.
xx1 = ABS(x1)
xx2 = ABS(x2)
! Standard generic LOG replaces the obsolescent specific ALOG,
! which was previously declared as a local REAL shadowing the
! intrinsic.
xx = LOG(xx1/y1-xx1)
b = (xx-LOG(xx2/y2-xx2))/(xx2-xx1)
a = xx + b*xx1
!
END SUBROUTINE scrv1
|
{"hexsha": "58c3e2d1e2b2359237373bb00503453d7a23d406", "size": 691, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Project Documents/Source Code Original/Scrv1.f90", "max_stars_repo_name": "USDA-ARS-WMSRU/upgm-standalone", "max_stars_repo_head_hexsha": "1ae5dee5fe2cdba97b69c19d51b1cf61ceeab3d7", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Project Documents/Source Code Original/Scrv1.f90", "max_issues_repo_name": "USDA-ARS-WMSRU/upgm-standalone", "max_issues_repo_head_hexsha": "1ae5dee5fe2cdba97b69c19d51b1cf61ceeab3d7", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Project Documents/Source Code Original/Scrv1.f90", "max_forks_repo_name": "USDA-ARS-WMSRU/upgm-standalone", "max_forks_repo_head_hexsha": "1ae5dee5fe2cdba97b69c19d51b1cf61ceeab3d7", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.7428571429, "max_line_length": 51, "alphanum_fraction": 0.5557163531, "num_tokens": 286}
|
import pandas as pd
import numpy as np

# Load the dataset in a dataframe object and keep only the feature
# columns listed below.
df = pd.read_csv("data3.csv")
include = ['Longeivity','Length of screen name','Does the profile have a description','Length of the description','Does the profile have a URL','Followee count of the user','Follower count of the user','Followee-by-follower ratio','Total number of tweets','Annotation (0: Bot, 1: Normal customers, 2: Promotional customers, 3: Genuine users)']
# .copy() so the NaN-filling below writes into a real frame, not a
# view of `df` (avoids SettingWithCopy warnings / silent no-ops).
df_ = df[include].copy()

# Data preprocessing: one-hot encode object (string) columns,
# zero-fill NaNs in numeric columns.
categoricals = []
for col, col_type in df_.dtypes.items():  # .iteritems() was removed in pandas 2.0
    if col_type == 'O':
        categoricals.append(col)
    else:
        df_[col] = df_[col].fillna(0)  # explicit assignment instead of chained inplace fill
df_ohe = pd.get_dummies(df_, columns=categoricals, dummy_na=True)

# Logistic Regression classifier
from sklearn.linear_model import LogisticRegression

dependent_variable = 'Annotation (0: Bot, 1: Normal customers, 2: Promotional customers, 3: Genuine users)'
x = df_ohe[df_ohe.columns.difference([dependent_variable])]
y = df_ohe[dependent_variable]
lr = LogisticRegression()
lr.fit(x, y)
# NOTE: this scores on the training set, so it is not a
# generalization estimate.
model_accuracy = lr.score(x, y)
print(model_accuracy)

# Save the model. sklearn.externals.joblib was removed in
# scikit-learn 0.23; fall back to the standalone joblib package.
try:
    from sklearn.externals import joblib
except ImportError:
    import joblib
joblib.dump(lr, 'mod.pkl')
print("Model dumped!")

# Load the model that was just saved to verify the artifact works.
lr = joblib.load('mod.pkl')

# Save the training column order so serving code can align its input.
model_columns = list(x.columns)
joblib.dump(model_columns, 'model_columns.pkl')
print("Models columns dumped!")
|
{"hexsha": "6b4612c64937cc36b0e070962d212e981ef4a9b8", "size": 1505, "ext": "py", "lang": "Python", "max_stars_repo_path": "final_app/lr.py", "max_stars_repo_name": "PCOXX/DeCORE", "max_stars_repo_head_hexsha": "0e664901cbe67fe491f24adb3fa3149a4061db48", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-03-08T03:59:51.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-08T03:59:55.000Z", "max_issues_repo_path": "final_app/lr.py", "max_issues_repo_name": "Impranjal/DeCORE", "max_issues_repo_head_hexsha": "0e664901cbe67fe491f24adb3fa3149a4061db48", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "final_app/lr.py", "max_forks_repo_name": "Impranjal/DeCORE", "max_forks_repo_head_hexsha": "0e664901cbe67fe491f24adb3fa3149a4061db48", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8333333333, "max_line_length": 364, "alphanum_fraction": 0.7488372093, "include": true, "reason": "import numpy", "num_tokens": 367}
|
# © 2021 ushita37
import numpy as np
# Station names: the single header line of stationName.csv.
# Context manager ensures the file handle is closed (the original
# `F = open(...)` leaked it).
with open('stationName.csv') as f:
    ekimeiList = f.readline().replace('\n', '').split(',')
# Accepted spellings for the "inner loop" / "outer loop" direction input.
uchimawaList = ['uchimawari', '内回り', 'inner', 'inner_track', 'inner-track', 'innertrack','うちまわり','ウチマワリ','ウチマワリ']
sotomawaList = ['sotomawari', '外回り', 'outer', 'outer_track', 'outer-track', 'outertrack','そとまわり','ソトマワリ','ソトマワリ']
# Station adjacency matrix; 9999 entries are skipped by the search
# (presumably a "no edge" sentinel — TODO confirm).
a = np.loadtxt('JYarray1.csv',delimiter=',',dtype='int64')
def yamanote_search(k, l, direction):
    """Scan row k of the adjacency matrix `a` at column l for the next
    reachable station.

    direction: 1 for the inner loop (positive entries), anything else
    for the outer loop (negative entries).
    Returns [new_k, new_l, station_name_or_None, travel_minutes]; when
    no station is found at column l, l is advanced by one and the name
    is None with zero minutes.
    """
    travel = 0
    found = None
    entry = a[k, l]
    if direction == 1:
        # Inner loop: a positive entry is a forward edge. 9999 entries
        # are skipped (presumably a "no edge" sentinel).
        if entry != 9999 and entry > 0:
            travel = entry
            found = ekimeiList[l]
            k, l = l, 0
        else:
            l += 1
    else:
        # Outer loop: a negative entry is a backward edge; its
        # magnitude is the travel time.
        if entry < 0:
            travel = abs(entry)
            found = ekimeiList[l]
            k, l = l, 0
        else:
            l += 1
    return [k, l, found, travel]
def check_input(input1, input2, input3):
    """Validate the departure station, arrival station and direction
    strings against the module-level lookup lists.

    Returns [dep_ok, arr_ok, direction] where direction is 1 for the
    inner loop, -1 for the outer loop, or None when unrecognized.
    """
    dep_ok = input1 in ekimeiList
    arr_ok = input2 in ekimeiList
    if input3 in uchimawaList:
        direction = 1
    elif input3 in sotomawaList:
        direction = -1
    else:
        direction = None
    return [dep_ok, arr_ok, direction]
# Interactively ask for the departure station, arrival station and
# travel direction. The direction is lowercased so English spellings
# match the lookup lists regardless of input case.
keyboard_dep = input('出発駅を入力してください>')
keyboard_arr = input('到着駅を入力してください>')
keyboard_direction = input('方向を入力してください>').lower()
def initialize(keyboard_dep):
    """Build the initial search state for a trip starting at the
    station named `keyboard_dep`.

    Returns [row_index, col_index, finished_flag, station_name,
    elapsed_minutes].
    """
    # Row index in the adjacency matrix corresponding to the
    # departure station entered at the keyboard.
    i = ekimeiList.index(keyboard_dep)
    j = 0
    finish_route = False
    station_name = ekimeiList[i]
    elapsed_min = 0  # renamed from `min`, which shadowed the builtin
    return [i, j, finish_route, station_name, elapsed_min]
# Validate all three inputs before starting the search; abort with a
# message on the first failure.
[check1, check2, check3] = check_input(keyboard_dep, keyboard_arr, keyboard_direction)
if check1 == False:
    print(f'{keyboard_dep}駅は存在しません')
    exit()
if check2 == False:
    print(f'{keyboard_arr}駅は存在しません')
    exit()
if check3 == None:
    print('方向の入力が間違っています')
    exit()
if keyboard_arr == keyboard_dep:
    print('出発駅と到着駅が同じです')
    exit()
# NOTE(review): `min` shadows the builtin at module scope here.
[i, j, finish_route, station_name, min] = initialize(keyboard_dep)
print(station_name)
# Walk the loop line station by station, accumulating travel minutes,
# until the arrival station is printed or column 29 is passed without
# finding a neighbor.
while finish_route == False:
    [i, j, station_name, plus_min] = yamanote_search(i, j, check3)
    if station_name != None:
        print(station_name)
    min += plus_min
    if j > 29:
        # Scanned past the last column of row i with no edge found.
        print(f'aの{j}行目には隣駅の情報がありません')
        break
    if station_name == keyboard_arr:
        print (f'累計所要時間は{min}分です')
        print('到着しました')
        finish_route =True
|
{"hexsha": "120eb922e76f2d26404108910f88f7e574868d4b", "size": 2540, "ext": "py", "lang": "Python", "max_stars_repo_path": "early_works/yamanote_search.py", "max_stars_repo_name": "ushita37/shortest_path", "max_stars_repo_head_hexsha": "a04eea43480809f528fa4b8d5878b23f11e58c1b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "early_works/yamanote_search.py", "max_issues_repo_name": "ushita37/shortest_path", "max_issues_repo_head_hexsha": "a04eea43480809f528fa4b8d5878b23f11e58c1b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "early_works/yamanote_search.py", "max_forks_repo_name": "ushita37/shortest_path", "max_forks_repo_head_hexsha": "a04eea43480809f528fa4b8d5878b23f11e58c1b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6565656566, "max_line_length": 113, "alphanum_fraction": 0.5708661417, "include": true, "reason": "import numpy", "num_tokens": 890}
|
import sys
import logging
import numpy as np
import pickle
import argparse
import torch
import torch.nn.functional as F
import utils
import os
def get_args():
    """Build and parse the command-line arguments for this script."""
    parser = argparse.ArgumentParser()
    utils.add_shared_args(parser)
    # Resume/removal paths for the two models being compared.
    string_flags = (
        '--resume-dir1', '--resume-name1',
        '--resume-dir2', '--resume-name2',
        '--rm-idx-dir1', '--rm-idx-dir2',
    )
    for flag in string_flags:
        parser.add_argument(flag, type=str, default=None)
    parser.add_argument('--samp-T', type=int, default=5)
    return parser.parse_args()
def get_conf_and_acc(model, loader, cpu):
    """Evaluate `model` over `loader`.

    Returns (conf, acc): the row-stacked softmax output matrix as a
    numpy array, and the sample-weighted average top-1 accuracy.
    """
    conf_batches = []
    acc_meter = utils.AverageMeter()
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            if not cpu:
                x, y = x.cuda(), y.cuda()
            probs = model(x).softmax(dim=1)
            conf_batches.append(probs)
            batch_acc = (probs.argmax(dim=1) == y).sum().item() / len(y)
            acc_meter.update(batch_acc, len(y))
    all_conf = torch.cat(conf_batches)
    return all_conf.cpu().numpy(), acc_meter.average()
def get_confs(model, loaders, args):
    """Collect the confidence matrices for the train / forget / test
    splits (accuracy from get_conf_and_acc is discarded)."""
    return {
        split: get_conf_and_acc(model, loaders[split], args.cpu)[0]
        for split in ('train', 'forget', 'test')
    }
def get_loaders(trainset, testset, batch_size=128, samp_rm_idx_path=None, tar_rm_idx_path=None):
    """Restore the data loaders for one run.

    samp_rm_idx_path / tar_rm_idx_path: pickle files holding the
    indices removed ("forgotten") for the sampler and the target set.
    Returns a dict with 'sample', 'train', 'forget' and 'test' loaders.
    """
    with open(samp_rm_idx_path, 'rb') as fp:
        sample_removed = pickle.load(fp)
    with open(tar_rm_idx_path, 'rb') as fp:
        target_removed = pickle.load(fp)

    sample_loader = utils.DataSampler(trainset, batch_size)
    sample_loader.remove(sample_removed)

    train_loader = utils.DataLoader(trainset, batch_size)
    train_loader.remove(target_removed)

    # The forget loader iterates exactly the removed indices.
    forget_loader = utils.DataLoader(trainset, batch_size)
    forget_loader.set_sampler_indices(target_removed)

    return {
        'sample': sample_loader,
        'train': train_loader,
        'forget': forget_loader,
        'test': utils.DataLoader(testset, batch_size),
    }
def get_results(trainset, testset, samp_rm_idx_dir, tar_rm_idx_dir, resume_dir, resume_name, args):
    """For each of the args.samp_T runs, restore that run's checkpoint
    and data loaders, then gather its train/forget/test confidence
    matrices.

    Returns a dict mapping split name to a list (one entry per run) of
    numpy confidence matrices.
    """
    model = utils.get_mcmc_bnn_arch(args.arch, args.dataset, args.prior_sig)
    if not args.cpu:
        model.cuda()

    confs = {'train': [], 'forget': [], 'test': []}
    for run in range(1, args.samp_T + 1):
        # Restore the data loaders for this run.
        samp_path = os.path.join(samp_rm_idx_dir, str(run), 'rm-idx.pkl')
        tar_path = os.path.join(tar_rm_idx_dir, str(run), 'rm-idx.pkl')
        loaders = get_loaders(trainset, testset, args.batch_size, samp_path, tar_path)

        # Restore the model / sampler checkpoint for this run.
        ckpt = torch.load(os.path.join(resume_dir, str(run), resume_name))
        model.load_state_dict(ckpt['model_state_dict'])
        model.n = len(loaders['sample'])

        run_confs = get_confs(model, loaders, args)
        for split, conf in run_confs.items():
            confs[split].append(conf)
    return confs
def main(args, logger):
    """Compute, for each split (train/forget/test), the mean and std
    over samp_T runs of the per-sample L1 distance between the two
    resumed models' softmax outputs, and print one line per split."""
    trainset, testset = utils.get_dataset(args.dataset)
    confs_1 = get_results(trainset, testset, args.rm_idx_dir1, args.rm_idx_dir2, args.resume_dir1, args.resume_name1, args)
    # NOTE(review): this call passes rm_idx_dir2 for BOTH the samp and
    # tar directories, while the call above uses rm_idx_dir1 for samp —
    # confirm the asymmetry is intentional and not a copy-paste slip.
    confs_2 = get_results(trainset, testset, args.rm_idx_dir2, args.rm_idx_dir2, args.resume_dir2, args.resume_name2, args)
    for name in ['train', 'forget', 'test']:
        res = []
        for i in range(args.samp_T):
            # L1 distance between softmax rows, averaged over samples.
            res.append( np.abs(confs_1[name][i] - confs_2[name][i]).sum(axis=1).mean().item() )
        res = np.array(res)
        print('pred_diff: {}: mean={:.3f}, std={:.3f}'.format(name, res.mean(), res.std()) )
if __name__ == '__main__':
    args = get_args()
    # Configure root logging to stdout; timestamps use
    # 'YYYY-MM-DD HH:MM:SS'. (The previously built Formatter object
    # was never used, so it has been removed.)
    fmt = '%(asctime)s %(name)s:%(levelname)s: %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=fmt, datefmt='%Y-%m-%d %H:%M:%S')
    logger = logging.getLogger()
    try:
        main(args, logger)
    except Exception as e:
        # Log the full traceback instead of letting the process die silently.
        logger.exception('Unexpected exception! %s', e)
|
{"hexsha": "49a6491ebf51c7563112faa7521910b090d9bb22", "size": 4500, "ext": "py", "lang": "Python", "max_stars_repo_path": "BNN/calc_pred_diff.py", "max_stars_repo_name": "fshp971/mcmc-unlearning", "max_stars_repo_head_hexsha": "3113dedca6de33bcaf316b804cb9c1e636db7fd5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-03-16T02:28:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T08:36:57.000Z", "max_issues_repo_path": "BNN/calc_pred_diff.py", "max_issues_repo_name": "fshp971/mcmc-unlearning", "max_issues_repo_head_hexsha": "3113dedca6de33bcaf316b804cb9c1e636db7fd5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BNN/calc_pred_diff.py", "max_forks_repo_name": "fshp971/mcmc-unlearning", "max_forks_repo_head_hexsha": "3113dedca6de33bcaf316b804cb9c1e636db7fd5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8219178082, "max_line_length": 123, "alphanum_fraction": 0.6402222222, "include": true, "reason": "import numpy", "num_tokens": 1185}
|
import numpy as np
from wolf_agent import WoLFAgent
from matrix_game_local_only import MatrixGame_local
from matrix_game_mec_only import MatrixGame_mec
from matrix_game import MatrixGame
from queue_relay import QueueRelay
import matplotlib.pyplot as plt
from gpd import GPD ## TLIU
from dataToExcel import DTE ## TLIU
import xlrd ## TLIU
import xlsxwriter ## TLIU
class draw_picture():
    def __init__(self):
        """Initialize empty parameter lists for the reward sweeps."""
        # Presumably populated by callers before plotting — TODO
        # confirm; neither list is used inside the methods visible in
        # this chunk.
        self.bandwidth = []
        self.usersnumber = []
    def run_for_all_mode(self,bw,un):
        """Run the mixed local/MEC offloading matrix game with WoLF
        agents for 700 episodes and return the final total reward.

        bw : bandwidth, forwarded to MatrixGame as BW.
        un : number of users (one WoLF agent and one queue per user).
        """
        nb_episode = 700
        actions = np.arange(8)
        user_num = un
        # Per-user task arrival rates, assigned in a repeating
        # 5-user pattern.
        lambda_n = np.zeros(user_num)
        for i in range(user_num): # cycles required per bit: 70~800 cycles/bit
            if i % 5 == 0:
                lambda_n[i] = 0.001
            if i % 5 == 1:
                lambda_n[i] = 0.01
            if i % 5 == 2:
                lambda_n[i] = 0.1
            if i % 5 == 3:
                lambda_n[i] = 0.001
            if i % 5 == 4:
                lambda_n[i] = 0.01
        # Action table; first element toggles offloading (0 = local
        # with 5 MHz CPU, 1 = offload to MEC) — third element's
        # semantics live in MatrixGame, TODO confirm.
        actions_set = [[0, 5 * pow(10, 6), 0.4],
                       [0, 5 * pow(10, 6), 0.4],
                       [0, 5 * pow(10, 6), 0.4],
                       [0, 5 * pow(10, 6), 0.4],
                       [1, 0, 0.4],
                       [1, 0, 0.4],
                       [1, 0, 0.4],
                       [1, 0, 0.4]]
        # Initial generalized-Pareto parameters for every user's queue.
        GPD1_array = [4 * pow(10, 6) for _ in range(user_num)]
        GPD2_array = [0.3 for _ in range(user_num)]
        # One WoLF learning agent per user.
        wolf_agent_array = []
        for i in range(user_num):
            wolf_agent_array.append(WoLFAgent(alpha=0.1, actions=actions, high_delta=0.004, low_delta=0.002))
        # One queue model per user, seeded with its arrival rate and
        # initial GPD parameters.
        queue_relay_array = []
        for i in range(user_num):
            queue_relay_array.append(QueueRelay(lambda_n[i], GPD1_array[i], GPD2_array[i]))
        # reward = Reward()
        reward_history = []
        # Per-user queue-length history used for the GPD refits below.
        Q_array_histroy = [[10] for i in range(user_num)] ## TLIU
        for episode in range(nb_episode):
            # Snapshot the current queue state of every user.
            Q_array = []
            Qx_array = []
            Qy_array = []
            Qz_array = []
            M1_array = []
            M2_array = []
            for i in range(user_num):
                Q_array.append(queue_relay_array[i].Q)
                Qx_array.append(queue_relay_array[i].Qx)
                Qy_array.append(queue_relay_array[i].Qy)
                Qz_array.append(queue_relay_array[i].Qz)
                M1_array.append(queue_relay_array[i].M1)
                M2_array.append(queue_relay_array[i].M2)
            for i in range(user_num):
                Q_array_histroy[i].append(Q_array[i])
            # Every 50 episodes, refit the GPD tail parameters from the
            # accumulated queue-length history.
            if episode % 50 == 0 and episode != 0:
                for i in range(user_num):
                    aa = GPD()
                    data = Q_array_histroy[i]
                    # data = [10000000000000 for i in range(200) ]
                    # res = aa.gpd( data , 3.96*pow(10,5) )
                    res = aa.gpd(data, 3.96 * pow(10, 6))
                    if res:
                        queue_relay_array[i].GPD1 = res[0][0]
                        queue_relay_array[i].GPD2 = res[0][1]
                        queue_relay_array[i].updateM1()
                        queue_relay_array[i].updateM2()
            # Each agent picks its action for this episode.
            iteration_actions = []
            for i in range(user_num):
                iteration_actions.append(wolf_agent_array[i].act())
            game = MatrixGame(actions=iteration_actions, Q=Q_array,
                              Qx=Qx_array, Qy=Qy_array, Qz=Qz_array,
                              M1=M1_array,
                              M2=M2_array , BW= bw)
            reward, bn, lumbda, rff = game.step(actions=iteration_actions)
            print("episode",episode,"reward",sum(reward))
            # Propagate the game outcome back into each user's queue.
            for i in range(user_num):
                queue_relay_array[i].lumbda = lumbda[i]
                queue_relay_array[i].updateQ(bn[i], actions_set[iteration_actions[i]][0], rff[i])
                queue_relay_array[i].updateQx()
                queue_relay_array[i].updateQy()
                queue_relay_array[i].updateQz()
            reward_history.append(sum(reward))
            # Agents learn from their individual rewards.
            for i in range(user_num):
                wolf_agent_array[i].observe(reward=reward[i])
        # for i in range(user_num):
        #     print(wolf_agent_array[i].pi_average)
        # plt.plot(np.arange(len(reward_history)), reward_history, label="")
        # plt.show()
        return reward_history[-1]
    def run_for_only_mec(self,bw1,un1):
        """Run the MEC-offloading-only variant of the matrix game
        (every action offloads; only the third action element varies)
        for 700 episodes and return the final total reward.

        bw1 : bandwidth, forwarded to MatrixGame_mec as BW.
        un1 : number of users (one WoLF agent and one queue per user).
        """
        nb_episode = 700
        # All actions offload (flag 1, no local CPU); the third
        # element's semantics live in MatrixGame_mec — TODO confirm.
        actions_set = [
            [1, 0, 0.1],
            [1, 0, 0.5],
            [1, 0, 1],
            [1, 0, 2]]
        actions = np.arange(len(actions_set))
        user_num = un1
        # Per-user task arrival rates, assigned in a repeating
        # 5-user pattern.
        lambda_n = np.zeros(user_num)
        for i in range(user_num): # cycles required per bit: 70~800 cycles/bit
            if i % 5 == 0:
                lambda_n[i] = 0.001
            if i % 5 == 1:
                lambda_n[i] = 0.01
            if i % 5 == 2:
                lambda_n[i] = 0.1
            if i % 5 == 3:
                lambda_n[i] = 0.001
            if i % 5 == 4:
                lambda_n[i] = 0.01
        # Initial generalized-Pareto parameters for every user's queue.
        GPD1_array = [4 * pow(10, 6) for _ in range(user_num)]
        GPD2_array = [0.3 for _ in range(user_num)]
        # One WoLF learning agent per user.
        wolf_agent_array = []
        for i in range(user_num):
            wolf_agent_array.append(WoLFAgent(alpha=0.1, actions=actions, high_delta=0.004, low_delta=0.002))
        # One queue model per user.
        queue_relay_array = []
        for i in range(user_num):
            queue_relay_array.append(QueueRelay(lambda_n[i], GPD1_array[i], GPD2_array[i]))
        # reward = Reward()
        reward_history = []
        # Per-user queue-length history used for the GPD refits below.
        Q_array_histroy = [[10] for i in range(user_num)] ## TLIU
        for episode in range(nb_episode):
            # Snapshot the current queue state of every user.
            Q_array = []
            Qx_array = []
            Qy_array = []
            Qz_array = []
            M1_array = []
            M2_array = []
            for i in range(user_num):
                Q_array.append(queue_relay_array[i].Q)
                Qx_array.append(queue_relay_array[i].Qx)
                Qy_array.append(queue_relay_array[i].Qy)
                Qz_array.append(queue_relay_array[i].Qz)
                M1_array.append(queue_relay_array[i].M1)
                M2_array.append(queue_relay_array[i].M2)
            for i in range(user_num):
                Q_array_histroy[i].append(Q_array[i])
            # Every 50 episodes, refit the GPD tail parameters from the
            # accumulated queue-length history.
            if episode % 50 == 0 and episode != 0:
                for i in range(user_num):
                    aa = GPD()
                    data = Q_array_histroy[i]
                    # data = [10000000000000 for i in range(200) ]
                    # res = aa.gpd( data , 3.96*pow(10,5) )
                    res = aa.gpd(data, 3.96 * pow(10, 6))
                    if res:
                        queue_relay_array[i].GPD1 = res[0][0]
                        queue_relay_array[i].GPD2 = res[0][1]
                        queue_relay_array[i].updateM1()
                        queue_relay_array[i].updateM2()
            # Each agent picks its action for this episode.
            iteration_actions = []
            for i in range(user_num):
                iteration_actions.append(wolf_agent_array[i].act())
            game = MatrixGame_mec(actions=iteration_actions, Q=Q_array,
                                  Qx=Qx_array, Qy=Qy_array, Qz=Qz_array,
                                  M1=M1_array,
                                  M2=M2_array, BW=bw1)
            #print('Q value :' + str(Q_array) + str(Qx_array) + str(Qy_array) + str(Qz_array))
            reward, bn, lumbda, rff = game.step(actions=iteration_actions)
            # Propagate the game outcome back into each user's queue.
            for i in range(user_num):
                queue_relay_array[i].lumbda = lumbda[i]
                queue_relay_array[i].updateQ(bn[i], actions_set[iteration_actions[i]][0], rff[i])
                queue_relay_array[i].updateQx()
                queue_relay_array[i].updateQy()
                queue_relay_array[i].updateQz()
            reward_history.append(sum(reward))
            # Agents learn from their individual rewards.
            for i in range(user_num):
                wolf_agent_array[i].observe(reward=reward[i])
        # for i in range(user_num):
        #     print(wolf_agent_array[i].pi_average)
        # plt.plot(np.arange(len(reward_history)), reward_history, label="")
        # plt.show()
        return reward_history[-1]
def run_for_only_local(self, bw2, un2):
    """Simulate the offloading game when every user computes locally only.

    Runs 700 episodes of the WoLF learning loop against ``MatrixGame_local``
    and returns the summed reward of the final episode.

    Parameters
    ----------
    bw2 : float
        Channel bandwidth (Hz) handed to the matrix game.
    un2 : int
        Number of user equipments in the simulation.

    Returns
    -------
    The sum of all users' rewards in the last episode.
    """
    nb_episode = 700
    # Local-only action set: offloaded bits are always 0, only the local CPU
    # frequency (second entry) varies.
    actions_set = [
        [0, 5 * pow(10, 6), 0],
        [0, 10 * pow(10, 6), 0],
        [0, 20 * pow(10, 6), 0],
        [0, 30 * pow(10, 6), 0]]
    actions = np.arange(len(actions_set))
    user_num = un2
    # Packet arrival rates cycle over the users in groups of five.
    rate_pattern = (0.001, 0.01, 0.1, 0.001, 0.01)
    lambda_n = np.array([rate_pattern[i % 5] for i in range(user_num)])
    GPD1_array = [4 * pow(10, 6) for _ in range(user_num)]
    GPD2_array = [0.3 for _ in range(user_num)]
    # One WoLF learner and one queue model per user.
    wolf_agent_array = [
        WoLFAgent(alpha=0.1, actions=actions, high_delta=0.004, low_delta=0.002)
        for _ in range(user_num)]
    queue_relay_array = [
        QueueRelay(lambda_n[i], GPD1_array[i], GPD2_array[i])
        for i in range(user_num)]
    reward_history = []
    # Per-user queue-length history, seeded with an initial backlog of 10.
    queue_history = [[10] for _ in range(user_num)]
    for episode in range(nb_episode):
        # Snapshot the current queue statistics of every user.
        Q_array = [relay.Q for relay in queue_relay_array]
        Qx_array = [relay.Qx for relay in queue_relay_array]
        Qy_array = [relay.Qy for relay in queue_relay_array]
        Qz_array = [relay.Qz for relay in queue_relay_array]
        M1_array = [relay.M1 for relay in queue_relay_array]
        M2_array = [relay.M2 for relay in queue_relay_array]
        for i in range(user_num):
            queue_history[i].append(Q_array[i])
        # Every 50 episodes (skipping episode 0), refit the generalized
        # Pareto tail parameters from the accumulated queue history.
        if episode % 50 == 0 and episode != 0:
            for i in range(user_num):
                fitter = GPD()
                res = fitter.gpd(queue_history[i], 3.96 * pow(10, 6))
                if res:
                    queue_relay_array[i].GPD1 = res[0][0]
                    queue_relay_array[i].GPD2 = res[0][1]
                    queue_relay_array[i].updateM1()
                    queue_relay_array[i].updateM2()
        # Each agent picks an action, then the one-shot game is played.
        iteration_actions = [agent.act() for agent in wolf_agent_array]
        game = MatrixGame_local(actions=iteration_actions, Q=Q_array,
                                Qx=Qx_array, Qy=Qy_array, Qz=Qz_array,
                                M1=M1_array,
                                M2=M2_array, BW=bw2)
        reward, bn, lumbda, rff = game.step(actions=iteration_actions)
        # Feed the game outcome back into each user's queue model.
        for i in range(user_num):
            queue_relay_array[i].lumbda = lumbda[i]
            queue_relay_array[i].updateQ(bn[i], actions_set[iteration_actions[i]][0], rff[i])
            queue_relay_array[i].updateQx()
            queue_relay_array[i].updateQy()
            queue_relay_array[i].updateQz()
        reward_history.append(sum(reward))
        for i in range(user_num):
            wolf_agent_array[i].observe(reward=reward[i])
    return reward_history[-1]
if __name__ == '__main__':
    # Sweep the number of UEs and compare offloading strategies.
    usernumber = np.array([10, 15, 20, 25, 30, 35, 40, 45])
    draw = draw_picture()
    cost_of_all = []
    cost_of_mec = []
    cost_of_local = []
    cost_of_all_6mhz = []
    cost_of_all_8mhz = []
    cost_of_all_12mhz = []
    for i in range(8):
        # Keep this call order fixed: the simulations share global RNG state.
        cost_of_all.append(draw.run_for_all_mode(bw=10 * pow(10, 6), un=usernumber[i]))
        cost_of_mec.append(draw.run_for_only_mec(bw1=10 * pow(10, 6), un1=usernumber[i]))
        cost_of_local.append(draw.run_for_only_local(bw2=10 * pow(10, 6), un2=usernumber[i]))
        cost_of_all_6mhz.append(draw.run_for_all_mode(bw=6 * pow(10, 6), un=usernumber[i]))
        cost_of_all_8mhz.append(draw.run_for_all_mode(bw=8 * pow(10, 6), un=usernumber[i]))
        cost_of_all_12mhz.append(draw.run_for_all_mode(bw=12 * pow(10, 6), un=usernumber[i]))
    plt.plot(usernumber, cost_of_all, '^-', linewidth=0.4, label='all selection')
    plt.plot(usernumber, cost_of_local, '<-', linewidth=0.4, label='only local selection')
    plt.plot(usernumber, cost_of_mec, '>-', linewidth=0.4, label='only MEC selection')
    plt.plot(usernumber, cost_of_all_6mhz, '<-', linewidth=0.2, label='all selection of 6mhz')
    plt.plot(usernumber, cost_of_all_8mhz, '<-', linewidth=0.2, label='all selection of 8mhz')
    plt.plot(usernumber, cost_of_all_12mhz, '<-', linewidth=0.2, label='all selection of 12mhz')
    plt.grid(True)  # show grid lines
    plt.xlabel('The number of UE')
    plt.ylabel('Sum Cost')
    plt.legend(loc='upper left')  # legend in the upper-left corner
    plt.show()
    # Persist every curve so the figure can be regenerated later.
    for name, series in [("all", cost_of_all),
                         ("mec", cost_of_mec),
                         ("local", cost_of_local),
                         ("all_6MHZ", cost_of_all_6mhz),
                         ("all_8MHZ", cost_of_all_8mhz),
                         ("all_12MHZ", cost_of_all_12mhz)]:
        writer = DTE("./picture/pic2/" + name)
        print(series)
        writer.write(series)
|
{"hexsha": "900d11aedc3aa70ad91e5ffd4183fa0f21043a15", "size": 14868, "ext": "py", "lang": "Python", "max_stars_repo_path": "draw_num_ue.py", "max_stars_repo_name": "T610/MEC", "max_stars_repo_head_hexsha": "351b83362e0c8a7128bd95d20de2a720a87b5c48", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "draw_num_ue.py", "max_issues_repo_name": "T610/MEC", "max_issues_repo_head_hexsha": "351b83362e0c8a7128bd95d20de2a720a87b5c48", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "draw_num_ue.py", "max_forks_repo_name": "T610/MEC", "max_forks_repo_head_hexsha": "351b83362e0c8a7128bd95d20de2a720a87b5c48", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2634146341, "max_line_length": 128, "alphanum_fraction": 0.5237422653, "include": true, "reason": "import numpy", "num_tokens": 4067}
|
/*
* Copyright (c) CERN 2013-2015
*
* Copyright (c) Members of the EMI Collaboration. 2010-2013
* See http://www.eu-emi.eu/partners for details on the copyright
* holders.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** \file serverconfig.cpp Implementation of FTS3 server configuration. */
#include <boost/program_options.hpp>
#include "common/Exceptions.h"
#include "ServerConfigReader.h"
#include "ServerConfig.h"
using namespace fts3::config;
using namespace fts3::common;
// Construct the server configuration.
// Wires the config-file monitor to this instance and zeroes the
// reader/getter counters used by the wait*/notify* synchronisation below.
ServerConfig::ServerConfig() : cfgmonitor (this), reading(0), getting(0),
    readTime(0)
{
    FTS3_COMMON_LOGGER_NEWLOG(TRACE) << "ServerConfig created" << commit;
}
// Destroy the server configuration, logging the event for tracing.
ServerConfig::~ServerConfig()
{
    FTS3_COMMON_LOGGER_NEWLOG(TRACE) << "ServerConfig destroyed" << commit;
}
const std::string &ServerConfig::_get_str(const std::string &aVariable)
{
_t_vars::iterator itr = _vars.find(aVariable);
if (itr == _vars.end()) {
throw UserError("Server config variable " + aVariable + " not defined.");
}
// No worry, it will not be 0 pointer due to the exception
return itr->second;
}
// Parse the server configuration from the command line / config file,
// using ServerConfigReader as the concrete reading policy.
void ServerConfig::read(int argc, char** argv)
{
    _read<ServerConfigReader> (argc, argv);
}
// Start watching the configured file ("configfile") for runtime changes.
void ServerConfig::startMonitor(void)
{
    cfgmonitor.start(get<std::string>("configfile"));
}
// Return the timestamp of the last configuration read.
time_t ServerConfig::getReadTime()
{
    return readTime;
}
void ServerConfig::waitIfReading()
{
boost::mutex::scoped_lock lock(qm);
while (reading) {
qv.wait(lock);
}
getting++;
}
// Deregister this getter and wake the reader thread, if one is waiting.
void ServerConfig::notifyReaders()
{
    boost::mutex::scoped_lock guard(qm);
    --getting;
    qv.notify_all(); // there is anyway only one thread to be notified
}
void ServerConfig::waitIfGetting()
{
boost::mutex::scoped_lock lock(qm);
while (getting > 0) qv.wait(lock);
reading = true;
}
// Reload finished: clear the in-progress flag and wake blocked getters.
void ServerConfig::notifyGetters()
{
    boost::mutex::scoped_lock guard(qm);
    reading = false;
    qv.notify_all();
}
|
{"hexsha": "c574927ff892ec750270ba78db01f5f868986198", "size": 2473, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/config/ServerConfig.cpp", "max_stars_repo_name": "cern-fts/fts3", "max_stars_repo_head_hexsha": "cf9eb5c9f52728929965edf58a86381eec0c4e88", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9.0, "max_stars_repo_stars_event_min_datetime": "2018-06-27T09:53:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-19T09:54:37.000Z", "max_issues_repo_path": "src/config/ServerConfig.cpp", "max_issues_repo_name": "cern-fts/fts3", "max_issues_repo_head_hexsha": "cf9eb5c9f52728929965edf58a86381eec0c4e88", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/config/ServerConfig.cpp", "max_forks_repo_name": "cern-fts/fts3", "max_forks_repo_head_hexsha": "cf9eb5c9f52728929965edf58a86381eec0c4e88", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2018-07-13T06:17:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-08T04:57:04.000Z", "avg_line_length": 22.0803571429, "max_line_length": 81, "alphanum_fraction": 0.6906591185, "num_tokens": 619}
|
'''
File name: nodes.py
Author: Joke Durnez
Date created: 10/31/2017
Date last modified: 10/31/2017
Python Version: 2.7
Description: Script to clean timeseries
Project: Psychosis
'''
from nilearn.input_data import NiftiMasker, NiftiMapsMasker, NiftiLabelsMasker
from nipype.utils.filemanip import split_filename
from scipy.signal import periodogram,detrend
from nipype.interfaces.fsl import Smooth
from nipype.interfaces import fsl, afni
import nipype.pipeline.engine as pe
from datetime import datetime
from nipype import algorithms
import pandas as pd
import nibabel as nib
import numpy as np
import argparse
import nilearn
import shutil
import sys
import os
sys.path.append(os.environ.get("CODEDIR"))
from postbids.rest.Text2Vest import Text2Vest
from postbids.rest import nodes, reho, utils
# get command line arguments
# Subject identifier comes from the environment (set by the caller/scheduler).
subject = os.environ.get("SUBJECT")
# if output already exist: overwrite?
redo = True
# load environment variables for psychosis PROJECT
# Per-subject output directory; wiped and recreated when redo is set.
cleandir = os.path.join(os.environ.get('CONDIR'),"sub-%s"%subject)
if os.path.exists(cleandir) and redo:
    shutil.rmtree(cleandir)
if not os.path.exists(cleandir):
    os.mkdir(cleandir)
prepdir = os.environ.get('PREPDIR')
CODEDIR = os.environ.get('CODEDIR')
print(datetime.now().strftime("%a %b %d %H:%M:%S"))
print("Start preparing masks for subject %s"%subject)
# get scan ID's
# Resting-state runs are the directories containing 'rest' under
# MNINonLinear/Results of the preprocessed data.
subprep = os.path.join(prepdir,"sub-"+subject,"MNINonLinear/Results")
keys = [x for x in os.listdir(subprep) if 'rest' in x]
os.chdir(cleandir)
########################
## CREATE WM/GM MASKS ##
########################
GMmaskfile = os.path.join(cleandir,"GM_mask.nii.gz")
WMmaskfile = os.path.join(cleandir,"WM_mask.nii.gz")
ribbonfile = os.path.join(prepdir,"sub-"+subject,"MNINonLinear",'ribbon.nii.gz')
reffile = os.path.join(prepdir,"sub-"+subject,"MNINonLinear",'T1w_restore.2.nii.gz')
# presumably derives the GM/WM masks from the FreeSurfer ribbon resampled to
# the reference image -- see utils.create_mask for the actual contract
utils.create_mask(GMmaskfile,WMmaskfile,ribbonfile,reffile)
#############################
## START CLEANING PIPELINE ##
#############################
# Per-run cleaning pipeline: trim -> despike -> mask -> motion regression ->
# global-signal regression, then (twice: with and without GSR) confound
# regression, bandpass filtering and global-signal bookkeeping.
for run in keys:
    print(datetime.now().strftime("%a %b %d %H:%M:%S"))
    print("=============================================")
    print('Started analysing %s'%run)
    print("=============================================")
    rundir = os.path.join(cleandir,run)
    if not os.path.exists(rundir):
        os.mkdir(rundir)
    os.chdir(rundir)
    ############################
    # cut off first timepoints #
    ############################
    print(datetime.now().strftime("%a %b %d %H:%M:%S"))
    print("%s in file %s: Cutting off first timepoints"%(subject,run))
    #in/out
    infile = os.path.join(subprep,run,run+".nii.gz") #original file
    outfile = os.path.join(rundir,run+"_removed_first10.nii.gz")
    #action
    totaltp = nib.load(infile).shape[3]
    # runs with 10 or fewer volumes cannot survive the 10-volume trim: skip
    if totaltp <= 10:
        continue
    fslroi = fsl.ExtractROI(in_file=infile,roi_file=outfile,t_min=10,t_size=totaltp-10)
    if not os.path.exists(outfile) or redo:
        fslroi.run()
    ###########
    # despike #
    ###########
    print(datetime.now().strftime("%a %b %d %H:%M:%S"))
    print("%s in file %s: Despiking"%(subject,run))
    #in/out
    infile = outfile
    _,base,_=split_filename(infile)
    outfile = os.path.join(rundir,base+"_despiked.nii.gz")
    despiker = afni.Despike()
    despiker.inputs.in_file = infile
    despiker.inputs.args = '-NEW'
    despiker.inputs.out_file = outfile
    if not os.path.exists(outfile) or redo:
        despiker.run()
    ##############
    # APPLY MASK #
    ##############
    print(datetime.now().strftime("%a %b %d %H:%M:%S"))
    print("%s in file %s: Masking"%(subject,run))
    infile = outfile
    _,base,_=split_filename(infile)
    outfile = os.path.join(rundir,base+"_masked.nii.gz")
    maskfile = os.path.join(subprep,run,"brainmask_fs.2.nii.gz")
    masker = fsl.maths.ApplyMask()
    masker.inputs.in_file = infile
    masker.inputs.mask_file = maskfile
    masker.inputs.out_file = outfile
    if not os.path.exists(outfile) or redo:
        masker.run()
    ####################################################
    # motion regression (and global signal regression) #
    ####################################################
    print(datetime.now().strftime("%a %b %d %H:%M:%S"))
    print("%s in file %s: Regressing out motion"%(subject,run))
    movement_regressors = os.path.join(rundir,base+"_mvmreg.txt")
    movement_regressors_mat = os.path.join(rundir,base+"_mvmreg.mat")
    #++++++++++++++++++++
    # prepare regressors
    #++++++++++++++++++++
    # in/out (there's a lot of other files generated here)
    infile = outfile
    _,base,_=split_filename(infile)
    outfile_residuals = os.path.join(rundir,base+"_mvmreg.nii.gz")
    outfile_beta = os.path.join(rundir,base+"_mvmregbeta.nii.gz")
    motion_regressed = outfile_residuals
    # movement regressors
    longmovementfile = os.path.join(subprep,run,"Movement_Regressors.txt")
    movementfile = os.path.join(subprep,run,"Movement_Regressors_removed_first10.txt")
    movement = pd.read_csv(longmovementfile,delim_whitespace=True,header=None,engine='python')
    # truncate to the run length, add squared terms, then drop the same 10
    # leading volumes that were trimmed from the image
    movement = movement.iloc[range(totaltp)]
    movementsq = movement**2
    movement = pd.concat([movement,movementsq],axis=1)
    movement = movement.drop(range(10))
    movement = movement.reset_index()
    movement = movement.drop('index',1)
    movement = movement.fillna(0)
    if not os.path.exists(movementfile) or redo:
        movement.to_csv(movementfile,index=False,header=None)
    # DVARS
    longdvarsfile = os.path.join(subprep,run,"Movement_RelativeRMS.txt")
    dvarsfile = os.path.join(subprep,run,"Movement_RelativeRMS_removed_first10.txt")
    # NOTE(review): sep="" is not a valid pandas separator -- this looks like
    # it should be delim_whitespace=True; confirm against a real input file
    motionDF = pd.read_csv(longdvarsfile,sep="",header=None,engine='python',names=['dvars'])
    motionDF = motionDF.drop(range(10))
    motionDF = motionDF.reset_index()
    motionDF = motionDF.drop('index',1)
    # compute FD
    FD = nodes.ComputeFD(movementfile)
    motionDF['FD'] = FD
    # save DVARS and FD
    motionfile = os.path.join(rundir,run+"_mvmderiv.txt")
    if not os.path.exists(motionfile) or redo:
        motionDF.to_csv(motionfile,sep="\t",header=None,index=False)
    # generate regressors
    movement = pd.read_csv(movementfile,sep=",",header=None,engine='python')
    #cte = pd.Series([1]*movement.shape[0]) # no longer necessary: data is centered
    reg = pd.concat([movement,motionDF],axis=1)
    if not os.path.exists(movement_regressors) or redo:
        reg.to_csv(movement_regressors,sep="\t",header=None,index=False)
    # generate regressors file readable for FSL
    create_reg = Text2Vest()
    create_reg.inputs.in_file = movement_regressors
    create_reg.inputs.out_file = movement_regressors_mat
    if not os.path.exists(movement_regressors_mat) or redo:
        create_reg.run()
    #++++++++++++++++++++++++++++
    # actual regression of motion
    #++++++++++++++++++++++++++++
    glm = fsl.GLM()
    glm.inputs.in_file = infile
    glm.inputs.design = movement_regressors_mat
    glm.inputs.dat_norm = False
    glm.inputs.var_norm = True
    glm.inputs.demean = True
    glm.inputs.out_res_name = outfile_residuals
    glm.inputs.out_file = outfile_beta
    if not os.path.exists(outfile_residuals) or redo:
        glm.run()
    #############################
    # global signal computation #
    #############################
    print(datetime.now().strftime("%a %b %d %H:%M:%S"))
    print("%s in file %s: Computing global signal"%(subject,run))
    #in/out
    infile = outfile_residuals
    _,base,_=split_filename(infile)
    outfile_residuals_gs = os.path.join(rundir,base+"_gsr.nii.gz")
    outfile_beta_gs = os.path.join(rundir,base+"_gsrbeta.nii.gz")
    gs_regressors = os.path.join(rundir,base+"_gsr.txt")
    gs_regressors_mat = os.path.join(rundir,base+"_gsr.mat")
    #action: mean over all voxels per timepoint, then standardise
    data = nib.load(infile).get_data()
    meants = np.mean(data,axis=(0,1,2))
    meantsSeries = pd.Series(meants)
    meantsSeries_st = (meantsSeries-np.mean(meantsSeries))/np.std(meantsSeries)
    #############################
    # global signal regression  #
    #############################
    print(datetime.now().strftime("%a %b %d %H:%M:%S"))
    print("%s in file %s: Global signal regression"%(subject,run))
    meantsSeries_st.to_csv(gs_regressors,sep="\t",header=None,index=False)
    # generate regressors file readable for FSL
    create_reg = Text2Vest()
    create_reg.inputs.in_file = gs_regressors
    create_reg.inputs.out_file = gs_regressors_mat
    if not os.path.exists(gs_regressors_mat) or redo:
        create_reg.run()
    #+++++++++++++++++++++++++++++++++++++++++++++++++
    # actual regression of global signal
    #+++++++++++++++++++++++++++++++++++++++++++++++++
    glm = fsl.GLM()
    glm.inputs.in_file = infile
    glm.inputs.design = gs_regressors_mat
    glm.inputs.dat_norm = False
    glm.inputs.var_norm = True
    glm.inputs.demean = True
    glm.inputs.out_res_name = outfile_residuals_gs
    glm.inputs.out_file = outfile_beta_gs
    if not os.path.exists(outfile_beta_gs) or redo:
        glm.run()
    #######################################################
    #######################################################
    ## PATH 1: clean on both gsr-signal and nogsr-signal ##
    #######################################################
    #######################################################
    # run the remaining cleaning twice: on the GSR and non-GSR residuals
    for infile in [outfile_residuals_gs,outfile_residuals]:
        ############
        # ANATICOR # --> not anaticor anymore, but high variance confounds (as in nilearn)
        ############
        print(datetime.now().strftime("%a %b %d %H:%M:%S"))
        print("%s in file %s: Regressing out high var cf"%(subject,run))
        #in/out
        _,base,_=split_filename(infile)
        regfile = os.path.join(rundir,base+"_cmpc.txt")
        regfile_mat = os.path.join(rundir,base+"_cmpc.mat")
        outfile_residuals = os.path.join(rundir,base+"_cmpc.nii.gz")
        outfile_beta = os.path.join(rundir,base+"_cmpcbeta.nii.gz")
        #compcor
        cv = nilearn.image.high_variance_confounds(infile,detrend=False)
        # prepare regressors (constant column + high-variance components)
        cte = pd.Series([1]*cv.shape[0])
        reg = pd.concat([cte,pd.DataFrame(cv)],axis=1)
        reg.to_csv(regfile,sep="\t",header=None,index=False)
        create_reg = Text2Vest()
        create_reg.inputs.in_file = regfile
        create_reg.inputs.out_file = regfile_mat
        create_reg.run()
        #++++++++++++++++++++++++++++++++
        # actual regression of compcors
        #++++++++++++++++++++++++++++++++
        glm = fsl.GLM()
        glm.inputs.in_file = infile
        glm.inputs.design = regfile_mat
        glm.inputs.dat_norm = False
        glm.inputs.var_norm = False
        glm.inputs.demean = False
        glm.inputs.out_res_name = outfile_residuals
        glm.inputs.out_file = outfile_beta
        if not os.path.exists(outfile_beta) or redo:
            glm.run()
        ######################
        # bandpass filtering #
        ######################
        print(datetime.now().strftime("%a %b %d %H:%M:%S"))
        print("%s in file %s: Bandpass filtering"%(subject,run))
        infile = outfile_residuals
        _,base,_=split_filename(infile)
        outfile = os.path.join(rundir,base+"_bp.nii.gz")
        bandpass = afni.Bandpass()
        bandpass.inputs.in_file = infile
        bandpass.inputs.highpass = 0.01
        bandpass.inputs.lowpass = 0.125
        bandpass.inputs.normalize = False
        bandpass.inputs.outputtype = "NIFTI_GZ"
        bandpass.inputs.out_file = outfile
        if not os.path.exists(outfile) or redo:
            bandpass.run()
        ##########################################################
        ##########################################################
        ## PATH 2: smooth, compute global signal after cleaning ##
        ##########################################################
        ##########################################################
        _,base,_=split_filename(infile)
        outfile_gs = os.path.join(rundir,base+"_globalsignal.txt")
        # compute GS after all cleaning (in non-gsr situation)
        cleaned_no_filtering = nib.load(infile).get_data()
        meants = np.mean(cleaned_no_filtering,axis=(0,1,2))
        meantsSeries = pd.Series(meants)
        meantsSeries_st = (meantsSeries-np.mean(meantsSeries))/np.std(meantsSeries)
        np.savetxt(outfile_gs,meantsSeries_st)
        outfile_GMgs = os.path.join(rundir,base+"_GMglobalsignal.txt")
        masker = NiftiLabelsMasker(labels_img = GMmaskfile,standardize=False)
        ts = masker.fit_transform(infile)[:,0]
        np.savetxt(outfile_GMgs,ts)
        _,base,_=split_filename(outfile)
        outfile_gs = os.path.join(rundir,base+"_globalsignal.txt")
        # compute GS after all bpfilter (in non-gsr situation)
        cleaned_no_filtering = nib.load(outfile).get_data()
        meants = np.mean(cleaned_no_filtering,axis=(0,1,2))
        meantsSeries = pd.Series(meants)
        meantsSeries_st = (meantsSeries-np.mean(meantsSeries))/np.std(meantsSeries)
        np.savetxt(outfile_gs,meantsSeries_st)
        outfile_GMgs = os.path.join(rundir,base+"_GMglobalsignal.txt")
        masker = NiftiLabelsMasker(labels_img = GMmaskfile,standardize=False)
        ts = masker.fit_transform(outfile)[:,0]
        np.savetxt(outfile_GMgs,ts)
        #################################################################
        #################################################################
        ## PATH 3: smooth, compute ALFF, tempvar, regional homogeneity ##
        #################################################################
        #################################################################
        # skip this for now:
        # NOTE(review): this `continue` makes everything below in this loop
        # body unreachable (PATH 3 deliberately disabled)
        continue
        ###############
        # Smooth data #
        ###############
        print(datetime.now().strftime("%a %b %d %H:%M:%S"))
        print("%s in file %s: Smoothing data"%(subject,run))
        # input/output
        infile = motion_regressed
        _,base,_=split_filename(infile)
        outfile = os.path.join(rundir,base+"_smooth.nii.gz")
        bim = afni.BlurInMask()
        bim.inputs.mask = maskfile
        bim.inputs.in_file = infile
        bim.inputs.out_file = outfile
        bim.inputs.fwhm = 5.0
        bim.run()
        ##############################
        # GLM smoothed global signal #
        ##############################
        print(datetime.now().strftime("%a %b %d %H:%M:%S"))
        print("%s in file %s: Computing global signal regressor"%(subject,run))
        # input/output
        infile = outfile
        _,base,_=split_filename(infile)
        betas = os.path.join(rundir,base+"_gsr_betas.nii.gz")
        glm = fsl.GLM()
        glm.inputs.in_file = infile
        glm.inputs.design = movement_regressors_mat
        glm.inputs.dat_norm = True
        glm.inputs.var_norm = True
        glm.inputs.demean = True
        glm.inputs.out_file = betas
        glm.run()
        ######################
        # Voxelwise variance #
        ######################
        print(datetime.now().strftime("%a %b %d %H:%M:%S"))
        print("%s in file %s: Computing voxelwise variance"%(subject,run))
        #input/output
        infile = outfile
        _,base,_=split_filename(infile)
        outfile = os.path.join(rundir,base+"_variance.nii.gz")
        # compute variance (i.e. voxelwise distance from global signal)
        image = nib.load(infile)
        data = image.get_data()
        voxvar = np.std(data,axis=3)
        # NOTE(review): get_affine()/get_header() are deprecated nibabel APIs
        img = nib.Nifti1Image(voxvar,affine=image.get_affine(),header=image.get_header())
        img.to_filename(outfile)
        ######################
        # Compute ALFF/fALFF #
        ######################
        print(datetime.now().strftime("%a %b %d %H:%M:%S"))
        print("%s in file %s: Computing fALFF"%(subject,run))
        # input/output
        infile = infile
        _,base,_=split_filename(infile)
        outfile = os.path.join(rundir,base+"_fALFF.nii.gz")
        # compute fALFF: amplitude in the 0.01-0.08 Hz band relative to the
        # total amplitude, then z-scored
        image = nib.load(infile)
        data = image.get_data()
        per = periodogram(data,axis=3,fs=0.72,nfft=720,scaling='spectrum')
        fALFFspectr = np.where(np.logical_and(per[0]>=0.01,per[0]<=0.08))
        amplitudes = np.sqrt(per[1])
        fALFFnum = np.sum(amplitudes[:,:,:,fALFFspectr[0]],axis=3)
        fALFFdenom = np.sum(amplitudes,axis=3)
        fALFF = fALFFnum/fALFFdenom
        fALFF = (fALFF-np.mean(fALFF))/np.std(fALFF)
        fALFF[np.where(np.isnan(fALFF))]=0
        fALFF_nii = nib.Nifti1Image(fALFF,affine=image.get_affine(),header=image.get_header())
        fALFF_nii.to_filename(outfile)
        # compute ALFF
        outfile = os.path.join(rundir,base+"_ALFF.nii.gz")
        ALFF = np.mean(amplitudes[:,:,:,fALFFspectr[0]],axis=3)
        ALFF = (ALFF-np.mean(ALFF))/np.std(ALFF)
        ALFF[np.where(np.isnan(ALFF))]=0
        ALFF_nii = nib.Nifti1Image(ALFF,affine=image.get_affine(),header=image.get_header())
        ALFF_nii.to_filename(outfile)
        ##########################
        # Regional Homogeneities #
        ##########################
        print(datetime.now().strftime("%a %b %d %H:%M:%S"))
        print("%s in file %s: Computing regional homogeneities"%(subject,run))
        # input/output
        infile = infile
        _,base,_=split_filename(infile)
        outfile = os.path.join(rundir,base+"_reho.nii.gz")
        # compute reho
        out = reho.compute_reho(infile, maskfile, 27,out_file=outfile)
|
{"hexsha": "9ae99d9e3882955a509b97b7d2fc3caa8a699bdb", "size": 17138, "ext": "py", "lang": "Python", "max_stars_repo_path": "postbids/bin/scripts/timeseries_clean.py", "max_stars_repo_name": "jokedurnez/Psychosis", "max_stars_repo_head_hexsha": "1887ecb374b001de7f6aadb028c04e09e5bc4f7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "postbids/bin/scripts/timeseries_clean.py", "max_issues_repo_name": "jokedurnez/Psychosis", "max_issues_repo_head_hexsha": "1887ecb374b001de7f6aadb028c04e09e5bc4f7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "postbids/bin/scripts/timeseries_clean.py", "max_forks_repo_name": "jokedurnez/Psychosis", "max_forks_repo_head_hexsha": "1887ecb374b001de7f6aadb028c04e09e5bc4f7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9366336634, "max_line_length": 94, "alphanum_fraction": 0.599544871, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4371}
|
import numpy as np
import numba
@numba.njit(cache=True)
def maxprominencedip(events, start, end, top, n):
    """
    Find the negative peaks with the maximum prominence in arrays.

    For computing the prominence, maxima occurring on the border of the array
    are ignored, unless both the left and right maxima occur on the border.

    Parameters
    ----------
    events : array (nevents, N)
        The arrays.
    start, end : int array (nevents,)
        Each row of `events` is used only from the sample specified by
        `start` (inclusive) to `end` (exclusive).
    top : array (nevents,)
        For computing the prominence, maxima are capped at `top`.
    n : int
        The number of peaks to keep in order of prominence.

    Return
    ------
    position : int array (nevents, n)
        The indices of the peaks in each event, sorted along the second axis
        from lower to higher prominence. -1 for no peak found. If a local
        minimum has a flat bottom, the index of the central (rounding toward
        zero) sample is returned.
    prominence : int array (nevents, n)
        The prominence of the peaks.
    """
    # TODO implement using guvectorize
    shape = (len(events), n)
    prominence = np.full(shape, -2 ** 20, events.dtype)
    position = np.full(shape, -1)
    for ievent, event in enumerate(events):
        assert start[ievent] >= 0
        assert end[ievent] <= len(event)
        maxprom = prominence[ievent]
        maxprompos = position[ievent]
        # BUG FIX: `relmin` was read (`if relmin:`) before ever being assigned
        # when the leading samples contain no local minimum; initialize it.
        relmin = False
        relminpos = -1
        for i in range(start[ievent] + 1, end[ievent] - 1):
            if event[i - 1] > event[i] < event[i + 1]:
                # narrow local minimum
                relmin = True
                relminpos = i
            elif event[i - 1] > event[i] == event[i + 1]:
                # possibly beginning of wide local minimum
                relminpos = i
            elif event[i - 1] == event[i] < event[i + 1] and relminpos >= 0:
                # end of wide local minimum
                relmin = True
                relminpos = (relminpos + i) // 2
            else:
                relminpos = -1
            if relmin:
                # search for maximum before minimum position
                irev = relminpos
                lmax = event[irev]
                ilmax = irev
                maxmax = top[ievent]
                while irev >= start[ievent] and event[irev] >= event[relminpos] and lmax < maxmax:
                    if event[irev] > lmax:
                        lmax = event[irev]
                        ilmax = irev
                    irev -= 1
                lmax = min(lmax, maxmax)
                lmaxb = ilmax == start[ievent]
                # search for maximum after minimum position
                ifwd = relminpos
                rmax = event[ifwd]
                irmax = ifwd
                while ifwd < end[ievent] and event[ifwd] >= event[relminpos] and rmax < maxmax:
                    if event[ifwd] > rmax:
                        rmax = event[ifwd]
                        irmax = ifwd
                    ifwd += 1
                rmax = min(rmax, maxmax)
                rmaxb = irmax == end[ievent] - 1
                # compute prominence; border maxima are ignored unless both
                # sides hit the border
                if (not rmaxb and not lmaxb) or (rmaxb and lmaxb):
                    maximum = min(lmax, rmax)
                elif rmaxb:
                    maximum = lmax
                elif lmaxb:
                    maximum = rmax
                prom = maximum - event[relminpos]
                # insert minimum into list sorted by prominence
                if prom > maxprom[0]:
                    for j in range(1, n):
                        if prom <= maxprom[j]:
                            break
                        else:
                            maxprom[j - 1] = maxprom[j]
                            maxprompos[j - 1] = maxprompos[j]
                    else:
                        j = n
                    maxprom[j - 1] = prom
                    maxprompos[j - 1] = relminpos
                # reset minimum flag
                relmin = False
                relminpos = -1
    return position, prominence
def test_maxprominencedip():
    """
    Plot a random test of `maxprominencedip`.
    """
    t = np.linspace(0, 1, 1000)
    mu = np.random.uniform(0, 1, 20)
    logsigma = np.random.randn(len(mu))
    sigma = 0.2 * np.exp(logsigma)
    # sum of downward gaussian bumps: plenty of dips to rank by prominence
    wf = -np.sum(np.exp(-1/2 * ((t[:, None] - mu) / sigma) ** 2), axis=-1)
    start = 500
    # BUG FIX: the `end` argument was missing from this call (the function
    # takes 5 positional arguments, only 4 were passed). Search from `start`
    # to the end of the waveform, capping maxima at 0, keeping the 2 most
    # prominent dips.
    pos, prom = maxprominencedip(wf[None], np.array([start]), np.array([len(wf)]), np.array([0]), 2)
    fig, ax = plt.subplots(num='maxprominencedip.test_maxprominencedip', clear=True)
    ax.plot(wf)
    ax.axvline(start, linestyle='--')
    for i, p in zip(pos[0], prom[0]):
        print(i, p)
        if i >= 0:
            # mark each found dip and the level its prominence reaches
            ax.vlines(i, wf[i], wf[i] + p)
            ax.axhline(wf[i] + p)
    fig.tight_layout()
    fig.show()
if __name__ == '__main__':
    # run the visual self-test when executed as a script
    import matplotlib.pyplot as plt
    test_maxprominencedip()
|
{"hexsha": "16e387c30c550c4f2dae24588eed8d6146257c60", "size": 5176, "ext": "py", "lang": "Python", "max_stars_repo_path": "maxprominencedip.py", "max_stars_repo_name": "Gattocrucco/sipmfilter", "max_stars_repo_head_hexsha": "74215d6c53b998808fc6c677b46030234d996bdf", "max_stars_repo_licenses": ["CC-BY-4.0", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "maxprominencedip.py", "max_issues_repo_name": "Gattocrucco/sipmfilter", "max_issues_repo_head_hexsha": "74215d6c53b998808fc6c677b46030234d996bdf", "max_issues_repo_licenses": ["CC-BY-4.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "maxprominencedip.py", "max_forks_repo_name": "Gattocrucco/sipmfilter", "max_forks_repo_head_hexsha": "74215d6c53b998808fc6c677b46030234d996bdf", "max_forks_repo_licenses": ["CC-BY-4.0", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7382550336, "max_line_length": 98, "alphanum_fraction": 0.4886012365, "include": true, "reason": "import numpy,import numba", "num_tokens": 1292}
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import numpy as np
from . import variables
def test_softmax_categorical_deterministic() -> None:
    """Deterministic SoftmaxCategorical always picks the highest-weight option."""
    variable = variables.SoftmaxCategorical(["blu", "blublu", "blublublu"], deterministic=True)
    np.testing.assert_equal(variable.data_to_argument([1, 1, 1.01], random=True), "blublublu")
def test_softmax_categorical() -> None:
    """SoftmaxCategorical samples reproducibly and round-trips an argument."""
    np.random.seed(12)
    variable = variables.SoftmaxCategorical(["blu", "blublu", "blublublu"])
    np.testing.assert_equal(variable.data_to_argument([.5, 1, 2.]), "blublu")
    recovered = variable.data_to_argument(variable.argument_to_data("blu"), random=False)
    np.testing.assert_equal(recovered, "blu")
def test_ordered_discrete() -> None:
    """OrderedDiscrete maps scalar data onto options, with a round trip for encoding."""
    var = variables.OrderedDiscrete(["blu", "blublu", "blublublu"])
    np.testing.assert_equal(var.data_to_argument([5]), "blublublu")
    np.testing.assert_equal(var.data_to_argument([0]), "blublu")
    # Round trip back to the first option.
    np.testing.assert_equal(var.data_to_argument(var.argument_to_data("blu"), random=False), "blu")
def test_gaussian() -> None:
    """Gaussian(1, 3) maps standardized data affinely (0.5 -> 1 + 0.5*3) and round-trips."""
    var = variables.Gaussian(1, 3)
    np.testing.assert_equal(var.data_to_argument([.5]), 2.5)
    np.testing.assert_equal(var.data_to_argument(var.argument_to_data(12)), 12)
def test_scalar() -> None:
    """Scalar(int) rounds incoming data to an int and encodes arguments back as floats."""
    var = variables.Scalar(int)
    np.testing.assert_equal(var.data_to_argument([.7]), 1)
    np.testing.assert_equal(var.argument_to_data(1), [1.])
def test_array_as_ascalar() -> None:
    """asscalar() collapses a one-element Array variable into a python scalar."""
    float_var = variables.Array(1).exponentiated(10, -1).asscalar()
    data = np.array([2])
    result = float_var.data_to_argument(data)
    np.testing.assert_equal(result, 0.01)
    np.testing.assert_almost_equal(float_var.argument_to_data(result), data)
    # int casting rounds to the nearest integer
    int_var = variables.Array(1).asscalar(int)
    np.testing.assert_equal(int_var.data_to_argument(np.array([.4])), 0)
    np.testing.assert_equal(int_var.data_to_argument(np.array([-.4])), 0)
    result = int_var.data_to_argument(np.array([.6]))
    np.testing.assert_equal(result, 1)
    assert type(result) == int  # pylint: disable=unidiomatic-typecheck
    # invalid usages must raise
    with pytest.raises(RuntimeError):
        variables.Array(1).asscalar(int).asscalar(float)
    with pytest.raises(RuntimeError):
        variables.Array(2).asscalar(int)
    with pytest.raises(ValueError):
        variables.Array(1).asscalar(np.int64)  # type: ignore
def test_array() -> None:
    """Affine scaling followed by an arctan bound squashes values into [3, 5]."""
    bounded = variables.Array(2, 2).affined(1000000).bounded(3, 5, transform="arctan")
    data = np.array([-10, 10, 0, 0])
    result = bounded.data_to_argument(data)
    np.testing.assert_almost_equal(result, [[3., 5], [4, 4]])
    # And the transform is invertible back to the raw data.
    np.testing.assert_almost_equal(bounded.argument_to_data(result), data)
|
{"hexsha": "5b39fd7bd17de7dc1eb5c94861d2c9a22f3b9636", "size": 2803, "ext": "py", "lang": "Python", "max_stars_repo_path": "nevergrad/instrumentation/test_variables.py", "max_stars_repo_name": "vishalshar/nevergrad", "max_stars_repo_head_hexsha": "07b5b332786ce5ff831dfabee892bb9397838f70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-22T16:18:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-22T16:18:01.000Z", "max_issues_repo_path": "nevergrad/instrumentation/test_variables.py", "max_issues_repo_name": "akhti/nevergrad", "max_issues_repo_head_hexsha": "98a4ca92dff704f9df0bc58554bd51e5fa477362", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nevergrad/instrumentation/test_variables.py", "max_forks_repo_name": "akhti/nevergrad", "max_forks_repo_head_hexsha": "98a4ca92dff704f9df0bc58554bd51e5fa477362", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0428571429, "max_line_length": 103, "alphanum_fraction": 0.7103103817, "include": true, "reason": "import numpy", "num_tokens": 744}
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# We're not responsible for pytest decorators
# mypy: disallow_untyped_decorators = False
"""
Collection of some testing utilities for the Fairscale library. Please complement as
you see fit, but refrain from ad-hoc test utils within the different feature sets and
relative imports.
"""
import contextlib
import functools
import gc
import inspect
import logging
import multiprocessing
import os
import random
from statistics import mean
import subprocess
import sys
import tempfile
from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, List, Optional, Tuple, Union
import numpy
import pytest
import torch
from torch import Tensor
import torch.distributed as dist
from torch.distributed import rpc
import torch.multiprocessing as mp
import torch.nn as nn
from fairscale.nn.model_parallel import destroy_model_parallel, initialize_model_parallel
from fairscale.nn.model_parallel.random import model_parallel_cuda_manual_seed
from fairscale.utils import torch_version
if TYPE_CHECKING:
    # At type-check time nn.Module is treated as generic over its forward return type.
    Base = nn.Module[Tensor]
else:
    # At runtime nn.Module is not subscriptable, so use the plain class.
    Base = nn.Module
# Common pytest skip markers shared by the test suite.
skip_if_cuda = pytest.mark.skipif(torch.cuda.is_available(), reason="Testing only on CPUs to save time")
skip_if_no_cuda = pytest.mark.skipif(
    not torch.cuda.is_available() or torch.cuda.device_count() < 1, reason="CUDA required"
)
skip_if_single_gpu = pytest.mark.skipif(
    not torch.cuda.is_available() or torch.cuda.device_count() < 2, reason="multiple GPUs required"
)
skip_if_less_than_four_gpu = pytest.mark.skipif(
    not torch.cuda.is_available() or torch.cuda.device_count() < 4, reason="4 GPUs or more required"
)
skip_if_py38 = pytest.mark.skipif(
    sys.version_info.major == 3 and sys.version_info.minor == 8, reason="Python3.8 is skipped"
)
skip_if_py39_no_cuda = pytest.mark.skipif(
    not torch.cuda.is_available() and sys.version_info.major == 3 and sys.version_info.minor == 9,
    reason="Python3.9 without CUDA is skipped",
)
# Devices usable for parameterized tests on this machine.
available_devices = ["cpu"]
if torch.cuda.is_available():
    available_devices.append("cuda")
# Shared init file for MPI runs; lazily assigned on first use inside torch_spawn.
filename_mpi: Optional[str] = None
class IdentityLayer(Base):
    """Module holding a single learnable weight vector; ``forward`` ignores its inputs and returns it."""

    def __init__(self, size: int, scale: float = 1.0) -> None:
        super().__init__()
        # Random weights scaled at construction time.
        self.weight = torch.nn.Parameter(scale * torch.randn(size))

    def forward(self, *_: Any, **__: Any) -> Tensor:
        return self.weight
def set_random_seed(seed: int) -> None:
    """Set random seed for reproducibility."""
    # Seed python, numpy, torch and the model-parallel CUDA RNGs in that order.
    for seeder in (random.seed, numpy.random.seed, torch.manual_seed, model_parallel_cuda_manual_seed):
        seeder(seed)
# Global variable to cache the results from the first nvidia-smi execution.
_smi_ver: Optional[str] = None
def torch_cuda_version(compiled: bool = False) -> Tuple[int, ...]:
    """Return the CUDA version as a tuple of ints, e.g. (11, 2).

    With ``compiled=True`` this is the CUDA version torch was built against;
    otherwise it is the version reported by ``nvidia-smi``, cached in
    ``_smi_ver`` after the first call.
    """
    if compiled:
        numbering = torch.version.cuda.split(".")[:2]
    else:
        global _smi_ver
        if _smi_ver is None:

            def get_smi_ver() -> str:
                """Get CUDA version from nvidia-smi"""
                for line in subprocess.check_output("nvidia-smi".split()).decode("utf-8").split("\n"):
                    if "CUDA Version" in line:
                        # Token 8 of the banner line holds the version number.
                        res = line.split()[8]
                        # Only CUDA 10.x / 11.x are expected here.
                        assert res.startswith("10.") or res.startswith("11."), res
                        return res
                assert False
            _smi_ver = get_smi_ver()
        numbering = _smi_ver.split(".")[:2]
    return tuple(int(n) for n in numbering)
def dist_init(rank: int, world_size: int, filename: str, filename_rpc: str = "") -> bool:
    """
    Initialize torch distributed (process group and RPC), based on a temporary file
    shared across ranks, which makes it possible for unrelated tests to be run
    concurrently.

    Returns False if not enough GPUs are present for the requested world size.

    .. warning: This limits the usecase to all ranks being on the same node
    """
    # Shut down any RPC state left over from a previous test (best effort).
    try:
        torch.distributed.rpc.shutdown()
    except Exception:
        pass
    print(f"dist init r={rank}, world={world_size}")
    os.environ["WORLD_SIZE"] = str(world_size)
    os.environ["RANK"] = str(rank)
    # file:// rendezvous URLs for the process group and for RPC.
    url = "file://" + filename
    url_rpc = "file://" + filename_rpc
    if torch_version() >= (1, 6, 0):
        # nccl only when GPUs can actually cover the world size; otherwise gloo.
        backend = "nccl" if torch.cuda.is_available() else "gloo"
        if backend == "nccl" and torch.cuda.device_count() < world_size:
            logging.warning("Requested world size cannot be reached on this machine, not enough GPUs")
            return False
        torch.distributed.init_process_group(backend=backend, rank=rank, world_size=world_size, init_method=url)
        tp_options = {"init_method": url_rpc}
        # Workaround for bug in torch v1.8.0. Should be fixed in v1.8.1
        if torch_version() == (1, 8, 0):
            if torch.cuda.is_available():
                # Workaround for https://github.com/pytorch/pytorch/issues/53844
                tp_options["_transports"] = ["ibv", "uv"]  # type: ignore
            else:
                # Workaround for https://github.com/pytorch/pytorch/issues/54266
                tp_options["_channels"] = ["mpt_uv", "basic", "cuda_ipc", "cuda_gdr", "cuda_xth", "cuda_basic"]  # type: ignore
        rpc.init_rpc(
            f"Test{rank}",
            rank=rank,
            world_size=world_size,
            backend=rpc.BackendType.TENSORPIPE,
            rpc_backend_options=rpc.TensorPipeRpcBackendOptions(**tp_options),
        )
    else:
        if world_size > 1:
            # TensorPipe is not available in Torch 1.5
            rpc.init_rpc(
                name=f"Test{rank}",
                rank=rank,
                world_size=world_size,
                rpc_backend_options=rpc.ProcessGroupRpcBackendOptions(init_method=url_rpc),
            )
        elif torch.cuda.is_available():
            torch.distributed.init_process_group(backend="nccl", rank=rank, world_size=world_size, init_method=url)
        else:
            return False
    # Pin each rank to a GPU (round-robin) when GPUs are present.
    if torch.cuda.is_available() and torch.cuda.device_count():
        torch.cuda.set_device(rank % torch.cuda.device_count())
    return True
def get_worker_map() -> Dict[Any, Any]:
    """Map every distributed rank to its RPC worker name ("Test<rank>")."""
    world = dist.get_world_size()
    return {r: f"Test{r}" for r in range(world)}
def get_world_sizes() -> List[int]:
    """Return the subset of {1, 2, 4, 8} world sizes runnable on this machine's GPUs."""
    n_gpus = torch.cuda.device_count()
    return [size for size in (1, 2, 4, 8) if size <= n_gpus]
def spawn_for_all_world_sizes(test_func: Callable, world_sizes: Optional[List[int]] = None, args: Any = ()) -> None:
    """Run *test_func* once per candidate world size via ``mp.spawn``.

    Args:
        test_func: entry point invoked as ``test_func(rank, world_size, filename, filename_rpc, *args)``.
        world_sizes: world sizes to exercise; defaults to :func:`get_world_sizes`, evaluated at
            call time. (The previous default called it at import time and exposed a shared
            mutable list as the default value.)
        args: extra positional arguments forwarded to every spawned process.
    """
    if world_sizes is None:
        world_sizes = get_world_sizes()
    for world_size in world_sizes:
        # Fresh rendezvous files per world size so concurrent tests cannot collide.
        _, filename = tempfile.mkstemp()
        _, filename_rpc = tempfile.mkstemp()
        # (lefaudeux) Let mp handle the process joining, join=False and handling context has been unstable in the past
        mp.spawn(test_func, args=(world_size, filename, filename_rpc, *args), nprocs=world_size, join=True)
def worker_process(
    rank: int, world_size: int, filename: str, filename_rpc: str, func: Callable, args: Any, error_queue: Any
) -> None:
    """Entry point for a worker spawned by torch_spawn: init distributed, run ``func(*args)``, tear down.

    A pytest ``Skipped`` exception raised by *func* is forwarded through
    *error_queue* so the parent can re-issue ``pytest.skip``; any other
    exception is re-raised after teardown.
    """
    if not dist_init(rank, world_size, filename, filename_rpc):
        logging.warning("failed initializing torch distributed")
        teardown()
        return
    kwargs = {}
    if "OMPI_COMM_WORLD_RANK" not in os.environ:
        # Outside MPI runs, use gloo for the pipeline backend.
        kwargs["pipeline_backend"] = "gloo"
    initialize_model_parallel(1, world_size, **kwargs)
    try:
        func(*args)
        teardown()
    except BaseException as e:
        logging.warning(f" Rank {rank}: {e}")
        # Make sure that the group is properly destroyed, even for tests which check for exceptions being raised
        teardown()
        # If the function raises 'Skipped', this indicates pytest.skip(), so
        # forward it to parent so we can call pytest.skip() there
        if e.__class__.__name__ == "Skipped":
            error_queue.put(str(e))
            return
        raise e
def teardown() -> None:
    """Tear down all distributed state set up by :func:`dist_init`.

    Destroys the fairscale model-parallel groups, the default process group,
    and shuts down RPC (best effort).
    """
    destroy_model_parallel()
    if torch.distributed.is_initialized():
        torch.distributed.destroy_process_group()
    try:
        # torch 1.5 hangs on shutdown if waiting for all processes
        torch.distributed.rpc.shutdown(graceful=False)
    except Exception:
        # RPC may never have been initialized for this test.
        pass
def torch_spawn(world_sizes: Optional[List[int]] = None) -> Callable:
    """Decorator factory: run the decorated function once per requested world size.

    The decorated function must NOT be named ``test_*`` itself; a wrapper named
    ``test_<name>`` is registered in the caller's module so pytest discovers the
    wrapper instead. Under MPI (``OMPI_COMM_WORLD_RANK`` set) the current process
    joins the group directly; otherwise workers are spawned via multiprocessing.
    """
    if world_sizes is None:
        world_sizes = get_world_sizes()
    def prepare_test(func: Callable) -> Callable:
        """Function called with the test function as the argument. Generates a
        replacement which serves as the actual test function."""
        name = func.__name__
        parameters = inspect.signature(func).parameters
        if name.startswith("test"):
            raise ValueError(
                f"Tests marked with @torch_spawn (i.e. '{name}') should not have names beginning in 'test' as they will"
                " be picked up by pytest without running the spawn wrapper"
            )
        @functools.wraps(func)
        def replacement(*args: Any, **kwargs: Any) -> None:
            # pytest passes fixtures as keyword arguments only.
            assert args == tuple()
            assert world_sizes is not None  # mypy crutch
            args = tuple(
                kwargs[p] for p in parameters if p != "rank"
            )  # converting named parameters to positional parameters to pass to `spawn`
            error_queue = multiprocessing.get_context("spawn").SimpleQueue()
            if "OMPI_COMM_WORLD_RANK" in os.environ:
                # TODO (Min): this global used to be assigned every time this file is imported.
                # I changed it to be assigned on first use. Should be the same, but I am not
                # sure this is used or is correct since different processes would have different
                # file names to init_process_group below. By initing, here, we don't leave
                # a temp file behind on importing time.
                global filename_mpi
                if filename_mpi is None:
                    filename_mpi = tempfile.mkstemp()[1]
                os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
                os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
                torch.distributed.init_process_group("mpi", init_method=f"file://{filename_mpi}")
                world_size = torch.distributed.get_world_size()
                destroy_model_parallel()
                initialize_model_parallel(1, world_size)
                torch.cuda.set_device(torch.distributed.get_rank() % torch.cuda.device_count())
                if world_size in world_sizes:
                    try:
                        func(*args)
                        teardown()
                    except BaseException as e:
                        teardown()
                        import traceback
                        print(f"{traceback.format_exc()}")
                        raise e
                else:
                    pytest.skip("Requested world size doesn't match current world size")
            else:
                spawn_for_all_world_sizes(worker_process, world_sizes, (func, args, error_queue))
                # A worker that called pytest.skip() forwards its message through the queue.
                if not error_queue.empty():
                    msg = error_queue.get()
                    pytest.skip(msg)
        # Register a function with the same name, prefixed with "test_" in the
        # calling module, so it will be picked up by pytest
        current_frame = inspect.currentframe()
        assert current_frame is not None
        caller_module = inspect.getmodule(current_frame.f_back)
        setattr(caller_module, f"test_{name}", replacement)
        return func
    return prepare_test
class _Block(Base):
    """Pre-LayerNorm transformer block: masked self-attention and an MLP, each with a residual add."""

    def __init__(self, embed_dim: int, num_heads: int) -> None:
        super().__init__()
        self.ln_1 = nn.LayerNorm(embed_dim)
        self.ln_2 = nn.LayerNorm(embed_dim)
        self.attn = nn.MultiheadAttention(embed_dim, num_heads)  # type: ignore
        # 4x expansion MLP, the usual transformer feed-forward shape.
        self.mlp = nn.Sequential(nn.Linear(embed_dim, embed_dim * 4), nn.GELU(), nn.Linear(embed_dim * 4, embed_dim),)

    def forward(self, *inputs: Any, **kwargs: Any) -> Tensor:
        # Expects the sequence tensor as the first positional input.
        x = inputs[0]
        # Causal mask: -inf above the diagonal forbids attending to later positions.
        attn_mask = torch.full((len(x), len(x)), -float("Inf"), device=x.device, dtype=x.dtype)
        attn_mask = torch.triu(attn_mask, diagonal=1)
        x = self.ln_1(x)
        a, _ = self.attn(x, x, x, attn_mask=attn_mask, need_weights=False)
        x = x + a  # residual around attention
        m = self.mlp(self.ln_2(x))
        x = x + m  # residual around MLP
        return x
class GPT2(Base):
    """
    GPT2 pytorch implementation, for testing purposes in the image-GPT context
    Credits: https://github.com/teddykoker/image-gpt"""

    def __init__(
        self, embed_dim: int, num_heads: int, num_layers: int, num_positions: int, num_vocab: int, num_classes: int
    ) -> None:
        super().__init__()
        self.embed_dim = embed_dim
        # start of sequence token
        self.sos = torch.nn.Parameter(torch.zeros(embed_dim))
        nn.init.normal_(self.sos)
        self.token_embeddings = nn.Embedding(num_vocab, embed_dim)
        self.position_embeddings = nn.Embedding(num_positions, embed_dim)
        self.layers = nn.ModuleList()
        for _ in range(num_layers):
            self.layers.append(_Block(embed_dim, num_heads))
        # Final layer norm plus two heads: generative (vocab logits) and classification.
        self.ln_f = nn.LayerNorm(embed_dim)
        self.head = nn.Linear(embed_dim, num_vocab, bias=False)
        self.clf_head = nn.Linear(embed_dim, num_classes)

    def forward(self, x: Tensor, classify: bool = False) -> Any:  # type: ignore
        """
        Expect input as shape [sequence len, batch]
        If classify, return (classification logits, generative logits);
        otherwise return generative logits only.
        """
        length, batch = x.shape
        h = self.token_embeddings(x)
        # prepend sos token (and drop the last input token to keep the length).
        sos = torch.ones(1, batch, self.embed_dim, device=x.device) * self.sos
        h = torch.cat([sos, h[:-1, :, :]], dim=0)
        # add positional embeddings
        positions = torch.arange(length, device=x.device).unsqueeze(-1)
        h = h + self.position_embeddings(positions).expand_as(h)
        # transformer
        for layer in self.layers:
            h = layer(h)
        h = self.ln_f(h)
        logits = self.head(h)
        if not classify:
            # return logits
            return logits
        h = torch.mean(h, dim=0)  # average pool over sequence
        # return classification logits and generative logits
        return self.clf_head(h), logits
def objects_are_equal(a: Any, b: Any, raise_exception: bool = False, dict_key: Optional[str] = None) -> bool:
    """Recursively test that two objects are equal.

    Tensors are compared to ensure matching size, dtype, device and values.
    With ``raise_exception=True`` the first mismatch raises instead of
    returning False; *dict_key* annotates tensor assertion errors raised
    while comparing dict entries.
    """
    if type(a) is not type(b):
        if raise_exception:
            raise ValueError(f"type mismatch {type(a)} vs. {type(b)}")
        return False
    if isinstance(a, dict):
        if set(a.keys()) != set(b.keys()):
            if raise_exception:
                raise ValueError(f"keys mismatch {a.keys()} vs. {b.keys()}")
            return False
        # Short-circuits on the first differing value, propagating the key.
        return all(objects_are_equal(a[key], b[key], raise_exception, key) for key in a.keys())
    if isinstance(a, (list, tuple, set)):
        if len(a) != len(b):
            if raise_exception:
                raise ValueError(f"length mismatch {len(a)} vs. {len(b)}")
            return False
        return all(objects_are_equal(x, y, raise_exception) for x, y in zip(a, b))
    if torch.is_tensor(a):
        try:
            # assert_allclose doesn't strictly test shape, dtype and device
            if a.size() != b.size() or a.dtype != b.dtype or a.device != b.device:
                if not raise_exception:
                    return False
                raise AssertionError(
                    f"sizes: {a.size()} vs. {b.size()}, "
                    f"types: {a.dtype} vs. {b.dtype}, "
                    f"device: {a.device} vs. {b.device}"
                )
            # assert_allclose.
            torch.testing.assert_allclose(a, b)
            return True
        except (AssertionError, RuntimeError) as err:
            if not raise_exception:
                return False
            if dict_key and isinstance(err, AssertionError):
                # Add dict key to the assertion error.
                raise AssertionError(f"For dict key '{dict_key}': {err.args[0]}") from None
            raise err
    return a == b
def check_same_model_params(model_a: torch.nn.Module, model_b: torch.nn.Module, message: str = "") -> None:
    """Assert that both models hold numerically matching parameters and buffers.

    Parameters are compared with a loose tolerance (atol=1e-3); buffers use
    torch.allclose defaults.
    """
    for p_a, p_b in zip(model_a.parameters(), model_b.parameters()):
        matching = torch.allclose(p_a, p_b, atol=1e-3)
        assert matching, f"Model parameters differ\n{p_a} {p_b}\n" + message
    for b_a, b_b in zip(model_a.buffers(), model_b.buffers()):
        matching = torch.allclose(b_a, b_b)
        assert matching, f"Model buffers differ {b_a} - {b_b}\n" + message
def check_same_models_across_ranks(
    model: torch.nn.Module, process_group: Any, params_should_be_equal: bool, check_broadcast_buffers: bool
) -> None:
    """All-gather parameters (and optionally buffers) across *process_group* and,
    on rank 0, assert that every rank holds the same values.

    When *params_should_be_equal* is False the asserts are vacuously true, so the
    function only exercises the collectives.
    """
    world_size = dist.get_world_size(process_group)
    rank = dist.get_rank(process_group)
    for param in model.parameters():
        # collect the params across the rank
        receptacle = [param.clone() for _ in range(world_size)]
        dist.all_gather(receptacle, param, group=process_group)
        if rank == 0:
            # Compare every other rank's copy against rank 0's copy.
            for sync_p in receptacle[1:]:
                assert not params_should_be_equal or torch.all(
                    torch.eq(receptacle[0], sync_p)
                ), f"Models differ in between ranks {receptacle[0]} - {sync_p}"
    # Check that all the buffers are in sync (authoritative rank is 0, its buffer is 0)
    if check_broadcast_buffers:
        for buffer in model.buffers():
            receptacle = [buffer.clone() for _ in range(world_size)]
            dist.all_gather(receptacle, buffer, group=process_group)
            if rank == 0:
                for sync_b in receptacle[1:]:
                    assert not params_should_be_equal or torch.all(
                        torch.eq(receptacle[0], sync_b)
                    ), f"Models differ in between ranks {receptacle[0]} - {sync_b}"
class DeviceAndTypeCheckModule(Base):
    """A simple module for checking Tensor devices and dtypes.

    Each ``expected_*`` argument left as None disables that particular check;
    the forward pass asserts the observed value against every non-None
    expectation.
    """

    def __init__(
        self,
        expected_input_dtype: Optional[torch.dtype] = None,
        expected_input_device: Optional[torch.device] = None,
        expected_param_dtype: Optional[torch.dtype] = None,
        expected_param_device: Optional[torch.device] = None,
        expected_loss_dtype: Optional[torch.dtype] = None,
        expected_loss_device: Optional[torch.device] = None,
        expected_buffer_dtype: Optional[torch.dtype] = None,  # annotation fixed: compared against a dtype in forward()
    ):
        super().__init__()
        self.expected_input_dtype = expected_input_dtype
        self.expected_input_device = expected_input_device
        self.expected_param_dtype = expected_param_dtype
        self.expected_param_device = expected_param_device
        self.expected_loss_dtype = expected_loss_dtype
        self.expected_loss_device = expected_loss_device
        self.expected_buffer_dtype = expected_buffer_dtype
        self.linear = nn.Linear(5, 5)
        self.register_buffer("buffer", torch.rand((5,)))

    def _check(
        self,
        key: str,
        x: Union[torch.device, torch.dtype],
        expected: Union[Optional[torch.device], Optional[torch.dtype]],
    ) -> None:
        # None disables the check; otherwise the observed value must match exactly.
        assert expected in {None, x}, f"{key} ({x}) != expected ({expected})"

    def forward(self, *input: Tensor, **kwargs: Any) -> Tensor:
        x = input[0]
        self._check("input.dtype", x.dtype, self.expected_input_dtype)
        self._check("input.device", x.device, self.expected_input_device)
        param = self.linear.weight
        self._check("param.dtype", param.dtype, self.expected_param_dtype)
        self._check("param.device", param.device, self.expected_param_device)
        self._check("buffer.dtype", self.buffer.dtype, self.expected_buffer_dtype)  # type: ignore
        x = x + self.buffer
        # Scalar loss so dtype/device propagation through the linear layer is observable.
        loss = (self.linear(x) + self.buffer).sum()
        self._check("loss.dtype", loss.dtype, self.expected_loss_dtype)
        self._check("loss.device", loss.device, self.expected_loss_device)
        return loss
@functools.lru_cache()
def get_cycles_per_ms() -> float:
    """Measure and return approximate number of cycles per millisecond for torch.cuda._sleep
    Copied from: github.com/pytorch/pytorch/blob/master/test/test_cuda.py
    """

    def measure() -> float:
        """Time one fixed-length torch.cuda._sleep with CUDA events and derive cycles/ms."""
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        torch.cuda._sleep(1000000)
        end.record()
        end.synchronize()
        cycles_per_ms = 1000000 / start.elapsed_time(end)
        return cycles_per_ms

    # Get 10 values and remove the 2 max and 2 min and return the avg.
    # This is to avoid system disturbance that skew the results, e.g.
    # the very first cuda call likely does a bunch of init, which takes
    # much longer than subsequent calls.
    #
    # Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs
    # and seems to return stable values. Therefore, we enable caching
    # using lru_cache decorator above.
    num = 10
    vals = []
    for _ in range(num):
        vals.append(measure())
    vals = sorted(vals)
    return mean(vals[2 : num - 2])
class DummyProcessGroup:
    """Minimal stand-in for a torch process group, exposing only rank() and size()."""

    def __init__(self, rank: int, size: int):
        self._rank, self._size = rank, size

    def rank(self) -> int:
        """Rank this dummy group was constructed with."""
        return self._rank

    def size(self) -> int:
        """World size this dummy group was constructed with."""
        return self._size
class SGDWithPausingCompute(torch.optim.SGD):
    """SGD variant that stalls a side CUDA stream after each step and can skew
    parameters per rank, used to exercise stream synchronization in OSS/ShardedDDP."""

    def __init__(self, *args, **kwargs) -> None:  # type: ignore
        # "rank" is consumed here; the base optimizer must not see it.
        self.rank = kwargs.pop("rank")
        super().__init__(*args, **kwargs)

    def step(self, closure: Optional[Any] = None) -> Any:
        loss = super().step(closure=closure)
        # This is used to make sure that OSS and ShardedDDP enforce a proper stream synchronization
        # - Add a long cuda wait on a compute stream, non blocking from the CPU perspective
        with torch.cuda.stream(torch.cuda.Stream()):
            torch.cuda._sleep(100000000)
        # - optionally change the params on a per rank basis
        with torch.no_grad():
            for group in self.param_groups:
                for weight in group["params"]:
                    weight *= 1.0 + self.rank / 10.0
        return loss
def state_dict_norm(state: Dict[str, torch.Tensor]) -> torch.Tensor:
    """Sum the norms of every tensor in *state* into one scalar-like tensor,
    for cheap state_dict comparisons."""
    total = torch.zeros(1)
    for tensor in state.values():
        # Integer tensors have no norm; promote them to float first.
        if not tensor.is_floating_point():
            tensor = tensor.float()
        total += tensor.norm()
    return total
def rmf(filename: str) -> None:
    """Remove a file like rm -f: a missing file is not an error."""
    with contextlib.suppress(FileNotFoundError):
        os.remove(filename)
@contextlib.contextmanager
def in_temporary_directory() -> Generator:
    """Run the body of the ``with`` block inside a fresh temporary directory.

    The previous working directory is restored (and the directory removed)
    on exit, even when the body raises.
    """
    previous = os.getcwd()
    with tempfile.TemporaryDirectory() as temp_dir:
        os.chdir(temp_dir)
        try:
            yield temp_dir
        finally:
            os.chdir(previous)
@contextlib.contextmanager
def temp_files_ctx(num: int) -> Generator:
    """Yield a tuple of *num* temporary file paths and clean them up on exit."""
    paths = tuple(tempfile.mkstemp()[1] for _ in range(num))
    try:
        yield paths
    finally:
        # temp files could have been removed, so we use rmf.
        for path in paths:
            rmf(path)
def dump_all_tensors(rank: int) -> None:
    """Debug helper: on rank 0, print every live tensor found by the garbage
    collector (type, shape, dtype, device, storage size) plus the CUDA memory
    summary. No-op on other ranks."""
    if rank != 0:
        return
    for obj in gc.get_objects():
        try:
            is_tensor_like = torch.is_tensor(obj) or (hasattr(obj, "data") and torch.is_tensor(obj.data))
            if is_tensor_like:
                print(str(type(obj)), obj.shape, obj.dtype, obj.device, obj.storage().size())
        except Exception:
            # Some gc-tracked objects raise on attribute access; skip them.
            pass
    print(torch.cuda.memory_summary())
def get_smi_memory() -> float:
    """Return this process's GPU memory usage in MB, or 0.0 when it does not
    appear in the GPU process list."""
    pid = str(os.getpid())
    for line in torch.cuda.list_gpu_processes().splitlines():
        if pid in line:
            # Field layout follows torch.cuda.list_gpu_processes: the memory
            # figure is the fourth whitespace-separated token.
            return float(line.split()[3])
    # If the process is not in the list, we are not using the GPU.
    return 0.0
|
{"hexsha": "6f2f6abfdbce77aae6bafc31ff519d406ff38990", "size": 25478, "ext": "py", "lang": "Python", "max_stars_repo_path": "fairscale/utils/testing.py", "max_stars_repo_name": "zhaojuanmao/fairscale", "max_stars_repo_head_hexsha": "61ece000bd1b70029270e2dccab66ffa2ca16d51", "max_stars_repo_licenses": ["MIT", "Apache-2.0", "BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fairscale/utils/testing.py", "max_issues_repo_name": "zhaojuanmao/fairscale", "max_issues_repo_head_hexsha": "61ece000bd1b70029270e2dccab66ffa2ca16d51", "max_issues_repo_licenses": ["MIT", "Apache-2.0", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fairscale/utils/testing.py", "max_forks_repo_name": "zhaojuanmao/fairscale", "max_forks_repo_head_hexsha": "61ece000bd1b70029270e2dccab66ffa2ca16d51", "max_forks_repo_licenses": ["MIT", "Apache-2.0", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1903409091, "max_line_length": 127, "alphanum_fraction": 0.6286207709, "include": true, "reason": "import numpy", "num_tokens": 5802}
|
# Test fixtures: a small undirected graph with 6 vertices and 7 edges.
in_channel = 10
out_channel = 5
num_V = 6
num_E = 7
# Dense adjacency matrix of the test graph.
adj = [0. 1. 0. 0. 0. 0.;
       1. 0. 0. 1. 1. 1.;
       0. 0. 0. 0. 0. 1.;
       0. 1. 0. 0. 1. 0.;
       0. 1. 0. 1. 0. 1.;
       0. 1. 1. 0. 1. 0.]
# Neighbor lists matching `adj` (1-based vertex ids).
ne = [[2], [1,4,5,6], [6], [2,5], [2,4,6], [2,3,5]]
# Minimal message-passing layer carrying a single weight matrix.
struct NewLayer <: MessagePassing
    weight
end
NewLayer(m, n) = NewLayer(randn(m,n))
# Calling the layer propagates over the featured graph with :add aggregation.
(l::NewLayer)(fg) = propagate(l, fg, :add)
# Node features: one `in_channel`-dim column per vertex.
X = Array(reshape(1:num_V*in_channel, in_channel, num_V))
fg = FeaturedGraph(adj, X)
l = NewLayer(out_channel, in_channel)
@testset "msgpass" begin
    @testset "no message or update" begin
        # With no custom message/update, feature dimensions pass through unchanged.
        fg_ = l(fg)
        @test graph(fg_) === adj
        @test size(node_feature(fg_)) == (in_channel, num_V)
        @test size(edge_feature(fg_)) == (in_channel, 2*num_E)
        @test size(global_feature(fg_)) == (0,)
    end
    # Custom message: project the neighbor features with the layer weight.
    GeometricFlux.message(l::NewLayer, x_i, x_j, e_ij) = l.weight * x_j
    @testset "message function" begin
        fg_ = l(fg)
        @test graph(fg_) === adj
        @test size(node_feature(fg_)) == (out_channel, num_V)
        @test size(edge_feature(fg_)) == (out_channel, 2*num_E)
        @test size(global_feature(fg_)) == (0,)
    end
    # Custom update: combine aggregated messages with the projected node state.
    GeometricFlux.update(l::NewLayer, m, x) = l.weight * x + m
    @testset "message and update" begin
        fg_ = l(fg)
        @test graph(fg_) === adj
        @test size(node_feature(fg_)) == (out_channel, num_V)
        @test size(edge_feature(fg_)) == (out_channel, 2*num_E)
        @test size(global_feature(fg_)) == (0,)
    end
end
|
{"hexsha": "88c00c45294529ea45aabd66117446056dd8edc1", "size": 1510, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/layers/msgpass.jl", "max_stars_repo_name": "ChrisRackauckas/GeometricFlux.jl", "max_stars_repo_head_hexsha": "aa427da343cb0f40d12d3b7ddc3535e762346ccd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-14T12:49:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-14T12:49:03.000Z", "max_issues_repo_path": "test/layers/msgpass.jl", "max_issues_repo_name": "ChrisRackauckas/GeometricFlux.jl", "max_issues_repo_head_hexsha": "aa427da343cb0f40d12d3b7ddc3535e762346ccd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/layers/msgpass.jl", "max_forks_repo_name": "ChrisRackauckas/GeometricFlux.jl", "max_forks_repo_head_hexsha": "aa427da343cb0f40d12d3b7ddc3535e762346ccd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4545454545, "max_line_length": 71, "alphanum_fraction": 0.5635761589, "num_tokens": 568}
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import numpy as np
import pytest
from numpy.testing import assert_allclose
import bigdl.dllib.nn.layer as BLayer
from bigdl.dllib.keras.converter import WeightLoader
from bigdl.dllib.keras.converter import DefinitionLoader
np.random.seed(1337) # for reproducibility
from test.bigdl.test_utils import BigDLTestCase, TestModels
from bigdl.dllib.nn.keras.keras_utils import *
import keras.backend as K
class TestLoadModel(BigDLTestCase):
    """Checks that Keras models (definitions and weights) load correctly into BigDL.

    Each test builds a reference Keras model, dumps it to json/hdf5, loads it
    back through BigDL and compares forward results against Keras predictions.
    """

    def __kmodel_load_def_weight_test(self, kmodel, input_data):
        """Dump *kmodel*, reload definition + hdf5 weights into BigDL, compare outputs."""
        keras_model_path_json, keras_model_path_hdf5 = dump_keras(kmodel, dump_weights=True)
        bmodel = DefinitionLoader.from_json_path(keras_model_path_json)
        WeightLoader.load_weights_from_hdf5(bmodel,
                                            kmodel,
                                            keras_model_path_hdf5)
        # Switch to evaluation mode before the forward pass.
        bmodel.training(False)
        boutput = bmodel.forward(input_data)
        koutput = kmodel.predict(input_data)
        assert_allclose(boutput, koutput, rtol=1e-5)

    def test_load_api_with_hdf5(self):
        """load_keras with both a json definition and hdf5 weights matches Keras output."""
        K.set_image_dim_ordering("th")
        kmodel, input_data, output_data = TestModels.kmodel_graph_1_layer()
        keras_model_json_path, keras_model_hdf5_path = dump_keras(kmodel, dump_weights=True)
        bmodel = BLayer.Model.load_keras(json_path=keras_model_json_path,
                                         hdf5_path=keras_model_hdf5_path)
        self.assert_allclose(kmodel.predict(input_data),
                             bmodel.forward(input_data))

    def test_load_model_with_hdf5_with_definition(self):
        """load_keras can recover the model from the hdf5 file alone."""
        kmodel, input_data, output_data = TestModels.kmodel_graph_1_layer()
        keras_model_json_path, keras_model_hdf5_path = dump_keras(kmodel, dump_weights=True)
        bmodel = BLayer.Model.load_keras(hdf5_path=keras_model_hdf5_path)
        self.assert_allclose(kmodel.predict(input_data),
                             bmodel.forward(input_data))

    def test_load_api_no_hdf5(self):
        """load_keras with only a json definition loads without raising.

        NOTE(review): no output assertion is performed here — this only checks
        that loading succeeds.
        """
        K.set_image_dim_ordering("th")
        kmodel, input_data, output_data = TestModels.kmodel_graph_1_layer()
        keras_model_json_path, keras_model_hdf5_path = dump_keras(kmodel, dump_weights=True)
        bmodel = BLayer.Model.load_keras(json_path=keras_model_json_path)

    def test_load_def_weights_graph_1_layer(self):
        """Round-trip a single-layer graph model."""
        K.set_image_dim_ordering("th")
        kmodel, input_data, output_data = TestModels.kmodel_graph_1_layer()
        self.__kmodel_load_def_weight_test(kmodel, input_data)

    def test_load_def_weights_graph_activation(self):
        """Round-trip a graph model whose activation is a standalone layer."""
        K.set_image_dim_ordering("th")
        kmodel, input_data, output_data = TestModels.kmodel_graph_activation_is_layer()
        self.__kmodel_load_def_weight_test(kmodel, input_data)

    def test_load_def_weights_kmodel_seq_lenet_mnist(self):
        """Round-trip a sequential LeNet MNIST model."""
        K.set_image_dim_ordering("th")
        kmodel, input_data, output_data = TestModels.kmodel_seq_lenet_mnist()
        self.__kmodel_load_def_weight_test(kmodel, input_data)

    def test_load_definition(self):
        """Load only the definition, then copy weights straight from the Keras model."""
        K.set_image_dim_ordering("th")
        kmodel, input_data, output_data = TestModels.kmodel_seq_lenet_mnist()
        keras_model_json_path, keras_model_hdf5_path = dump_keras(kmodel, dump_weights=True)
        bmodel = DefinitionLoader.from_json_path(keras_model_json_path)
        WeightLoader.load_weights_from_kmodel(bmodel, kmodel)
        self.assert_allclose(bmodel.forward(input_data), kmodel.predict(input_data))

    def test_load_weights(self):
        """Load weights from hdf5 after perturbing the live Keras weights.

        NOTE(review): kmodel is perturbed before loading, and the final
        comparison presumably relies on load_weights_from_hdf5 syncing both
        models — verify against WeightLoader's contract.
        """
        K.set_image_dim_ordering("th")
        kmodel, input_data, output_data = TestModels.kmodel_graph_1_layer()
        keras_model_json_path, keras_model_hdf5_path = dump_keras(kmodel, dump_weights=True)
        bmodel = DefinitionLoader.from_json_path(keras_model_json_path)
        kmodel.set_weights([kmodel.get_weights()[0] + 100, kmodel.get_weights()[1]])
        WeightLoader.load_weights_from_hdf5(bmodel, kmodel, filepath=keras_model_hdf5_path)
        self.assert_allclose(bmodel.forward(input_data), kmodel.predict(input_data))
if __name__ == "__main__":
pytest.main([__file__])
|
{"hexsha": "ce38f6d8b62e20a276c962e10b0f173dab295523", "size": 4701, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/dllib/test/bigdl/nn/test_load_model.py", "max_stars_repo_name": "DirkFi/BigDL", "max_stars_repo_head_hexsha": "7493209165c046116470b9a1e1c8f527915d6f1e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-07-14T01:28:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T01:16:32.000Z", "max_issues_repo_path": "python/dllib/test/bigdl/nn/test_load_model.py", "max_issues_repo_name": "DirkFi/BigDL", "max_issues_repo_head_hexsha": "7493209165c046116470b9a1e1c8f527915d6f1e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/dllib/test/bigdl/nn/test_load_model.py", "max_forks_repo_name": "DirkFi/BigDL", "max_forks_repo_head_hexsha": "7493209165c046116470b9a1e1c8f527915d6f1e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.0882352941, "max_line_length": 92, "alphanum_fraction": 0.7358008934, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1089}
|
function main()
call_your_code()
end
function call_your_code()
l::Int64 = 3
arrx = Vector{Float64}([5,5,5])
arry = Vector{Float64}([2,3,7])
s::Float64 = 5
# x = ccall((:__mod_julfort_MOD_dot, "./mod_julfort.so"),
# Float64,
# (Ref{Int64}, Ptr{Float64}, Ptr{Float64}),
# l, arrx, arry)
# println(x)
println(arrx)
ccall((:mult_arr, "./mod_julfort.so"),
Cvoid,
(Ref{Int64}, Ptr{Float64}, Ref{Float64}),
l, arrx, s)
println(arrx)
end
main()
|
{"hexsha": "9f4735c4fd20b541c66469e569e1c6bfeced64b2", "size": 584, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia-learning-src/advance/10-calling_fortran.jl", "max_stars_repo_name": "nunesmelo/djs-office-hours", "max_stars_repo_head_hexsha": "aa5cc3dfe3072c5608bf25f5dab27cbfa3664713", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-03-27T14:23:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T14:50:37.000Z", "max_issues_repo_path": "julia-learning-src/advance/10-calling_fortran.jl", "max_issues_repo_name": "nunesmelo/djs-office-hours", "max_issues_repo_head_hexsha": "aa5cc3dfe3072c5608bf25f5dab27cbfa3664713", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "julia-learning-src/advance/10-calling_fortran.jl", "max_forks_repo_name": "nunesmelo/djs-office-hours", "max_forks_repo_head_hexsha": "aa5cc3dfe3072c5608bf25f5dab27cbfa3664713", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-03-29T20:01:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T14:33:47.000Z", "avg_line_length": 20.1379310345, "max_line_length": 62, "alphanum_fraction": 0.5017123288, "num_tokens": 191}
|
(*section {* Denotational semantics for \ac{algebra} *}*)
text {*
\label{sec:theory-algebra-dlist}
In the following we present the denotational semantics for \ac{algebra} in terms of sets of distinct lists.
*}
(*<*)
theory Algebra_of_Temporal_Faults_dlist
imports
Algebra_of_Temporal_Faults Sliceable_dlist Dlist_finiteness Enum
"~~/src/HOL/Library/Dlist"
begin
(*>*)
(*subsection {* Formula: distinct lists *}*)
text{*
The definition of a formula in the \ac{algebra} is a set of sets of distinct lists (dlist).
*}
typedef 'a formula = "UNIV::'a dlist set set" by simp
(*subsubsection {* Formula as Boolean algebra*}*)
(*<*)
notation
bot ("\<bottom>") and
top ("\<top>") and
inf (infixl "\<sqinter>" 70) and
sup (infixl "\<squnion>" 65)
(*
notation (latex output)
xbefore ("_\<rightarrow>_" [80,80] 80) and
slice ("(3_\<^bsub>[_.._]\<^esub>)" [80,80,80] 80) and
slice_left ("(2_\<^bsub>[_..]\<^esub>)" [80,80] 80) and
slice_right ("(2_\<^bsub>[.._]\<^esub>)" [80,80] 80) and
tempo1 ("vremya\<^sub>1 _" 80) and
tempo2 ("vremya\<^sub>2 _" 80) and
tempo3 ("vremya\<^sub>3 _" 80) and
tempo4 ("vremya\<^sub>4 _" 80) and
Abs_formula ("_\<^bsub>formula\<^esub>") and
Rep_formula ("_\<^bsub>dlist set\<^esub>") and
list_of_dlist ("_\<^bsub>list\<^esub>" 80)
*)
(*>*)
text{* In the following we instantiate the formula as a Boolean algebra and prove that Boolean operators are valid. *}
instantiation formula :: (type) boolean_algebra
begin
definition
"x \<sqinter> y = Abs_formula (Rep_formula x \<inter> Rep_formula y)"
definition
"x \<squnion> y = Abs_formula (Rep_formula x \<union> Rep_formula y)"
definition
"\<top> = Abs_formula UNIV"
definition
"\<bottom> = Abs_formula {}"
definition
"x \<le> y \<longleftrightarrow> Rep_formula x \<subseteq> Rep_formula y"
definition
"x < y \<longleftrightarrow> Rep_formula x \<subset> Rep_formula y"
definition
"- x = Abs_formula (- (Rep_formula x))"
definition
"x - y = Abs_formula (Rep_formula x - Rep_formula y)"
lemma Rep_formula_inf:
"Rep_formula (x \<sqinter> y) = Rep_formula x \<inter> Rep_formula y"
unfolding inf_formula_def
by (simp add: Abs_formula_inverse Rep_formula)
lemma Rep_formula_sup:
"Rep_formula (x \<squnion> y) = Rep_formula x \<union> Rep_formula y"
unfolding sup_formula_def
by (simp add: Abs_formula_inverse Rep_formula)
lemma Rep_formula_top[simp]: "Rep_formula \<top> = UNIV"
unfolding top_formula_def
by (simp add: Abs_formula_inverse)
lemma Rep_formula_bot[simp]: "Rep_formula \<bottom> = {}"
unfolding bot_formula_def
by (simp add: Abs_formula_inverse)
lemma Rep_formula_compl: "Rep_formula (- x) = - Rep_formula x"
unfolding uminus_formula_def
by (simp add: Abs_formula_inverse Rep_formula)
lemma Rep_formula_diff:
"Rep_formula (x - y) = Rep_formula x - Rep_formula y"
unfolding minus_formula_def
by (simp add: Abs_formula_inverse Rep_formula)
lemmas eq_formula_iff = Rep_formula_inject [symmetric]
lemmas Rep_formula_boolean_algebra_simps =
less_eq_formula_def less_formula_def eq_formula_iff
Rep_formula_sup Rep_formula_inf Rep_formula_top Rep_formula_bot
Rep_formula_compl Rep_formula_diff
instance proof
qed (unfold Rep_formula_boolean_algebra_simps, auto)
text {*
The instantiation and this proof shows that \ac{algebra} is a Boolean algebra as shown in \cref{def:algebraset-var,def:algebraset-compl,def:algebraset-inf,def:algebraset-xbefore,def:algebraset-true,def:algebraset-false,def:algebraset-sup}.
*}
end
lemma bot_neq_top_formula [simp]: "(\<bottom> :: 'a formula) \<noteq> \<top>"
unfolding Rep_formula_boolean_algebra_simps by auto
lemma top_neq_bot_formula [simp]: "(\<top> :: 'a formula) \<noteq> \<bottom>"
unfolding Rep_formula_boolean_algebra_simps by auto
(*<*)
no_notation
bot ("\<bottom>") and
top ("\<top>") and
inf (infixl "\<sqinter>" 70) and
sup (infixl "\<squnion>" 65)
(*>*)
(*subsubsection {* Tempo properties *}*)
text {* In this section we define the tempo properties. *}
text {* Tempo1: disjoint split *}
definition dlist_tempo1 :: "('a dlist \<Rightarrow> bool) \<Rightarrow> bool"
where
"dlist_tempo1 S \<equiv> \<forall>i j l. i \<le> j \<longrightarrow> \<not> ((S (l\<dagger>..i) \<and> S (l\<dagger>j..)))"
text {* Tempo2: belonging iff *}
definition dlist_tempo2 :: "('a dlist \<Rightarrow> bool) \<Rightarrow> bool"
where
"dlist_tempo2 S \<equiv> \<forall>i l. S l \<longleftrightarrow> (S (l\<dagger>..i) \<or> S (l\<dagger>i..))"
definition dlist_tempo3 :: "('a dlist \<Rightarrow> bool) \<Rightarrow> bool"
where
"dlist_tempo3 S \<equiv> \<forall>i j l. j < i \<longrightarrow> (S (l\<dagger>j..i) \<longleftrightarrow>
(S (l\<dagger>..i) \<and> S (l\<dagger>j..)))"
definition dlist_tempo4 :: "('a dlist \<Rightarrow> bool) \<Rightarrow> bool"
where
"dlist_tempo4 S \<equiv> \<forall> l. S l \<longleftrightarrow> (\<exists>i. S (l\<dagger>i..(Suc i)))"
definition dlist_tempo5 :: "('a dlist \<Rightarrow> bool) \<Rightarrow> bool"
where
"dlist_tempo5 S \<equiv>
\<forall> i j l. (i \<noteq> j \<and> i < (#l) \<and> j < (#l)) \<longrightarrow>
\<not>(S (l\<dagger>i..(Suc i)) \<and> S (l\<dagger>j..(Suc j)))"
definition dlist_tempo6 :: "('a dlist \<Rightarrow> bool) \<Rightarrow> bool"
where
"dlist_tempo6 S \<equiv> \<forall>l. (\<forall> i j. \<not> S (l\<dagger>i..j)) \<longleftrightarrow> \<not> S l"
definition dlist_tempo7 :: "('a dlist \<Rightarrow> bool) \<Rightarrow> bool"
where
"dlist_tempo7 S \<equiv> \<forall>l. (\<exists> i j. i < j \<and> S (l\<dagger>i..j)) \<longleftrightarrow> S l"
definition dlist_tempo :: "('a dlist \<Rightarrow> bool) \<Rightarrow> bool"
where
"dlist_tempo S \<equiv> dlist_tempo1 S \<and> dlist_tempo2 S \<and>
dlist_tempo3 S \<and> dlist_tempo5 S \<and> dlist_tempo4 S \<and> dlist_tempo6 S \<and>
dlist_tempo7 S"
lemmas tempo_defs = dlist_tempo_def dlist_tempo1_def dlist_tempo2_def
dlist_tempo3_def dlist_tempo5_def dlist_tempo4_def dlist_tempo6_def
dlist_tempo7_def
lemma dlist_tempo_1_no_gap:
"dlist_tempo1 S \<Longrightarrow> \<forall>i l. \<not> ((S (l\<dagger>..i) \<and> S (l\<dagger>i..)))"
unfolding dlist_tempo1_def
by auto
corollary dlist_tempo_1_no_gap_append:
"dlist_tempo1 S \<Longrightarrow>
\<forall>zs xs ys. list_of_dlist zs = list_of_dlist xs @ list_of_dlist ys \<longrightarrow>
\<not> ((S xs \<and> S ys))"
using dlist_tempo_1_no_gap
by (metis Dlist_list_of_dlist append_eq_conv_conj slice_left_drop
take_slice_right)
(*subsubsection {* Tempo properties for list member *}*)
text {*
We use the naming convention of variable, but in fact, a variable is equivalent to a list membership:
@{term "var a = {xs . a \<in> set (list_of_dlist xs)}"}.
*}
lemma dlist_tempo1_member: "dlist_tempo1 (\<lambda>xs. Dlist.member xs a)"
unfolding dlist_tempo1_def Dlist.member_def List.member_def
by (meson distinct_in_set_slice1_not_in_slice2)
lemma dlist_tempo2_member: "dlist_tempo2 (\<lambda>xs. Dlist.member xs a)"
unfolding dlist_tempo2_def Dlist.member_def List.member_def
by (metis (no_types, lifting) Un_iff set_slice )
lemma dlist_tempo3_member: "dlist_tempo3 (\<lambda>xs. Dlist.member xs a)"
unfolding dlist_tempo3_def Dlist.member_def List.member_def
by (metis DiffD2 Un_iff distinct_slice_diff2 dlist_append_extreme_left
dlist_append_extreme_right less_imp_le_nat set_append)
lemma dlist_tempo5_member: "dlist_tempo5 (\<lambda>xs. Dlist.member xs a)"
unfolding dlist_tempo5_def Dlist.member_def List.member_def
by (metis Dlist_list_of_dlist Suc_leI disjoint_dlist_def disjoint_slice_suc
distinct_list_of_dlist dlist_empty_slice dlist_member_suc_nth1 empty_slice
less_Suc_eq_0_disj not_less_eq slice_singleton)
lemma dlist_tempo4_member: "dlist_tempo4 (\<lambda>xs. Dlist.member xs a)"
unfolding dlist_tempo4_def Dlist.member_def List.member_def
(*by (metis Un_iff length_pos_if_in_set set_slice size_dlist_def slice_none
slice_right_slice_left_absorb)*)
by (metis dlist_member_suc_nth in_set_conv_nth in_set_dropD in_set_takeD
list_of_dlist_Dlist set_remdups size_dlist_def slice_dlist_def)
lemma dlist_tempo6_member: "dlist_tempo6 (\<lambda>xs. Dlist.member xs a)"
unfolding dlist_tempo6_def Dlist.member_def List.member_def
by (metis append_Nil in_set_conv_decomp in_set_conv_nth in_set_dropD
in_set_takeD length_pos_if_in_set list_of_dlist_slice take_drop_suc)
lemma dlist_tempo7_member: "dlist_tempo7 (\<lambda>xs. Dlist.member xs a)"
unfolding dlist_tempo7_def Dlist.member_def List.member_def
by (metis Un_iff dlist_append_extreme_left dlist_member_suc_nth2
in_set_conv_nth lessI less_imp_le_nat set_append set_slice size_dlist_def)
theorem dlist_tempo_member: "dlist_tempo (\<lambda>xs. Dlist.member xs a)"
unfolding dlist_tempo_def
by (simp add: dlist_tempo1_member dlist_tempo2_member dlist_tempo3_member
dlist_tempo5_member dlist_tempo4_member dlist_tempo6_member
dlist_tempo7_member)
(*subsubsection {* Tempo properties for other operators *}*)
lemma dlist_tempo1_inf: "\<lbrakk>dlist_tempo1 a; dlist_tempo1 b\<rbrakk> \<Longrightarrow>
dlist_tempo1 (\<lambda>zs. a zs \<and> b zs)"
unfolding dlist_tempo1_def
by simp
lemma dlist_tempo3_inf: "\<lbrakk>dlist_tempo3 a; dlist_tempo3 b\<rbrakk> \<Longrightarrow>
dlist_tempo3 (\<lambda>zs. a zs \<and> b zs)"
unfolding dlist_tempo3_def
by auto
lemma dlist_tempo2_sup: "\<lbrakk>dlist_tempo2 a; dlist_tempo2 b\<rbrakk> \<Longrightarrow>
dlist_tempo2 (\<lambda>zs. a zs \<or> b zs)"
unfolding dlist_tempo2_def
by auto
lemma dlist_tempo4_sup: "\<lbrakk>dlist_tempo4 a; dlist_tempo4 b\<rbrakk> \<Longrightarrow>
dlist_tempo4 (\<lambda>zs. a zs \<or> b zs)"
unfolding dlist_tempo4_def
by blast
(*subsection {* \acs*{XBefore} of distinct lists *}*)
definition dlist_xbefore :: "('a dlist \<Rightarrow> bool) \<Rightarrow> ('a dlist \<Rightarrow> bool) \<Rightarrow>
'a dlist \<Rightarrow> bool"
where
"dlist_xbefore a b xs \<equiv> \<exists>i. a (xs\<dagger>..i) \<and> b (xs\<dagger>i..)"
(*<*)
notation (latex output)
dlist_xbefore ("_\<rightarrow>_ (_)" [80,80,80] 80)
(*>*)
(*subsubsection {* \acs*{XBefore} and temporal properties *}*)
lemma dlist_tempo1_xbefore: "\<lbrakk>dlist_tempo1 a; dlist_tempo1 b\<rbrakk> \<Longrightarrow>
dlist_tempo1 (dlist_xbefore a b)"
unfolding dlist_tempo1_def dlist_xbefore_def slice_slice_simps
by (smt le_add1 min.absorb2 min.cobounded1 slice_right_slice_left_absorb
slice_right_slice_right_absorb)
(*subsubsection {* \acs*{XBefore} and appending *}*)
lemma Rep_slice_append:
"list_of_dlist zs = (list_of_dlist (zs\<dagger>..i)) @ (list_of_dlist (zs\<dagger>i..))"
by (metis distinct_append distinct_list_of_dlist distinct_slice_inter_empty
list_of_dlist_Dlist remdups_id_iff_distinct slice_append)
lemma dlist_xbefore_append:
"dlist_xbefore a b zs \<longleftrightarrow>
(\<exists>xs ys. set (list_of_dlist xs) \<inter> set (list_of_dlist ys) =
{} \<and> a xs \<and> b ys \<and>
list_of_dlist zs = ((list_of_dlist xs) @ (list_of_dlist ys)))"
unfolding dlist_xbefore_def
by (metis Rep_slice_append append_Nil2 append_eq_conv_conj
distinct_slice_inter_empty dlist_xbefore_def drop_take max_0L
size_dlist_def slice_append slice_dlist_def slice_left_def slice_right_def
take_slice_right)
(*subsubsection {* \acs*{XBefore}, bot, top and idempotency *}*)
lemma dlist_xbefore_bot_1: "dlist_xbefore (\<lambda>xs. False) b zs = False"
unfolding dlist_xbefore_def
by simp
corollary dlistset_xbefore_bot_1:
"Collect (dlist_xbefore (\<lambda>xs. False) b) = {}"
by (simp add: dlist_xbefore_bot_1)
(*
unfolding dlist_xbefore_def dlist_tempo_def dlist_tempo1_def dlist_tempo2_def
dlist_tempo3_def dlist_tempo4_def dlist_tempo5_def dlist_tempo6_def dlist_tempo7_def
by auto*)
lemma dlist_xbefore_bot_2: "dlist_xbefore a (\<lambda>xs. False) zs = False"
unfolding dlist_xbefore_def
by simp
lemma dlistset_xbefore_bot_2:
"Collect (dlist_xbefore a (\<lambda>xs. False)) = {}"
by (simp add: dlist_xbefore_bot_2)
lemma dlist_xbefore_idem:
"dlist_tempo1 a \<Longrightarrow> dlist_xbefore a a zs = False"
unfolding dlist_xbefore_def dlist_tempo1_def
by blast
lemma dlistset_xbefore_idem:
"dlist_tempo1 a \<Longrightarrow> Collect (dlist_xbefore a a) = {}"
by (simp add: dlist_xbefore_idem)
lemma dlist_xbefore_implies_idem:
"\<forall>xs. b xs \<longrightarrow> a xs \<Longrightarrow> dlist_tempo1 a \<Longrightarrow> dlist_xbefore a b zs = False"
unfolding dlist_tempo1_def dlist_xbefore_def
by blast
(*subsubsection {* \acs*{XBefore} neutral*}*)
lemma dlist_xbefore_neutral_1:
"dlist_xbefore (\<lambda>xs. xs = dlist_of_list []) a zs = a zs"
by (metis (full_types) Dlist_list_of_dlist Rep_slice_append append.simps(1)
dlist_of_list dlist_xbefore_def take_0 take_slice_right)
corollary dlistset_xbefore_neutral_1:
"Collect (dlist_xbefore (\<lambda>xs. xs = Dlist []) a) = Collect a"
using dlist_xbefore_neutral_1 by auto
lemma dlist_xbefore_neutral_2:
"dlist_xbefore a (\<lambda>xs. xs = Dlist []) zs = a zs"
by (smt Dlist_list_of_dlist append_Nil2 distinct_append distinct_list_of_dlist dlist_of_list
dlist_xbefore_append list_of_dlist_empty)
corollary dlistset_xbefore_neutral_2:
"Collect (dlist_xbefore a (\<lambda>xs. xs = Dlist [])) = Collect a"
using dlist_xbefore_neutral_2 by auto
(*subsubsection {* \acs*{XBefore} associativity*}*)
theorem dlist_xbefore_assoc1:
"(dlist_xbefore (dlist_xbefore S T) U zs) \<longleftrightarrow>
(dlist_xbefore S (dlist_xbefore T U) zs)"
unfolding dlist_xbefore_def slice_slice_simps dlist_tempo_def
apply auto
apply (metis diff_is_0_eq less_imp_le max_0L min_def not_le
ordered_cancel_comm_monoid_diff_class.le_iff_add slice_dlist_def
take_eq_Nil)
by (metis le_add1 min.absorb2)
corollary dlist_xbefore_assoc:
"(dlist_xbefore (dlist_xbefore S T) U) =
(dlist_xbefore S (dlist_xbefore T U))"
using dlist_xbefore_assoc1 by blast
corollary dlistset_xbefore_assoc:
"Collect (dlist_xbefore (dlist_xbefore S T) U) =
Collect (dlist_xbefore S (dlist_xbefore T U))"
by (simp add: dlist_xbefore_assoc)
(*subsubsection {* \acs*{XBefore} equivalences *}*)
lemma dlist_tempo1_le_uniqueness:
"dlist_tempo1 S \<Longrightarrow> S (l\<dagger>..i) \<Longrightarrow> i \<le> j \<Longrightarrow> \<not> S (l\<dagger>j..)" and
"dlist_tempo1 S \<Longrightarrow> S (l\<dagger>j..) \<Longrightarrow> i \<le> j \<Longrightarrow> \<not> S (l\<dagger>..i)"
unfolding dlist_tempo1_def
by auto
lemma dlist_xbefore_not_sym:
"dlist_tempo1 S \<Longrightarrow> dlist_tempo1 T \<Longrightarrow> dlist_xbefore S T xs \<Longrightarrow>
dlist_xbefore T S xs \<Longrightarrow> False"
by (metis dlist_xbefore_def le_cases dlist_tempo1_le_uniqueness)
corollary dlist_xbefore_and:
"dlist_tempo1 S \<Longrightarrow> dlist_tempo1 T \<Longrightarrow>
((dlist_xbefore S T zs) \<and> (dlist_xbefore T S zs)) = False"
using dlist_xbefore_not_sym by blast
corollary dlistset_xbefore_and:
"dlist_tempo1 S \<Longrightarrow> dlist_tempo1 T \<Longrightarrow>
(Collect (dlist_xbefore S T)) \<inter> (Collect (dlist_xbefore T S)) = {}"
using dlist_xbefore_and
by auto
lemma dlist_tempo2_left_absorb: "dlist_tempo2 S \<Longrightarrow> S (l\<dagger>i..) \<Longrightarrow> S l"
unfolding dlist_tempo2_def
by auto
lemma dlist_tempo2_right_absorb: "dlist_tempo2 S \<Longrightarrow> S (l\<dagger>..i) \<Longrightarrow> S l"
unfolding dlist_tempo2_def
by auto
lemma dlist_xbefore_implies_member1[simp]:
"dlist_tempo2 S \<Longrightarrow> dlist_xbefore S T l \<Longrightarrow> S l"
by (meson dlist_xbefore_def dlist_tempo2_right_absorb)
lemma dlist_xbefore_implies_member2[simp]:
"dlist_tempo2 T \<Longrightarrow> dlist_xbefore S T l \<Longrightarrow> T l"
by (meson dlist_xbefore_def dlist_tempo2_left_absorb)
lemma dlist_xbefore_or1:
"dlist_tempo2 S \<Longrightarrow> dlist_tempo2 T \<Longrightarrow>
dlist_xbefore S T l \<or> dlist_xbefore T S l \<Longrightarrow> S l \<and> T l"
using dlist_xbefore_implies_member1 dlist_xbefore_implies_member2 by blast
(*TODO: review independent events definition*)
definition dlist_independent_events ::
"('a dlist \<Rightarrow> bool) \<Rightarrow> ('a dlist \<Rightarrow> bool) \<Rightarrow> bool"
where
"dlist_independent_events S T \<equiv>
(\<forall>i l. \<not> (S (l\<dagger>i..(Suc i)) \<and> T (l\<dagger>i..(Suc i))))"
lemma dlist_indepentent_events_member: "a \<noteq> b \<Longrightarrow>
dlist_independent_events (\<lambda> dl. Dlist.member dl a) (\<lambda> dl. Dlist.member dl b)"
apply (simp add: dlist_independent_events_def Dlist.member_def List.member_def)
by (metis dlist_member_suc_nth1)
(*Verificar se faz sentido a regra abaixo. *)
(*
lemma "dlist_independent_events a b \<Longrightarrow> \<forall>xs. b xs \<longrightarrow> a xs \<Longrightarrow> False"
unfolding dlist_independent_events_def
sorry
*)
(*TODO: try to remove all these requirements*)
lemma dlist_and_split9:
"dlist_independent_events S T \<Longrightarrow>
dlist_tempo2 S \<Longrightarrow> dlist_tempo2 T \<Longrightarrow>
dlist_tempo3 S \<Longrightarrow> dlist_tempo3 T \<Longrightarrow>
dlist_tempo4 S \<Longrightarrow> dlist_tempo4 T \<Longrightarrow>
S l \<and> T l \<longleftrightarrow> (\<exists>i j. i \<le> j \<and>
((S (l\<dagger>..i) \<and> T (l\<dagger>j..)) \<or> (S (l\<dagger>j..) \<and> T (l\<dagger>..i))))"
unfolding dlist_independent_events_def
dlist_tempo2_def dlist_tempo3_def dlist_tempo4_def
by (metis le_refl not_less not_less_eq_eq)
lemma dlist_tempo_equiv_xor:
"dlist_tempo1 S \<Longrightarrow> dlist_tempo2 S \<Longrightarrow>
\<forall>l. S l \<longleftrightarrow> (\<forall>i. (S (l\<dagger>..i) \<and> \<not> S (l\<dagger>i..)) \<or> (\<not> S (l\<dagger>..i) \<and> S (l\<dagger>i..)))"
unfolding tempo_defs
by (meson order_refl)
corollary dlist_tempo_equiv_not_eq: "dlist_tempo1 S \<Longrightarrow> dlist_tempo2 S \<Longrightarrow>
\<forall>l. S l \<longleftrightarrow> (\<forall>i. S (l\<dagger>..i) \<noteq> S (l\<dagger>i..))"
using dlist_tempo_equiv_xor
by auto
lemma dlists_xbefore_or2:
"dlist_independent_events S T \<Longrightarrow>
dlist_tempo1 S \<Longrightarrow> dlist_tempo1 T \<Longrightarrow>
dlist_tempo2 S \<Longrightarrow> dlist_tempo2 T \<Longrightarrow>
dlist_tempo3 S \<Longrightarrow> dlist_tempo3 T \<Longrightarrow>
dlist_tempo4 S \<Longrightarrow> dlist_tempo4 T \<Longrightarrow>
S l \<and> T l \<Longrightarrow> dlist_xbefore S T l \<or> dlist_xbefore T S l"
unfolding dlist_xbefore_def dlist_tempo_def
by (metis dlist_and_split9 dlist_tempo_equiv_not_eq
dlist_tempo1_le_uniqueness)
theorem dlist_xbefore_or_one_list:
"dlist_independent_events S T \<Longrightarrow>
dlist_tempo1 S \<Longrightarrow> dlist_tempo1 T \<Longrightarrow>
dlist_tempo2 S \<Longrightarrow> dlist_tempo2 T \<Longrightarrow>
dlist_tempo3 S \<Longrightarrow> dlist_tempo3 T \<Longrightarrow>
dlist_tempo4 S \<Longrightarrow> dlist_tempo4 T \<Longrightarrow>
dlist_xbefore S T l \<or> dlist_xbefore T S l \<longleftrightarrow> S l \<and> T l"
using dlist_xbefore_or1 dlists_xbefore_or2 dlist_tempo_def
by blast
corollary dlist_xbefore_or:
"dlist_independent_events S T \<Longrightarrow>
dlist_tempo1 S \<Longrightarrow> dlist_tempo1 T \<Longrightarrow>
dlist_tempo2 S \<Longrightarrow> dlist_tempo2 T \<Longrightarrow>
dlist_tempo3 S \<Longrightarrow> dlist_tempo3 T \<Longrightarrow>
dlist_tempo4 S \<Longrightarrow> dlist_tempo4 T \<Longrightarrow>
(\<lambda>zs. (dlist_xbefore S T zs) \<or> (dlist_xbefore T S zs)) =
(\<lambda>zs. S zs \<and> T zs)"
using dlist_xbefore_or_one_list
by blast
corollary dlistset_xbefore_or:
"dlist_independent_events S T \<Longrightarrow>
dlist_tempo1 S \<Longrightarrow> dlist_tempo1 T \<Longrightarrow>
dlist_tempo2 S \<Longrightarrow> dlist_tempo2 T \<Longrightarrow>
dlist_tempo3 S \<Longrightarrow> dlist_tempo3 T \<Longrightarrow>
dlist_tempo4 S \<Longrightarrow> dlist_tempo4 T \<Longrightarrow>
(Collect (dlist_xbefore S T)) \<union> (Collect (dlist_xbefore T S)) =
Collect S \<inter> Collect T"
using dlist_xbefore_or
by (smt Collect_cong Collect_conj_eq Collect_disj_eq)
(*subsubsection {* \acs*{XBefore} transitivity *}*)
theorem dlist_xbefore_trans: "
\<lbrakk>dlist_tempo1 a; dlist_tempo1 b\<rbrakk> \<Longrightarrow>
\<lbrakk>dlist_tempo2 a\<rbrakk> \<Longrightarrow>
dlist_xbefore a b zs \<and> dlist_xbefore b c zs \<Longrightarrow>
dlist_xbefore a c zs"
using dlist_xbefore_not_sym
by (metis dlist_tempo2_def dlist_xbefore_def)
corollary dlistset_xbefore_trans: "
\<lbrakk>dlist_tempo1 a; dlist_tempo1 b\<rbrakk> \<Longrightarrow>
\<lbrakk>dlist_tempo2 a\<rbrakk> \<Longrightarrow>
(Collect (dlist_xbefore a b) \<inter> Collect (dlist_xbefore b c)) \<subseteq>
Collect (dlist_xbefore a c)"
using dlist_xbefore_trans
by auto
(*subsubsection {* Boolean operators mixed with \acs*{XBefore} *}*)
theorem mixed_dlist_xbefore_or1: "
dlist_xbefore (\<lambda>xs. a xs \<or> b xs) c zs =
((dlist_xbefore a c zs) \<or> (dlist_xbefore b c zs))"
unfolding dlist_xbefore_def by auto
corollary mixed_dlistset_xbefore_or1: "
Collect (dlist_xbefore (\<lambda>xs. a xs \<or> b xs) c) =
Collect (dlist_xbefore a c) \<union> Collect (dlist_xbefore b c)"
proof-
have "Collect (\<lambda>zs. (dlist_xbefore a c zs) \<or> (dlist_xbefore b c zs)) =
(Collect (dlist_xbefore a c) \<union> Collect (dlist_xbefore b c))"
by (simp add: Collect_disj_eq)
thus ?thesis using mixed_dlist_xbefore_or1 by blast
qed
theorem mixed_dlist_xbefore_or2: "
dlist_xbefore a (\<lambda>xs. b xs \<or> c xs) zs =
((dlist_xbefore a b zs) \<or> (dlist_xbefore a c zs))"
unfolding dlist_xbefore_def by auto
corollary mixed_dlistset_xbefore_or2: "
Collect (dlist_xbefore a (\<lambda>xs. b xs \<or> c xs)) =
Collect (dlist_xbefore a b) \<union> Collect (dlist_xbefore a c)"
proof-
have "Collect (\<lambda>zs. (dlist_xbefore a b zs) \<or> (dlist_xbefore a c zs)) =
Collect (dlist_xbefore a b) \<union> Collect (dlist_xbefore a c)"
by (simp add: Collect_disj_eq)
thus ?thesis using mixed_dlist_xbefore_or2 by blast
qed
lemma and_dlist_xbefore_equiv_or_dlist_xbefore:
"dlist_tempo2 a \<Longrightarrow>
(a zs \<and> dlist_xbefore b c zs) \<longleftrightarrow>
(dlist_xbefore (\<lambda> xs. a xs \<and> b xs) c zs \<or>
dlist_xbefore b (\<lambda>xs. a xs \<and> c xs) zs)"
proof-
assume "dlist_tempo2 a"
hence 0: "\<forall>i xs. a xs \<longleftrightarrow> (a (xs\<dagger>..i) \<or> a (xs\<dagger>i..))"
using dlist_tempo2_def by auto
have "a zs \<and> dlist_xbefore b c zs \<longleftrightarrow>
a zs \<and> (\<exists>i. b (zs\<dagger>..i) \<and> c (zs\<dagger>i..))"
by (auto simp add: dlist_xbefore_def)
thus ?thesis using 0 by (auto simp add: dlist_xbefore_def)
qed
corollary and_dlistset_xbefore_equiv_or_dlistset_xbefore:
"dlist_tempo2 a \<Longrightarrow>
((Collect a) \<inter> (Collect (dlist_xbefore b c)))=
(Collect (dlist_xbefore (\<lambda> xs. a xs \<and> b xs) c) \<union>
Collect (dlist_xbefore b (\<lambda>xs. a xs \<and> c xs)))"
by (smt Collect_cong Collect_conj_eq Collect_disj_eq dlist_tempo2_def
dlist_xbefore_def)
lemma dlist_xbefore_implies_not_sym_dlist_xbefore: "
\<lbrakk>dlist_tempo1 a; dlist_tempo1 b\<rbrakk> \<Longrightarrow>
dlist_xbefore a b zs \<Longrightarrow> \<not> dlist_xbefore b a zs"
unfolding dlist_xbefore_def dlist_tempo1_def
by (meson nat_le_linear)
corollary dlistset_xbefore_implies_not_sym_dlistset_xbefore:
"\<lbrakk>dlist_tempo1 a; dlist_tempo1 b\<rbrakk> \<Longrightarrow>
Collect (dlist_xbefore a b) \<subseteq> - Collect (dlist_xbefore b a)"
using dlist_xbefore_implies_not_sym_dlist_xbefore
by (metis (mono_tags, lifting) CollectD ComplI subsetI)
theorem mixed_not_dlist_xbefore: "dlist_independent_events a b \<Longrightarrow>
\<lbrakk>dlist_tempo1 a; dlist_tempo1 b\<rbrakk> \<Longrightarrow>
\<lbrakk>dlist_tempo2 a; dlist_tempo2 b\<rbrakk> \<Longrightarrow>
\<lbrakk>dlist_tempo3 a; dlist_tempo3 b\<rbrakk> \<Longrightarrow>
\<lbrakk>dlist_tempo4 a; dlist_tempo4 b\<rbrakk> \<Longrightarrow>
(\<not> (dlist_xbefore a b zs)) =
((\<not> a zs) \<or> (\<not> b zs) \<or> (dlist_xbefore b a zs))"
using dlist_xbefore_implies_not_sym_dlist_xbefore dlist_xbefore_or_one_list
by blast
corollary mixed_not_dlistset_xbefore: "dlist_independent_events a b \<Longrightarrow>
\<lbrakk>dlist_tempo1 a; dlist_tempo1 b\<rbrakk> \<Longrightarrow>
\<lbrakk>dlist_tempo2 a; dlist_tempo2 b\<rbrakk> \<Longrightarrow>
\<lbrakk>dlist_tempo3 a; dlist_tempo3 b\<rbrakk> \<Longrightarrow>
\<lbrakk>dlist_tempo4 a; dlist_tempo4 b\<rbrakk> \<Longrightarrow>
(- Collect (dlist_xbefore a b)) =
((- Collect a) \<union> (- Collect b) \<union> Collect (dlist_xbefore b a))"
proof-
assume 0: "dlist_independent_events a b" "dlist_tempo1 a" "dlist_tempo1 b"
"dlist_tempo2 a" "dlist_tempo2 b" "dlist_tempo3 a" "dlist_tempo3 b"
"dlist_tempo4 a" "dlist_tempo4 b"
have "((- Collect a) \<union> (- Collect b) \<union> Collect (dlist_xbefore b a)) =
((Collect (\<lambda>zs. \<not> a zs \<or> \<not> b zs)) \<union> Collect (dlist_xbefore b a))"
by blast
also have "... = (Collect (\<lambda>zs. \<not> a zs \<or> \<not> b zs \<or> dlist_xbefore b a zs))"
by blast
hence "Collect (\<lambda>zs. (\<not> a zs) \<or> (\<not> b zs) \<or> (dlist_xbefore b a zs)) =
((- Collect a) \<union> (- Collect b) \<union> Collect (dlist_xbefore b a))"
"Collect (\<lambda>zs. \<not> (dlist_xbefore a b zs)) =
- Collect (dlist_xbefore a b)"
by blast+
thus ?thesis using 0 mixed_not_dlist_xbefore by blast
qed
theorem not_1_dlist_xbefore:
"\<lbrakk> dlist_tempo1 a; dlist_tempo2 b\<rbrakk> \<Longrightarrow>
dlist_xbefore (\<lambda>xs. \<not> a xs) b zs = b zs"
by (metis Dlist_list_of_dlist dlist_tempo_1_no_gap dlist_xbefore_def dlist_xbefore_implies_member2 drop_0 slice_left_drop slice_right_take take_0)
corollary not_1_dlistset_xbefore:
"\<lbrakk> dlist_tempo1 a; dlist_tempo2 b\<rbrakk> \<Longrightarrow>
Collect (dlist_xbefore (\<lambda>xs. \<not> a xs) b) = Collect b"
using not_1_dlist_xbefore by blast
theorem not_2_dlist_xbefore:
"\<lbrakk> dlist_tempo1 b; dlist_tempo2 a \<rbrakk> \<Longrightarrow> dlist_xbefore a (\<lambda>xs. \<not> b xs) zs = a zs"
by (metis Dlist.empty_def append_Nil2 dlist_tempo_1_no_gap
dlist_xbefore_append dlist_xbefore_implies_member1 drop_0 inf.commute
inf_bot_left list.set(1) list_of_dlist_empty slice_left_drop
slice_right_take take_0)
corollary not_2_dlistset_xbefore:
"\<lbrakk> dlist_tempo1 b; dlist_tempo2 a \<rbrakk> \<Longrightarrow>
Collect (dlist_xbefore a (\<lambda>xs. \<not> b xs)) = Collect a"
using not_2_dlist_xbefore by blast
lemma empty_dlist_implies_false[simp]:
"\<lbrakk> dlist_tempo1 a; dlist_tempo2 a \<rbrakk> \<Longrightarrow> a (Dlist []) \<Longrightarrow> False"
unfolding dlist_tempo1_def dlist_tempo2_def dlist_tempo3_def dlist_tempo4_def
slice_left_def slice_right_def size_dlist_def slice_dlist_def
by (metis Dlist.empty_def list.size(3) list_of_dlist_empty nat_le_linear)
lemma dlist_inf_xbefore_trans:
"\<lbrakk> dlist_tempo1 b; dlist_tempo3 b \<rbrakk> \<Longrightarrow> ((dlist_xbefore a b zs) \<and> (dlist_xbefore b c zs)) \<longleftrightarrow>
(dlist_xbefore (dlist_xbefore a b) c) zs"
proof-
assume 0: "dlist_tempo1 b" "dlist_tempo3 b"
hence 1: "\<exists> i. (\<exists> j. a (zs\<dagger>..i) \<and> b (zs\<dagger>i..) \<and> b (zs\<dagger>..j) \<and> c (zs\<dagger>j..) \<longleftrightarrow>
a (zs\<dagger>..i) \<and> b (zs\<dagger>i..j) \<and> c (zs\<dagger>j..))"
by (metis slice_left_def slice_right_def)
have 2: "(\<exists> x y. a (zs\<dagger>..x) \<and> b (zs\<dagger>x..) \<and> b (zs\<dagger>..y) \<and> c (zs\<dagger>y..)) \<longleftrightarrow>
(\<exists> x y. a (zs\<dagger>..x) \<and> b (zs\<dagger>x..y) \<and> c (zs\<dagger>y..))"
using 0
by (metis (no_types, hide_lams) diff_zero dlist_empty_slice dlist_tempo1_le_uniqueness dlist_tempo3_def dlist_tempo_1_no_gap drop_0 list_of_dlist_empty list_of_dlist_simps(3) max_0L not_le slice_left_drop slice_right_def take_0)
have 3: "((\<exists>i. a (zs\<dagger>..i) \<and> b (zs\<dagger>i..)) \<and> (\<exists>j. b (zs\<dagger>..j) \<and> c (zs\<dagger>j..))) \<longleftrightarrow>
(\<exists> i j. a (zs\<dagger>..i) \<and> b (zs\<dagger>i..) \<and> b (zs\<dagger>..j) \<and> c (zs\<dagger>j..))"
"(\<exists>i. (\<exists>j. a (zs\<dagger>..min i j) \<and> b (zs\<dagger>j..i)) \<and> c (zs\<dagger>i..)) \<longleftrightarrow>
(\<exists>i j. a (zs\<dagger>..min i j) \<and> b (zs\<dagger>j..i) \<and> c (zs\<dagger>i..))"
by auto
have 4: "(\<exists> x y. a (zs\<dagger>..min x y) \<and> b (zs\<dagger>x..y) \<and> c (zs\<dagger>y..)) \<longleftrightarrow>
(\<exists> x y. a (zs\<dagger>..x) \<and> b (zs\<dagger>x..y) \<and> c (zs\<dagger>y..))"
using 0
by (metis (no_types, lifting) Dlist.empty_def append_Nil2 dlist_empty_slice dlist_tempo_1_no_gap_append list_of_dlist_empty min.cobounded1 min_def)
have "(\<exists>i j. a (zs\<dagger>..i) \<and> b (zs\<dagger>i..j) \<and> c (zs\<dagger>j..)) \<longleftrightarrow>
(\<exists>i j. a (zs\<dagger>..min i j) \<and> b (zs\<dagger>i..j) \<and> c (zs\<dagger>j..))"
using 4 by simp
hence "(\<exists>i j. a (zs\<dagger>..i) \<and> b (zs\<dagger>i..) \<and> b (zs\<dagger>..j) \<and> c (zs\<dagger>j..)) \<longleftrightarrow>
(\<exists>i j. a (zs\<dagger>..min i j) \<and> b (zs\<dagger>i..j) \<and> c (zs\<dagger>j..))"
using 0 2 by simp
hence "((\<exists>i. a (zs\<dagger>..i) \<and> b (zs\<dagger>i..)) \<and> (\<exists>j. b (zs\<dagger>..j) \<and> c (zs\<dagger>j..))) \<longleftrightarrow>
(\<exists>i j. a (zs\<dagger>..min i j) \<and> b (zs\<dagger>i..j) \<and> c (zs\<dagger>j..))"
using 0 3 by simp
hence "((\<exists>i. a (zs\<dagger>..i) \<and> b (zs\<dagger>i..)) \<and> (\<exists>j. b (zs\<dagger>..j) \<and> c (zs\<dagger>j..))) \<longleftrightarrow>
(\<exists>j. (\<exists>i. a (zs\<dagger>..min i j) \<and> b (zs\<dagger>i..j)) \<and> c (zs\<dagger>j..))"
using 3 by auto
hence "(dlist_xbefore a b zs \<and> dlist_xbefore b c zs) \<longleftrightarrow>
(\<exists>j. (\<exists>i. a (zs\<dagger>..min i j) \<and> b (zs\<dagger>i..j)) \<and> c (zs\<dagger>j..))"
using dlist_xbefore_def by auto
hence "(dlist_xbefore a b zs \<and> dlist_xbefore b c zs) \<longleftrightarrow>
(\<exists>j. (\<exists>i. a ((zs\<dagger>..j)\<dagger>..i) \<and> b ((zs\<dagger>..j)\<dagger>i..)) \<and> c (zs\<dagger>j..))"
by (simp add: min.commute slice_right_slice_left_absorb slice_right_slice_right_absorb)
thus ?thesis unfolding dlist_xbefore_def by simp
qed
(* Set-level form of dlist_inf_xbefore_trans: intersecting the satisfaction
   sets of "a before b" and "b before c" yields that of "(a before b) before c",
   provided b satisfies tempo1 and tempo3. *)
lemma dlistset_inf_xbefore_trans:
"\<lbrakk> dlist_tempo1 b; dlist_tempo3 b \<rbrakk> \<Longrightarrow> (Collect (dlist_xbefore a b) \<inter> Collect (dlist_xbefore b c)) =
Collect (dlist_xbefore (dlist_xbefore a b) c)"
using dlist_inf_xbefore_trans
using Collect_cong Collect_conj_eq by blast
(* Left distributivity of conjunction over xbefore: "a before c and b before c"
   is equivalent to "(a and b) before c", under tempo1/tempo2 of a and b. *)
lemma dlist_inf_xbefore_inf_1:
"\<lbrakk>dlist_tempo1 a; dlist_tempo1 b\<rbrakk> \<Longrightarrow>
\<lbrakk>dlist_tempo2 a; dlist_tempo2 b\<rbrakk> \<Longrightarrow>
((dlist_xbefore a c zs) \<and> (dlist_xbefore b c zs)) \<longleftrightarrow>
(dlist_xbefore (\<lambda>xs. a xs \<and> b xs) c zs)"
unfolding dlist_xbefore_def
by (metis dlist_tempo1_le_uniqueness dlist_tempo2_right_absorb
dlist_tempo_equiv_xor nat_le_linear)
(* Set-level counterpart of dlist_inf_xbefore_inf_1, obtained by lifting the
   pointwise equivalence to an equality of Collect-sets. *)
lemma dlistset_inf_xbefore_inf_1:
"\<lbrakk>dlist_tempo1 a; dlist_tempo1 b\<rbrakk> \<Longrightarrow>
\<lbrakk>dlist_tempo2 a; dlist_tempo2 b\<rbrakk> \<Longrightarrow>
(Collect (dlist_xbefore a c) \<inter> Collect (dlist_xbefore b c)) =
Collect (dlist_xbefore ((\<lambda>xs. a xs \<and> b xs)) c)"
proof-
assume 0: "dlist_tempo1 a" "dlist_tempo1 b" "dlist_tempo2 a" "dlist_tempo2 b"
hence "Collect (\<lambda>xs. (dlist_xbefore a c xs) \<and> (dlist_xbefore b c xs)) =
Collect ((dlist_xbefore (\<lambda>xs. a xs \<and> b xs) c))"
using 0 dlist_inf_xbefore_inf_1 by blast
thus ?thesis using 0 by blast
qed
(* Right distributivity of conjunction over xbefore: "a before b and a before c"
   is equivalent to "a before (b and c)", under tempo1/tempo2 of b and c. *)
lemma dlist_inf_xbefore_inf_2:
"\<lbrakk>dlist_tempo1 b; dlist_tempo1 c\<rbrakk> \<Longrightarrow>
\<lbrakk>dlist_tempo2 b; dlist_tempo2 c\<rbrakk> \<Longrightarrow>
((dlist_xbefore a b zs) \<and> (dlist_xbefore a c zs)) \<longleftrightarrow>
(dlist_xbefore a (\<lambda>xs. b xs \<and> c xs) zs)"
unfolding dlist_xbefore_def
by (metis dlist_tempo1_le_uniqueness dlist_tempo2_left_absorb dlist_tempo_equiv_xor nat_le_linear)
(* Set-level counterpart of dlist_inf_xbefore_inf_2, lifted to Collect-sets. *)
lemma dlistset_inf_xbefore_inf_2:
"\<lbrakk>dlist_tempo1 b; dlist_tempo1 c\<rbrakk> \<Longrightarrow>
\<lbrakk>dlist_tempo2 b; dlist_tempo2 c\<rbrakk> \<Longrightarrow>
Collect (dlist_xbefore a b) \<inter> Collect (dlist_xbefore a c) =
Collect (dlist_xbefore a (\<lambda>xs. b xs \<and> c xs))"
proof-
assume 0: "dlist_tempo1 b" "dlist_tempo1 c" "dlist_tempo2 b" "dlist_tempo2 c"
hence "Collect (\<lambda>xs. (dlist_xbefore a b xs) \<and> (dlist_xbefore a c xs)) =
Collect (dlist_xbefore a (\<lambda>xs. b xs \<and> c xs))"
using 0 dlist_inf_xbefore_inf_2 by blast
thus ?thesis using 0 by blast
qed
(*subsection {* Formulas as \ac{algebra} *}*)
text {*
In the following we prove that a formula is a valid type instantiation for all \ac{algebra} classes.
*}
(*subsubsection {* Basic properties of \ac{algebra} *}*)
(* Instantiate 'a formula as an algebra_of_temporal_faults_basic: the neutral
   element is the singleton set of the empty dlist, xbefore is lifted from
   dlist_xbefore on representatives, and tempo1 from dlist_tempo1. *)
instantiation formula :: (type) algebra_of_temporal_faults_basic
begin
definition
"neutral = Abs_formula { Dlist [] }"
definition
"xbefore a b = Abs_formula { zs .
dlist_xbefore (\<lambda>xs. xs \<in> Rep_formula a) (\<lambda>ys. ys \<in> Rep_formula b) zs }"
definition
"tempo1 a = dlist_tempo1 (\<lambda>xs. xs \<in> Rep_formula a)"
(* Transfer lemmas: each pushes an operation through Rep_formula so the class
   axioms can be discharged at the dlist-set level. *)
lemma Rep_formula_neutral[simp]: "Rep_formula neutral = { Dlist [] }"
unfolding neutral_formula_def
by (simp add: Abs_formula_inverse)
lemma Rep_formula_xbefore_to_dlist_xbefore:
"Rep_formula (xbefore a b) =
Collect (dlist_xbefore (\<lambda>x. x \<in> Rep_formula a) (\<lambda>y. y \<in> Rep_formula b))"
unfolding dlist_xbefore_def xbefore_formula_def
by (simp add: Abs_formula_inverse)
lemma Rep_formula_xbefore_bot_1: "Rep_formula (xbefore bot a) =
Rep_formula bot"
unfolding xbefore_formula_def
by (simp add: Abs_formula_inverse dlist_xbefore_bot_1)
lemma Rep_formula_xbefore_bot_2: "Rep_formula (xbefore a bot) =
Rep_formula bot"
unfolding xbefore_formula_def
by (simp add: Abs_formula_inverse dlist_xbefore_bot_2)
lemma Rep_formula_xbefore_neutral_1: "Rep_formula (xbefore neutral a) = Rep_formula a"
unfolding xbefore_formula_def neutral_formula_def
apply (simp add: Abs_formula_inverse)
using dlistset_xbefore_neutral_1
by (metis Collect_mem_eq)
lemma Rep_formula_xbefore_neutral_2: "Rep_formula (xbefore a neutral) = Rep_formula a"
unfolding xbefore_formula_def neutral_formula_def
apply (simp add: Abs_formula_inverse)
using dlistset_xbefore_neutral_2
by (metis Collect_mem_eq)
lemma Rep_formula_xbefore_not_idempotent:
"tempo1 a \<Longrightarrow> Rep_formula (xbefore a a) = Rep_formula bot"
unfolding xbefore_formula_def tempo1_formula_def
by (simp add: Abs_formula_inverse dlist_xbefore_idem)
lemma Rep_formula_xbefore_not_sym:
"\<lbrakk> tempo1 a; tempo1 b\<rbrakk> \<Longrightarrow>
Rep_formula (xbefore a b) \<subseteq> Rep_formula (-xbefore b a)"
unfolding xbefore_formula_def tempo1_formula_def uminus_formula_def
by (simp add: Abs_formula_inverse
dlistset_xbefore_implies_not_sym_dlistset_xbefore)
(* Discharge the class axioms, one goal per case. *)
instance proof
fix a::"'a formula"
show "xbefore bot a = bot"
unfolding eq_formula_iff Rep_formula_xbefore_bot_1 by auto
next
fix a::"'a formula"
show "xbefore a bot = bot"
unfolding eq_formula_iff Rep_formula_xbefore_bot_2 by auto
next
fix a::"'a formula"
show "xbefore neutral a = a"
unfolding eq_formula_iff
using Rep_formula_xbefore_neutral_1 by auto
next
fix a::"'a formula"
show "xbefore a neutral = a"
unfolding eq_formula_iff
using Rep_formula_xbefore_neutral_2 by auto
next
fix a::"'a formula"
assume "tempo1 a"
thus "xbefore a a = bot"
unfolding eq_formula_iff
using Rep_formula_xbefore_not_idempotent by auto
next
fix a::"'a formula" and b::"'a formula"
assume "tempo1 a" "tempo1 b"
thus "xbefore a b \<le> - xbefore b a"
unfolding eq_formula_iff less_eq_formula_def
using Rep_formula_xbefore_not_sym by simp
fix a::"'a formula" and b::"'a formula"
assume "tempo1 a" "tempo1 b"
thus "tempo1 (inf a b)"
unfolding tempo1_formula_def
by (simp add: dlist_tempo1_inf Rep_formula_inf)
qed
text {* The above proof shows basic laws about \ac{algebra}, as shown in \cref{thm:xbefore-of-false-1,thm:xbefore-of-false-2,thm:xbefore_neutral_1,thm:xbefore_neutral_2,thm:xbefore-not-idempotent,law:tempo1-inter}. *}
end
(*subsubsection {* Associativity of \ac{algebra} *}*)
(* Associativity of xbefore on formulas, inherited from dlist_xbefore_assoc. *)
instantiation formula :: (type) algebra_of_temporal_faults_assoc
begin
instance proof
fix a::"'a formula" and b::"'a formula" and c::"'a formula"
show "xbefore (xbefore a b) c = xbefore a (xbefore b c)"
unfolding xbefore_formula_def tempo1_formula_def
by (simp add: Abs_formula_inverse dlist_xbefore_assoc)
qed
text {* The above proof shows associativity law about \ac{algebra}, as shown in \cref{thm:xbefore-associativity}. *}
end
(*subsubsection {* Equivalences in \ac{algebra} *}*)
(* Equivalence laws: independent_events and tempo2/3/4 are lifted from their
   dlist-level definitions via Rep_formula. *)
instantiation formula :: (type) algebra_of_temporal_faults_equivs
begin
definition
"independent_events a b =
dlist_independent_events
(\<lambda>xs. xs \<in> Rep_formula a) (\<lambda>xs. xs \<in> Rep_formula b)"
definition
"tempo2 a = dlist_tempo2 (\<lambda>xs. xs \<in> Rep_formula a)"
definition
"tempo3 a = dlist_tempo3 (\<lambda>xs. xs \<in> Rep_formula a)"
definition
"tempo4 a = dlist_tempo4 (\<lambda>xs. xs \<in> Rep_formula a)"
instance proof
fix a::"'a formula" and b::"'a formula"
assume "tempo1 a" "tempo1 b"
thus "inf (xbefore a b) (xbefore b a) = bot"
unfolding xbefore_formula_def tempo1_formula_def bot_formula_def
inf_formula_def
by (simp add: dlistset_xbefore_and Abs_formula_inverse)
next
fix a::"'a formula" and b::"'a formula"
assume "independent_events a b" "tempo1 a" "tempo1 b" "tempo2 a" "tempo2 b"
"tempo3 a" "tempo3 b" "tempo4 a" "tempo4 b"
thus "sup (xbefore a b) (xbefore b a) = inf a b"
unfolding xbefore_formula_def tempo1_formula_def tempo2_formula_def
tempo3_formula_def tempo4_formula_def independent_events_formula_def
sup_formula_def inf_formula_def
by (simp add: dlistset_xbefore_or Abs_formula_inverse)
next
fix a::"'a formula" and b::"'a formula"
assume "tempo2 a" "tempo2 b"
thus "tempo2 (sup a b)"
unfolding tempo2_formula_def
by (simp add: dlist_tempo2_sup Rep_formula_sup)
next
fix a::"'a formula" and b::"'a formula"
assume "tempo3 a" "tempo3 b"
thus "tempo3 (inf a b)"
unfolding tempo3_formula_def
by (simp add: dlist_tempo3_inf Rep_formula_inf)
next
fix a::"'a formula" and b::"'a formula"
assume "tempo4 a" "tempo4 b"
thus "tempo4 (sup a b)"
unfolding tempo4_formula_def
by (simp add: dlist_tempo4_sup Rep_formula_sup)
qed
text {* The above proof shows equivalences in \ac{algebra}, as shown in \cref{thm:xbefore-inf-equiv-bot,thm:xbefore-sup-equiv-inf,law:tempo2-union,law:tempo3-inter,law:tempo4-union}. *}
end
(*subsubsection {* Transitivity in \ac{algebra} *}*)
(* Transitivity laws for xbefore on formulas, discharged via the
   corresponding dlist-set lemmas. *)
instantiation formula :: (type) algebra_of_temporal_faults_trans
begin
instance proof
fix a::"'a formula" and b::"'a formula" and c::"'a formula"
assume "tempo1 a" "tempo1 b" "tempo2 a"
thus "inf (xbefore a b) (xbefore b c) \<le> xbefore a c"
unfolding tempo1_formula_def tempo2_formula_def xbefore_formula_def
less_eq_formula_def inf_formula_def
by (simp add: dlistset_xbefore_trans Abs_formula_inverse)
next
fix a::"'a formula" and b::"'a formula" and c::"'a formula"
assume "tempo1 b" "tempo3 b"
thus "inf (xbefore a b) (xbefore b c) = xbefore (xbefore a b) c"
unfolding xbefore_formula_def inf_formula_def tempo1_formula_def
tempo3_formula_def
by (simp add: Abs_formula_inverse dlistset_inf_xbefore_trans)
qed
text {* The above proof shows transitivity in \ac{algebra}, as shown in \cref{thm:inf_xbefore_trans}. *}
end
(*subsubsection {* Mixed operators in \ac{algebra} *}*)
(* Mixed Boolean/XBefore laws: distributivity of xbefore over sup and inf,
   the negation expansion, and absorption of negated operands. *)
instantiation formula :: (type) algebra_of_temporal_faults_mixed_ops
begin
instance proof
fix a::"'a formula" and b::"'a formula" and c::"'a formula"
show "xbefore (sup a b) c = sup (xbefore a c) (xbefore b c)"
unfolding xbefore_formula_def sup_formula_def
by (simp add: mixed_dlistset_xbefore_or1 Abs_formula_inverse)
next
fix a::"'a formula" and b::"'a formula" and c::"'a formula"
show "xbefore a (sup b c) = sup (xbefore a b) (xbefore a c)"
unfolding xbefore_formula_def sup_formula_def
by (simp add: mixed_dlistset_xbefore_or2 Abs_formula_inverse)
next
fix a::"'a formula" and b::"'a formula" and c::"'a formula"
assume "tempo1 a" "tempo1 b" "tempo2 a" "tempo2 b"
thus "xbefore (inf a b) c = inf (xbefore a c) (xbefore b c)"
unfolding xbefore_formula_def inf_formula_def tempo1_formula_def
tempo2_formula_def
by (simp add: dlistset_inf_xbefore_inf_1 Abs_formula_inverse)
next
fix a::"'a formula" and b::"'a formula" and c::"'a formula"
assume "tempo1 b" "tempo1 c" "tempo2 b" "tempo2 c"
thus "xbefore a (inf b c) = inf (xbefore a b) (xbefore a c)"
unfolding xbefore_formula_def inf_formula_def tempo1_formula_def
tempo2_formula_def
by (simp add: dlistset_inf_xbefore_inf_2 Abs_formula_inverse)
next
fix a::"'a formula" and b::"'a formula"
assume "independent_events a b" "tempo1 a" "tempo1 b" "tempo2 a" "tempo2 b"
"tempo3 a" "tempo3 b" "tempo4 a" "tempo4 b"
thus "(- xbefore a b) = (sup (sup (- a) (- b)) (xbefore b a))"
by (simp add: Abs_formula_inverse xbefore_formula_def uminus_formula_def
sup_formula_def independent_events_formula_def tempo1_formula_def
tempo2_formula_def tempo3_formula_def tempo4_formula_def
mixed_not_dlistset_xbefore)
next
fix a::"'a formula" and b::"'a formula" and c::"'a formula"
assume "tempo2 a"
thus "inf a (xbefore b c) =
sup (xbefore (inf a b) c) (xbefore b (inf a c))"
apply (auto simp add: xbefore_formula_def sup_formula_def inf_formula_def
tempo2_formula_def Abs_formula_inverse)
using and_dlistset_xbefore_equiv_or_dlistset_xbefore Abs_formula_inverse
by fastforce
next
fix a::"'a formula" and b::"'a formula"
assume "tempo1 a" "tempo2 b"
thus "xbefore (- a) b = b"
unfolding xbefore_formula_def tempo1_formula_def tempo2_formula_def
uminus_formula_def
by (auto simp add: Abs_formula_inverse not_1_dlistset_xbefore
Rep_formula_inverse)
next
fix a::"'a formula" and b::"'a formula"
assume "tempo1 b" "tempo2 a"
thus "xbefore a (- b) = a"
unfolding xbefore_formula_def tempo1_formula_def tempo2_formula_def
uminus_formula_def
by (auto simp add: Abs_formula_inverse not_2_dlistset_xbefore
Rep_formula_inverse)
qed
text {* The above proof shows laws with mixed Boolean and \ac{XBefore} operators, as shown in \cref{thm:xbefore-sup-1,thm:xbefore-sup-2,thm:not_xbefore,thm:and_xbefore_equiv_or_xbefore,thm:not_1_xbefore_equiv,thm:not_2_xbefore_equiv}. *}
end
(*<*)
(*subsection {* Equivalence of the new definition of \acs*{XBefore} with the old one *}*)
(* Legacy definition of XBefore: zs splits into two disjoint parts, the first
   satisfying S only and the second T only. Kept to prove equivalence with
   the new slice-based definition below. *)
definition old_dlist_xbefore
where
"old_dlist_xbefore S T zs \<equiv>
(\<exists> xs ys. S xs \<and> \<not> T xs \<and> T ys \<and> \<not> S ys \<and>
set (list_of_dlist xs) \<inter> set (list_of_dlist ys) = {} \<and>
list_of_dlist zs = (list_of_dlist xs) @ (list_of_dlist ys))"
(* Under tempo1 of both predicates, the new slice-based xbefore coincides
   with the legacy append-based definition. *)
theorem old_dlist_xbefore_equals_new_xbefore:
"\<lbrakk> dlist_tempo1 S; dlist_tempo1 T \<rbrakk> \<Longrightarrow>
dlist_xbefore S T zs = old_dlist_xbefore S T zs"
unfolding dlist_xbefore_append old_dlist_xbefore_def
using dlist_tempo_1_no_gap_append
by blast
(*>*)
(*<*)
(*subsection {* Soundness and completeness on the mapping rules*}*)
(* Soundness: two formulas are equal iff they have the same representative
   dlist set. Completeness: every dlist set is the representative of some
   formula (Rep/Abs form a bijection). *)
theorem algebra_of_temporal_faults_mapping_soundness:
"\<forall> (f\<^sub>1::'a formula) (f\<^sub>2::'a formula). \<exists> S. ((Rep_formula f\<^sub>1 = S \<and> Rep_formula f\<^sub>2 = S) \<longleftrightarrow> f\<^sub>1 = f\<^sub>2)"
by blast
theorem algebra_of_temporal_faults_mapping_completeness:
"\<forall> (S::'a dlist set). \<exists> f::'a formula. Rep_formula f = S"
using Abs_formula_inverse by auto
(*>*)
(*<*)
end
(*>*)
|
{"author": "andredidier", "repo": "phd", "sha": "113f7c8b360a3914a571db13d9513e313954f4b2", "save_path": "github-repos/isabelle/andredidier-phd", "path": "github-repos/isabelle/andredidier-phd/phd-113f7c8b360a3914a571db13d9513e313954f4b2/thesis/algebra/Algebra_of_Temporal_Faults_dlist.thy"}
|
#include <iostream>
#include <numbers>
#include <Eigen/Geometry>
#include "functional_mesh.h"
///////////////////////////////////////////////////////////////
// CFunctionalMesh
///////////////////////////////////////////////////////////////
// Default-construct an empty mesh: no color functor is installed.
// NOTE(review): SetColorFunctor(nullptr) presumably resets the coloring
// state/traits as well — its definition is not visible here.
CFunctionalMesh::CFunctionalMesh()
{
    SetColorFunctor(nullptr);
}
// Mark every cached derived quantity — points, normals, colors and the
// three families of level lines — as needing recomputation.
void CFunctionalMesh::m_InvalidateAll()const
{
    m_valid_points=false;
    m_valid_normals=false;
    m_valid_colors=false;
    m_levels_valid[0]=false;
    m_levels_valid[1]=false;
    m_levels_valid[2]=false;
}
// Recompute the axis-aligned bounding box of the current point grid into
// m_bounded_box ({min corner, max corner}).
// NOTE(review): reads m_points(0,0) unconditionally — assumes the grid has
// already been filled and is non-empty; confirm callers guarantee this.
void CFunctionalMesh::m_SetBoundedBox()const
{
    using eig_size_t=decltype(m_points.rows());
    // Seed both corners with the first point, then expand over all points.
    m_bounded_box.first=m_bounded_box.second=m_points(0,0);
    for(eig_size_t i_s=0;i_s<m_points.rows();++i_s)
    {
        for(eig_size_t i_t=0;i_t<m_points.cols();++i_t)
        {
            for(size_t dim=0;dim<3;dim++)
            {
                const point_t& current=m_points(i_s,i_t);
                m_bounded_box.first[dim]=std::min(current[dim],m_bounded_box.first[dim]);
                m_bounded_box.second[dim]=std::max(current[dim],m_bounded_box.second[dim]);
            }
        }
    }
}
// normal to parametrically defined surface
// defined as || (dr / ds) x (dr / dt) ||
// derivatives are approximated by finite differences
// Fill m_normals with unit normals of the parametric surface, defined as
// (dr/ds) x (dr/dt) normalized. Derivatives are approximated by central
// finite differences in the interior and one-sided differences on the
// boundary rows/columns and corners.
void CFunctionalMesh::m_FillNormals()const
{
    using eig_size_t=decltype(m_points.rows());
    float s_delta=m_grid.s_delta();
    // Central difference in s at interior row i.
    auto get_s_tangent=[this,&s_delta](size_t i,size_t j)->point_t
    {
        return (m_points(i+1,j)-m_points(i-1,j))/(2*s_delta);
    };
    // One-sided differences in s at the first/last row.
    auto get_s_top_bound=[&](size_t j)->point_t
    {
        return (m_points(1,j)-m_points(0,j))/(s_delta);
    };
    auto get_s_down_bound=[&](size_t j)->point_t
    {
        return (m_points(m_points.rows()-1,j)-m_points(m_points.rows()-2,j))/(s_delta);
    };
    float t_delta=m_grid.t_delta();
    // Central difference in t at interior column j.
    auto get_t_tangent=[this,&t_delta](size_t i,size_t j)->point_t
    {
        return (m_points(i,j+1)-m_points(i,j-1) )/(2*t_delta);
    };
    // One-sided differences in t at the first/last column.
    auto get_t_left_bound=[&](size_t i)->point_t
    {
        return (m_points(i,1)-m_points(i,0))/(t_delta);
    };
    auto get_t_right_bound=[&](size_t i)->point_t
    {
        return (m_points(i,m_points.cols()-1)-m_points(i,m_points.cols()-2))/(t_delta);
    };
    m_normals.resize(m_points.rows(),m_points.cols());
    // internal domain
    for(eig_size_t i=1;i<m_normals.rows()-1;++i)
        for(eig_size_t j=1;j<m_normals.cols()-1;++j)
        {
            m_normals(i,j)=get_s_tangent(i,j).cross(get_t_tangent(i,j));
            m_normals(i,j).normalize();
        }
    // left bound (column 0)
    for(eig_size_t i=1;i<m_normals.rows()-1;++i)
    {
        m_normals(i,0)=get_s_tangent(i,0).cross(get_t_left_bound(i));
        m_normals(i,0).normalize();
    }
    // right bound (last column)
    // Bug fix: the s-tangent was previously evaluated at column 0 instead of
    // the last column, producing wrong normals along the right edge.
    for(eig_size_t i=1;i<m_normals.rows()-1;++i)
    {
        m_normals(i,m_normals.cols()-1)=
            get_s_tangent(i,m_normals.cols()-1).cross(get_t_right_bound(i));
        m_normals(i,m_normals.cols()-1).normalize();
    }
    // top bound (row 0); here the loop index runs over columns.
    for(eig_size_t i=1;i<m_normals.cols()-1;++i)
    {
        m_normals(0,i)=get_s_top_bound(i).cross(get_t_tangent(0,i));
        m_normals(0,i).normalize();
    }
    // down bound (last row); loop index runs over columns.
    for(eig_size_t i=1;i<m_normals.cols()-1;++i)
    {
        m_normals(m_normals.rows()-1,i)=get_s_down_bound(i).cross(get_t_tangent(m_normals.rows()-1,i));
        m_normals(m_normals.rows()-1,i).normalize();
    }
    // corners: both tangents are one-sided.
    m_normals(0,0)=get_s_top_bound(0).cross(get_t_left_bound(0));
    m_normals(0,0).normalize();
    m_normals(0,m_normals.cols()-1)=get_s_top_bound(m_normals.cols()-1).cross(get_t_right_bound(0));
    m_normals(0,m_normals.cols()-1).normalize();
    m_normals(m_normals.rows()-1,m_normals.cols()-1)=get_s_down_bound(m_normals.cols()-1).
        cross(get_t_right_bound(m_normals.rows()-1));
    m_normals(m_normals.rows()-1,m_normals.cols()-1).normalize();
    m_normals(m_normals.rows()-1,0)=get_s_down_bound(0).cross(get_t_left_bound(m_normals.rows()-1));
    m_normals(m_normals.rows()-1,0).normalize();
}
// Extract iso-lines of coordinate 'index' (0=x, 1=y, 2=z) with a
// marching-squares pass over the parametric grid, storing the resulting
// segments in m_levels[index]. Requires points to be up to date.
void CFunctionalMesh::m_SetLevelLines(int index,float time)const
{
    assert(index==0||index==1||index==2);
    assert(m_valid_points);
    m_levels[index].clear();
    // Process one grid cell: returns up to two segments (as 4 endpoints) of
    // the iso-line 'level' crossing cell (i,j); 'lines' receives 0, 1 or 2.
    auto cell_process=[index,time,this](float level,int i,int j,int& lines)
        ->std::array<point_t,4>
    {
        auto is_same_sign=[](float _1,float _2){ return (_1>=0) == (_2>=0);};
        std::array<float,4> diffs;
        // Cell corner points, walked around the cell boundary.
        std::array<point_t,4> cell={m_points(i+1,j),m_points(i+1,j+1),m_points(i,j+1),m_points(i,j)};
        std::array<int,4> intersections;
        int num_intersetions=0;
        // Linear interpolation of the iso-crossing on side 'side'.
        auto intersection_point=[&](int side)
        {
            int next=(1+side)%4;
            return (cell[side]*diffs[next]-cell[next]*diffs[side])/(diffs[next]-diffs[side]);
        };
        diffs[0]=cell[0][index]-level;
        // Renamed loop variable (was 'i', shadowing the cell index used below).
        for(int side=0;side<4;++side)
        {
            int next_index=(side+1)%4;
            diffs[next_index]=cell[next_index][index]-level;
            if(!is_same_sign(diffs[side],diffs[next_index])) intersections[num_intersetions++]=side;
        }
        lines=num_intersetions/2;
        if(lines==0) return std::array<point_t,4>{};
        else if(num_intersetions==2)
        {
            return std::array<point_t,4>{intersection_point(intersections[0]),
                                         intersection_point(intersections[1]),
                                         point_t{},point_t{}};
        }
        else if(num_intersetions==4)
        {
            // Saddle cell: disambiguate the segment pairing using the sign
            // of the surface value at the cell centre.
            float center_diff=m_points_functor(m_grid.s(i)+m_grid.s_delta()/2,
                                               m_grid.t(j)+m_grid.t_delta()/2,
                                               time)[index]-level;
            if(is_same_sign(diffs[0],center_diff))
            {
                return {
                    intersection_point(intersections[0]),
                    intersection_point(intersections[1]),
                    intersection_point(intersections[2]),
                    intersection_point(intersections[3])
                };
            }
            else
            {
                return {
                    intersection_point(intersections[1]),
                    intersection_point(intersections[2]),
                    intersection_point(intersections[3]),
                    intersection_point(intersections[0])
                };
            }
        }
        // An odd number of sign changes around a closed cell is impossible.
        // Bug fix: the original ended with 'else assert(false);' and could
        // fall off the end of a value-returning lambda (undefined behavior
        // when NDEBUG is defined).
        assert(false);
        return std::array<point_t,4>{};
    };
    // Levels are spaced uniformly strictly inside the bounding box range.
    float delta=(m_bounded_box.second[index]-m_bounded_box.first[index])/(1+m_num_levels[index]);
    for(size_t k=0;k<m_num_levels[index];++k)
    {
        float level=delta*(k+1);
        m_levels[index].push_back(level_line_t(level));
        // Bug fix: the original source contained a mojibake token here
        // ("auto¤t"), restored to a reference to the line being filled.
        auto& current=m_levels[index].back();
        for(size_t i=0;i<m_grid.s_resolution;++i)
            for(size_t j=0;j<m_grid.t_resolution;++j)
            {
                int num_lines;
                auto lines=cell_process(level+m_bounded_box.first[index],i,j,num_lines);
                if(num_lines==0) continue;
                if(num_lines==1) current.push_back(lines[0],lines[1]);
                if(num_lines==2) current.push_back(lines[2],lines[3]);
            }
    }
}
// Paint the whole surface with the single RGB value 'p': installs a constant
// color functor, enables the colored trait and flags colors for
// recomputation on the next UpdateData(). Returns *this for chaining.
CFunctionalMesh& CFunctionalMesh::SetUniformColor(const point_t&p)
{
    SetColorFunctor([p](float,float,float){return p;});
    m_traits.SetColored(true);
    m_valid_colors=false;
    assert(m_traits.IsColored());
    return *this;
}
// Replace the rendering traits wholesale. Returns *this for chaining.
CFunctionalMesh& CFunctionalMesh::SetTraits(const CRenderingTraits&traits)
{
    m_traits=traits;
    return *this;
}
// Phong material coefficients; each setter returns *this for chaining.
CFunctionalMesh& CFunctionalMesh::SetAmbientReflection(float val)
{
    m_material.ambient=val;
    return *this;
}
CFunctionalMesh& CFunctionalMesh::SetDiffuseReflection(float val)
{
    m_material.diffuse=val;
    return *this;
}
CFunctionalMesh& CFunctionalMesh::SetSpecularReflection(float val)
{
    m_material.specular=val;
    return *this;
}
CFunctionalMesh& CFunctionalMesh::SetShininess(float val)
{
    m_material.shininess=val;
    return *this;
}
// Replace the parametric grid; invalidates all cached data when the grid
// actually changes. Returns *this for chaining.
CFunctionalMesh& CFunctionalMesh::SetGrid(grid_t grid)
{
    assert(!grid.empty());
    if(m_grid!=grid) m_InvalidateAll();
    m_grid=grid;
    return *this;
}
// Set the number of grid cells along s and t; a real change invalidates all
// cached data. Returns *this for chaining.
CFunctionalMesh& CFunctionalMesh::SetResolution(size_t s_resol,size_t t_resol)
{
    if(s_resol==m_grid.s_resolution&&t_resol==m_grid.t_resolution) return*this;
    m_grid.s_resolution=s_resol;
    m_grid.t_resolution=t_resol;
    assert(!m_grid.empty());
    m_InvalidateAll();
    return *this;
}
// Set the parameter ranges [s_min,s_max] and [t_min,t_max]; a real change
// invalidates all cached data. Returns *this for chaining.
CFunctionalMesh& CFunctionalMesh::SetRange(std::pair<float,float> s_range,std::pair<float,float> t_range)
{
    if(s_range==m_grid.s_range&&t_range==m_grid.t_range) return *this;
    m_grid.s_range=s_range;
    m_grid.t_range=t_range;
    m_InvalidateAll();
    return *this;
}
// Set the requested number of level lines along each axis. A changed count
// invalidates the cached lines for that axis.
// Bug fix: the original assigned (v == m_num_levels[i]) directly, which
// could re-validate an already-stale cache when the count was unchanged;
// an invalid cache must stay invalid until recomputed by UpdateData().
CFunctionalMesh& CFunctionalMesh::SetNumberOfLevelsX(uint32_t x)
{
    m_levels_valid[0]=m_levels_valid[0]&&(x==m_num_levels[0]);
    m_num_levels[0]=x;
    return *this;
}
CFunctionalMesh& CFunctionalMesh::SetNumberOfLevelsY(uint32_t y)
{
    m_levels_valid[1]=m_levels_valid[1]&&(y==m_num_levels[1]);
    m_num_levels[1]=y;
    return *this;
}
CFunctionalMesh& CFunctionalMesh::SetNumberOfLevelsZ(uint32_t z)
{
    m_levels_valid[2]=m_levels_valid[2]&&(z==m_num_levels[2]);
    m_num_levels[2]=z;
    return *this;
}
// Drop the surface definition: removes the points functor (making Empty()
// return true) and invalidates every cached quantity.
void CFunctionalMesh::Clear()
{
    m_points_functor=nullptr;
    m_InvalidateAll();
}
// Bring every cached quantity (grid points, normals, colors, level lines)
// up to date for the given time, recomputing only what is invalid.
// Returns a bitmask describing what was recomputed; also fires the
// update callback when one is installed.
CFunctionalMesh::CUpdateResult CFunctionalMesh::UpdateData(float time)
{
    int update=0;
    if(Empty()) return CUpdateResult(0);
    if(IsDynamic())
    {
        // A dynamic surface becomes stale whenever the time changes.
        if(time!=m_last_update_time) m_InvalidateAll();
    }
    else
    {
        // Static surfaces are always evaluated at t=0.
        time=0.0f;
    }
    if(!m_valid_points)
    {
        //std::cout<<"UPDATE POINTS\n";
        // Grid topology changed: reallocate and report update_grid.
        if(m_grid.s_resolution+1!=m_points.rows()||m_grid.t_resolution+1!=m_points.cols())
        {
            m_points.resize(m_grid.s_resolution+1,m_grid.t_resolution+1);
            update|=CUpdateResult::update_grid;
        }
        // NOTE(review): m_fill_functor is distinct from m_points_functor
        // (used by Empty()); presumably it wraps it — confirm in the header.
        m_fill_functor(m_points,m_grid,time);
        m_SetBoundedBox();
        m_valid_points=true;
        update|=CUpdateResult::update_points;
    }
    // Normals are only needed (and computed) for specular rendering.
    if(!m_valid_normals&&m_traits.IsSpecularSurface())
    {
        //std::cout<<"UPDATE NORMALS\n";
        m_normals.resize(m_grid.s_resolution+1,m_grid.t_resolution+1);
        m_FillNormals();
        m_valid_normals=true;
        update|=CUpdateResult::update_normals;
    }
    // Colors are only computed when the colored trait is enabled.
    if(!m_valid_colors&&m_traits.IsColored())
    {
        //std::cout<<"UPDATE COLORS\n";
        m_colors.resize(m_grid.s_resolution+1,m_grid.t_resolution+1);
        m_colors_functor(m_points,m_colors);
        m_valid_colors=true;
        update|=CUpdateResult::update_colors;
    }
    // Level lines per axis, only when requested by the traits.
    for(int i=0;i<3;++i)
    {
        if(!m_levels_valid[i]&&m_traits.IsLevelLines(i))
        {
            //std::cout<<"UPDATE LEVELS\n";
            m_SetLevelLines(i,time);
            m_levels_valid[i]=true;
            update|=CUpdateResult::update_levels(i);
        }
    }
    m_last_update_time=time;
    if(m_update_callback)
    {
        m_update_callback(*this,CUpdateResult(update));
    }
    return CUpdateResult(update);
}
// Convenience overload: refresh caches at the last update time.
CFunctionalMesh::CUpdateResult CFunctionalMesh::UpdateData()
{
    return UpdateData(m_last_update_time);
}
// Get functions
// Current parametric grid (resolution + ranges), by value.
CFunctionalMesh::grid_t CFunctionalMesh::GetGrid()const
{
    return m_grid;
}
const material_t&CFunctionalMesh::GetMaterial()const
{
    return m_material;
}
// Requested number of level lines along axis i (0=x,1=y,2=z).
uint32_t CFunctionalMesh::GetNumberOfLevel(int i)const
{
    return m_num_levels[i];
}
// The accessors below return nullptr while the corresponding cache is
// invalid (i.e. before a successful UpdateData()).
const CFunctionalMesh::matrix_t*CFunctionalMesh::Points()const
{
    return m_valid_points? &m_points:nullptr;
}
const CFunctionalMesh::matrix_t*CFunctionalMesh::Colors()const
{
    return m_valid_colors? &m_colors:nullptr;
}
const CFunctionalMesh::matrix_t*CFunctionalMesh::Normals()const
{
    return m_valid_normals? &m_normals:nullptr;
}
// {min corner, max corner}; valid exactly when the points are valid.
const std::pair<CFunctionalMesh::point_t,CFunctionalMesh::point_t>* CFunctionalMesh::BoundedBox()const
{
    return m_valid_points? &m_bounded_box:nullptr;
}
const std::vector<CFunctionalMesh::level_line_t>* CFunctionalMesh::Levels(int i)const
{
    return m_levels_valid[i]? &m_levels[i]:nullptr;
}
// True when no surface functor has been installed (see Clear()).
bool CFunctionalMesh::Empty()const
{
    return m_points_functor==nullptr;
}
float CFunctionalMesh::LastUpdateTime()const
{
    return m_last_update_time;
}
CRenderingTraits&CFunctionalMesh::RenderingTraits()
{
    return m_traits;
}
const CRenderingTraits&CFunctionalMesh::RenderingTraits()const
{
    return m_traits;
}
// Set specific surface
// Sphere of radius r in spherical parametrization (theta, phi).
// The theta range is nudged away from 0 and pi by 1e-5 — presumably to
// avoid degenerate tangents/normals at the poles (TODO confirm).
CFunctionalMesh& CFunctionalMesh::SetSphere(float r,size_t teta_resol,size_t phi_resol)
{
    using namespace std::numbers;
    SetMeshFunctor(plot::spherical([r](float,float){return r;}));
    SetRange({0.00001,pi_v<float>-0.00001},{0.0f,2*pi_v<float>});
    SetResolution(teta_resol,phi_resol);
    return *this;
}
// Torus with ring radius 'rad' and tube radius 'tubular', parametrized by
// the two angles (teta around the ring, phi around the tube).
CFunctionalMesh& CFunctionalMesh::SetTorus(float rad,float tubular,size_t rad_resol,size_t tubular_resol)
{
    using namespace std::numbers;
    auto mesh_f=[rad,tubular](float teta,float phi)
    {
        float r=rad+tubular*std::cos(phi);
        return point_t(r*std::cos(teta),r*std::sin(teta),tubular*std::sin(phi));
    };
    SetMeshFunctor(mesh_f);
    SetRange({0.0f,2*pi_v<float>},{0.0f,2*pi_v<float>});
    SetResolution(rad_resol,tubular_resol);
    return *this;
}
// Open cylinder of radius 'rad' and height 'h' along z; a single cell is
// enough along the height since the surface is ruled in z.
CFunctionalMesh& CFunctionalMesh::SetCylinder(float rad,float h,size_t phi_resol)
{
    using namespace std::numbers;
    auto mesh_f=[rad](float phi,float z)
    {
        return point_t(rad*std::cos(phi),rad*std::sin(phi),z);
    };
    SetMeshFunctor(mesh_f);
    SetRange({0.0f,2*pi_v<float>},{0.0f,h});
    SetResolution(phi_resol,1);
    return *this;
}
// Cone with base radius 'bottom_rad' at z=0 shrinking linearly to the apex
// at z=h.
CFunctionalMesh& CFunctionalMesh::SetCone(float bottom_rad,float h,size_t phi_resol)
{
    using namespace std::numbers;
    auto mesh_f=[bottom_rad,h](float phi,float z)
    {
        float rad=bottom_rad*(1.0f-z/h);
        return point_t(rad*std::cos(phi),rad*std::sin(phi),z);
    };
    SetMeshFunctor(mesh_f);
    SetRange({0.0f,2*pi_v<float>},{0.0f,h});
    SetResolution(phi_resol,1);
    return *this;
}
// Rectangular dx-by-dy plane in z=0, centered at the origin.
CFunctionalMesh& CFunctionalMesh::SetPlane(float dx,float dy,size_t x_resol,size_t y_resol)
{
    SetMeshFunctor([](float x,float y){return point_t(x,y,0);});
    SetRange({-dx/2,dx/2},{-dy/2,dy/2});
    SetResolution(x_resol,y_resol);
    return *this;
}
// Transformations
// Rigid-body placement: the anchor point and origin setters forward to the
// embedded CRigidTransform; all setters return *this for chaining.
CFunctionalMesh& CFunctionalMesh::SetAnchor(const point_t&p)
{
    m_anchor=p;
    return *this;
}
// Translate the origin by (x,y,z).
CFunctionalMesh& CFunctionalMesh::DeltaOrg(float x,float y,float z)
{
    m_rigid.DeltaOrg(x,y,z);
    return *this;
}
CFunctionalMesh& CFunctionalMesh::DeltaOrg(const point_t&p)
{
    m_rigid.DeltaOrg(p);
    return *this;
}
// Set the origin to an absolute position.
CFunctionalMesh& CFunctionalMesh::SetOrg(float x,float y,float z)
{
    m_rigid.SetOrg(x,y,z);
    return *this;
}
CFunctionalMesh& CFunctionalMesh::SetOrg(const point_t&p)
{
    m_rigid.SetOrg(p);
    return *this;
}
CRigidTransform::point_t CFunctionalMesh::GetOrg()const
{
    return m_rigid.GetOrg();
}
const CRigidTransform::matrix_t& CFunctionalMesh::GetTransform()const
{
    return m_rigid.GetTransform();
}
|
{"hexsha": "d4acfc251a828fc716fce5bfe3f4b8b5254c66ff", "size": 15126, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "functional_mesh.cpp", "max_stars_repo_name": "RomanFesenko/SurfaceViewer", "max_stars_repo_head_hexsha": "e8f27946ae3bce125b4c6639d9315e4652f401e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "functional_mesh.cpp", "max_issues_repo_name": "RomanFesenko/SurfaceViewer", "max_issues_repo_head_hexsha": "e8f27946ae3bce125b4c6639d9315e4652f401e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "functional_mesh.cpp", "max_forks_repo_name": "RomanFesenko/SurfaceViewer", "max_forks_repo_head_hexsha": "e8f27946ae3bce125b4c6639d9315e4652f401e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2728971963, "max_line_length": 105, "alphanum_fraction": 0.631230993, "num_tokens": 3956}
|
#!/usr/bin/env python
import os
import sys
import numpy as np
import random
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, LongType, DoubleType
import operator
import re
spark = SparkSession.builder \
.master('local[*]') \
.enableHiveSupport() \
.getOrCreate()
sc = spark.sparkContext
def loadData(namefile) :
    """Read a CSV file into an RDD of (line_index, [float, ...]).

    Uses the module-level SparkContext ``sc``; one point per input line.
    """
    textfile = sc.textFile(namefile) \
        .zipWithIndex() \
        .map(lambda x: (x[1], x[0].split(',') )) \
        .map(lambda x: (x[0], [float(i) for i in x[1]] ))
    return textfile
def dimension(rdd_load_data):
    """Return the largest row length in the RDD, i.e. the data dimensionality."""
    lengths = rdd_load_data.map(lambda record: len(record[1]))
    return lengths.reduce(max)
#calculate the distance between each point and centroids
def calculate_distance(x, y):
    """Euclidean distance between two points given as coordinate sequences."""
    diff = np.asarray(x) - np.asarray(y)
    return np.sqrt(np.dot(diff, diff))
#assign a centroid to each point
def assignToCluster(rdd, centroids):
    """Pair every point with its closest centroid.

    Returns an RDD of (point_id, (centroid_id, distance)).
    NOTE(review): cartesian() is O(points * centroids); acceptable for small k.
    """
    cartes = rdd.cartesian(centroids)
    # (point_id, (centroid_id, distance from the point to that centroid))
    cartes = cartes.map(lambda x: (x[0][0], (x[1][0], calculate_distance(x[1][1], x[0][1])) ) )
    # Keep only the closest centroid per point.
    cartes = cartes.groupByKey().mapValues(closestcentroide)
    return cartes
#choose the closest centroid to each node
def closestcentroide(y):
    """Return the (centroid_id, distance) pair with the smallest distance.

    On ties the first pair in iteration order wins, matching min()'s
    stability guarantee.
    """
    return min(y, key=lambda pair: pair[1])
#formula to calculate the new coordinate (mean)
def calculate_new_coordinate(x):
    """Component-wise mean of a collection of points, rounded to 2 decimals."""
    return [round(sum(axis) / len(axis), 2) for axis in zip(*x)]
# calculate the new coordinate of each centroid
def computeCentroids(rdd, assigned_centroids):
    """Mean position of the points assigned to each centroid.

    Joins the assignment RDD (point_id -> (centroid_id, dist)) with the
    point RDD (point_id -> coords) and averages coordinates per centroid.
    Returns an RDD of (centroid_id, mean_coordinates).
    """
    return assigned_centroids \
        .join(rdd) \
        .map(lambda x: (x[1][0][0], x[1][1]) ) \
        .groupByKey() \
        .mapValues(calculate_new_coordinate)
# calculate the distance intra clusters
def computeIntraClusterDistance(rdd, centroids):
    """Sum of the distances from every point to its assigned centroid.

    Returns a single-entry RDD [(1, total_distance)].
    """
    initdistance = assignToCluster(rdd, centroids)
    # Key everything on the constant 1 so reduceByKey yields one global sum.
    totaldistance = initdistance.map(lambda x: (1, x[1][1]) ).reduceByKey(lambda x,y: x+y)
    return totaldistance
def initCentroids(rdd_flat_data, k):
    """Pick k random points as initial centroids.

    Returns an RDD of (centroid_id, coordinates) with ids 0..k-1.
    """
    # Random seed so repeated calls can pick different initial samples.
    seed = random.randint(0, 100)
    centroids = rdd_flat_data.takeSample(False, k, seed=seed)
    # Sampled items are (point_id, coords); re-key them with fresh ids.
    return sc.parallelize(centroids) \
        .zipWithIndex() \
        .map(lambda x: (x[1], x[0][1]))
def kmeans(filename, k, max_attempt = 20, debug=True):
    """Run Lloyd's k-means on the CSV data in ``filename``.

    Args:
        filename: comma-separated data file, one point per line.
        k: number of clusters.
        max_attempt: maximum number of iterations.
        debug: print per-iteration progress.

    Returns:
        (iterations_run, total_intra_cluster_distance,
         assigned_points_rdd, centroids_rdd)
    """
    flat_data = loadData(filename)
    #dim = dimension(flat_data)
    centroids = initCentroids(flat_data, k)
    #assign every point to a centroids
    assigned_points = assignToCluster(flat_data, centroids)
    #compute centroids
    old_centroids = computeCentroids(flat_data, assigned_points)
    converged = False
    step_count = 0
    step_max = max_attempt
    while not converged and step_count < step_max:
        step_count += 1
        if(debug):
            print('running step ', step_count)
        assigned_points = assignToCluster(flat_data, old_centroids)
        new_centroids = computeCentroids(flat_data, assigned_points)
        distance_intracluster = computeIntraClusterDistance(flat_data, new_centroids)
        # Total displacement of all centroids; exactly zero means convergence.
        centroid_changed = old_centroids.join(new_centroids).map(lambda x: np.sqrt(np.sum(np.square(np.array(x[1][0]) - np.array(x[1][1])))) ).reduce(lambda x, y: x+y)
        if(debug):
            print(' -> ' , round(centroid_changed, 2))
        if centroid_changed == 0:
            converged = True
        # collect() + parallelize truncates the RDD lineage between iterations.
        old_centroids = sc.parallelize(new_centroids.collect())
    return (step_count, distance_intracluster.take(1)[0][1], assigned_points, old_centroids )
if __name__ == "__main__":
    # Usage: <script> <csv_file> <k> <max_iterations>
    file_name = sys.argv[1]
    k = int(sys.argv[2])
    max_attempt = int(sys.argv[3])
    step_count, distance, points, centroids= kmeans(file_name, k, max_attempt)
    print("\n")
    print("distance : " , distance)
    print("nbr iteration : " , step_count)
    print("\n")
    print("points [echantillon]: " , points.take(2))
    print("centroids [echantillon]: ", centroids.take(2))
    print("\n")
|
{"hexsha": "57d194d2143790d759d7410dc103e0cd45c37507", "size": 4299, "ext": "py", "lang": "Python", "max_stars_repo_path": "projet_data_analitics/exercice_1.py", "max_stars_repo_name": "Zouhairhajji/Data_science", "max_stars_repo_head_hexsha": "17b4f4f7bad759e1ca5849f106c058567c9142d7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "projet_data_analitics/exercice_1.py", "max_issues_repo_name": "Zouhairhajji/Data_science", "max_issues_repo_head_hexsha": "17b4f4f7bad759e1ca5849f106c058567c9142d7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "projet_data_analitics/exercice_1.py", "max_forks_repo_name": "Zouhairhajji/Data_science", "max_forks_repo_head_hexsha": "17b4f4f7bad759e1ca5849f106c058567c9142d7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2828947368, "max_line_length": 168, "alphanum_fraction": 0.6140963015, "include": true, "reason": "import numpy", "num_tokens": 1113}
|
from gemmi import cif
import numpy as np
from pyxtal import pyxtal
def cif_data(cif_file):
    """Read a CIF file and split it into header metadata and atom-site loops.

    Args:
        cif_file (str): path to the .cif file.

    Returns:
        tuple: ``(metadata, loop)`` where ``metadata`` maps header pair
        names to their values and ``loop`` maps each ``_atom_site_*`` tag
        to the list of entries in that loop column (empty if absent).
    """
    doc = cif.read_file(cif_file)
    block = doc.sole_block()
    # Header key/value pairs (loop rows have item.pair == None).
    metadata = {
        entry.pair[0]: entry.pair[1]
        for entry in block
        if entry.pair is not None
    }
    atom_site_tags = (
        "_atom_site_type_symbol",
        "_atom_site_label",
        "_atom_site_symmetry_multiplicity",
        "_atom_site_fract_x",
        "_atom_site_fract_y",
        "_atom_site_fract_z",
        "_atom_site_occupancy",
    )
    loop = {tag: list(block.find_loop(tag)) for tag in atom_site_tags}
    return metadata, loop
def load_structure(cif="data/mp-27692_BaB4O7.cif"):
    """Load a crystalline structure from a cif file.

    Args:
        cif (str, optional): path to the cif file. Defaults to
            'data/mp-27692_BaB4O7.cif'.

    Returns:
        The structure converted to pymatgen form.
    """
    crystal = pyxtal()
    crystal.from_seed(seed=cif)
    return crystal.to_pymatgen()
def max_n(struc, num=10000):
    """Calculate the maximum supercell index 'n'.

    Returns the largest ``n`` such that an n x n x n supercell of
    ``struc`` has at most ``num`` sites (``n`` is additionally capped
    below ``num``, matching the search range of the original
    array-based implementation).

    Args:
        struc (pymatgen.core.structure.Structure): a mutable version
            of the pyxtal structure; only ``num_sites`` is read.
        num (int, optional): maximum allowed number of sites in the
            supercell. Defaults to 10000.

    Returns:
        int: largest valid supercell index (0 if even n=1 is too big).
    """
    sites = struc.num_sites
    if sites <= 0:
        # Degenerate structure: all candidate counts are 0; the original
        # np.argmax over an all-equal array returned index 0.
        return 0
    # Walk n upward instead of materialising `num`-element numpy arrays
    # as the original did: O(num**(1/3)) time, O(1) memory, same result.
    n = 0
    while (n + 1) < num and sites * (n + 1) ** 3 <= num:
        n += 1
    return n
def sc(struc, n=1, save=False, centred=True):
    """Generate the atomic positions of an n x n x n supercell.

    Expands the structure loaded from a .cif file and parses each
    site's string representation into an element symbol plus x, y, z
    coordinates.

    Args:
        struc (pyxtal.pyxtal): structure; output of the
            load_structure() method.
        n (int, optional): supercell multiplier along each axis.
            Defaults to 1.
        save (bool, optional): whether to save the output to a file.
        centred (bool, optional): if True gives centered supercell.

    Returns:
        numpy structured array of (element, x, y, z) records.
    """
    supercell = struc.__mul__(scaling_matrix=n)
    n_sites = np.shape(supercell)[0]
    xs, ys, zs, symbols = [], [], [], []
    # Each site prints as "[x y z] Elem"; strip brackets, then split.
    # NOTE(review): assumes coords are tokens 0-2 and the element is
    # token 3 of the site's str() — confirm against the site class.
    for idx in range(n_sites):
        tokens = str(supercell[idx]).replace("[", "").replace("]", "").split()
        xs.append(float(tokens[0]))
        ys.append(float(tokens[1]))
        zs.append(float(tokens[2]))
        symbols.append(tokens[3])
    if centred:
        # Shift each axis by half its maximum (list - numpy scalar
        # broadcasts to an ndarray).
        columns = (xs - np.max(xs) / 2, ys - np.max(ys) / 2, zs - np.max(zs) / 2)
        filename = "struc_centred.txt"
    else:
        columns = (xs, ys, zs)
        filename = "struc.txt"
    arr = np.array(list(zip(symbols, *columns)), "U3,f,f,f")
    if save:
        np.savetxt(
            filename, arr, fmt="%s %.4f %.4f %.4f", header="Elem\tx\ty\tz"
        )
    return arr
|
{"hexsha": "e7a6cd9e1096d917541bae052113a368471e396c", "size": 2916, "ext": "py", "lang": "Python", "max_stars_repo_path": "rmctools/builder.py", "max_stars_repo_name": "jorobledo/RMCtools", "max_stars_repo_head_hexsha": "6ff5ebb35ac70f9275c23ab33d5576397784064d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-31T19:46:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-31T19:46:46.000Z", "max_issues_repo_path": "rmctools/builder.py", "max_issues_repo_name": "jorobledo/RMCtools", "max_issues_repo_head_hexsha": "6ff5ebb35ac70f9275c23ab33d5576397784064d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-31T21:10:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-31T21:11:33.000Z", "max_forks_repo_path": "rmctools/builder.py", "max_forks_repo_name": "jorobledo/RMCtools", "max_forks_repo_head_hexsha": "6ff5ebb35ac70f9275c23ab33d5576397784064d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-31T19:46:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-31T19:46:56.000Z", "avg_line_length": 26.752293578, "max_line_length": 74, "alphanum_fraction": 0.5925925926, "include": true, "reason": "import numpy", "num_tokens": 824}
|
[STATEMENT]
lemma length_nat: "length (nat_to_bv n) = length_nat n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (nat_to_bv n) = length_nat n
[PROOF STEP]
apply (simp add: length_nat_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (nat_to_bv n) = (LEAST na. n < 2 ^ na)
[PROOF STEP]
apply (rule Least_equality [symmetric])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. n < 2 ^ length (nat_to_bv n)
2. \<And>y. n < 2 ^ y \<Longrightarrow> length (nat_to_bv n) \<le> y
[PROOF STEP]
prefer 2
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>y. n < 2 ^ y \<Longrightarrow> length (nat_to_bv n) \<le> y
2. n < 2 ^ length (nat_to_bv n)
[PROOF STEP]
apply (rule length_nat_to_bv_upper_limit)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>y. n < 2 ^ y \<Longrightarrow> n \<le> 2 ^ y - 1
2. n < 2 ^ length (nat_to_bv n)
[PROOF STEP]
apply arith
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. n < 2 ^ length (nat_to_bv n)
[PROOF STEP]
apply (rule ccontr)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> n < 2 ^ length (nat_to_bv n) \<Longrightarrow> False
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> n < 2 ^ length (nat_to_bv n) \<Longrightarrow> False
[PROOF STEP]
assume "~ n < 2 ^ length (nat_to_bv n)"
[PROOF STATE]
proof (state)
this:
\<not> n < 2 ^ length (nat_to_bv n)
goal (1 subgoal):
1. \<not> n < 2 ^ length (nat_to_bv n) \<Longrightarrow> False
[PROOF STEP]
hence "2 ^ length (nat_to_bv n) \<le> n"
[PROOF STATE]
proof (prove)
using this:
\<not> n < 2 ^ length (nat_to_bv n)
goal (1 subgoal):
1. 2 ^ length (nat_to_bv n) \<le> n
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
2 ^ length (nat_to_bv n) \<le> n
goal (1 subgoal):
1. \<not> n < 2 ^ length (nat_to_bv n) \<Longrightarrow> False
[PROOF STEP]
hence "length (nat_to_bv n) < length (nat_to_bv n)"
[PROOF STATE]
proof (prove)
using this:
2 ^ length (nat_to_bv n) \<le> n
goal (1 subgoal):
1. length (nat_to_bv n) < length (nat_to_bv n)
[PROOF STEP]
by (rule length_nat_to_bv_lower_limit)
[PROOF STATE]
proof (state)
this:
length (nat_to_bv n) < length (nat_to_bv n)
goal (1 subgoal):
1. \<not> n < 2 ^ length (nat_to_bv n) \<Longrightarrow> False
[PROOF STEP]
thus False
[PROOF STATE]
proof (prove)
using this:
length (nat_to_bv n) < length (nat_to_bv n)
goal (1 subgoal):
1. False
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1148, "file": "RSAPSS_Word", "length": 15}
|
// Copyright (c) 2006, Giovanni P. Deretta
//
// This code may be used under either of the following two licences:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE. OF SUCH DAMAGE.
//
// Or:
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_COROUTINE_CONTEXT_WINDOWS_HPP_20060625
#define BOOST_COROUTINE_CONTEXT_WINDOWS_HPP_20060625
#include <windows.h>
#include <winnt.h>
#include <boost/config.hpp>
#include <boost/config.hpp>
#include <boost/noncopyable.hpp>
#include <boost/coroutine/exception.hpp>
#include <boost/coroutine/detail/swap_context.hpp>
namespace boost {namespace coroutines {namespace detail{
namespace windows {
typedef LPVOID fiber_ptr;
/*
* This number (0x1E00) has been sighted in the wild (at least on windows XP systems)
 * as return value from GetCurrentFiber() on non fibrous threads. This is somehow related
* to OS/2 where the current fiber pointer is overloaded as a version field.
* On non-NT systems, 0 is returned.
*/
fiber_ptr fiber_magic = reinterpret_cast<fiber_ptr>(0x1E00);
/*
* Return true if current thread is a fiber.
 * FIXME: on longhorn should use IsThreadAFiber
*/
bool is_fiber() {
fiber_ptr current = GetCurrentFiber();
return current != 0 && current != fiber_magic;
}
/*
* Windows implementation for the context_impl_base class.
* @note context_impl is not required to be consistent
* If not initialized it can only be swapped out, not in
* (at that point it will be initialized).
*/
  class fibers_context_impl_base {
  public:
    /**
     * Create an empty context.
     * An empty context cannot be restored from,
     * but can be saved in.
     */
    fibers_context_impl_base() :
      m_ctx(0) {}

    /*
     * Free function. Saves the current context in @p from
     * and restores the context in @p to. On windows the from
     * parameter is ignored. The current context is saved on the
     * current fiber.
     * Note that if the current thread is not a fiber, it will be
     * converted to fiber on the fly on call and unconverted before
     * return. This is expensive. The user should convert the
     * current thread to a fiber once on thread creation for better performance.
     * Note that we can't leave the thread unconverted on return or else we
     * will leak resources on thread destruction. Do the right thing by
     * default.
     */
    friend
    void
    swap_context(fibers_context_impl_base& from,
                 const fibers_context_impl_base& to,
                 default_hint) {
      if(!is_fiber()) {
        // Plain thread: convert to a fiber for the duration of the switch,
        // then convert back so nothing leaks on thread exit.
        BOOST_ASSERT(from.m_ctx == 0);
        from.m_ctx = ConvertThreadToFiber(0);
        BOOST_ASSERT(from.m_ctx != 0);
        SwitchToFiber(to.m_ctx);
        BOOL result = ConvertFiberToThread();
        BOOST_ASSERT(result);
        (void)result;  // result only used by the assert in debug builds
        from.m_ctx = 0;
      } else {
        // Already a fiber. If `from` is an empty (main) context, borrow the
        // current fiber handle for the switch and clear it again afterwards.
        bool call_from_main = from.m_ctx == 0;
        if(call_from_main)
          from.m_ctx = GetCurrentFiber();
        SwitchToFiber(to.m_ctx);
        if(call_from_main)
          from.m_ctx = 0;
      }
    }

    ~fibers_context_impl_base() {}

  protected:
    // Adopt an existing fiber handle (used by derived fibers_context_impl).
    explicit
    fibers_context_impl_base(fiber_ptr ctx) :
      m_ctx(ctx) {}

    // Win32 fiber handle; 0 means "empty context" (save-only).
    fiber_ptr m_ctx;
  };
template<typename T>
inline
VOID CALLBACK
trampoline(LPVOID pv) {
T* fun = static_cast<T*>(pv);
BOOST_ASSERT(fun);
(*fun)();
}
  class fibers_context_impl :
    public fibers_context_impl_base,
    private boost::noncopyable {
  public:
    typedef fibers_context_impl_base context_impl_base;

    // Stack size passed to CreateFiber when the caller requests the default
    // (stack_size == -1 maps to 0 below, i.e. the OS default commit size).
    enum {default_stack_size = 8192};

    /**
     * Create a context that on restore invokes Functor on
     * a new stack. The stack size can be optionally specified.
     */
    template<typename Functor>
    explicit
    fibers_context_impl(Functor& cb, std::ptrdiff_t stack_size) :
      fibers_context_impl_base
      (CreateFiber(stack_size== -1? 0 : stack_size,
                   static_cast<LPFIBER_START_ROUTINE>(&trampoline<Functor>),
                   static_cast<LPVOID>(&cb)))
    {
      // CreateFiber returns 0 on failure.
      BOOST_ASSERT(m_ctx);
    }

    // Owns the fiber created above; release it on destruction.
    ~fibers_context_impl() {
      DeleteFiber(m_ctx);
    }

  private:
  };
typedef fibers_context_impl context_impl;
}
} } }
#endif
|
{"hexsha": "a7c6092a58fc0353c326731664e30a403bf97b03", "size": 5436, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/coroutine/detail/context_windows.hpp", "max_stars_repo_name": "erikfrey/coroutine", "max_stars_repo_head_hexsha": "fe1b9e12f96e320446da1ef49015955162edb17f", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2015-10-19T02:52:00.000Z", "max_stars_repo_stars_event_max_datetime": "2016-05-09T09:52:25.000Z", "max_issues_repo_path": "boost/coroutine/detail/context_windows.hpp", "max_issues_repo_name": "erikfrey/coroutine", "max_issues_repo_head_hexsha": "fe1b9e12f96e320446da1ef49015955162edb17f", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "boost/coroutine/detail/context_windows.hpp", "max_forks_repo_name": "erikfrey/coroutine", "max_forks_repo_head_hexsha": "fe1b9e12f96e320446da1ef49015955162edb17f", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3571428571, "max_line_length": 93, "alphanum_fraction": 0.6791758646, "num_tokens": 1303}
|
[STATEMENT]
lemma pref_gcd_lq: assumes "u \<le>p v" shows "(gcd \<^bold>|u\<^bold>| \<^bold>|u\<inverse>\<^sup>>v\<^bold>|) = gcd \<^bold>|u\<^bold>| \<^bold>|v\<^bold>|"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. gcd \<^bold>|u\<^bold>| \<^bold>|u\<inverse>\<^sup>>v\<^bold>| = gcd \<^bold>|u\<^bold>| \<^bold>|v\<^bold>|
[PROOF STEP]
using gcd_add2[of "\<^bold>|u\<^bold>|" "\<^bold>|u\<inverse>\<^sup>>v\<^bold>|", unfolded lq_len[OF assms], symmetric]
[PROOF STATE]
proof (prove)
using this:
gcd \<^bold>|u\<^bold>| \<^bold>|u\<inverse>\<^sup>>v\<^bold>| = gcd \<^bold>|u\<^bold>| \<^bold>|v\<^bold>|
goal (1 subgoal):
1. gcd \<^bold>|u\<^bold>| \<^bold>|u\<inverse>\<^sup>>v\<^bold>| = gcd \<^bold>|u\<^bold>| \<^bold>|v\<^bold>|
[PROOF STEP]
.
|
{"llama_tokens": 359, "file": "Combinatorics_Words_CoWBasic", "length": 2}
|
[STATEMENT]
lemma Suc_le_length_iff:
"(Suc n \<le> length xs) = (\<exists>x ys. xs = x # ys \<and> n \<le> length ys)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (Suc n \<le> length xs) = (\<exists>x ys. xs = x # ys \<and> n \<le> length ys)
[PROOF STEP]
by (metis Suc_le_D[of n] Suc_le_mono[of n] Suc_length_conv[of _ xs])
|
{"llama_tokens": 156, "file": null, "length": 1}
|
      subroutine tripck(na,nc,nerr,nn,noutpt,nttyo,n1,n2,n3,
     $ qdup12,qdup13,qdup23,unam1,unam2,unam3)
c
c     This subroutine checks the species triplets that were read
c     from the DATA0 file for illegal combinations.
c
c     The only legal combinations here correspond to ternary
c     systems and maximal third order in the expansion of the
c     Gibbs energy. These are:
c
c       nnn'    mu(nnn') maps to itself
c       nca     zeta(nca) maps to mu(nca)
c       cc'a    psi(cc'a) maps to mu(cc'a)
c       aa'c    psi(aa'c) maps to mu(aa'c)
c
c     Here n = neutral, n' a different neutral, c = cation, c' = a
c     different cation, a = anion, and a' = a different anion.
c     Solvent water (w) may not appear in any combination in the
c     normal Pitzer treatment of electrolyte solutions.
c
c     Other possible combinations are not allowed because of one or
c     more of the following reasons:
c
c       1. The combination corresponds to systems of higher order
c          (e.g., cc'c'')
c       2. The combination corresponds to systems of lesser order
c          (e.g., nnn and cca)
c       3. The combination corresponds to parameters that are defined
c          by convention to be zero (i.e., to unused parameters).
c          Examples include ccc and ncc.
c
c     For each illegal triplet found, an error message is written to
c     both output units and the error counter nerr is incremented.
c
c     This subroutine is called by:
c
c       EQPT/rdpz3.f
c
c-----------------------------------------------------------------------
c
c     Principal input:
c
c       na     = number of anions in a species triplet
c       nc     = number of cations in a species triplet
c       nn     = number of neutral species in a species triplet
c       n1     = index of the first species in a triplet
c       n2     = index of the second species in a triplet
c       n3     = index of the third species in a triplet
c       qdup12 = .true. if the first and second species are identical
c       qdup13 = .true. if the first and third species are identical
c       qdup23 = .true. if the second and third species are identical
c       unam1  = first name in a species triplet
c       unam2  = second name in a species triplet
c       unam3  = third name in a species triplet
c
c     Principal output:
c
c       nerr   = error counter
c
c-----------------------------------------------------------------------
c
      implicit none
c
c-----------------------------------------------------------------------
c
c     Calling sequence variable declarations.
c
      integer noutpt,nttyo
c
      integer na,nc,nn,nerr,n1,n2,n3
c
      logical qdup12,qdup13,qdup23
c
      character*24 unam1,unam2,unam3
c
c-----------------------------------------------------------------------
c
c     Local variable declarations.
c
      integer j2,j3,j4
c
      integer ilnobl
c
c-----------------------------------------------------------------------
c
c     Positions of the last non-blank characters of the species names
c     (used to trim the names in the error messages below).
c
      j2 = ilnobl(unam1)
      j3 = ilnobl(unam2)
      j4 = ilnobl(unam3)
c
c* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
c
c     Check for illegal appearance of solvent water.
c     Note: species index 1 denotes water as well as the literal name.
c
      if (unam1(1:4) .eq.'H2O ' .or. unam2(1:4) .eq.'H2O ' .or.
     $ unam3(1:4).eq.'H2O ' .or. n1.eq.1 .or. n2.eq.1 .or.
     $ n3.eq.1) then
        write (noutpt,1430) unam1(1:j2),unam2(1:j3),unam3(1:j4)
        write (nttyo,1430) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1430 format(/' * Error - (EQPT/tripck) Have found an illegal data',
     $ ' block for the',/7x,'species triplet ',a,', ',a,', ',a,
     $ ' among the blocks for',/7x,'"mixture parameters".',
     $ ' Solvent water may not appear in such a',/7x,'triplet in',
     $ ' the normal Pitzer treatment of electrolyte',
     $ /7x,'solutions.')
        nerr = nerr + 1
      endif
c
c* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
c
      if (qdup12 .and. qdup13) then
c
c       The same species appears three times.
c
        if (nc.gt.0 .or. na.gt.0) then
c
c         The same cation or anion appears three times.(ccc or aaa).
c
          write (noutpt,1110) unam1(1:j2),unam2(1:j3),unam3(1:j4)
          write (nttyo,1110) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1110 format(/' * Error - (EQPT/tripck) Have found an illegal data',
     $ ' block for the',/7x,'species triplet ',a,', ',a,', ',a,
     $ ' among the blocks',/7x,'for "mixture parameters". The same',
     $ ' ion appears three times.',/7x,'This is not a valid',
     $ ' combination, as the corrresponding lambda and mu',
     $ /7x,'data are defined to be zero by convention in the normal',
     $ ' Pitzer',/7x,'treatment of electrolyte solutions.')
        endif
c
        if (nn .gt. 0) then
c
c         The same neutral appears three times (nnn).
c
          write (noutpt,1112) unam1(1:j2),unam2(1:j3),unam3(1:j4)
          write (nttyo,1112) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1112 format(/' * Error - (EQPT/tripck) Have found an illegal data',
     $ ' block for the',/7x,'species triplet ',a,', ',a,', ',a,
     $ ' among the blocks',/7x,'for "mixture parameters". The',
     $ ' same neutral appears three times.',/7x,'This is not',
     $ ' a valid combination for the present kind of block.',
     $ /7x,'Enter these data in a block for "single-salt',
     $ ' parameters".')
        endif
c
        nerr = nerr + 1
        go to 999
      endif
c
c* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
c
c     Check for other illegal combinations involving two or more
c     neutrals.
c
      if (nn .eq. 3) then
        if (.not.(qdup12 .or. qdup13 .or. qdup23)) then
c
c         Have three distinct neutrals (nn'n'').
c
          write (noutpt,1130) unam1(1:j2),unam2(1:j3),unam3(1:j4)
          write (nttyo,1130) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1130 format(/' * Error - (EQPT/tripck) Have found an illegal data',
     $ ' block for the',/7x,'species triplet ',a,', ',a,', ',a,
     $ ' among the blocks',/7x,'for "mixture parameters". Three',
     $ ' distinct neutrals appear. This is not',/7x,'a valid',
     $ ' combination in the context of the normal Pitzer',
     $ /7x,'treatment of electrolyte solutions as it corresponds',
     $ ' to a',/7x,'quaternary system.')
          nerr = nerr + 1
          go to 999
        endif
      endif
c
      if (nn .eq. 2) then
        if (nc.eq.1 .or. na.eq.1) then
c
          if (qdup12 .or. qdup23 .or. qdup13) then
c
c           Have one of the following combinations: nnc or nna.
c
            write (noutpt,1132) unam1(1:j2),unam2(1:j3),unam3(1:j4)
            write (nttyo,1132) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1132 format(/' * Error - (EQPT/tripck) Have found an illegal',
     $ ' data block for the',/7x,'species triplet ',a,', ',a,', ',
     $ a,' among the blocks',/7x,'for "mixture parameters". One',
     $ ' neutral appears twice with an ion.',/7x,'This is not a',
     $ ' valid combination in the context of the normal Pitzer',
     $ /7x,'treatment of electrolyte solutions. The corresponding',
     $ ' parameters',/7x,'are not used.')
          else
c
c           Have one of the following combinations: nn'c or nn'a.
c
            write (noutpt,1136) unam1(1:j2),unam2(1:j3),unam3(1:j4)
            write (nttyo,1136) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1136 format(/' * Error - (EQPT/tripck) Have found an illegal',
     $ ' data block for the',/7x,'species triplet ',a,', ',a,', ',
     $ a,' among the blocks',/7x,'for "mixture parameters". Two',
     $ ' neutrals appear with an ion.',/7x,'This is not a',
     $ ' valid combination in the context of the normal Pitzer',
     $ /7x,'treatment of electrolyte solutions as it corresponds',
     $ ' to a',/7x,'quaternary system.')
          endif
          nerr = nerr + 1
          go to 999
        endif
      endif
c
c* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
c
c     Check for other illegal combinations involving two or more
c     cations.
c
      if (nc .eq. 3) then
        if (.not.(qdup12 .or. qdup13 .or. qdup23)) then
c
c         Have three distinct cations (cc'c'').
c
          write (noutpt,1140) unam1(1:j2),unam2(1:j3),unam3(1:j4)
          write (nttyo,1140) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1140 format(/' * Error - (EQPT/tripck) Have found an illegal data',
     $ ' block for the',/7x,'species triplet ',a,', ',a,', ',a,
     $ ' among the blocks',/7x,'for "mixture parameters". Three',
     $ ' distinct cations appear. This is',/7x,'not a valid',
     $ ' combination in the context of the normal Pitzer',
     $ /7x,'treatment of electrolyte solutions as it corresponds',
     $ ' to a',/7x,'quaternary system.')
          nerr = nerr + 1
          go to 999
        endif
c
        if (qdup12 .and. qdup13) then
c
c         Have one cation appearing three times (ccc).
c
          write (noutpt,1142) unam1(1:j2),unam2(1:j3),unam3(1:j4)
          write (nttyo,1142) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1142 format(/' * Error - (EQPT/tripck) Have found an illegal data',
     $ ' block for the',/7x,'species triplet ',a,', ',a,', ',a,
     $ ' among the blocks',/7x,'for "mixture parameters". A single',
     $ ' cation appears three times. This',/7x,'is not a valid',
     $ ' combination in the context of the normal Pitzer',
     $ /7x,'treatment of electrolyte solutions. The corresponding',
     $ ' parameters are',/7x,'zero by convention.')
          nerr = nerr + 1
          go to 999
        endif
c
        if (qdup12 .or. qdup13 .or. qdup23) then
c
c         Have one cation appearing two times (ccc').
c
          write (noutpt,1144) unam1(1:j2),unam2(1:j3),unam3(1:j4)
          write (nttyo,1144) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1144 format(/' * Error - (EQPT/tripck) Have found an illegal data',
     $ ' block for the',/7x,'species triplet ',a,', ',a,', ',a,
     $ ' among the blocks',/7x,'for "mixture parameters". A single',
     $ ' cation appears twice with another',/7x,'cation. This is',
     $ ' not a valid combination in the context of the normal',
     $ /7x,'Pitzer treatment of electrolyte solutions. The',
     $ ' corresponding',/7x,'parameters are not used.')
          nerr = nerr + 1
          go to 999
        endif
      endif
c
      if (nc .eq. 2) then
c
        if (qdup12 .or. qdup23 .or. qdup13) then
c
          if (nn .eq. 1) then
c
c           Have a repeated cation and a neutral (ccn).
c
            write (noutpt,1150) unam1(1:j2),unam2(1:j3),unam3(1:j4)
            write (nttyo,1150) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1150 format(/' * Error - (EQPT/tripck) Have found an illegal',
     $ ' data block for the',/7x,'species triplet ',a,', ',a,', ',
     $ a,' among the blocks',/7x,'for "mixture parameters". One',
     $ ' cation appears twice with a neutral.',/7x,'This is not a',
     $ ' valid combination in the context of the normal Pitzer',
     $ /7x,'treatment of electrolyte solutions. The',
     $ ' corresponding parameters',/7x,'are not used.')
          elseif (na .eq. 1) then
c
c           Have a repeated cation and an anion (cca).
c
            write (noutpt,1152) unam1(1:j2),unam2(1:j3),unam3(1:j4)
            write (nttyo,1152) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1152 format(/' * Error - (EQPT/tripck) Have found an illegal',
     $ ' data block for the',/7x,'species triplet ',a,', ',a,', ',
     $ a,' among the blocks',/7x,'for "mixture parameters". One',
     $ ' cation appears twice with an anion.',/7x,'This is not a',
     $ ' valid combination for the present type of block. The',
     $ /7x,'corresponding parameters properly appear in a block',
     $ ' under',/7x,'"single-salt parameters".')
          endif
          nerr = nerr + 1
          go to 999
        else
c
          if (nn .eq. 1) then
c
c           Have two cations and a neutral (c'cn).
c
            write (noutpt,1154) unam1(1:j2),unam2(1:j3),unam3(1:j4)
            write (nttyo,1154) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1154 format(/' * Error - (EQPT/tripck) Have found an illegal',
     $ ' data block for the',/7x,'species triplet ',a,', ',a,', ',
     $ a,' among the blocks',/7x,'for "mixture parameters". Two',
     $ ' cations appear with a neutral.',/7x,'This is not a',
     $ ' valid combination in the context of the normal Pitzer',
     $ /7x,'treatment of electrolyte solutions as it corresponds',
     $ ' to a',/7x,'quaternary system.')
            nerr = nerr + 1
            go to 999
          endif
c
c         Have two cations and an anion (cc'a). That is a legal
c         combination.
c
        endif
      endif
c
c* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
c
c     Check for other illegal combinations involving two or more
c     anions.
c
      if (na .eq. 3) then
        if (.not.(qdup12 .or. qdup13 .or. qdup23)) then
c
c         Have three distinct anions (aa'a'').
c
          write (noutpt,1160) unam1(1:j2),unam2(1:j3),unam3(1:j4)
          write (nttyo,1160) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1160 format(/' * Error - (EQPT/tripck) Have found an illegal data',
     $ ' block for the',/7x,'species triplet ',a,', ',a,', ',a,
     $ ' among the blocks',/7x,'for "mixture parameters". Three',
     $ ' distinct anions appear. This is',/7x,'not a valid',
     $ ' combination in the context of the normal Pitzer',
     $ /7x,'treatment of electrolyte solutions as it corresponds',
     $ ' to a',/7x,'quaternary system.')
          nerr = nerr + 1
          go to 999
        endif
c
        if (qdup12 .and. qdup13) then
c
c         Have one anion appearing three times (aaa).
c
          write (noutpt,1162) unam1(1:j2),unam2(1:j3),unam3(1:j4)
          write (nttyo,1162) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1162 format(/' * Error - (EQPT/tripck) Have found an illegal data',
     $ ' block for the',/7x,'species triplet ',a,', ',a,', ',a,
     $ ' among the blocks',/7x,'for "mixture parameters". A single',
     $ ' anion appears three times. This',/7x,'is not a valid',
     $ ' combination in the context of the normal Pitzer',
     $ /7x,'treatment of electrolyte solutions. The corresponding',
     $ ' parameters are',/7x,'zero by convention.')
          nerr = nerr + 1
          go to 999
        endif
c
        if (qdup12 .or. qdup13 .or. qdup23) then
c
c         Have one anion appearing two times (aaa').
c
          write (noutpt,1164) unam1(1:j2),unam2(1:j3),unam3(1:j4)
          write (nttyo,1164) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1164 format(/' * Error - (EQPT/tripck) Have found an illegal data',
     $ ' block for the',/7x,'species triplet ',a,', ',a,', ',a,
     $ ' among the blocks',/7x,'for "mixture parameters". A single',
     $ ' anion appears twice with another',/7x,'anion. This is',
     $ ' not a valid combination in the context of the normal',
     $ /7x,'Pitzer treatment of electrolyte solutions. The',
     $ ' corresponding',/7x,'parameters are not used.')
          nerr = nerr + 1
          go to 999
        endif
      endif
c
      if (na .eq. 2) then
c
        if (qdup12 .or. qdup23 .or. qdup13) then
c
          if (nn .eq. 1) then
c
c           Have a repeated anion and a neutral (aan).
c
            write (noutpt,1170) unam1(1:j2),unam2(1:j3),unam3(1:j4)
            write (nttyo,1170) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1170 format(/' * Error - (EQPT/tripck) Have found an illegal',
     $ ' data block for the',/7x,'species triplet ',a,', ',a,', ',
     $ a,' among the blocks',/7x,'for "mixture parameters". One',
     $ ' anion appears twice with a neutral.',/7x,'This is not a',
     $ ' valid combination in the context of the normal Pitzer',
     $ /7x,'treatment of electrolyte solutions. The',
     $ ' corresponding parameters',/7x,'are not used.')
          elseif (nc .eq. 1) then
c
c           Have a repeated anion and a cation (aac).
c
            write (noutpt,1173) unam1(1:j2),unam2(1:j3),unam3(1:j4)
            write (nttyo,1173) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1173 format(/' * Error - (EQPT/tripck) Have found an illegal',
     $ ' data block for the',/7x,'species triplet ',a,', ',a,', ',
     $ a,' among the blocks',/7x,'for "mixture parameters". One',
     $ ' anion appears twice with a cation.',/7x,'This is not a',
     $ ' valid combination for the present type of block. The',
     $ /7x,'corresponding parameters properly appear in a block',
     $ ' under',/7x,'"single-salt parameters".')
          endif
          nerr = nerr + 1
          go to 999
        else
c
          if (nn .eq. 1) then
c
c           Have two anions and a neutral (a'an).
c
            write (noutpt,1174) unam1(1:j2),unam2(1:j3),unam3(1:j4)
            write (nttyo,1174) unam1(1:j2),unam2(1:j3),unam3(1:j4)
 1174 format(/' * Error - (EQPT/tripck) Have found an illegal',
     $ ' data block for the',/7x,'species triplet ',a,', ',a,', ',
     $ a,' among the blocks',/7x,'for "mixture parameters". Two',
     $ ' anions appear with a neutral.',/7x,'This is not a',
     $ ' valid combination in the context of the normal Pitzer',
     $ /7x,'treatment of electrolyte solutions as it corresponds',
     $ ' to a',/7x,'quaternary system.')
            nerr = nerr + 1
            go to 999
          endif
c
c         Have two anions and a cation (aa'c). That is a legal
c         combination.
c
        endif
      endif
c
c* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
c
  999 continue
c
      end
|
{"hexsha": "f935fc5fbd756e615aa6e576a11a48820610a1b3", "size": 17514, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/eqpt/src/tripck.f", "max_stars_repo_name": "39alpha/eq3_6", "max_stars_repo_head_hexsha": "4ff7eec3d34634f1470ae5f67d8e294694216b6e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/eqpt/src/tripck.f", "max_issues_repo_name": "39alpha/eq3_6", "max_issues_repo_head_hexsha": "4ff7eec3d34634f1470ae5f67d8e294694216b6e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-11-30T15:48:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-02T18:16:22.000Z", "max_forks_repo_path": "src/eqpt/src/tripck.f", "max_forks_repo_name": "39alpha/eq3_6", "max_forks_repo_head_hexsha": "4ff7eec3d34634f1470ae5f67d8e294694216b6e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9863013699, "max_line_length": 72, "alphanum_fraction": 0.5500742263, "num_tokens": 5820}
|
#!/usr/bin/env python
# coding: utf-8
# # Sample Statistics
# Reading: Emile-Geay Chapter 4.I and 4.II (p51-58)
#
# Other resources:
# https://en.wikipedia.org/wiki/Sampling_distribution
#
# https://en.wikipedia.org/wiki/Central_limit_theorem
# In[2]:
# Clear the IPython namespace before the session starts.
get_ipython().run_line_magic('reset', '')
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# These are some parameters to make figures nice (and big)
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
plt.rcParams['figure.figsize'] = 16,8
# NOTE: 'figure.figsize' here (15, 5) overrides the 16,8 set just above,
# since plt.rcParams.update(params) is applied last.
params = {'legend.fontsize': 'x-large',
          'figure.figsize': (15, 5),
          'axes.labelsize': 'x-large',
          'axes.titlesize':'x-large',
          'xtick.labelsize':'x-large',
          'ytick.labelsize':'x-large'}
plt.rcParams.update(params)
# # Sample Mean
# Consider a Gaussian process. Let's say this process has true mean $\mu$ and true variance $\sigma^2$. When we work with random samples in python (or any programming language), the process mean and process variance are the values of $\mu$ and $\sigma$ we pass to the method that generates random samples such as
# <code>stats.norm.rvs(loc=mu,scale=sigma, size=Ndraws)</code>
#
# We denote a random variable with a Normal/Gaussian distribution with location/mean $\mu$ and scale/variance $\sigma^2$ as $$ X\sim\mathcal{N}(\mu,\sigma^2)$$
#
# If we only have a sample of this random variable we can estimate the mean using the **sample mean**:
# $$\overline{X}_n=\frac{1}{n}\sum_{i=1}^n X_i$$.
#
# The law of large numbers tells us that the sample mean converges to the process mean for a large enough sample.
# ### **Definition: Consistency**
# An estimator is consistent if it asymptotes to the process value *as the sample size increases*. For the mean, that statement of consistency is precisely the law of large numbers:
#
# $$\overline{X}_n \rightarrow \mu \text{ as } n\rightarrow \infty$$
#
# It turns out, the sample mean is also an **unbiased** estimator.
#
# ### Definition: Unbiased estimator.
# An estimator is unbiased if its expectation is the same as the process value.
#
# $$E(\overline{X}_n)=E(X)=\mu$$
#
#
# While the two may look similar, they are actually different. The sample mean is itself a random variable, as it is the sum of $n$ random variables. Thus, it will have a distribution and it will have a mean - or expected - value.
#
# Here is one way to think about this. Say we are taking daily measurements of a quantity in the lab. We want to measure a true quantity, but we can only do so with some error. Let's say those errors are normally distributed We can model our measurement as:
# $$X=\mu +\varepsilon$$
# where $\varepsilon$ are random errors with zero-mean and variance $\sigma^2$. In this case our measurement $X$ is a random variable whose process mean is the true value of the quantity we are after.
#
# Let's say we take a sample of size $n$ of that process. Consistency tells us that as the sample mean gets *larger*, i.e., as we take more independent measurements and average them together, we would approach the true mean. The bias tell us what would happen if we repeat the measurement. Say, we take another sample mean of size $n$ tomorrow, and another the day after, and so on. Would the mean of all of those realizations of the sample mean also approach the true mean? Yes, if the estimator is also unbiased.
#
# In practice, we may not be able to either take an infinitely large sample, or repeat the measurement. It's possible that all we have is a sample of size n. We would like to know whether that sample is an unbiased estimate.
#
# For the sample mean, this is easy to prove. Since the sample mean is a random variable, we can also apply the law of large numbers to it: Expectation of the sample mean would actually be the average of an infinite number of sample means.
#
# $$E(\overline{X}_n)=\lim_{k\rightarrow \infty} \frac{1}{k}\sum_{j=1}^k (\overline X_n)_j$$
#
# A simple re-ordering of the sums shows us that the sample mean is an unbiased estimator:
#
# $$E(\overline{X}_n)=\lim_{k\rightarrow \infty} \frac{1}{k}\frac{1}{n}\sum_{j=1}^k\sum_{i=1}^n X_{i,j}$$
# $$E(\overline{X}_n)=\frac{1}{n}\sum_{j=1}^n\left[\lim_{k\rightarrow \infty} \frac{1}{k}\sum_{i=1}^k X_{i,j}\right]$$
#
# By the law of large numbers,
# $$E(\overline{X}_n)=\frac{1}{n}\sum_{j=1}^n \mu=\mu$$
#
#
#
#
# Let's convince us of this with some numerics. Let's take a sample mean, look at what happens as we increase the sample size, and then let's look at what the distribution of the sample mean is.
#
# **Consistency:**
# In[8]:
# Consistency of the sample mean: the running mean approaches mu as n grows.
mu = 0.2              # process (true) mean
sigma = 1             # process standard deviation
sample_size_max = 10000

# One long sequence of independent draws from N(mu, sigma^2).
X_norm = stats.norm.rvs(loc=mu, scale=sigma, size=sample_size_max)

# Running mean of the first n draws, n = 1..sample_size_max, via a single
# cumulative sum in O(N). (The original per-n loop was O(N^2), never
# assigned entry 0 — plotting a spurious zero — and skipped the last draw.)
sample_mean = np.cumsum(X_norm) / np.arange(1, sample_size_max + 1)

fig, ax = plt.subplots(1, 1)
ax.plot(sample_mean)
ax.grid()
ax.set_xlabel('n')
ax.set_ylabel('sample mean $=\sum_{i=1}^n X_i$')
# **Bias**
# In the cell below, change the number of samples to convince yourself that the expected value of the sample mean converges to the process mean:
# In[15]:
# Bias of the sample mean:
# draw many independent samples of fixed size and look at the distribution
# of their means around the process mean.
mu = 3
sigma = 2

# sample size and number of samples
sample_size = 20
nsamples = 10000

# Draw all samples at once (one row per sample, one column per observation)
# and average each row; this replaces the Python loop over nsamples draws.
draws = stats.norm.rvs(loc=mu, scale=sigma, size=(nsamples, sample_size))
sample_mean = draws.mean(axis=1, keepdims=True)

fig, ax = plt.subplots(1, 1)
ax.hist(sample_mean, 30)
ax.vlines(np.mean(sample_mean), ymin=0, ymax=nsamples / 10, colors='r', label='expected sample mean')
ax.vlines(mu, ymin=0, ymax=nsamples / 10, colors='y', label='process mean')
# The original set the x-label twice ('sample variance' was a leftover);
# only the correct label is kept.
ax.set_xlabel('sample mean')
ax.set_ylabel('number of realizations')
ax.set_title('distribution of sample mean')
# the vline labels were defined but never shown without a legend call
ax.legend()
# <hr style="border:2px solid black"> </hr>
#
#
#
# <hr style="border:2px solid black"> </hr>
# # Sample Variance / Standard Deviation
#
# Remember, the variance is defined as:
# $$V(X)=E[(X-\mu)^2]$$
#
# Just like the sample mean, we can define an estimator for the variance as the sample variance.
# $$s_n=\frac{1}{n}\sum_{i=1}^n(X_i-\overline{X}_n)^2$$
#
# Now let's check consistency and bias for the sample variance
# In[179]:
# Consistency of the (uncorrected) sample variance:
# plot the running sample variance against the sample size.
mu = 10
sigma = 4  # variance = sigma^2
sample_size_max = 5000

X_norm = stats.norm.rvs(loc=mu, scale=sigma, size=sample_size_max)

# Running biased (1/n) variance for n = 1..sample_size_max in O(N) using
# var_n = E[X^2]_n - (E[X]_n)^2 instead of re-scanning the prefix for each
# n (O(N^2)). This also fills entry 0, which the previous loop left at 0.
# NOTE(review): this one-pass form can lose precision when |mu| >> sigma;
# harmless for the values used here.
counts = np.arange(1, sample_size_max + 1)
running_mean = np.cumsum(X_norm) / counts
running_sq_mean = np.cumsum(X_norm ** 2) / counts
sample_var = (running_sq_mean - running_mean ** 2).reshape(-1, 1)

fig, ax = plt.subplots(1, 1)
ax.plot(sample_var, label='sample variance')
ax.hlines(sigma ** 2, xmin=0, xmax=sample_size_max, color='r', label='process variance')
ax.grid()
ax.set_xlabel('n')
ax.legend()
ax.set_ylabel('sample variance')
# In[59]:
# Bias of the (uncorrected) sample variance:
# draw many independent samples of fixed size and inspect the distribution
# of their variances around the process variance.
mu = 0.4
sigma = 0.5  # variance = sigma^2

# sample size and number of samples
sample_size = 200
nsamples = 15000

# Draw all samples at once (one row per sample) and take the biased (1/n)
# variance of each row. This fixes a bug in the original loop, which sliced
# X_norm[0:n] with the *sample counter* n instead of using the whole sample
# of `sample_size` draws, so the first 200 sample variances were computed
# from truncated samples.
draws = stats.norm.rvs(loc=mu, scale=sigma, size=(nsamples, sample_size))
sample_var = draws.var(axis=1, keepdims=True)  # ddof=0 -> (1/n)*sum((x-xbar)^2)

fig, ax = plt.subplots(1, 1)
ax.hist(sample_var, 30, rwidth=0.9)
ax.vlines(np.mean(sample_var), ymin=0, ymax=nsamples / 5, colors='r', label='expected sample variance ')
ax.vlines(sigma ** 2, ymin=0, ymax=nsamples / 5, colors='y', label='process variance')
ax.set_xlabel('sample variance')
ax.set_ylabel('number of realizations')
ax.set_title('Distribution of Sample variance (200 years)')
ax.legend()
ax.set_xlim(left=0, right=0.5)
# So, the simple (uncorrected) sample variance is a ***consistent, but biased*** estimator of the process variance. Sure, if the sample is large enough, it will eventually converge. But for finite samples, its expected value is **not** equal to the true value of the process variance.
#
# It turns out that if we want a consistent *and* unbiased estimator for the variance we have to use a corrected sample variance
# $$s_{n-1}=\frac{1}{n-1}\sum_{i=1}^n(X_i-\overline{X}_n)^2$$
#
#
# We will show why that is in class.
# ## Exercise:
# Show (numerically) that the corrected sample variance is unbiased. Repeat the experiment above by estimating the mean of the distribution of the corrected sample variance, and show that it matches the process variance.
# In[175]:
# Exercise code block:
# ## Sampling Distribution & the central limit theorem
#
# Since the sample mean is a random variable, it means it has a distribution. The Central Limit Theorem tells us what that distribution is:
#
# #### Central Limit Theorem:
# For a sequence $\{X_1,\ldots, X_n\}$,of independent and identically distributed random variables with mean $\mu$ and variance $\sigma^2$, if the sample size $n$ is large enough, the distribution of the sample mean is normal with mean $\mu$ and variance $\sigma^2/n$:
#
# $$\overline{X}_n=\frac{1}{n}\sum_{i=1}^n X_i \sim \mathcal N(\mu,\sigma^2/n)$$.
#
# This is one of the most powerful results in statistics. It tells us how quickly the uncertainty in the mean decreases: in particular, the variance decreases as one over the number of observations (and the standard deviation decreases as one over the square root of the number of observations).
#
#
# **Attention** Notice that the Central Limit Theorem does **not** require the random variables to be normal/gaussian. That's right, the sample mean of **any** random variable tends to be normal/gaussian for a large enough sample.
#
# 
# In[41]:
# Sampling distribution of the mean (CLT demo):
# draw many samples of fixed size and compare the histogram of their means
# with the normal density predicted by the Central Limit Theorem.
mu = 0.4
sigma = 0.5

# sample size and number of samples
sample_size = 1
nsamples = 10000

# draw all samples at once and average each row (replaces the Python loop)
draws = stats.norm.rvs(loc=mu, scale=sigma, size=(nsamples, sample_size))
sample_mean = draws.mean(axis=1, keepdims=True)

# the pdf of the normal distribution suggested by the CLT, evaluated from
# -4 to +4 standard deviations around the mean
mu_clt = mu
sigma_clt = sigma / np.sqrt(sample_size)
x_clt = np.linspace(mu - 4 * sigma_clt, mu + 4 * sigma_clt, 1000)
pdf_clt = stats.norm.pdf(x_clt, mu_clt, sigma_clt)

fig, ax = plt.subplots(1, 1)
ax.hist(sample_mean, 30, density=True, rwidth=0.9)
ax.plot(x_clt, pdf_clt, label='CLT')
ax.vlines(np.mean(sample_mean), ymin=0, ymax=0.7, colors='b', label='expected sample mean')
ax.vlines(mu, ymin=0, ymax=0.7, colors='y', label='process mean')
# With density=True the y-axis is a probability density, not a count; the
# original set the x-label twice and labeled the y-axis as counts.
ax.set_xlabel('sample mean')
ax.set_ylabel('probability density')
ax.set_xlim(left=-3, right=3)
# derive the title from sample_size instead of hard-coding 'n=1'
ax.set_title('n=%d years' % sample_size)
ax.legend()
#
# ## Exercise:
# Show that the central limit theorem holds for distributions other than the normal distribution.
|
{"hexsha": "751a5c42aa7cdd2bc37591d1dcdaf07787d63f6d", "size": 11229, "ext": "py", "lang": "Python", "max_stars_repo_path": "_build/jupyter_execute/content/Module01/M01_N05_SampleStatistics.py", "max_stars_repo_name": "cdds-uiuc/simles-book", "max_stars_repo_head_hexsha": "79f0fe1133d44f6b94b4bdcd0f05ff65434240c9", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "_build/jupyter_execute/content/Module01/M01_N05_SampleStatistics.py", "max_issues_repo_name": "cdds-uiuc/simles-book", "max_issues_repo_head_hexsha": "79f0fe1133d44f6b94b4bdcd0f05ff65434240c9", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "_build/jupyter_execute/content/Module01/M01_N05_SampleStatistics.py", "max_forks_repo_name": "cdds-uiuc/simles-book", "max_forks_repo_head_hexsha": "79f0fe1133d44f6b94b4bdcd0f05ff65434240c9", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3242320819, "max_line_length": 515, "alphanum_fraction": 0.7256211595, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3183}
|
classdef medianFilter < EBSDFilter
  % medianFilter smoothes EBSD orientation maps with a median-type filter.
  %
  % For every map position a (2*numNeighbours+1) x (2*numNeighbours+1)
  % window of neighbouring orientations is considered and the centre
  % orientation is replaced by the window "median": the candidate with the
  % smallest mean misorientation angle to all other candidates.

  properties
    numNeighbours % number of neigbours to consider (default 1)
  end

  methods

    function F = medianFilter(varargin)
      % constructor; option 'neighbours' sets the window half-width

      F.numNeighbours = get_option(varargin,'neighbours',1);

      % warn whenever the filter is switched to a hexagonal grid
      addlistener(F,'isHex','PostSet',@check);

      function check(varargin)
        if F.isHex
          warning(['Hexagonal grids are not yet fully supportet for the medianFilter. ' ...
            'It might give reasonable results anyway']);
        end
      end
    end

    function ori = smooth(F,ori,quality)
      % smooth - replace each orientation by the median of its window
      %
      % Input
      %  F       - medianFilter
      %  ori     - matrix of orientations (the EBSD map)
      %  quality - per-pixel quality; pixels with quality==0 are discarded
      %
      % Output
      %  ori     - smoothed orientations

      % mark untrusted measurements as missing
      ori(quality==0) = nan;

      % this projects to the fundamental region around the mean
      [~,ori] = mean(ori);

      % make everything quaternion
      q = quaternion(ori);

      % some shortcuts: window half-width nn and full width dn
      nn = F.numNeighbours;
      dn = 1+2*nn;

      % the mean distance from every candidate to all others
      meanDist = zeros([size(q),2*F.numNeighbours+1,2*F.numNeighbours+1]);

      % make q a bit larger: pad the border with NaN quaternions so that
      % every map pixel has a complete window
      q = [quaternion.nan(F.numNeighbours,size(q,2)+2*F.numNeighbours);...
        [quaternion.nan(size(q,1),F.numNeighbours),...
        q,quaternion.nan(size(q,1),F.numNeighbours)];...
        quaternion.nan(F.numNeighbours,size(q,2)+2*F.numNeighbours)];

      % compute for any candidate the mean distance to all other points
      % the first two loops are for the candidate
      for i1 = 1:dn
        for j1 = 1:dn

          % the candidate
          qq = q(i1+(0:end-dn),j1+(0:end-dn));

          count = zeros(size(qq));

          % compute the distance from the candidate to all other candidates
          for i2 = 1:dn
            for j2 = 1:dn

              omega = angle(qq,q(i2+(0:end-dn),j2+(0:end-dn)));

              % accumulate angles while ignoring NaN entries
              [meanDist(:,:,i1,j1),count] = nanplus(meanDist(:,:,i1,j1),omega,count);

            end
          end

          % normalize by the number of valid neighbours
          meanDist(:,:,i1,j1) = meanDist(:,:,i1,j1) ./ count;

        end
      end

      % find median: the window position with the smallest mean distance
      meanDist = reshape(meanDist,[size(qq),(2*F.numNeighbours+1)^2,]);
      [mm,id] = min(meanDist,[],3);

      [i,j] = ind2sub(size(qq),1:length(qq));
      [ii,jj] = ind2sub([2*F.numNeighbours+1 2*F.numNeighbours+1],id);

      % in regions where everything is nan take simply the center point
      % we may later weaken this to allow inpainting
      ii(isnan(mm)) = nn+1;
      jj(isnan(mm)) = nn+1;

      % compute the final indices to the median
      ind = sub2ind(size(q),i(:)+ii(:)-1,j(:)+jj(:)-1);

      % switch to median
      ori(1:length(ori)) = q(ind);

    end

  end
end
|
{"author": "mtex-toolbox", "repo": "mtex", "sha": "f0ce46a720935e9ae8106ef919340534bca1adcb", "save_path": "github-repos/MATLAB/mtex-toolbox-mtex", "path": "github-repos/MATLAB/mtex-toolbox-mtex/mtex-f0ce46a720935e9ae8106ef919340534bca1adcb/EBSDAnalysis/EBSDSmoothing/medianFilter.m"}
|
function tests = test_ft_preproc_rereference
% MEM 1gb
% WALLTIME 00:10:00
% DEPENDENCY ft_preproc_rereference
%
% Test harness for ft_preproc_rereference: when an output is requested it
% returns a test suite for MATLAB's unit-test runner, otherwise it simply
% executes every local test function in this file.

if nargout
  % assume that this is called by RUNTESTS
  tests = functiontests(localfunctions);
else
  % assume that this is called from the command line
  fn = localfunctions;
  for i=1:numel(fn)
    feval(fn{i});
  end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function testOptions(testCase)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Exercise ft_preproc_rereference with different reference-channel
% selections and referencing methods (avg, median, rest) on random data and
% check that every configuration produces a distinct output.

nchan   = 8;
nsample = 1000;
% random data with a +1 offset so the average reference has an effect
dat = randn(nchan, nsample) + 1;

handlenan = false;
% random leadfield, only used by the 'rest' method
leadfield = randn(8, 1000);

result = {};
result{end+1} = ft_preproc_rereference(dat, 1,   'avg',    handlenan);
result{end+1} = ft_preproc_rereference(dat, 1:4, 'avg',    handlenan);
result{end+1} = ft_preproc_rereference(dat, 1:8, 'avg',    handlenan);
result{end+1} = ft_preproc_rereference(dat, 1:4, 'median', handlenan);
result{end+1} = ft_preproc_rereference(dat, 1:8, 'median', handlenan);
result{end+1} = ft_preproc_rereference(dat, 1,   'rest',   handlenan, leadfield);
result{end+1} = ft_preproc_rereference(dat, 1:4, 'rest',   handlenan, leadfield);
result{end+1} = ft_preproc_rereference(dat, 1:8, 'rest',   handlenan, leadfield);

% all iterations were done with (slightly) different options, hence the results should not be equal
for i=1:numel(result)
  for j=(i+1):numel(result)
    assert(~isequal(result{i}, result{j}), 'the results %d and %d should not be equal', i, j);
  end
end
|
{"author": "fieldtrip", "repo": "fieldtrip", "sha": "c2039be598a02d86b39aae76bfa7aaa720f9801c", "save_path": "github-repos/MATLAB/fieldtrip-fieldtrip", "path": "github-repos/MATLAB/fieldtrip-fieldtrip/fieldtrip-c2039be598a02d86b39aae76bfa7aaa720f9801c/test/test_ft_preproc_rereference.m"}
|
#include <boost/shared_array.hpp>
#include <iostream>
using namespace std;
int main()
{
boost::shared_array<int> p1{new int[1]};
{
boost::shared_array<int> p2{p1};
p2[0] = 1;
}
cout << p1[0] << '\n';
}
|
{"hexsha": "489c36145773edc181d1533e408f9c38d6699750", "size": 220, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Boost/smartptrs/sharedarray.cpp", "max_stars_repo_name": "Fernal73/LearnCpp", "max_stars_repo_head_hexsha": "100aa80e447fe7735d7f8217c2ec72ae32ed1c7f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Boost/smartptrs/sharedarray.cpp", "max_issues_repo_name": "Fernal73/LearnCpp", "max_issues_repo_head_hexsha": "100aa80e447fe7735d7f8217c2ec72ae32ed1c7f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Boost/smartptrs/sharedarray.cpp", "max_forks_repo_name": "Fernal73/LearnCpp", "max_forks_repo_head_hexsha": "100aa80e447fe7735d7f8217c2ec72ae32ed1c7f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.6666666667, "max_line_length": 42, "alphanum_fraction": 0.5954545455, "num_tokens": 74}
|
import glob
import math
from os import path, mkdir
import random
import cv2
import dlib
from imutils import face_utils
import numpy as np
import pandas as pd
from skimage.util import random_noise
DATASET_PATH = "C:\\youtube_faces"
class DetectorInput:
    """Container for one dlib training example: the image file, its face
    bounding box (top/left corner plus width/height) and the landmark
    points belonging to that box."""

    _FIELDS = ("file", "top", "left", "width", "height", "points")

    def __init__(self, file: str, top: int, left: int, width: int, height: int, points: list):
        for attr, value in zip(self._FIELDS, (file, top, left, width, height, points)):
            setattr(self, attr, value)
def build_xml(inputs: list, filename: str):
    """Write a dlib imglab-style training XML file.

    Parameters
    ----------
    inputs : list
        DetectorInput-like objects exposing `file`, `top`, `left`, `width`,
        `height` and `points` (an iterable of (x, y) pairs).
    filename : str
        Path of the XML file to write (overwritten if it exists).
    """
    # Collect the pieces in a list and join once at the end instead of
    # repeated `xmlstr +=` concatenation, which is quadratic in the worst case.
    parts = ["<dataset>\n", "    <images>\n"]
    for next_input in inputs:
        parts.append("        <image file='%s'>\n" % next_input.file)
        parts.append("            <box top='%d' left='%d' width='%d' height='%d'>\n"
                     % (next_input.top, next_input.left, next_input.width, next_input.height))
        for i, (x, y) in enumerate(next_input.points):
            # dlib identifies parts by their zero-based index
            parts.append("                <part name='%d' x='%d' y='%d' />\n" % (i, x, y))
        parts.append("            </box>\n")
        parts.append("        </image>\n")
    parts.append("    </images>\n")
    parts.append("</dataset>")
    with open(filename, "w") as f:
        f.write("".join(parts))
def main():
    """Build dlib-format landmark training data from the YouTube Faces set.

    For every frame of every video: detect exactly one face, keep the frame
    only if the annotated landmarks overlap the detected box, save the frame
    (plus a Gaussian-noise copy) under ./data, and finally write a 90/10
    train/test split as dlib XML files.
    """
    video_df = pd.read_csv(DATASET_PATH + "\\youtube_faces_with_keypoints_full.csv")
    npz_files = glob.glob(DATASET_PATH + "\\youtube_faces_*\\*.npz")
    # video id = file name without directory and extension
    video_ids = [x.split('\\')[-1].split('.')[0] for x in npz_files]
    full_paths = {}
    for video_id, full_path in zip(video_ids, npz_files):
        full_paths[video_id] = full_path
    detector = dlib.get_frontal_face_detector()
    if not path.exists("./data"):
        mkdir("./data")
    inputs = []
    for video_id, video_path in full_paths.items():
        video_info = video_df.loc[video_df["videoID"] == video_id]
        frame_count = int(video_info["videoDuration"].values[0])
        video_file = np.load(video_path)
        # assumed layout: frames stacked along the last axis -- TODO confirm
        images = video_file["colorImages"]
        landmarks = video_file["landmarks2D"]
        for i in range(frame_count):
            original_image = images[:,:,:,i]
            # dataset frames are RGB; OpenCV works in BGR
            original_image = cv2.cvtColor(original_image, cv2.COLOR_RGB2BGR)
            # Also save a copy with Gaussian noise. Gaussian noise most-closely
            # matches the noise apparent in low-quality webcam footage.
            image_gaussian = random_noise(original_image, mode="gaussian", mean=0, var=0.05, clip=True)
            # random_noise returns floats in [0, 1]; scale back to uint8
            image_gaussian = (255 * image_gaussian).astype(np.uint8)
            image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
            faces = detector(image, 1)
            # keep only frames with exactly one detected face
            if len(faces) != 1:
                continue
            left, top, width, height = face_utils.rect_to_bb(faces[0])
            pts = landmarks[:,:,i]
            pts_x, pts_y = pts.T
            pts_min_x = np.min(pts_x)
            pts_max_x = np.max(pts_x)
            pts_min_y = np.min(pts_y)
            pts_max_y = np.max(pts_y)
            # Check if any of the keypoints are in the frame
            if pts_max_x < left or pts_min_x > left + width or pts_max_y < top or pts_min_y > top + height:
                continue
            filename = "data/%s_%d.png" % (video_id, i)
            if not path.isfile(filename):
                cv2.imwrite(filename, original_image)
            filename_gaussian = "data/%s_%d_gaussian.png" % (video_id, i)
            if not path.isfile(filename_gaussian):
                cv2.imwrite(filename_gaussian, image_gaussian)
            # both the clean and the noisy image share the same annotations
            next_input = DetectorInput(filename, top, left, width, height, list(pts))
            next_input_gaussian = DetectorInput(filename_gaussian, top, left, width, height, list(pts))
            inputs += [next_input, next_input_gaussian]
    random.shuffle(inputs)
    split_index = math.floor(len(inputs) * 0.9)  # 90% train, 10% test since our dataset is somewhat large
    train_inputs = inputs[0:split_index]
    test_inputs = inputs[split_index:]
    build_xml(train_inputs, "youtube_faces_train.xml")
    build_xml(test_inputs, "youtube_faces_test.xml")
# Script entry point: build the dlib training/test XML datasets.
if __name__ == "__main__":
    main()
|
{"hexsha": "23819c9e7ca5e7a64eb9943ca97f0a7faf073f7c", "size": 4094, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocess_landmarks.py", "max_stars_repo_name": "karashiiro/facial-landmark-tracking", "max_stars_repo_head_hexsha": "787b5663be4f824f631145913136e06a6f970f23", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "preprocess_landmarks.py", "max_issues_repo_name": "karashiiro/facial-landmark-tracking", "max_issues_repo_head_hexsha": "787b5663be4f824f631145913136e06a6f970f23", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocess_landmarks.py", "max_forks_repo_name": "karashiiro/facial-landmark-tracking", "max_forks_repo_head_hexsha": "787b5663be4f824f631145913136e06a6f970f23", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8828828829, "max_line_length": 148, "alphanum_fraction": 0.6128480703, "include": true, "reason": "import numpy", "num_tokens": 1038}
|
function [] = sb_fix_unwrap_manual(reset_flag,ix_reduced_list)
% [] = sb_fix_unwrap_manual(reset_flag,ix_reduced_list)
% script to manually fix unwrapping errors in Small Baseline processing
%
% This function allows to manually fix unwrapping errors by iterating
% the results of ps_plot('rsb'). You can add or subtract an integer number
% of 2pi cycles over a selected region, or correct with respect to the closest
% wrap of another region. You can keep iterating till the 'rsb'
% errors have reduced. If it goes wrong and you do want to reset your
% unwrapped data back to that of the original you can set the reset_flag to 1;
%
% INPUTS (both optional):
%   reset_flag      - when 1, restart from the original unwrapped data
%                     rather than the result of a previous run (default 0)
%   ix_reduced_list - subset of interferogram indices to plot (default: all
%                     interferograms minus the dropped ones)
%
% By Bekaert David - University of Leeds - Sept 2014
%
% modifications:
% Bekaert David 05/2015 Fix in command line output, fix the code for ps_plot
% Bekaert David 09/2015 Add file restoring.
% Bekaert David 12/2016 Add closest wrap option, change to stamps_save code
% Bekaert David 12/2016 Add argument which is a subset of interferograms to reduce plotting needs.

% deramp IFGs
deramp_flag = 0;        % optional deramp rsb. Might be needed when processed with gamma
% what to use as guidance 'rsb' or 'usb' to click on
plot_option = 'usb';    % only 'rsb' or 'usb'
dem_option = 'd';       % only 'd' or ''

% default the input flags
if nargin<1
    reset_flag=0;
end
if isempty(reset_flag)
    reset_flag=0;
end
if nargin<2
    ix_reduced_list=[];
end
if ~strcmpi(plot_option,'usb') && ~strcmpi(plot_option,'rsb')
    error('Only plot_option: rsb and usb supported');
end
if ~strcmpi(dem_option,'d') && ~isempty(dem_option)
    error('Only dem_option: d and [] supported');
end

% keeping the original data, in case the user wants to reset it later
if exist('phuw_sb2_original.mat','file')~=2
    copyfile('phuw_sb2.mat','phuw_sb2_original.mat')
    copyfile('phuw2.mat','phuw2_original.mat')
    copyfile('phuw_sb_res2.mat','phuw_sb_res2_original.mat')
else
    copyfile('phuw_sb2.mat','phuw_sb2_previous_temp.mat')
    copyfile('phuw2.mat','phuw2_previous_temp.mat')
    copyfile('phuw_sb_res2.mat','phuw_sb_res2_previous_temp.mat')
end

% use previous or start from scratch
if reset_flag==1
    fprintf('Using the original data\n')
    ph_input = load('phuw_sb2_original');
else
    fprintf('Use data from a previous run\n')
    ph_input = load('phuw_sb2.mat');
end

% loading the interferogram information
ps = load('ps2.mat');

% generating list of ifgs to be plotted
drop_ifg_index = getparm('drop_ifg_index');
if ~isempty(ix_reduced_list)
    % removing ifgs that have been dropped before
    for k=1:length(drop_ifg_index)
        ix_temp = find(drop_ifg_index(k)==ix_reduced_list);
        ix_reduced_list(ix_temp)=[];
    end
    if sum(ix_reduced_list>ps.n_ifg)>0
        fprintf('Your list is larger than number of IFGS, will reset to max number of IFG \n')
        ix_reduced_list(ix_reduced_list>ps.n_ifg)=[];
    end
end
% reset the list in case nothing was left
if isempty(ix_reduced_list)
    ix_reduced_list = 1:ps.n_ifg;
    ix_reduced_list(drop_ifg_index)=[];
end

% deramping option strings passed on to ps_plot
if deramp_flag==1
    deramp_option = ['-o'];
else
    deramp_option = '';
end
if ~isempty(dem_option)
    if deramp_flag==1
        dem_option='-do';
    else
        dem_option= '-d';
    end
end
if strcmpi(plot_option,'usb')
    plot_option = [plot_option dem_option];
elseif strcmpi(plot_option,'rsb')
    plot_option = [plot_option deramp_option];
end

% plotting the current rsb data
ps_plot(['rsb' deramp_option],1,0,0,ix_reduced_list);

% get the interferogram that the user needs to adapt
repeat=1;
while repeat==1
    ix_ifg = input('Which interferogram to you want to correct? ','s');
    ix_ifg = str2num(ix_ifg);
    if isempty(ix_ifg)
        repeat=1;
    elseif ix_ifg<=ps.n_ifg
        repeat=0;
    else
        fprintf(['Not that many interferograms \n'])
    end
end

% plot the rsb value for this interferogram
% optionally one can use deramped rsb, this is for the case the interferograms
% were not created from relative differences, i.e. each interferogram had a
% baseline estimated and there might be some ramping errors because of that.
h_fig = ps_plot([plot_option],1,0,0,ix_ifg);
set(h_fig,'name',['Original interferogram']);

% Getting a polygon of the incorrect unwrapped area
fprintf('Define the incorrect unwrapped region through a polygon by clicking on the figure. \n')
repeat_zoom=1;
while repeat_zoom==1
    action_flag= input('First, zoom to your region of interest. Press [c] to continue. ','s');
    if strcmpi(action_flag,'c')
        repeat_zoom=0;
    end
end
fprintf('Now, start defining the polygon by outlining the incorrect region. \n Press enter once done\n')
% call the figure in case the user clicked somewhere else
figure(h_fig);
polygon=ginput;
% plotting the polygon on top
hold on
plot([polygon(:,1) ;polygon(1,1)],[polygon(:,2);polygon(1,2)],'r-','linewidth',2)

% loop until the user is happy with it
continue_flag=1;
while continue_flag==1
    repeat=1;
    fprintf('You can shift the whole region by an integer number of cycles or you can put all pixels to a specifc wrap \n')
    while repeat==1
        ix_shift= input('By how many cycles to you want to shift this region? [+-integer or inf for wrap option] ','s');
        ix_shift = str2num(ix_shift);
        if isempty(ix_shift)
            repeat=1;
        elseif ix_shift==inf
            % inf selects the "closest wrap to a reference region" option
            repeat=0;
        elseif (ix_shift./(round(ix_shift)))~=1
            fprintf(['Needs to be an integer number... \n'])
            repeat =1;
        else
            repeat=0;
        end
    end

    % finding the pixels within the polygon
    ix = inpolygon(ps.lonlat(:,1),ps.lonlat(:,2),polygon(:,1),polygon(:,2));

    % checking the option the user picked - closest wrap (inf) or shift region
    if ix_shift==inf % closest wrap option
        fprintf('Define region to which you want to define as reference wrap (average will be used!). \n Press enter once done\n')
        % call the figure in case the user clicked somewhere else
        figure(h_fig);
        polygon_ref=ginput;
        % plotting the polygon on top
        hold on
        plot([polygon_ref(:,1) ;polygon_ref(1,1)],[polygon_ref(:,2);polygon_ref(1,2)],'b-','linewidth',4)
        % finding the pixels within the reference polygon
        ix_ref = inpolygon(ps.lonlat(:,1),ps.lonlat(:,2),polygon_ref(:,1),polygon_ref(:,2));

        % check to which interferograms this should be applied
        repeat2=1;
        while repeat2==1
            action_flag= input('Do you want to apply this to all interferograms [y/n]? ','s');
            if strcmpi(action_flag,'y')
                repeat2=0;
            elseif strcmpi(action_flag,'n')
                repeat2=0;
            end
        end

        % store the original interferogram selection
        ix_ifg_or = ix_ifg;
        % BUGFIX: load the unwrapped phase *before* using its size; the
        % original code indexed size(ph_uw,2) while ph_uw was still
        % undefined on the first pass through this branch.
        ph_uw = ph_input.ph_uw;
        if strcmpi(action_flag,'y')
            ix_ifg = 1:size(ph_uw,2);
        end

        % do the estimation for each interferogram
        ref_phase = nanmean(ph_uw(ix_ref,ix_ifg),1);
        for k_ifgs=1:length(ix_ifg)
            % shift the region by an integer number of cycles so it ends up
            % closest to the mean phase of the reference region
            radian_shift = round((ph_uw(ix,ix_ifg(k_ifgs))-ref_phase(k_ifgs))./(2*pi))*2*pi;
            ph_uw(ix,ix_ifg(k_ifgs)) = ph_uw(ix,ix_ifg(k_ifgs)) - radian_shift;
        end
        % restore the previously selected interferogram for plotting purposes
        ix_ifg = ix_ifg_or;
        clear ref_phase

    else % option of shifting the interferogram
        % the shift in radians
        radian_shift = ix_shift*2*pi;
        % modifying the interferogram
        ph_uw = ph_input.ph_uw;
        ph_uw(ix,ix_ifg)=ph_uw(ix,ix_ifg)+radian_shift;
    end

    msd = ph_input.msd;
    % saving the data
    stamps_save('phuw_sb2.mat',ph_uw,msd);

    % re-running the small baseline inversion
    sb_invert_uw

    % plot the new residuals
    ps_plot(['rsb' deramp_option],1,0,0,ix_reduced_list);
    h_fig_new = ps_plot([plot_option],1,0,0,ix_ifg);
    set(h_fig_new,'name',['Corrected interferogram']);

    repeat=1;
    while repeat==1
        % renamed from `string` to avoid shadowing the MATLAB builtin
        answer = input('retry? [y/n] ','s');
        if strcmpi(answer,'y')
            repeat=0;
            continue_flag = 1;
        elseif strcmpi(answer,'n')
            repeat=0;
            continue_flag = 0;

            % see if the result needs to be kept or reverted
            repeat2=1;
            while repeat2==1
                action_flag= input('Keep this result [y/n]? ','s');
                if strcmpi(action_flag,'y')
                    repeat2=0;
                elseif strcmpi(action_flag,'n')
                    repeat2=0;
                    % restore the original files
                    copyfile('phuw_sb2_original.mat','phuw_sb2.mat')
                    copyfile('phuw2_original.mat','phuw2.mat')
                    copyfile('phuw_sb_res2_original.mat','phuw_sb_res2.mat')
                else
                    fprintf('y or n ...\n')
                end
            end
        else
            fprintf('y or n ...\n')
        end
    end
end
|
{"author": "dbekaert", "repo": "StaMPS", "sha": "c159eb81b16c446e0e8fdef7dd435eb22e0240ed", "save_path": "github-repos/MATLAB/dbekaert-StaMPS", "path": "github-repos/MATLAB/dbekaert-StaMPS/StaMPS-c159eb81b16c446e0e8fdef7dd435eb22e0240ed/matlab/sb_fix_unwrap_manual_new.m"}
|
import numpy as np
from forkan import dataset_path
from forkan.datasets.dsprites import load_dsprites, load_dsprites_one_fixed
from forkan.datasets.image import load_unlabeled_image_dataset
from forkan.datasets.mnist import load_mnist
def load_set(name):
    """Load the array stored under 'data' in `<dataset_path>/<name>.npz`."""
    archive_file = '{}/{}.npz'.format(dataset_path, name)
    return np.load(archive_file)['data']
def load_atari_normalized(env):
    """Load the normalized frames for an Atari environment.

    The gym id (e.g. 'PongNoFrameskip-v4') is reduced to its base game name
    ('pong') to locate `<dataset_path>/<name>-normalized.npz`.
    """
    base_id = env.replace('NoFrameskip', '').lower()
    game = base_id.split('-')[0]
    return np.load('{}/{}-normalized.npz'.format(dataset_path, game))['data']
def load_pendulum():
    """Load the random, normalized and cut visual pendulum dataset."""
    archive = np.load('{}/pendulum-visual-random-normalized-cut.npz'.format(dataset_path))
    return archive['data']
def load_uniform_pendulum():
    """Load the uniformly sampled visual pendulum dataset."""
    archive = np.load('{}/pendulum-visual-uniform.npz'.format(dataset_path))
    return archive['data']
|
{"hexsha": "2d25a63b33f7c3148b41deee874f6126afe45809", "size": 733, "ext": "py", "lang": "Python", "max_stars_repo_path": "forkan/datasets/__init__.py", "max_stars_repo_name": "llach/dl-toolkit", "max_stars_repo_head_hexsha": "33ae3d48ce6f24fc0c254b93ed3f4b8a767ffea5", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-27T17:47:58.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-27T17:47:58.000Z", "max_issues_repo_path": "forkan/datasets/__init__.py", "max_issues_repo_name": "llach/dl-toolkit", "max_issues_repo_head_hexsha": "33ae3d48ce6f24fc0c254b93ed3f4b8a767ffea5", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "forkan/datasets/__init__.py", "max_forks_repo_name": "llach/dl-toolkit", "max_forks_repo_head_hexsha": "33ae3d48ce6f24fc0c254b93ed3f4b8a767ffea5", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-01-24T16:43:03.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-24T16:43:03.000Z", "avg_line_length": 30.5416666667, "max_line_length": 95, "alphanum_fraction": 0.7435197817, "include": true, "reason": "import numpy", "num_tokens": 182}
|
import numpy as np
# Source: https://zpl.fi/aligning-point-patterns-with-kabsch-umeyama-algorithm/
def get_similarity_transform(A, B):
    """
    Compute the optimal similarity transform from B to A.

    Finds rotation `R`, translation `t` and scale `c` that best align points
    `B` to reference points `A` in the least squares sense, i.e.
    `c * R @ b_i + t ≈ a_i` for all `i`. Implements the Kabsch-Umeyama
    algorithm.

    A, B
        np.ndarray, float32, (N, 3)
    return:
        R : np.ndarray, float32, (3, 3)
        t : np.ndarray, float32, (3,)
        c : float
    """
    assert A.shape == B.shape
    n, m = A.shape

    # centroids and the total variance of the reference cloud
    centroid_a = np.mean(A, axis=0)
    centroid_b = np.mean(B, axis=0)
    var_a = np.mean(np.linalg.norm(A - centroid_a, axis=1) ** 2)

    # cross-covariance between the two centred clouds
    cross_cov = ((A - centroid_a).T @ (B - centroid_b)) / n
    U, D, VT = np.linalg.svd(cross_cov)

    # correction matrix guaranteeing a proper rotation (det(R) = +1)
    sign = np.sign(np.linalg.det(U) * np.linalg.det(VT))
    S = np.diag([1] * (m - 1) + [sign]).astype(A.dtype)

    R = U @ S @ VT
    c = var_a / np.trace(np.diag(D) @ S)
    t = centroid_a - c * R @ centroid_b
    return R, t, c
if __name__ == '__main__':
    # Small self-check: align SOURCE onto TARGET and print the transform.
    TARGET = np.float32([
        [-0.067039, 0.037248, 0.257207],
        [-0.021285, 0.123967, 0.23051],
        [-0.138212, 0.123967, 0.235361],
        [-0.077207, 0.310112, 0.13561],
        [-0.077207, -0.048883, 0.170014],
    ])
    SOURCE = np.float32([
        [-0.025177, 0.05771, 0.226592],
        [0.055307, 0.167654, 0.207354],
        [-0.111732, 0.160782, 0.215266],
        [-0.034637, 0.449523, 0.08648],
        [-0.021788, -0.052372, 0.10771],
    ])
    R, t, c = get_similarity_transform(TARGET, SOURCE)
    for label, value in (("R", R), ("t", t), ("scale", c)):
        print(f"{label}:\n{value}")
|
{"hexsha": "8523bf0948782db1a5f32f74f20de57a2b32130e", "size": 1784, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocess_custom_data/gonzalo/get_similarity_transform.py", "max_stars_repo_name": "shrubb/NeuS", "max_stars_repo_head_hexsha": "6111c4157d5c2b15fc00aa2b1ddcfb73860a6270", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "preprocess_custom_data/gonzalo/get_similarity_transform.py", "max_issues_repo_name": "shrubb/NeuS", "max_issues_repo_head_hexsha": "6111c4157d5c2b15fc00aa2b1ddcfb73860a6270", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocess_custom_data/gonzalo/get_similarity_transform.py", "max_forks_repo_name": "shrubb/NeuS", "max_forks_repo_head_hexsha": "6111c4157d5c2b15fc00aa2b1ddcfb73860a6270", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4461538462, "max_line_length": 95, "alphanum_fraction": 0.5213004484, "include": true, "reason": "import numpy", "num_tokens": 671}
|
import os
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.image import AxesImage
from matplotlib.text import Text
from scipy.spatial.distance import cosine
from tensorflow import keras, Tensor
from typing import List, Tuple, Set
import tensorflow_datasets as tfds
from tensorflow.python.data import Dataset
import tensorflow as tf
import time
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.keras.metrics import SparseCategoricalAccuracy
from tensorflow.python.ops.gen_math_ops import Mean
from tensorflow.python.training.checkpoint_management import CheckpointManager
from tensorflow.python.training.tracking.util import Checkpoint
from tensorflow_datasets.core.features.text import SubwordTextEncoder
from tqdm import tqdm
from classes import Cluster, create_padding_mask, Transformer, Example, scaled_dot_product_attention, CustomSchedule
# Data-pipeline parameters.
BUFFER_SIZE = 20000  # presumably the tf.data shuffle buffer size -- TODO confirm at usage site
BATCH_SIZE = 1000
MAX_LENGTH = 40      # maximum sequence length in tokens (see evaluate())

# Transformer hyper-parameters.
NUMBER_OF_LAYERS = 4
MODEL_DIMENSIONS = 128
FEED_FORWARD_DIMENSIONS = 512
NUMBER_OF_HEADS = 8
DROPOUT_RATE = 0.1
EPOCHS = 5

# Module-level handles; declared here and assigned elsewhere before use.
tokenizer: SubwordTextEncoder
transformer: Transformer
def create_look_ahead_mask(size):
    """Return a (size, size) mask hiding future positions.

    Entry [i, j] is 1.0 when j > i (a future token) and 0.0 otherwise —
    the standard Transformer decoder look-ahead mask.
    """
    lower_triangle = tf.linalg.band_part(tf.ones((size, size)), -1, 0)
    return 1 - lower_triangle
def create_masks(inp, tar):
    """Build the three attention masks used by the Transformer.

    Returns (encoder padding mask, combined decoder mask, decoder padding
    mask).  The combined mask merges target padding with the look-ahead
    mask for the decoder's first (self-attention) block.
    """
    # Both encoder self-attention and the decoder's second attention block
    # mask out padded positions of the encoder input.
    src_padding = create_padding_mask(inp)
    memory_padding = create_padding_mask(inp)
    # Hide future target tokens and padded target positions together.
    tar_len = keras.backend.shape(tar)[1]
    future_mask = create_look_ahead_mask(tar_len)
    tar_padding = create_padding_mask(tar)
    merged = keras.backend.maximum(tar_padding, future_mask)
    return src_padding, merged, memory_padding
def encode(lang1, lang2) -> Tuple[List[int], List[int]]:
    """Tokenize a sentence pair into subword-id lists with start/end markers.

    `tokenizer.vocab_size` serves as the start-of-sequence id and
    `vocab_size + 1` as the end-of-sequence id.  Called through
    tf.py_function (see tf_encode), so the arguments are eager tensors
    whose `.numpy()` yields the raw byte strings.  Reads the module-level
    `tokenizer`.
    """
    # Annotations corrected: these are plain Python int lists, not Tensors.
    lang1: List[int] = [tokenizer.vocab_size] + tokenizer.encode(lang1.numpy()) + [tokenizer.vocab_size + 1]
    lang2: List[int] = [tokenizer.vocab_size] + tokenizer.encode(lang2.numpy()) + [tokenizer.vocab_size + 1]
    return lang1, lang2
def evaluate(inp_sentence: str, tokenizer, transformer):
    """Greedily decode the transformer's output for one input sentence.

    Encodes `inp_sentence` wrapped in start/end tokens, then feeds the
    decoder its own argmax predictions one token at a time, for at most
    MAX_LENGTH steps or until the end token appears.

    Returns (output_ids, attention_weights); `output_ids` is a 1-D tensor
    of subword ids that still contains the leading start token.
    """
    start_token: List[int] = [tokenizer.vocab_size]
    end_token: List[int] = [tokenizer.vocab_size + 1]
    inp_sentence_enc: List[int] = start_token + tokenizer.encode(inp_sentence) + end_token
    # Add a batch dimension of size 1.
    encoder_input: Tensor = keras.backend.expand_dims(inp_sentence_enc, 0)
    # The decoder is seeded with just the start token.
    decoder_input: List[int] = [tokenizer.vocab_size]
    output: Tensor = keras.backend.expand_dims(decoder_input, 0)
    attention_weights = None
    for i in range(MAX_LENGTH):
        enc_padding_mask, combined_mask, dec_padding_mask = create_masks(encoder_input, output)
        # predictions.shape == (batch_size, seq_len, vocab_size)
        predictions, attention_weights = transformer(
            [encoder_input, output, False, enc_padding_mask, combined_mask, dec_padding_mask])
        # select the last word from the seq_len dimension
        predictions = predictions[:, -1:, :]  # (batch_size, 1, vocab_size)
        predicted_id = keras.backend.cast(keras.backend.argmax(predictions, axis=-1), tf.int32)
        # return the result if the predicted_id is equal to the end token
        if predicted_id == tokenizer.vocab_size + 1:
            return keras.backend.squeeze(output, axis=0), attention_weights
        # concatenate the predicted_id to the output which is given to the decoder as its input.
        output = keras.backend.concatenate([output, predicted_id], axis=-1)
    return keras.backend.squeeze(output, axis=0), attention_weights
def evaluate_lexis() -> None:
    """Compare sentence embeddings of near-synonymous Vulgate sentence pairs.

    For each pair, every token's contextual embeddings are collected from
    the module-level `transformer`, averaged into one vector per sentence,
    and the cosine similarity between the two sentence vectors is recorded
    in `sims`.  Reads the module-level `tokenizer` and `transformer`.
    """
    examples: List[Tuple[str, str]] = [
        ("ego sum Alpha et Omega initium et finis", "ego sum Alpha et Omega principium et finis"),
        ("et secundus angelus tuba cecinit", "et septimus angelus tuba cecinit"),
        ("et ecce equus pallidus", "et ecce equus niger"),
        ("diliges proximum tuum sicut te ipsum", "diliges proximum tuum tamquam te ipsum"),
        ("si sic eum volo manere donec veniam quid ad te", "si sic eum volo manere donec venio quid ad te"),
        ("quaerebant ergo eum prendere", "quaerebant ergo eum adprehendere"),
        ("qui credit in me habet vitam aeternam", "qui credit in Filium habet vitam aeternam")
    ]
    sims: List[float] = []
    for example in examples:
        tensors_avg: List[Tensor] = []
        # BUG FIX: the original iterated over [example[0], example[0]] and so
        # always compared a sentence with itself (cosine similarity ~1);
        # iterate over the actual pair as the commented-out line intended.
        for sent in example:
            tokens: Set[str] = set(sent.split())
            tensors: List[Tensor] = []
            for tok in tokens:
                tensors += transformer.get_embeddings_for_token(sent, tokenizer, tok)
            tensors_avg.append(transformer.avg(tensors))
        cos_sim: float = 1 - cosine(tensors_avg[0], tensors_avg[1])
        sims.append(cos_sim)
    # (The original trailing `a = 0` debugger anchor was removed.)
def evaluate_polysemy(tokenizer: SubwordTextEncoder, transformer: Transformer, threshold: float = 0.4):
    """Evaluate word-sense discrimination on the PARS test set.

    Downloads the test file and, for each example, compares the contextual
    embeddings of the target token in its two contexts.  A pair is
    predicted to share a sense (label 1) when the cosine similarity
    exceeds `threshold`; accuracy against the gold labels is printed.

    :param tokenizer: subword tokenizer used to locate target tokens
    :param transformer: trained model supplying contextual embeddings
    :param threshold: same-sense decision cutoff (default 0.4, the value
        that was previously hard-coded)
    """
    dataset_url: str = "https://box.hu-berlin.de/f/ce30d723beef4feba4c3/?dl=1"
    dataset_path: str = tf.keras.utils.get_file("pars_test.txt", dataset_url)
    # Close the file deterministically (the original leaked the handle).
    with open(dataset_path) as f:
        lines: List[str] = f.read().split("\n")
    examples: List[Example] = [Example(x) for x in lines if x]
    predictions: List[int] = []
    sims: List[float] = []
    for ex in examples:
        token1: str = ex.context1.content[ex.context1.token_range_start:ex.context1.token_range_end]
        tensors1: List[Tensor] = transformer.get_embeddings_for_token(ex.context1.content, tokenizer, token1)
        token2: str = ex.context2.content[ex.context2.token_range_start:ex.context2.token_range_end]
        tensors2: List[Tensor] = transformer.get_embeddings_for_token(ex.context2.content, tokenizer, token2)
        # Only the first occurrence's embedding is compared on each side.
        cos_sim: float = 1 - cosine(tensors1[0], tensors2[0])
        sims.append(cos_sim)
        predictions.append(1 if cos_sim > threshold else 0)
    print([x.label for x in examples])
    print(predictions)
    print(sims)
    correct_indices: List[int] = [i for i in range(len(predictions)) if predictions[i] == examples[i].label]
    print(correct_indices)
    print(f"Accuracy: {len(correct_indices) / len(examples) * 100}%")
def evaluate_polysemy_old(tokenizer: SubwordTextEncoder, transformer: Transformer):
    """Plot pairwise cosine similarities for hand-picked ambiguous Latin tokens.

    `sentences[i]` provides the context for `tokens[i]`; the embedding of the
    token's first occurrence in each sentence is compared against all earlier
    ones and the resulting matrix is rendered by plot_similarities, labelled
    with `print_tokens`.
    """
    sentences: List[str] = [
        "et percussa est tertia pars solis et tertia pars lunae et tertia pars stellarum ut obscuraretur tertia pars eorum et diei non luceret pars tertia et nox similiter",
        "nam et pars quedam fluminis Nili ibi currit",
        "Ac saepe in eum locum ventum est tanto in omnes partes diviso equitatu ut modo visum ab se Ambiorigem in fuga circumspicerent captivi nec plane etiam abisse ex conspectu contenderent ut spe consequendi inlata atque infinito labore suscepto qui se summam a Caesare gratiam inituros putarent paene naturam studio vincerent semper que paulum ad summam felicitatem defuisse videretur atque ille latebris aut saltibus se eriperet et noctu occultatus alias regiones partes que peteret non maiore equitum praesidio quam quattuor quibus solis vitam suam committere audebat",
        "numquam ante arbitror te epistulam meam legisse nisi mea manu scriptam",
        "ante diem xii Kal Decembr Milo ante mediam noctem cum magna manu in campum venit",
        "numquam enim a Pomponia nostra certior sum factus esse cui dare litteras possem",
        "quod fere plerisque accidit ut praesidio litterarum diligentiam in perdiscendo ac memoriam remittant",
        "nam statim fidem publicam postulavit",
        "habete fidem Dei",
        "Fundamentum autem est iustitiae fides id est dictorum conventorum que constantia et veritas",
        "sol ",
        "merces "
    ]
    tokens: List[str] = ["pars", "pars", "partes", "manu", "manu", "litteras", "litterarum", "fidem", "fidem", "fides",
                         "sol", "merces"]
    # for tok in tokens:
    #     print(f"{tok}: {most_similar(transformer, tok)}")
    print_tokens: List[str] = [
        "pars solis", "pars fluminis", "equitatus in omnes partes divisus", "manu scriptus", "magna manus",
        "litteras dare alicui", "praesidium litterarum", "fides publica", "fides dei", "fides iustitiae", "sol",
        "merces"]
    sims: np.ndarray = np.zeros((len(tokens), len(tokens)))
    cross_validation_k: int = 5
    # Repeat k times and fold each run into the matrix; presumably the
    # embeddings vary between runs (e.g. dropout) — TODO confirm, otherwise
    # every pass after the first recomputes identical values.
    for k in range(cross_validation_k):
        relevant_tensors: List[Tensor] = []
        for i in range(len(sentences)):
            tensors: List[Tensor] = transformer.get_embeddings_for_token(sentences[i], tokenizer, tokens[i])
            relevant_tensors.append(tensors[0])
            # Compare the newest embedding against all previously collected ones.
            for j in range(len(relevant_tensors) - 1):
                cos_sim: float = 1 - cosine(relevant_tensors[-1], relevant_tensors[j])
                # Running average (rounded) across the k passes; first pass
                # stores the raw value because the cell is still 0.
                sims[i, j] = sims[j, i] = round((sims[i, j] + cos_sim) / 2, 2) if sims[i, j] else cos_sim
    plot_similarities(print_tokens, sims)
def evaluate_word_order():
    """Compare target-token embeddings for sentence pairs differing in word order.

    Reads examples from a local test file and the module-level `tokenizer`
    and `transformer`; collects (cross-context, same-context) similarity
    pairs in `sims`.
    """
    dataset_path: str = "../data/word_order_test.txt"
    lines: List[str] = open(dataset_path).read().split("\n")
    examples: List[Example] = [Example(x) for x in lines if x]
    sims: List[Tuple[float, float]] = []
    for ex in examples:
        token1: str = ex.context1.content[ex.context1.token_range_start:ex.context1.token_range_end]
        tensors1: List[Tensor] = transformer.get_embeddings_for_token(ex.context1.content, tokenizer, token1)
        token2: str = ex.context2.content[ex.context2.token_range_start:ex.context2.token_range_end]
        tensors2: List[Tensor] = transformer.get_embeddings_for_token(ex.context2.content, tokenizer, token2)
        cos_sim: float = 1 - cosine(tensors1[0], tensors2[0])
        # NOTE(review): this compares tensors1[0] with itself, so cos_sim2 is
        # trivially ~1.0; looks like a leftover/bug — confirm what the second
        # comparison was meant to be.
        cos_sim2: float = 1 - cosine(tensors1[0], tensors1[0])
        sims.append((cos_sim, cos_sim2))
    # `a = 0` appears to be a debugger breakpoint anchor; sims is otherwise unused.
    a = 0
def filter_max_length(x, y, max_length=MAX_LENGTH):
    """Dataset.filter predicate: keep pairs where both sequences fit in max_length."""
    x_fits = tf.size(x) <= max_length
    y_fits = tf.size(y) <= max_length
    return tf.logical_and(x_fits, y_fits)
def find_word_senses(tokenizer: SubwordTextEncoder, transformer: Transformer, dataset_path: str) -> None:
    """Probe the sense structure of Latin 'pars' via contextual embeddings.

    Reads tab-separated example sentences from `dataset_path`, keeps those
    containing an inflected form of 'pars', embeds the target token of each,
    and prints the 5 most and 5 least similar example pairs.
    """
    word_forms_set: Set[str] = {"pars", "partis", "parti", "partem", "parte", "partes", "partium", "partibus"}
    with open(dataset_path) as f:
        examples: List[str] = f.read().split("\n")
    examples = [y for x in examples for y in x.split("\t")]
    example_token_sets: List[Set[str]] = [set(x.split()) for x in examples]
    # Indices of examples with no target form.  Using a set makes the
    # membership test below O(1); the original used an O(n) list scan per
    # example, i.e. O(n^2) overall.
    deletion_indices: Set[int] = {i for i in range(len(examples))
                                  if not word_forms_set.intersection(example_token_sets[i])}
    examples = [examples[i] for i in range(len(examples)) if i not in deletion_indices]
    # NOTE(review): iterating the *set* below means relevant_tensors' order is
    # not the list order of `examples`, yet `examples[i]` is paired with
    # sims[i, j] further down — verify the intended alignment.
    examples_set: Set[str] = set(examples)
    relevant_tensors: List[Tensor] = []
    for example in tqdm(examples_set):
        target_token: str = next(x for x in example.split() if x in word_forms_set)
        # Only the first occurrence's embedding is used.
        tensors: List[Tensor] = transformer.get_embeddings_for_token(example, tokenizer, target_token)
        relevant_tensors.append(tensors[0])
    sims: np.ndarray = np.zeros((len(relevant_tensors), len(relevant_tensors)))
    for i in range(len(relevant_tensors)):
        for j in range(len(relevant_tensors) - 1):
            if i == j:
                continue
            cos_sim: float = 1 - cosine(relevant_tensors[i], relevant_tensors[j])
            sims[i, j] = sims[j, i] = round(cos_sim, 2)
    # Truncate examples for compact printing, pair every nonzero similarity
    # with its two examples, and show the extremes.
    examples = [x[:20] for x in examples]
    sims_with_ex: List[Tuple[float, str, str]] = []
    for i in range(len(sims)):
        for j in range(len(sims[i])):
            sims_with_ex.append((sims[i, j], examples[i], examples[j]))
    sims_with_ex = [x for x in sims_with_ex if x[0]]
    sims_with_ex.sort(key=lambda x: x[0], reverse=True)
    sims_with_ex = sims_with_ex[:5] + sims_with_ex[-5:]
    for swe in sims_with_ex:
        print(swe)
def loss_function(real, pred, loss_object):
    """Masked sequence loss: mean per-token loss over non-padding positions.

    Positions where `real` is 0 (padding) are excluded from both the sum
    and the normalizer.
    """
    non_padding = tf.math.logical_not(keras.backend.equal(real, 0))
    per_token = loss_object(real, pred)
    weights = keras.backend.cast(non_padding, dtype=per_token.dtype)
    masked = per_token * weights
    return tf.reduce_sum(masked) / tf.reduce_sum(weights)
def plot_attention_weights(attention, sentence, result, layer, tokenizer):
    """Plot one decoder layer's per-head attention over the input sentence.

    `attention` maps layer names to weight tensors (batch of 1, presumably
    shaped (1, num_heads, target_len, input_len) — confirm against the
    Transformer implementation); `result` is the decoded id sequence
    produced by `evaluate`.
    """
    # attention, sentence, result, layer, tokenizer_pt, tokenizer_en
    fig = plt.figure(figsize=(16, 8))
    # sentence = tokenizer_pt.encode(sentence)
    sentence = tokenizer.encode(sentence)
    # Drop the leading batch dimension; first axis then indexes heads.
    attention = keras.backend.squeeze(attention[layer], axis=0)
    for head in range(attention.shape[0]):
        ax = fig.add_subplot(2, 4, head + 1)
        # plot the attention weights
        ax.matshow(attention[head][:-1, :], cmap='viridis')
        fontdict = {'fontsize': 10}
        # +2 accounts for the <start>/<end> tokens wrapped around the input.
        ax.set_xticks(range(len(sentence) + 2))
        ax.set_yticks(range(len(result)))
        ax.set_ylim(len(result) - 1.5, -0.5)
        # ax.set_xticklabels(
        #     ['<start>'] + [tokenizer_pt.decode([i]) for i in sentence] + ['<end>'], fontdict=fontdict, rotation=90)
        ax.set_xticklabels(
            ['<start>'] + [tokenizer.decode([i]) for i in sentence] + ['<end>'], fontdict=fontdict, rotation=90)
        # ax.set_yticklabels([tokenizer_en.decode([i]) for i in result if i < tokenizer_en.vocab_size], fontdict=fontdict)
        ax.set_yticklabels([tokenizer.decode([i]) for i in result], fontdict=fontdict)
        ax.set_xlabel('Head {}'.format(head + 1))
    plt.tight_layout()
    plt.show()
def plot_similarities(print_tokens: List[str], sims: np.ndarray):
    """Render a labelled heatmap of the pairwise similarity matrix `sims`,
    annotating each cell with its two fractional digits."""
    fig, ax = plt.subplots()
    ax.imshow(sims)
    # Label every row/column with its token description.
    tick_positions = np.arange(len(print_tokens))
    ax.set_xticks(tick_positions)
    ax.set_yticks(tick_positions)
    ax.set_xticklabels(print_tokens)
    ax.set_yticklabels(print_tokens)
    # Rotate the x labels so long phrases stay readable.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    # Annotate each cell: keep only the digits after "0.", padding a single
    # digit with a trailing zero (e.g. 0.8 -> "80").
    for row in range(len(print_tokens)):
        for col in range(len(print_tokens)):
            cell = str(sims[row, col])[2:]
            if len(cell) == 1:
                cell += "0"
            ax.text(col, row, cell, ha="center", va="center", color="w")
    ax.set_title("Cosine similarity for various word senses")
    fig.tight_layout()
    plt.show()
def print_out(q, k, v):
    """Run scaled dot-product attention on (q, k, v) without a mask and
    print both the attention weights and the output."""
    out, attn = scaled_dot_product_attention(q, k, v, None)
    print('Attention weights are:')
    print(attn)
    print('Output is:')
    print(out)
def train_model(train_loss: Mean, train_accuracy: SparseCategoricalAccuracy, train_dataset: Dataset,
                ckpt_manager: CheckpointManager):
    """Run the training loop for EPOCHS epochs over `train_dataset`.

    Metrics are reset per epoch, progress is printed every 50 batches, and
    a checkpoint is saved on every 5th epoch.  Relies on the module-level
    `train_step`, which updates the global `transformer`.
    """
    for epoch in range(EPOCHS):
        start = time.time()
        train_loss.reset_states()
        train_accuracy.reset_states()
        for (batch_idx, (input_tensor, target)) in enumerate(train_dataset):
            train_step(input_tensor, target)
            if batch_idx % 50 == 0:
                print(
                    f'Epoch {epoch + 1} Batch {batch_idx} Loss {train_loss.result()} Accuracy {train_accuracy.result()}')
        if (epoch + 1) % 5 == 0:
            ckpt_save_path: str = ckpt_manager.save()
            print(f'Saving checkpoint for epoch {epoch + 1} at {ckpt_save_path}')
        print(f'Epoch {epoch + 1} Loss {train_loss.result()} Accuracy {train_accuracy.result()}')
        print(f'Time taken for 1 epoch: {time.time() - start} secs\n')
def tf_encode(la1: Tensor, la2: Tensor):
    """Graph-compatible wrapper around `encode` for Dataset.map.

    `encode` runs eagerly through tf.py_function, which loses static shape
    information, so the (unknown-length) 1-D shapes are re-asserted
    afterwards.  (The commented-out pt/en variant from the upstream
    tutorial was removed as dead code.)
    """
    result_la1, result_la2 = tf.py_function(encode, [la1, la2], [tf.int64, tf.int64])
    result_la1.set_shape([None])
    result_la2.set_shape([None])
    return result_la1, result_la2
# The @tf.function trace-compiles train_step into a TF graph for faster
# execution. The function specializes to the precise shape of the argument
# tensors. To avoid re-tracing due to the variable sequence lengths or variable
# batch sizes (the last batch is smaller), use input_signature to specify
# more generic shapes.
# Signature: (input batch, target batch), each (batch, seq_len) of int64 ids.
train_step_signature = [
    tf.TensorSpec(shape=(None, None), dtype=tf.int64),
    tf.TensorSpec(shape=(None, None), dtype=tf.int64),
]
@tf.function(input_signature=train_step_signature)
def train_step(inp: Tensor, tar: Tensor):
    """One optimization step on a batch; updates the global `transformer`.

    Uses teacher forcing: the decoder consumes the target shifted right
    (`tar_inp`) and is trained to predict the target shifted left
    (`tar_real`).
    """
    tar_inp = tar[:, :-1]
    tar_real = tar[:, 1:]
    enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)
    with tf.GradientTape() as tape:
        # True enables training mode (dropout) inside the model.
        predictions, _ = transformer([inp, tar_inp, True, enc_padding_mask, combined_mask, dec_padding_mask])
        loss = loss_function(tar_real, predictions, transformer.loss_object)
    gradients = tape.gradient(loss, transformer.trainable_variables)
    transformer.optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))
    # Accumulate running metrics stored on the model object.
    transformer.train_loss(loss)
    transformer.train_accuracy(tar_real, predictions)
def translate(sentence: str, tokenizer, transformer, plot='') -> None:
    """Greedy-decode `sentence` with `transformer` and print the result.

    When `plot` is non-empty it is passed to plot_attention_weights as the
    attention-layer name whose weights should be rendered.  (The
    commented-out pt/en tutorial variants were removed as dead code.)
    """
    result, attention_weights = evaluate(sentence, tokenizer, transformer)
    # Sentinel ids (>= vocab_size) are the start/end markers; drop them.
    predicted_sentence = tokenizer.decode([i for i in result if i < tokenizer.vocab_size])
    print(f'Input: {sentence}')
    print(f'Predicted translation: {predicted_sentence}')
    if plot:
        plot_attention_weights(attention_weights, sentence, result, plot, tokenizer)
def generate_examples(file_path: str) -> Tuple[str, str]:
    """Yield (first, second) sentence pairs from a tab-separated file.

    Fixes over the original: the trailing newline is stripped (it used to
    leak into the second field), lines without at least two tab-separated
    fields are skipped instead of raising IndexError, and the file handle
    is closed deterministically via a context manager.

    (Despite the annotation kept for compatibility, this is a generator of
    (str, str) tuples.)
    """
    with open(file_path) as f:
        for line in f:
            line_parts: List[str] = line.rstrip("\n").split("\t")
            if len(line_parts) < 2:
                continue  # blank or malformed line
            yield line_parts[0], line_parts[1]
def generate_train_examples() -> Tuple[str, str]:
    """Yield training sentence pairs from the downloaded PROIEL corpus file.

    (Really a generator of (str, str) tuples; the commented alternatives
    point at other corpora used during development.)
    """
    # train_dataset_fp: str = "../data/pars.txt"
    train_dataset_fp: str = tf.keras.utils.get_file("proiel.txt",
                                                    "https://box.hu-berlin.de/f/7da8d9c5703440e88531/?dl=1")
    # train_dataset_fp: str = tf.keras.utils.get_file("cc_train.txt",
    #                                                 "https://box.hu-berlin.de/f/f9a36dcb16e945b4a179/?dl=1")
    return generate_examples(train_dataset_fp)
def generate_val_examples() -> Tuple[str, str]:
    """Yield validation sentence pairs from the downloaded cc_val corpus file.

    (Really a generator of (str, str) tuples.)
    """
    # val_dataset_fp: str = "./.data/cc_val.txt"
    val_dataset_fp: str = tf.keras.utils.get_file("cc_val.txt",
                                                  "https://box.hu-berlin.de/f/41a95d07b791433b919b/?dl=1")
    return generate_examples(val_dataset_fp)
def predict_next_sentence(sentence: str, tokenizer: SubwordTextEncoder, transformer: Transformer) -> None:
    """Greedy-decode the model's continuation of `sentence` and print it."""
    result, attention_weights = evaluate(sentence, tokenizer, transformer)
    # Sentinel ids (>= vocab_size) are the start/end markers; drop them.
    predicted_sentence = tokenizer.decode([i for i in result if i < tokenizer.vocab_size])
    print(f'Input: {sentence}')
    # BUG FIX: the label previously read "Predicted translation", copied from
    # translate(); this function predicts the following sentence.
    print(f'Predicted next sentence: {predicted_sentence}')
def do_deep_learning():
    """End-to-end driver: data pipeline, tokenizer, model, checkpoint, evaluation.

    Builds tf.data pipelines from the downloaded corpora, loads (or builds
    and caches) a subword tokenizer, constructs the Transformer, restores
    the latest checkpoint when present, and runs the polysemy evaluation
    plus a few sample next-sentence predictions.  Actual training and
    sense-embedding construction are currently commented out.
    """
    checkpoint_path: str = "./checkpoints/train"
    # NOTE(review): .take(500) caps the corpus, presumably for fast
    # experimentation — confirm before a real training run.
    train_examples: Dataset = tf.data.Dataset.from_generator(
        generate_train_examples, (tf.string, tf.string), (tf.TensorShape([]), tf.TensorShape([]))).take(500)
    tokenizer_path: str = "tokenizer.subwords"
    tokenizer_prefix: str = tokenizer_path.split(".")[0]
    tokenizer: SubwordTextEncoder
    try:
        # Reuse a previously built vocabulary when available.
        tokenizer = SubwordTextEncoder.load_from_file(tokenizer_prefix)
    except NotFoundError:
        # First run: build the subword vocabulary from the corpus and cache it.
        tokenizer: SubwordTextEncoder = tfds.features.text.SubwordTextEncoder.build_from_corpus(
            (la1.numpy() + la2.numpy()[:-1] for la1, la2 in train_examples), target_vocab_size=2 ** 13)
        tokenizer.save_to_file(tokenizer_prefix)
    train_dataset: Dataset = train_examples.map(tf_encode)
    train_dataset = train_dataset.filter(filter_max_length)
    # cache the dataset to memory to get a speedup while reading from it.
    train_dataset = train_dataset.cache()
    train_dataset = train_dataset.shuffle(BUFFER_SIZE).padded_batch(BATCH_SIZE)
    train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
    val_examples: Dataset = tf.data.Dataset.from_generator(
        generate_val_examples, (tf.string, tf.string), (tf.TensorShape([]), tf.TensorShape([]))).take(5000)
    val_dataset = val_examples.map(tf_encode)
    val_dataset = val_dataset.filter(filter_max_length).padded_batch(BATCH_SIZE)
    # +2 reserves ids for the start/end tokens added in encode().
    input_vocabulary_size = target_vocabulary_size = tokenizer.vocab_size + 2
    # TODO: USE VALIDATION DATASET DURING TRAINING!
    np.set_printoptions(suppress=True)
    transformer: Transformer = Transformer(
        NUMBER_OF_LAYERS, MODEL_DIMENSIONS, NUMBER_OF_HEADS, FEED_FORWARD_DIMENSIONS, input_vocabulary_size,
        target_vocabulary_size, pe_input=input_vocabulary_size, pe_target=target_vocabulary_size, rate=DROPOUT_RATE)
    ckpt: Checkpoint = tf.train.Checkpoint(transformer=transformer, optimizer=transformer.optimizer)
    ckpt_manager: CheckpointManager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
    # if a checkpoint exists, restore the latest checkpoint.
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        print('Latest checkpoint restored!!')
    evaluate_polysemy(tokenizer, transformer)
    # evaluate_word_order()
    # evaluate_lexis()
    data_dir: str = "../data"
    proiel_pickle_path: str = os.path.join(data_dir, "proiel_conllu.pickle")
    cache_path: str = os.path.join(data_dir, "sense_embeddings.json")
    # build_sense_embeddings(proiel_pickle_path, tokenizer, cache_path)
    # train_model(transformer.train_loss, transformer.train_accuracy, train_dataset, ckpt_manager)
    # NOTE(review): evaluate_polysemy runs twice (before and after the
    # commented-out training call); the second call is redundant as-is.
    evaluate_polysemy(tokenizer, transformer)
    predict_next_sentence("Gallia est omnis divisa in partes tres.", tokenizer, transformer)
    predict_next_sentence("Arma virumque cano Troiae qui primus ab oris Italiam fato profugus Laviniaque venit litora.",
                          tokenizer, transformer)
    predict_next_sentence(
        "Omnis homines qui sese student praestare ceteris animalibus summa ope niti decet ne vitam silentio transeant veluti pecora quae natura prona atque ventri oboedientia finxit.",
        tokenizer, transformer)
# do_deep_learning()
|
{"hexsha": "126bb5e01f79e23005cff5c2063692c1e38d6c16", "size": 22727, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/transformer_tensorflow.py", "max_stars_repo_name": "konstantinschulz/asrael", "max_stars_repo_head_hexsha": "f169dacaa2883a02857f1b4a5050534408809eb8", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/transformer_tensorflow.py", "max_issues_repo_name": "konstantinschulz/asrael", "max_issues_repo_head_hexsha": "f169dacaa2883a02857f1b4a5050534408809eb8", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-06-08T22:26:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:48:09.000Z", "max_forks_repo_path": "python/transformer_tensorflow.py", "max_forks_repo_name": "konstantinschulz/asrael", "max_forks_repo_head_hexsha": "f169dacaa2883a02857f1b4a5050534408809eb8", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.30248307, "max_line_length": 575, "alphanum_fraction": 0.6993003916, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5656}
|
export basin_fractions, tipping_probabilities
"""
    basin_fractions(basins::Array) → fs::Dict
Calculate the fraction of the basins of attraction encoded in `basins`.
The elements of `basins` are integers, enumerating the attractor that the entry of `basins`
converges to. Return a dictionary that maps attractor IDs to their relative fractions.
In [^Menck2013] the authors use these fractions to quantify the stability of a basin of
attraction, and specifically how it changes when a parameter is changed.
[^Menck2013]: Menck, Heitzig, Marwan & Kurths. How basin stability complements the linear stability paradigm. [Nature Physics, 9(2), 89–92](https://doi.org/10.1038/nphys2516)
"""
function basin_fractions(basins::AbstractArray)
    # Single pass: tally occurrences of each attractor ID, then normalize.
    fs = Dict{eltype(basins), Float64}()
    for b in basins
        fs[b] = get(fs, b, 0.0) + 1.0
    end
    total = length(basins)
    for id in keys(fs)
        fs[id] /= total
    end
    return fs
end
"""
    tipping_probabilities(basins_before, basins_after) → P
Return the tipping probabilities of the computed basins before and after a change
in the system parameters (or time forcing), according to the definition of[^Kaszás2019].
The input `basins` are integer-valued arrays, where the integers enumerate the attractor,
e.g. the output of [`basins_of_attraction`](@ref).
## Description
Let ``\\mathcal{B}_i(p)`` denote the basin of attraction of attractor ``A_i`` at
parameter(s) ``p``. Kaszás et al[^Kaszás2019] define the tipping probability
from ``A_i`` to ``A_j``, given a parameter change in the system of ``p_- \\to p_+``, as
```math
P(A_i \\to A_j | p_- \\to p_+) =
\\frac{|\\mathcal{B}_j(p_+) \\cap \\mathcal{B}_i(p_-)|}{|\\mathcal{B}_i(p_-)|}
```
where ``|\\cdot|`` is simply the volume of the enclosed set.
The value of `` P(A_i \\to A_j | p_- \\to p_+)`` is `P[i, j]`.
The equation describes something quite simple:
what is the overlap of the basin of attraction of ``A_i`` at ``p_-`` with that of the
attractor ``A_j`` at ``p_+``.
If `basins_before, basins_after` contain values of `-1`, corresponding to trajectories
that diverge, this is considered as the last attractor of the system in `P`.
[^Kaszás2019]: Kaszás, Feudel & Tél. Tipping phenomena in typical dynamical systems subjected to parameter drift. [Scientific Reports, 9(1)](https://doi.org/10.1038/s41598-019-44863-3)
"""
function tipping_probabilities(basins_before::AbstractArray, basins_after::AbstractArray)
    @assert size(basins_before) == size(basins_after)
    bid, aid = unique.((basins_before, basins_after))
    # Make -1 last entry in bid, aid, if it exists
    put_minus_1_at_end!(bid); put_minus_1_at_end!(aid)
    P = zeros(length(bid), length(aid))
    # Hoisted out of the double loop: the original recomputed `findall` over
    # `basins_after` once per (i, j) pair. (Also removed an unused `N`.)
    after_indices = [findall(isequal(ξ), basins_after) for ξ in aid]
    for (i, ι) in enumerate(bid)
        B_i = findall(isequal(ι), basins_before)
        μ_B_i = length(B_i) # μ = measure
        for (j, B_j) in enumerate(after_indices)
            μ_overlap = length(B_i ∩ B_j)
            P[i, j] = μ_overlap/μ_B_i
        end
    end
    return P
end
# Move the diverging-trajectory marker (-1) to the last position, leaving the
# remaining IDs sorted ascending. Relies on -1 sorting first, i.e. on -1 being
# the smallest ID present.
function put_minus_1_at_end!(ids)
    if -1 in ids
        sort!(ids)          # -1, being the minimum, moves to the front
        popfirst!(ids)      # remove it from the front ...
        push!(ids, -1)      # ... and append it at the end
    end
end
|
{"hexsha": "9054f9e74f16d4829fddda99be534128729ac289", "size": 3358, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/basins/tipping.jl", "max_stars_repo_name": "onkyo14taro/ChaosTools.jl", "max_stars_repo_head_hexsha": "2b7d157de60027f9fbf6efcfaeb8cdccf8f480f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 168, "max_stars_repo_stars_event_min_datetime": "2018-01-07T15:46:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T13:38:24.000Z", "max_issues_repo_path": "src/basins/tipping.jl", "max_issues_repo_name": "onkyo14taro/ChaosTools.jl", "max_issues_repo_head_hexsha": "2b7d157de60027f9fbf6efcfaeb8cdccf8f480f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 176, "max_issues_repo_issues_event_min_datetime": "2017-12-19T20:26:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T20:08:23.000Z", "max_forks_repo_path": "src/basins/tipping.jl", "max_forks_repo_name": "onkyo14taro/ChaosTools.jl", "max_forks_repo_head_hexsha": "2b7d157de60027f9fbf6efcfaeb8cdccf8f480f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 39, "max_forks_repo_forks_event_min_datetime": "2018-01-27T07:59:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-06T10:08:36.000Z", "avg_line_length": 39.9761904762, "max_line_length": 184, "alphanum_fraction": 0.6959499702, "num_tokens": 974}
|
import requests
import os
import numpy as np
import json
import sys
import time
import uproot
import numba
import hepaccelerate
import hepaccelerate.kernels as kernels
from hepaccelerate.utils import Results, Dataset, Histogram, choose_backend
from tests.kernel_test import load_dataset
# Select the compute backend from the environment: HEPACCELERATE_CUDA=1
# requests the GPU path, anything else falls back to the CPU path.
USE_CUDA = int(os.environ.get("HEPACCELERATE_CUDA", 0)) == 1
nplib, backend = choose_backend(use_cuda=USE_CUDA)
def time_kernel(dataset, test_kernel):
    """Benchmark `test_kernel` on `dataset`; return throughput in events/second.

    The kernel is invoked once untimed (so JIT compilation is excluded) and
    then five times under the clock; the reported speed is len(dataset)
    divided by the mean wall-clock time per call.
    """
    test_kernel(dataset)  # warm-up run: ensure it's compiled
    num_events = len(dataset)
    runs = 5
    start = time.time()
    for _ in range(runs):
        test_kernel(dataset)
    elapsed_per_run = (time.time() - start) / float(runs)
    return float(num_events) / elapsed_per_run
def test_kernel_sum_in_offsets(dataset):
    """Benchmark body: per-event sum of muon pt with all events/objects selected."""
    muons = dataset.structs["Muon"][0]
    # NOTE(review): nplib.bool was removed from numpy >= 1.24; this may need
    # nplib.bool_ depending on the pinned backend version.
    sel_ev = nplib.ones(muons.numevents(), dtype=nplib.bool)
    sel_mu = nplib.ones(muons.numobjects(), dtype=nplib.bool)
    z = kernels.sum_in_offsets(
        backend, muons.offsets, muons.pt, sel_ev, sel_mu, dtype=nplib.float32
    )
def test_kernel_simple_cut(dataset):
    """Benchmark body: elementwise pt > 30 selection over all muons."""
    muons = dataset.structs["Muon"][0]
    sel_mu = muons.pt > 30.0
def test_kernel_max_in_offsets(dataset):
    """Benchmark body: per-event maximum of muon pt with everything selected."""
    muons = dataset.structs["Muon"][0]
    sel_ev = nplib.ones(muons.numevents(), dtype=nplib.bool)
    sel_mu = nplib.ones(muons.numobjects(), dtype=nplib.bool)
    z = kernels.max_in_offsets(backend, muons.offsets, muons.pt, sel_ev, sel_mu)
def test_kernel_get_in_offsets(dataset):
    """Benchmark body: fetch the pt of the first (index 0) muon in each event."""
    muons = dataset.structs["Muon"][0]
    sel_ev = nplib.ones(muons.numevents(), dtype=nplib.bool)
    sel_mu = nplib.ones(muons.numobjects(), dtype=nplib.bool)
    # One index per event; all zero, i.e. the leading muon.
    inds = nplib.zeros(muons.numevents(), dtype=nplib.int8)
    inds[:] = 0
    z = kernels.get_in_offsets(backend, muons.offsets, muons.pt, inds, sel_ev, sel_mu)
def test_kernel_mask_deltar_first(dataset):
    """Benchmark body: match muons to selected jets (pt > 10) in eta-phi.

    The final 0.3 is presumably the delta-R matching threshold — confirm
    against the kernels.mask_deltar_first API.
    """
    muons = dataset.structs["Muon"][0]
    jet = dataset.structs["Jet"][0]
    sel_ev = nplib.ones(muons.numevents(), dtype=nplib.bool)
    sel_mu = nplib.ones(muons.numobjects(), dtype=nplib.bool)
    sel_jet = jet.pt > 10
    muons_matched_to_jet = kernels.mask_deltar_first(
        backend,
        {"offsets": muons.offsets, "eta": muons.eta, "phi": muons.phi},
        sel_mu,
        {"offsets": jet.offsets, "eta": jet.eta, "phi": jet.phi},
        sel_jet,
        0.3,
    )
def test_kernel_histogram_from_vector(dataset):
    """Benchmark body: weighted histogram of muon pt over 100 bins in [0, 200]."""
    muons = dataset.structs["Muon"][0]
    # Constant weight 2 per muon, just to exercise the weighted path.
    weights = 2 * nplib.ones(muons.numobjects(), dtype=nplib.float32)
    ret = kernels.histogram_from_vector(
        backend, muons.pt, weights, nplib.linspace(0, 200, 100, dtype=nplib.float32)
    )
def test_kernel_histogram_from_vector_several(dataset):
    """Benchmark body: fill several muon histograms at once under a mask."""
    muons = dataset.structs["Muon"][0]
    # Mask out the first 100 muons to exercise the masked path.
    mask = nplib.ones(muons.numobjects(), dtype=nplib.bool)
    mask[:100] = False
    weights = 2 * nplib.ones(muons.numobjects(), dtype=nplib.float32)
    # (values, bin edges) pairs, one histogram per kinematic variable.
    variables = [
        (muons.pt, nplib.linspace(0, 200, 100, dtype=nplib.float32)),
        (muons.eta, nplib.linspace(-4, 4, 100, dtype=nplib.float32)),
        (muons.phi, nplib.linspace(-4, 4, 100, dtype=nplib.float32)),
        (muons.mass, nplib.linspace(0, 200, 100, dtype=nplib.float32)),
        (muons.charge, nplib.array([-1, 0, 1, 2], dtype=nplib.float32)),
    ]
    ret = kernels.histogram_from_vector_several(backend, variables, weights, mask)
def test_kernel_select_opposite_sign(dataset):
    """Benchmark body: select opposite-charge muon pairs per event."""
    muons = dataset.structs["Muon"][0]
    # sel_ev is constructed but not passed; presumably a leftover — the
    # kernel takes only the per-object selection.
    sel_ev = nplib.ones(muons.numevents(), dtype=nplib.bool)
    sel_mu = nplib.ones(muons.numobjects(), dtype=nplib.bool)
    muons_passing_os = kernels.select_opposite_sign(
        backend, muons.offsets, muons.charge, sel_mu
    )
def test_timing(ds):
    """Run the benchmark suite 5 times, appending one JSON line per run."""
    with open("data/kernel_benchmarks.txt", "a") as of:
        for i in range(5):
            ret = run_timing(ds)
            of.write(json.dumps(ret) + "\n")
def run_timing(ds):
    """Benchmark host-to-device transfer plus every kernel on dataset `ds`.

    Returns a dict with configuration info (backend, thread count, dataset
    size) and per-kernel throughput in MHz (millions of events per second).
    The eight copy-pasted time/print/store stanzas of the original are
    replaced by one data-driven loop producing identical output.
    """
    print("Testing memory transfer speed")
    t0 = time.time()
    for i in range(5):
        ds.move_to_device(nplib)
    t1 = time.time()
    dt = (t1 - t0) / 5.0
    ret = {
        "use_cuda": USE_CUDA,
        "num_threads": numba.config.NUMBA_NUM_THREADS,
        "use_avx": numba.config.ENABLE_AVX,
        "num_events": ds.numevents(),
        "memsize": ds.memsize(),
    }
    print(
        "Memory transfer speed: {0:.2f} MHz, event size {1:.2f} bytes, data transfer speed {2:.2f} MB/s".format(
            ds.numevents() / dt / 1000.0 / 1000.0,
            ds.eventsize(),
            ds.memsize() / dt / 1000 / 1000,
        )
    )
    ret["memory_transfer"] = ds.numevents() / dt / 1000.0 / 1000.0
    # (label, kernel) pairs, in the original benchmark and print order;
    # the labels double as the result-dict keys.
    benchmarks = [
        ("sum_in_offsets", test_kernel_sum_in_offsets),
        ("simple_cut", test_kernel_simple_cut),
        ("max_in_offsets", test_kernel_max_in_offsets),
        ("get_in_offsets", test_kernel_get_in_offsets),
        ("mask_deltar_first", test_kernel_mask_deltar_first),
        ("select_muons_opposite_sign", test_kernel_select_opposite_sign),
        ("histogram_from_vector", test_kernel_histogram_from_vector),
        ("histogram_from_vector_several", test_kernel_histogram_from_vector_several),
    ]
    for label, kernel in benchmarks:
        mhz = time_kernel(ds, kernel) / 1000 / 1000
        print("{0} {1:.2f} MHz".format(label, mhz))
        ret[label] = mhz
    return ret
if __name__ == "__main__":
    # Load a small test dataset (second argument presumably sizes it — see
    # tests.kernel_test.load_dataset) and run the full benchmark suite.
    dataset = load_dataset(nplib, 5)
    test_timing(dataset)
|
{"hexsha": "7936498f598ebcf3d6ab89053ad6152459e29c2e", "size": 6029, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/timing.py", "max_stars_repo_name": "irenedutta23/hepaccelerate", "max_stars_repo_head_hexsha": "c18fac25a0b88414836bc0a84a333f30ba02ac47", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-10-17T10:06:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-05T17:55:24.000Z", "max_issues_repo_path": "examples/timing.py", "max_issues_repo_name": "irenedutta23/hepaccelerate", "max_issues_repo_head_hexsha": "c18fac25a0b88414836bc0a84a333f30ba02ac47", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-09-13T17:09:51.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-29T01:41:16.000Z", "max_forks_repo_path": "examples/timing.py", "max_forks_repo_name": "irenedutta23/hepaccelerate", "max_forks_repo_head_hexsha": "c18fac25a0b88414836bc0a84a333f30ba02ac47", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-01-15T17:57:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-24T02:51:22.000Z", "avg_line_length": 33.1263736264, "max_line_length": 112, "alphanum_fraction": 0.6631282136, "include": true, "reason": "import numpy,import numba", "num_tokens": 1795}
|
#include <boost/mpl/aux_/preprocessor/is_seq.hpp>
|
{"hexsha": "4033fd472c8c200e346bcda90a4b1d0825de4e00", "size": 50, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_mpl_aux__preprocessor_is_seq.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_mpl_aux__preprocessor_is_seq.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_mpl_aux__preprocessor_is_seq.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 25.0, "max_line_length": 49, "alphanum_fraction": 0.8, "num_tokens": 14}
|
% Autogenerated translation of about.md by Texpad
% To stop this file being overwritten during the typeset process, please move or remove this header
\documentclass[12pt]{book}
\usepackage{graphicx}
\usepackage[utf8]{inputenc}
\usepackage[a4paper,left=.5in,right=.5in,top=.3in,bottom=0.3in]{geometry}
\setlength\parindent{0pt}
\setlength{\parskip}{\baselineskip}
\renewcommand*\familydefault{\sfdefault}
\usepackage{hyperref}
\pagestyle{plain}
\begin{document}
\Large
\hrule
permalink: /
title: "Bogdan Mazoure - Math \& Stats graduate student @ McGill University"
excerpt: "About me"
author\_profile: true
redirect\_from:
 - /about/
 - /about.html
\chapter*{About me}
I am currently a PhD student at the Montreal Institute for Learning Algorithms (MILA) and McGill University, co-supervised by Devon Hjelm and Doina Precup. My research interests include deep reinforcement learning, probabilistic modeling, variational inference and representation learning.
I have completed my Master's in Statistics at McGill University under the supervision of Prof. \href{http://www.math.mcgill.ca/neslehova/}{Johanna Neslehova}. My thesis focuses on reconstructing graphical models from discrete data with variational inference and multiarmed bandits. It can be found here: \href{https://bmazoure.github.io/files/thesis_Msc_2018.pdf}{link}.
I was also a research intern at Nuance during the summer of 2018 where I collaborated with Dr. \href{https://scholar.google.ca/citations?user=KRPMXqYAAAAJ&hl=en}{Atta Norouzian}. My work there focused on modeling acoustic signals such as speech with deep neural architectures.
Previously, I obtained a Bachelor's in Computer Science and Statistics in 2017 from McGill University.
\chapter*{Research interests}
\begin{itemize}
\item Deep and distributional reinforcement learning;
\item Multivariate statistics;
\item Parametric and Non-parametric Bayesian methods (Gaussian processes and variational inference);
\item Probabilistic graphical models;
\item Uncertainty representation in neural networks;
\item Generative models (auto-encoding variational Bayes and generative nets);
\item Dependence modeling for discrete marginals.
\end{itemize}
\end{document}
|
{"hexsha": "4970cc02da0084fa24de0f93c16cf0770b8249c7", "size": 2219, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "_pages/about.tex", "max_stars_repo_name": "bmazoure/bmazoure.github.io", "max_stars_repo_head_hexsha": "02af7f10a4432d0966fca63d85d40587b9773594", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "_pages/about.tex", "max_issues_repo_name": "bmazoure/bmazoure.github.io", "max_issues_repo_head_hexsha": "02af7f10a4432d0966fca63d85d40587b9773594", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "_pages/about.tex", "max_forks_repo_name": "bmazoure/bmazoure.github.io", "max_forks_repo_head_hexsha": "02af7f10a4432d0966fca63d85d40587b9773594", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-09-04T03:53:27.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-04T03:53:27.000Z", "avg_line_length": 45.2857142857, "max_line_length": 370, "alphanum_fraction": 0.8008111762, "num_tokens": 547}
|
	SUBROUTINE AF_ATIM  ( isdx, maxsc, istim, ietim, iret )
C************************************************************************
C* AF_ATIM								*
C*									*
C* This subroutine locates, decodes, and stores the time (HHMM) data	*
C* from within an AIREP report.  The report must have already been	*
C* broken up into "like-type" groups using subroutine AF_BKGP.		*
C* Output includes the indices of the "like-type" groups which contain	*
C* the start and the end of the time data; these values are set		*
C* to IMISSD if the search for the data was unsuccessful.		*
C*									*
C* AF_ATIM  ( ISDX, MAXSC, ISTIM, IETIM, IRET )				*
C*									*
C* Input parameters:							*
C*	ISDX		INTEGER		Index of "like-type" group with	*
C*					which to begin search for data	*
C*	MAXSC		INTEGER		Maximum number of "like-type"	*
C*					groups to search following ISDX	*
C*									*
C* Output parameters:							*
C*	ISTIM		INTEGER		Index of "like-type" group which*
C*					contains start of data		*
C*	IETIM		INTEGER		Index of "like-type" group which*
C*					contains end of data		*
C*	IRET		INTEGER		Return code			*
C*					  0 = normal return		*
C*									*
C**									*
C* Log:									*
C* J. Ator/NP12	09/96							*
C* J. Ator/NP12	08/97	New interface format, style changes		*
C************************************************************************
	INCLUDE		'GEMPRM.PRM'
	INCLUDE		'afcmn.cmn'
C*
	INCLUDE		'affnc.fnc'
C-----------------------------------------------------------------------
C
C*	Initialize variables.
C
	iret = 0
	istim = IMISSD
	ietim = IMISSD
C
C*	Locate the time data.  It is identifiable as a four-digit
C*	numeric "like-type" group that was not concatenated with any
C*	other "like-type" groups in the original report.
C
C*	NOTE (review): the loop below reads irfnsf ( ii - 1 ), which
C*	assumes ii .gt. 1 on entry; confirm that callers never pass
C*	isdx = 1.
C
	ii = isdx
	maxii = IEDX ( ii, maxsc, nflds )
C
	DO WHILE  ( ( ( ii + 1 ) .le. maxii ) .and.
     +		    ( istim .eq. IMISSD ) )
	    IF  ( ( itypsf ( ii ) .eq. NMR ) .and.
     +		  ( lensf ( ii ) .eq. 4 ) )  THEN
		IF  ( ( irfnsf ( ii - 1 ) .ne. irfnsf ( ii ) ) .and.
     +		      ( irfnsf ( ii + 1 ) .ne. irfnsf ( ii ) ) )  THEN
C
C*		    The time data has been found.
C
		    istim = ii
		END IF
	    END IF
	    ii = ii + 1
	END DO
C
	IF  ( istim .ne. IMISSD )  THEN
	    ietim = istim
C
C*	    Decode and store the time data.
C*	    NOTE (review): the decode status iertim is not propagated
C*	    into iret here -- confirm that this is intentional.
C
	    CALL AF_HHMM  ( fields ( istim ) ( 1 : lensf ( istim ) ),
     +			    iertim )
	END IF
C*
	RETURN
	END
|
{"hexsha": "21fe63494e5dfdd9147d77ccf8b955087f5c434d", "size": 2393, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "gempak/source/bridge/af/afatim.f", "max_stars_repo_name": "oxelson/gempak", "max_stars_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z", "max_issues_repo_path": "gempak/source/bridge/af/afatim.f", "max_issues_repo_name": "oxelson/gempak", "max_issues_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z", "max_forks_repo_path": "gempak/source/bridge/af/afatim.f", "max_forks_repo_name": "oxelson/gempak", "max_forks_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z", "avg_line_length": 30.6794871795, "max_line_length": 73, "alphanum_fraction": 0.5432511492, "num_tokens": 826}
|
# Function to return feature scaled points
import cv2
import numpy as np
from . import plot_image
def _draw_scaled_debug_image(rescaled, centroid_scaled, boundary_line_scaled):
    """Render the scaled landmark points on a 1500x1500 black canvas.

    The scaled coordinates (0 - 1.0) are multiplied by 1000 so they are
    visible on the canvas, and offset by 250 px to center the object.  The
    result is flipped about the x-axis because image y grows downward.

    :param rescaled: list of (x, y) tuples scaled to 0 - 1.0
    :param centroid_scaled: (x, y) scaled centroid
    :param boundary_line_scaled: (x, y) scaled boundary-line reference point
    :return: ndarray, the flipped debug image
    """
    scaled_img = np.zeros((1500, 1500, 3), np.uint8)
    plotter = np.array(rescaled) * 1000
    # White circle for every landmark point.
    for point in plotter:
        x, y = point.ravel()
        cv2.circle(scaled_img, (int(x) + 250, int(y) + 250), 15, (255, 255, 255), -1)
    cmx_scaled, cmy_scaled = centroid_scaled
    blx_scaled, bly_scaled = boundary_line_scaled
    # Red circle = centroid, green circle = boundary-line reference.
    cv2.circle(scaled_img, (int(cmx_scaled * 1000) + 250, int(cmy_scaled * 1000) + 250), 25, (0, 0, 255), -1)
    cv2.circle(scaled_img, (int(blx_scaled * 1000) + 250, int(bly_scaled * 1000) + 250), 25, (0, 255, 0), -1)
    return cv2.flip(scaled_img, 0)


def scale_features(obj, mask, points, boundary_line, device, debug=None):
    """scale_features: returns feature scaled points

    Transform the coordinates of landmark points onto a common scale
    (0 - 1.0) relative to the object's bounding box.  The centroid of the
    mask and the boundary-line reference point are rescaled as well.

    Inputs:
    obj           = a contour of the plant object (output from object_composition.py)
    mask          = binary image; the object is white, the background black
    points        = the points to scale
    boundary_line = vertical coordinate of the top of the plant pot, or 'NA'
    device        = a counter variable
    debug         = None, 'print' (write debug image) or 'plot' (display it)

    :param obj: ndarray
    :param mask: ndarray
    :param points: ndarray
    :param boundary_line: int
    :param device: int
    :param debug: str
    :return: device, rescaled, centroid_scaled, boundary_line_scaled
    """
    device += 1
    # Degenerate input: empty mask or contour -- return 'NA' placeholders.
    if not np.any(mask) or not np.any(obj):
        rescaled = ('NA', 'NA')
        centroid_scaled = ('NA', 'NA')
        boundary_line_scaled = ('NA', 'NA')
        return device, rescaled, centroid_scaled, boundary_line_scaled
    # Image dimensions come from the binary thresholded object (mask).
    iy, ix = np.shape(mask)
    x, y, width, height = cv2.boundingRect(obj)
    m = cv2.moments(mask, binaryImage=True)
    cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
    # Convert the boundary line position (top of the pot) into an image
    # y-coordinate; fall back to the centroid when no line is given.
    if boundary_line != 'NA':
        bly = int(iy) - int(boundary_line)
    else:
        bly = cmy
    blx = cmx
    # NOTE: Ymax/Ymin are deliberately swapped relative to their numeric
    # values (Ymax < Ymin): image y grows downward, so scaling against the
    # inverted range makes scaled y increase upward (bottom of box -> 0).
    Ymax = y
    Ymin = y + height
    Xmin = x
    Xmax = x + width
    # Feature scaling X' = (X - Xmin) / (Xmax - Xmin)
    # Feature scaling Y' = (Y - Ymin) / (Ymax - Ymin)
    rescaled = []
    for p in points:
        xval = float(p[0, 0] - Xmin) / float(Xmax - Xmin)
        yval = float(p[0, 1] - Ymin) / float(Ymax - Ymin)
        rescaled.append((xval, yval))
    # Rescale the centroid.
    cmx_scaled = float(cmx - Xmin) / float(Xmax - Xmin)
    cmy_scaled = float(cmy - Ymin) / float(Ymax - Ymin)
    centroid_scaled = (cmx_scaled, cmy_scaled)
    # Rescale the boundary line reference point.
    blx_scaled = float(blx - Xmin) / float(Xmax - Xmin)
    bly_scaled = float(bly - Ymin) / float(Ymax - Ymin)
    boundary_line_scaled = (blx_scaled, bly_scaled)
    # The two debug branches of the original were near-identical 15-line
    # copies; they now share a single rendering helper.
    if debug == 'print':
        flipped_scaled = _draw_scaled_debug_image(rescaled, centroid_scaled, boundary_line_scaled)
        cv2.imwrite((str(device) + '_feature_scaled.png'), flipped_scaled)
    elif debug == 'plot':
        flipped_scaled = _draw_scaled_debug_image(rescaled, centroid_scaled, boundary_line_scaled)
        plot_image(flipped_scaled)
    # Return the transformed points
    return device, rescaled, centroid_scaled, boundary_line_scaled
|
{"hexsha": "e2723a925ee10488f3020151e0f5f50b34be31a3", "size": 4994, "ext": "py", "lang": "Python", "max_stars_repo_path": "plantcv/scale_features.py", "max_stars_repo_name": "Atitsuda/plantcv", "max_stars_repo_head_hexsha": "29932a0f0aa3573a74335425bccc394b2fb7b885", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-12-05T12:07:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-25T08:58:52.000Z", "max_issues_repo_path": "plantcv/scale_features.py", "max_issues_repo_name": "Atitsuda/plantcv", "max_issues_repo_head_hexsha": "29932a0f0aa3573a74335425bccc394b2fb7b885", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plantcv/scale_features.py", "max_forks_repo_name": "Atitsuda/plantcv", "max_forks_repo_head_hexsha": "29932a0f0aa3573a74335425bccc394b2fb7b885", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-13T17:44:53.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-13T17:44:53.000Z", "avg_line_length": 45.8165137615, "max_line_length": 117, "alphanum_fraction": 0.6287545054, "include": true, "reason": "import numpy", "num_tokens": 1438}
|
#!/usr/bin/python
from src.logger.LOG import LOG
from src.TVEKDNN.CustomDNN import CustomDNN
import numpy as np
"""
.. module:: CustomDNN
:platform: Unix, Windows
:synopsis: A useful module indeed.
.. moduleauthor:: Thomas Vimal Easo K<thomasvml@gmail.com>
"""
class ShallowDNN(CustomDNN):
    """Shallow neural network (one hidden layer) built on top of CustomDNN.

    The base class is expected to supply the training data (``X_train``,
    ``Y_train``), the size attributes (``numOfFeatures``,
    ``numberofNodesInLayerZero``, ``numOfSamples``) and ``applyActivator``.
    """

    def __init__(self):
        """Default constructor; all configuration is inherited from CustomDNN."""
        pass

    def setParameters(self):
        """Initialize and store the weight/bias parameters of both layers.

        Fixes the original implementation, which (a) looped without any
        effect, (b) read ``self.W1``/``self.b1`` before they were ever
        assigned, and (c) never stored ``W2``/``b2`` even though
        ``forwardPropagation`` reads them from ``self.parameters``.
        """
        # Hidden layer: one column of weights per hidden node.
        # (float zeros, not dtype=int as originally written, so gradient
        # updates are not truncated).
        # NOTE(review): shapes assume X_train is (features, samples) as
        # implied by the np.dot(W.T, X) pattern in forwardPropagation.
        self.W1 = np.zeros((self.numOfFeatures, self.numberofNodesInLayerZero))
        self.b1 = np.zeros((self.numberofNodesInLayerZero, 1))
        # Output layer: a single output node.
        self.W2 = np.zeros((self.numberofNodesInLayerZero, 1))
        self.b2 = np.zeros((1, 1))
        LOG.D(" -> Weight:{}".format(self.W1.shape))
        self.parameters = {"W1": self.W1, "b1": self.b1,
                           "W2": self.W2, "b2": self.b2}

    def forwardPropagation(self):
        """Run one forward pass through the hidden and output layers.

        :return: True if the forward pass completed successfully
        :rtype: bool
        """
        self.W1 = self.parameters["W1"]
        self.b1 = self.parameters["b1"]
        self.W2 = self.parameters["W2"]
        # Bug fix: the original assigned parameters["b2"] to self.b3 and
        # then used the never-set self.b2 below.
        self.b2 = self.parameters["b2"]
        self.Z1 = np.dot(self.W1.T, self.X_train) + self.b1
        self.A1 = self.applyActivator(self.Z1)
        self.Z2 = np.dot(self.W2.T, self.A1) + self.b2
        self.A2 = self.applyActivator(self.Z2)
        # Expose the network output under the name cost()/backPropagration()
        # read; the original never set self.A, so both raised AttributeError.
        self.A = self.A2
        LOG.D(" -> A:{}".format(self.A2.shape))
        LOG.D("Forward Propogation is completed successfully")
        return True

    def cost(self):
        """Mean binary cross-entropy of the current predictions.

        :return: cross-entropy cost over the training samples
        :rtype: float
        """
        return -1 * np.sum((self.Y_train * np.log(self.A)) + ((1 - self.Y_train) * (np.log(1 - self.A)))) / self.numOfSamples

    def backPropagration(self):
        """Back-propagate the output error and store the gradients.

        NOTE(review): this computes only a logistic-regression-style
        gradient w.r.t. the input (single dW/db pair); per-layer gradients
        for W1/W2 are not derived -- confirm intended behavior.

        :return: True if back propagation completed successfully
        :rtype: bool
        """
        dZ = self.A - self.Y_train
        LOG.D(" -> X_train:{} dZ.T:{}".format(self.X_train.shape, dZ.T.shape))
        self.dW = (np.dot(self.X_train, dZ.T)) / self.numOfSamples
        self.db = np.sum(dZ) / self.numOfSamples
        LOG.D(" -> dW:{} db:{}".format(self.dW, self.db))
        LOG.D("Back Propogation is completed successfully")
        return True
|
{"hexsha": "e2c37c49a7d875cc48f298fe2775ea21b83cf383", "size": 2371, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/TVEKDNN/ShallowDNN.py", "max_stars_repo_name": "tvek/DatasciencePythonInitBase", "max_stars_repo_head_hexsha": "e578b4a3026b55bc2935b200453e511f1731c75e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/TVEKDNN/ShallowDNN.py", "max_issues_repo_name": "tvek/DatasciencePythonInitBase", "max_issues_repo_head_hexsha": "e578b4a3026b55bc2935b200453e511f1731c75e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-01T23:55:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-01T23:55:30.000Z", "max_forks_repo_path": "src/TVEKDNN/ShallowDNN.py", "max_forks_repo_name": "tvek/DatasciencePythonInitBase", "max_forks_repo_head_hexsha": "e578b4a3026b55bc2935b200453e511f1731c75e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9146341463, "max_line_length": 118, "alphanum_fraction": 0.684943062, "include": true, "reason": "import numpy", "num_tokens": 707}
|
-- Semacrolon: a small macro layer over the Nope term language.
-- Work in progress: several clauses below are still holes
-- (?parseMacroExpr_rhs_1 etc.).
module Semacrolon
import Nope
%access public export
%default total
-- Earlier sketch of a dedicated token type, kept commented out:
--data SemacrolonToken : Type where
-- STMacro : SemacrolonToken
-- STEq : SemacrolonToken
-- STMacroName : String -> SemacrolonToken
-- STTerm : Term String -> SemacrolonToken
-- An expression is a macro application, a Nope application carrying macro
-- sub-expressions, or a plain Nope term.
data SemacrolonExpr : Type where
  MacroAppl : String -> List SemacrolonExpr -> SemacrolonExpr
  NopeAppl : Term String -> SemacrolonExpr -> List SemacrolonExpr -> SemacrolonExpr
  SemacrolonTerm : Term String -> SemacrolonExpr
-- A top-level item: either a macro definition or a bare expression.
data SemacrolonAst : Type where
  MacroDef : String -> List SemacrolonExpr -> NopeAst -> SemacrolonAst
  MacroExpr : SemacrolonExpr -> SemacrolonAst
mutual
  -- This is necessary for totality
  unwrapQuotedIds_ : List NopeAst -> List NopeAst
  unwrapQuotedIds_ [] = []
  unwrapQuotedIds_ (ast :: asts) = unwrapQuotedIds ast :: unwrapQuotedIds_ asts
  -- Rewrite '#'-quoted raw terms into plain identifiers, recursively.
  unwrapQuotedIds : NopeAst -> NopeAst
  unwrapQuotedIds (TermAst (Raw '#' y)) = TermAst (Id y)
  unwrapQuotedIds (TermAst term@(Raw _ _)) = TermAst term
  unwrapQuotedIds (TermAst term@(Id _)) = TermAst term
  unwrapQuotedIds (Appl ast ast_ asts) =
    Appl (unwrapQuotedIds ast) (unwrapQuotedIds ast_) (unwrapQuotedIds_ asts)
-- Identifiers starting with '#' are rejected; ';'-prefixed identifiers are
-- macro applications; anything else is a plain term.
-- NOTE(review): the Appl cases are unfinished holes.
parseMacroExpr : NopeAst -> Maybe SemacrolonExpr
parseMacroExpr (TermAst term@(Raw _ _)) = Just $ SemacrolonTerm term
parseMacroExpr (TermAst (Id x)) =
  case unpack x of
    ('#' :: _) => Nothing
    (';' :: _) => Just $ MacroAppl x []
    _ => Just $ SemacrolonTerm $ Id x
parseMacroExpr (Appl (TermAst x) ast2 xs) = ?parseMacroExpr_rhs_1
parseMacroExpr (Appl (Appl ast ast_ ys) ast2 xs) = ?parseMacroExpr_rhs_3
-- NOTE(review): parseMacro is declared but has no clauses yet.
parseMacro : NopeAst -> Maybe SemacrolonAst
-- NOTE(review): both execMacro clauses are unfinished holes.
execMacro : SemacrolonAst -> NopeAst
execMacro (MacroDef x xs ast) = ?execMacro_rhs_1
execMacro (MacroExpr x) = ?execMacro_rhs_2
|
{"hexsha": "f3f0ebd5c782cb0b281a11687d55747e5cb06828", "size": 1803, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "Semacrolon.idr", "max_stars_repo_name": "PolyglotSymposium/nosh", "max_stars_repo_head_hexsha": "3b1adb5db66ab54256b08126c3c7e7d8ffc3a27f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-06-02T14:48:01.000Z", "max_stars_repo_stars_event_max_datetime": "2017-06-02T14:48:01.000Z", "max_issues_repo_path": "Semacrolon.idr", "max_issues_repo_name": "PolyglotSymposium/nosh", "max_issues_repo_head_hexsha": "3b1adb5db66ab54256b08126c3c7e7d8ffc3a27f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Semacrolon.idr", "max_forks_repo_name": "PolyglotSymposium/nosh", "max_forks_repo_head_hexsha": "3b1adb5db66ab54256b08126c3c7e7d8ffc3a27f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6730769231, "max_line_length": 84, "alphanum_fraction": 0.7110371603, "num_tokens": 569}
|
import numpy as np
def ortho(vect2d):
    """Return *vect2d* rotated by +90 degrees (a counter-clockwise normal)."""
    x_comp, y_comp = vect2d[0], vect2d[1]
    return np.array((-y_comp, x_comp))
def dist(pt_a, pt_b):
    """Return the Euclidean distance between two (x, y) points."""
    delta_x = pt_a[0] - pt_b[0]
    delta_y = pt_a[1] - pt_b[1]
    return (delta_x ** 2 + delta_y ** 2) ** .5
class Dubins:
    """
    Dubins path planner with a constant turn radius.

    Attributes
    ----------
    radius : float
        The radius of the turn used in all the potential trajectories.
    point_separation : float
        The distance between consecutive points of a sampled trajectory.
        More points increase the precision of the path but also increase
        the computation time of the collision check.

    Methods
    -------
    all_options
        Computes the six candidate Dubins paths (LSL, RSR, RSL, LSR, RLR,
        LRL) between two poses.
    dubins_path
        Computes the shortest Dubins path between two given points.
    generate_points
        Dispatches to one of the two samplers below depending on whether
        the path contains a straight segment.
    generate_points_straight
        Turns a path into a set of points representing the trajectory, for
        Dubins paths of type LSL, LSR, RSL or RSR.
    generate_points_curve
        Turns a path into a set of points representing the trajectory, for
        Dubins paths of type RLR or LRL.
    find_center
        Computes the center of the circle described by a turn.
    lsl, rsr, rsl, lsr, lrl, rlr
        Candidate-path constructors; each returns a tuple
        (total_length, path_parameters, has_straight_segment).
    """
def __init__(self, radius, point_separation):
assert radius > 0 and point_separation > 0
self.radius = radius
self.point_separation = point_separation
def all_options(self, start, end, sort=False):
"""
Computes all the possible Dubin's path and returns them, in the form
of a list of tuples representing each option: (path_length,
dubins_path, straight).
Parameters
----------
start : tuple
In the form (x, y, psi), with psi in radians.
The representation of the inital point.
end : tuple
In the form (x, y, psi), with psi in radians.
The representation of the final point.
sort : bool
If the list of option has to be sorted by decreasing cost or not.
Returns
-------
The shortest list of points (x, y) linking the initial and final points
given as input with only turns of a defined radius and straight line.
"""
center_0_left = self.find_center(start, 'L')
center_0_right = self.find_center(start, 'R')
center_2_left = self.find_center(end, 'L')
center_2_right = self.find_center(end, 'R')
options = [self.lsl(start, end, center_0_left, center_2_left),
self.rsr(start, end, center_0_right, center_2_right),
self.rsl(start, end, center_0_right, center_2_left),
self.lsr(start, end, center_0_left, center_2_right),
self.rlr(start, end, center_0_right, center_2_right),
self.lrl(start, end, center_0_left, center_2_left)]
if sort:
options.sort(key=lambda x: x[0])
return options
def dubins_path(self, start, end):
"""
Computes all the possible Dubin's path and returns the sequence of
points representing the shortest option.
Parameters
----------
start : tuple
In the form (x, y, psi), with psi in radians.
The representation of the inital point.
end : tuple
In the form (x, y, psi), with psi in radians.
The representation of the final point.
Returns
-------
The shortest list of points (x, y) linking the initial and final points
given as input with only turns of a defined radius and straight line.
In the form of a (2xn) numpy array.
"""
options = self.all_options(start, end)
dubins_path, straight = min(options, key=lambda x: x[0])[1:]
return self.generate_points(start, end, dubins_path, straight)
def generate_points(self, start, end, dubins_path, straight):
"""
Transforms the dubins path in a succession of points in the 2D plane.
Parameters
----------
start: tuple
In the form (x, y, psi), with psi in radians.
The representation of the inital point.
end: tuple
In the form (x, y, psi), with psi in radians.
The representation of the final point.
dubins_path: tuple
The representation of the dubins path in the form of a tuple
containing:
- the angle of the turn in the first circle, in rads.
- the angle of the turn in the last circle, in rads.
- the angle of the turn in the central circle, in rads, or the
length of the central segment if straight is true.
straight: bool
True if their is a central segment in the dubins path.
Returns
-------
The shortest list of points (x, y) linking the initial and final points
given as input with only turns of a defined radius and straight line.
In the form of a (2xn) numpy array.
"""
if straight:
return self.generate_points_straight(start, end, dubins_path)
return self.generate_points_curve(start, end, dubins_path)
def lsl(self, start, end, center_0, center_2):
"""
Left-Straight-Left trajectories.
First computes the poisition of the centers of the turns, and then uses
the fact that the vector defined by the distance between the centers
gives the direction and distance of the straight segment.
.. image:: img/twoturnssame.svg
Parameters
----------
start : tuple
(x, y, psi) coordinates of the inital point.
end : tuple
(x, y, psi) coordinates of the final point.
center_0 : tuple
(x, y) coordinates of the center of the first turn.
center_2 : tuple
(x, y) coordinates of the center of the last turn.
Returns
-------
total_len : float
The total distance of this path.
(beta_0, beta_2, straight_dist) : tuple
The dubins path, i.e. the angle of the first turn, the angle of the
last turn, and the length of the straight segment.
straight : bool
True, to indicate that this path contains a straight segment.
"""
straight_dist = dist(center_0, center_2)
alpha = np.arctan2((center_2-center_0)[1], (center_2-center_0)[0])
beta_2 = (end[2]-alpha)%(2*np.pi)
beta_0 = (alpha-start[2])%(2*np.pi)
total_len = self.radius*(beta_2+beta_0)+straight_dist
return (total_len, (beta_0, beta_2, straight_dist), True)
def rsr(self, start, end, center_0, center_2):
"""
Right-Straight-Right trajectories.
First computes the poisition of the centers of the turns, and then uses
the fact that the vector defined by the distance between the centers
gives the direction and distance of the straight segment.
.. image:: img/twoturnssame.svg
Parameters
----------
start : tuple
(x, y, psi) coordinates of the inital point.
end : tuple
(x, y, psi) coordinates of the final point.
center_0 : tuple
(x, y) coordinates of the center of the first turn.
center_2 : tuple
(x, y) coordinates of the center of the last turn.
Returns
-------
total_len : float
The total distance of this path.
(beta_0, beta_2, straight_dist) : tuple
The dubins path, i.e. the angle of the first turn, the angle of the
last turn, and the length of the straight segment.
straight : bool
True, to indicate that this path contains a straight segment.
"""
straight_dist = dist(center_0, center_2)
alpha = np.arctan2((center_2-center_0)[1], (center_2-center_0)[0])
beta_2 = (-end[2]+alpha)%(2*np.pi)
beta_0 = (-alpha+start[2])%(2*np.pi)
total_len = self.radius*(beta_2+beta_0)+straight_dist
return (total_len, (-beta_0, -beta_2, straight_dist), True)
def rsl(self, start, end, center_0, center_2):
"""
Right-Straight-Left trajectories.
Because of the change in turn direction, it is a little more complex to
compute than in the RSR or LSL cases. First computes the position of
the centers of the turns, and then uses the rectangle triangle defined
by the point between the two circles, the center point of one circle
and the tangeancy point of this circle to compute the straight segment
distance.
.. image:: img/twoturnsopposite.svg
Parameters
----------
start : tuple
(x, y, psi) coordinates of the inital point.
end : tuple
(x, y, psi) coordinates of the final point.
center_0 : tuple
(x, y) coordinates of the center of the first turn.
center_2 : tuple
(x, y) coordinates of the center of the last turn.
Returns
-------
total_len : float
The total distance of this path.
(beta_0, beta_2, straight_dist) : tuple
The dubins path, i.e. the angle of the first turn, the angle of the
last turn, and the length of the straight segment.
straight : bool
True, to indicate that this path contains a straight segment.
"""
median_point = (center_2 - center_0)/2
psia = np.arctan2(median_point[1], median_point[0])
half_intercenter = np.linalg.norm(median_point)
if half_intercenter < self.radius:
return (float('inf'), (0, 0, 0), True)
alpha = np.arccos(self.radius/half_intercenter)
beta_0 = -(psia+alpha-start[2]-np.pi/2)%(2*np.pi)
beta_2 = (np.pi+end[2]-np.pi/2-alpha-psia)%(2*np.pi)
straight_dist = 2*(half_intercenter**2-self.radius**2)**.5
total_len = self.radius*(beta_2+beta_0)+straight_dist
return (total_len, (-beta_0, beta_2, straight_dist), True)
def lsr(self, start, end, center_0, center_2):
"""
Left-Straight-Right trajectories.
Because of the change in turn direction, it is a little more complex to
compute than in the RSR or LSL cases. First computes the poisition of
the centers of the turns, and then uses the rectangle triangle defined
by the point between the two circles, the center point of one circle
and the tangeancy point of this circle to compute the straight segment
distance.
.. image:: img/twoturnsopposite.svg
Parameters
----------
start : tuple
(x, y, psi) coordinates of the inital point.
end : tuple
(x, y, psi) coordinates of the final point.
center_0 : tuple
(x, y) coordinates of the center of the first turn.
center_2 : tuple
(x, y) coordinates of the center of the last turn.
Returns
-------
total_len : float
The total distance of this path.
(beta_0, beta_2, straight_dist) : tuple
The dubins path, i.e. the angle of the first turn, the angle of the
last turn, and the length of the straight segment.
straight : bool
True, to indicate that this path contains a straight segment.
"""
median_point = (center_2 - center_0)/2
psia = np.arctan2(median_point[1], median_point[0])
half_intercenter = np.linalg.norm(median_point)
if half_intercenter < self.radius:
return (float('inf'), (0, 0, 0), True)
alpha = np.arccos(self.radius/half_intercenter)
beta_0 = (psia-alpha-start[2]+np.pi/2)%(2*np.pi)
beta_2 = (.5*np.pi-end[2]-alpha+psia)%(2*np.pi)
straight_dist = 2*(half_intercenter**2-self.radius**2)**.5
total_len = self.radius*(beta_2+beta_0)+straight_dist
return (total_len, (beta_0, -beta_2, straight_dist), True)
def lrl(self, start, end, center_0, center_2):
"""
Left-right-Left trajectories.
Using the isocele triangle made by the centers of the three circles,
computes the required angles.
.. image:: img/threeturns.svg
Parameters
----------
start : tuple
(x, y, psi) coordinates of the inital point.
end : tuple
(x, y, psi) coordinates of the final point.
center_0 : tuple
(x, y) coordinates of the center of the first turn.
center_2 : tuple
(x, y) coordinates of the center of the last turn.
Returns
-------
total_len : float
The total distance of this path.
(beta_0, beta_2, straight_dist) : tuple
The dubins path, i.e. the angle of the first turn, the angle of the
last turn, and the length of the straight segment.
straight : bool
False, to indicate that this path does not contain a straight part.
"""
dist_intercenter = dist(center_0, center_2)
intercenter = (center_2 - center_0)/2
psia = np.arctan2(intercenter[1], intercenter[0])
if 2*self.radius < dist_intercenter > 4*self.radius:
return (float('inf'), (0, 0, 0), False)
gamma = 2*np.arcsin(dist_intercenter/(4*self.radius))
beta_0 = (psia-start[2]+np.pi/2+(np.pi-gamma)/2)%(2*np.pi)
beta_1 = (-psia+np.pi/2+end[2]+(np.pi-gamma)/2)%(2*np.pi)
total_len = (2*np.pi-gamma+abs(beta_0)+abs(beta_1))*self.radius
return (total_len,
(beta_0, beta_1, 2*np.pi-gamma),
False)
def rlr(self, start, end, center_0, center_2):
"""
Right-left-right trajectories.
Using the isocele triangle made by the centers of the three circles,
computes the required angles.
.. image:: img/threeturns.svg
Parameters
----------
start : tuple
(x, y, psi) coordinates of the inital point.
end : tuple
(x, y, psi) coordinates of the final point.
center_0 : tuple
(x, y) coordinates of the center of the first turn.
center_2 : tuple
(x, y) coordinates of the center of the last turn.
Returns
-------
total_len : float
The total distance of this path.
(beta_0, beta_2, straight_dist) : tuple
The dubins path, i.e. the angle of the first turn, the angle of the
last turn, and the length of the straight segment.
straight : bool
False, to indicate that this path does not contain a straight part.
"""
dist_intercenter = dist(center_0, center_2)
intercenter = (center_2 - center_0)/2
psia = np.arctan2(intercenter[1], intercenter[0])
if 2*self.radius < dist_intercenter > 4*self.radius:
return (float('inf'), (0, 0, 0), False)
gamma = 2*np.arcsin(dist_intercenter/(4*self.radius))
beta_0 = -((-psia+(start[2]+np.pi/2)+(np.pi-gamma)/2)%(2*np.pi))
beta_1 = -((psia+np.pi/2-end[2]+(np.pi-gamma)/2)%(2*np.pi))
total_len = (2*np.pi-gamma+abs(beta_0)+abs(beta_1))*self.radius
return (total_len,
(beta_0, beta_1, 2*np.pi-gamma),
False)
def find_center(self, point, side):
"""
Given an initial position, and the direction of the turn, computes the
center of the circle with turn radius self.radius passing by the intial
point.
Parameters
----------
point : tuple
In the form (x, y, psi), with psi in radians.
The representation of the inital point.
side : Char
Either 'L' to indicate a left turn, or 'R' for a right turn.
Returns
-------
coordinates : 2x1 Array Like
Coordinates of the center of the circle describing the turn.
"""
assert side in 'LR'
angle = point[2] + (np.pi/2 if side == 'L' else -np.pi/2)
return np.array((point[0] + np.cos(angle)*self.radius,
point[1] + np.sin(angle)*self.radius))
def generate_points_straight(self, start, end, path):
"""
For the 4 first classes of dubins paths, containing in the middle a
straight section.
Parameters
----------
start : tuple
Start position in the form (x, y, psi).
end : tuple
End position in the form (x, y, psi).
path : tuple
The computed dubins path, a tuple containing:
- the angle of the turn in the first circle, in rads
- the angle of the turn in the last circle, in rads
- the length of the straight line in between
A negative angle means a right turn (antitrigonometric), and a
positive angle represents a left turn.
Returns
-------
The shortest list of points (x, y) linking the initial and final points
given as input with only turns of a defined radius and straight line.
In the form of a (2xn) numpy array.
"""
total = self.radius*(abs(path[1])+abs(path[0]))+path[2] # Path length
center_0 = self.find_center(start, 'L' if path[0] > 0 else 'R')
center_2 = self.find_center(end, 'L' if path[1] > 0 else 'R')
# We first need to find the points where the straight segment starts
if abs(path[0]) > 0:
angle = start[2]+(abs(path[0])-np.pi/2)*np.sign(path[0])
ini = center_0+self.radius*np.array([np.cos(angle), np.sin(angle)])
else: ini = np.array(start[:2])
# We then identify its end
if abs(path[1]) > 0:
angle = end[2]+(-abs(path[1])-np.pi/2)*np.sign(path[1])
fin = center_2+self.radius*np.array([np.cos(angle), np.sin(angle)])
else: fin = np.array(end[:2])
dist_straight = dist(ini, fin)
# We can now generate all the points with the desired precision
points = []
for x in np.arange(0, total, self.point_separation):
if x < abs(path[0])*self.radius: # First turn
points.append(self.circle_arc(start, path[0], center_0, x))
elif x > total - abs(path[1])*self.radius: # Last turn
points.append(self.circle_arc(end, path[1], center_2, x-total))
else: # Straight segment
coeff = (x-abs(path[0])*self.radius)/dist_straight
points.append(coeff*fin + (1-coeff)*ini)
points.append(end[:2])
return np.array(points)
def generate_points_curve(self, start, end, path):
"""
For the two last paths, where the trajectory is a succession of 3
turns. First computing the position of the center of the central turn,
then using the three circles to apply the angles given in the path
argument.
Parameters
----------
start : tuple
Start position in the form (x, y, psi).
end : tuple
End position in the form (x, y, psi).
path : tuple
The computed dubins path, a tuple containing:
- the angle of the turn in the first circle, in rads
- the angle of the turn in the last circle, in rads
- the angle of the turn in the central circle, in rads
A negative angle means a right turn (antitrigonometric), and a
positive angle represents a left turn.
Returns
-------
The shortest list of points (x, y) linking the initial and final points
given as input with only turns of a defined radius. In the form of a
(2xn) numpy array.
"""
total = self.radius*(abs(path[1])+abs(path[0])+abs(path[2]))
center_0 = self.find_center(start, 'L' if path[0] > 0 else 'R')
center_2 = self.find_center(end, 'L' if path[1] > 0 else 'R')
intercenter = dist(center_0, center_2)
center_1 = (center_0 + center_2)/2 +\
np.sign(path[0])*ortho((center_2-center_0)/intercenter)\
*(4*self.radius**2-(intercenter/2)**2)**.5
psi_0 = np.arctan2((center_1 - center_0)[1],
(center_1 - center_0)[0])-np.pi
points = []
for x in np.arange(0, total, self.point_separation):
if x < abs(path[0])*self.radius: # First turn
points.append(self.circle_arc(start, path[0], center_0, x))
elif x > total - abs(path[1])*self.radius: # Last turn
points.append(self.circle_arc(end, path[1], center_2, x-total))
else: # Middle Turn
angle = psi_0-np.sign(path[0])*(x/self.radius-abs(path[0]))
vect = np.array([np.cos(angle), np.sin(angle)])
points.append(center_1+self.radius*vect)
points.append(end[:2])
return np.array(points)
def circle_arc(self, reference, beta, center, x):
"""
Returns the point located on the circle of center center and radius
defined by the class, at the angle x.
Parameters
----------
reference : float
Angular starting point, in radians.
beta : float
Used actually only to know the direction of the rotation, and hence
to know if the path needs to be added or substracted from the
reference angle.
center : tuple
(x, y) coordinates of the center of the circle from which we need a
point on the circumference.
x : float
The lenght of the path on the circle.
Returns
-------
The coordinates of the point on the circle, in the form of a tuple.
"""
angle = reference[2]+((x/self.radius)-np.pi/2)*np.sign(beta)
vect = np.array([np.cos(angle), np.sin(angle)])
return center+self.radius*vect
|
{"hexsha": "55f3c63f6b1a3be212b1efb8e8a1f195356fb6cc", "size": 23181, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/dubins.py", "max_stars_repo_name": "FelicienC/RRT-Dubins", "max_stars_repo_head_hexsha": "db074c15ee51d1dda73c327a68b192dd9001054e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-04-11T04:11:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T11:49:02.000Z", "max_issues_repo_path": "code/dubins.py", "max_issues_repo_name": "FelicienC/RRT-Dubins", "max_issues_repo_head_hexsha": "db074c15ee51d1dda73c327a68b192dd9001054e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/dubins.py", "max_forks_repo_name": "FelicienC/RRT-Dubins", "max_forks_repo_head_hexsha": "db074c15ee51d1dda73c327a68b192dd9001054e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-12-23T08:53:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T13:08:31.000Z", "avg_line_length": 41.5430107527, "max_line_length": 80, "alphanum_fraction": 0.5734869074, "include": true, "reason": "import numpy", "num_tokens": 5565}
|
import pulp
import numpy as np
import nltk
#nltk.download('punkt')
#nltk.download('stopwords')
from nltk.data import load as LPickle
import sys, os.path as path
from nltk.tokenize import word_tokenize
from nltk.util import ngrams
sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
from summarizer.utils.data_helpers import extract_ngrams2, prune_ngrams, untokenize
from summarizer.algorithms.base import Sentence
from _summarizer import Summarizer
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
# Pre-trained Punkt sentence tokenizer for English, loaded once at import
# time via nltk.data.load (aliased LPickle above).  Requires the nltk
# 'punkt' resource to be downloaded beforehand.
sent_detector = LPickle('tokenizers/punkt/english.pickle')
class ExtractiveUpperbound(Summarizer):
    """Extractive upper-bound summarizer.

    Selects, via an integer linear program, the subset of document
    sentences that maximises coverage of the reference-summary n-grams
    under a word-length budget — the best score any extractive system
    could reach on the given references.
    """

    def __init__(self, language):
        self.sentences = []
        self.docs = []
        self.models = []
        self.doc_sent_dict = {}
        self.ref_ngrams = []
        self.LANGUAGE = language
        # Stemmer and stopword list for the requested language.
        self.stemmer = SnowballStemmer(self.LANGUAGE)
        self.stoplist = set(stopwords.words(self.LANGUAGE))

    def __call__(self, docs, models, length, ngram_type=2):
        """Summarize ``docs`` against the reference ``models``.

        Args:
            docs: list of [filename, [sent, ...]] document entries
            models: list of [filename, [sent, ...]] reference entries
            length: summary length budget, in words
            ngram_type (int): n-gram order used for coverage (default 2)

        Returns:
            List of selected sentences as untokenized strings.
        """
        self.sum_length = int(length)
        self.load_data(docs, models)
        self.get_ref_ngrams(ngram_type)
        # Drop reference n-grams made only of stopwords before building the ILP.
        self.ref_ngrams = prune_ngrams(self.ref_ngrams, self.stoplist, ngram_type)
        #self.prune_sentences(remove_citations=True, remove_redundancy=True)
        self.sentences_idx = range(len(self.sentences))
        self.ref_ngrams_idx = range(len(self.ref_ngrams))
        summary_idx = self.solve_ilp(ngram_type)
        summary_txt = self.get_summary_text(summary_idx)
        return summary_txt

    def load_data(self, docs, models):
        '''
        Load the data into
        :doc_sent_dict
        :sentences
        Parameters:
        docs: List of list of docs each doc is represented with its filename and sents
            [['filename1', ['sent1','sent2','sent3']],['filename2'], ['sent1','sent2','sent3']] ]
        models: List of list of models each doc is represented with its filename and sents
            [['filename1', ['sent1','sent2','sent3']],['filename2'], ['sent1','sent2','sent3']] ]
        '''
        self.docs = docs
        self.models = models
        self.sentences = []
        self.doc_sent_dict = {}

        doc_id = 0
        for doc_id, doc in enumerate(docs):
            _, doc_sents = doc
            # Offset of this document's first sentence in the flat list.
            total = len(self.sentences)
            for sent_id, sentence in enumerate(doc_sents):
                token_sentence = word_tokenize(sentence, self.LANGUAGE)
                sentence_s = Sentence(token_sentence, doc_id, sent_id+1)
                untokenized_form = untokenize(token_sentence)
                sentence_s.untokenized_form = untokenized_form
                sentence_s.length = len(untokenized_form.split(' '))
                self.doc_sent_dict[total+sent_id] = "%s_%s" % (str(doc_id), str(sent_id))
                self.sentences.append(sentence_s)

    def prune_sentences(self,
                        mininum_sentence_length=5,
                        remove_citations=True,
                        remove_redundancy=True,
                        imp_list=[]):
        """Prune the sentences.

        Remove the sentences that are shorter than a given length, redundant
        sentences and citations from entering the summary.

        Args:
            mininum_sentence_length (int): the minimum number of words for a
              sentence to enter the summary, defaults to 5
            remove_citations (bool): indicates that citations are pruned,
              defaults to True
            remove_redundancy (bool): indicates that redundant sentences are
              pruned, defaults to True
            imp_list (list): optional 0/1 mask; sentences with a 0 entry are
              dropped
        """
        pruned_sentences = []

        # loop over the sentences
        for i, sentence in enumerate(self.sentences):
            if imp_list:
                if imp_list[i] == 0:
                    continue
            # prune short sentences
            if sentence.length < mininum_sentence_length:
                continue

            # prune citations: sentence fully wrapped in quotes.
            # Bug fix: the closing-quote test previously re-checked
            # first_token instead of last_token.
            first_token, last_token = sentence.tokens[0], sentence.tokens[-1]
            if remove_citations and \
               (first_token == u"``" or first_token == u'"') and \
               (last_token == u"''" or last_token == u'"'):
                continue

            # prune identical and almost identical sentences
            if remove_redundancy:
                is_redundant = False
                for prev_sentence in pruned_sentences:
                    if sentence.tokens == prev_sentence.tokens:
                        is_redundant = True
                        break
                if is_redundant:
                    continue

            # otherwise add the sentence to the pruned sentence container
            pruned_sentences.append(sentence)

        self.sentences = pruned_sentences

    def get_ref_ngrams(self, N):
        # Collect the stemmed N-grams of every reference summary.
        for _, summary in self.models:
            self.ref_ngrams.extend(extract_ngrams2(summary, self.stemmer, self.LANGUAGE, N))

    def get_summary_text(self, summary_idx):
        return [ self.sentences[idx].untokenized_form for idx in summary_idx]

    def solve_ilp(self, N):
        """Maximise reference n-gram coverage under the length budget."""
        # build the A matrix: a_ij is 1 if j-th gram appears in the i-th sentence
        A = np.zeros((len(self.sentences_idx), len(self.ref_ngrams_idx)))
        for i in self.sentences_idx:
            sent = self.sentences[i].untokenized_form
            sngrams = list(extract_ngrams2([sent], self.stemmer, self.LANGUAGE, N))
            for j in self.ref_ngrams_idx:
                if self.ref_ngrams[j] in sngrams:
                    A[i][j] = 1

        # Define ILP variable, x_i is 1 if sentence i is selected, z_j is 1 if gram j appears in the created summary
        x = pulp.LpVariable.dicts('sentences', self.sentences_idx, lowBound=0, upBound=1, cat=pulp.LpInteger)
        z = pulp.LpVariable.dicts('grams', self.ref_ngrams_idx, lowBound=0, upBound=1, cat=pulp.LpInteger)

        # Define ILP problem, maximum coverage of grams from the reference summaries
        prob = pulp.LpProblem("ExtractiveUpperBound", pulp.LpMaximize)
        prob += pulp.lpSum(z[j] for j in self.ref_ngrams_idx)

        # Define ILP constraints, length constraint and consistency constraint (impose that z_j is 1 if j
        # appears in the created summary)
        prob += pulp.lpSum(x[i] * self.sentences[i].length for i in self.sentences_idx) <= self.sum_length
        for j in self.ref_ngrams_idx:
            prob += pulp.lpSum(A[i][j] * x[i] for i in self.sentences_idx) >= z[j]

        # Solve ILP problem and post-processing to get the summary
        try:
            print('Solving using CPLEX')
            prob.solve(pulp.CPLEX(msg=0))
        except Exception:
            # CPLEX is commercial and often unavailable; fall back to GLPK.
            # Bug fix: the previous bare ``except:`` also swallowed
            # KeyboardInterrupt / SystemExit.
            print('Fall back to GLPK')
            prob.solve(pulp.GLPK(msg=0))

        summary_idx = []
        for idx in self.sentences_idx:
            if x[idx].value() == 1.0:
                summary_idx.append(idx)

        return summary_idx
|
{"hexsha": "ea9e98d910e20c1bf27195a9ebfb050fa48338e6", "size": 7000, "ext": "py", "lang": "Python", "max_stars_repo_path": "summarizer/algorithms/upper_bound_ilp.py", "max_stars_repo_name": "UKPLab/acl2017-interactive_summarizer", "max_stars_repo_head_hexsha": "0f5ab42e3b9a89015147d194916eaba00c56623d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2017-06-02T08:39:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T09:48:16.000Z", "max_issues_repo_path": "summarizer/algorithms/upper_bound_ilp.py", "max_issues_repo_name": "UKPLab/acl2017-interactive_summarizer", "max_issues_repo_head_hexsha": "0f5ab42e3b9a89015147d194916eaba00c56623d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "summarizer/algorithms/upper_bound_ilp.py", "max_forks_repo_name": "UKPLab/acl2017-interactive_summarizer", "max_forks_repo_head_hexsha": "0f5ab42e3b9a89015147d194916eaba00c56623d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2017-06-22T07:48:27.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-23T17:44:52.000Z", "avg_line_length": 38.6740331492, "max_line_length": 116, "alphanum_fraction": 0.6154285714, "include": true, "reason": "import numpy", "num_tokens": 1586}
|
import os
import pandas as pd
import numpy as np
import re
class read:
    """Readers for the various AERONET / AERONET-OC CSV export formats.

    Each method parses one flavour of AERONET export into a pandas
    DataFrame indexed by observation date, with a three-level column
    MultiIndex (l0: original header, l1: data type such as
    'aot'/'std'/'wavelength', l2: wavelength) sorted by increasing
    wavelength.
    """
    def __init__(self,file):
        # Path of the AERONET CSV file to parse.
        self.file = file

    def read_aeronet_ocv3(self, skiprows=8):
        ''' Read and format in pandas data.frame the standard AERONET-OC data '''
        # NOTE(review): pd.datetime was deprecated in pandas 1.x and removed
        # in 2.x (use datetime.datetime.strptime); likewise date_parser is
        # deprecated in recent pandas — confirm the pinned pandas version.
        dateparse = lambda x: pd.datetime.strptime(x, "%d:%m:%Y %H:%M:%S")
        ifile=self.file
        # Header row: original column names (first 3 dropped), 'site' prepended.
        h1 = pd.read_csv(ifile, skiprows=skiprows - 1, nrows=1).columns[3:]
        h1 = np.insert(h1,0,'site')
        # Data-type level: strip the bracketed wavelength from each header.
        # NOTE(review): these replace patterns are regexes and should be raw
        # strings (they emit invalid-escape warnings on modern Python).
        data_type = h1.str.replace('\[.*\]', '')
        data_type = data_type.str.replace('Exact_Wave.*', 'wavelength')
        #convert into float to order the dataframe with increasing wavelength
        h2 = h1.str.replace('.*\[', '')
        h2 = h2.str.replace('nm\].*', '')
        h2 = h2.str.replace('Exact_Wavelengths\(um\)_','')
        h2 = pd.to_numeric(h2, errors='coerce') #h2.str.extract('(\d+)').astype('float')
        h2 = h2.fillna('').T
        # Body of the file; columns 1 and 2 (date, time) are merged into 'date'.
        df = pd.read_csv(ifile, skiprows=skiprows, na_values=['N/A', -999.0,-9.999999 ], parse_dates={'date': [1, 2]},
                         date_parser=dateparse, index_col=False)
        # df['site'] = site
        # df.set_index(['site', 'date'],inplace=True)
        df.set_index('date', inplace=True)
        # Attach the (header, type, wavelength) MultiIndex and sort by wavelength.
        tuples = list(zip(h1, data_type, h2))
        df.columns = pd.MultiIndex.from_tuples(tuples, names=['l0', 'l1', 'l2'])
        df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')
        df.columns = pd.MultiIndex.from_tuples([(x[0], x[1], x[2]) for x in df.columns])
        df.sort_index(axis=1, level=2, inplace=True)
        return df

    def read_aeronet_oc(self, skiprows=13):
        ''' Read and format in pandas data.frame the standard AERONET-OC data '''
        # NOTE(review): pd.datetime / date_parser deprecations — see
        # read_aeronet_ocv3.
        dateparse = lambda x: pd.datetime.strptime(x, "%d:%m:%Y %H:%M:%S")
        ifile=self.file
        # Two stacked header rows: names (h1) and wavelengths (h2); h2's
        # surplus columns are appended to h1.
        h1 = pd.read_csv(ifile, skiprows=skiprows - 2, nrows=1).columns[2:]
        h2 = pd.read_csv(ifile, skiprows=skiprows - 1, nrows=1).columns[2:]
        h1 = h1.append(h2[len(h1):])
        data_type = h1.str.replace('\(.*\)', '')
        data_type = data_type.str.replace('ExactWave.*', 'oc_wavelength')
        #convert into float to order the dataframe with increasing wavelength
        h2 = h2.str.extract('(\d+)').astype('float')
        h2 = h2.fillna('')
        df = pd.read_csv(ifile, skiprows=skiprows, na_values=['N/A', -999.0,-9.999999 ], parse_dates={'date': [0, 1]},
                         date_parser=dateparse, index_col=False)
        # df['site'] = site
        # df.set_index(['site', 'date'],inplace=True)
        df.set_index('date', inplace=True)
        tuples = list(zip(h1, data_type, h2))
        df.columns = pd.MultiIndex.from_tuples(tuples, names=['l0', 'l1', 'l2'])
        df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')
        df.sort_index(axis=1, level=2, inplace=True)
        return df

    def read_aeronet(self, skiprows=6):
        ''' Read and format in pandas data.frame the V3 AERONET data '''
        ifile=self.file
        df = pd.read_csv(ifile, skiprows=skiprows, nrows=1) # read just first line for columns
        columns = df.columns.tolist() # get the columns
        cols_to_use = columns[:len(columns) - 1] # drop the last one
        df = pd.read_csv(ifile, skiprows=skiprows, usecols=cols_to_use, index_col=False, na_values=['N/A', -999.0])
        df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')
        df.rename(columns={'AERONET_Site_Name': 'site', 'Last_Processing_Date(dd/mm/yyyy)': 'Last_Processing_Date'},
                  inplace=True)
        # Build the timestamp index from the first two (date, time) columns.
        # NOTE: 'format' shadows the builtin of the same name within this method.
        format = "%d:%m:%Y %H:%M:%S"
        df['date'] = pd.to_datetime(df[df.columns[0]] + ' ' + df[df.columns[1]], format=format)
        # df.set_index(['site','date'], inplace=True)
        df.set_index('date', inplace=True)
        df = df.drop(df.columns[[0, 1]], axis=1)
        # df['year'] = df.index.get_level_values(1).year
        # cleaning up: drop bookkeeping columns not needed for analysis.
        df.drop(list(df.filter(regex='Input')), axis=1, inplace=True)
        df.drop(list(df.filter(regex='Empty')), axis=1, inplace=True)
        df.drop(list(df.filter(regex='Day')), axis=1, inplace=True)
        # indexing columns with spectral values
        data_type = df.columns.str.replace('AOD.*nm', 'aot')
        data_type = data_type.str.replace('Exact_Wave.*', 'wavelength')
        data_type = data_type.str.replace('Triplet.*[0-9]', 'std')
        data_type = data_type.str.replace(r'^(?!aot|std|wavelength).*$', '')
        # NOTE(review): Index.str.extract returns a DataFrame on modern
        # pandas, which changes how the zip below behaves — verify against
        # the pandas version this project pins.
        wl_type = df.columns.str.extract('(\d+)').astype('float')
        wl_type = wl_type.fillna('')
        tuples = list(zip(df.columns, data_type, wl_type))
        df.columns = pd.MultiIndex.from_tuples(tuples, names=['l0', 'l1', 'l2'])
        if 'wavelength' in df.columns.levels[1]:
            df.loc[:, (slice(None), 'wavelength',)] = df.loc[:, (slice(None), 'wavelength')] * 1000 # convert into nm
        df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')
        df.sort_index(axis=1, level=2, inplace=True)
        return df

    def read_aeronet_inv(self, skiprows=6):
        ''' Read and format in pandas data.frame the V3 Aerosol Inversion AERONET data '''
        ifile=self.file
        df = pd.read_csv(ifile, skiprows=skiprows, nrows=1) # read just first line for columns
        columns = df.columns.tolist() # get the columns
        cols_to_use = columns[:len(columns) - 1] # drop the last one
        df = pd.read_csv(ifile, skiprows=skiprows, usecols=cols_to_use, index_col=False, na_values=['N/A', -999.0])
        df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')
        df.rename(columns={'AERONET_Site_Name': 'site', 'Last_Processing_Date(dd/mm/yyyy)': 'Last_Processing_Date',},
                  inplace=True)
        # Timestamp built from columns 1 and 2 here (column 0 is the site name).
        format = "%d:%m:%Y %H:%M:%S"
        df['date'] = pd.to_datetime(df[df.columns[1]] + ' ' + df[df.columns[2]], format=format)
        # df.set_index(['site','date'], inplace=True)
        df.set_index('date', inplace=True)
        df = df.drop(df.columns[[0, 1]], axis=1)
        # df['year'] = df.index.get_level_values(1).year
        # cleaning up
        df.drop(list(df.filter(regex='Input')), axis=1, inplace=True)
        df.drop(list(df.filter(regex='Empty')), axis=1, inplace=True)
        df.drop(list(df.filter(regex='Day')), axis=1, inplace=True)
        df.drop(list(df.filter(regex='Angle_Bin')), axis=1, inplace=True)
        # indexing columns with spectral values
        data_type = df.columns.str.replace('AOD.*nm', 'aot')
        data_type = data_type.str.replace('Exact_Wave.*', 'wavelength')
        data_type = data_type.str.replace('Triplet.*[0-9]', 'std')
        data_type = data_type.str.replace(r'^(?!aot|std|wavelength).*$', '')
        wl_type = df.columns.str.extract('(\d+)').astype('float')
        wl_type = wl_type.fillna('')
        tuples = list(zip(df.columns, data_type, wl_type))
        df.columns = pd.MultiIndex.from_tuples(tuples, names=['l0', 'l1', 'l2'])
        if 'wavelength' in df.columns.levels[1]:
            df.loc[:, (slice(None), 'wavelength',)] = df.loc[:, (slice(None), 'wavelength')] * 1000 # convert into nm
        df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')
        df.sort_index(axis=1, level=2, inplace=True)
        return df
|
{"hexsha": "1e4d6d4d18b7a0551435b27f0037f456ad02809a", "size": 7245, "ext": "py", "lang": "Python", "max_stars_repo_path": "aeronet_visu/data_loading.py", "max_stars_repo_name": "Tristanovsk/aeronet_visu", "max_stars_repo_head_hexsha": "03905aa3f9aacae501f0af378afd885ca25cd981", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aeronet_visu/data_loading.py", "max_issues_repo_name": "Tristanovsk/aeronet_visu", "max_issues_repo_head_hexsha": "03905aa3f9aacae501f0af378afd885ca25cd981", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aeronet_visu/data_loading.py", "max_forks_repo_name": "Tristanovsk/aeronet_visu", "max_forks_repo_head_hexsha": "03905aa3f9aacae501f0af378afd885ca25cd981", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.6241610738, "max_line_length": 118, "alphanum_fraction": 0.595721187, "include": true, "reason": "import numpy", "num_tokens": 2051}
|
// Copyright 2011-2012 Renato Tegon Forti
// Copyright 2014 Renato Tegon Forti, Antony Polukhin.
// Copyright 2015-2020 Antony Polukhin.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt
// or copy at http://www.boost.org/LICENSE_1_0.txt)
// For more information, see http://www.boost.org
#include "../example/b2_workarounds.hpp"
#include <boost/dll.hpp>
#include <boost/core/lightweight_test.hpp>
// Unit Tests
extern "C" void BOOST_SYMBOL_EXPORT exef() {
}
int main(int argc, char* argv[])
{
using namespace boost::dll;
boost::dll::fs::path shared_library_path = b2_workarounds::first_lib_from_argv(argc, argv);
BOOST_TEST(shared_library_path.string().find("test_library") != std::string::npos);
BOOST_TEST(b2_workarounds::is_shared_library(shared_library_path));
std::cout << "Library: " << shared_library_path;
{
shared_library sl(shared_library_path);
BOOST_TEST(sl.has("say_hello"));
BOOST_TEST(sl.has("lib_version"));
BOOST_TEST(sl.has("integer_g"));
BOOST_TEST(sl.has(std::string("integer_g")));
BOOST_TEST(!sl.has("i_do_not_exist"));
BOOST_TEST(!sl.has(std::string("i_do_not_exist")));
}
{
shared_library sl(program_location());
BOOST_TEST(sl.has("exef"));
BOOST_TEST(!sl.has("i_do_not_exist"));
}
exef(); // Make sure that this function still callable in traditional way
return boost::report_errors();
}
|
{"hexsha": "4f9831770a3b1c6dad3d4de36673901aee0d3c3d", "size": 1509, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/dll/test/shared_library_search_symbol_test.cpp", "max_stars_repo_name": "armdevvel/boost", "max_stars_repo_head_hexsha": "30d0930951181ef5bc5aad2231ebac8575db0720", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-04-28T15:15:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-28T15:15:28.000Z", "max_issues_repo_path": "libs/dll/test/shared_library_search_symbol_test.cpp", "max_issues_repo_name": "armdevvel/boost", "max_issues_repo_head_hexsha": "30d0930951181ef5bc5aad2231ebac8575db0720", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2017-05-23T08:01:11.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-06T20:49:05.000Z", "max_forks_repo_path": "libs/dll/test/shared_library_search_symbol_test.cpp", "max_forks_repo_name": "armdevvel/boost", "max_forks_repo_head_hexsha": "30d0930951181ef5bc5aad2231ebac8575db0720", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2015-11-03T14:12:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-22T19:20:54.000Z", "avg_line_length": 30.18, "max_line_length": 95, "alphanum_fraction": 0.6845593108, "num_tokens": 373}
|
import NuralNetwork as N
import numpy as np
import cv2 as cv2
import sys
import glob
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
# Clear the console before training starts (project helper).
N.clearScreen()
# Load the image dataset.  NOTE(review): assumes X is (features, m) with
# examples in columns, since X.shape[0] is used as the input layer size —
# confirm against NuralNetwork.readImageData.
X,Y= N.readImageData()
# Network architecture: input layer sized from the data, three hidden
# layers (20, 7, 5 units) and a single sigmoid output unit.
layers_dims = [X.shape[0], 20, 7, 5, 1] # 4-layer model
# Train the L-layer network and run prediction with the learned parameters.
parameters = N.L_layer_model(X, Y, layers_dims, num_iterations = 500, print_cost = True)
N.predictRunningImage(parameters)
|
{"hexsha": "0713c4018ef69739106d75a8b1dd8f28091dfbc5", "size": 421, "ext": "py", "lang": "Python", "max_stars_repo_path": "31_NuralNetwork_FaceClassifier_MultiHidden_Layer/main.py", "max_stars_repo_name": "ManMohan291/PyProgram", "max_stars_repo_head_hexsha": "edcaa927bd70676bd14355acad7262ae2d32b8e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-09-07T17:44:54.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-07T17:44:57.000Z", "max_issues_repo_path": "31_NuralNetwork_FaceClassifier_MultiHidden_Layer/main.py", "max_issues_repo_name": "ManMohan291/PyProgram", "max_issues_repo_head_hexsha": "edcaa927bd70676bd14355acad7262ae2d32b8e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "31_NuralNetwork_FaceClassifier_MultiHidden_Layer/main.py", "max_forks_repo_name": "ManMohan291/PyProgram", "max_forks_repo_head_hexsha": "edcaa927bd70676bd14355acad7262ae2d32b8e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.0357142857, "max_line_length": 88, "alphanum_fraction": 0.7529691211, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 119}
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
class LinearRegression:
    """Ordinary least-squares regression fitted with batch gradient descent.

    Parameters
    ----------
    learning_rate : float
        Step size used for each gradient-descent update.
    n_iters : int
        Number of gradient-descent iterations performed by ``fit``.
    """

    def __init__(self, learning_rate=0.001, n_iters=1000):
        self.lr = learning_rate
        self.n_iters = n_iters
        self.weights = None
        self.bias = None

    def fit(self, X, y):
        """Learn weights and bias from X of shape (n_samples, n_features)."""
        n_samples, n_features = X.shape

        # Start from the zero model.
        self.weights = np.zeros(n_features)
        self.bias = 0

        inv_n = 1 / n_samples
        for _ in range(self.n_iters):
            predictions = np.dot(X, self.weights) + self.bias
            residuals = predictions - y

            # Gradients of the mean-squared-error cost.
            grad_w = inv_n * np.dot(X.T, residuals)
            grad_b = inv_n * np.sum(residuals)

            # Step against the gradient.
            self.weights = self.weights - self.lr * grad_w
            self.bias = self.bias - self.lr * grad_b

    def predict(self, X):
        """Return the linear model's predictions for X."""
        return np.dot(X, self.weights) + self.bias
# Importing the dataset
dataset = pd.read_csv('train.csv')
df_dataset = pd.DataFrame(dataset)
X_train = dataset.iloc[:, :-1].values
y_train = dataset.iloc[:, -1].values
X_test = pd.read_csv('test.csv').iloc[:, :].values
df_test = pd.DataFrame(X_test)
df_train = pd.DataFrame(X_train)
# number of null values in each
df_train.isnull().sum()
# rows cols datatype information
df_dataset.info()
# filling the missing value
'''
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
imputer = imputer.fit(X_train[:, 0:81])
X_train[:, 0:81] = imputer.transform(X_train[:, 0:81])
'''
# int
df_train[3] = df_train[3].fillna(df_train[3].mean())
df_test[3] = df_test[3].fillna(df_test[3].mean())
# droping the col with a lot of null values
df_train.drop([0],axis=1,inplace=True)
df_test.drop([0],axis=1,inplace=True)
df_train.drop([6],axis=1,inplace=True)
df_test.drop([6],axis=1,inplace=True)
df_train.drop([59],axis=1,inplace=True)
df_test.drop([59],axis=1,inplace=True)
df_train.drop([72],axis=1,inplace=True)
df_test.drop([72],axis=1,inplace=True)
df_train.drop([73],axis=1,inplace=True)
df_test.drop([73],axis=1,inplace=True)
df_train.drop([74],axis=1,inplace=True)
df_test.drop([74],axis=1,inplace=True)
#df_train.drop([74],axis=1,inplace=True)
#df_test.drop([74],axis=1,inplace=True)
# object (categorical data)
df_train[30] = df_train[30].fillna(df_train[30].mode()[0])
df_test[30] = df_test[30].fillna(df_test[30].mode()[0])
df_train[31] = df_train[31].fillna(df_train[31].mode()[0])
df_test[31] = df_test[31].fillna(df_test[31].mode()[0])
df_train[57] = df_train[57].fillna(df_train[57].mode()[0])
df_test[57] = df_test[57].fillna(df_test[57].mode()[0])
df_train[58] = df_train[58].fillna(df_train[58].mode()[0])
df_test[58] = df_test[58].fillna(df_test[58].mode()[0])
df_train[60] = df_train[60].fillna(df_train[60].mode()[0])
df_test[60] = df_test[60].fillna(df_test[60].mode()[0])
df_train[63] = df_train[63].fillna(df_train[63].mode()[0])
df_test[63] = df_test[63].fillna(df_test[63].mode()[0])
df_train[64] = df_train[64].fillna(df_train[64].mode()[0])
df_test[64] = df_test[64].fillna(df_test[64].mode()[0])
df_train.isnull().sum()
# categorical data (filling missing value)
df_train[25] = df_train[25].fillna(df_train[25].mode()[0])
df_test[25] = df_test[25].fillna(df_test[25].mode()[0])
df_train[26] = df_train[26].fillna(df_train[26].mode()[0])
df_test[26] = df_test[26].fillna(df_test[26].mode()[0])
df_train[32] = df_train[32].fillna(df_train[32].mode()[0])
df_test[32] = df_test[32].fillna(df_test[32].mode()[0])
df_train[33] = df_train[33].fillna(df_train[33].mode()[0])
df_test[33] = df_test[33].fillna(df_test[33].mode()[0])
df_train[35] = df_train[35].fillna(df_train[35].mode()[0])
df_test[35] = df_test[35].fillna(df_test[35].mode()[0])
df_train.isnull().sum()
# graph (visualization of null values)
import seaborn as sns
sns.heatmap( df_test.isnull() , yticklabels = False , cbar = False , cmap = "coolwarm" )
sns.heatmap( df_test.isnull() , yticklabels = False , cbar = False , cmap = "YlGnBu" )
df_test[47] = df_test[47].fillna(df_test[47].mean())
df_test[48] = df_test[48].fillna(df_test[48].mean())
# droping the col with null values
df_train.dropna(inplace = True)
df_test.dropna(inplace=True)
# concatenation of training and test data
df = pd.concat( [df_train,df_test] , axis = 0)
for fields in df.columns:
if df_dataset[(df_dataset.columns)[fields]].dtype == 'O':
continue
else:
df[fields] = pd.to_numeric(df[fields])
# Encoding categorical data
def encoding():
df_final = df
i=0
for fields in df.columns:
print(fields)
if df[fields].dtype == 'O':
df_1 = pd.get_dummies(df[fields] , drop_first = True)
else:
df_1 = df[fields].copy()
df.drop([fields] , axis=1 , inplace=True)
if i==0:
df_final = df_1.copy()
else:
df_final = pd.concat([df_final,df_1] , axis=1)
i = i+1
df_final = pd.concat([df,df_final],axis=1)
return df_final
df_data = encoding()
df_data = df_data.loc[: , ~df_data.columns.duplicated()]
df[1].value_counts
np.unique(df[1])
df[25].dtype
# Avoiding the Dummy Variable Trap
X = X[:, 1:]
# Splitting the dataset into the Training set and Test set
df_train = df_data.iloc[:1459, :].values
df_test = df_data.iloc[1459: , :].values
y_train = y_train[:1459 , ]
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
# Fit the scaler on the training rows only, then apply the same
# transformation to the test rows (no test-set leakage).
df_train = sc_X.fit_transform(df_train)
df_test = sc_X.transform(df_test)
# Manually centre/scale the target: subtract its mean, divide by its max.
# ``mean_value``/``max_value`` are reused below to undo the transform.
mean_value = sum(y_train)/len(y_train)
max_value = max(y_train)
y_train = (y_train - mean_value)/max_value
# Fitting Multiple Linear Regression to the Training set
# BUG FIX: this import was commented out, so ``LinearRegression()`` below
# raised NameError at runtime.
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(df_train, y_train)
y_pred = regressor.predict(df_test)
# Undo the manual target normalisation applied before training.
y_pred = y_pred*max_value + mean_value
temp_test = pd.read_csv('test.csv').iloc[:, :]
df_temp_test = pd.DataFrame(temp_test)
df_pred = pd.DataFrame(y_pred)
# Guard against any NaN predictions before writing the submission.
df_pred[0] = df_pred[0].fillna(df_pred[0].mean())
y_pred = df_pred.iloc[:, -1].values
# Assemble and persist the Kaggle submission file.
prediction = pd.DataFrame({"Id" : temp_test['Id'],"SalePrice" : y_pred})
prediction.to_csv('prediction.csv',index=False)
prediction.head()
prediction = pd.read_csv('prediction.csv')
|
{"hexsha": "579b87fbdf4847d15c688634f52da41bcaca8429", "size": 6625, "ext": "py", "lang": "Python", "max_stars_repo_path": "HOUSE_PRICES/house_prices.py", "max_stars_repo_name": "Mohit-007/KAGGLE-MACHINE-LEARNING", "max_stars_repo_head_hexsha": "f53ef40390fde09a0b783c6bc46fd6e6269c10fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "HOUSE_PRICES/house_prices.py", "max_issues_repo_name": "Mohit-007/KAGGLE-MACHINE-LEARNING", "max_issues_repo_head_hexsha": "f53ef40390fde09a0b783c6bc46fd6e6269c10fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HOUSE_PRICES/house_prices.py", "max_forks_repo_name": "Mohit-007/KAGGLE-MACHINE-LEARNING", "max_forks_repo_head_hexsha": "f53ef40390fde09a0b783c6bc46fd6e6269c10fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1516393443, "max_line_length": 89, "alphanum_fraction": 0.6443773585, "include": true, "reason": "import numpy", "num_tokens": 1886}
|
import sys, os
from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score, matthews_corrcoef
import pandas as pd
import numpy as np
import json
EVALUATE_CAPITAL_ONLY=True  # skip result files whose id characters [1:4] are lower-case
DO_SEQ_AVERAGING=False  # if True, smooth per-residue scores along the sequence before evaluating
#BINDING_CMAPS_PATH="/home/rsanchez/Tesis/rriPredMethod/data/bench5Data/newCodeData/computedFeatures/common/contactMapsBinding"
BINDING_CMAPS_PATH=None # if None, contact maps contained in the results file will be used, otherwise, new contacts will be added from the files in the path
def averageScores(scoresDf):
  """Smooth per-residue prediction scores along each chain's sequence.

  For every chain, residues are sorted by (numeric resId, insertion code)
  and each score is augmented with a [1, 3, 1]/5 moving average of its
  neighbourhood.

  :param scoresDf: DataFrame with columns chainId, resId, prediction, categ
  :return: (scores_list, labels_list) flattened over all chains
  """
  labels_list=[]
  scores_list=[]
  for chain in scoresDf["chainId"].unique():
    df= scoresDf.loc[scoresDf["chainId"]==chain,:]
    # resId may carry a trailing insertion code (e.g. "10A"); split it so
    # sorting is numeric first, insertion code second.
    result= [ ((int(elem), ""), (score, label)) if elem[-1].isdigit() else ((int(elem[:-1]), elem[-1]), (score, label))
              for elem,score,label in zip(df["resId"], df["prediction"], df["categ"]) ]
    result.sort(key= lambda x: x[0])
    # BUG FIX: zip() returns an iterator in Python 3, so the original
    # ``zip(*result)[1]`` raised TypeError; materialise it first.
    result= list(zip(* result))[1]
    scores, labels= zip(* result)
    # Smoothed score = centered [0.2, 0.6, 0.2] convolution + raw score.
    scores= list(np.convolve(scores, np.array([1, 3, 1])/5.0, mode='same')+ np.array(scores))
    labels_list+= labels
    scores_list+= scores
  return scores_list, labels_list
def loadResults( resultsPath, fnameResults, cMapsPath=BINDING_CMAPS_PATH):
  """Load one chain's per-residue score table.

  When ``cMapsPath`` is given, residues marked positive (categ==1) in the
  matching contact-map files are patched into the table's ``categ`` column.

  :param resultsPath: directory containing the results files
  :param fnameResults: file name; ".lig" in the name selects ligand chains
  :param cMapsPath: optional directory of contact-map files
  :return: DataFrame with columns chainId, resId, prediction, categ
  """
  prefix= fnameResults.split("_")[0].split(".")[0]
  # Ligand vs receptor chain selection from the file name.
  chainType = "l" if ".lig" in fnameResults else "r"
  # Raw strings for the regex separators (the originals triggered invalid
  # escape-sequence warnings on modern Python).
  scoresDf= pd.read_table(os.path.join(resultsPath, fnameResults), comment="#", sep=r"\s+", dtype={"resId":str, "chainId":str})
  if cMapsPath is not None:
    newCmapSet= set()
    for fname in os.listdir(cMapsPath):
      # Only contact maps of the same chain type and PDB prefix apply.
      if ((chainType=="l" and "_l_" in fname) or (chainType=="r" and "_r_" in fname)) and fname.startswith(prefix):
        # BUG FIX: the dtype dict listed the "resId" key twice; the
        # duplicate entry was removed.
        df= pd.read_table(os.path.join(cMapsPath,fname), sep=r'\s+', header='infer', comment="#",
                          dtype= {"chainIdL":str, "chainIdR":str, "resIdL":str, "resIdR":str,
                                  "chainId":str, "resId":str})
        for i in range(df.shape[0]):
          chainId, resId, categ= df.iloc[i,:]
          if categ==1:
            newCmapSet.add((chainId, resId))
    # Promote every contact-map positive to a positive label in the scores.
    for chainId, resId in newCmapSet:
      scoresDf.loc[(scoresDf["chainId"]==chainId) & (scoresDf["resId"]==resId),"categ"]=1
  return scoresDf
def get_single_chain_statistics(prefix, labels, scores):
  """Compute ROC-AUC plus precision/recall at fixed top-k cutoffs (8, 16).

  :param prefix: identifier stored in the summary's "pdb" column
  :param labels: per-residue ground-truth labels
  :param scores: per-residue prediction scores
  :return: single-row DataFrame: pdb, auc_chains, prec_k, reca_k columns
  """
  EVAL_PAIRS_AT= [ 2**3, 2**4]
  precisionAt=[]
  recallAt=[]
  scores= np.array(scores)
  labels= np.array(labels)
  try:
    roc_complex= roc_auc_score(labels, scores)
  except ValueError:
    # Only one class present -- AUC is undefined.
    roc_complex= np.nan
  # Indexes of the scores, highest first.
  probability_sorted_indexes = scores.argsort(axis=0)
  probability_sorted_indexes = probability_sorted_indexes[::-1]
  for evalPoint in EVAL_PAIRS_AT:
    try:
      # Predict positive for the top-``evalPoint`` residues, negative
      # (the minimum label value) everywhere else.
      label_predictions= np.ones(scores.shape[0], dtype=np.int32)* np.min( labels)
      label_predictions[probability_sorted_indexes[0 : evalPoint]]= np.repeat(1, evalPoint)
      precisionAt.append( precision_score(labels[probability_sorted_indexes[0 : evalPoint]],
                                          label_predictions[probability_sorted_indexes[0 : evalPoint]]))
      recallAt.append( recall_score(labels, label_predictions))
    except IndexError:
      # Fewer residues than the cutoff.
      precisionAt.append( 0.0)
      recallAt.append( 0.0)
  summary= pd.DataFrame({"pdb":[prefix]})
  summary["auc_chains"]= [roc_complex]
  # BUG FIX (readability): the original loop re-bound ``precisionAt`` and
  # ``recallAt`` to scalars while iterating over them; use fresh names.
  for evalPoint, prec, reca in zip(EVAL_PAIRS_AT, precisionAt, recallAt):
    summary["prec_%d"%evalPoint]= [prec]
    summary["reca_%d"%evalPoint]= [reca]
  return summary
def getRecallAtPrec(resultsPath, targetPrec=0.666, jsonFname=None, useSeqAvera= DO_SEQ_AVERAGING):
  """Sweep score thresholds and pick the one whose precision is closest to
  ``targetPrec``; print per-complex and aggregate statistics.

  :param resultsPath: directory of .rec/.lig per-chain results files
  :param targetPrec: precision to aim for when choosing the threshold
  :param jsonFname: optional path to dump the threshold->precision curve
  :param useSeqAvera: smooth scores along the sequence before evaluating
  :return: the selected score threshold
  """
  allScores=[]
  allLabels=[]
  perComplexSummaries=[]
  for fname in sorted(os.listdir(resultsPath)):
    if EVALUATE_CAPITAL_ONLY and fname[1:4].islower(): continue
    print(fname)
    if ".rec" in fname or ".lig" in fname:
      results= loadResults( resultsPath, fname)
      if results is None: continue
      if useSeqAvera:
        scores, labels= averageScores(results)
      else:
        scores= list(results["prediction"].values)
        labels= list(results["categ"].values)
      summary= get_single_chain_statistics(fname, labels, scores)
      perComplexSummaries.append(summary)
      allScores+= scores
      allLabels+= labels
  summary= pd.concat(perComplexSummaries, ignore_index=True)
  # BUG FIX: DataFrame.ix and DataFrame.append were removed from pandas
  # (1.0 / 2.0); build the trailing "mean" row with iloc/concat instead.
  # numeric_only=True keeps the string "pdb" column out of the mean.
  means= summary.mean(axis=0, numeric_only=True)
  summary= pd.concat([summary, summary.iloc[[-1]]], ignore_index=True)
  summary.iloc[-1, 0]= "mean"
  summary.iloc[-1, 1:]= means.values
  roc_auc= roc_auc_score(allLabels, allScores)
  allScores= np.array(allScores)
  # Candidate thresholds: the distinct rounded scores, extremes excluded.
  allScoresUnique= np.unique(np.round(allScores, decimals=4))
  indices= np.argsort(allScoresUnique)[1:-1]
  bestThr=-1
  bestMcc= 0
  bestPrec= -1
  bestRec= -1
  thr_2_prec={"thr":[], "prec":[]}
  nonClassLabel= int(np.min(allLabels))
  for i in indices:
    tmpThr= allScoresUnique[i]
    binaryScores= np.array([1.0 if score>tmpThr else nonClassLabel for score in allScores])
    currentPrec= precision_score(allLabels, binaryScores)
    # BUG FIX: cast numpy scalars to float so json.dump below does not
    # raise TypeError on numpy numeric types.
    thr_2_prec["thr"].append(float(tmpThr))
    thr_2_prec["prec"].append(float(currentPrec))
    if abs(currentPrec- targetPrec) < abs(bestPrec- targetPrec):
      bestMcc= matthews_corrcoef(allLabels, binaryScores)
      bestThr= tmpThr
      bestPrec= currentPrec
      bestRec= recall_score(allLabels, binaryScores)
  print(thr_2_prec)
  if jsonFname:
    with open(jsonFname,"w") as f:
      json.dump(thr_2_prec, f)
  print(summary.to_string(index=False))
  print("roc auc(mixed/mean): %f/%f Thr: %f mcc: %f prec: %f rec: %f"%(roc_auc,summary.iloc[-1,1], bestThr, bestMcc, bestPrec, bestRec))
  return bestThr
def countIfAboveThr(bestThr, resultsPath):
  """Count how many .rec/.lig result files have any score >= ``bestThr``."""
  totalNum=0
  numAbove=0
  for fname in sorted(os.listdir(resultsPath)):
    # Only per-chain result files are considered.
    if ".rec" not in fname and ".lig" not in fname:
      continue
    results= loadResults( resultsPath, fname)
    totalNum+=1
    if results is None:
      continue
    scores= np.array(results["prediction"].values)
    if np.sum(scores>=bestThr):
      numAbove+=1
  print("Above thr %d / %d "%(numAbove,totalNum))
if __name__=="__main__":
  '''
  python evaluation/getPrecsAt_diffThr.py ~/Tesis/rriPredMethod/data/bench5Data/newCodeData/results/mixed_2/ ~/Tesis/rriPredMethod/data/bench5Data/newCodeData/results/thr_2_prec_b5_mixed_2.json
  '''
  # Expect exactly two arguments: results directory and output JSON path.
  if len(sys.argv)==3:
    resultsPath= os.path.expanduser(sys.argv[1])
    jsonFname= os.path.expanduser(sys.argv[2])
  else:
    raise ValueError("Bad arguments.")
  # First pick the threshold closest to the target precision, then report
  # how many complexes have at least one residue above it.
  bestThr= getRecallAtPrec(resultsPath, jsonFname=jsonFname)
  countIfAboveThr(bestThr, resultsPath)
|
{"hexsha": "7c9a632a17d8f97a2f2d797a674df71c7ad96e16", "size": 6884, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluation/getPrecsAt_diffThr.py", "max_stars_repo_name": "rsanchezgarc/BIPSPI", "max_stars_repo_head_hexsha": "e155fee0836084ea02bc9919c58817d26a4a13e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-01-21T21:11:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-06T19:55:28.000Z", "max_issues_repo_path": "evaluation/getPrecsAt_diffThr.py", "max_issues_repo_name": "rsanchezgarc/BIPSPI", "max_issues_repo_head_hexsha": "e155fee0836084ea02bc9919c58817d26a4a13e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evaluation/getPrecsAt_diffThr.py", "max_forks_repo_name": "rsanchezgarc/BIPSPI", "max_forks_repo_head_hexsha": "e155fee0836084ea02bc9919c58817d26a4a13e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-05-25T14:57:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-27T12:53:41.000Z", "avg_line_length": 39.5632183908, "max_line_length": 194, "alphanum_fraction": 0.6846310285, "include": true, "reason": "import numpy", "num_tokens": 1905}
|
import copy
import warnings
from distutils.version import LooseVersion
import dask
import dask.array as da
import scipy.sparse as sp
from dask.base import tokenize
from dask.delayed import Delayed, delayed
from sklearn.utils import safe_indexing
from sklearn.utils.validation import _is_arraylike, indexable
from ..utils import _num_samples
# dask gained ``is_dask_collection`` after 0.15.4; on older versions fall
# back to an isinstance check against the legacy ``Base`` class.
if LooseVersion(dask.__version__) > "0.15.4":
    from dask.base import is_dask_collection
else:
    from dask.base import Base
    def is_dask_collection(x):
        # Any graph-backed dask object subclasses Base in old dask versions.
        return isinstance(x, Base)
def _indexable(x):
    """Convert ``x`` to an indexable type via scikit-learn's ``indexable``."""
    (converted,) = indexable(x)
    return converted
def _maybe_indexable(x):
    """Like :func:`_indexable`, but non-array-likes pass through unchanged."""
    if not _is_arraylike(x):
        return x
    return indexable(x)[0]
def to_indexable(*args, **kwargs):
    """Ensure that all args are an indexable type.

    Conversion runs lazily for dask objects, immediately otherwise.

    Parameters
    ----------
    args : array_like or scalar
    allow_scalars : bool, optional
        Whether to allow scalars in args. Default is False.
    """
    convert = _maybe_indexable if kwargs.get("allow_scalars", False) else _indexable
    for arg in args:
        if arg is None or isinstance(arg, da.Array):
            # Pass through: dask arrays are already indexable.
            yield arg
        elif is_dask_collection(arg):
            # Defer the conversion until the graph is computed.
            yield delayed(convert, pure=True)(arg)
        else:
            yield convert(arg)
def _index_param_value(num_samples, v, indices):
    """Private helper function for parameter value indexing.
    This determines whether a fit parameter `v` to a SearchCV.fit
    should be indexed along with `X` and `y`. Note that this differs
    from the scikit-learn version. They pass `X` and compute num_samples.
    We pass `num_samples` instead.
    """
    if not _is_arraylike(v) or _num_samples(v) != num_samples:
        # pass through: skip indexing
        return v
    if sp.issparse(v):
        # CSR supports efficient row-wise fancy indexing.
        v = v.tocsr()
    return safe_indexing(v, indices)
def to_keys(dsk, *args):
    """Yield a dask graph key for each argument, updating ``dsk`` in place.

    ``None`` passes through unchanged; dask arrays and Delayeds contribute
    their own graphs; plain values are inserted under a content-hashed key.
    """
    for x in args:
        if x is None:
            yield None
        elif isinstance(x, da.Array):
            # Wrap the array as a single Delayed so it exposes one key.
            x = delayed(x)
            dsk.update(x.dask)
            yield x.key
        elif isinstance(x, Delayed):
            dsk.update(x.dask)
            yield x.key
        else:
            assert not is_dask_collection(x)
            # Deterministic key derived from the value's content hash.
            key = "array-" + tokenize(x)
            dsk[key] = x
            yield key
def copy_estimator(est):
    """Deep-copy an (unfitted) estimator.

    ``sklearn.clone`` would be the semantic choice, but it is not threadsafe
    and must not run inside tasks.  Since ``est`` is guaranteed to be
    unfitted, ``copy.deepcopy`` cannot drag large fitted arrays along.
    """
    return copy.deepcopy(est)
def unzip(itbl, n):
    """Transpose an iterable of tuples; return ``n`` empty tuples if empty."""
    if not itbl:
        return [()] * n
    return zip(*itbl)
class DeprecationDict(dict):
    """A dict that emits a warning when certain keys are looked up.

    Lookups through ``__getitem__`` and ``get`` trigger the registered
    warning; ``__contains__`` and iteration do not.  The warning keeps
    firing even after the caller overwrites the key's value.

    This implementation was copied from Scikit-Learn.
    See License information here:
    https://github.com/scikit-learn/scikit-learn/blob/master/README.rst
    """
    def __init__(self, *args, **kwargs):
        # Maps key -> (warning args, warning kwargs) set via add_warning.
        self._deprecations = {}
        super(DeprecationDict, self).__init__(*args, **kwargs)

    def __getitem__(self, key):
        registered = self._deprecations.get(key)
        if registered is not None:
            warn_args, warn_kwargs = registered
            warnings.warn(*warn_args, **warn_kwargs)
        return super(DeprecationDict, self).__getitem__(key)

    def get(self, key, default=None):
        """Return ``self[key]`` (warning included) or ``default`` if absent.

        Parameters
        ----------
        key : any hashable object
            The key
        default : object, optional
            The default returned when key is not in dict
        """
        # dict.get would bypass our __getitem__, hence the override.
        try:
            return self[key]
        except KeyError:
            return default

    def add_warning(self, key, *args, **kwargs):
        """Register warning arguments to be raised whenever ``key`` is read.

        Parameters
        ----------
        key : any hashable object
            The key
        """
        self._deprecations[key] = (args, kwargs)
|
{"hexsha": "fd573cd8cdb941e318caa888d4f271be4512361f", "size": 4321, "ext": "py", "lang": "Python", "max_stars_repo_path": "dask_ml/model_selection/utils.py", "max_stars_repo_name": "jjerphan/dask-ml", "max_stars_repo_head_hexsha": "cfa57e5789fc29bdcee5d846b5116c07f5fbe292", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dask_ml/model_selection/utils.py", "max_issues_repo_name": "jjerphan/dask-ml", "max_issues_repo_head_hexsha": "cfa57e5789fc29bdcee5d846b5116c07f5fbe292", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dask_ml/model_selection/utils.py", "max_forks_repo_name": "jjerphan/dask-ml", "max_forks_repo_head_hexsha": "cfa57e5789fc29bdcee5d846b5116c07f5fbe292", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0, "max_line_length": 79, "alphanum_fraction": 0.6375838926, "include": true, "reason": "import scipy", "num_tokens": 1032}
|
# fmt: off
import torch
import numpy as np
import pandas as pd
import random
from farm.modeling.tokenization import Tokenizer
from farm.data_handler.data_silo import DataSilo
from farm.modeling.language_model import LanguageModel
from farm.modeling.prediction_head import TextClassificationHead, TokenClassificationHead
from farm.modeling.adaptive_model import AdaptiveModel
from farm.modeling.optimization import initialize_optimizer
from farm.train import Trainer
from farm.utils import set_all_seeds, initialize_device_settings
# Generate some dummy data: each example is (token list, per-token trigger
# labels, document label).
dummy= [[["hello", "this", "is", "a", "demo"], [1,0,0,0,1], "not sw"], [["hello", "this", "is", "starwars"], [1,0,0,1], "sw"]]
train_data = []
test_data = []
for number in range(100):
    train_data.append(dummy[random.randint(0, 1)])
    test_data.append(dummy[random.randint(0, 1)])
# BUG FIX: the train frame's columns were ['label', 'trigger', 'label'] --
# a duplicated 'label' and no 'sentence' column, which breaks
# MTLProcessor.file_to_dicts (it reads df.sentence / df.trigger / df.label).
train_df = pd.DataFrame(train_data, columns = ['sentence', 'trigger', 'label'])
train_df.to_csv("train.csv")
test_df = pd.DataFrame(test_data, columns = ['sentence', 'trigger', 'label'])
test_df.to_csv("test.csv")
from farm.data_handler.processor import Processor
from tokenizers.pre_tokenizers import WhitespaceSplit
from farm.data_handler.samples import (
Sample,
SampleBasket,
)
from farm.data_handler.utils import expand_labels
class MTLProcessor(Processor):
    """FARM processor for joint document classification and token tagging.

    Expects CSV files whose rows hold a python-literal token list
    (``sentence``), per-token trigger labels (``trigger``) and a document
    label (``label``).  Tasks are registered afterwards via ``add_task``.
    """
    def __init__(
        self,
        tokenizer,
        max_seq_len,
        data_dir,
        train_filename,
        test_filename,
        delimiter,
        dev_split=0.0,
        dev_filename=None,
        label_list=None,
        metric=None,
        proxies=None,
        **kwargs
    ):
        self.delimiter = delimiter
        # Tasks are added later through processor.add_task(), hence tasks={}.
        super(MTLProcessor, self).__init__(
            tokenizer=tokenizer,
            max_seq_len=max_seq_len,
            train_filename=train_filename,
            dev_filename=dev_filename,
            test_filename=test_filename,
            dev_split=dev_split,
            data_dir=data_dir,
            tasks={},
            proxies=proxies
        )
    def file_to_dicts(self, file: str) -> [dict]:
        """Parse one CSV file into FARM-style dicts (one dict per example)."""
        # BUG FIX: ``ast`` was used below but never imported anywhere in the
        # file, so the first call raised NameError.
        import ast
        dicts = list()
        df = pd.read_csv(file)
        for text, label, tokens in zip(df.sentence.values, df.label.values, df.trigger.values):
            columns = dict()
            # The CSV stores python-literal lists as strings; parse them back.
            text = ast.literal_eval(text)
            tokens = ast.literal_eval(tokens)
            columns["text"] = " ".join(text)
            columns["document_level_task_label"] = label # Key hard-coded
            columns["token_level_task_label"] = list(map(str, tokens)) # Key hard-coded
            dicts.append(columns)
        return dicts
    @staticmethod
    def _get_start_of_word(word_ids):
        """Return a 0/1 mask: 1 where a token starts a new word, else 0."""
        words = np.array(word_ids)
        # Special tokens have word id None; map them to -1 for the diff.
        words[words == None] = -1
        start_of_word_single = [0] + list(np.ediff1d(words) > 0)
        start_of_word_single = [int(x) for x in start_of_word_single]
        return start_of_word_single
    # Most of the code is copied from NERProcessor - dataset_from_dicts()
    def dataset_from_dicts(self, dicts, indices=None, return_baskets=False, non_initial_token="X"):
        """Tokenize the dicts and build feature tensors for both tasks."""
        self.baskets = []
        self.pre_tokenizer = WhitespaceSplit()
        texts = [x["text"] for x in dicts]
        words_and_spans = [self.pre_tokenizer.pre_tokenize_str(x) for x in texts]
        words = [[x[0] for x in y] for y in words_and_spans]
        word_spans_batch = [[x[1] for x in y] for y in words_and_spans]
        tokenized_batch = self.tokenizer.batch_encode_plus(
            words,
            return_offsets_mapping=True,
            return_special_tokens_mask=True,
            return_token_type_ids=True,
            return_attention_mask=True,
            truncation=True,
            max_length=self.max_seq_len,
            padding="max_length",
            is_split_into_words=True,
        )
        for i in range(len(dicts)):
            tokenized = tokenized_batch[i]
            d = dicts[i]
            id_external = self._id_from_dict(d)
            if indices:
                id_internal = indices[i]
            else:
                id_internal = i
            input_ids = tokenized.ids
            segment_ids = tokenized.type_ids
            # 1 where a token starts a new word -- used below to expand
            # word-level NER labels onto word pieces.
            initial_mask = self._get_start_of_word(tokenized.words)
            assert len(initial_mask) == len(input_ids)
            padding_mask = tokenized.attention_mask
            if return_baskets:
                token_to_word_map = tokenized.words
                word_spans = word_spans_batch[i]
                tokenized_dict = {
                    "tokens": tokenized.tokens,
                    "word_spans": word_spans,
                    "token_to_word_map": token_to_word_map,
                    "start_of_word": initial_mask
                }
            else:
                tokenized_dict = {}
            feature_dict = {
                "input_ids": input_ids,
                "padding_mask": padding_mask,
                "segment_ids": segment_ids,
                "initial_mask": initial_mask,
            }
            for task_name, task in self.tasks.items():
                try:
                    label_name = task["label_name"]
                    labels_word = d[label_name]
                    label_list = task["label_list"]
                    label_tensor_name = task["label_tensor_name"]
                    if task["task_type"] == "classification":
                        label_ids = [label_list.index(labels_word)]
                    elif task["task_type"] == "ner":
                        # Spread word-level labels over word pieces, tagging
                        # non-initial pieces with ``non_initial_token``.
                        labels_token = expand_labels(labels_word, initial_mask, non_initial_token)
                        label_ids = [label_list.index(lt) for lt in labels_token]
                except ValueError:
                    label_ids = None
                    problematic_labels = set(labels_token).difference(set(label_list))
                    print(f"[Task: {task_name}] Could not convert labels to ids via label_list!"
                          f"\nWe found a problem with labels {str(problematic_labels)}")
                except KeyError:
                    # Missing label key: expected at inference time, an
                    # error during training (verify the label list/columns).
                    label_ids = None
                if label_ids:
                    feature_dict[label_tensor_name] = label_ids
            curr_sample = Sample(id=None,
                                 clear_text=d,
                                 tokenized=tokenized_dict,
                                 features=[feature_dict])
            curr_basket = SampleBasket(id_internal=id_internal,
                                       raw=d,
                                       id_external=id_external,
                                       samples=[curr_sample])
            self.baskets.append(curr_basket)
        if indices and 0 not in indices:
            pass
        else:
            self._log_samples(1)
        dataset, tensor_names = self._create_dataset()
        ret = [dataset, tensor_names, self.problematic_sample_ids]
        if return_baskets:
            ret.append(self.baskets)
        return tuple(ret)
from sklearn.metrics import f1_score
def custom_f1_score(y_true, y_pred):
    """Average per-sample macro F1 over paired label sequences."""
    per_sample = [f1_score(t, p, average='macro') for t, p in zip(y_true, y_pred)]
    return {"f1 macro score" : sum(per_sample) / len(per_sample), "total" : len(per_sample)}
from typing import List
def my_loss_agg(individual_losses: List[torch.Tensor], global_step=None, batch=None):
    """Aggregate the two task losses by summing each and adding the results."""
    document_loss = individual_losses[0].sum()
    token_loss = individual_losses[1].sum()
    return document_loss + token_loss
# --- Experiment configuration ---
DO_LOWER_CASE = False
LANG_MODEL = "bert-base-uncased"
TRAIN_FILE = "/content/train.csv"
# DEV_FILE = "/content/dev.csv"
TEST_FILE = "/content/test.csv"
MAX_SEQ_LEN = 128
BATCH_SIZE = 8
LEARNING_RATE = 2e-5
N_EPOCHS = 1
EMBEDS_DROPOUT_PROB = 0.1
EVALUATE_EVERY = 20
DEVICE, N_GPU = initialize_device_settings(use_cuda=True)
set_all_seeds(seed=42)
tokenizer = Tokenizer.load(
    pretrained_model_name_or_path=LANG_MODEL,
    do_lower_case=DO_LOWER_CASE,
)
# Label sets: "X" marks non-initial word pieces for the token-level task.
TRIGGER_LABELS = ["X", "0", "1"]
LABEL_LIST = ["not sw", "sw"]
processor = MTLProcessor(data_dir = ".",
                        tokenizer=tokenizer,
                        max_seq_len=128,
                        train_filename=TRAIN_FILE,
                        test_filename=TEST_FILE,
                        delimiter=",",
                        )
from farm.evaluation.metrics import register_metrics
register_metrics('f1_weighted', custom_f1_score)
metric = 'f1_weighted'
# Register both tasks on the processor: sequence classification + token NER.
processor.add_task(name="document_level_task", label_list=LABEL_LIST, metric="acc", text_column_name="text", label_column_name="label", task_type="classification")
processor.add_task(name="token_level_task", label_list=TRIGGER_LABELS, metric=metric, text_column_name="text", label_column_name="tokens", task_type="ner")
data_silo = DataSilo(processor=processor,
                    batch_size=BATCH_SIZE
                    )
language_model = LanguageModel.load(LANG_MODEL)
# One prediction head per task, both sharing the same language model.
document_level_task_head = TextClassificationHead(num_labels=len(LABEL_LIST), task_name="document_level_task")
token_level_task_head = TokenClassificationHead(num_labels=len(TRIGGER_LABELS), task_name="token_level_task")
model = AdaptiveModel(
    language_model=language_model,
    prediction_heads=[document_level_task_head, token_level_task_head],
    embeds_dropout_prob=EMBEDS_DROPOUT_PROB,
    lm_output_types=["per_sequence", "per_token"],
    device=DEVICE,
    loss_aggregation_fn=my_loss_agg)
model, optimizer, lr_schedule = initialize_optimizer(
    model=model,
    device=DEVICE,
    learning_rate=LEARNING_RATE,
    n_batches=len(data_silo.loaders["train"]),
    n_epochs=N_EPOCHS)
trainer = Trainer(model=model,
                optimizer=optimizer,
                data_silo=data_silo,
                epochs=N_EPOCHS,
                n_gpu=N_GPU,
                lr_schedule=lr_schedule,
                device=DEVICE,
                evaluate_every=EVALUATE_EVERY,
                )
model = trainer.train()
|
{"hexsha": "09e442185ad214e811f973f017a82d740bd265f3", "size": 10096, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/multitask_learning.py", "max_stars_repo_name": "imdiptanu/FARM", "max_stars_repo_head_hexsha": "44736869c3756fc730e07a10b8198e42b8af0940", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/multitask_learning.py", "max_issues_repo_name": "imdiptanu/FARM", "max_issues_repo_head_hexsha": "44736869c3756fc730e07a10b8198e42b8af0940", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/multitask_learning.py", "max_forks_repo_name": "imdiptanu/FARM", "max_forks_repo_head_hexsha": "44736869c3756fc730e07a10b8198e42b8af0940", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5492957746, "max_line_length": 191, "alphanum_fraction": 0.625792393, "include": true, "reason": "import numpy", "num_tokens": 2208}
|
"""
Copyright 2020 RICHARD TJÖRNHAMMAR
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
def contrasted_signal ( edf ) :
    """Column-wise log2-contrast weighting of an expression frame.

    Each value x in a column is scaled by the Michelson contrast between
    2**x and 2**B, where B is that column's mean:
        |2**x - 2**B| / (2**x + 2**B) * x

    NOTE (from original author): NOT NECESSARILY VALID.

    :param edf: numeric DataFrame (log2-scale values)
    :return: DataFrame of contrast-weighted values, same shape as ``edf``
    """
    # BUG FIX (cleanup): the original computed a grand mean ``B`` over the
    # whole frame that was never used; only the per-column mean matters.
    l2_cont = lambda x,B : np.abs( ( 2**x - 2**B )/( 2**x + 2**B ) ) * x
    cdf = edf .apply( lambda x : l2_cont(x,np.mean(x)) )
    return ( cdf )
# REGULAR CONTRAST
def contrast(A, B):
    """Michelson contrast (A - B) / (A + B)."""
    return (A - B) / (A + B)

def e_flatness(x):
    """Flatness ratio along axis 0: geometric mean / arithmetic mean."""
    return np.exp(np.mean(np.log(x), 0)) / np.mean(x, 0)

def e_contrast(x):
    """Complement of e_flatness: near 1 for peaky columns, 0 for flat ones."""
    return 1 - e_flatness(x)
|
{"hexsha": "92ceb9bff10d180327810f3a461ffa7e1b17c6b1", "size": 1033, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/rankor/contrasts.py", "max_stars_repo_name": "richardtjornhammar/rankor", "max_stars_repo_head_hexsha": "f04a83091a92130b9b73439b4ad65b5be3056cf9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/rankor/contrasts.py", "max_issues_repo_name": "richardtjornhammar/rankor", "max_issues_repo_head_hexsha": "f04a83091a92130b9b73439b4ad65b5be3056cf9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/rankor/contrasts.py", "max_forks_repo_name": "richardtjornhammar/rankor", "max_forks_repo_head_hexsha": "f04a83091a92130b9b73439b4ad65b5be3056cf9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-24T12:41:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T12:41:24.000Z", "avg_line_length": 33.3225806452, "max_line_length": 72, "alphanum_fraction": 0.6911907067, "include": true, "reason": "import numpy", "num_tokens": 292}
|
#!/usr/bin/env python
"""
Copyright 2010-2018 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Broadband Platform Version of SDSU MO-GOF
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import shutil
import numpy as np
from decimal import Decimal, getcontext
# Import Broadband modules
import bband_utils
import plot_seismograms as PS
import validation_cfg
from sdsu_mogof_cfg import SDSUMOGofCfg
from install_cfg import InstallCfg
from station_list import StationList
import plot_value_map
import fault_utils
class SDSUMOGoF(object):
"""
Implement SDSU Mayhew-Olson GOF as a Broadband Component
"""
    def __init__(self, i_r_stations, i_weights, plot_map,
                 i_a_datadir, i_format,
                 i_comparison_label, sim_id=0):
        """Store the run parameters and initialize the MO-GOF configuration.

        NOTE(review): parameter semantics inferred from names/usage --
        i_r_stations: station list input; i_weights: GOF metric weight dict
        (stored into cfggof["weights"]); i_format: input seismogram format
        code (stored into cfggof["input_param"]); confirm against callers.
        """
        self.sim_id = sim_id
        self.r_stations = i_r_stations
        self.gof_weights = i_weights
        self.a_datadir = i_a_datadir
        self.format = i_format
        self.comp_label = i_comparison_label
        # Compute GOF, one station at a time
        self.single_stat_run = True
        self.num_stations = 0
        self.num_timesteps = 0
        self.len_seismo = 0.0
        self.input_set_1 = []
        self.input_set_2 = []
        # Push the run parameters into the shared SDSU MO-GOF configuration.
        self.config = SDSUMOGofCfg()
        self.config.cfggof["weights"] = self.gof_weights
        self.config.cfggof["input_param"] = self.format
        self.install = InstallCfg.getInstance()
        # Per-simulation log file under the output log directory.
        self.log = os.path.join(self.install.A_OUT_LOG_DIR,
                                str(self.sim_id),
                                "%d.sdsu_mogof.log" % (self.sim_id))
        self.site_fit = []
        self.metric_count = 0
        self.plot_gof_map = plot_map
        self.hypo = []
def open_file(self, file_name, mode):
try:
file = open(file_name, mode)
except IOError as err:
print("ERROR (sdsu_mogof: Unable to open the file",
file_name, "Ending program.\n", err)
sys.exit(-1)
else:
return file
def get_file_list(self, filename):
files = None
try:
fp = open(filename, 'r')
files = fp.readlines()
for i in xrange(0, len(files)):
files[i] = os.path.abspath(files[i].strip())
fp.close()
except:
print("ERROR (sdsu_mogof): Failed to read in files from %s" %
(filename))
sys.exit(-1)
else:
return files
def check_input_list(self, input_set):
for seismo in input_set:
print("Checking seismogram file %s" % (seismo))
if not os.path.exists(seismo):
print("ERROR (sdsu_mogof): Seismogram %s does not exist" %
(seismo))
sys.exit(-1)
sp = open(seismo, 'r')
samples = sp.readlines()
sp.close()
header = 0
# Check that the BBP file is formatted correctly
num_samples = len(samples)
tstart = -1.0
while samples[num_samples - 1] == "":
num_samples = num_samples - 1
for i in xrange(0, num_samples):
tokens = samples[i].split()
if tokens[0] != '#':
if len(tokens) != 4:
print("ERROR (sdsu_mogof): Seismogram "
"%s is incorrectly formatted" % (seismo))
print(tokens)
sys.exit(-1)
if tstart == -1.0:
tstart = float(tokens[0])
else:
header += 1
num_samples -= header
# Check number of timesteps
if self.num_timesteps == 0 or self.single_stat_run:
self.num_timesteps = num_samples
print("Determined num_timesteps = %d" % (self.num_timesteps))
else:
if num_samples != self.num_timesteps:
print("ERROR (sdsu_mogof): Found %d samples in %s, expected %d" %
(num_samples, seismo, self.num_timesteps))
sys.exit(-1)
# Save length of seismogram
if self.len_seismo < 1.0 or self.single_stat_run:
self.len_seismo = float(samples[num_samples - 1].split()[0]) - tstart
print("Determined len_seismo = %f s" % (self.len_seismo))
return 0
def concat_input_set(self, input_set, concat_filename):
try:
# Open concatenated file
print("Creating input file set %s" % (concat_filename))
cp = open(concat_filename, 'w')
for input in input_set:
print("Concatenating input file %s" % (input))
# Read in file
input_file = open(input, 'r')
lines = input_file.readlines()
input_file.close()
# Append to concatenated file
for line in lines:
# Ensure newline is present
if line.strip()[0] != '#' and line.strip()[0] != "\n":
if line[-1:] != "\n":
line = line + "\n"
cp.write(line)
cp.close()
except:
print("ERROR (sdsu_mogof): Failed to concatenate input set")
sys.exit(-1)
return 0
def write_param_dat(self, paramfile):
    """
    Write the PARAM.DAT control file consumed by the SDSU GOF binaries.

    The file layout (one value per line) is fixed by the Fortran readers:
    the two concatenated input-set names, a literal 0, station count,
    timesteps (twice), input_param, seismogram length, the optional NGA
    block, the low/high cutoffs, and the 12 metric weights in a fixed
    order.  Exits on I/O failure; returns 0 on success.
    """
    try:
        # `with` guarantees the handle is closed even if a write raises
        # (the original leaked it on mid-write IOError)
        with open(paramfile, 'w') as pp:
            pp.write("%s\n" % (self.config.INPUT_SEISMO_1))
            pp.write("%s\n" % (self.config.INPUT_SEISMO_2))
            pp.write("%d\n" % (0))
            pp.write("%d\n" % (self.num_stations))
            pp.write("%d\n" % (self.num_timesteps))
            pp.write("%d\n" % (self.num_timesteps))
            pp.write("%s\n" % (self.config.cfggof["input_param"]))
            pp.write("%f\n" % (self.len_seismo))
            if self.config.cfggof["use_nga"] == "true":
                pp.write("%s\n" % (self.config.cfgnga["source_mag"]))
                pp.write("%s\n" % (self.config.cfgnga["dip"]))
                pp.write("%s\n" % (self.config.cfgnga["rake"]))
                pp.write("%s\n" % (self.config.cfgnga["depth_coseismic"]))
                pp.write("%s\n" %
                         (os.path.split(self.config.cfgnga["site_file"])[1]))
            pp.write("%s\n" % (self.config.cfggof["low_cut"]))
            pp.write("%s\n" % (self.config.cfggof["high_cut"]))
            # One weight per metric, in the fixed order the binaries expect
            # ("sepctral_acc" is a historical typo preserved as a dict key)
            for key in ("pga", "pgv", "pgd", "psa", "spectral_Fit",
                        "cumulative_energy_fit", "inelastic_elastic_fit",
                        "sepctral_acc", "spec_duration",
                        "data_energy_release_duration",
                        "cross_correlation", "fourier_spectrum"):
                pp.write("%s\n" % (self.config.cfggof["weights"][key]))
    except IOError:
        print("ERROR (sdsu_mogof): Failed to write %s" % (paramfile))
        sys.exit(-1)
    return 0
# def BBPReader(self, bbpfile):
# cp = self.open_file(bbpfile, 'rb')
# lines = cp.readlines()
# cp.close()
# dt = 0.0
# iindex = 1
# dcount = 0
# hdr = 0
# samples = []
#
# for line in lines:
# tokens = line.strip().split()
# if (tokens[0] != '#'):
# if (len(tokens) != 4):
# print "ERROR (sdsu_mogof): Seismogram %s is incorrectly formatted" % (bbpfile)
# print tokens
# sys.exit(-1)
# else:
# sample = (float(tokens[0]), float(tokens[1]),
# float(tokens[2]), float(tokens[3]))
# samples.append(sample)
# dcount += 1
# else:
# hdr += 1
#
# if dcount > 2:
# dt = samples[1][0] - timesteps[0][0]
#
# return samples, dt, dcount
def bbp_writer(self, bbpfile, samples):
    """
    Write a list of (time, NS, EW, UD) tuples to a BBP-format file.

    Each sample is written as one line of four fixed-width columns.
    Exits if samples is None; returns 0 on success.
    """
    if samples is None:
        print("ERROR (sdsu_mogof): Need to specify sample list")
        sys.exit(-1)
    # `with` closes the file even if a write raises
    with open(bbpfile, 'w') as bbp:
        for sample in samples:
            bbp.write("%10.6f %16.8G %16.8G %16.8G\n" %
                      (float(sample[0]), float(sample[1]),
                       float(sample[2]), float(sample[3])))
    # All done
    return 0
def get_sample_data(self, bbpfile):
    """
    Return (dt, dcount) for a BBP seismogram file.

    dt is the Decimal time step (difference between the first two data
    timestamps; 0 if fewer than 2 data lines) and dcount the number of
    data lines.  Exits if a data line does not have exactly 4 columns.
    """
    dt = Decimal(0)
    dcount = 0
    # Text mode: tokens are compared with str literals and fed to
    # Decimal() below (the original opened 'rb', which under Python 3
    # yields bytes and breaks both operations).
    bfile = self.open_file(bbpfile, 'r')
    lines = bfile.readlines()
    # Close the handle — the original leaked it
    bfile.close()
    for line in lines:
        tokens = line.strip().split()
        if not tokens:
            # Skip blank lines (original raised IndexError on tokens[0])
            continue
        if tokens[0] != '#':
            if len(tokens) != 4:
                print("ERROR (sdsu_mogof): "
                      "Seismogram %s is incorrectly formatted" % (bbpfile))
                print(tokens)
                sys.exit(-1)
            else:
                dcount += 1
                if dcount < 2:
                    # First data line: remember its timestamp
                    dt = Decimal(tokens[0])
                elif dcount == 2:
                    # Second data line: dt = t1 - t0
                    dt = Decimal(tokens[0]) - dt
    return dt, dcount
def match_sample_rate(self, bbpfile, newdt):
    """
    Resample a BBP seismogram to the time step newdt.

    Splits the BBP file into per-component wcc files with wcc2bbp,
    resamples each component with wcc_resamp_arbdt, reassembles a
    "<base>.ms.bbp" file, removes the intermediate per-component files,
    and returns the path of the resampled BBP file.  All external GP
    binaries append their output to self.log.
    """
    work_dir = os.getcwd()
    # Base name (no extension) for the intermediate files in work_dir
    bbpfile_name = os.path.join(work_dir,
                                os.path.splitext(os.path.basename(bbpfile))[0])
    # BBP -> three single-component wcc files (.000/.090/.ver)
    cmd = ("%s/wcc2bbp " % (self.install.A_GP_BIN_DIR) +
           "nsfile=%s.000 ewfile=%s.090 udfile=%s.ver " %
           (bbpfile_name, bbpfile_name, bbpfile_name) +
           "wcc2bbp=0 < %s >> %s 2>&1" % (bbpfile, self.log))
    bband_utils.runprog(cmd)
    # Resample each component to the new dt
    for comp in self.config.COMPS:
        infile = bbpfile_name + "." + comp
        outfile = bbpfile_name + ".ms." + comp
        cmd = ("%s/wcc_resamp_arbdt " % (self.install.A_GP_BIN_DIR) +
               "newdt=%f infile=%s outfile=%s >> %s 2>&1" %
               (newdt, infile, outfile, self.log))
        bband_utils.runprog(cmd)
    # Reassemble the resampled components into a single BBP file
    cmd = ("%s/wcc2bbp wcc2bbp=1 " % self.install.A_GP_BIN_DIR +
           'title="MOGOF" '
           'nsfile=%s.ms.000 ewfile=%s.ms.090 udfile=%s.ms.ver > %s.ms.bbp 2>> %s' %
           (bbpfile_name, bbpfile_name, bbpfile_name, bbpfile_name, self.log))
    bband_utils.runprog(cmd)
    # Clean up intermediate per-component files
    os.remove("%s.000" % bbpfile_name)
    os.remove("%s.090" % bbpfile_name)
    os.remove("%s.ver" % bbpfile_name)
    os.remove("%s.ms.000" % bbpfile_name)
    os.remove("%s.ms.090" % bbpfile_name)
    os.remove("%s.ms.ver" % bbpfile_name)
    return "%s.ms.bbp" % bbpfile_name
def MatchSampleLength(self, bbpfile, length, match_method="pad"):
    """
    Pad a BBP seismogram with `length` extra samples and return the
    padded copy's path.

    The file is first copied to "<base>.ml.bbp" in the current working
    directory; padding repeats the last sample's amplitudes at times
    extended by the file's dt.  match_method "trim" is not implemented
    and aborts.
    """
    work_dir = os.getcwd()
    # Work on a copy so the original file is left untouched
    fname = os.path.join(work_dir,
                         "%s.ml.bbp" %
                         (os.path.splitext(os.path.basename(bbpfile))[0]))
    shutil.copy2(bbpfile, fname)
    bbpfile = fname
    # NOTE(review): opened 'rb' but tokens are compared/converted as str
    # below — presumably self.open_file returns text; confirm.
    bfile = self.open_file(bbpfile, 'rb')
    lines = bfile.readlines()
    bfile.close()
    if match_method == "pad":
        # Last line must be a data line: time + 3 amplitude columns
        tokens = lines[len(lines)-1].strip().split()
        bfile = self.open_file(bbpfile, 'a')
        dt = 0.0
        timestep = 0.0
        if len(tokens) != 4:
            print("ERROR (sdsu_mogof): " +
                  "Seismogram %s is incorrectly formatted" % (bbpfile))
            print(tokens)
            sys.exit(-1)
        else:
            timestep = float(tokens[0])
            # dt from the last two lines — assumes the second-to-last
            # line is also a data line (TODO confirm)
            dt = timestep - float(lines[len(lines) - 2].strip().split()[0])
            # Append `length` samples repeating the last amplitudes
            for _ in range(0, length):
                timestep = timestep + dt
                bfile.write("%10.6f %16.8G %16.8G %16.8G\n" %
                            (timestep, float(tokens[1]),
                             float(tokens[2]), float(tokens[3])))
        bfile.close()
    elif match_method == "trim":
        print("ERROR (sdsu_mogof): Seismogram trim feature is unavailable")
        sys.exit(-1)
    return bbpfile
def match_seis(self, syn_bbp, obs_bbp, length=0):
    """
    Make a synthetic and an observed BBP seismogram comparable.

    Upsamples whichever file has the coarser dt so both share the finer
    time step, then pads (or, with an explicit `length`, pads/trims) so
    both have the same number of samples.  Returns the (possibly new)
    (syn_bbp, obs_bbp) file paths.  Exits on missing files or a zero dt.
    """
    syn_dt = Decimal(0)
    syn_dcount = 0
    obs_dt = Decimal(0)
    obs_dcount = 0
    if os.path.exists(syn_bbp):
        syn_dt, syn_dcount = self.get_sample_data(syn_bbp)
    else:
        print("ERROR (sdsu_mogof): Seismogram file %s not found!" %
              (syn_bbp))
        sys.exit(-1)
    if os.path.exists(obs_bbp):
        obs_dt, obs_dcount = self.get_sample_data(obs_bbp)
    else:
        print("ERROR (sdsu_mogof): Seismogram file %s not found!" %
              (obs_bbp))
        sys.exit(-1)
    print("Matching Seismograms Syn: "
          "%s (dt: %f, SL: %d) with Obs: %s (dt: %f, SL: %d)" %
          (syn_bbp, syn_dt, syn_dcount, obs_bbp, obs_dt, obs_dcount))
    # Match Sample Rates if required
    # Always try to Upsample (resample the coarser record to the finer dt)
    if syn_dt != obs_dt:
        if syn_dt != Decimal(0) and obs_dt != Decimal(0):
            if obs_dt > syn_dt:
                # Upsample the observed record to the synthetic dt
                obs_bbp = self.match_sample_rate(obs_bbp, float(syn_dt))
                obs_dt, obs_dcount = self.get_sample_data(obs_bbp)
            else:
                # Upsample the synthetic record to the observed dt
                syn_bbp = self.match_sample_rate(syn_bbp, float(obs_dt))
                syn_dt, syn_dcount = self.get_sample_data(syn_bbp)
        else:
            print("ERROR (sdsu_mogof): Time Step is 0! "
                  "Obs-dt: %f, Syn-dt: %f" % (float(obs_dt), float(syn_dt)))
            sys.exit(-1)
    # Match Record Lengths
    if syn_dcount != obs_dcount or length != 0:
        if length == 0:
            # Length was not specified, pad the shorter record
            # with zero values at the end of the record
            length = obs_dcount - syn_dcount
            if length > 0:
                # Pad the synthetic record up to obs_dcount samples
                syn_bbp = self.MatchSampleLength(syn_bbp, length, "pad")
            elif length < 0:
                # Pad the observed record up to syn_dcount samples
                obs_bbp = self.MatchSampleLength(obs_bbp,
                                                 abs(length), "pad")
        elif length > 0:
            # Length was specified, adjust the length of each
            # seismogram to the specified length
            if length > syn_dcount:
                syn_bbp = self.MatchSampleLength(syn_bbp, length, "pad")
            elif length < syn_dcount:
                # NOTE(review): "trim" aborts in MatchSampleLength
                syn_bbp = self.MatchSampleLength(syn_bbp, length, "trim")
            if length > obs_dcount:
                obs_bbp = self.MatchSampleLength(obs_bbp, length, "pad")
            elif length < obs_dcount:
                obs_bbp = self.MatchSampleLength(obs_bbp, length, "trim")
    return syn_bbp, obs_bbp
def summarize_results(self, gof_results, summ_results):
    """
    Combine the individual GOF metric files into a weighted site fit,
    optionally plot each metric on a map, and write a per-station
    summary table to summ_results.

    Note: the `gof_results` argument is overwritten below with the
    GOF.list path inside the output directory (preserved from the
    original implementation).  Returns 0 on success, 1 if more result
    rows than stations are found.
    """
    a_indir = os.path.join(self.install.A_IN_DATA_DIR, str(self.sim_id))
    if self.config.cfggof["num_station"] >= 3 and self.plot_gof_map:
        # Check if a station file with subset of stations exists and use that
        stat_file = "%d_stats.txt" % (self.sim_id)
        if os.path.isfile(os.path.join(a_indir, stat_file)):
            pvm = plot_value_map.PlotValueMap(stat_file,
                                              self.sim_id, self.hypo)
        else:
            pvm = plot_value_map.PlotValueMap(self.r_stations,
                                              self.sim_id, self.hypo)
    else:
        if self.plot_gof_map:
            # Disable generating the GOF map with less than 3 stations
            print("Note (SDSU - GOF): Less than 3 stations found in " +
                  "station list (%d), GOF map plots will be disabled!" %
                  self.config.cfggof["num_station"])
            self.plot_gof_map = False
    self.site_fit = np.array([[0.0 for i in range(4)] for j in range(self.config.cfggof["num_station"])])
    self.metric_count = 0
    # Each metric: (weights/file key, map-plot label, format letter that
    # triggers an overlay plot or None).  The order matches the original
    # stanza order.  "sepctral_acc" is a historical typo kept as a key.
    metrics = [
        ("pga", "PGA", "A"),
        ("pgv", "PGV", "V"),
        ("pgd", "PGD", None),
        ("psa", "PSA", None),
        ("spectral_Fit", "Spectral Fit", None),
        ("cumulative_energy_fit", "Cumulative Energy Fit", None),
        ("data_energy_release_duration", "Energy release Duration", None),
        ("inelastic_elastic_fit", "In-elastic - Elastic Fit", None),
        ("sepctral_acc", "Spectral Acceleration", None),
        ("spec_duration", "Spectral Duration", None),
        ("cross_correlation", "Cross Correlation", None),
        ("fourier_spectrum", "Fourier Spectrum", None),
    ]
    # Compute site_fit as the weighted sum of every enabled metric
    for key, label, overlay_format in metrics:
        weight = self.config.cfggof["weights"][key]
        if weight > 0.0:
            fname = os.path.join(self.config.cfggof["output_dir"],
                                 self.config.cfggof["file"][key])
            metric_val = np.array(self.get_gof_metric(fname, start_col=0))
            # float() replaces np.float, which was removed in NumPy 1.20+
            self.site_fit = self.site_fit + (metric_val * float(weight))
            self.metric_count += 1
            if overlay_format is not None and self.format == overlay_format:
                self.plotOverlays(self.input_set_1, self.input_set_2,
                                  metric_val)
            if self.plot_gof_map:
                pvm.run(metric_val, label)
    # Average the weighted sum over the number of metrics used
    # (assumes at least one metric weight is > 0 — TODO confirm)
    self.site_fit = self.site_fit / float(self.metric_count)
    gof_results = os.path.join(self.config.cfggof["output_dir"],
                               "GOF.list")
    np.savetxt(gof_results, self.site_fit, delimiter='\t')
    if self.plot_gof_map:
        pvm.run(self.site_fit, "Site Fit")
    # Read in the file
    if not os.path.exists(gof_results):
        print("ERROR (sdsu_mogof): GOF Results file was not found!")
        sys.exit(-1)
    gp = self.open_file(gof_results, 'r')
    lines = gp.readlines()
    gp.close()
    fname = os.path.join(self.config.cfggof["output_dir"],
                         "stat_comp.txt")
    if not os.path.exists(fname):
        print("ERROR (sdsu_mogof): GOF Stations file was not found!")
        sys.exit(-1)
    comp_stat_file = self.open_file(fname, 'r')
    comp_stats = comp_stat_file.readlines()
    comp_stat_file.close()
    # Write (and echo) the per-station summary table.  `with` guarantees
    # the summary file is closed on the early-return path below.
    with open(summ_results, 'w') as sp:
        header = "%-24s\t%-24s\tOverall\tX\tY\tZ" % ("Sta (input 1)", "Sta (input 2)")
        sep = "=" * 96
        print("\nResults:")
        print(header)
        print(sep)
        sp.write("#%s\n" % (header))
        sp.write("#%s\n" % (sep))
        stnum = 0
        for line in lines:
            stripped = line.strip()
            # '%' marks comment lines in the GOF output
            if stripped[0] != '%':
                tokens = stripped.split()
                if stnum >= self.config.cfggof["num_station"]:
                    print("More results reported than input stations!")
                    return 1
                stats = comp_stats[stnum].strip().split()
                input_1 = stats[0]
                input_2 = stats[1]
                overall = float(tokens[0])
                x = float(tokens[1])
                y = float(tokens[2])
                z = float(tokens[3])
                results = ("%-24s\t%-24s\t%6.2f\t%6.2f\t%6.2f\t%6.2f" %
                           (input_1, input_2, overall, x, y, z))
                print(results)
                sp.write("%s\n" % (results))
                stnum += 1
    print("")
    print("Saved summary in %s" % (summ_results))
    return 0
def get_gof_metric(self, filename, start_col=0):
    """
    Read a GOF metric file and return its numeric rows as lists of
    floats, starting at column start_col.

    Lines starting with '#' or '%' are skipped.  With start_col=0, a
    7-column row switches parsing to column 3 for the rest of the file
    (layout: slon, slat, site, overall fit, x fit, y fit, z fit).
    """
    gof_metric = []
    metric_file = self.open_file(filename, 'r')
    try:
        m_data = metric_file.readlines()
    finally:
        # Close the handle — the original leaked it
        metric_file.close()
    for line in m_data:
        stripped = line.strip()
        if stripped.startswith('#') or stripped.startswith('%'):
            continue
        pieces = line.split()
        if start_col == 0 and len(pieces) == 7:
            # slon, slat, site, overall fit, x fit, y fit, z fit
            start_col = 3
        gof_metric.append([float(s) for s in pieces[start_col:]])
    return gof_metric
#def set_gof_metric(self, filename, m_data, stat=None, start_col=0):
def set_gof_metric(self, filename, m_data, stat=None):
    """
    Append one record of GOF metric rows to filename.

    All rows of m_data are concatenated (each as a tab-joined,
    left-justified 10-char-min field) into a single line; when a
    station object is given, its lon/lat/code are prepended.
    """
    record = "".join("%-10s" % "\t".join(map(str, row)) for row in m_data)
    if stat is not None:
        record = "%-8s\t%-8s\t%-12s\t%s" % (stat.lon, stat.lat,
                                            stat.scode, record)
    out = self.open_file(filename, 'a')
    out.write("%s\n" % record)
    out.close()
    return
def plotOverlays(self, filelist_syn, filelist_obs, gofdata):
    """
    Generate a per-station synthetic/observed overlay plot for every
    station, annotated with that station's GOF values.

    Plots are written to <A_OUT_DATA_DIR>/<sim_id>/gof_plots.  Exits if
    the list lengths do not agree.
    """
    if len(filelist_syn) != len(filelist_obs):
        print("ERROR (sdsu_mogof): "
              "Number of stations in sets 1 and 2 differ!")
        sys.exit(-1)
    if len(filelist_syn) != len(gofdata):
        # Typo fix: "lenght" -> "length"
        print("ERROR (sdsu_mogof): Number of stations "
              "%d is not equal to length of GOF data %d!" %
              (len(filelist_syn), len(gofdata)))
        sys.exit(-1)
    a_outdir = os.path.join(self.install.A_OUT_DATA_DIR,
                            str(self.sim_id),
                            "gof_plots")
    if not os.path.exists(a_outdir):
        os.mkdir(a_outdir)
    # NOTE(review): a format other than 'V'/'A' leaves ylabel/goflabel
    # undefined and raises NameError below — confirm callers only use
    # 'V' or 'A'.
    if self.format == "V":
        ylabel = "Velocity (cm/s)"
        goflabel = ["GOF", "PGV"]
    elif self.format == "A":
        ylabel = "Acceleration (cm/s/s)"
        goflabel = ["GOF", "PGA"]
    for syn_bbp, obs_bbp, gdata in zip(filelist_syn, filelist_obs, gofdata):
        # Station code is the first dot-separated piece of the filename
        stat = os.path.basename(obs_bbp).split(".")[0]
        outfile = os.path.join(a_outdir, "%s_%s_match_%s_overlay.png" %
                               (self.sim_id, stat, self.format))
        print("PS.plot_overlay(%s, %s, %s, Observed, Run %s," %
              (stat, obs_bbp, syn_bbp, self.sim_id) +
              "%s, %s, %r, %r" %
              (outfile, ylabel, goflabel, gdata))
        PS.plot_overlay(stat, obs_bbp, syn_bbp, "Observed",
                        "Run %s" % self.sim_id, outfile,
                        ylabel, goflabel, gdata)
    return
def do_gof(self, filelist_syn, filelist_obs, stat=None):
    """
    Run the SDSU MO-GOF binaries on the given synthetic/observed BBP
    file lists and collect each enabled metric's results.

    Writes the two input-set list files, validates and concatenates
    them, writes PARAM.DAT, recreates the "out" directory, then runs
    each metric binary whose weight is > 0 and appends its results via
    set_gof_metric().  Binaries shared by several metrics (PGX covers
    pga/pgv/pgd/psa; DCumEn covers the two energy metrics) are run only
    once.  When `stat` is given, each metric row is tagged with that
    station.
    """
    sim_id = self.sim_id
    run_dir = self.config.cfggof["work_dir"]
    self.config.cfggof["input_set_1"] = os.path.join(run_dir,
                                                     "%d_syn_list.input" %
                                                     (sim_id))
    self.config.cfggof["input_set_2"] = os.path.join(run_dir,
                                                     "%d_obs_list.input" %
                                                     (sim_id))
    # Write the synthetic and observed file-list inputs
    inpsfile = self.open_file(self.config.cfggof["input_set_1"], 'w')
    for filename in filelist_syn:
        inpsfile.write("%s\n" % (filename))
    inpsfile.close()
    inpofile = self.open_file(self.config.cfggof["input_set_2"], 'w')
    for filename in filelist_obs:
        inpofile.write("%s\n" % (filename))
    inpofile.close()
    # Concatenate input BBP files into single GOF input
    # - Check input files exist
    if not os.path.exists(self.config.cfggof["input_set_1"]):
        print("ERROR (sdsu_mogof): Input_set_1 file %s does not exist" %
              (self.config.cfggof["input_set_1"]))
        sys.exit(-1)
    if not os.path.exists(self.config.cfggof["input_set_2"]):
        print("ERROR (sdsu_mogof): Input_set_2 file %s does not exist" %
              (self.config.cfggof["input_set_2"]))
        sys.exit(-1)
    # Get individual input file lists
    input_set_1 = self.get_file_list(self.config.cfggof["input_set_1"])
    input_set_2 = self.get_file_list(self.config.cfggof["input_set_2"])
    if input_set_1 is None or input_set_2 is None:
        print("ERROR (sdsu_mogof): Empty file lists found")
        sys.exit(-1)
    # Check each file in each filelist
    if len(input_set_1) != len(input_set_2):
        print("ERROR (sdsu_mogof): "
              "Number of stations in sets 1 and 2 differ")
        sys.exit(-1)
    if self.check_input_list(input_set_1) != 0:
        print("ERROR (sdsu_mogof): Input_set_1 is not valid")
        sys.exit(-1)
    if self.check_input_list(input_set_2) != 0:
        print("ERROR (sdsu_mogof): Input_set_2 is not valid")
        sys.exit(-1)
    # Concatenate each input set into a single GOF formatted file
    # and save to work_dir
    concat_file_1 = os.path.join(self.config.cfggof["work_dir"],
                                 self.config.INPUT_SEISMO_1)
    concat_file_2 = os.path.join(self.config.cfggof["work_dir"],
                                 self.config.INPUT_SEISMO_2)
    if self.concat_input_set(input_set_1, concat_file_1) != 0:
        print("ERROR (sdsu_mogof): Failed to concatenate input set 1")
        sys.exit(-1)
    if self.concat_input_set(input_set_2, concat_file_2) != 0:
        print("ERROR (sdsu_mogof): Failed to concatenate input set 2")
        sys.exit(-1)
    self.config.cfggof["timesteps_set_1"] = self.num_timesteps
    self.config.cfggof["timesteps_set_2"] = self.num_timesteps
    self.config.cfggof["seismo_length"] = self.len_seismo
    # If NGA, this path is not implemented.  Bug fix: the original
    # tested the raw value (`if cfggof["use_nga"]:`), which is truthy
    # for the string "false"; compare against "true" as done everywhere
    # else in this module.
    if self.config.cfggof["use_nga"] == "true":
        print("ERROR (sdsu_mogof): "
              "USE_NGA option has not been implemented. Aborting...")
        sys.exit(-1)
    # Construct parameter file in working_dir
    paramfile = os.path.join(self.config.cfggof["work_dir"],
                             self.config.PARAM_DAT_FILE)
    if self.write_param_dat(paramfile) != 0:
        print("ERROR (sdsu_mogof): Failed to write %s" % (paramfile))
        sys.exit(-1)
    # (Re)Create the "out" subdir the GOF binaries write into
    gof_out_dir = os.path.abspath(os.path.join(self.config.cfggof["work_dir"], "out"))
    if os.path.exists(gof_out_dir):
        shutil.rmtree(gof_out_dir)
    os.mkdir(gof_out_dir)
    # Execute GOF codes
    self.metric_count = 0

    def _run_metric_group(binary, keys):
        """Run `binary` once if any metric in `keys` is enabled, then
        collect and store each enabled metric's results."""
        active = [key for key in keys
                  if self.config.cfggof["weights"][key] > 0.0]
        if not active:
            return
        cmd = '%s >> %s 2>&1' % (binary, self.log)
        print("Executing cmd: %s" % (str(cmd)))
        bband_utils.runprog(cmd)
        for key in active:
            fname = os.path.join(self.config.cfggof["work_dir"],
                                 "out",
                                 self.config.cfggof["file"][key])
            metric_val = np.array(self.get_gof_metric(fname, start_col=0))
            fname = os.path.join(self.config.cfggof["output_dir"],
                                 self.config.cfggof["file"][key])
            self.set_gof_metric(fname, metric_val, stat)
            self.metric_count += 1

    # PGX computes pga/pgv/pgd/psa in a single run
    _run_metric_group(self.config.GOF_PGX_BIN,
                      ["pga", "pgv", "pgd", "psa"])
    _run_metric_group(self.config.GOF_SpFit_BIN, ["spectral_Fit"])
    # DCumEn computes both energy metrics in a single run
    _run_metric_group(self.config.GOF_DCumEn_BIN,
                      ["cumulative_energy_fit",
                       "data_energy_release_duration"])
    _run_metric_group(self.config.GOF_InElFit_BIN,
                      ["inelastic_elastic_fit"])
    _run_metric_group(self.config.GOF_SAFit16_BIN, ["sepctral_acc"])
    _run_metric_group(self.config.GOF_SpecDurFit_BIN, ["spec_duration"])
    _run_metric_group(self.config.GOF_CCFit_BIN, ["cross_correlation"])
    _run_metric_group(self.config.GOF_FSComp_BIN, ["fourier_spectrum"])
    return
def run(self):
"""
Runs the SDSU MoGOF code
"""
print("SDSU MOGoF".center(80, '-'))
getcontext().prec = 10
# Required Inputs: sim_id
sim_id = self.sim_id
old_cwd = os.getcwd()
sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])
a_indir = os.path.join(self.install.A_IN_DATA_DIR, str(sim_id))
a_tmpdir = os.path.join(self.install.A_TMP_DATA_DIR, str(sim_id))
a_tmpdir_seis = os.path.join(self.install.A_TMP_DATA_DIR, str(sim_id),
"obs_seis_%s" % (sta_base))
a_outdir = os.path.join(self.install.A_OUT_DATA_DIR, str(sim_id))
a_statfile = os.path.join(self.install.A_IN_DATA_DIR,
str(sim_id),
self.r_stations)
if not os.path.exists(a_statfile):
# We need the station file
print("ERROR (sdsu_mogof): Cannot find station list file %s" %
(a_statfile))
sys.exit(-1)
self.config.cfggof["output_dir"] = a_outdir
# Make sure the output and tmp directories exist
if not os.path.exists(a_tmpdir):
os.mkdir(a_tmpdir)
if not os.path.exists(a_outdir):
os.mkdir(a_outdir)
if self.plot_gof_map:
# Let's figure out the hypocenter location
val_obj = None
if self.comp_label != "" and self.comp_label is not None:
# If validation, get validation object
val_obj = validation_cfg.VE_EVENTS.get_event_by_name(self.comp_label)
if val_obj is not None:
# Look to see if we have a srf file specified
srffile = val_obj.get_input("GP", "srf")
else:
srffile = ""
# If we have SRF file, get hypocenter from it, otherwise
# look for an srf file in the indata directory
if srffile != "":
self.hypo = fault_utils.get_hypocenter(srffile)
else:
entries = os.listdir(a_indir)
candidate_list = []
for entry in entries:
if entry.startswith("xyz_"):
# Skip xyz srf file created by bbtoolbox.py
continue
if entry.endswith(".srf"):
candidate_list.append(entry)
if len(candidate_list) == 1:
srffile = os.path.join(a_indir, candidate_list[0])
self.hypo = fault_utils.get_hypocenter(srffile)
# Create a site1d run dir
run_dir = os.path.join(a_tmpdir, "MOGof")
if not os.path.exists(run_dir):
os.mkdir(run_dir)
self.config.cfggof["work_dir"] = run_dir
else:
# - Clean MOGof run dir
for root, dirs, files in os.walk(run_dir):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
os.chdir(run_dir)
# Read and parse the station list with this call
slo = StationList(a_statfile)
if slo is None:
print("ERROR (sdsu_mogof): Cannot open station list %s" %
(a_statfile))
sys.exit(-1)
site_list = slo.getStationList()
print("Opening Station list %s." % (a_statfile))
#Build GOF formated input file from BPP files.
# - Check if the output BBP file exists for a station in the station list
# - Concatenate it to end of the input file.
filelist_syn = []
filelist_obs = []
self.num_stations = 0
self.config.cfggof["num_station"] = 0
stats = []
fname = os.path.join(self.config.cfggof["output_dir"], "stat_comp.txt")
comp_stat_file = self.open_file(fname, 'a')
print("Running SDSU MO-GOF codes")
for sites in site_list:
slon = float(sites.lon)
slat = float(sites.lat)
site = sites.scode
expected_file = os.path.join(a_outdir, "%d.%s.acc.bbp" %
(sim_id, site))
if not os.path.exists(expected_file):
# Just skip it
print("Couldn't find file %s. This is not necessarily an error, as you may have run with a subset of a stations. Goodness of fit will continue with available stations." % (expected_file))
continue
obs_file = os.path.join(a_tmpdir_seis, "%s.bbp" % (site))
if not os.path.exists(obs_file):
# Just skip it
print("Couldn't find observed seismogram file %s. Goodness of fit will continue with available stations." % (obs_file))
continue
stats.append((slon, slat, site))
self.config.cfggof["num_station"] += 1
expected_file, obs_file = self.match_seis(expected_file,
obs_file, 0)
# Add file to input file list
filelist_syn.append(expected_file)
self.input_set_1.append(expected_file)
filelist_obs.append(obs_file)
self.input_set_2.append(obs_file)
comp_stat_file.write("%s\t\t%s\n" %
(os.path.basename(expected_file),
os.path.basename(obs_file)))
self.num_stations += 1
if self.single_stat_run:
self.do_gof(filelist_syn, filelist_obs, stat=sites)
self.num_stations = 0
filelist_syn = []
filelist_obs = []
if self.config.cfggof["num_station"] != len(site_list):
print("Writing a subset of stations with valid BBP files for GOF!")
fname = os.path.join(a_indir, "%d_stats.txt" % (self.sim_id))
stat_file = self.open_file(fname, 'a')
for station in stats:
stat_file.write("%.3f\t%.3f\t%s\n" % (station[0], station[1],
station[2]))
stat_file.close()
# if self.single_stat_run == False:
# self.do_gof(filelist_syn, filelist_obs)
comp_stat_file.close()
# Summarize results
gof_results = os.path.join(self.config.cfggof["output_dir"], "GOF.list")
summ_results = os.path.join(self.config.cfggof["output_dir"],
self.config.SUMMARY_FILE)
if self.summarize_results(gof_results, summ_results) != 0:
print("ERROR (sdsu_mogof): Failed to summarize results")
sys.exit(-1)
os.chdir(old_cwd)
print("SDSU MOGoF Completed".center(80, '-'))
if __name__ == "__main__":
    # Smoke-test entry point: build the module from the seven positional
    # command-line arguments plus the simulation id, then run it.
    print("Testing Module: %s" % (os.path.basename(sys.argv[0])))
    MODULE = SDSUMOGoF(*sys.argv[1:8], sim_id=int(sys.argv[8]))
    MODULE.run()
|
{"hexsha": "5dfc070d2fc93ff18cf2121f8929e4c846d7014e", "size": 53527, "ext": "py", "lang": "Python", "max_stars_repo_path": "bbp/comps/sdsu_mogof.py", "max_stars_repo_name": "ZhangHCFJEA/bbp", "max_stars_repo_head_hexsha": "33bd999cf8d719c49f9a904872c62f02eb5850d1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2017-10-31T09:16:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T23:44:29.000Z", "max_issues_repo_path": "bbp/comps/sdsu_mogof.py", "max_issues_repo_name": "ZhangHCFJEA/bbp", "max_issues_repo_head_hexsha": "33bd999cf8d719c49f9a904872c62f02eb5850d1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 37, "max_issues_repo_issues_event_min_datetime": "2017-05-23T15:15:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-05T09:13:18.000Z", "max_forks_repo_path": "bbp/comps/sdsu_mogof.py", "max_forks_repo_name": "ZhangHCFJEA/bbp", "max_forks_repo_head_hexsha": "33bd999cf8d719c49f9a904872c62f02eb5850d1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2017-09-21T17:43:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T06:34:30.000Z", "avg_line_length": 45.0563973064, "max_line_length": 203, "alphanum_fraction": 0.5164496422, "include": true, "reason": "import numpy", "num_tokens": 12625}
|
// Copyright Ricardo Calheiros de Miranda Cosme 2018.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/core/lightweight_test.hpp>
#include <coruja/container/vector.hpp>
#include <coruja/object/object.hpp>
#include <coruja/object/view/any_object.hpp>
#include <coruja/boost_optional.hpp>
#include <coruja/boost_variant.hpp>
#include <coruja/observer_class.hpp>
using namespace coruja;
// Minimal type used only to exercise template instantiation paths.
struct empty{};
// Functor reaction that checks it is delivered to the expected object
// instance (tracked through moves via _self) and flags that it fired.
template<typename Self>
struct reaction_self
{
    void operator()(Self& self) const
    {
        // The owner's self-pointer must match the object the reaction
        // is invoked on, even after the owner has been moved.
        BOOST_TEST(_self == &self);
        *self.called = true;
    }
    // Reference to the owner's heap-held self-pointer.
    Self*& _self;
};
// Observer class exercising every observe_* overload plus correct
// self-pointer tracking across move construction and move assignment.
struct my_class_eb : observer_class<my_class_eb> {
    using base = observer_class<my_class_eb>;
    my_class_eb() = default;
    // `b` is the flag that every registered reaction sets when it fires.
    my_class_eb(bool& b)
        : base()
        // Heap-allocated self-pointer so reaction_self keeps seeing the
        // current address of this object even after moves.
        , _self(new my_class_eb*{this})
        , called(&b)
    {
        observe(i, reaction_self<my_class_eb>{*_self});
        observe(ii, [](my_class_eb& self, int x)
        {
            *self.called = true;
            BOOST_TEST(x == 10);
        });
        observe(iii, [](my_class_eb& self, object<int>& x)
        {
            *self.called = true;
            BOOST_TEST(x == 15);
        });
        observe_for_each(v, [](my_class_eb& self, vector<int>&, vector<int>::iterator)
        { *self.called = true; });
        observe_before_erase(v, [](my_class_eb& self, vector<int>&, vector<int>::iterator)
        { *self.called = true; });
        // Instantiation-only checks for variant and optional sources.
        observe(var, [](my_class_eb&){});
        observe(oi, [](my_class_eb&){});
    }
    my_class_eb(my_class_eb&& rhs)
        : base(std::move(rhs))
        , _self(std::move(rhs._self))
        , i(std::move(rhs.i))
        , ii(std::move(rhs.ii))
        , iii(std::move(rhs.iii))
        , v(std::move(rhs.v))
        , called(std::move(rhs.called))
        , var(std::move(rhs.var))
        , oi(std::move(rhs.oi))
    {
        // Re-point the shared self-pointer at the new location.
        *_self = this;
    }
    // NOTE(review): no self-move guard; `o = std::move(o)` would leave
    // _self moved-from before the final dereference — confirm callers
    // never self-assign.
    my_class_eb& operator=(my_class_eb&& rhs)
    {
        base::operator=(std::move(rhs));
        _self = std::move(rhs._self);
        i = std::move(rhs.i);
        ii = std::move(rhs.ii);
        iii = std::move(rhs.iii);
        v = std::move(rhs.v);
        called = std::move(rhs.called);
        var = std::move(rhs.var);
        oi = std::move(rhs.oi);
        *_self = this;
        return *this;
    }
    // Owned pointer-to-self; reaction_self holds a reference to it.
    std::unique_ptr<my_class_eb*> _self;
    object<int> i, ii, iii;
    vector<int> v;
    bool* called{nullptr};
    variant<int, float> var;
    optional<int> oi;
};
// Trigger every observed member of `o` and assert each reaction fired
// exactly when expected (the flag is reset between triggers).
template<typename Class>
void test_reactions(Class& o, bool& called)
{
    o.i = 5;
    BOOST_TEST(called);
    called = false;
    o.ii = 10;
    BOOST_TEST(called);
    called = false;
    o.iii = 15;
    BOOST_TEST(called);
    called = false;
    // Insertion fires observe_for_each...
    o.v.emplace_back(0);
    BOOST_TEST(called);
    called = false;
    // ...and removal fires observe_before_erase.
    o.v.erase(o.v.begin());
    BOOST_TEST(called);
    called = false;
}
// Compile-time coverage: every observe overload must accept a
// type-erased view::any_object source.
struct observe_any_view : observer_class<observe_any_view> {
    observe_any_view(view::any_object<int>& o) {
        observe(o, []{});
        observe(o, [](observe_any_view&){});
        observe(o, [](observe_any_view&, int){});
        observe_obj_for_each(o, []{});
        observe_obj_for_each(o, [](observe_any_view&){});
        observe_obj_for_each(o, [](observe_any_view&, int){});
    }
};
int main() {
    static_assert(std::is_default_constructible<my_class_eb>::value,"");
    //reactions
    {
        bool called{false};
        my_class_eb o{called};
        test_reactions(o, called);
    }
    //move ctor
    {
        bool called{false};
        my_class_eb src{called};
        auto o = std::move(src);
        // Reactions must still reach the moved-to object.
        test_reactions(o, called);
    }
    //move assignment operator
    {
        bool called{false};
        my_class_eb src{called};
        my_class_eb o{called};
        o = std::move(src);
        test_reactions(o, called);
    }
    // Type-erased view: ctor must compile and register all reactions.
    {
        object<int> o;
        view::any_object<int> v(o);
        observe_any_view c(v);
    }
}
|
{"hexsha": "894d3fe82b0b6a94ae579f706bb1043cfc03543c", "size": 4207, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/observer_class.cpp", "max_stars_repo_name": "ethiago/coruja", "max_stars_repo_head_hexsha": "13c595a7b37271d04c51de1ffb45b965c95a18cf", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 13.0, "max_stars_repo_stars_event_min_datetime": "2017-10-24T07:13:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-07T02:51:31.000Z", "max_issues_repo_path": "test/observer_class.cpp", "max_issues_repo_name": "ethiago/coruja", "max_issues_repo_head_hexsha": "13c595a7b37271d04c51de1ffb45b965c95a18cf", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-10-30T21:50:44.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-06T19:35:23.000Z", "max_forks_repo_path": "test/observer_class.cpp", "max_forks_repo_name": "ethiago/coruja", "max_forks_repo_head_hexsha": "13c595a7b37271d04c51de1ffb45b965c95a18cf", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2018-09-19T20:29:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-20T18:12:09.000Z", "avg_line_length": 25.8098159509, "max_line_length": 90, "alphanum_fraction": 0.5486094604, "num_tokens": 1081}
|
# Plot matching time (lines, left y-axis) against memory footprint
# (bars, right y-axis) for varying numbers of groups, then save as PDF.
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rc
from matplotlib.pyplot import MultipleLocator
rc('mathtext', default='regular')
plt.rcParams['font.family'] = ['Times New Roman'] # render all text in Times New Roman
plt.rcParams['axes.unicode_minus'] = False
# Group counts shown as the x tick labels.
g = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
x = np.arange(10) # positions of the x-axis tick labels
Name = [
    "Rein", "HEM-B", "HEMSS", "HEMSD", "HEMPD", "HEMDS", "HEM-LD", "Simple",
    "Simple2"
]
# Matching times (ms) for the two plotted variants, one value per group count.
HEM0PS = [
    6.24005, 4.153866, 2.312664, 1.507056, 0.924996, 0.672416, 0.535015,
    0.458862, 0.439522, 0.405026
]
HEM5DD = [
    2.962161, 1.57773, 0.940414, 0.610254, 0.464354, 0.374525, 0.325231,
    0.301888, 0.318704, 0.276994
]
# Memory footprint (MB) per group count, drawn as bars on the right axis.
Memory = [154, 161, 170, 189, 227, 304, 456, 761, 1372, 2593]
fig = plt.figure(figsize=(5, 4))
ax = fig.add_subplot(111)
# ax.plot(be, Rein, marker='o', color='r', label=Name[0])
ax.plot(x, HEM0PS, marker='x', color='DEEPPINK', label=Name[1])
# ax.plot(be, BIOP1SS, marker='+', color='g', label=Name[2])
# ax.plot(be, BIOP2SD, marker='*', color='b', label=Name[3])
# ax.plot(be, BIOP3PD, marker='^', color='m', label=Name[4])
# ax.plot(be, BIOP4DS, marker='s', color='y', label=Name[5])
ax.plot(x, HEM5DD, marker='.', color='DODGERBLUE', label=Name[6])
# ax.plot(be, Simple, marker='D', color='lightseagreen', label=Name[7])
lgdsize = 15
lsize = 18
ax.legend(loc=(2.8 / 10, 3.7 / 5), fontsize=lgdsize)
ax.grid()
ax.set_xlabel('Number of Groups', fontsize=lsize)
ax.set_ylabel('Matching Time (ms)', fontsize=lsize)
# plt.xticks(range(0,10))
ax.set_xlim(-0.5, 9.5)
ax.set_zorder(0)
# One major tick per group position.
x_major_locator = MultipleLocator(1)
ax.xaxis.set_major_locator(x_major_locator)
for size in ax.get_xticklabels(): # set the font size of every x tick label
    # size.set_fontname('Times New Roman')
    size.set_fontsize('16')
plt.tick_params(labelsize=18)
# plt.tick_params(direction='out', labelsize=18, length=4, width=1)
# Secondary axis sharing x: memory footprint bars.
ax2 = ax.twinx()
ax2.bar(x, Memory, color='lightsteelblue', label='Footprint') # alpha=0.7,
ax2.set_ylabel(r"Footprint (MB)", fontsize=lsize)
ax2.set_ylim(0, 2700)
ax2.legend(loc=(2.8 / 10, 2.5 / 5), fontsize=lgdsize, ncol=1)
ax2.set_zorder(1)
ax2.set_yticklabels(['0', '1k', '2k'])
# for size in ax2.get_xticklabels(): # set tick label font size (disabled)
# # size.set_fontname('Times New Roman')
# size.set_fontsize('16')
plt.xticks(x, labels=g)
plt.tick_params(direction='out', labelsize=20, length=4, width=1)
# Annotate each bar with its value; nudge a few labels to avoid overlap
# with the line plots.
for a, b in zip(x, Memory):
    c = b - 170
    if a == 5:
        c += 5
    elif a == 4:
        c -= 10
    plt.text(a, c, '%.0f' % b, ha='center', va='bottom', fontsize=12) # value label
gcf = plt.gcf()
plt.show()
gcf.savefig('./exp1_matchingTime_memory_be.pdf',
            format='pdf',
            bbox_inches='tight')
|
{"hexsha": "5103c877b016594e17b9d2adc3a6ad660d39edbb", "size": 2696, "ext": "py", "lang": "Python", "max_stars_repo_path": "pictures/HEM_expand/exp1_matchingTime_memory_be.py", "max_stars_repo_name": "shiwanghua/HEM", "max_stars_repo_head_hexsha": "be164ba1d577ea5e99141c7cbae98db1ca85f9c7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pictures/HEM_expand/exp1_matchingTime_memory_be.py", "max_issues_repo_name": "shiwanghua/HEM", "max_issues_repo_head_hexsha": "be164ba1d577ea5e99141c7cbae98db1ca85f9c7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pictures/HEM_expand/exp1_matchingTime_memory_be.py", "max_forks_repo_name": "shiwanghua/HEM", "max_forks_repo_head_hexsha": "be164ba1d577ea5e99141c7cbae98db1ca85f9c7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2839506173, "max_line_length": 76, "alphanum_fraction": 0.6535608309, "include": true, "reason": "import numpy", "num_tokens": 1026}
|
#!/usr/bin/env python
from datetime import timedelta
import gym
from gym import spaces
import numpy as np
import os
import random
import time
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.monitor import Monitor
from stable_baselines3 import DQN
from stable_baselines3.dqn import MlpPolicy
from connection import Connection
from game import Status, MAX_ACTIONS
# Vectorize a known test state once to learn the observation vector length.
test_status = Status.load_test_file("state")
STATUS_VECTOR_SIZE = len(Status.vectorizer.vectorize(test_status))
# Append-mode log file shared by the whole module.
LOG = open(os.path.expanduser("~/env.log"), "a+")
def log(message, and_print=False):
    """Append *message* as one line to the module log file.

    When ``and_print`` is true, the message is also echoed to stdout.
    """
    LOG.write(message + "\n")
    if and_print:
        print(message)
class SpireEnv(gym.Env):
    """Gym environment driving a Slay the Spire game through ``conn``.

    Observations are the binary feature vectors produced by
    ``Status.vectorizer``; actions are indices into the game's current
    command list (at most MAX_ACTIONS entries).
    """
    metadata = {"render.modes": ["human"]}
    def __init__(self, conn):
        super(SpireEnv, self).__init__()
        self.conn = conn
        self.action_space = spaces.Discrete(MAX_ACTIONS)
        self.observation_space = spaces.MultiBinary(STATUS_VECTOR_SIZE)
        # Lifetime statistics, accumulated across episodes.
        self.total_games = 0
        self.total_floors = 0
        # Final floors of recent finished games (capped at 30 in step()).
        self.max_floors = []
    def observe(self):
        # Fetch the current game status and vectorize it for the agent.
        status = self.conn.get_status()
        vector = Status.vectorizer.vectorize(status)
        return np.array(vector)
    def reset(self, seed=None):
        """
        This usually does not actually reset the game state.
        If there is a game in progress, this lets the game stay as it is.
        If there is no game in progress, this does start a new one.
        """
        self.last_enemy = "none"
        status = self.conn.get_status()
        if not status.has_game():
            self.conn.start_game(seed=seed)
        elif seed is not None:
            # A seed only makes sense when a fresh game can be started.
            raise ValueError("expected to start a new game")
        return self.observe()
    def step(self, action):
        """Issue ``action``; return (observation, reward, done, info).

        Reward is the hit-point delta plus 10 per floor climbed. An
        invalid action index falls back to the first valid command.
        """
        status = self.conn.get_status()
        if not status.has_commands():
            # The game is over.
            return (self.observe(), 0, True, {})
        try:
            command = status.make_command(action)
        except ValueError:
            # This action is invalid. Try the first valid command
            commands = status.get_commands()
            command = commands[0]
        log(f"command: {command}")
        pre_hp = status.hit_points()
        pre_floor = status.floor()
        status = self.conn.issue_command(command)
        post_hp = status.hit_points()
        post_floor = status.floor()
        # Base reward: HP change (damage taken is negative).
        reward = post_hp - pre_hp
        if post_floor > pre_floor:
            log(f"got to floor {post_floor}")
            reward += 10 * (post_floor - pre_floor)
            self.total_floors += 1
        if status.has_game():
            done = False
        else:
            log("game over")
            self.total_games += 1
            done = True
            if status.is_death():
                log(f"seed {status.seed()} died to {self.last_enemy}")
            # Sliding window of the last 30 final floors.
            self.max_floors.append(status.floor())
            if len(self.max_floors) > 30:
                self.max_floors.pop(0)
        self.last_enemy = status.enemy()
        return (self.observe(), reward, done, {})
    def render(self, mode="human"):
        if mode != "human":
            raise NotImplementedError
        self.conn.show()
class TensorboardCallback(BaseCallback):
    """Log the rolling mean of recent final floors to TensorBoard."""

    def __init__(self, env):
        super(TensorboardCallback, self).__init__(0)
        self.env = env

    def _on_step(self):
        # Record the average of the recent game results, if any exist yet.
        recent = self.env.max_floors
        if recent:
            self.logger.record("max_floor", sum(recent) / len(recent))
        return True
# Saved-model filename stem and the algorithm used by train()/evaluate().
MODEL_NAME = "dqn_default"
MODEL_CLASS = DQN
# DQN constructor options; presumably tuned for this environment — TODO confirm.
KWARGS = {"verbose": 1, "buffer_size": 100000, "learning_starts": 1000}
def train(hours):
    """Train the model for roughly ``hours`` hours, then save it.

    Loads MODEL_NAME if a saved model exists, otherwise creates a fresh
    one, runs ~7000 environment steps per requested hour, and prints
    summary statistics when done.
    """
    conn = Connection()
    env = Monitor(SpireEnv(conn), "./tmp/")
    env.reset()
    logdir = "./tboard_log"
    try:
        model = MODEL_CLASS.load(MODEL_NAME, env=env, tensorboard_log=logdir)
    except FileNotFoundError:
        model = MODEL_CLASS(MlpPolicy, env, tensorboard_log=logdir, **KWARGS)
    start = time.time()
    # Empirical throughput used to convert hours into a step budget.
    steps_per_hour = 7000
    steps = steps_per_hour * hours
    callback = TensorboardCallback(env)
    model.learn(total_timesteps=steps, reset_num_timesteps=False, callback=callback)
    model.save(MODEL_NAME)
    elapsed = time.time() - start
    print(f"{steps} steps processed")
    print(f"{timedelta(seconds=elapsed)} time elapsed")
    print(f"{env.total_floors} floors climbed")
    print(f"{env.total_games} games played")
    if env.total_games > 0:
        print("{:.2f} floors per game".format(env.total_floors / env.total_games))
def evaluate(seed):
    """Play one full game with the saved model on the given seed.

    Steps the policy deterministically until the game ends, logging the
    floor reached when the run ends in death.
    """
    conn = Connection()
    env = SpireEnv(conn)
    obs = env.reset(seed=seed)
    model = MODEL_CLASS.load(MODEL_NAME, env=env)
    print(f"evaluating seed {seed}")
    while True:
        # Greedy (deterministic) action selection for evaluation.
        action, _states = model.predict(obs, deterministic=True)
        obs, reward, done, info = env.step(action)
        status = env.conn.get_status()
        if status.is_death():
            log(
                f"on seed {seed}|{status.seed()} we got to floor {status.floor()}",
                and_print=True,
            )
        if done:
            break
if __name__ == "__main__":
    # Run 16 back-to-back one-hour training sessions.
    # The loop variable was renamed from `round`, which shadowed the
    # `round` builtin.
    for training_round in range(16):
        print(f"{time.ctime()} - training round {training_round}")
        train(1)
|
{"hexsha": "9d775cde34fa97c122c138116a2a1b30c9308af5", "size": 5304, "ext": "py", "lang": "Python", "max_stars_repo_path": "spire_env.py", "max_stars_repo_name": "lacker/snecko", "max_stars_repo_head_hexsha": "9b40cd4185a1797f2497f2316c08dcccdde518f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "spire_env.py", "max_issues_repo_name": "lacker/snecko", "max_issues_repo_head_hexsha": "9b40cd4185a1797f2497f2316c08dcccdde518f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-08-11T05:26:24.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-14T23:05:26.000Z", "max_forks_repo_path": "spire_env.py", "max_forks_repo_name": "lacker/snecko", "max_forks_repo_head_hexsha": "9b40cd4185a1797f2497f2316c08dcccdde518f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-11T05:18:53.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-11T05:18:53.000Z", "avg_line_length": 28.6702702703, "max_line_length": 84, "alphanum_fraction": 0.6180241327, "include": true, "reason": "import numpy", "num_tokens": 1232}
|
import numpy as np
from ..util.convert import ConvertibleType
from typing import Any, Optional, Iterable, Callable, Tuple, List, Dict
GLOBAL_GROUP = ''


class analyzer_property:
    """Data descriptor for analyzer settings.

    Stores a per-instance value (with a class-level default), runs
    optional validation callbacks before accepting a new value, pipes the
    accepted value through ``hook``, and then fires ``compute`` callbacks
    so dependent state can refresh. Recursive assignment from inside a
    callback is detected and rejected.
    """

    def __init__(
        self,
        client_name: Optional[str] = None,
        *,
        default,
        detail: Optional[Dict[str, 'ConvertibleType']] = None,
        hook: Callable[['BaseAnalyzer', Any], Any] = lambda obj, x: x,
    ):
        # Name exposed to clients; falls back to the attribute name
        # (filled in by __set_name__) when not given.
        self.client_name = client_name
        self.default_value = default
        self.hook = hook
        self.detail = {} if detail is None else detail
        # Callbacks invoked after each accepted assignment.
        self.compute_callbacks: List[
            Callable[['BaseAnalyzer'], Any]
        ] = []
        # Predicates that must all return True for a value to be accepted.
        # BUG FIX: the annotation was List[[Callable...]] (a list literal
        # inside List), which is not a valid type.
        self.validate_callbacks: List[
            Callable[['BaseAnalyzer', Any], bool]
        ] = []
        # Tag the property with the group active at declaration time.
        self.detail['group'] = GLOBAL_GROUP

    def __set_name__(self, owner, name: str):
        if self.client_name is None:
            self.client_name = name
        self.name = name

    def __get__(self, instance: 'BaseAnalyzer', owner=None):
        if instance is None:
            return self
        if self.name in instance.__dict__:
            return instance.__dict__[self.name]['value']
        else:
            return self.default_value

    def __set__(self, instance: 'BaseAnalyzer', value):
        if self.name not in instance.__dict__:
            instance.__dict__[self.name] = {
                'value': self.default_value,
                'updating': False
            }
        value_dict = instance.__dict__[self.name]
        if value_dict['updating']:
            # BUG FIX: this message was a plain string, so the property
            # name was never interpolated.
            raise RuntimeError(
                f"Recursive update detected in the property {self.name!r}."
            )
        if all(check(instance, value) for check in self.validate_callbacks):
            value_dict['updating'] = True
            try:
                value_dict['value'] = self.hook(instance, value)
                for callback in self.compute_callbacks:
                    callback(instance)
            finally:
                # Always clear the flag, even when a hook or callback
                # raises, so the property is not permanently locked.
                value_dict['updating'] = False

    def __call__(self, hook: Callable[['BaseAnalyzer', Any], Any]):
        """Decorator form: chain an extra hook after the existing one."""
        old_hook = self.hook
        self.hook = lambda obj, x: hook(obj, old_hook(obj, x))
        return self

    def compute(self, callback: Callable[['BaseAnalyzer'], Any]):
        """Register a callback fired after each accepted assignment."""
        self.compute_callbacks.append(callback)
        return callback

    def validate(self, callback: Callable[['BaseAnalyzer', Any], bool]):
        """Register a predicate that can veto an assignment."""
        self.validate_callbacks.append(callback)
        return callback
class AnalyzerMeta (type):
    """Metaclass that gathers analyzer_property declarations.

    Builds a per-class mapping from each property's client-facing name
    to its attribute name (inheriting entries from base classes), and
    resets the module-level GLOBAL_GROUP once the class body has been
    processed, so a group() call only applies to one class.
    """
    def __init__(
        cls,
        name: str,
        bases: Tuple['AnalyzerMeta', ...],
        namespace: Dict[str, Any],
    ):
        super().__init__(name, bases, namespace)
        # client name -> attribute name, including inherited properties.
        cls._properties: Dict[str, str] = dict()
        for base in bases:
            cls._properties.update(base._properties)
        for name, value in namespace.items():
            if isinstance(value, analyzer_property):
                cls._properties[value.client_name] = value.name
        # End of class body: the active group no longer applies.
        global GLOBAL_GROUP
        GLOBAL_GROUP = ''
class BaseAnalyzer (metaclass=AnalyzerMeta):
    """Base class for analyzers; exposes analyzer_property values to clients."""
    def analyze(self, signal: np.ndarray):
        """Process one chunk of signal data; subclasses must override."""
        raise NotImplementedError
    def get_client_properties(
        self,
        client_names: Optional[Iterable[str]] = None,
    ):
        """Return {client name: current value}; all properties when None."""
        if client_names is None:
            client_names = type(self)._properties.keys()
        return {
            client_name: getattr(
                self,
                type(self)._properties[client_name],
            )
            for client_name in client_names
        }
    def get_client_property_details(
        self,
        client_names: Optional[Iterable[str]] = None,
    ):
        """Return {client name: {'value': ..., 'detail': ...}} per property."""
        if client_names is None:
            client_names = type(self)._properties.keys()
        return {
            client_name: {
                # Current (or default) value via the descriptor.
                'value': getattr(
                    self,
                    type(self)._properties[client_name],
                ),
                # Static metadata from the class-level descriptor object.
                'detail': getattr(
                    type(self),
                    type(self)._properties[client_name],
                ).detail,
            }
            for client_name in client_names
        }
def group(name: str):
    """Set the group recorded by analyzer properties declared after this call.

    The AnalyzerMeta metaclass resets the group to '' at the end of each
    analyzer class body.
    """
    global GLOBAL_GROUP
    GLOBAL_GROUP = str(name)
|
{"hexsha": "1395051c4145ac5444084c328c6b3e20ca71ac06", "size": 4367, "ext": "py", "lang": "Python", "max_stars_repo_path": "_lib/analyzer/core.py", "max_stars_repo_name": "accup/Vyjit", "max_stars_repo_head_hexsha": "b7fc04625348eed04fc076950f7fabc99007eed8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "_lib/analyzer/core.py", "max_issues_repo_name": "accup/Vyjit", "max_issues_repo_head_hexsha": "b7fc04625348eed04fc076950f7fabc99007eed8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "_lib/analyzer/core.py", "max_forks_repo_name": "accup/Vyjit", "max_forks_repo_head_hexsha": "b7fc04625348eed04fc076950f7fabc99007eed8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3263888889, "max_line_length": 77, "alphanum_fraction": 0.5523242501, "include": true, "reason": "import numpy", "num_tokens": 874}
|
#!/usr/bin/env python
import sys, os, time
import numpy as np
import collections
def buildGraphImpl(fullMeshCells, nth, nr, targetListOfCells, varGroups):
  """Build the dof adjacency graphs (stencils) for the requested cells.

  fullMeshCells: sequence of cell objects exposing getCellIJ() and
    getDofGID(kind) for kind in ('vp', 'srp', 'stp').
  nth, nr: number of cells along theta and r respectively.
  targetListOfCells: indices (into fullMeshCells) of the cells to process.
  varGroups: 2 to build combined graphs, any other value to build the
    split graphs.

  Returns [graphVp, coeffVp, graphSp] when varGroups == 2, otherwise
  [graphVpSrp, graphVpStp, coeffVpSrp, coeffVpStp, graphSrp, graphStp].
  """
  # the total number of cells we have
  numCells = len(fullMeshCells)
  # graph is an adjacency list (here we use a dictionary) for each dof:
  # - key   = the global ID of a dof
  # - value = list of global IDs for the dofs needed at west, north, east, south
  if varGroups==2:
    graphVp = collections.OrderedDict()
    graphSp = collections.OrderedDict()
    # coeffVp contains the sign (-1 or 1) to identify where a Vp is located
    # such that we can handle the boundaries easily
    # - key   = the global ID of a dof
    # - value = value needed for the west, north, east, south dof
    coeffVp = collections.OrderedDict()
  else:
    graphVpSrp = collections.OrderedDict()
    graphVpStp = collections.OrderedDict()
    coeffVpSrp = collections.OrderedDict()
    coeffVpStp = collections.OrderedDict()
    graphSrp = collections.OrderedDict()
    graphStp = collections.OrderedDict()
  # --- loop over list of target cell IDs we want ---
  for k in targetListOfCells:
    iCell = fullMeshCells[k]
    # find the ij of this cell
    cellI, cellJ = iCell.getCellIJ()[0], iCell.getCellIJ()[1]
    # temporary list of neighbors for a vp points listed as west, north, east, south (order matters)
    tmpListVp = np.zeros(4, dtype=np.int64)
    # tmp for storing multiplicative coeff for vp stencil
    tmpCoeffVp = np.zeros(4, dtype=float)
    # stp points only have west, east (order matters)
    tmpListStp = np.zeros(2, dtype=np.int64)
    # srp points only have north, south (order matters)
    tmpListSrp = np.zeros(2, dtype=np.int64)
    #****************************************
    # deal with vp
    #
    # vp exists everywhere but
    # need to be careful near the boundaries
    #****************************************
    # get GID of the vp dof at current cell
    vpGID = iCell.getDofGID('vp')
    #-----------------------------------
    # if cell is NOT close to boundary at theta=thetaLeft, stp dof on the left exists
    if cellI >=1:
      # get the gid of west stp, which is in the cell on the left
      # since we use natural order for cell enumerating, the cell on the left is at k-1
      westCell = fullMeshCells[k-1]
      tmpListVp[0] = westCell.getDofGID('stp')
      tmpCoeffVp[0] = 1.
    # if cell is at theta=thetaLeft
    if cellI==0:
      # here we are at thetaLeft, and we know we need the stp on the left which
      # does not exist. but we also know that the stp on the left is set
      # to be equal to negative the value of
      # stp on the right so we use trick of setting the west GID to be the negative of
      # the stp dof on the right
      tmpListVp[0] = iCell.getDofGID('stp')
      tmpCoeffVp[0] = 0.
    #-----------------------------------
    # if cell is NOT close to thetaRight
    if cellI<nth-1:
      # get the gid of east stp
      tmpListVp[2] = iCell.getDofGID('stp')
      tmpCoeffVp[2] = 1.
    if cellI==nth-1:
      # here we are right at thetaRight, and we know we need the stp on the right
      # which does not exist. but we also know that the stp on the right is set to
      # be equal to negative the stp on the left so we set the east GID to be
      # the negative of the stp dof on the west
      westCell = fullMeshCells[k-1]
      tmpListVp[2] = westCell.getDofGID('stp')
      tmpCoeffVp[2] = 0.
    #-----------------------------------
    # if cell is NOT at earth surface
    if cellJ<nr-1:
      # get the gid of the north srp
      tmpListVp[1] = iCell.getDofGID('srp')
      tmpCoeffVp[1] = 1.
    # if cell is at earth surface
    if cellJ==nr-1:
      # surface boundary: mirror the srp from the cell below with flipped sign
      northCell = fullMeshCells[k-nth]
      tmpListVp[1] = northCell.getDofGID('srp')
      tmpCoeffVp[1] = -1.
    #-----------------------------------
    # if cell is NOT at CMB
    if cellJ>=1:
      # get the gid of south srp (cell below is nth positions back in natural order)
      southwCell = fullMeshCells[k-nth]
      tmpListVp[3] = southwCell.getDofGID('srp')
      tmpCoeffVp[3] = 1.
    # if cell is right next to cmb
    if cellJ==0:
      tmpListVp[3] = iCell.getDofGID('srp')
      tmpCoeffVp[3] = -1.
    #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # at the symmetry axes, set all coeffs = 0
    # since v undefined here, this also means that
    # srp will be zero here because srp uses the v on this axes
    if cellI==0 or cellI==nth-1:
      tmpCoeffVp = np.zeros(4)
    #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # add to graph
    if (varGroups==2):
      graphVp[vpGID] = tmpListVp
      coeffVp[vpGID] = tmpCoeffVp
    else:
      # for vpSrp, only take north and south elements
      graphVpSrp[vpGID] = tmpListVp[1::2]
      coeffVpSrp[vpGID] = tmpCoeffVp[1::2]
      # for vpStp, only take west and east elements
      graphVpStp[vpGID] = tmpListVp[0::2]
      coeffVpStp[vpGID] = tmpCoeffVp[0::2]
    #***********************************************
    # --- deal with srp ---
    #***********************************************
    # srp exists only for cells with j<=nr-2
    if cellJ <= nr-2:
      srpGID = iCell.getDofGID('srp')
      # get the cell that is north
      northCell = fullMeshCells[k+nth]
      # needs the north velocity
      tmpListSrp[0] = northCell.getDofGID('vp')
      # needs south velocity
      tmpListSrp[1] = iCell.getDofGID('vp')
      if varGroups==2:
        graphSp[srpGID] = tmpListSrp
      else:
        graphSrp[srpGID] = tmpListSrp
    #***********************************************
    # --- deal with stp ---
    #***********************************************
    # stp exists only for cells with i<=nth-2
    if cellI <= nth-2:
      stpGID = iCell.getDofGID('stp')
      # needs west velocity
      tmpListStp[0] = iCell.getDofGID('vp')
      # get the cell that is neighbor on right
      eastCell = fullMeshCells[k+1]
      # needs east velocity
      tmpListStp[1] = eastCell.getDofGID('vp')
      if varGroups==2:
        graphSp[stpGID] = tmpListStp
      else:
        graphStp[stpGID] = tmpListStp
  if varGroups==2:
    return [graphVp, coeffVp, graphSp]
  else:
    return [graphVpSrp, graphVpStp, coeffVpSrp, coeffVpStp, graphSrp, graphStp]
|
{"hexsha": "c8472473b1fcd403f76ef01629172880b67c5649", "size": 6233, "ext": "py", "lang": "Python", "max_stars_repo_path": "meshing/helpers/build_graph_impl.py", "max_stars_repo_name": "fnrizzi/ElasticShearWaves", "max_stars_repo_head_hexsha": "b09cde0711562412c6bc24de0d18ad3a972b7289", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-12-06T16:17:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T09:23:45.000Z", "max_issues_repo_path": "meshing/helpers/build_graph_impl.py", "max_issues_repo_name": "fnrizzi/ElasticShearWaves", "max_issues_repo_head_hexsha": "b09cde0711562412c6bc24de0d18ad3a972b7289", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-12-01T14:40:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T07:44:41.000Z", "max_forks_repo_path": "meshing/helpers/build_graph_impl.py", "max_forks_repo_name": "fnrizzi/ElasticShearWaves", "max_forks_repo_head_hexsha": "b09cde0711562412c6bc24de0d18ad3a972b7289", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-02T02:06:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-02T02:06:41.000Z", "avg_line_length": 36.0289017341, "max_line_length": 100, "alphanum_fraction": 0.5846301941, "include": true, "reason": "import numpy", "num_tokens": 1868}
|
/*
* Copyright (c) 2014 Nicholas Corgan (n.corgan@gmail.com)
*
* Distributed under the MIT License (MIT) (See accompanying file LICENSE.txt
* or copy at http://opensource.org/licenses/MIT)
*/
#include <fstream>
#include <iostream>

#include <boost/filesystem.hpp>
#include <boost/locale/encoding_utf.hpp>

#include <pkmn/enums.hpp>

#include "game_save_gen3impl.hpp"
#include "conversions/items.hpp"
#include "conversions/pokemon.hpp"
namespace fs = boost::filesystem;
namespace pkmn
{
    // Map libspec save-type values to LibPKMN version IDs.
    unsigned int _game_ids[] = {Versions::NONE, Versions::RUBY,
                                Versions::EMERALD, Versions::FIRERED};

    game_save_gen3impl::game_save_gen3impl(uint8_t* buffer,
                                           const std::string &filename): game_save_impl(filename)
    {
        _data = buffer;
        _libspec_save = gba_read_main_save(_data);
        // Detect which Gen 3 game this save belongs to.
        _game_id = _game_ids[_libspec_save->type];

        load();
    }

    game_save_gen3impl::~game_save_gen3impl()
    {
        gba_free_save(_libspec_save);
    }

    void game_save_gen3impl::load()
    {
        _trainer = conversions::import_gen3_trainer(_libspec_save);
    }

    void game_save_gen3impl::save_as(const std::string &filename)
    {
        //TODO: actual saving stuff

        //Use size of original filepath to determine new filesize
        unsigned int size = fs::file_size(_filepath);

        // BUG FIX: the save buffer is raw binary data, so the stream must
        // be opened in binary mode; text mode would corrupt the output on
        // platforms that translate line endings.
        std::ofstream ofile(filename.c_str(), std::ios::out | std::ios::binary);
        ofile.write((char*)_data, size);
        ofile.close();

        _filepath = fs::path(filename);
    }
} /* namespace pkmn */
|
{"hexsha": "98a1447e0984728e92d6783d1e632c5a3f5d8c9d", "size": 1556, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "lib/game_save_gen3impl.cpp", "max_stars_repo_name": "codemonkey85/LibPKMN", "max_stars_repo_head_hexsha": "96a1800a24bf3861da405cf56daa2d7afd6c850d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/game_save_gen3impl.cpp", "max_issues_repo_name": "codemonkey85/LibPKMN", "max_issues_repo_head_hexsha": "96a1800a24bf3861da405cf56daa2d7afd6c850d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/game_save_gen3impl.cpp", "max_forks_repo_name": "codemonkey85/LibPKMN", "max_forks_repo_head_hexsha": "96a1800a24bf3861da405cf56daa2d7afd6c850d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-07-08T20:43:57.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-08T20:43:57.000Z", "avg_line_length": 25.5081967213, "max_line_length": 97, "alphanum_fraction": 0.6401028278, "num_tokens": 373}
|
import pickle
import sys
import os
import time
import lasagne
import theano
import numpy as np
import theano.tensor as T
from lasagne import layers
from lasagne.updates import nesterov_momentum
from theano.sandbox.neighbours import neibs2images
from lasagne.nonlinearities import sigmoid, rectify, leaky_rectify, identity
from lasagne.nonlinearities import softmax
from lasagne import regularization
from scipy import misc
from PIL import Image
from lasagne import init
from math import floor
from sklearn.metrics import mean_squared_error, accuracy_score, hamming_loss, roc_curve, auc
from data_aug import data_aug
sys.path.append('..')
from common.shape import ReshapeLayer
from common.batch_norms import batch_norm, SoftThresPerc
from common.ch_inner_prod import ChInnerProd, ChInnerProdMerge
# Patch geometry: APS is the sampled patch size, PS the CNN input size.
APS = 100
PS = 100
# Command line: <tile folder> <model folder> <heat-map output file name>.
TileFolder = sys.argv[1] + '/'
LearningRate = theano.shared(np.array(5e-3, dtype=np.float32))
BatchSize = 96
CNNModel = sys.argv[2] + '/cnn_lym_model.pkl'
heat_map_out = sys.argv[3]
# Normalization statistics handed to data_aug for input patches.
mu = 0.6151888371
sigma = 0.2506813109
# Number of (placeholder) augmentation features fed alongside each patch.
aug_fea_n = 1
def whiteness(png):
    """Mean of the per-channel (R, G, B) standard deviations of a patch.

    Used as a tissue/background detector: near-white background patches
    have almost no per-channel variation and score close to zero.
    """
    per_channel = [np.std(png[:, :, c].flatten()) for c in range(3)]
    return (per_channel[0] + per_channel[1] + per_channel[2]) / 3.0
def iterate_minibatches(inputs, augs, targets):
    """Yield (inputs, augs, targets) slices of size BatchSize.

    If the whole set fits into one batch it is yielded as-is; otherwise
    full batches are yielded first and any remainder is yielded last.
    """
    n = inputs.shape[0]
    if n <= BatchSize:
        yield inputs, augs, targets
        return
    stop = 0
    for start in range(0, n - BatchSize + 1, BatchSize):
        stop = start + BatchSize
        yield inputs[start:stop], augs[start:stop], targets[start:stop]
    # Remainder that did not fill a whole batch.
    if stop < n:
        yield inputs[stop:n], augs[stop:n], targets[stop:n]
def load_data(todo_list, rind):
    """Load roughly one batch of APS x APS patches from the tile files.

    Consumes files from the front of todo_list until at least BatchSize
    non-white (tissue) patches have been collected, then returns the
    remaining file list, the patch tensor X, the per-patch running indices,
    the slide-space coordinates of every grid position, and the updated
    running counter rind.
    """
    # Oversized buffers: a whole file's patches always fit before the
    # BatchSize check below triggers; trimmed before returning.
    X = np.zeros(shape=(BatchSize*40, 3, APS, APS), dtype=np.float32);
    inds = np.zeros(shape=(BatchSize*40,), dtype=np.int32);
    coor = np.zeros(shape=(20000000, 2), dtype=np.int32);
    xind = 0;
    lind = 0;
    cind = 0;
    for fn in todo_list:
        lind += 1;
        full_fn = TileFolder + '/' + fn;
        if not os.path.isfile(full_fn):
            continue;
        # File names encode "<x_off>_<y_off>_<svs_width>_<png_width>.png".
        if len(fn.split('_')) < 4:
            continue;
        x_off = float(fn.split('_')[0]);
        y_off = float(fn.split('_')[1]);
        svs_pw = float(fn.split('_')[2]);
        png_pw = float(fn.split('_')[3].split('.png')[0]);
        png = np.array(Image.open(full_fn).convert('RGB'));
        for x in range(0, png.shape[1], APS):
            if x + APS > png.shape[1]:
                continue;
            for y in range(0, png.shape[0], APS):
                if y + APS > png.shape[0]:
                    continue;
                # Only patches with enough color variation (tissue) are fed
                # to the CNN; white background patches are skipped but still
                # get a coordinate entry below.
                if (whiteness(png[y:y+APS, x:x+APS, :]) >= 12):
                    # NOTE(review): .transpose() maps HxWx3 -> 3xWxH, i.e. it
                    # also swaps the patch's x/y axes; confirm the model was
                    # trained with the same orientation.
                    X[xind, :, :, :] = png[y:y+APS, x:x+APS, :].transpose();
                    inds[xind] = rind;
                    xind += 1;
                # Coordinates (slide/svs pixel space) are recorded for every
                # grid position, white or not; rind counts all positions.
                coor[cind, 0] = np.int32(x_off + (x + APS/2) * svs_pw / png_pw);
                coor[cind, 1] = np.int32(y_off + (y + APS/2) * svs_pw / png_pw);
                cind += 1;
                rind += 1;
        # This break only fires between files, so a returned batch may
        # exceed BatchSize (the oversized X buffer absorbs the overflow).
        if xind >= BatchSize:
            break;
    X = X[0:xind];
    inds = inds[0:xind];
    coor = coor[0:cind];
    return todo_list[lind:], X, inds, coor, rind;
def from_output_to_pred(output):
    """Binarize sigmoid outputs at the 0.5 threshold into int32 labels."""
    return (np.copy(output) >= 0.5).astype(np.int32)
def multi_win_during_val(val_fn, inputs, augs, targets):
    """Average validation outputs over a set of deterministic crop draws.

    Currently only the single draw (idraw == jdraw == -1) is evaluated;
    the double loop is kept so more draws can be averaged in later.
    """
    output = None
    weight = 0.0
    for idraw in [-1, ]:
        for jdraw in [-1, ]:
            # Evaluate the whole input set in mini-batches for this draw.
            output_pat = np.zeros((inputs.shape[0], 1), dtype=np.float32)
            ncase = 0
            for inp, aug, tar in iterate_minibatches(inputs, augs, targets):
                _, outp = val_fn(
                    data_aug(inp, mu, sigma, deterministic=False, idraw=idraw, jdraw=jdraw),
                    aug, tar)
                output_pat[ncase:ncase + len(outp)] = outp
                ncase += len(outp)
            # Accumulate this draw's outputs into the running average.
            if output is None:
                output = output_pat
                weight = 1.0
            else:
                output += output_pat
                weight += 1.0
    return output / weight
def val_fn_epoch_on_disk(classn, val_fn):
    """Run the validation function over every tile file in TileFolder.

    Returns
    -------
    (all_or, all_inds, all_coor) :
        Network outputs, per-patch indices, and per-position coordinates
        for the whole slide, trimmed to the entries actually filled.
    """
    # Preallocated upper-bound buffers; trimmed before returning.
    all_or = np.zeros(shape=(20000000, classn), dtype=np.float32);
    all_inds = np.zeros(shape=(20000000,), dtype=np.int32);
    all_coor = np.zeros(shape=(20000000, 2), dtype=np.int32);
    rind = 0;
    n1 = 0;
    n2 = 0;
    n3 = 0;
    todo_list = os.listdir(TileFolder);
    while len(todo_list) > 0:
        todo_list, inputs, inds, coor, rind = load_data(todo_list, rind);
        if len(inputs) == 0:
            # Batch contained only background (white) patches: record their
            # coordinates but skip network evaluation.
            all_coor[n3:n3+len(coor)] = coor;
            n3 += len(coor);
            continue;
        augs = get_aug_feas(inputs);
        # Targets are dummies; only the network outputs are used here.
        targets = np.zeros((inputs.shape[0], classn), dtype=np.int32);
        output = multi_win_during_val(val_fn, inputs, augs, targets);
        all_or[n1:n1+len(output)] = output;
        all_inds[n2:n2+len(inds)] = inds;
        all_coor[n3:n3+len(coor)] = coor;
        n1 += len(output);
        n2 += len(inds);
        n3 += len(coor);
    all_or = all_or[:n1];
    all_inds = all_inds[:n2];
    all_coor = all_coor[:n3];
    return all_or, all_inds, all_coor;
def confusion_matrix(Or, Tr, thres):
    """Return (tp, tn, fp, fn) counts of scores Or vs binary truth Tr at thres."""
    predicted_pos = (Or >= thres)
    predicted_neg = (Or < thres)
    tpos = np.sum(predicted_pos & (Tr == 1))
    tneg = np.sum(predicted_neg & (Tr == 0))
    fpos = np.sum(predicted_pos & (Tr == 0))
    fneg = np.sum(predicted_neg & (Tr == 1))
    return tpos, tneg, fpos, fneg
def auc_roc(Pr, Tr):
    """Area under the ROC curve for scores Pr against binary labels Tr."""
    fpr_vals, tpr_vals, _ = roc_curve(Tr, Pr, pos_label=1.0)
    return auc(fpr_vals, tpr_vals)
def build_network_from_ae(classn):
    """Assemble the lymphocyte classifier on top of the autoencoder graph.

    Rebuilds the convolutional autoencoder (a local feature/mask branch and
    a pooled global branch), then appends the classification head whose
    parameters are returned separately for fine-tuning.

    Returns
    -------
    (network, new_params, input_var, aug_var, target_var) :
        Final sigmoid classifier layer, the parameters added on top of the
        autoencoder, and the theano input variables.
    """
    input_var = T.tensor4('input_var');
    # --- Encoder: local convolutional feature branch ---
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 640,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    # Feature map modulated by a learned sparse mask -> "encoder" output.
    featm    = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    maskm    = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1,   filter_size=(1,1), nonlinearity=None),   beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=97.0, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map");
    enlyr    = ChInnerProdMerge(feat_map, mask_map, name="encoder");
    # --- Decoder for the local branch (reconstruction path) ---
    layer = batch_norm(layers.Deconv2DLayer(enlyr, 1024, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640,  filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 240,  filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 120,  filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100,  filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer =            layers.Deconv2DLayer(layer, 3,    filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    # --- Global branch: pooled features and their decoder ---
    glblf = batch_norm(layers.Conv2DLayer(prely, 128,  filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64,   filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    gllyr = batch_norm(layers.Conv2DLayer(glblf, 5,    filter_size=(1,1), nonlinearity=rectify), name="global_feature");
    glblf = batch_norm(layers.Deconv2DLayer(gllyr, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf =            layers.Deconv2DLayer(glblf, 3,   filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    # Combine local and global reconstructions into the AE output.
    layer = layers.ElemwiseSumLayer([layer, glblf]);
    network = ReshapeLayer(layer, ([0], -1));
    # Tighten the mask threshold relative to its initialization value.
    mask_map.beta.set_value(np.float32(0.9*mask_map.beta.get_value()));
    # Remember the AE parameters so the classifier-only set can be derived.
    old_params = layers.get_all_params(network, trainable=True);
    # Adding more layers
    aug_var = T.matrix('aug_var');
    target_var = T.imatrix('targets');
    add_a = batch_norm(layers.Conv2DLayer(enlyr, 320, filter_size=(1,1), nonlinearity=leaky_rectify));
    add_b = batch_norm(layers.Conv2DLayer(add_a, 320, filter_size=(1,1), nonlinearity=leaky_rectify));
    add_c = batch_norm(layers.Conv2DLayer(add_b, 320, filter_size=(1,1), nonlinearity=leaky_rectify));
    add_d = batch_norm(layers.Conv2DLayer(add_c, 320, filter_size=(1,1), nonlinearity=leaky_rectify));
    add_0 = layers.Pool2DLayer(add_d, pool_size=(25,25), stride=25, mode='average_inc_pad');
    add_1 = batch_norm(layers.DenseLayer(add_0, 100, nonlinearity=leaky_rectify));
    add_2 = batch_norm(layers.DenseLayer(gllyr, 320, nonlinearity=leaky_rectify));
    add_3 = batch_norm(layers.DenseLayer(add_2, 320, nonlinearity=leaky_rectify));
    add_4 = batch_norm(layers.DenseLayer(add_3, 100, nonlinearity=leaky_rectify));
    aug_layer = layers.InputLayer(shape=(None, aug_fea_n), input_var=aug_var);
    cat_layer = lasagne.layers.ConcatLayer([add_1, add_4, aug_layer], axis=1);
    hidden_layer = layers.DenseLayer(cat_layer, 80, nonlinearity=leaky_rectify);
    network = layers.DenseLayer(hidden_layer, classn, nonlinearity=sigmoid);
    all_params = layers.get_all_params(network, trainable=True);
    # Parameters added on top of the autoencoder (the classifier head).
    new_params = [x for x in all_params if x not in old_params];
    return network, new_params, input_var, aug_var, target_var;
def make_training_functions(network, new_params, input_var, aug_var, target_var):
    """Compile theano train/validation functions for the classifier.

    Returns (train_fn, new_params_train_fn, val_fn). The second trains only
    the newly added classifier parameters (fine-tuning on top of the AE).
    """
    # NOTE(review): the training loss is built from a deterministic pass with
    # frozen batch-norm averages, so gradients ignore batch statistics --
    # confirm this is the intended training objective.
    output = lasagne.layers.get_output(network, deterministic=True, batch_norm_use_averages=True, batch_norm_update_averages=False);
    loss = lasagne.objectives.binary_crossentropy(output, target_var).mean();
    deter_output = lasagne.layers.get_output(network, deterministic=True);
    deter_loss = lasagne.objectives.binary_crossentropy(deter_output, target_var).mean();
    params = layers.get_all_params(network, trainable=True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=LearningRate, momentum=0.985);
    new_params_updates = lasagne.updates.nesterov_momentum(loss, new_params, learning_rate=LearningRate, momentum=0.985);
    val_fn = theano.function([input_var, aug_var, target_var], [deter_loss, deter_output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates);
    new_params_train_fn = theano.function([input_var, aug_var, target_var], loss, updates=new_params_updates);
    return train_fn, new_params_train_fn, val_fn;
def get_aug_feas(X):
    """Zero-filled augmentation-feature matrix of shape (len(X), aug_fea_n)."""
    return np.zeros((X.shape[0], aug_fea_n), dtype=np.float32)
def split_validation(classn):
    """Build the network, load trained weights, and write the heat map.

    Writes one "x y score" line per grid position to
    TileFolder/heat_map_out.
    """
    network, new_params, input_var, aug_var, target_var = build_network_from_ae(classn);
    train_fn, new_params_train_fn, val_fn = make_training_functions(network, new_params, input_var, aug_var, target_var);
    layers.set_all_param_values(network, pickle.load(open(CNNModel, 'rb')));
    # Testing
    Or, inds, coor = val_fn_epoch_on_disk(classn, val_fn);
    # Scatter per-patch scores into one entry per grid position; positions
    # skipped as white background keep the default score 0.
    Or_all = np.zeros(shape=(coor.shape[0],), dtype=np.float32);
    Or_all[inds] = Or[:, 0];
    fid = open(TileFolder + '/' + heat_map_out, 'w');
    for idx in range(0, Or_all.shape[0]):
        fid.write('{} {} {}\n'.format(coor[idx][0], coor[idx][1], Or_all[idx]));
    fid.close();
    return;
def main():
    """Entry point: predict a lymphocyte heat map for every tile in TileFolder."""
    if not os.path.exists(TileFolder):
        exit(0)
    classes = ['Lymphocytes']
    classn = len(classes)
    # Deep lasagne graphs can exceed the default pickling recursion depth.
    sys.setrecursionlimit(10000)
    split_validation(classn)
    print('DONE!')
if __name__ == "__main__":
    main()
|
{"hexsha": "07df42c2f8c523f416fbb2a9ba29cadf6652aa24", "size": 14477, "ext": "py", "lang": "Python", "max_stars_repo_path": "u24_lymphocyte/prediction/lymphocyte/pred.py", "max_stars_repo_name": "ALSM-PhD/quip_classification", "max_stars_repo_head_hexsha": "7347bfaa5cf11ae2d7a528fbcc43322a12c795d3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-06-14T16:50:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-21T12:09:09.000Z", "max_issues_repo_path": "u24_lymphocyte/prediction/lymphocyte/pred.py", "max_issues_repo_name": "ALSM-PhD/quip_classification", "max_issues_repo_head_hexsha": "7347bfaa5cf11ae2d7a528fbcc43322a12c795d3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-07-09T07:09:30.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-02T12:00:18.000Z", "max_forks_repo_path": "u24_lymphocyte/prediction/lymphocyte/pred.py", "max_forks_repo_name": "ALSM-PhD/quip_classification", "max_forks_repo_head_hexsha": "7347bfaa5cf11ae2d7a528fbcc43322a12c795d3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2019-04-17T20:23:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T00:32:47.000Z", "avg_line_length": 44.8204334365, "max_line_length": 132, "alphanum_fraction": 0.6640878635, "include": true, "reason": "import numpy,from scipy,import theano,from theano", "num_tokens": 4282}
|
from __future__ import division, print_function, absolute_import
import weakref
import numpy as np
from collections import OrderedDict
from ...framework.observer import Listener, Observable
from .primitive import MDPPrimitive
class StateDistribution(object):
    """Probability Distribution.

    This class handles evaluation of empirically derived states and
    calculates the probability distribution from them.

    States map to their transition probability; states never added read as
    probability 0.0. Insertion order is preserved (``OrderedDict``), which
    makes :meth:`sample` reproducible for a fixed numpy random seed.
    """

    def __init__(self):
        self._states = OrderedDict()
        """:type: dict[MDPState, tuple[float]]"""

    def __repr__(self):
        return repr(self._states)

    def __len__(self):
        return len(self._states)

    def __getitem__(self, state):
        # Unknown states have zero probability.
        return self._states.get(state, 0.)

    def __setitem__(self, state, proba):
        self._states[state] = proba

    def __delitem__(self, state):
        del self._states[state]

    def clear(self):
        """Clear the probability distribution."""
        self._states.clear()
        return self._states

    def pop(self, k, d=None):
        """Remove state `k` and return its probability, or `d` if absent."""
        return self._states.pop(k, d)

    def keys(self):
        return self._states.keys()

    def values(self):
        return self._states.values()

    def items(self):
        """Retrieve the probability distribution.

        Returns
        -------
        dict[MDPState, float] :
            A list of probabilities for all possible transitions.
        """
        return self._states.items()

    # Python 2 style iterators, kept for backward compatibility with the
    # rest of the framework; they are not available on Python 3 dicts.
    def iterkeys(self):
        return self._states.iterkeys()

    def itervalues(self):
        return self._states.itervalues()

    def iteritems(self):
        return self._states.iteritems()

    def __iter__(self):
        return iter(self._states)

    def __contains__(self, item):
        return item in self._states

    def sample(self):
        """Returns a successor state according to the probability distribution.

        Returns
        -------
        MDPState :
            The next state sampled from the probability distribution.

        Raises
        ------
        UserWarning
            If the distribution is empty.
        """
        # Fix: materialize the dict views. On Python 3 they are not
        # subscriptable, so the previous `self._states.keys()[idx]` raised a
        # TypeError; list() is equally valid on Python 2.
        keys = list(self._states.keys())
        if not keys:
            raise UserWarning("No initial states defined.")
        probas = list(self._states.values())
        idx = np.random.choice(len(keys), p=probas)
        return keys[idx]
class MDPState(MDPPrimitive):
    """A state of the MDP, plus its per-state bookkeeping interfaces.

    Bundles two nested helpers: `StateAction` (model-predicted data for a
    state-action pair) and `StateData` (per-state registry inside an MDP).

    NOTE(review): the module uses `iteritems`/py2-style dict iteration and
    therefore targets Python 2.
    """
    class StateAction(Listener):
        """The models interface.
        Contains all relevant information predicted by a model for a
        given state-action pair. This includes the (predicted) reward and
        transition probabilities to possible next states.
        Attributes
        ----------
        """
        # Private token: forces construction through `create`.
        _instance = object()
        @property
        def reward(self):
            """float: The reward"""
            return self._model.reward
        @property
        def successor_probabilities(self):
            """StateDistribution: The state probability distribution"""
            return self._successor_proba
        def __init__(self, token, parent, action, model):
            # Guard: only the class-private token (used by `create`) may
            # construct instances directly.
            if token is not self._instance:
                raise ValueError("Use 'create' to construct {0}".format(self.__class__.__name__))
            super(MDPState.StateAction, self).__init__()
            self._parent = parent
            """:type: StateData"""
            self._action = action
            """:type: MDPAction"""
            self._model = model
            """:type: StateActionModel"""
            self._approx = {}
            """:type: dict[Primitive, Approximation]"""
            self._successor_proba = StateDistribution()
        def __getstate__(self):
            return self.__dict__.copy()
        def __setstate__(self, d):
            self.__dict__.update(d)
        @classmethod
        def create(cls, parent, action, model):
            """Construct, register with the parent, and wire up model events."""
            result = cls(cls._instance, parent, action, model)
            parent._state_action_map[action] = result
            result._model.subscribe(result, 'model_change')
            result.compute_successors()
            return result
        def notify(self, event):
            # Queue this state-action for re-computation when its model or
            # an approximated successor changes.
            if event.name == 'model_change' or event.name == 'average_change':
                self._parent._mdp._inbox.add(self)
        def compute_successors(self):
            """Recompute the successor-state probability distribution."""
            state = self._parent.state
            self._successor_proba.clear()
            effects = self._model.effects_map
            # remove erstwhile approximations
            # NOTE(review): deleting from self._approx while iterating
            # .items() is safe on Python 2 (list copy) but would raise a
            # RuntimeError on Python 3.
            for delta_s, approx in self._approx.items():
                if delta_s not in effects:
                    if approx is not None:
                        approx.unsubscribe(self, 'average_change')
                    del self._approx[delta_s]
            for delta_s, model_weight in effects.iteritems():
                # add a new approximation
                if delta_s not in self._approx:
                    successor = MDPState.create(delta_s + state)
                    succ_approx = None
                    if successor != state:
                        # not a self transition, so approximate
                        succ_approx = self._parent._mdp._approximator.approximate(successor)
                        succ_approx.subscribe(self, 'average_change')
                    self._approx[delta_s] = succ_approx
                # Add the translated successors
                approx = self._approx.get(delta_s, None)
                if approx is None:
                    # effect was a self transition, use parent's state
                    self._successor_proba[self._parent.state] += model_weight
                else:
                    for s, (_, value_weight) in approx.basis_weights.iteritems():
                        self._successor_proba[s] += value_weight * model_weight
            self._parent.dispatch('mdp_change', action=self._action)
    class StateData(Observable):
        """State information interface.
        Information about the state can be accessed here.
        Parameters
        ----------
        Attributes
        ----------
        """
        # Private token: forces construction through `create`.
        _instance = object()
        @property
        def state(self):
            return self._approx.state
        @property
        def state_actions(self):
            return self._state_action_map
        def __init__(self, token, mdp, approx):
            if token is not self._instance:
                raise ValueError("Use 'create' to construct {0}".format(self.__class__.__name__))
            super(MDPState.StateData, self).__init__()
            self._mdp = mdp
            """:type: MDP"""
            self._approx = approx
            """:type: Approximation"""
            self._state_action_map = OrderedDict()
            """:type: dict[MDPAction, StateAction]"""
        def __getstate__(self):
            return self.__dict__.copy()
        def __setstate__(self, d):
            self.__dict__.update(d)
        @classmethod
        def create(cls, mdp, approx):
            """Construct, register in the MDP, and build a StateAction for
            every action available in this state."""
            result = cls(cls._instance, mdp, approx)
            state = approx.state
            mdp._state_map[state] = result
            for act in mdp.actions:
                if act.available(state):
                    # weakref.proxy avoids a strong reference cycle between
                    # the StateData and its StateAction children.
                    sa = MDPState.StateAction.create(weakref.proxy(result), act, act.model(state))
                    assert act in result._state_action_map
                    assert sa == result._state_action_map[act]
            return result
    # -----------------------------
    # MDPState
    # -----------------------------
    def __init__(self, token, features, name=None):
        super(MDPState, self).__init__(token, features, name)
    @classmethod
    def create(cls, features, name=None, feature_limits=None):
        """Alternate constructor: normalize features, then build the state."""
        features = cls._process_parameters(features, feature_limits)
        return cls(cls._instance, features, name)
    def encode(self):
        # noinspection PyUnresolvedReferences,PyUnusedLocal
        """Encodes the state into a human readable representation.
        Returns
        -------
        ndarray :
            The encoded state.
        Notes
        -----
        Optionally this method can be overwritten at runtime.
        Examples
        --------
        >>> def my_encode(self)
        ... pass
        ...
        >>> MDPState.encode = my_encode
        """
        return self._features
|
{"hexsha": "f0edd140aa0797954d1837521a6e451b4bccd6f3", "size": 9255, "ext": "py", "lang": "Python", "max_stars_repo_path": "rlpy/agent/mdp/state.py", "max_stars_repo_name": "evenmarbles/rlpy", "max_stars_repo_head_hexsha": "3c3c39a316285ca725268e81aef030e5c764f797", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rlpy/agent/mdp/state.py", "max_issues_repo_name": "evenmarbles/rlpy", "max_issues_repo_head_hexsha": "3c3c39a316285ca725268e81aef030e5c764f797", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rlpy/agent/mdp/state.py", "max_forks_repo_name": "evenmarbles/rlpy", "max_forks_repo_head_hexsha": "3c3c39a316285ca725268e81aef030e5c764f797", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1465798046, "max_line_length": 98, "alphanum_fraction": 0.5529983793, "include": true, "reason": "import numpy", "num_tokens": 1891}
|
"""
DQN and DDQN
"""
import time
import numpy as np
from yarlp.utils.env_utils import GymEnv
from yarlp.agent.base_agent import Agent
from yarlp.model.networks import cnn
from dateutil.relativedelta import relativedelta as rd
from yarlp.model.model_factories import ddqn_model_factory
from yarlp.model.model_factories import build_ddqn_update_feed_dict
from yarlp.utils.replay_buffer import ReplayBuffer
from yarlp.external.baselines.baselines.deepq.replay_buffer import PrioritizedReplayBuffer
from yarlp.utils.schedules import LinearSchedule, PiecewiseSchedule, ConstantSchedule
from yarlp.utils import experiment_utils
from yarlp.utils.env_utils import get_wrapper_by_name
class DDQNAgent(Agent):
    """DQN / Double-DQN agent.

    Trains an epsilon-greedy policy from a replay buffer against a
    periodically synchronized target network. Prioritized replay plumbing
    exists but is currently forced off in __init__ (see note there).
    """
    def __init__(self, env,
                 policy_network=None,
                 policy_network_params={'dueling': False},
                 learning_rate_schedule=None,
                 policy_learning_rate=1e-4,
                 model_file_path=None,
                 buffer_size=10000,
                 exploration_fraction=0.1,
                 exploration_final_eps=0.01,
                 exploration_schedule=None,
                 train_freq=4,
                 batch_size=32,
                 target_network_update_freq=10000,
                 prioritized_replay=False,
                 prioritized_replay_alpha=0.6,
                 prioritized_replay_beta0=0.4,
                 prioritized_replay_beta_iters=None,
                 prioritized_replay_eps=1e-6,
                 max_timesteps=1000000,
                 checkpoint_freq=10000,
                 save_freq=20000,
                 grad_norm_clipping=10,
                 learning_start_timestep=10000,
                 discount_factor=0.99,
                 double_q=True,
                 *args, **kwargs):
        super().__init__(env, *args, **kwargs)
        # assert env is discrete
        assert GymEnv.env_action_space_is_discrete(env),\
            "env {} is not discrete for DDQNAgent".format(env)
        assert batch_size < buffer_size,\
            'batch_size {} must be less than buffer_size {}'.format(
                batch_size, buffer_size)
        # NOTE(review): this unconditionally disables prioritized replay,
        # overriding the constructor argument -- confirm this is intentional.
        prioritized_replay = False
        self.train_freq = train_freq
        self.batch_size = batch_size
        self.target_network_update_freq = target_network_update_freq
        self.checkpoint_freq = checkpoint_freq
        self.save_freq = save_freq
        self.max_timesteps = max_timesteps
        self.learning_start_timestep = learning_start_timestep
        self.learning_rate = policy_learning_rate
        self.global_t = 0
        if policy_network is None:
            policy_network = cnn
        elif isinstance(policy_network, str):
            policy_network = experiment_utils.get_network(
                policy_network, {})
        # NOTE(review): network_params, model_file_path, grad_norm_clipping,
        # double_q and discount_factor are hard-coded here; the constructor
        # arguments of the same names are silently ignored -- verify intent.
        self._policy = ddqn_model_factory(
            env, network=policy_network,
            network_params={},
            model_file_path=None,
            grad_norm_clipping=10,
            double_q=False, discount_factor=0.99)
        self.tf_object_attributes.add('_policy')
        # Sync the target network to the freshly initialized online network.
        self._policy.G(self._policy['update_target_network'])
        policy_weight_sums = sum(
            [np.sum(a) for a in self._policy.get_weights()])
        self.logger.logger.info(
            'Policy network weight sums: {}'.format(policy_weight_sums))
        # Create the replay buffer
        self.prioritized_replay = prioritized_replay
        self.prioritized_replay_eps = prioritized_replay_eps
        self.buffer_size = buffer_size
        self.prioritized_replay_alpha = prioritized_replay_alpha
        self.prioritized_replay_beta_iters = prioritized_replay_beta_iters
        self.prioritized_replay_beta0 = prioritized_replay_beta0
        self.set_replay_buffer()
        # Create exploration and learning rate schedules
        # NOTE(review): if a schedule argument is neither None nor a list,
        # the local below is never bound and a NameError follows.
        if exploration_schedule is None:
            schedule_timesteps = max(
                int(exploration_fraction * max_timesteps), 1)
            exploration = LinearSchedule(
                schedule_timesteps=schedule_timesteps,
                initial_p=1.0,
                final_p=exploration_final_eps)
        elif isinstance(exploration_schedule, list):
            exploration = PiecewiseSchedule(
                exploration_schedule,
                outside_value=exploration_schedule[-1][-1]
            )
        self.exploration = exploration
        if learning_rate_schedule is None:
            lr = ConstantSchedule(policy_learning_rate)
        elif isinstance(learning_rate_schedule, list):
            lr = PiecewiseSchedule(
                learning_rate_schedule,
                outside_value=learning_rate_schedule[-1][-1])
        self.lr_schedule = lr
    def set_replay_buffer(self):
        """Create the (optionally prioritized) replay buffer + beta schedule."""
        if self.prioritized_replay:
            replay_buffer = PrioritizedReplayBuffer(
                self.buffer_size,
                alpha=self.prioritized_replay_alpha)
            if self.prioritized_replay_beta_iters is None:
                self.prioritized_replay_beta_iters = self.max_timesteps
            beta_schedule = LinearSchedule(
                self.prioritized_replay_beta_iters,
                initial_p=self.prioritized_replay_beta0,
                final_p=1.0)
        else:
            replay_buffer = ReplayBuffer(self.buffer_size)
            beta_schedule = None
        self.replay_buffer = replay_buffer
        self.beta_schedule = beta_schedule
    def get_action(self, state, epsilon):
        """
        Generate an epsilon-greedy action from our policy model
        Returns
        ----------
        action : numpy array or integer
        """
        q_values = self._policy.G(
            self._policy['q_output'],
            feed_dict={
                self._policy['state']: state
            })
        # Greedy action per row of the Q-value batch.
        deterministic_actions = np.argmax(q_values, axis=1)
        batch_size = q_values.shape[0]
        random_actions = np.random.randint(
            0, q_values.shape[1], batch_size)
        # With probability epsilon take the random action, else the greedy one.
        actions = np.where(
            np.random.uniform(size=batch_size) < epsilon,
            random_actions,
            deterministic_actions)
        return actions
    def train(self):
        """Main training loop: interact, store, learn, log, checkpoint."""
        obs = self._env.reset()
        self.t = 0
        self.last_saved_reward = -np.inf
        update_target_network = self._policy['update_target_network']
        # NOTE(review): key has a trailing colon ('optimizer_op:') -- verify
        # it matches the name registered by ddqn_model_factory.
        update_fn = self._policy['optimizer_op:']
        while self.global_t < self.max_timesteps:
            # Pure random exploration until learning starts.
            if self.t < self.learning_start_timestep:
                epsilon = 1
            else:
                epsilon = self.exploration.value(self.global_t)
            action = self.get_action(
                np.expand_dims(np.array(obs), 0),
                epsilon)
            new_obs, reward, done, info = self._env.step(action[0])
            self.replay_buffer.add(obs, action, reward, new_obs, done)
            obs = new_obs
            if done:
                obs = self._env.reset()
            # train the networks every train_freq
            if self.t > self.learning_start_timestep \
                    and self.t % self.train_freq == 0:
                if self.prioritized_replay:
                    experience = self.replay_buffer.sample(
                        self.batch_size,
                        beta=self.beta_schedule.value(self.global_t))
                    (obst, actions, rewards,
                     obst1, dones, weights, batch_idx) = experience
                else:
                    experience = self.replay_buffer.sample(
                        self.batch_size)
                    obst, actions, rewards, obst1, dones = experience
                    # Uniform replay: all importance weights are 1.
                    weights, batch_idx = np.ones_like(rewards), None
                obst = np.array(obst)
                obst1 = np.array(obst1)
                lr = self.lr_schedule.value(self.global_t)
                args = obst, actions, rewards, obst1, dones, weights, lr
                feed_dict = build_ddqn_update_feed_dict(
                    self._policy, *args)
                self._policy.G(update_fn, feed_dict)
                if self.prioritized_replay:
                    # Refresh sampled transitions' priorities from TD errors.
                    td_errors = self._policy.G(
                        self._policy['td_errors'], feed_dict)
                    new_priorities = np.abs(td_errors) + \
                        self.prioritized_replay_eps
                    self.replay_buffer.update_priorities(
                        batch_idx, new_priorities)
            # log stuff every episode
            if self.t > 0 \
                    and self.t % self.checkpoint_freq == 0\
                    and self.t > self.learning_start_timestep:
                # log things
                episode_rewards = get_wrapper_by_name(
                    self._env, "MonitorEnv").get_episode_rewards()
                self.logger.set_metrics_for_iter(episode_rewards[-100:])
                num_episodes = len(episode_rewards)
                self.logger.add_metric('timesteps_so_far', self.global_t)
                eta = (self.max_timesteps - self.t) /\
                    (self.t / round(time.time() - self.logger._start_time, 6))
                self.logger.add_metric(
                    'ETA',
                    str(rd(seconds=eta)))
                self.logger.add_metric(
                    'epsilon', self.exploration.value(self.global_t))
                if self.prioritized_replay:
                    self.logger.add_metric(
                        'beta', self.beta_schedule.value(self.global_t))
                self.logger.add_metric('env_id', self._env_id)
                self.logger.add_metric('episodes', num_episodes)
                self.logger.add_metric(
                    'last_saved_reward', self.last_saved_reward)
                self.logger.add_metric(
                    'learning_rate', self.lr_schedule.value(self.global_t))
                self.logger.log()
            # Periodically copy online weights into the target network.
            if self.t > self.learning_start_timestep \
                    and self.t % self.target_network_update_freq == 0:
                self.logger.logger.info('Update target network')
                self._policy.G(update_target_network)
            # save model if necessary
            if self.t > self.learning_start_timestep \
                    and self.t % self.save_freq == 0 \
                    and self.logger._log_dir is not None:
                running_reward = np.mean(get_wrapper_by_name(
                    self._env, "MonitorEnv").get_episode_rewards()[-100:])
                if running_reward > self.last_saved_reward:
                    self.logger.logger.info(
                        'Saving best model, {} -> {}'.format(
                            self.last_saved_reward, running_reward))
                    self.save(self.logger._log_dir, 'best_agent')
                    self.last_saved_reward = running_reward
                self.logger.logger.info('Saving model for checkpoint')
                self.save(self.logger._log_dir)
            self.t += 1
            self.global_t += 1
        if self.logger._log_dir:
            self.save(self.logger._log_dir)
    def enjoy(self, t):
        """Render the trained policy forever, sleeping t seconds per step."""
        from time import sleep
        env = self.env
        done = True
        rewards = 0
        while True:
            if done:
                obs = env.reset()
                print(rewards, 'done')
                rewards = 0
            sleep(t)
            obs = np.expand_dims(obs, 0)
            # Small residual epsilon keeps evaluation from getting stuck.
            obs, r, done, _ = env.step(
                self.get_action(obs, epsilon=0.05))
            rewards += r
            env.render()
|
{"hexsha": "830e7881a7b99eb9c6451a8161990aeb8bcda2ef", "size": 11562, "ext": "py", "lang": "Python", "max_stars_repo_path": "yarlp/agent/ddqn_agent.py", "max_stars_repo_name": "btaba/yarlp", "max_stars_repo_head_hexsha": "e6bc70afe32f8617f56180d60d6a100c83868119", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2018-02-26T05:00:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-07T03:04:47.000Z", "max_issues_repo_path": "yarlp/agent/ddqn_agent.py", "max_issues_repo_name": "btaba/yarlp", "max_issues_repo_head_hexsha": "e6bc70afe32f8617f56180d60d6a100c83868119", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-10-23T17:43:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:00:24.000Z", "max_forks_repo_path": "yarlp/agent/ddqn_agent.py", "max_forks_repo_name": "btaba/yarlp", "max_forks_repo_head_hexsha": "e6bc70afe32f8617f56180d60d6a100c83868119", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-08-20T23:47:41.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-20T23:47:41.000Z", "avg_line_length": 39.0608108108, "max_line_length": 90, "alphanum_fraction": 0.5769763017, "include": true, "reason": "import numpy", "num_tokens": 2309}
|
// Copyright (C) 2019 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/stl_interfaces/iterator_interface.hpp>
#include "ill_formed.hpp"
#include <boost/core/lightweight_test.hpp>
#include <algorithm>
#include <array>
#include <numeric>
#include <tuple>
#include <type_traits>
// Minimal hand-written random-access iterator over int.  Only the three
// user-supplied operations (unary *, +=, difference) are defined;
// iterator_interface synthesizes the rest (++/--, +, -, [], ->, and all
// comparisons) via CRTP.
struct basic_random_access_iter : boost::stl_interfaces::iterator_interface<
                                      basic_random_access_iter,
                                      std::random_access_iterator_tag,
                                      int>
{
    basic_random_access_iter() {}
    basic_random_access_iter(int * it) : it_(it) {}
    int & operator*() const { return *it_; }
    basic_random_access_iter & operator+=(std::ptrdiff_t i)
    {
        it_ += i;
        return *this;
    }
    friend std::ptrdiff_t operator-(
        basic_random_access_iter lhs, basic_random_access_iter rhs) noexcept
    {
        return lhs.it_ - rhs.it_;
    }
private:
    int * it_;
};
// Compile-time checks: models std::random_access_iterator (where concepts
// are available) and exposes the expected iterator_traits typedefs.
BOOST_STL_INTERFACES_STATIC_ASSERT_CONCEPT(
    basic_random_access_iter, std::random_access_iterator)
BOOST_STL_INTERFACES_STATIC_ASSERT_ITERATOR_TRAITS(
    basic_random_access_iter,
    std::random_access_iterator_tag,
    std::random_access_iterator_tag,
    int,
    int &,
    int *,
    std::ptrdiff_t)
// Internal detection trait must see the user-provided operator+=.
static_assert(
    boost::stl_interfaces::v1::v1_dtl::
        plus_eq<basic_random_access_iter, std::ptrdiff_t>::value,
    "");
// Iterator that adapts an underlying pointer: the user supplies NO
// operators at all, only base_reference() accessors (found through the
// `access` friend); iterator_interface forwards every operation to the
// wrapped int*.
struct basic_adapted_random_access_iter
    : boost::stl_interfaces::iterator_interface<
          basic_adapted_random_access_iter,
          std::random_access_iterator_tag,
          int>
{
    basic_adapted_random_access_iter() {}
    basic_adapted_random_access_iter(int * it) : it_(it) {}
private:
    friend boost::stl_interfaces::access;
    // Mutable overload lets the interface advance the iterator;
    // const overload serves read-only operations.
    int *& base_reference() noexcept { return it_; }
    int * base_reference() const noexcept { return it_; }
    int * it_;
};
// Same concept/traits expectations as the hand-written iterator above.
BOOST_STL_INTERFACES_STATIC_ASSERT_CONCEPT(
    basic_adapted_random_access_iter, std::random_access_iterator)
BOOST_STL_INTERFACES_STATIC_ASSERT_ITERATOR_TRAITS(
    basic_adapted_random_access_iter,
    std::random_access_iterator_tag,
    std::random_access_iterator_tag,
    int,
    int &,
    int *,
    std::ptrdiff_t)
// Templated version of the adapting iterator.  The extra converting
// constructor (SFINAE-constrained on pointer convertibility) allows
// iter<int> -> iter<int const> conversions, mirroring how T* converts
// to T const*.
template<typename ValueType>
struct adapted_random_access_iter : boost::stl_interfaces::iterator_interface<
                                        adapted_random_access_iter<ValueType>,
                                        std::random_access_iterator_tag,
                                        ValueType>
{
    adapted_random_access_iter() {}
    adapted_random_access_iter(ValueType * it) : it_(it) {}
    template<
        typename ValueType2,
        typename Enable = std::enable_if_t<
            std::is_convertible<ValueType2 *, ValueType *>::value>>
    adapted_random_access_iter(adapted_random_access_iter<ValueType2> other) :
        it_(other.it_)
    {}
    // Cross-instantiation friendship so the converting ctor can read other.it_.
    template<typename ValueType2>
    friend struct adapted_random_access_iter;
private:
    friend boost::stl_interfaces::access;
    ValueType *& base_reference() noexcept { return it_; }
    ValueType * base_reference() const noexcept { return it_; }
    ValueType * it_;
};
// Mutable instantiation yields int& / int*.
BOOST_STL_INTERFACES_STATIC_ASSERT_CONCEPT(
    adapted_random_access_iter<int>, std::random_access_iterator)
BOOST_STL_INTERFACES_STATIC_ASSERT_ITERATOR_TRAITS(
    adapted_random_access_iter<int>,
    std::random_access_iterator_tag,
    std::random_access_iterator_tag,
    int,
    int &,
    int *,
    std::ptrdiff_t)
// Const instantiation: value_type stays int, reference/pointer are const.
BOOST_STL_INTERFACES_STATIC_ASSERT_CONCEPT(
    adapted_random_access_iter<int const>, std::random_access_iterator)
BOOST_STL_INTERFACES_STATIC_ASSERT_ITERATOR_TRAITS(
    adapted_random_access_iter<int const>,
    std::random_access_iterator_tag,
    std::random_access_iterator_tag,
    int,
    int const &,
    int const *,
    std::ptrdiff_t)
// Templated hand-written iterator (same three user ops as
// basic_random_access_iter) plus a converting constructor, so mutable
// and const instantiations interoperate.
template<typename ValueType>
struct random_access_iter : boost::stl_interfaces::iterator_interface<
                                random_access_iter<ValueType>,
                                std::random_access_iterator_tag,
                                ValueType>
{
    random_access_iter() {}
    random_access_iter(ValueType * it) : it_(it) {}
    template<
        typename ValueType2,
        typename E = std::enable_if_t<
            std::is_convertible<ValueType2 *, ValueType *>::value>>
    random_access_iter(random_access_iter<ValueType2> it) : it_(it.it_)
    {}
    ValueType & operator*() const { return *it_; }
    random_access_iter & operator+=(std::ptrdiff_t i)
    {
        it_ += i;
        return *this;
    }
    friend std::ptrdiff_t
    operator-(random_access_iter lhs, random_access_iter rhs) noexcept
    {
        return lhs.it_ - rhs.it_;
    }
private:
    ValueType * it_;
    // Cross-instantiation friendship for the converting ctor above.
    template<typename ValueType2>
    friend struct random_access_iter;
};
using random_access = random_access_iter<int>;
using const_random_access = random_access_iter<int const>;
BOOST_STL_INTERFACES_STATIC_ASSERT_CONCEPT(
    random_access, std::random_access_iterator)
BOOST_STL_INTERFACES_STATIC_ASSERT_ITERATOR_TRAITS(
    random_access,
    std::random_access_iterator_tag,
    std::random_access_iterator_tag,
    int,
    int &,
    int *,
    std::ptrdiff_t)
BOOST_STL_INTERFACES_STATIC_ASSERT_CONCEPT(
    const_random_access, std::random_access_iterator)
BOOST_STL_INTERFACES_STATIC_ASSERT_ITERATOR_TRAITS(
    const_random_access,
    std::random_access_iterator_tag,
    std::random_access_iterator_tag,
    int,
    int const &,
    int const *,
    std::ptrdiff_t)
// Proxy iterator over two parallel int arrays.  Dereference returns a
// tuple of references BY VALUE (a proxy), so this uses
// proxy_iterator_interface, whose pointer type wraps the proxy in
// proxy_arrow_result for operator->.
struct zip_iter : boost::stl_interfaces::proxy_iterator_interface<
                      zip_iter,
                      std::random_access_iterator_tag,
                      std::tuple<int, int>,
                      std::tuple<int &, int &>>
{
    zip_iter() : it1_(nullptr), it2_(nullptr) {}
    zip_iter(int * it1, int * it2) : it1_(it1), it2_(it2) {}
    std::tuple<int &, int &> operator*() const
    {
        return std::tuple<int &, int &>{*it1_, *it2_};
    }
    zip_iter & operator+=(std::ptrdiff_t i)
    {
        // Both underlying pointers advance in lockstep.
        it1_ += i;
        it2_ += i;
        return *this;
    }
    friend std::ptrdiff_t operator-(zip_iter lhs, zip_iter rhs) noexcept
    {
        // Distance measured on the first sequence only (sequences are
        // assumed to stay in lockstep).
        return lhs.it1_ - rhs.it1_;
    }
private:
    int * it1_;
    int * it2_;
};
using int_pair = std::tuple<int, int>;
using int_refs_pair = std::tuple<int &, int &>;
BOOST_STL_INTERFACES_STATIC_ASSERT_CONCEPT(
    zip_iter, std::random_access_iterator)
BOOST_STL_INTERFACES_STATIC_ASSERT_ITERATOR_TRAITS(
    zip_iter,
    std::random_access_iterator_tag,
    std::random_access_iterator_tag,
    int_pair,
    int_refs_pair,
    boost::stl_interfaces::proxy_arrow_result<int_refs_pair>,
    std::ptrdiff_t)
// Integer-like user-defined type used to exercise proxy iteration over
// UDTs.  Comparable with itself and with plain int, in either operand
// order (member overloads cover int_t-on-the-left, friend overloads
// cover int-on-the-left).
struct int_t
{
    int value_;
    bool operator==(int_t rhs) const { return value_ == rhs.value_; }
    bool operator!=(int_t rhs) const { return value_ != rhs.value_; }
    bool operator<(int_t rhs) const { return value_ < rhs.value_; }
    bool operator==(int rhs) const { return value_ == rhs; }
    bool operator!=(int rhs) const { return value_ != rhs; }
    bool operator<(int rhs) const { return value_ < rhs; }
    friend bool operator==(int lhs, int_t rhs) { return lhs == rhs.value_; }
    friend bool operator!=(int lhs, int_t rhs) { return lhs != rhs.value_; }
    friend bool operator<(int lhs, int_t rhs) { return lhs < rhs.value_; }
};
// Same zip/proxy iterator as zip_iter, but the first sequence holds the
// user-defined int_t instead of int — verifies the proxy machinery does
// not depend on builtin element types.
struct udt_zip_iter : boost::stl_interfaces::proxy_iterator_interface<
                          udt_zip_iter,
                          std::random_access_iterator_tag,
                          std::tuple<int_t, int>,
                          std::tuple<int_t &, int &>>
{
    udt_zip_iter() : it1_(nullptr), it2_(nullptr) {}
    udt_zip_iter(int_t * it1, int * it2) : it1_(it1), it2_(it2) {}
    std::tuple<int_t &, int &> operator*() const
    {
        return std::tuple<int_t &, int &>{*it1_, *it2_};
    }
    udt_zip_iter & operator+=(std::ptrdiff_t i)
    {
        // Advance both underlying sequences in lockstep.
        it1_ += i;
        it2_ += i;
        return *this;
    }
    friend std::ptrdiff_t operator-(udt_zip_iter lhs, udt_zip_iter rhs) noexcept
    {
        return lhs.it1_ - rhs.it1_;
    }
private:
    int_t * it1_;
    int * it2_;
};
using int_t_int_pair = std::tuple<int_t, int>;
using int_t_int_refs_pair = std::tuple<int_t &, int &>;
BOOST_STL_INTERFACES_STATIC_ASSERT_CONCEPT(
    udt_zip_iter, std::random_access_iterator)
BOOST_STL_INTERFACES_STATIC_ASSERT_ITERATOR_TRAITS(
    udt_zip_iter,
    std::random_access_iterator_tag,
    std::random_access_iterator_tag,
    int_t_int_pair,
    int_t_int_refs_pair,
    boost::stl_interfaces::proxy_arrow_result<int_t_int_refs_pair>,
    std::ptrdiff_t)
namespace std {
    // Required for std::sort to work with zip_iter. If zip_iter::reference
    // were not a std::tuple with builtin types as its template parameters, we
    // could have put this in another namespace.
    // Takes rvalue references because dereferencing a proxy iterator
    // yields a temporary tuple of references; swapping through it still
    // swaps the referred-to array elements.
    void swap(zip_iter::reference && lhs, zip_iter::reference && rhs)
    {
        using std::swap;
        swap(std::get<0>(lhs), std::get<0>(rhs));
        swap(std::get<1>(lhs), std::get<1>(rhs));
    }
}
// Swap for the UDT proxy reference.  Lives in the global namespace: ADL
// finds it via int_t, so no std specialization is needed here.
void swap(udt_zip_iter::reference && lhs, udt_zip_iter::reference && rhs)
{
    using std::swap;
    swap(std::get<0>(lhs), std::get<0>(rhs));
    swap(std::get<1>(lhs), std::get<1>(rhs));
}
// Shared test fixtures: 0..9, all-ones, and their precomputed zips —
// `tuples` / `udt_tuples` are the expected element-wise results of
// zipping `ints`/`udts` with `ones`.
std::array<int, 10> ints = {{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}};
std::array<int, 10> ones = {{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}};
std::array<std::tuple<int, int>, 10> tuples = {{
    {0, 1},
    {1, 1},
    {2, 1},
    {3, 1},
    {4, 1},
    {5, 1},
    {6, 1},
    {7, 1},
    {8, 1},
    {9, 1},
}};
std::array<int_t, 10> udts = {
    {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}}};
std::array<std::tuple<int_t, int>, 10> udt_tuples = {{
    std::tuple<int_t, int>{{0}, 1},
    std::tuple<int_t, int>{{1}, 1},
    std::tuple<int_t, int>{{2}, 1},
    std::tuple<int_t, int>{{3}, 1},
    std::tuple<int_t, int>{{4}, 1},
    std::tuple<int_t, int>{{5}, 1},
    std::tuple<int_t, int>{{6}, 1},
    std::tuple<int_t, int>{{7}, 1},
    std::tuple<int_t, int>{{8}, 1},
    std::tuple<int_t, int>{{9}, 1},
}};
////////////////////
// view_interface //
////////////////////
#include "view_tests.hpp"
// Detection idiom helpers: each alias is well-formed only if the member
// in question exists; `ill_formed` then asserts it does NOT.
template<typename T>
using data_t = decltype(std::declval<T>().data());
// .data() must not be provided for discontiguous subranges
// (mutable and const alike) — there is no contiguous storage to expose.
static_assert(
    ill_formed<
        data_t,
        subrange<
            basic_random_access_iter,
            basic_random_access_iter,
            boost::stl_interfaces::v1::element_layout::discontiguous>>::value,
    "");
static_assert(
    ill_formed<
        data_t,
        subrange<
            basic_random_access_iter,
            basic_random_access_iter,
            boost::stl_interfaces::v1::element_layout::discontiguous> const>::
        value,
    "");
template<typename T>
using back_t = decltype(std::declval<T>().back());
// .back() must not be provided when the iterator/sentinel types differ
// (int* vs int const*): the last element cannot be reached from the
// sentinel.
static_assert(
    ill_formed<
        back_t,
        subrange<
            int *,
            int const *,
            boost::stl_interfaces::v1::element_layout::discontiguous>>::value,
    "");
static_assert(
    ill_formed<
        back_t,
        subrange<
            int *,
            int const *,
            boost::stl_interfaces::v1::element_layout::discontiguous> const>::
        value,
    "");
// Runtime test driver.  Each top-level block exercises one of the
// iterator/view types defined above against the global fixture arrays.
int main()
{
    // basic_random_access_iter: synthesized +, -, [], and all comparisons.
    {
        basic_random_access_iter first(ints.data());
        basic_random_access_iter last(ints.data() + ints.size());
        BOOST_TEST(*first == 0);
        BOOST_TEST(*(first + 1) == 1);
        BOOST_TEST(*(first + 2) == 2);
        BOOST_TEST(*(1 + first) == 1);
        BOOST_TEST(*(2 + first) == 2);
        BOOST_TEST(first[0] == 0);
        BOOST_TEST(first[1] == 1);
        BOOST_TEST(first[2] == 2);
        BOOST_TEST(*(last - 1) == 9);
        BOOST_TEST(*(last - 2) == 8);
        BOOST_TEST(*(last - 3) == 7);
        BOOST_TEST(last[-1] == 9);
        BOOST_TEST(last[-2] == 8);
        BOOST_TEST(last[-3] == 7);
        BOOST_TEST(last - first == 10);
        BOOST_TEST(first == first);
        BOOST_TEST(first != last);
        BOOST_TEST(first < last);
        BOOST_TEST(first <= last);
        BOOST_TEST(first <= first);
        BOOST_TEST(last > first);
        BOOST_TEST(last >= first);
        BOOST_TEST(last >= last);
        {
            auto first_copy = first;
            first_copy += 10;
            BOOST_TEST(first_copy == last);
        }
        {
            auto last_copy = last;
            last_copy -= 10;
            BOOST_TEST(last_copy == first);
        }
    }
    // basic_random_access_iter with standard algorithms, forward and
    // through std::reverse_iterator.
    {
        {
            std::array<int, 10> ints_copy;
            basic_random_access_iter first(ints.data());
            basic_random_access_iter last(ints.data() + ints.size());
            std::copy(first, last, ints_copy.begin());
            BOOST_TEST(ints_copy == ints);
        }
        {
            std::array<int, 10> ints_copy;
            basic_random_access_iter first(ints.data());
            basic_random_access_iter last(ints.data() + ints.size());
            std::copy(
                std::make_reverse_iterator(last),
                std::make_reverse_iterator(first),
                ints_copy.begin());
            std::reverse(ints_copy.begin(), ints_copy.end());
            BOOST_TEST(ints_copy == ints);
        }
        {
            std::array<int, 10> iota_ints;
            basic_random_access_iter first(iota_ints.data());
            basic_random_access_iter last(iota_ints.data() + iota_ints.size());
            std::iota(first, last, 0);
            BOOST_TEST(iota_ints == ints);
        }
        {
            std::array<int, 10> iota_ints;
            basic_random_access_iter first(iota_ints.data());
            basic_random_access_iter last(iota_ints.data() + iota_ints.size());
            std::iota(
                std::make_reverse_iterator(last),
                std::make_reverse_iterator(first),
                0);
            std::reverse(iota_ints.begin(), iota_ints.end());
            BOOST_TEST(iota_ints == ints);
        }
        {
            std::array<int, 10> iota_ints;
            basic_random_access_iter first(iota_ints.data());
            basic_random_access_iter last(iota_ints.data() + iota_ints.size());
            std::iota(
                std::make_reverse_iterator(last),
                std::make_reverse_iterator(first),
                0);
            std::sort(first, last);
            BOOST_TEST(iota_ints == ints);
        }
    }
    // basic_adapted_random_access_iter: same operator checks, all
    // operations forwarded to the wrapped pointer.
    {
        basic_adapted_random_access_iter first(ints.data());
        basic_adapted_random_access_iter last(ints.data() + ints.size());
        BOOST_TEST(*first == 0);
        BOOST_TEST(*(first + 1) == 1);
        BOOST_TEST(*(first + 2) == 2);
        BOOST_TEST(*(1 + first) == 1);
        BOOST_TEST(*(2 + first) == 2);
        BOOST_TEST(first[0] == 0);
        BOOST_TEST(first[1] == 1);
        BOOST_TEST(first[2] == 2);
        BOOST_TEST(*(last - 1) == 9);
        BOOST_TEST(*(last - 2) == 8);
        BOOST_TEST(*(last - 3) == 7);
        BOOST_TEST(last[-1] == 9);
        BOOST_TEST(last[-2] == 8);
        BOOST_TEST(last[-3] == 7);
        BOOST_TEST(last - first == 10);
        BOOST_TEST(first == first);
        BOOST_TEST(first != last);
        BOOST_TEST(first < last);
        BOOST_TEST(first <= last);
        BOOST_TEST(first <= first);
        BOOST_TEST(last > first);
        BOOST_TEST(last >= first);
        BOOST_TEST(last >= last);
        {
            auto first_copy = first;
            first_copy += 10;
            BOOST_TEST(first_copy == last);
        }
        {
            auto last_copy = last;
            last_copy -= 10;
            BOOST_TEST(last_copy == first);
        }
    }
    // basic_adapted_random_access_iter with standard algorithms.
    {
        {
            std::array<int, 10> ints_copy;
            basic_adapted_random_access_iter first(ints.data());
            basic_adapted_random_access_iter last(ints.data() + ints.size());
            std::copy(first, last, ints_copy.begin());
            BOOST_TEST(ints_copy == ints);
        }
        {
            std::array<int, 10> ints_copy;
            basic_adapted_random_access_iter first(ints.data());
            basic_adapted_random_access_iter last(ints.data() + ints.size());
            std::copy(
                std::make_reverse_iterator(last),
                std::make_reverse_iterator(first),
                ints_copy.begin());
            std::reverse(ints_copy.begin(), ints_copy.end());
            BOOST_TEST(ints_copy == ints);
        }
        {
            std::array<int, 10> iota_ints;
            basic_adapted_random_access_iter first(iota_ints.data());
            basic_adapted_random_access_iter last(
                iota_ints.data() + iota_ints.size());
            std::iota(first, last, 0);
            BOOST_TEST(iota_ints == ints);
        }
        {
            std::array<int, 10> iota_ints;
            basic_adapted_random_access_iter first(iota_ints.data());
            basic_adapted_random_access_iter last(
                iota_ints.data() + iota_ints.size());
            std::iota(
                std::make_reverse_iterator(last),
                std::make_reverse_iterator(first),
                0);
            std::reverse(iota_ints.begin(), iota_ints.end());
            BOOST_TEST(iota_ints == ints);
        }
        {
            std::array<int, 10> iota_ints;
            basic_adapted_random_access_iter first(iota_ints.data());
            basic_adapted_random_access_iter last(
                iota_ints.data() + iota_ints.size());
            std::iota(
                std::make_reverse_iterator(last),
                std::make_reverse_iterator(first),
                0);
            std::sort(first, last);
            BOOST_TEST(iota_ints == ints);
        }
    }
    // Mutable -> const conversions: converting constructors compile and
    // the mixed ranges compare equal.
    {
        {
            random_access first(ints.data());
            random_access last(ints.data() + ints.size());
            const_random_access first_copy(first);
            const_random_access last_copy(last);
            std::equal(first, last, first_copy, last_copy);
        }
        {
            adapted_random_access_iter<int> first(ints.data());
            adapted_random_access_iter<int> last(ints.data() + ints.size());
            adapted_random_access_iter<int const> first_copy(
                (int const *)ints.data());
            adapted_random_access_iter<int const> last_copy(
                (int const *)ints.data() + ints.size());
            std::equal(first, last, first_copy, last_copy);
        }
    }
    // Heterogeneous (mutable vs const) comparisons in both operand orders.
    {
        {
            random_access first(ints.data());
            random_access last(ints.data() + ints.size());
            const_random_access first_const(first);
            const_random_access last_const(last);
            BOOST_TEST(first == first_const);
            BOOST_TEST(first_const == first);
            BOOST_TEST(first != last_const);
            BOOST_TEST(last_const != first);
            BOOST_TEST(first <= first_const);
            BOOST_TEST(first_const <= first);
            BOOST_TEST(first >= first_const);
            BOOST_TEST(first_const >= first);
            BOOST_TEST(last_const > first);
            BOOST_TEST(last > first_const);
            BOOST_TEST(first_const < last);
            BOOST_TEST(first < last_const);
        }
        {
            adapted_random_access_iter<int> first(ints.data());
            adapted_random_access_iter<int> last(ints.data() + ints.size());
            adapted_random_access_iter<int const> first_const(first);
            adapted_random_access_iter<int const> last_const(last);
            BOOST_TEST(first == first_const);
            BOOST_TEST(first_const == first);
            BOOST_TEST(first != last_const);
            BOOST_TEST(last_const != first);
            BOOST_TEST(first <= first_const);
            BOOST_TEST(first_const <= first);
            BOOST_TEST(first >= first_const);
            BOOST_TEST(first_const >= first);
            BOOST_TEST(last_const > first);
            BOOST_TEST(last > first_const);
            BOOST_TEST(first_const < last);
            BOOST_TEST(first < last_const);
        }
    }
    // Post-increment / post-decrement loops terminate for each iterator.
    {
        {
            random_access first(ints.data());
            random_access last(ints.data() + ints.size());
            while (first != last)
                first++;
        }
        {
            random_access first(ints.data());
            random_access last(ints.data() + ints.size());
            while (first != last)
                last--;
        }
        {
            basic_random_access_iter first(ints.data());
            basic_random_access_iter last(ints.data() + ints.size());
            while (first != last)
                first++;
        }
        {
            basic_random_access_iter first(ints.data());
            basic_random_access_iter last(ints.data() + ints.size());
            while (first != last)
                last--;
        }
        {
            basic_adapted_random_access_iter first(ints.data());
            basic_adapted_random_access_iter last(ints.data() + ints.size());
            while (first != last)
                first++;
        }
        {
            basic_adapted_random_access_iter first(ints.data());
            basic_adapted_random_access_iter last(ints.data() + ints.size());
            while (first != last)
                last--;
        }
    }
    // random_access (templated, mutable): full operator battery.
    {
        random_access first(ints.data());
        random_access last(ints.data() + ints.size());
        BOOST_TEST(*first == 0);
        BOOST_TEST(*(first + 1) == 1);
        BOOST_TEST(*(first + 2) == 2);
        BOOST_TEST(*(1 + first) == 1);
        BOOST_TEST(*(2 + first) == 2);
        BOOST_TEST(first[0] == 0);
        BOOST_TEST(first[1] == 1);
        BOOST_TEST(first[2] == 2);
        BOOST_TEST(*(last - 1) == 9);
        BOOST_TEST(*(last - 2) == 8);
        BOOST_TEST(*(last - 3) == 7);
        BOOST_TEST(last[-1] == 9);
        BOOST_TEST(last[-2] == 8);
        BOOST_TEST(last[-3] == 7);
        BOOST_TEST(last - first == 10);
        BOOST_TEST(first == first);
        BOOST_TEST(first != last);
        BOOST_TEST(first < last);
        BOOST_TEST(first <= last);
        BOOST_TEST(first <= first);
        BOOST_TEST(last > first);
        BOOST_TEST(last >= first);
        BOOST_TEST(last >= last);
        {
            auto first_copy = first;
            first_copy += 10;
            BOOST_TEST(first_copy == last);
        }
        {
            auto last_copy = last;
            last_copy -= 10;
            BOOST_TEST(last_copy == first);
        }
    }
    // random_access with standard algorithms.
    {
        random_access first(ints.data());
        random_access last(ints.data() + ints.size());
        {
            std::array<int, 10> ints_copy;
            std::copy(first, last, ints_copy.begin());
            BOOST_TEST(ints_copy == ints);
        }
        {
            std::array<int, 10> ints_copy;
            std::copy(
                std::make_reverse_iterator(last),
                std::make_reverse_iterator(first),
                ints_copy.begin());
            std::reverse(ints_copy.begin(), ints_copy.end());
            BOOST_TEST(ints_copy == ints);
        }
        {
            std::array<int, 10> iota_ints;
            random_access first(iota_ints.data());
            random_access last(iota_ints.data() + iota_ints.size());
            std::iota(first, last, 0);
            BOOST_TEST(iota_ints == ints);
        }
        {
            std::array<int, 10> iota_ints;
            random_access first(iota_ints.data());
            random_access last(iota_ints.data() + iota_ints.size());
            std::iota(
                std::make_reverse_iterator(last),
                std::make_reverse_iterator(first),
                0);
            std::reverse(iota_ints.begin(), iota_ints.end());
            BOOST_TEST(iota_ints == ints);
        }
        {
            std::array<int, 10> iota_ints;
            random_access first(iota_ints.data());
            random_access last(iota_ints.data() + iota_ints.size());
            std::iota(
                std::make_reverse_iterator(last),
                std::make_reverse_iterator(first),
                0);
            std::sort(first, last);
            BOOST_TEST(iota_ints == ints);
        }
    }
    // const_random_access: read-only algorithms (copy, binary_search —
    // reversed search uses greater<> since the reversed range descends).
    {
        const_random_access first(ints.data());
        const_random_access last(ints.data() + ints.size());
        {
            std::array<int, 10> ints_copy;
            std::copy(first, last, ints_copy.begin());
            BOOST_TEST(ints_copy == ints);
        }
        {
            BOOST_TEST(std::binary_search(first, last, 3));
            BOOST_TEST(std::binary_search(
                std::make_reverse_iterator(last),
                std::make_reverse_iterator(first),
                3,
                std::greater<>{}));
        }
    }
    // Proxy zip iterators: element-wise equality with the precomputed
    // tuples, and std::sort through the proxy swap overloads above.
    {
        {
            zip_iter first(ints.data(), ones.data());
            zip_iter last(ints.data() + ints.size(), ones.data() + ones.size());
            BOOST_TEST(std::equal(first, last, tuples.begin(), tuples.end()));
        }
        {
            auto ints_copy = ints;
            std::reverse(ints_copy.begin(), ints_copy.end());
            auto ones_copy = ones;
            zip_iter first(ints_copy.data(), ones_copy.data());
            zip_iter last(
                ints_copy.data() + ints_copy.size(),
                ones_copy.data() + ones_copy.size());
            BOOST_TEST(!std::equal(first, last, tuples.begin(), tuples.end()));
            std::sort(first, last);
            BOOST_TEST(std::equal(first, last, tuples.begin(), tuples.end()));
        }
        {
            udt_zip_iter first(udts.data(), ones.data());
            udt_zip_iter last(udts.data() + udts.size(), ones.data() + ones.size());
            BOOST_TEST(
                std::equal(first, last, udt_tuples.begin(), udt_tuples.end()));
        }
        {
            auto udts_copy = udts;
            std::reverse(udts_copy.begin(), udts_copy.end());
            auto ones_copy = ones;
            udt_zip_iter first(udts_copy.data(), ones_copy.data());
            udt_zip_iter last(
                udts_copy.data() + udts_copy.size(),
                ones_copy.data() + ones_copy.size());
            BOOST_TEST(
                !std::equal(first, last, udt_tuples.begin(), udt_tuples.end()));
            std::sort(first, last);
            BOOST_TEST(
                std::equal(first, last, udt_tuples.begin(), udt_tuples.end()));
        }
    }
    // view_interface over a contiguous range: begin/end, emptiness,
    // data, size, front/back, operator[].
    {
        basic_random_access_iter first(ints.data());
        basic_random_access_iter last(ints.data() + ints.size());
        auto r = range<boost::stl_interfaces::v1::element_layout::contiguous>(
            first, last);
        auto empty = range<boost::stl_interfaces::v1::element_layout::contiguous>(
            first, first);
        // range begin/end
        {
            std::array<int, 10> ints_copy;
            std::copy(r.begin(), r.end(), ints_copy.begin());
            BOOST_TEST(ints_copy == ints);
            BOOST_TEST(empty.begin() == empty.end());
        }
        // empty/op bool
        {
            BOOST_TEST(!r.empty());
            BOOST_TEST(r);
            BOOST_TEST(empty.empty());
            BOOST_TEST(!empty);
            auto const cr = r;
            BOOST_TEST(!cr.empty());
            BOOST_TEST(cr);
            auto const cempty = empty;
            BOOST_TEST(cempty.empty());
            BOOST_TEST(!cempty);
        }
        // data
        {
            BOOST_TEST(r.data() != nullptr);
            BOOST_TEST(r.data()[2] == 2);
            BOOST_TEST(empty.data() != nullptr);
            auto const cr = r;
            BOOST_TEST(cr.data() != nullptr);
            BOOST_TEST(cr.data()[2] == 2);
            auto const cempty = empty;
            BOOST_TEST(cempty.data() != nullptr);
        }
        // size
        {
            BOOST_TEST(r.size() == 10u);
            BOOST_TEST(empty.size() == 0u);
            auto const cr = r;
            BOOST_TEST(cr.size() == 10u);
            auto const cempty = empty;
            BOOST_TEST(cempty.size() == 0u);
        }
        // front/back
        {
            BOOST_TEST(r.front() == 0);
            BOOST_TEST(r.back() == 9);
            auto const cr = r;
            BOOST_TEST(cr.front() == 0);
            BOOST_TEST(cr.back() == 9);
        }
        // op[]
        {
            BOOST_TEST(r[2] == 2);
            auto const cr = r;
            BOOST_TEST(cr[2] == 2);
        }
    }
    // view_interface over a discontiguous (zip) range: as above, minus
    // data() which static asserts earlier proved absent.
    {
        zip_iter first(ints.data(), ones.data());
        zip_iter last(ints.data() + ints.size(), ones.data() + ones.size());
        auto r = range<boost::stl_interfaces::v1::element_layout::discontiguous>(
            first, last);
        auto empty =
            range<boost::stl_interfaces::v1::element_layout::discontiguous>(
                first, first);
        // range begin/end
        {
            BOOST_TEST(std::equal(first, last, tuples.begin(), tuples.end()));
        }
        // empty/op bool
        {
            BOOST_TEST(!r.empty());
            BOOST_TEST(r);
            BOOST_TEST(empty.empty());
            BOOST_TEST(!empty);
            auto const cr = r;
            BOOST_TEST(!cr.empty());
            BOOST_TEST(cr);
            auto const cempty = empty;
            BOOST_TEST(cempty.empty());
            BOOST_TEST(!cempty);
        }
        // size
        {
            BOOST_TEST(r.size() == 10u);
            BOOST_TEST(empty.size() == 0u);
            auto const cr = r;
            BOOST_TEST(cr.size() == 10u);
            auto const cempty = empty;
            BOOST_TEST(cempty.size() == 0u);
        }
        // front/back
        {
            BOOST_TEST(r.front() == (std::tuple<int, int>(0, 1)));
            BOOST_TEST(r.back() == (std::tuple<int, int>(9, 1)));
            auto const cr = r;
            BOOST_TEST(cr.front() == (std::tuple<int, int>(0, 1)));
            BOOST_TEST(cr.back() == (std::tuple<int, int>(9, 1)));
        }
        // op[]
        {
            BOOST_TEST(r[2] == (std::tuple<int, int>(2, 1)));
            auto const cr = r;
            BOOST_TEST(cr[2] == (std::tuple<int, int>(2, 1)));
        }
    }
    return boost::report_errors();
}
|
{"hexsha": "b9a0a73ca58a5800a2e9c7d239145dc69e201fac", "size": 28337, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/stl_interfaces/test/random_access.cpp", "max_stars_repo_name": "anarthal/boost-unix-mirror", "max_stars_repo_head_hexsha": "8c34eb2fe471d6c3113c680c1fbef29e7a8063a0", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-07-12T13:52:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-12T13:52:18.000Z", "max_issues_repo_path": "libs/stl_interfaces/test/random_access.cpp", "max_issues_repo_name": "anarthal/boost-unix-mirror", "max_issues_repo_head_hexsha": "8c34eb2fe471d6c3113c680c1fbef29e7a8063a0", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2021-10-21T12:42:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-03T08:41:31.000Z", "max_forks_repo_path": "Libs/boost_1_76_0/libs/stl_interfaces/test/random_access.cpp", "max_forks_repo_name": "Antd23rus/S2DE", "max_forks_repo_head_hexsha": "47cc7151c2934cd8f0399a9856c1e54894571553", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2015-11-03T14:12:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-22T19:20:54.000Z", "avg_line_length": 27.6189083821, "max_line_length": 80, "alphanum_fraction": 0.5912764231, "num_tokens": 7043}
|
-10 ghijklmnop
-9 hijklmnop
-8 ijklmnop
-7 jklmnop
-6 klmnop
-5 lmnop
-4 mnop
-3 nop
-2 op
-1 p
|
{"hexsha": "468f586bc6ce3bdb3a182db29f2cb257dbf85ed9", "size": 106, "ext": "r", "lang": "R", "max_stars_repo_path": "test/unittest/funcs/substr/tst.substr-multi-var-idx-neg-no-cnt.r", "max_stars_repo_name": "LaudateCorpus1/dtrace-utils", "max_stars_repo_head_hexsha": "8ef492246aef8c034151a75a158163f42c635ec3", "max_stars_repo_licenses": ["UPL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/unittest/funcs/substr/tst.substr-multi-var-idx-neg-no-cnt.r", "max_issues_repo_name": "LaudateCorpus1/dtrace-utils", "max_issues_repo_head_hexsha": "8ef492246aef8c034151a75a158163f42c635ec3", "max_issues_repo_licenses": ["UPL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/unittest/funcs/substr/tst.substr-multi-var-idx-neg-no-cnt.r", "max_forks_repo_name": "LaudateCorpus1/dtrace-utils", "max_forks_repo_head_hexsha": "8ef492246aef8c034151a75a158163f42c635ec3", "max_forks_repo_licenses": ["UPL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 8.8333333333, "max_line_length": 14, "alphanum_fraction": 0.6226415094, "num_tokens": 53}
|
From Coqprime Require Import PocklingtonRefl.
Local Open Scope positive_scope.
(* Pocklington/elliptic-curve primality certificate: primality of the
   41-digit target follows from primality of the given smaller witness,
   checked reflectively by native computation. *)
Lemma primo40:
prime 206209712180582360058470143->
prime 25812668917919247631855455535243715547043.
Proof.
intro H.
apply (Pocklington_refl
(Ell_certif
(* target number, curve parameters and cofactor of the certificate;
   the witness prime carries multiplicity 1 in the factor list *)
25812668917919247631855455535243715547043
125176785540123
((206209712180582360058470143,1)::nil)
0
119164
93
961)
((Proof_certif _ H) :: nil)).
(* discharge the boolean reflection goal by native computation *)
native_cast_no_check (refl_equal true).
Time Qed.
|
{"author": "mukeshtiwari", "repo": "Formally_Verified_Verifiable_Group_Generator", "sha": "e80e8d43e81b5201d6ab82a8ebc07a5cef03476b", "save_path": "github-repos/coq/mukeshtiwari-Formally_Verified_Verifiable_Group_Generator", "path": "github-repos/coq/mukeshtiwari-Formally_Verified_Verifiable_Group_Generator/Formally_Verified_Verifiable_Group_Generator-e80e8d43e81b5201d6ab82a8ebc07a5cef03476b/primality/p2_40.v"}
|
"""
CREATED: 3-JUN-2021
LAST EDIT: 11-JUN-2021
AUTHOR: DUANE RINEHART, MBA (duane.rinehart@gmail.com)
READS blotter.xlsx (SYMBOL, SHARES, UNITARY, EXTENDED, DATE, BROKER, EXIT_TARGET)
COMPARES holding period to SP500 performance ($, days) using PME (Public Market Equivalent) ref: https://docs.preqin.com/reports/Preqin-Special-Report-PME-July-2015.pdf
REF: https://towardsdatascience.com/python-for-finance-stock-portfolio-analyses-6da4c3e61054
"""
import os
import pandas as pd
from datetime import datetime as dt
import sqlite3
import yfinance as yf
# import numpy as np
# import matplotlib.pyplot as plt
# import plotly.graph_objs as go
try:
import constants
except ImportError:
raise ImportError("ERROR LOADING PREREQUISITE [FROM WORKING DIRECTORY]: constants")
class HistData:
    """
    METHODS FOR CRUD OPERATIONS OF HISTORICAL DATA

    Connects on construction and bootstraps the schema (from
    ``constants.HIST_DB_SCHEMA``) when the ``sp500`` table is missing.
    Only the ``sqlite`` backend is currently implemented.
    """

    # Backend name and database location come from the project config.
    _db_server = constants.HIST_DB_SERVER
    _db_name = constants.HIST_DB_NAME

    def __init__(self):
        """
        INIT CONNECTION TO DATABASE; IMPORTS SCHEMA IF NOT EXISTS
        """
        if self._db_server == "sqlite":
            self._con = sqlite3.connect(self._db_name)
            try:
                self._cur = self._con.cursor()
            except sqlite3.Error:
                # Narrowed from a bare `except:` so unrelated errors
                # (KeyboardInterrupt, typos) are not swallowed.
                print(
                    "UNABLE TO CONNECT TO DATABASE: \n",
                    "TYPE:",
                    self._db_server + "\n",
                    "HOST (FILENAME IF SQLITE):",
                    self._db_name,
                )
        # NOTE(review): for any other _db_server value no connection is
        # made and the query below raises AttributeError — behavior
        # preserved from the original; confirm intended.

        # CHECK IF sp500 TABLE EXISTS
        self._cur.execute(
            """SELECT
                COUNT(name)
            FROM
                sqlite_master
            WHERE
                type = 'table'
            AND
                name = 'sp500'"""
        )
        if self._cur.fetchone()[0] != 1:
            print("INITIALIZE DB...")
            try:
                # Context manager closes the schema file even on error;
                # the original leaked the open file handle.
                with open(constants.HIST_DB_SCHEMA) as sql_file:
                    self._cur.executescript(sql_file.read())
            except FileNotFoundError:
                print(
                    "DATABASE SCHEMA CREATION FILE MISSING:", constants.HIST_DB_SCHEMA
                )

    def qry(self, sql, rtn_results: bool = True, rtn_iterator: bool = False) -> tuple:
        """Execute *sql*; fetch all rows when *rtn_results*, else commit.

        ``rtn_iterator`` is accepted for backward compatibility but is
        currently unused.
        """
        self._cur.execute(sql)
        if rtn_results:
            return self._cur.fetchall()
        self._con.commit()
class YahooAPI:
    """
    METHODS FOR SCRAPING YAHOO FINANCE
    ref: https://aroussi.com/post/python-yahoo-finance
    """

    def capture_historical(self, ticker, p):
        """Return a DataFrame of raw historical quotes for *ticker* over period *p*."""
        quote_history = yf.Ticker(ticker).history(period=p)
        return pd.DataFrame(data=quote_history)
class DataAccess:
    """
    METHODS FOR EXTRACTING & MANIPULATING RAW DATA
    """

    def __init__(self):
        # Blotter workbook location comes from the project config.
        self._srcPath = constants.SRCPATH
        self._srcFile = constants.SRCFILE

    def extract_blotter_data(self):
        """Load the 'master' sheet of the blotter workbook as a DataFrame."""
        infile = os.path.join(self._srcPath, self._srcFile)
        return pd.read_excel(infile, sheet_name="master")

    def extract_desc_var(self, df):
        """
        EXTRACT DESCRIPTIVE DATA FROM [BLOTTER] DATAFRAME

        Returns (earliest trade date, latest trade date, holding span in
        days, last day of the previous calendar year) — same order as
        before; only the locals were renamed so the builtins ``min`` and
        ``max`` are no longer shadowed.
        """
        first_date = df["DATE"].min().to_pydatetime().date()
        last_date = df["DATE"].max().to_pydatetime().date()
        cur_year = dt.today().year
        end_of_last_year = dt(year=cur_year - 1, month=12, day=31).date()
        diff = (last_date - first_date).days
        return first_date, last_date, diff, end_of_last_year
def pop_sp500_tables(hist, diff):
    """
    SCRAPES YAHOO FINANCE ('^GSPC' IS S&P500 SYMBOL) TO DOWNLOAD HISTORICAL SP500 DATA AND INSERTS INTO DATABASE
    Valid periods are: 1d, 5d, 1mo, 3mo, 6mo, 1y, 2y, 5y, 10y, ytd, max

    :param hist: HistData instance used to insert the downloaded rows
    :param diff: holding-period length in days; selects the smallest
        Yahoo period string that covers it
    """
    y = YahooAPI()
    # Map the day span to the smallest covering Yahoo period.  BUG FIX:
    # the original chain stopped at "2y", leaving `period` unbound
    # (UnboundLocalError) for spans of two years or more; the longer
    # tiers below complete the mapping.
    if diff < 31:
        period = "1mo"
    elif diff < 91:
        period = "3mo"
    elif diff < 181:
        period = "6mo"
    elif diff < 365:
        period = "1y"
    elif diff < 730:
        period = "2y"
    elif diff < 1825:
        period = "5y"
    elif diff < 3650:
        period = "10y"
    else:
        period = "max"
    hist_data = y.capture_historical(
        "^GSPC", period
    )  # NOTE RETURNED DATA IS OF TYPE PANDAS DATAFRAME
    for index, row in hist_data.iterrows():
        result_date = index.to_pydatetime().date()
        sql = """INSERT OR IGNORE INTO sp500 (date, open, close, high, low, volume)
                VALUES ('{}', '{}', '{}', '{}', '{}', '{}')
                """.format(
            result_date,
            row["Open"],
            row["Close"],
            row["High"],
            row["Low"],
            row["Volume"],
        )
        # NOTE: USING 'date' AS PRIMARY KEY PREVENTS DUPLICATE ENTRIES
        # THIS WILL STILL PRODUCE INTEGRITY ERROR - WORKAROUND ADD IGNORE TO SQL
        # REF: https://stackoverflow.com/questions/36518628/sqlite3-integrityerror-unique-constraint-failed-when-inserting-a-value
        hist.qry(sql, rtn_results=False)
def main():
    """Load the blotter, then ensure local S&P 500 history covers it.

    Pulls fresh data from Yahoo when fewer than 65% of the calendar days
    in the holding window have stored quotes (weekdays are ~71.4% of all
    days, so 65% leaves room for market holidays).
    """
    data = DataAccess()
    xl_df = data.extract_blotter_data()
    # Renamed from min/max so the builtins are not shadowed.
    first_date, last_date, diff, prev_year_end = data.extract_desc_var(xl_df)
    hist = HistData()
    # QRY FOR DATA COVERING TIME PERIOD OF INTEREST
    SQL = "SELECT COUNT(*) FROM sp500 WHERE date >= '{}' AND date <= '{}' ORDER BY date ASC".format(
        first_date, last_date
    )
    cnt = hist.qry(SQL)  # RETURNS LIST OF TUPLES
    datapoints = cnt[0][0]  # CAPTURE FIRST ELEMENT OF LIST; FIRST ELEMENT OF TUPLE
    if diff > 0:
        if (
            datapoints / diff < 0.65
        ):  # < 65%; PULL MORE DATA [NOTE: 71.4% OF DATES ARE M-F 5/7 DAYS PER WEEK; (EXCLUDES HOLIDAYS)]
            pop_sp500_tables(hist, diff)
        else:
            print(
                "CONTINUE WITH ",
                round(datapoints / diff * 100, 2),
                "% of DATES (INCLUDES HOLIDAYS & WEEKENDS)",
            )
    # results = hist.qry(SQL, rtn_iterator=True)  # RETURNS LIST OF TUPLES
# Script entry point: run the coverage check only when executed directly.
if __name__ == "__main__":
    main()
|
{"hexsha": "ae26f1674b23ad60a0cdb5680f0d104ed5ab8192", "size": 5917, "ext": "py", "lang": "Python", "max_stars_repo_path": "stock_analysisPME.py", "max_stars_repo_name": "drinehart1/finance1", "max_stars_repo_head_hexsha": "5bdd2f6ca53838e84147652441bb76c2a08c778c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-09T04:56:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T04:56:06.000Z", "max_issues_repo_path": "stock_analysisPME.py", "max_issues_repo_name": "drinehart1/finance1", "max_issues_repo_head_hexsha": "5bdd2f6ca53838e84147652441bb76c2a08c778c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stock_analysisPME.py", "max_forks_repo_name": "drinehart1/finance1", "max_forks_repo_head_hexsha": "5bdd2f6ca53838e84147652441bb76c2a08c778c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3435897436, "max_line_length": 168, "alphanum_fraction": 0.5786716241, "include": true, "reason": "import numpy", "num_tokens": 1538}
|
# Auto-generated pytest fixture module for Optimus `cols` operations.
from pyspark.sql.types import *
from optimus import Optimus
# NOTE(review): "json_enconding" is the (misspelled) name actually exported
# by the optimus package; do not "correct" the spelling here.
from optimus.helpers.json import json_enconding
from pyspark.ml.linalg import Vectors, VectorUDT, DenseVector
import numpy as np
nan = np.nan
import datetime
from pyspark.sql import functions as F
# Local-mode Spark session wrapper used to build the fixture dataframes below.
op = Optimus(master='local')
source_df=op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", 28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', 
[91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None)])
class Testdf_cols_2(object):
@staticmethod
def test_cols_add():
actual_df =source_df.cols.add(['height(ft)','rank'])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', FloatType(), True),('function', StringType(), True),('rank', FloatType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('sum', FloatType(), True)], [("Optim'us", 28.0, 'Leader', 10.0, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 38.0), ('bumbl#ebéé ', 17.0, 'Espionage', 7.0, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 24.0), ('ironhide&', 26.0, 'Security', 7.0, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 33.0), ('Jazz', 13.0, 'First Lieutenant', 8.0, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 21.0), ('Megatron', None, 'None', 10.0, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None), ('Metroplex_)^$', 300.0, 'Battle Station', 
8.0, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 308.0)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_add_all_columns():
actual_df =source_df.cols.add('*')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', FloatType(), True),('function', StringType(), True),('rank', FloatType(), True),('age', FloatType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('sum', FloatType(), True)], [("Optim'us", 28.0, 'Leader', 10.0, 5000000.0, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 5000042.5), ('bumbl#ebéé ', 17.0, 'Espionage', 7.0, 5000000.0, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 5000026.0), ('ironhide&', 26.0, 'Security', 7.0, 5000000.0, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 5000037.0), ('Jazz', 13.0, 'First Lieutenant', 8.0, 5000000.0, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 5000023.0), ('Megatron', None, 'None', 10.0, 5000000.0, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None), 
('Metroplex_)^$', 300.0, 'Battle Station', 8.0, 5000000.0, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_mul():
actual_df =source_df.cols.mul(['height(ft)','rank'])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', FloatType(), True),('function', StringType(), True),('rank', FloatType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('mul', FloatType(), True)], [("Optim'us", 28.0, 'Leader', 10.0, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 280.0), ('bumbl#ebéé ', 17.0, 'Espionage', 7.0, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 119.0), ('ironhide&', 26.0, 'Security', 7.0, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 182.0), ('Jazz', 13.0, 'First Lieutenant', 8.0, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 104.0), ('Megatron', None, 'None', 10.0, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None), ('Metroplex_)^$', 300.0, 'Battle 
Station', 8.0, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 2400.0)])
assert (expected_df.collect() == actual_df.collect())
|
{"hexsha": "22ff57ed6fbece8e552d3a6b8746329b3b97b9e5", "size": 9561, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_df_cols_2.py", "max_stars_repo_name": "atwoodjw/Optimus", "max_stars_repo_head_hexsha": "938463cec41a6683d2077c9afc7d6ba05c3b993f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-22T13:04:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-22T13:04:37.000Z", "max_issues_repo_path": "tests/test_df_cols_2.py", "max_issues_repo_name": "atwoodjw/Optimus", "max_issues_repo_head_hexsha": "938463cec41a6683d2077c9afc7d6ba05c3b993f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_df_cols_2.py", "max_forks_repo_name": "atwoodjw/Optimus", "max_forks_repo_head_hexsha": "938463cec41a6683d2077c9afc7d6ba05c3b993f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 329.6896551724, "max_line_length": 2244, "alphanum_fraction": 0.6658299341, "include": true, "reason": "import numpy", "num_tokens": 3599}
|
/-
Copyright (c) 2019 The Flypitch Project. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jesse Han, Floris van Doorn
-/
import .henkin
local attribute [instance, priority 0] classical.prop_decidable
open fol fol.Lhom
namespace fol
-- Soundness direction: a provable sentence holds in every model of T.
lemma satisfied_of_provable {L : Language} (T : Theory L) (ψ : sentence L) : T ⊢' ψ → T ⊨ ψ :=
λ _, soundness $ classical.choice ‹_›
-- An inconsistent theory proves everything and models everything, so the
-- completeness equivalence holds vacuously.
lemma completeness_for_inconsistent_theories {L : Language} (T : Theory L) (ψ : sentence L) (h_inconsis : ¬ is_consistent T) : T ⊢' ψ ↔ T ⊨ ψ :=
⟨satisfied_of_provable _ _, λ _, exfalso' $ classical.by_contradiction ‹_›⟩
/- T is consistent iff there is a nonempty model of T -/
-- Forward: build the term model of a completed Henkinization and reduct it
-- back to L.  Backward: a model of T refutes any proof of falsity, via
-- soundness (satisfied_of_provable).
theorem model_existence {L : Language} (T : Theory L) : is_consistent T ↔ (∃ M : Structure L, (nonempty M) ∧ M ⊨ T) :=
begin
refine ⟨_,_⟩; intro H,
{ refine ⟨reduct (@henkin_language_over L T H)
(term_model $ completion_of_henkinization H), ⟨_,_⟩⟩,
{ exact reduct_nonempty_of_nonempty (by simp[fol.nonempty_term_model]) },
{ exact reduct_of_complete_henkinization_models_T _ } },
{ rcases H with ⟨M, ⟨H_nonempty, H_sat⟩⟩,
exact λ _, false_of_satisfied_false (satisfied_of_provable _ _ ‹_› ‹_› ‹_›) }
end
-- Noncomputable extraction (uses choice) of a nonempty model of a
-- consistent theory, packaged as a Σ'-type.
noncomputable def nonempty_model_of_consis {L : Language} {T : Theory L} (hT : is_consistent T) : Σ' M : Model T, nonempty M.fst.carrier :=
begin
have := (model_existence T).mp hT, apply classical.psigma_of_exists,
rcases this with ⟨M, hM, h_satisfied⟩,
apply exists.intro, swap, exact ⟨M, h_satisfied⟩, exact hM
end
/-- model_existence is implied by completeness --/
theorem model_existence_of_completeness {L : Language} (T : Theory L) (h_completeness : ∀ (T : Theory L) (ψ : sentence L), T ⊢' ψ ↔ T ⊨ ψ) : is_consistent T ↔ (∃ M : Structure L, (nonempty M) ∧ M ⊨ T) :=
begin
refine ⟨_,_⟩; intro H,
{ by_contra H', push_neg at H', apply H,
rw h_completeness,
intros M hM0 hMT, exfalso, exact H' M hM0 hMT },
{ intro H', rcases H with ⟨_,_,_⟩, rw[h_completeness] at H',
exact false_of_satisfied_false (H' ‹_› ‹_›) }
end
-- Gödel completeness.  The hard direction contraposes: if ψ is unprovable,
-- T ∪ {∼ψ} is consistent (consis_not_of_not_provable), hence has a model,
-- contradicting T ⊨ ψ.
theorem completeness {L : Language} (T : Theory L) (ψ : sentence L) : T ⊢' ψ ↔ T ⊨ ψ :=
begin
refine ⟨λ _, satisfied_of_provable _ _ ‹_›, _⟩, by_contra H, push_neg at H,
rcases nonempty_model_of_consis (consis_not_of_not_provable H.right) with ⟨⟨M,HM⟩, H_nonempty⟩,
refine absurd H.left (not_satisfied_of_model_not _ _ _), swap,
exact ((by simp at HM; simp*) : (⟨M, by tidy⟩ : Model T) ⊨ _), from ‹_›
end
-- Compactness: semantic entailment reduces to a finite subtheory, obtained
-- from completeness plus finiteness of proofs.
theorem compactness {L : Language} {T : Theory L} {f : sentence L} :
T ⊨ f ↔ ∃ fs : finset (sentence L), (↑fs : Theory L) ⊨ (f : sentence L) ∧ ↑fs ⊆ T :=
begin
rw [<-(completeness T f), theory_proof_compactness_iff], simp only [completeness]
end
end fol
|
{"author": "Jlh18", "repo": "ModelTheoryInLean8", "sha": "fbda7d869d4169b6e739bb74165e99ee03ca63d6", "save_path": "github-repos/lean/Jlh18-ModelTheoryInLean8", "path": "github-repos/lean/Jlh18-ModelTheoryInLean8/ModelTheoryInLean8-fbda7d869d4169b6e739bb74165e99ee03ca63d6/src/completeness.lean"}
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dask.distributed import wait, default_client
from cugraph.dask.common.input_utils import get_distributed_data
from cugraph.structure import renumber_wrapper as c_renumber
import cugraph.comms.comms as Comms
import dask_cudf
import numpy as np
import cudf
def call_renumber(sID, data, num_edges, is_mnmg, store_transposed):
    """
    Worker-side task: run the C renumbering on this worker's partition.

    Looks up the worker id and comms handle for session ``sID`` and invokes
    the ``renumber_wrapper`` on the first element of ``data`` (the local
    dataframe partition list).
    """
    worker_id = Comms.get_worker_id(sID)
    comms_handle = Comms.get_handle(sID)
    local_frame = data[0]
    return c_renumber.renumber(
        local_frame,
        num_edges,
        worker_id,
        comms_handle,
        is_mnmg,
        store_transposed,
    )
class NumberMap:
    class SingleGPU:
        """
        Single-GPU (cudf) implementation of the renumbering map.

        Maintains ``self.df``, a cudf.DataFrame mapping the external vertex
        columns (renamed to "0" .. "n-1") to an internal integer column
        ``id``.
        """
        def __init__(self, df, src_col_names, dst_col_names, id_type,
                     store_transposed):
            # Internal column labels "0" .. "n-1" for the external columns.
            self.col_names = NumberMap.compute_vals(src_col_names)
            self.src_col_names = src_col_names
            self.dst_col_names = dst_col_names
            self.df = df
            self.id_type = id_type
            self.store_transposed = store_transposed
            # Becomes True once indirection_map/renumbering has been built.
            self.numbered = False
        def to_internal_vertex_id(self, df, col_names):
            """Return a cudf.Series of internal ids for the external vertex
            columns *col_names* of *df*, in the input row order."""
            tmp_df = df[col_names].rename(
                columns=dict(zip(col_names, self.col_names)), copy=False
            )
            # Remember the original row order so it can be restored after
            # the merge (merge does not preserve order).
            index_name = NumberMap.generate_unused_column_name(df.columns)
            tmp_df[index_name] = tmp_df.index
            return (
                self.df.merge(tmp_df, on=self.col_names, how="right")
                .sort_values(index_name)
                .drop(columns=[index_name])
                .reset_index()["id"]
            )
        def from_internal_vertex_id(
            self, df, internal_column_name, external_column_names
        ):
            """Join the external vertex columns back onto *df* by matching
            *internal_column_name* against the map's ``id`` column."""
            tmp_df = self.df.merge(
                df,
                right_on=internal_column_name,
                left_on="id",
                how="right",
            )
            if internal_column_name != "id":
                tmp_df = tmp_df.drop(columns=["id"])
            if external_column_names is None:
                return tmp_df
            else:
                return tmp_df.rename(
                    columns=dict(zip(self.col_names, external_column_names)),
                    copy=False,
                )
        def add_internal_vertex_id(self, df, id_column_name, col_names,
                                   drop, preserve_order):
            """Attach the internal id as column *id_column_name* to *df*,
            optionally dropping the external columns and/or preserving the
            input row order."""
            ret = None
            if preserve_order:
                # NOTE(review): this writes the helper column into the
                # caller's dataframe (tmp_df aliases df) — confirm callers
                # tolerate that side effect.
                index_name = NumberMap.generate_unused_column_name(df.columns)
                tmp_df = df
                tmp_df[index_name] = tmp_df.index
            else:
                tmp_df = df
            # Avoid clobbering a pre-existing "id" column in the input.
            if "id" in df.columns:
                id_name = NumberMap.generate_unused_column_name(tmp_df.columns)
                merge_df = self.df.rename(columns={"id": id_name}, copy=False)
            else:
                id_name = "id"
                merge_df = self.df
            if col_names is None:
                ret = merge_df.merge(tmp_df, on=self.col_names, how="right")
            elif col_names == self.col_names:
                ret = merge_df.merge(tmp_df, on=self.col_names, how="right")
            else:
                ret = (
                    merge_df.merge(
                        tmp_df,
                        right_on=col_names,
                        left_on=self.col_names,
                        how="right",
                    )
                    .drop(columns=self.col_names)
                )
            if drop:
                ret = ret.drop(columns=col_names)
            ret = ret.rename(
                columns={id_name: id_column_name}, copy=False
            )
            if preserve_order:
                ret = ret.sort_values(index_name).reset_index(drop=True)
            return ret
        def indirection_map(self, df, src_col_names, dst_col_names):
            """Build the vertex map: deduplicate all vertices appearing in
            the source (and optionally destination) columns and assign each
            a dense integer ``id``.  Stores and returns the map frame."""
            tmp_df = cudf.DataFrame()
            # groupby().count() is used purely to deduplicate vertex rows.
            tmp = (
                df[src_col_names]
                .groupby(src_col_names)
                .count()
                .reset_index()
                .rename(
                    columns=dict(zip(src_col_names, self.col_names)),
                    copy=False,
                )
            )
            if dst_col_names is not None:
                tmp_dst = (
                    df[dst_col_names]
                    .groupby(dst_col_names)
                    .count()
                    .reset_index()
                )
                for newname, oldname in zip(self.col_names, dst_col_names):
                    tmp_df[newname] = tmp[newname].append(tmp_dst[oldname])
            else:
                for newname in self.col_names:
                    tmp_df[newname] = tmp[newname]
            # Second dedup pass over the union of src and dst vertices.
            tmp_df = tmp_df.groupby(self.col_names).count().reset_index()
            tmp_df["id"] = tmp_df.index.astype(self.id_type)
            self.df = tmp_df
            return tmp_df
class MultiGPU:
def __init__(
self, ddf, src_col_names, dst_col_names, id_type, store_transposed
):
self.col_names = NumberMap.compute_vals(src_col_names)
self.val_types = NumberMap.compute_vals_types(ddf, src_col_names)
self.val_types["count"] = np.int32
self.id_type = id_type
self.ddf = ddf
self.store_transposed = store_transposed
self.numbered = False
def to_internal_vertex_id(self, ddf, col_names):
tmp_ddf = ddf[col_names].rename(
columns=dict(zip(col_names, self.col_names)))
for name in self.col_names:
tmp_ddf[name] = tmp_ddf[name].astype(self.ddf[name].dtype)
x = self.ddf.merge(
tmp_ddf,
on=self.col_names,
how="right",
)
return x['global_id']
def from_internal_vertex_id(
self, df, internal_column_name, external_column_names
):
tmp_df = self.ddf.merge(
df,
right_on=internal_column_name,
left_on="global_id",
how="right"
).map_partitions(lambda df: df.drop(columns="global_id"))
if external_column_names is None:
return tmp_df
else:
return tmp_df.map_partitions(
lambda df:
df.rename(
columns=dict(
zip(self.col_names, external_column_names)
),
copy=False
)
)
def add_internal_vertex_id(self, ddf, id_column_name, col_names, drop,
preserve_order):
# At the moment, preserve_order cannot be done on
# multi-GPU
if preserve_order:
raise Exception("preserve_order not supported for multi-GPU")
ret = None
if col_names is None:
ret = self.ddf.merge(
ddf, on=self.col_names, how="right"
)
elif col_names == self.col_names:
ret = self.ddf.merge(
ddf, on=col_names, how="right"
)
else:
ret = self.ddf.merge(
ddf, right_on=col_names, left_on=self.col_names
).map_partitions(
lambda df: df.drop(columns=self.col_names)
)
if drop:
ret = ret.map_partitions(lambda df: df.drop(columns=col_names))
ret = ret.map_partitions(
lambda df: df.rename(
columns={"global_id": id_column_name}, copy=False
)
)
return ret
def indirection_map(self, ddf, src_col_names, dst_col_names):
tmp = (
ddf[src_col_names]
.groupby(src_col_names)
.count()
.reset_index()
.rename(
columns=dict(zip(src_col_names, self.col_names)),
)
)
if dst_col_names is not None:
tmp_dst = (
ddf[dst_col_names]
.groupby(dst_col_names)
.count()
.reset_index()
)
for i, (newname, oldname) in enumerate(zip(self.col_names,
dst_col_names)):
if i == 0:
tmp_df = tmp[newname].append(tmp_dst[oldname]).\
to_frame(name=newname)
else:
tmp_df[newname] = tmp[newname].append(tmp_dst[oldname])
else:
for newname in self.col_names:
tmp_df[newname] = tmp[newname]
tmp_ddf = tmp_df.groupby(self.col_names).count().reset_index()
# Set global index
tmp_ddf = tmp_ddf.assign(idx=1)
tmp_ddf['global_id'] = tmp_ddf.idx.cumsum() - 1
tmp_ddf = tmp_ddf.drop(columns='idx')
tmp_ddf = tmp_ddf.persist()
self.ddf = tmp_ddf
return tmp_ddf
    def __init__(self, id_type=np.int32):
        # Filled in by renumber_and_segment with either a SingleGPU or a
        # MultiGPU implementation object once the input frame type is known.
        self.implementation = None
        # dtype of the internal vertex identifiers this map generates.
        self.id_type = id_type
def compute_vals_types(df, column_names):
"""
Helper function to compute internal column names and types
"""
return {
str(i): df[column_names[i]].dtype for i in range(len(column_names))
}
def generate_unused_column_name(column_names):
"""
Helper function to generate an unused column name
"""
name = 'x'
while name in column_names:
name = name + "x"
return name
def compute_vals(column_names):
"""
Helper function to compute internal column names based on external
column names
"""
return [str(i) for i in range(len(column_names))]
def to_internal_vertex_id(self, df, col_names=None):
"""
Given a collection of external vertex ids, return the internal
vertex ids
Parameters
----------
df: cudf.DataFrame, cudf.Series, dask_cudf.DataFrame, dask_cudf.Series
Contains a list of external vertex identifiers that will be
converted into internal vertex identifiers
col_names: (optional) list of strings
This list of 1 or more strings contain the names
of the columns that uniquely identify an external
vertex identifier
Returns
---------
vertex_ids : cudf.Series or dask_cudf.Series
The vertex identifiers. Note that to_internal_vertex_id
does not guarantee order or partitioning (in the case of
dask_cudf) of vertex ids. If order matters use
add_internal_vertex_id
"""
tmp_df = None
tmp_col_names = None
if type(df) is cudf.Series:
tmp_df = cudf.DataFrame()
tmp_df["0"] = df
tmp_col_names = ["0"]
elif type(df) is dask_cudf.Series:
tmp_df = dask_cudf.DataFrame()
tmp_df["0"] = df
tmp_col_names = ["0"]
else:
tmp_df = df
tmp_col_names = col_names
reply = self.implementation.to_internal_vertex_id(tmp_df,
tmp_col_names)
return reply
    def add_internal_vertex_id(
        self, df, id_column_name="id", col_names=None, drop=False,
        preserve_order=False
    ):
        """
        Given a collection of external vertex ids, return the internal vertex
        ids combined with the input data.
        If a series-type input is provided then the series will be in a column
        named '0'. Otherwise the input column names in the DataFrame will be
        preserved.
        Parameters
        ----------
        df: cudf.DataFrame, cudf.Series, dask_cudf.DataFrame, dask_cudf.Series
            Contains a list of external vertex identifiers that will be
            converted into internal vertex identifiers
        id_column_name: (optional) string
            The name to be applied to the column containing the id
            (defaults to 'id')
        col_names: (optional) list of strings
            This list of 1 or more strings contain the names
            of the columns that uniquely identify an external
            vertex identifier
        drop: (optional) boolean
            If True, drop the column names specified in col_names from
            the returned DataFrame. Defaults to False.
        preserve_order: (optional) boolean
            If True, do extra sorting work to preserve the order
            of the input DataFrame. Defaults to False.
        Returns
        ---------
        df : cudf.DataFrame or dask_cudf.DataFrame
            A DataFrame containing the input data (DataFrame or series)
            with an additional column containing the internal vertex id.
            Note that there is no guarantee of the order or partitioning
            of elements in the returned DataFrame.
        """
        tmp_df = None
        tmp_col_names = None
        # can_drop is forced False for series inputs: the "0" column is
        # synthesized here, so there is no caller column to drop.
        can_drop = True
        if type(df) is cudf.Series:
            tmp_df = df.to_frame("0")
            tmp_col_names = ["0"]
            can_drop = False
        elif type(df) is dask_cudf.Series:
            tmp_df = df.to_frame("0")
            tmp_col_names = ["0"]
            can_drop = False
        else:
            tmp_df = df
            # Normalize a single column name into a one-element list.
            if isinstance(col_names, list):
                tmp_col_names = col_names
            else:
                tmp_col_names = [col_names]
        return self.implementation.add_internal_vertex_id(
            tmp_df, id_column_name, tmp_col_names, (drop and can_drop),
            preserve_order
        )
    def from_internal_vertex_id(
        self,
        df,
        internal_column_name=None,
        external_column_names=None,
        drop=False,
    ):
        """
        Given a collection of internal vertex ids, return a DataFrame of
        the external vertex ids
        Parameters
        ----------
        df: cudf.DataFrame, cudf.Series, dask_cudf.DataFrame, dask_cudf.Series
            A list of internal vertex identifiers that will be
            converted into external vertex identifiers. If df is a series type
            object it will be converted to a dataframe where the series is
            in a column labeled 'id'. If df is a dataframe type object
            then internal_column_name should identify which column corresponds
            to the internal vertex id that should be converted
        internal_column_name: (optional) string
            Name of the column containing the internal vertex id.
            If df is a series then this parameter is ignored. If df is
            a DataFrame this parameter is required.
        external_column_names: (optional) string or list of strings
            Name of the columns that define an external vertex id.
            If not specified, columns will be labeled '0', '1,', ..., 'n-1'
        drop: (optional) boolean
            If True the internal column name will be dropped from the
            DataFrame. Defaults to False.
        Returns
        ---------
        df : cudf.DataFrame or dask_cudf.DataFrame
            The original DataFrame columns exist unmodified. Columns
            are added to the DataFrame to identify the external vertex
            identifiers. If external_columns is specified, these names
            are used as the names of the output columns. If external_columns
            is not specified the columns are labeled '0', ... 'n-1' based on
            the number of columns identifying the external vertex identifiers.
        """
        tmp_df = None
        # can_drop is forced False for series inputs: the "id" column is
        # synthesized here, so dropping it would discard the only payload.
        can_drop = True
        if type(df) is cudf.Series:
            tmp_df = df.to_frame("id")
            internal_column_name = "id"
            can_drop = False
        elif type(df) is dask_cudf.Series:
            tmp_df = df.to_frame("id")
            internal_column_name = "id"
            can_drop = False
        else:
            tmp_df = df
        output_df = self.implementation.from_internal_vertex_id(
            tmp_df, internal_column_name, external_column_names
        )
        if drop and can_drop:
            return output_df.drop(columns=internal_column_name)
        return output_df
def renumber_and_segment(
df, src_col_names, dst_col_names, preserve_order=False,
store_transposed=False
):
if isinstance(src_col_names, list):
renumber_type = 'legacy'
elif not (df[src_col_names].dtype == np.int32 or
df[src_col_names].dtype == np.int64):
renumber_type = 'legacy'
else:
renumber_type = 'experimental'
renumber_map = NumberMap()
if not isinstance(src_col_names, list):
src_col_names = [src_col_names]
dst_col_names = [dst_col_names]
id_type = df[src_col_names[0]].dtype
if isinstance(df, cudf.DataFrame):
renumber_map.implementation = NumberMap.SingleGPU(
df, src_col_names, dst_col_names, renumber_map.id_type,
store_transposed
)
elif isinstance(df, dask_cudf.DataFrame):
renumber_map.implementation = NumberMap.MultiGPU(
df, src_col_names, dst_col_names, renumber_map.id_type,
store_transposed
)
else:
raise Exception("df must be cudf.DataFrame or dask_cudf.DataFrame")
if renumber_type == 'legacy':
indirection_map = renumber_map.implementation.\
indirection_map(df,
src_col_names,
dst_col_names)
df = renumber_map.add_internal_vertex_id(
df, "src", src_col_names, drop=True,
preserve_order=preserve_order
)
df = renumber_map.add_internal_vertex_id(
df, "dst", dst_col_names, drop=True,
preserve_order=preserve_order
)
else:
df = df.rename(columns={src_col_names[0]: "src",
dst_col_names[0]: "dst"})
num_edges = len(df)
if isinstance(df, dask_cudf.DataFrame):
is_mnmg = True
else:
is_mnmg = False
if is_mnmg:
client = default_client()
data = get_distributed_data(df)
result = [(client.submit(call_renumber,
Comms.get_session_id(),
wf[1],
num_edges,
is_mnmg,
store_transposed,
workers=[wf[0]]), wf[0])
for idx, wf in enumerate(data.worker_to_parts.items())]
wait(result)
def get_renumber_map(id_type, data):
return data[0].astype(id_type)
def get_segment_offsets(data):
return data[1]
def get_renumbered_df(id_type, data):
# FIXME: This assume the column names are always 'src'
# and 'dst' which is the case now
data[2]['src'] = data[2]['src'].astype(id_type)
data[2]['dst'] = data[2]['dst'].astype(id_type)
return data[2]
renumbering_map = dask_cudf.from_delayed(
[client.submit(get_renumber_map,
id_type,
data,
workers=[wf])
for (data, wf) in result])
list_of_segment_offsets = client.gather(
[client.submit(get_segment_offsets,
data,
workers=[wf])
for (data, wf) in result])
aggregate_segment_offsets = []
for segment_offsets in list_of_segment_offsets:
aggregate_segment_offsets.extend(segment_offsets)
renumbered_df = dask_cudf.from_delayed(
[client.submit(get_renumbered_df,
id_type,
data,
workers=[wf])
for (data, wf) in result])
if renumber_type == 'legacy':
renumber_map.implementation.ddf = indirection_map.merge(
renumbering_map,
right_on='original_ids', left_on='global_id',
how='right').\
drop(columns=['global_id', 'original_ids'])\
.rename(columns={'new_ids': 'global_id'})
else:
renumber_map.implementation.ddf = renumbering_map.rename(
columns={'original_ids': '0', 'new_ids': 'global_id'})
renumber_map.implementation.numbered = True
return renumbered_df, renumber_map, aggregate_segment_offsets
else:
renumbering_map, segment_offsets, renumbered_df = \
c_renumber.renumber(df,
num_edges,
0,
Comms.get_default_handle(),
is_mnmg,
store_transposed)
if renumber_type == 'legacy':
renumber_map.implementation.df = indirection_map.\
merge(renumbering_map,
right_on='original_ids', left_on='id').\
drop(columns=['id', 'original_ids'])\
.rename(columns={'new_ids': 'id'}, copy=False)
else:
renumber_map.implementation.df = renumbering_map.rename(
columns={'original_ids': '0', 'new_ids': 'id'}, copy=False)
renumber_map.implementation.numbered = True
return renumbered_df, renumber_map, segment_offsets
def renumber(df, src_col_names, dst_col_names, preserve_order=False,
store_transposed=False):
return NumberMap.renumber_and_segment(
df, src_col_names, dst_col_names,
preserve_order, store_transposed)[0:2]
    def unrenumber(self, df, column_name, preserve_order=False,
                   get_column_names=False):
        """
        Given a DataFrame containing internal vertex ids in the identified
        column, replace this with external vertex ids. If the renumbering
        is from a single column, the output dataframe will use the same
        name for the external vertex identifiers. If the renumbering is from
        a multi-column input, the output columns will be labeled 0 through
        n-1 with a suffix of _column_name.
        Note that this function does not guarantee order or partitioning in
        multi-GPU mode.
        Parameters
        ----------
        df: cudf.DataFrame or dask_cudf.DataFrame
            A DataFrame containing internal vertex identifiers that will be
            converted into external vertex identifiers.
        column_name: string
            Name of the column containing the internal vertex id.
        preserve_order: bool, optional (default=False)
            If True, preserve the order of the rows in the output
            DataFrame to match the input DataFrame
        get_column_names: bool, optional (default=False)
            If True, the unrenumbered column names are returned.
        Returns
        ---------
        df : cudf.DataFrame or dask_cudf.DataFrame
            The original DataFrame columns exist unmodified. The external
            vertex identifiers are added to the DataFrame, the internal
            vertex identifier column is removed from the dataframe.
        column_names: string or list of strings
            If get_column_names is True, the unrenumbered column names are
            returned.
        Examples
        --------
        >>> from cugraph.structure import number_map
        >>> df = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
        ...                    dtype=['int32', 'int32', 'float32'],
        ...                    header=None)
        >>> df, number_map = number_map.NumberMap.renumber(df, '0', '1')
        >>> G = cugraph.Graph()
        >>> G.from_cudf_edgelist(df, 'src', 'dst')
        >>> pr = cugraph.pagerank(G, alpha = 0.85, max_iter = 500,
        ...                       tol = 1.0e-05)
        >>> pr = number_map.unrenumber(pr, 'vertex')
        """
        if len(self.implementation.col_names) == 1:
            # Output will be renamed to match input
            mapping = {"0": column_name}
            col_names = column_name
        else:
            # Output will be renamed to ${i}_${column_name}
            mapping = {}
            for nm in self.implementation.col_names:
                mapping[nm] = nm + "_" + column_name
            col_names = list(mapping.values())
        if preserve_order:
            # NOTE(review): df (not df.columns) is passed here; this relies
            # on `in df` testing column membership for dataframes -- confirm.
            index_name = NumberMap.generate_unused_column_name(df)
            df[index_name] = df.index
        df = self.from_internal_vertex_id(df, column_name, drop=True)
        if preserve_order:
            df = df.sort_values(
                index_name
            ).drop(columns=index_name).reset_index(drop=True)
        if type(df) is dask_cudf.DataFrame:
            # Rename per-partition; dask_cudf.rename lacks the copy kwarg.
            df = df.map_partitions(
                lambda df: df.rename(columns=mapping, copy=False)
            )
        else:
            df = df.rename(columns=mapping, copy=False)
        if get_column_names:
            return df, col_names
        else:
            return df
def vertex_column_size(self):
return len(self.implementation.col_names)
|
{"hexsha": "5d44e979b8e5b31241cfec22d52bbff563253458", "size": 27137, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/cugraph/cugraph/structure/number_map.py", "max_stars_repo_name": "AyodeAwe/cugraph", "max_stars_repo_head_hexsha": "e3273ee38a6dfd35d180b9b00071fd0e7a94e73e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/cugraph/cugraph/structure/number_map.py", "max_issues_repo_name": "AyodeAwe/cugraph", "max_issues_repo_head_hexsha": "e3273ee38a6dfd35d180b9b00071fd0e7a94e73e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/cugraph/cugraph/structure/number_map.py", "max_forks_repo_name": "AyodeAwe/cugraph", "max_forks_repo_head_hexsha": "e3273ee38a6dfd35d180b9b00071fd0e7a94e73e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.6566951567, "max_line_length": 79, "alphanum_fraction": 0.5342521281, "include": true, "reason": "import numpy", "num_tokens": 5372}
|
[STATEMENT]
lemma
assumes "P dvd x"
shows "[x = 0] (mod P)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. [x = 0::'a] (mod P)
[PROOF STEP]
using assms cong_def
[PROOF STATE]
proof (prove)
using this:
P dvd x
[?b = ?c] (mod ?a) = (?b mod ?a = ?c mod ?a)
goal (1 subgoal):
1. [x = 0::'a] (mod P)
[PROOF STEP]
by force
|
{"llama_tokens": 166, "file": "Multi_Party_Computation_ETP_RSA_OT", "length": 2}
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import numpy as np
import nevergrad as ng
import nevergrad.common.typing as tp
from nevergrad.parametrization import parameter as p
from . import nsga2
# pylint: disable=reimported,redefined-outer-name,unused-variable,unsubscriptable-object, unused-argument
# pylint: disable=import-outside-toplevel
def test_crowding_distance() -> None:
    """Check CrowdingDistance against hand-computed distances on four points."""
    params = ng.p.Tuple(ng.p.Scalar(lower=0, upper=2), ng.p.Scalar(lower=0, upper=2))
    v = sorted(random.uniform(0.01, 0.99) for _ in range(4))
    loss_values = [[0.0, 5.0], [v[0], v[3]], [v[1], v[2]], [1.0, 0.0]]
    candidates: tp.List[p.Parameter] = []
    for losses in loss_values:
        child = params.spawn_child().set_standardized_data(losses)
        child._losses = np.array(losses)
        candidates.append(child)
    nsga2.CrowdingDistance().compute_distance(candidates)
    # Expected distances, accumulated objective by objective.
    span_0 = abs(loss_values[0][0] - loss_values[-1][0])
    span_1 = abs(loss_values[0][1] - loss_values[-1][1])
    cdist_1 = (loss_values[2][0] - loss_values[0][0]) / span_0
    cdist_1 += (loss_values[0][1] - loss_values[2][1]) / span_1
    cdist_2 = (loss_values[3][0] - loss_values[1][0]) / span_0
    cdist_2 += (loss_values[1][1] - loss_values[3][1]) / span_1
    # Boundary points get infinite distance; interior points match the formula.
    assert candidates[0]._meta["crowding_distance"] == float("inf")
    assert candidates[3]._meta["crowding_distance"] == float("inf")
    np.testing.assert_almost_equal(candidates[1]._meta["crowding_distance"], cdist_1, decimal=3)
    np.testing.assert_almost_equal(candidates[2]._meta["crowding_distance"], cdist_2, decimal=3)
def test_fast_non_dominated_ranking() -> None:
    """Frontiers computed by FastNonDominatedRanking must match construction order."""
    params = ng.p.Tuple(ng.p.Scalar(lower=0, upper=2), ng.p.Scalar(lower=0, upper=2))
    loss_values = [[[0.0, 2.0], [1.0, 1.0]], [[0.0, 4.0], [1.0, 3.0], [3.0, 1.0]], [[2.0, 3.0], [4.0, 2.0]]]
    candidates: tp.List[p.Parameter] = []
    expected_frontiers = []
    for vals in loss_values:
        frontier = []
        for losses in vals:
            cand = params.spawn_child().set_standardized_data(losses)
            cand._losses = np.array(losses)
            candidates.append(cand)
            frontier.append(cand)
        expected_frontiers.append(frontier)
    frontiers = nsga2.FastNonDominatedRanking().compute_ranking(candidates)
    # Order within a frontier is irrelevant, hence the set comparison.
    for computed, expected in zip(frontiers, expected_frontiers):
        assert set(computed) == set(expected)
def get_nsga2_test_case_data():
    """Build candidates plus the frontiers they are expected to fall into."""
    params = ng.p.Tuple(ng.p.Scalar(lower=0, upper=2), ng.p.Scalar(lower=0, upper=2))
    loss_values = [[[0.0, 2.0], [1.0, 1.0]], [[0.0, 4.0], [1.0, 3.0], [3.0, 1.0]], [[2.0, 3.0], [4.0, 2.0]]]
    candidates: tp.List[p.Parameter] = []
    expected_frontiers = []
    for vals in loss_values:
        frontier = []
        for losses in vals:
            cand = params.spawn_child().set_standardized_data(losses)
            cand._losses = np.array(losses)
            candidates.append(cand)
            frontier.append(cand)
        expected_frontiers.append(frontier)
    return candidates, expected_frontiers
def test_nsga2_ranking() -> None:
    """Every candidate must be ranked, with frontier index matching expectation."""
    candidates, expected_frontiers = get_nsga2_test_case_data()
    rank_result = nsga2.NSGA2Ranking().rank(candidates, len(candidates))
    assert len(rank_result) == len(candidates)
    for frontier_idx, frontier in enumerate(expected_frontiers):
        for cand in frontier:
            assert rank_result[cand.uid][0] == frontier_idx
def test_nsga2_ranking_2() -> None:
    """Selecting one fewer than two full frontiers drops one frontier-2 candidate."""
    candidates, expected_frontiers = get_nsga2_test_case_data()
    n_selected = len(expected_frontiers[0]) + len(expected_frontiers[1]) - 1
    rank_result = nsga2.NSGA2Ranking().rank(candidates, n_selected)
    assert len(rank_result) == n_selected
    # First frontier: all members kept, infinite crowding distance; track max rank.
    max_rank_frontier1 = 0
    for cand in expected_frontiers[0]:
        assert rank_result[cand.uid][2] == float("inf")
        max_rank_frontier1 = max(max_rank_frontier1, rank_result[cand.uid][0])
    # Second frontier: all selected members must rank after the first frontier.
    selected_in_frontier2 = [c for c in expected_frontiers[1] if c.uid in rank_result]
    for cand in selected_in_frontier2:
        assert rank_result[cand.uid][0] > max_rank_frontier1
    assert len(selected_in_frontier2) == len(expected_frontiers[1]) - 1
def test_nsga2_ranking_3() -> None:
    """With n_selected=None everyone is ranked; only interior points get a finite distance."""
    candidates, expected_frontiers = get_nsga2_test_case_data()
    rank_result = nsga2.NSGA2Ranking().rank(candidates, None)
    assert len(rank_result) == len(candidates)
    for frontier_idx, frontier in enumerate(expected_frontiers):
        for cand in frontier:
            assert rank_result[cand.uid][1] == frontier_idx
        # The two boundary points of each frontier carry infinite distance.
        n_non_inf = sum(1 for c in frontier if rank_result[c.uid][2] != float("inf"))
        assert n_non_inf == max(0, len(frontier) - 2)
def test_nsga2_ranking_4():
    """Ranking a single-objective population must select the lowest losses.

    Fix: dropped the `expected_frontier` local that was created but never
    used (leftover from the multi-objective test cases above).
    """
    params = ng.p.Tuple(ng.p.Scalar(lower=0, upper=2))
    loss_values = [0.0, 1.0, -10.0, 1.0, 3.0, 1.0]
    candidates: tp.List[p.Parameter] = []
    for v in loss_values:
        candidate = params.spawn_child().set_standardized_data(v)
        candidate.loss = np.array(v)
        candidates.append(candidate)
    ranking_method = nsga2.NSGA2Ranking()
    n_selected = 3
    rank_result = ranking_method.rank(candidates, n_selected)
    # Unselected candidates are absent from rank_result; push them to the end.
    candidates.sort(key=lambda x: rank_result[x.uid][0] if x.uid in rank_result else float("inf"))
    loss_from_rank = [r.loss for r in candidates[:n_selected]]
    loss_from_sorted = [np.array(v) for v in sorted(loss_values)[:n_selected]]
    assert loss_from_rank == loss_from_sorted
|
{"hexsha": "1b09231b63a752ffa7b1a9783abe55df2bc5bff8", "size": 6069, "ext": "py", "lang": "Python", "max_stars_repo_path": "nevergrad/optimization/multiobjective/test_nsga2.py", "max_stars_repo_name": "juliendehos/nevergrad", "max_stars_repo_head_hexsha": "b31a66bdc883e29a6c8572e341b4b56cc4157a9d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nevergrad/optimization/multiobjective/test_nsga2.py", "max_issues_repo_name": "juliendehos/nevergrad", "max_issues_repo_head_hexsha": "b31a66bdc883e29a6c8572e341b4b56cc4157a9d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nevergrad/optimization/multiobjective/test_nsga2.py", "max_forks_repo_name": "juliendehos/nevergrad", "max_forks_repo_head_hexsha": "b31a66bdc883e29a6c8572e341b4b56cc4157a9d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.46, "max_line_length": 108, "alphanum_fraction": 0.6739166255, "include": true, "reason": "import numpy", "num_tokens": 1744}
|
function obj_write ( output_file_name, node_num, face_num, normal_num, ...
  order_max, node_xyz, face_order, face_node, normal_vector, vertex_normal )
%*****************************************************************************80
%
%% OBJ_WRITE writes graphics information to an Alias OBJ file.
%
%  Discussion:
%
%    If no normal vectors are supplied (NORMAL_NUM <= 0) then
%    a simple format is used for the "F" records.  Otherwise,
%    the "v//vn" format is used.
%
%  Example:
%
%    # no_normals.obj
%
%    g Group002
%
%    v -3.269770 -39.572201 0.876128
%    v -3.263720 -39.507999 2.160890
%    ...
%    v 0.000000 -9.988540 0.000000
%
%    f 8 9 11 10
%    f 12 13 15 14
%    ...
%    f 788 806 774
%
%    # normals_supplied.obj
%
%    g Group001
%
%    v -3.269770 -39.572201 0.876128
%    v -3.263720 -39.507999 2.160890
%    ...
%    v 0.000000 -9.988540 0.000000
%
%    vn 0.0 1.0 0.0
%    vn 1.0 0.0 0.0
%    ...
%    vn 0.0 0.0 1.0
%
%    f 8//1 9//2 11//3 10//4
%    f 12//5 13//6 15//7 14//8
%    ...
%    f 788//800 806//803 774//807
%
%  Licensing:
%
%    This code is distributed under the GNU LGPL license.
%
%  Modified:
%
%    26 September 2008
%
%  Author:
%
%    John Burkardt
%
%  Parameters:
%
%    Input, string OUTPUT_FILE_NAME, the name of the output file.
%
%    Input, integer NODE_NUM, the number of points.
%
%    Input, integer FACE_NUM, the number of faces.
%
%    Input, integer NORMAL_NUM, the number of normal vectors.
%
%    Input, integer ORDER_MAX, the maximum number of vertices per face.
%
%    Input, real NODE_XYZ(3,NODE_NUM), the coordinates of points.
%
%    Input, integer FACE_ORDER(FACE_NUM), the number of vertices per face.
%
%    Input, integer FACE_NODE(ORDER_MAX,FACE_NUM), the nodes making faces.
%
%    Input, real NORMAL_VECTOR(3,NORMAL_NUM), normal vectors.
%
%    Input, integer VERTEX_NORMAL(ORDER_MAX,FACE_NUM), the indices of normal
%    vectors per vertex.
%
  output_file_unit = fopen ( output_file_name, 'wt' );
  if ( output_file_unit < 0 )
    fprintf ( 1, '\n' );
    fprintf ( 1, 'OBJ_WRITE - Fatal error!\n' );
    fprintf ( 1, '  Could not open the output file "%s".\n', ...
      output_file_name );
    return
  end
  text_num = 0;
  fprintf ( output_file_unit, '# %s\n', output_file_name );
  fprintf ( output_file_unit, '# created by OBJ_WRITE.\n' );
  fprintf ( output_file_unit, '\n' );
  fprintf ( output_file_unit, 'g Group001\n' );
  text_num = text_num + 4;
%
%  V: vertex coordinates.
%  The fourth value is the optional homogeneous weight "w" (defaults to 1.0
%  in the OBJ specification); we write it explicitly.
%
  if ( 0 < node_num )
    fprintf ( output_file_unit, '\n' );
    text_num = text_num + 1;
  end
  w = 1.0;
  for node = 1 : node_num
    fprintf ( output_file_unit, 'v %f %f %f %f\n', node_xyz(1:3,node), w );
    text_num = text_num + 1;
  end
%
%  VN: normal vectors.
%
  if ( 0 < normal_num )
    fprintf ( output_file_unit, '\n' );
    text_num = text_num + 1;
    for normal = 1 : normal_num
      fprintf ( output_file_unit, 'vn %f %f %f\n', normal_vector(1:3,normal) );
      text_num = text_num + 1;
    end
  end
%
%  F: Faces, specified as a list of triples, one triple for each vertex:
%     vertex index/vertex texture index/vertex normal index
%
%  BUG FIX: the original code assigned TEXT = 'f' but never wrote it, so the
%  face records lacked the mandatory leading 'f' keyword and the resulting
%  file was not a valid OBJ file.  The keyword is now written explicitly.
%
  if ( 0 < face_num )
    fprintf ( output_file_unit, '\n' );
    text_num = text_num + 1;
  end
  for face = 1 : face_num
    fprintf ( output_file_unit, 'f' );
    if ( normal_num <= 0 )
      for vertex = 1 : face_order(face)
        fprintf ( output_file_unit, ' %d', face_node(vertex,face) );
      end
      fprintf ( output_file_unit, '\n' );
      text_num = text_num + 1;
    else
      for vertex = 1 : face_order(face)
        fprintf ( output_file_unit, ' %d//%d', ...
          face_node(vertex,face), vertex_normal(vertex,face) );
      end
      fprintf ( output_file_unit, '\n' );
      text_num = text_num + 1;
    end
  end
  fclose ( output_file_unit );
%
%  Report.
%
  fprintf ( 1, '\n' );
  fprintf ( 1, 'OBJ_WRITE:\n' );
  fprintf ( 1, '  Wrote %d text lines to "%s".\n', ...
    text_num, output_file_name );
  return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/obj_io/obj_write.m"}
|
"""
# test_guppi.py
This testbench tests a guppi gpuspec reader
"""
import os
import glob
import numpy as np
import bifrost.pipeline as bfp
from bifrost.blocks import GuppiRawSourceBlock
from scipy.fftpack import fft as scipy_fft
if __name__ == "__main__":
    # Set up the pipeline: read every GUPPI raw file from the test-data folder.
    filenames = sorted(glob.glob('testdata/*.raw'))
    b_read = GuppiRawSourceBlock(filenames, core=0)
    # Run pipeline; dump the pipeline graph first for debugging.
    pipeline = bfp.get_default_pipeline()
    # Fix: parenthesized print works under both Python 2 and Python 3;
    # the original bare print statement is a SyntaxError on Python 3.
    print(pipeline.dot_graph())
    pipeline.run()
|
{"hexsha": "2df37b0e3b1e098a7b692b424d4ebac511db2c10", "size": 509, "ext": "py", "lang": "Python", "max_stars_repo_path": "testbench/test_guppi_reader.py", "max_stars_repo_name": "MilesCranmer/bifrost", "max_stars_repo_head_hexsha": "951dd4a449850d22cfd74f4db13ecf806fe5cc30", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-06-27T10:12:44.000Z", "max_stars_repo_stars_event_max_datetime": "2017-06-27T10:12:44.000Z", "max_issues_repo_path": "testbench/test_guppi_reader.py", "max_issues_repo_name": "MilesCranmer/bifrost", "max_issues_repo_head_hexsha": "951dd4a449850d22cfd74f4db13ecf806fe5cc30", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "testbench/test_guppi_reader.py", "max_forks_repo_name": "MilesCranmer/bifrost", "max_forks_repo_head_hexsha": "951dd4a449850d22cfd74f4db13ecf806fe5cc30", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.2083333333, "max_line_length": 56, "alphanum_fraction": 0.7170923379, "include": true, "reason": "import numpy,from scipy", "num_tokens": 133}
|
import nltk
import numpy as np
import random
import string
# Load the chatbot corpus; decoding errors are ignored rather than raised.
f=open('chatbot.txt','r',errors = 'ignore')
raw=f.read()
raw=raw.lower()# converts to lowercase
# NOTE(review): nltk.download() with no arguments opens the interactive
# downloader UI; for a non-interactive script, nltk.download('punkt') is
# probably what is wanted — confirm before changing.
nltk.download()
sent_tokens = nltk.sent_tokenize(raw)# converts to list of sentences
word_tokens = nltk.word_tokenize(raw)# converts to list of words
|
{"hexsha": "3d41ced0f52cb5205a262847f08b28b04cea669c", "size": 306, "ext": "py", "lang": "Python", "max_stars_repo_path": "School/Chat.py", "max_stars_repo_name": "Bamgm14/My-Random-Work", "max_stars_repo_head_hexsha": "b9678a3a84dd8ff00efd638890cff76eb6967c1b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "School/Chat.py", "max_issues_repo_name": "Bamgm14/My-Random-Work", "max_issues_repo_head_hexsha": "b9678a3a84dd8ff00efd638890cff76eb6967c1b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "School/Chat.py", "max_forks_repo_name": "Bamgm14/My-Random-Work", "max_forks_repo_head_hexsha": "b9678a3a84dd8ff00efd638890cff76eb6967c1b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8181818182, "max_line_length": 69, "alphanum_fraction": 0.7679738562, "include": true, "reason": "import numpy", "num_tokens": 74}
|
''' Build models to detect Algorithmically Generated Domain Names (DGA).
We're trying to classify domains as being 'legit' or having a high probability
of being generated by a DGA (Dynamic Generation Algorithm). We have 'legit' in
quotes as we're using the domains in Alexa as the 'legit' set.
'''
import os, sys
import traceback
import json
import optparse
import pickle
import collections
import sklearn
import sklearn.feature_extraction
import sklearn.ensemble
import sklearn.metrics
import pandas as pd
import numpy as np
import tldextract
import math
# Version printing is always a good idea
print 'Scikit Learn version: %s' % sklearn.__version__
print 'Pandas version: %s' % pd.__version__
print 'TLDExtract version: %s' % tldextract.__version__
# Version 0.12.0 of Pandas has a DeprecationWarning about Height blah that I'm ignoring
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Okay for this model we need the 2LD and nothing else
def domain_extract(uri):
    """Return the registered (second-level) domain of *uri*, or None when
    the URI carries no recognized public suffix."""
    ext = tldextract.extract(uri)
    return ext.domain if ext.suffix else None
# Entropy calc (this must match model_eval)
def entropy(s):
    """Shannon entropy (bits per character) of the string *s*.

    This must stay numerically identical to the copy used in model_eval.
    """
    char_counts = collections.Counter(s)
    total = float(len(s))
    probs = (count / total for count in char_counts.values())
    return -sum(prob * math.log(prob, 2) for prob in probs)
def show_cm(cm, labels):
# Compute percentanges
percent = (cm*100.0)/np.array(np.matrix(cm.sum(axis=1)).T) # Derp, I'm sure there's a better way
print 'Confusion Matrix Stats'
for i, label_i in enumerate(labels):
for j, label_j in enumerate(labels):
print "%s/%s: %.2f%% (%d/%d)" % (label_i, label_j, (percent[i][j]), cm[i][j], cm[i].sum())
def save_model_to_disk(name, model, model_dir='models'):
    """Serialize *model* with pickle and write it to <model_dir>/<name>.model.

    Args:
        name: base file name (without the '.model' extension).
        model: any picklable object.
        model_dir: directory the model file is written into.

    Fixes: the file handle is now closed deterministically via 'with'
    (the original open(...).write(...) left closing to the GC), and the
    print call is valid on both Python 2 and Python 3.
    """
    # First serialize the model
    serialized_model = pickle.dumps(model, protocol=pickle.HIGHEST_PROTOCOL)
    # Model directory + model name
    model_path = os.path.join(model_dir, name + '.model')
    print('Storing Serialized Model to Disk (%s:%.2fMeg)' % (name, len(serialized_model) / 1024.0 / 1024.0))
    with open(model_path, 'wb') as model_file:
        model_file.write(serialized_model)
def load_model_from_disk(name, model_dir='models'):
    """Load a pickled model from <model_dir>/<name>.model.

    Args:
        name: base file name (without the '.model' extension).
        model_dir: directory the model file is read from.

    Returns:
        The unpickled object, or None when the file is missing/unreadable
        (the original best-effort contract is preserved).

    Fixes: the bare 'except:' (which also swallowed KeyboardInterrupt and
    SystemExit) is narrowed to Exception; the file handle is closed via
    'with'; the print call is valid on both Python 2 and Python 3.
    """
    model_path = os.path.join(model_dir, name + '.model')
    try:
        with open(model_path, 'rb') as model_file:
            model = pickle.loads(model_file.read())
    except Exception:
        print('Could not load model: %s from directory %s!' % (name, model_path))
        return None
    return model
def main():
    ''' Main method: loads the Alexa ("legit") and DGA domain lists, engineers
        length/entropy/ngram features, trains a RandomForest classifier to
        separate the two classes, reports a confusion matrix, and serializes
        the trained model plus its vectorizers to disk.

        NOTE(review): this file uses Python 2 syntax throughout (print
        statements, "except Exception, error") and will not run under Python 3.
    '''
    # Handle command-line arguments
    parser = optparse.OptionParser()
    parser.add_option('--alexa-file', default='data/alexa_100k.csv', help='Alexa file to pull from. Default: %default')
    (options, arguments) = parser.parse_args()
    print options, arguments
    try: # Pokemon exception handling ("gotta catch 'em all" -- see except clauses at the bottom)
        # This is the Alexa 1M domain list.
        print 'Loading alexa dataframe...'
        alexa_dataframe = pd.read_csv(options.alexa_file, names=['rank','uri'], header=None, encoding='utf-8')
        print alexa_dataframe.info()
        print alexa_dataframe.head()
        # Compute the 2LD of the domain given by Alexa; rank and raw uri are
        # no longer needed once the 2LD is extracted.
        alexa_dataframe['domain'] = [ domain_extract(uri) for uri in alexa_dataframe['uri']]
        del alexa_dataframe['rank']
        del alexa_dataframe['uri']
        # domain_extract returns None for uris without a valid suffix: drop those.
        alexa_dataframe = alexa_dataframe.dropna()
        alexa_dataframe = alexa_dataframe.drop_duplicates()
        print alexa_dataframe.head()
        # Set the class
        alexa_dataframe['class'] = 'legit'
        # Shuffle the data (important for training/testing)
        alexa_dataframe = alexa_dataframe.reindex(np.random.permutation(alexa_dataframe.index))
        alexa_total = alexa_dataframe.shape[0]
        print 'Total Alexa domains %d' % alexa_total
        # Read in the DGA domains
        dga_dataframe = pd.read_csv('data/dga_domains.txt', names=['raw_domain'], header=None, encoding='utf-8')
        # We noticed that the blacklist values just differ by capitalization or .com/.org/.info
        dga_dataframe['domain'] = dga_dataframe.applymap(lambda x: x.split('.')[0].strip().lower())
        del dga_dataframe['raw_domain']
        # It's possible we have NaNs from blank lines or whatever
        dga_dataframe = dga_dataframe.dropna()
        dga_dataframe = dga_dataframe.drop_duplicates()
        dga_total = dga_dataframe.shape[0]
        print 'Total DGA domains %d' % dga_total
        # Set the class
        dga_dataframe['class'] = 'dga'
        print 'Number of DGA domains: %d' % dga_dataframe.shape[0]
        print dga_dataframe.head()
        # Concatenate the domains in a big pile!
        all_domains = pd.concat([alexa_dataframe, dga_dataframe], ignore_index=True)
        # Add a length field for the domain
        all_domains['length'] = [len(x) for x in all_domains['domain']]
        # Okay since we're trying to detect dynamically generated domains and short
        # domains (length <=6) are crazy random even for 'legit' domains we're going
        # to punt on short domains (perhaps just white/black list for short domains?)
        all_domains = all_domains[all_domains['length'] > 6]
        # Add an entropy field for the domain
        all_domains['entropy'] = [entropy(x) for x in all_domains['domain']]
        print all_domains.head()
        # Now we compute NGrams for every Alexa domain and see if we can use the
        # NGrams to help us better differentiate and mark DGA domains...
        # Scikit learn has a nice NGram generator that can generate either char NGrams or word NGrams (we're using char).
        # Parameters:
        #   - ngram_range=(3,5)  # Give me all ngrams of length 3, 4, and 5
        #   - min_df=1e-4        # Minimum document frequency. At 1e-4 we're saying give us NGrams that
        #                        # happen in at least .1% of the domains (so for 100k... at least 100 domains)
        alexa_vc = sklearn.feature_extraction.text.CountVectorizer(analyzer='char', ngram_range=(3,5), min_df=1e-4, max_df=1.0)
        # I'm SURE there's a better way to store all the counts but not sure...
        # At least the min_df parameters has already done some thresholding
        counts_matrix = alexa_vc.fit_transform(alexa_dataframe['domain'])
        # log10 of the per-ngram totals: damps the dominance of very common ngrams.
        alexa_counts = np.log10(counts_matrix.sum(axis=0).getA1())
        ngrams_list = alexa_vc.get_feature_names()
        # For fun sort it and show it
        import operator
        _sorted_ngrams = sorted(zip(ngrams_list, alexa_counts), key=operator.itemgetter(1), reverse=True)
        print 'Alexa NGrams: %d' % len(_sorted_ngrams)
        for ngram, count in _sorted_ngrams[:10]:
            print ngram, count
        # We're also going to throw in a bunch of dictionary words
        word_dataframe = pd.read_csv('data/words.txt', names=['word'], header=None, dtype={'word': np.str}, encoding='utf-8')
        # Cleanup words from dictionary
        word_dataframe = word_dataframe[word_dataframe['word'].map(lambda x: str(x).isalpha())]
        word_dataframe = word_dataframe.applymap(lambda x: str(x).strip().lower())
        word_dataframe = word_dataframe.dropna()
        word_dataframe = word_dataframe.drop_duplicates()
        print word_dataframe.head(10)
        # Now compute NGrams on the dictionary words
        # Same logic as above...
        dict_vc = sklearn.feature_extraction.text.CountVectorizer(analyzer='char', ngram_range=(3,5), min_df=1e-5, max_df=1.0)
        counts_matrix = dict_vc.fit_transform(word_dataframe['word'])
        dict_counts = np.log10(counts_matrix.sum(axis=0).getA1())
        ngrams_list = dict_vc.get_feature_names()
        # For fun sort it and show it
        import operator
        _sorted_ngrams = sorted(zip(ngrams_list, dict_counts), key=operator.itemgetter(1), reverse=True)
        print 'Word NGrams: %d' % len(_sorted_ngrams)
        for ngram, count in _sorted_ngrams[:10]:
            print ngram, count
        # We use the transform method of the CountVectorizer to form a vector
        # of ngrams contained in the domain, that vector is then multiplied
        # by the counts vector (which is a column sum of the count matrix).
        def ngram_count(domain):
            # Debug helper: print how strongly *domain* matches each ngram model.
            alexa_match = alexa_counts * alexa_vc.transform([domain]).T # Woot vector multiply and transpose Woo Hoo!
            dict_match = dict_counts * dict_vc.transform([domain]).T
            print '%s Alexa match:%d Dict match: %d' % (domain, alexa_match, dict_match)
        # Examples:
        ngram_count('google')
        ngram_count('facebook')
        ngram_count('1cb8a5f36f')
        ngram_count('pterodactylfarts')
        ngram_count('ptes9dro-dwacty2lfa5rrts')
        ngram_count('beyonce')
        ngram_count('bey666on4ce')
        # Compute NGram matches for all the domains and add to our dataframe
        all_domains['alexa_grams']= alexa_counts * alexa_vc.transform(all_domains['domain']).T
        all_domains['word_grams']= dict_counts * dict_vc.transform(all_domains['domain']).T
        print all_domains.head()
        # Use the vectorized operations of the dataframe to investigate differences
        # between the alexa and word grams
        all_domains['diff'] = all_domains['alexa_grams'] - all_domains['word_grams']
        # The table below shows those domain names that are more 'dictionary' and less 'web'
        print all_domains.sort_values(['diff'], ascending=True).head(10)
        # The table below shows those domain names that are more 'web' and less 'dictionary'
        # Good O' web....
        print all_domains.sort_values(['diff'], ascending=False).head(50)
        # Lets look at which Legit domains are scoring low on both alexa and word gram count
        weird_cond = (all_domains['class']=='legit') & (all_domains['word_grams']<3) & (all_domains['alexa_grams']<2)
        weird = all_domains[weird_cond]
        print weird.shape[0]
        print weird.head(10)
        # Epiphany... Alexa really may not be the best 'exemplar' set...
        #             (probably a no-shit moment for everyone else :)
        #
        # Discussion: If you're using these as exemplars of NOT DGA, then you're probably
        #             making things very hard on your machine learning algorithm.
        #             Perhaps we should have two categories of Alexa domains, 'legit'
        #             and a 'weird'. based on some definition of weird.
        #             Looking at the entries above... we have approx 80 domains
        #             that we're going to mark as 'weird'.
        #
        all_domains.loc[weird_cond, 'class'] = 'weird'
        print all_domains['class'].value_counts()
        all_domains[all_domains['class'] == 'weird'].head()
        # Perhaps we will just exclude the weird class from our ML training
        not_weird = all_domains[all_domains['class'] != 'weird']
        X = not_weird.as_matrix(['length', 'entropy', 'alexa_grams', 'word_grams'])
        # Labels (scikit learn uses 'y' for classification labels)
        y = np.array(not_weird['class'].tolist())
        # Random Forest is a popular ensemble machine learning classifier.
        # http://scikit-learn.org/dev/modules/generated/sklearn.ensemble.RandomForestClassifier.html
        clf = sklearn.ensemble.RandomForestClassifier(n_estimators=20) # Trees in the forest
        # Train on a 80/20 split
        from sklearn.model_selection import train_test_split
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        # Now plot the results of the holdout set in a confusion matrix
        labels = ['legit', 'dga']
        cm = sklearn.metrics.confusion_matrix(y_test, y_pred, labels)
        show_cm(cm, labels)
        # We can also look at what features the learning algorithm thought were the most important
        importances = zip(['length', 'entropy', 'alexa_grams', 'word_grams'], clf.feature_importances_)
        print importances
        # Now train on the whole thing before doing tests and saving models to disk
        clf.fit(X, y)
        # test_it shows how to do evaluation, also fun for manual testing below :)
        def test_it(domain):
            # Build the same 4-feature vector used for training, then classify.
            _alexa_match = alexa_counts * alexa_vc.transform([domain]).T # Woot matrix multiply and transpose Woo Hoo!
            _dict_match = dict_counts * dict_vc.transform([domain]).T
            _X = [[len(domain), entropy(domain), _alexa_match, _dict_match]]
            print '%s : %s' % (domain, clf.predict(_X)[0])
        # Examples (feel free to change these and see the results!)
        test_it('google')
        test_it('google88')
        test_it('facebook')
        test_it('1cb8a5f36f')
        test_it('pterodactylfarts')
        test_it('ptes9dro-dwacty2lfa5rrts')
        test_it('beyonce')
        test_it('bey666on4ce')
        test_it('supersexy')
        test_it('yourmomissohotinthesummertime')
        test_it('35-sdf-09jq43r')
        test_it('clicksecurity')
        # Serialize model to disk (classifier + both vectorizers and their counts,
        # since all five pieces are needed to score a new domain)
        save_model_to_disk('dga_model_random_forest', clf)
        save_model_to_disk('dga_model_alexa_vectorizor', alexa_vc)
        save_model_to_disk('dga_model_alexa_counts', alexa_counts)
        save_model_to_disk('dga_model_dict_vectorizor', dict_vc)
        save_model_to_disk('dga_model_dict_counts', dict_counts)
    except KeyboardInterrupt:
        print 'Goodbye Cruel World...'
        sys.exit(0)
    except Exception, error:
        traceback.print_exc()
        print '(Exception):, %s' % (str(error))
        sys.exit(1)
if __name__ == '__main__':
    main()
|
{"hexsha": "df7e373485aabb7fc54a4eac365b136a11b57740", "size": 13941, "ext": "py", "lang": "Python", "max_stars_repo_path": "dga_detection/dga_model_gen.py", "max_stars_repo_name": "bkaimal/data_hacking", "max_stars_repo_head_hexsha": "dd89fb8d7a53b07134cf72f80c61f6e943e21de5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dga_detection/dga_model_gen.py", "max_issues_repo_name": "bkaimal/data_hacking", "max_issues_repo_head_hexsha": "dd89fb8d7a53b07134cf72f80c61f6e943e21de5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dga_detection/dga_model_gen.py", "max_forks_repo_name": "bkaimal/data_hacking", "max_forks_repo_head_hexsha": "dd89fb8d7a53b07134cf72f80c61f6e943e21de5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4910714286, "max_line_length": 127, "alphanum_fraction": 0.6626497382, "include": true, "reason": "import numpy", "num_tokens": 3413}
|
#!/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 12 12:18:00 2022
Author: Gianluca Bianco
"""
#################################################
# Libraries
#################################################
from termcolor import colored
from mathematics import e_parser
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as sci
#################################################
# "plotter_complex" function
#################################################
def plotter_complex( real_part, imaginary_part, a, b, n, coefficient ):
    """
    Plot a (normalized) complex wave-function for a given index n.

    Depending on which of the real/imaginary parts is "0", either a single
    real curve (spline-smoothed) is drawn against x, or the complex values
    are traced as a parametric curve in the (Re, Im) plane.

    Args:
        real_part (string): mathematical real expression part.
        imaginary_part (string): mathematical imaginary expression part.
        a (any): lower integration extreme (may be -np.inf).
        b (any): higher integration extreme (may be np.inf).
        n (int): wave function index.
        coefficient (any): value of the normalization coefficient, or the
            colored "Error, division by 0!" string produced upstream, in
            which case nothing is plotted.

    Returns:
        plot: the wave-function plot for the index n is shown via plt.show().
    """
    # Sentinel check: upstream signals a failed normalization by returning
    # this exact colored error string instead of a number.
    if coefficient != colored( "Error, division by 0!", "red" ):
        # Build a coarse 10-point sample grid; infinite extremes are clamped
        # to +/-10. NOTE(review): finite extremes are scaled by 10 — presumably
        # intentional zoom-out, confirm against callers.
        if a == -np.inf and b != np.inf:
            x = np.arange( -10, b, ( ( b+10 ) / 10 ) )
        elif a != -np.inf and b == np.inf:
            x = np.arange( a, 10, ( ( 10-a ) / 10 ) )
        elif a == -np.inf and b == np.inf:
            x = np.arange( -10, 10, ( ( 20 ) / 10 ) )
        else:
            x = np.arange( 10*a, 10*b, ( ( 10*( b-a ) ) / 10 ) )
        def func( x ):
            # Normalized wave-function value at x.
            return coefficient * e_parser( real_part, imaginary_part, n, x )
        my_label = "Normalized wave-function f(x) for n = " + str( n )
        plt.figure( figsize = ( 8, 6 ), dpi = 80 )
        plt.xlabel( "Re: f(x)" )
        plt.ylabel( "Im: f(x)" )
        plt.title( my_label )
        # Purely imaginary function: plot Im f(x) against x with spline smoothing.
        if real_part == "0" and imaginary_part != "0":
            X_Y_Spline = sci.make_interp_spline( x, np.imag( func( x ) ) )
            X = np.linspace( x.min(), x.max(), 500 )
            Y = X_Y_Spline( X )
            plt.xlabel( "x" )
            plt.ylabel( "Im: f(x)" )
            plt.plot( X, Y, color = "green" )
        # Purely real function: plot Re f(x) against x with spline smoothing.
        elif real_part != "0" and imaginary_part == "0":
            X_Y_Spline = sci.make_interp_spline( x, np.real( func( x ) ) )
            X = np.linspace( x.min(), x.max(), 500 )
            Y = X_Y_Spline( X )
            plt.xlabel( "x" )
            plt.ylabel( "Re: f(x)" )
            plt.plot( X, Y, color = "green" )
        # Genuinely complex: trace the curve parametrically in the (Re, Im) plane.
        else:
            X = np.real( func( x ) )
            Y = np.imag( func( x ) )
            tck, u = sci.splprep( [ X, Y ], s = 0 )
            unew = np.arange( 0, 1.01, 0.01 )
            out = sci.splev( unew, tck )
            plt.plot( X, Y, 'x', out[0], out[1], color = "green" )
        plt.show()
|
{"hexsha": "4b85105addf3bb5e6821a7830351271fe51ddef0", "size": 2854, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/arsenalgear/plotter.py", "max_stars_repo_name": "JustWhit3/arsenalgear", "max_stars_repo_head_hexsha": "a9402279fa40d46f310b387a8d4cd13883d07cb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/arsenalgear/plotter.py", "max_issues_repo_name": "JustWhit3/arsenalgear", "max_issues_repo_head_hexsha": "a9402279fa40d46f310b387a8d4cd13883d07cb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/arsenalgear/plotter.py", "max_forks_repo_name": "JustWhit3/arsenalgear", "max_forks_repo_head_hexsha": "a9402279fa40d46f310b387a8d4cd13883d07cb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1265822785, "max_line_length": 76, "alphanum_fraction": 0.4751226349, "include": true, "reason": "import numpy,import scipy", "num_tokens": 757}
|
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import os
from PIL import Image
# Machine-specific dataset layout.
# NOTE(review): absolute Windows user paths — this script only runs unchanged
# on the original author's machine; parameterize before reuse.
traindir = r'\train'
valdir = r'\validation'
testdir = r'\test'
PATH = os.path.join(r'C:\Users\shast\.keras\datasets', 'cats_and_dogs_filtered')
#os.makedirs(r'C:\Users\shast\.keras\datasets\cats-und-dogs')
path = r'C:\Users\shast\.keras\datasets\cats-und-dogs'
#os.makedirs(path+traindir)
#os.makedirs(path+valdir)
#os.makedirs(path+testdir)
# Work from inside the dataset directory so the generators below can use
# relative folder names.
os.chdir(path)
train_dir = 'train'
test_dir = 'test'
val_dir = 'validation'
'''train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, "dogs")
val_cats_dir = os.path.join(validation_dir, 'cats')
val_dogs_dir = os.path.join(validation_dir, 'dogs')
train_total = len(os.listdir(train_cats_dir)) + len(os.listdir(train_dogs_dir))
val_total = len(os.listdir(val_cats_dir)) + len(os.listdir(val_dogs_dir))'''
# Input size and training hyperparameters.
image_height, image_width = 150, 150
batch_size = 10
epochs= 30
# One shared generator applying MobileNet's own input preprocessing.
generator = keras.preprocessing.image.ImageDataGenerator(preprocessing_function=tf.keras.applications.mobilenet.preprocess_input)
train_data = generator.flow_from_directory(train_dir, target_size=(image_width, image_height), batch_size=batch_size)
val_data = generator.flow_from_directory(val_dir, target_size=(image_width, image_height), batch_size=batch_size)
# shuffle=False keeps predict() output aligned with test_data.classes below.
test_data = generator.flow_from_directory(test_dir, target_size=(image_width, image_height), batch_size=batch_size, shuffle=False)
# Transfer learning: take MobileNet up to its 6th-from-last layer and attach
# a fresh 2-way softmax head (cats vs. dogs).
mobile = keras.applications.mobilenet.MobileNet()
last_layer = mobile.layers[-6].output
out = keras.layers.Dense(2, activation='softmax')(last_layer)
model = keras.Model(mobile.input, out)
# Freeze everything except the last 5 layers for fine-tuning.
for layer in model.layers[:-5]:
    layer.trainable = False
model.summary()
# NOTE(review): `lr` is the legacy argument name; newer Keras spells it
# `learning_rate` — confirm against the TF version in use.
model.compile(optimizer=keras.optimizers.Adam(lr=0.0001), loss='categorical_crossentropy', metrics=["accuracy"])
#hist = model.fit(train_data, epochs=epochs, steps_per_epoch=8, validation_data = val_data, validation_steps=4)
# NOTE(review): the fit call above is commented out, so the model saved here
# still has an untrained head — confirm this is intentional.
model.save('mobilenet_finetune.h5')
test_labels = test_data.classes
print(test_labels)
model = keras.models.load_model('mobilenet_finetune.h5')
# steps=2 only evaluates the first 2 batches (20 images) of the test set.
predictions = model.predict(test_data, steps=2, verbose=0)
print(predictions)
|
{"hexsha": "dac3a3bc49477d67e6d7a3d3363f77725120556f", "size": 2341, "ext": "py", "lang": "Python", "max_stars_repo_path": "mobile_net.py", "max_stars_repo_name": "parth-shastri/neural_nets_tensorflow", "max_stars_repo_head_hexsha": "aea973fba6042c4317511e230532457a6a84fd73", "max_stars_repo_licenses": ["Intel"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mobile_net.py", "max_issues_repo_name": "parth-shastri/neural_nets_tensorflow", "max_issues_repo_head_hexsha": "aea973fba6042c4317511e230532457a6a84fd73", "max_issues_repo_licenses": ["Intel"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mobile_net.py", "max_forks_repo_name": "parth-shastri/neural_nets_tensorflow", "max_forks_repo_head_hexsha": "aea973fba6042c4317511e230532457a6a84fd73", "max_forks_repo_licenses": ["Intel"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3620689655, "max_line_length": 131, "alphanum_fraction": 0.7659120034, "include": true, "reason": "import numpy", "num_tokens": 577}
|
'''
Initialize and update configurations (basis states) in the Sz-basis
Calculate ratios of overlaps based on spin flips
'''
import numpy as np
SQRT_2 = np.sqrt(2)
SQRT_HALF = np.sqrt(0.5)
def is_odd(num):
    """Return 1 if *num* is odd, 0 if it is even."""
    return num % 2
def sz_director_basis(d):
    """Transform S=1 directors from the quadrupolar (x, y, z) basis to the Sz basis.

    :param d: (3, N) complex array, one director column per site
    :return: (3, N) array of the same directors expressed in the Sz basis
    :raises NotImplementedError: if d does not have 3 rows (only S=1 supported)
    """
    if d.shape[0] != 3:
        raise NotImplementedError('Only allowing S=1 directors for now')
    sqrt_half = np.sqrt(0.5)
    dx, dy, dz = d[0, :], d[1, :], d[2, :]
    return np.array([sqrt_half * (dy - 1j * dx),
                     1j * dz,
                     sqrt_half * (dy + 1j * dx)])
def quad_director_basis(d):
    """Transform S=1 directors from the Sz basis back to the quadrupolar basis.

    Inverse companion of sz_director_basis.

    :param d: (3, N) complex array of Sz-basis directors
    :return: (3, N) array of directors in the quadrupolar (x, y, z) basis
    :raises NotImplementedError: if d does not have 3 rows (only S=1 supported)
    """
    if d.shape[0] != 3:
        raise NotImplementedError('Only allowing S=1 directors for now')
    sqrt_half = np.sqrt(0.5)
    plus, zero, minus = d[0, :], d[1, :], d[2, :]
    return np.array([sqrt_half * 1j * (plus - minus),
                     sqrt_half * (plus + minus),
                     -1j * zero])
def euler_s1(alpha, beta, gamma, d_list, sz_basis=False):
    '''
    perform an Euler rotation on the directors d_list. Defined for example in section 3.5 of Sakurai
    (pg 198, eqs 3.5.50 and 3.5.57)
    :param alpha: Euler angle alpha (second rotation around Jz)
    :param beta: Euler angle beta (rotation around Jy)
    :param gamma: Euler angle gamma (first rotation around Jz)
    :param d_list: List of directors to rotate
    :param sz_basis: By default, directors are in quadrupolar basis and need to be transformed to sz-basis.
                     This parameter is True if d_list is in Sz basis
    :return: rotated d_list
    '''
    # Work in the Sz basis throughout; convert in (and back out) if needed.
    if not sz_basis:
        d_list = sz_director_basis(d_list)
    sqrt_half = np.sqrt(0.5)
    # m-values (+1, 0, -1) repeated per row; transposed copy gives the m' values.
    m_signs = np.array([[1., 0., -1.]] * 3)
    phases = np.exp(-1j * gamma * m_signs - 1j * alpha * m_signs.T)
    sb, cb = np.sin(beta), np.cos(beta)
    # Wigner small-d matrix d^1(beta).
    wigner_d = np.array([[0.5 * (1 + cb), -sqrt_half * sb, 0.5 * (1 - cb)],
                         [sqrt_half * sb, cb, -sqrt_half * sb],
                         [0.5 * (1 - cb), sqrt_half * sb, 0.5 * (1 + cb)]])
    # Full rotation matrix: element-wise phases times the small-d matrix.
    rotated = np.matmul(phases * wigner_d, d_list)
    if not sz_basis:
        rotated = quad_director_basis(rotated)
    return rotated
class Configuration:
    """Spin configuration (basis state) in the Sz basis.

    Stores one Sz label per lattice site, with a fixed number of sites per
    spin value, and supports in-place spin flips.
    """
    # Class-level defaults; every instance overwrites them in __init__.
    size = 2
    S2 = 1
    num_each = 1
    half_integer = True
    conf = np.array([1, -1])

    def __init__(self, size, S2, num_each, initial=None):
        """
        :param size: number of sites
        :param S2: 2S — 1 for spin-1/2, 2 for spin-1, etc.
        :param num_each: tuple (n(-S), ..., n(S)) of site counts per spin value
        :param initial: optional starting configuration (copied, not aliased)
        """
        assert S2 + 1 == len(num_each)
        assert sum(num_each) == size
        self.size = size
        self.S2 = S2
        self.num_each = num_each
        self.half_integer = self.S2 & 0x1  # parity of 2S (inlined is_odd)
        self.conf = self.random_conf() if initial is None else initial.copy()
        # TODO: check initialization to ensure consistency with other inputs

    def __str__(self):
        return str(self.conf)

    def __getitem__(self, key):
        return self.conf[key]

    def random_conf(self):
        """Draw a random configuration honoring the per-spin-value counts."""
        shuffled = np.random.permutation(np.arange(self.size))
        groups = np.split(shuffled, np.cumsum(self.num_each))[:-1]
        configuration = np.array([0] * self.size)
        # NOTE(review): for half-integer S the labels step by 1 from -S2
        # (e.g. -1 then 0 for S=1/2), which disagrees with the +-1 convention
        # of the class-level default conf — confirm intended labeling.
        spin_val = -self.S2 if self.half_integer else -np.rint(self.S2 / 2)
        for sites in groups:
            configuration[sites] = spin_val
            spin_val += 1
        return configuration

    def update(self, flip_list):
        """Apply spin flips in place, asserting each recorded old value first."""
        for flip in flip_list:
            assert self.conf[flip['site']] == flip['old_spin']
            self.conf[flip['site']] = flip['new_spin']

    def get_conf(self):
        """Return the raw configuration array (not a copy)."""
        return self.conf

    def get_sz(self, site):
        """Sz eigenvalue at *site*: the conf label scaled by S = S2/2."""
        return 0.5 * self.S2 * self.conf[site]

    def sz_conf_idx(self, m):
        """Map a conf label m to its row index in an Sz-basis director."""
        offset = self.S2 if self.half_integer else int(0.5 * self.S2)
        return offset - m

    def get_sz_idx(self, site):
        """Sz-basis row index for the current spin at *site*."""
        return self.sz_conf_idx(self.conf[site])
class JastrowFactor:
    """One Jastrow correlation factor J = exp(1/2 sum_ij v O_i O_j).

    Maintains a per-site table of neighbor sums so that flip ratios can be
    evaluated lazily without recomputing the full double sum.
    """
    couples_to = 'sz'
    strength = 0.0
    neighbor_table = np.array([0.0])
    exp_table = np.array([0.0])

    def __init__(self, couples_to, strength, neighbors, configuration):
        """
        Jastrow table with associated coupling. J = exp(1/2 sum_ij v O_i O_j)
        :param couples_to: diagonal operator (sz, sz2, etc.)
        :param strength: coupling strength v
        :param neighbors: list of sites and their neighbors associated with this factor
        :param configuration: initial configuration for setting the table
        :raises NotImplementedError: if couples_to is anything other than 'sz'
        """
        if couples_to != 'sz':
            raise NotImplementedError('Jastrow Factor must couple to Sz!')
        self.couples_to = couples_to
        self.strength = strength
        self.neighbor_table = neighbors
        self.initialize_table(configuration)

    def initialize_table(self, configuration):
        """
        table of site-sums. exp_table[i] = sum(v * sum_j O_j)
        J = exp(exp_table[i] dot O(conf[i]))
        """
        # Fix: use the builtin sum over the generator. np.sum(<generator>) is
        # deprecated and merely fell back to the builtin with a warning.
        self.exp_table = np.array([sum(self.strength * configuration[j] for j in neighborlist)
                                   for neighborlist in self.neighbor_table])

    def greedy_eval(self, configuration):
        """Rebuild the table from scratch and return the full factor value."""
        self.initialize_table(configuration)
        return np.exp(0.5*np.dot(self.exp_table, configuration))

    def lazy_eval(self, flip_list):
        """Return J(new)/J(old) for the proposed flips using the cached table."""
        flip_sum = 0.0
        neighbor_sum = 0.0
        for flip in flip_list:
            del_s = flip['new_spin'] - flip['old_spin']
            # Cross terms between simultaneously flipped neighboring sites.
            flip_sum += np.sum([self.strength * del_s * flip2['new_spin'] for flip2 in flip_list if
                                flip2['site'] in self.neighbor_table[flip['site']]])
            neighbor_sum += del_s * self.exp_table[flip['site']]
        return np.exp(flip_sum + neighbor_sum)

    def update_tables(self, flip_list):
        """Fold accepted flips into exp_table so lazy_eval stays consistent."""
        flip_sites = [flip['site'] for flip in flip_list]
        del_S = [flip['new_spin'] - flip['old_spin'] for flip in flip_list]
        update_list = np.zeros(len(self.neighbor_table))
        for flipsite, dels in zip(flip_sites, del_S):
            update_list[flipsite] = self.strength * dels
        for idx in range(len(self.neighbor_table)):
            self.exp_table[idx] += np.sum(update_list[self.neighbor_table[idx]])
class JastrowTable:
    """Collection of JastrowFactor objects evaluated as a single product."""
    # Class-level default, replaced per-instance in __init__.
    jastrows = []

    def __init__(self, jastrow_list=None, jastrow_kwargs_list=None):
        """Build from ready-made factors, or from kwargs for JastrowFactor.

        :param jastrow_list: pre-constructed factor objects (takes precedence)
        :param jastrow_kwargs_list: list of kwargs dicts, one per factor
        :raises RuntimeError: if neither argument is provided
        """
        if jastrow_list is None and jastrow_kwargs_list is None:
            raise RuntimeError('Must enter a valid parameter to JastrowTable()')
        if jastrow_list is not None:
            self.jastrows = jastrow_list
        else:
            self.jastrows = [JastrowFactor(**kw) for kw in jastrow_kwargs_list]

    def greedy_eval(self, configuration):
        """Full product of all factors, rebuilt from the configuration."""
        values = [factor.greedy_eval(configuration) for factor in self.jastrows]
        return np.prod(values)

    def lazy_eval(self, flip_list):
        """Product of all factor ratios for the proposed flips."""
        ratios = [factor.lazy_eval(flip_list) for factor in self.jastrows]
        return np.prod(ratios)

    def update_tables(self, flip_list):
        """Propagate accepted flips into every factor's cached table."""
        for factor in self.jastrows:
            factor.update_tables(flip_list)
class Wavefunction(object):
    """Base class for variational wavefunctions over spin configurations.

    Subclasses must implement psi_over_psi (amplitude ratios for proposed
    flips) and update (accept a set of flips).
    """
    # Class-level default, replaced per-instance in __init__.
    configuration = Configuration(2, 1, (1, 1))

    def __init__(self, conf_init):
        """
        :param conf_init: kwargs for initializing the configuration
        """
        self.configuration = Configuration(**conf_init)

    def get_conf(self):
        """Return the underlying configuration array."""
        return self.configuration.get_conf()

    def psi_over_psi(self, flip_list):
        """Ratio psi(flipped)/psi(current); subclasses must override."""
        raise NotImplementedError('psi_over_psi must be defined for your Wavefunction!')

    def update(self, flip_list):
        """Accept the proposed flips; subclasses must override."""
        raise NotImplementedError('update must be defined for your Wavefunction!')
class ProductState(Wavefunction):
    """Site-factorized director product state with optional Jastrow factors."""
    jastrow_table = None

    def __init__(self, conf_init, directors, jastrow_init=None):
        """
        Site-factorized state of directors
        :param conf_init: kwargs for initializing the configuration
        :param directors: numpy array of directors by site. A director is a complex vector with 2S+1 elements
        :param jastrow_init: kwargs for initializing the jastrow factors
        """
        Wavefunction.__init__(self, conf_init)
        assert(self.configuration.size == directors.shape[1])
        self.directors = directors
        # Normalize each site's director column to unit length.
        norms = np.sum(self.directors * np.conj(self.directors), 0)
        self.directors = self.directors / np.sqrt(norms)
        self.directors_sz = sz_director_basis(self.directors)
        # Cache the current <sz(site)|director(site)> overlap per site.
        self.site_overlaps = np.array([self.directors_sz[self.configuration.get_sz_idx(site), site]
                                       for site in range(self.configuration.size)])
        # Initialize the Jastrow factors, wiring in our configuration when
        # the caller did not supply one explicitly.
        if jastrow_init is not None:
            for factor in jastrow_init:
                if 'configuration' not in factor.keys():
                    factor['configuration'] = self.configuration
            self.jastrow_table = JastrowTable(jastrow_kwargs_list=jastrow_init)

    def psi_over_psi(self, flip_list):
        """Amplitude ratio psi(flipped)/psi(current) for the proposed flips."""
        old_prod = np.prod([self.site_overlaps[flip['site']] for flip in flip_list])
        new_prod = np.prod([self.directors_sz[self.configuration.sz_conf_idx(flip['new_spin']), flip['site']]
                            for flip in flip_list])
        if self.jastrow_table is not None:
            jastrow_ratio = self.jastrow_table.lazy_eval(flip_list)
        else:
            jastrow_ratio = 1.0
        return jastrow_ratio * np.divide(new_prod, old_prod)

    def update(self, flip_list):
        """Accept the flips: update configuration, Jastrow tables and overlaps."""
        self.configuration.update(flip_list)
        # Bug fix: previously this called update_tables unconditionally and
        # crashed with AttributeError for states built without Jastrow
        # factors (jastrow_table is None), even though psi_over_psi already
        # guards against that case.
        if self.jastrow_table is not None:
            self.jastrow_table.update_tables(flip_list)
        for flip in flip_list:
            self.site_overlaps[flip['site']] = self.directors_sz[self.configuration.get_sz_idx(flip['site']), flip['site']]
class UniformState(Wavefunction):
    """Uniform wavefunction: psi = 1 (constant) for every basis state."""

    def __init__(self, conf_init):
        """
        :param conf_init: kwargs for initializing the configuration
        """
        super(UniformState, self).__init__(conf_init)

    def psi_over_psi(self, flip_list):
        # Every configuration has equal amplitude, so all ratios are unity.
        return 1.0

    def update(self, flip_list):
        # No cached state here; only the configuration needs updating.
        self.configuration.update(flip_list)
|
{"hexsha": "5f52f0a325ac9b192842476c8bac2375931be03c", "size": 10263, "ext": "py", "lang": "Python", "max_stars_repo_path": "wavefunction.py", "max_stars_repo_name": "butchertx/pyvmc", "max_stars_repo_head_hexsha": "1c10d95a2036160769488a51423a261352e20a01", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "wavefunction.py", "max_issues_repo_name": "butchertx/pyvmc", "max_issues_repo_head_hexsha": "1c10d95a2036160769488a51423a261352e20a01", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wavefunction.py", "max_forks_repo_name": "butchertx/pyvmc", "max_forks_repo_head_hexsha": "1c10d95a2036160769488a51423a261352e20a01", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5555555556, "max_line_length": 139, "alphanum_fraction": 0.6186300302, "include": true, "reason": "import numpy", "num_tokens": 2569}
|
Stadium 5 on G Street has stadiumstyle seating (and smaller theaters and screens than the Regal Holiday), as well as digital surround sound systems in all of its theaters.
Both Stadium and Regal Cinemas Davis Holiday 6 Holiday split what they show, so make sure you have the right theater when you show up. These two Movie Theaters theaters both run a $6.50 Movie Tuesday. Joining the Regal Crown Club and getting the card also entitles you to a $2 small popcorn on Tuesdays. You can upgrade to a larger popcorn for a little more. There is also an educational discount (with a student or staff Aggie Card) at both theaters for those nondiscount days(Sun Thurs Only). You can also pay for your Movies movie and popcorn with a credit card. They also give military discounts.
In summer (2011) Regal Cinemas Davis Stadium 5 Stadium 5 and the Regal Natomas Marketplace 16 in Sacramento showed G and PG rated films at 10:00am on Tuesdays and Wednesdays for $1. The http://www.regmovies.com/summermovieexpress/default.aspx?zip95616 Summer Movie Express reruns movies have long since left the theater.
If you like sitting in the center of the theater, you should go 3 light grayish half triangles up, and 7/8 seats in. Users/StevenDaubert
If you have a bad neck, you should try and sit so your vision level is at two thirds up the screen, which normally corresponds to two thirds up the seats. Users/TerrenceMurphy
Regal on Facebook https://www.facebook.com/RegalEntertainmentGroup?skwall link
See Movie Theaters for other theaters in Davis and the surrounding area.
20060619 14:26:07 nbsp This theater really sucks the screens are very small, and before the movie starts they show commercials from a video projector. If its a widescreen movie, a curtain comes down from the top and actually makes the screen SMALLER for the movie than for the commercials! Not what a theater should be at all, and we lost the old Cinema 2 for this! Users/AlanSmithee
20060619 15:29:22 nbsp I much prefer this theater to F st. It tends to be cleaner, the seats are in better condition and in better locations, and the crowds are smaller so it is less likely youll get that one annoying person who thinks theyre in a cafe. Unfortunately, they stick all of the high demand movies at F st (presumably because the theaters are bigger there). The smaller screen size doesnt bother me. Users/AlexPomeranz
20070401 14:17:53 nbsp i enjoyed my movie seeing experience there Users/JackkiCox
20070505 16:47:26 nbsp This theater has comfortable stadium seating for great sightlines and a very good digital surround sound system. Users/Jedron
20070601 21:19:47 nbsp We went and saw POTC 3 yesterday. Everything was great but the food is way over priced. It was $16 for a popcorn and 2 sodas. Just crazy!!!
Users/HeatherFlood
20070604 02:34:42 nbsp Thanks for posting the photo of the theater! Users/Jedron
20071110 15:02:59 nbsp This theater is nice enough, but unlike many stadium style theaters, the armrests do not lift up to make loveseats and the seats in the stepped part do not have any recline in them whatsoever, although they are still comfortable. The most disturbing thing about this theater was the lack of soundproofing. In Auditorium 1, the movie is noticeably disrupted every time a train rumbles by outside. If it was a bargain theater, it wouldnt be a big deal, but for $9.50 plus a small fortune for popcorn, I dont want to hear the trains. Users/RyanCoates
20080110 11:19:22 nbsp Win Passes To See Cloverfield from the Film Geekz. http://iesb.net/filmgeekz/content/view/388/
Two Ways To Win!
We are a new site based in the Sac/Davis area and we are looking to connect with local readers. Well be doing many other local contests and screenings so take advantage and get in on the fun.
www.filmgeekz.com Users/FilmGeek
20080313 16:20:31 nbsp I was a projectionist at this theater for a couple years. Yes, when the movie print is in CinemaScope (widescreen) format, the curtain lowers to cover the portion of the screen that is not being used. Think of it like the black bars on your TV when you watch a movie in widescreen format. Users/Default77
20080503 11:35:58 nbsp when you sit down, be sure to go to the tip of the third grey triangle, and then go 7 seats in dead center of theatre >:D Users/StevenDaubert
20090222 22:47:18 nbsp Has anyone ever noticed the Davis theaters dont often show many movies with AfricanAmerican casts? For example this weekend Tyler Perrys Madea Goes to Jail topped the box office with over 41 million, without help from one screen in Davis. Users/GreezySweezy
It probably has something to do with a lack of a big enough audience to make showing the movie profitable. Tyler Perrys movies are geared toward an almost exclusively black audience. That is profitable for theaters in Sacramento and the Bay Area, where there are large black populations, but Davis theaters usually arent going to show Tyler Perry movies because theres not enough black people here to make it profitable. Its not racism, if thats what youre suggesting. Users/PeteWillits
Plus Tyler Perry is just plain not funny Users/Arcturus
Davis doesnt have a large black population, and thus since Tyler Perrys target is mainly an AfricanAmerican audience, it wouldnt be profitable to show the movie here. Stop trying to play the race card.
20090907 23:29:56 nbsp Tyler Perry. ARE YOU Serious? WOW. GUY writes about the same crap over and over again. Yea NOT FUNNY what so ever. And stop playing the RACE CARD .Hes movies are SO ANNOYING. PERIOD. Users/Macco
20100808 19:01:14 nbsp Convenient, and priced accordingly. If you have an extra twenty minutes to spare, it is almost always worth traveling out of Davis to see a movie. Users/TerrenceMurphy
20110113 00:01:10 nbsp Great customer service! Went here with a gift certificate that I thought would work at this theater but the ticket attendant said it is not for their theater but that they will honor it anyways. Wow! Users/MMAC
20110325 18:57:42 nbsp Looks like few people have commented here anyone know if they are still using 35mm here or DLP? The projectors were brandnew when installed in 1999. Users/AlanSmithee
Fandango shows that Stadium 5 still uses 35mm projectors. Holiday 6 has 4 digital projectors. Users/MaxLucas
As of late 2011, Auditoriums 1 & 5 have switched over to digital projection. The rest of the auditoriums are slated to switch over mid 2012.
20121117 22:32:00 nbsp Really wonderful customer service. I was helping my housemate in trying to locate a lost item on an extremely busy Saturday afternoon. I called in several times and was starting to get annoyed, but on the fourth attempt, someone picked up and helped me out. I go to the theater and see a huge (like...Disneyland during the summer huge) line of people at the ticket booth and concessions...I thought it would be impossible to try to talk to someone. I spoke with the lady checking tickets; she and another employee tagteamed and one notified the manager who was busy working concessions. The manager was most agreeable and helped me out within ten minutes and I was able to get my friend's scarf back. Very customeroriented, didnt rush me at all despite how hectic everything was today. Wish more businesses were like them. Thanks :) Users/kg18
20130114 18:00:39 nbsp Is this the modern cinema, intended to compete with cable and Netflix? If so, its not for me. If you get there on time, you are subjected to up 45 minutes of repetitive advertising and previews at earshattering volume. Why must we PAY to be subjected to endless ads? We should pay for an absence of Coke and candy ads. Not possible to talk or think during this prefeature assault. Regal must reduce this if they ever want me back. Users/rednoodler
20130620 09:57:40 nbsp The $1 summer movies are on Tuesday and Wednesday. Users/NoelBruening
|
{"hexsha": "7c7ccee1f4047333ca2d3bab3fcf163fdd927f52", "size": 7885, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Regal_Cinemas_Davis_Stadium_5.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Regal_Cinemas_Davis_Stadium_5.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Regal_Cinemas_Davis_Stadium_5.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 127.1774193548, "max_line_length": 865, "alphanum_fraction": 0.7965757768, "num_tokens": 1917}
|
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import sys
sys.path.append(".")
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
import numpy as np
from datasets import DataSet
import utils
import likelihoods
from sdt_rff_gpu import SdtRff
import losses
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
def import_dataset(dataset, fold):
    """Load one cross-validation fold of a dataset from the FOLDS/ directory.

    :param dataset: dataset base name used in the file naming scheme
    :param fold: fold identifier appended to the file names
    :return: (train DataSet, test DataSet)
    """
    def _load(part):
        # File naming scheme: FOLDS/<dataset>_ARD_<part>__FOLD_<fold>
        return np.loadtxt('FOLDS/' + dataset + '_ARD_' + part + '__FOLD_' + fold, delimiter=' ')
    train_X = _load('Xtrain')
    train_Y = _load('ytrain')
    test_X = _load('Xtest')
    test_Y = _load('ytest')
    return DataSet(train_X, train_Y), DataSet(test_X, test_Y)
if __name__ == '__main__':
    # Command-line configuration: dataset, fold, kernel, optimizer, etc.
    FLAGS = utils.get_flags()
    ## Set random seed for tensorflow and numpy operations
    # Seeding must happen before any data loading or graph construction.
    tf.set_random_seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)
    # Load the train/test split for the requested cross-validation fold.
    data, test = import_dataset(FLAGS.dataset, FLAGS.fold)
    ## Here we define a custom loss for RFSDT to show
    error_rate = losses.ZeroOneLoss(data.Dout)
    ## Likelihood
    like = likelihoods.Softmax()
    ## Optimizer
    optimizer = utils.get_optimizer(FLAGS.optimizer, FLAGS.learning_rate)
    ## Main RFSDT object
    sdt = SdtRff(like, data.num_examples, data.X.shape[1], data.Y.shape[1], FLAGS.h_tree, FLAGS.n_rff, FLAGS.kernel_type, FLAGS.ard_type, FLAGS.local_reparam, FLAGS.q_Omega_fixed, FLAGS.theta_fixed, FLAGS.likelihood_type, FLAGS.dataset, FLAGS.fold)
    ## Learning
    # Trains on `data` and periodically evaluates error_rate on `test`.
    sdt.learn(data, FLAGS.learning_rate, FLAGS.mc_train, FLAGS.batch_size, FLAGS.n_iterations, optimizer,
              FLAGS.display_step, test, FLAGS.mc_test, error_rate, FLAGS.duration, FLAGS.less_prints)
|
{"hexsha": "c4505abd5a9962a9f6813a87730764e53c883e64", "size": 1917, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/sdt_rff_classification_gpu.py", "max_stars_repo_name": "abcdabcdabcda/GPDT", "max_stars_repo_head_hexsha": "4ea5bc7e4bc50b1a1c820c55026639ccb5f10cae", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiments/sdt_rff_classification_gpu.py", "max_issues_repo_name": "abcdabcdabcda/GPDT", "max_issues_repo_head_hexsha": "4ea5bc7e4bc50b1a1c820c55026639ccb5f10cae", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/sdt_rff_classification_gpu.py", "max_forks_repo_name": "abcdabcdabcda/GPDT", "max_forks_repo_head_hexsha": "4ea5bc7e4bc50b1a1c820c55026639ccb5f10cae", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2321428571, "max_line_length": 248, "alphanum_fraction": 0.7271778821, "include": true, "reason": "import numpy", "num_tokens": 490}
|
from filterpy.kalman import KalmanFilter
import numpy as np
# Synthetic 1-D track: linear motion (+5 per step) plus small noise and a
# step disturbance of +10 from index 10 onward. (Python 2 syntax.)
data = np.array([10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100])
noise = np.array([1,-1,0, -1, 1, 2, 0,-1, 1,-2,-1,-1,0, 1, 1,-2, 1,0, 0])
impluse = np.array([0,0,0,0,0,0,0,0,0,0,10,10,10,10,10,10,10,10,10])
print data + noise + impluse
def state(f):
    # Debug helper: print the filter's state estimate x and residual y.
    print f.x, f.y
if __name__ == '__main__':
    # Constant-velocity Kalman filter demo (Python 2):
    # state x = [px, py, vx, vy], measurement z = [px, py].
    f = KalmanFilter(dim_x=4, dim_z=2)
    f.x = np.array([2,2,0,0])
    # State transition: position advances by velocity each step (dt = 1).
    f.F = np.array([
        [1,0,1,0],
        [0,1,0,1],
        [0,0,1,0],
        [0,0,0,1]])
    # Measurement function: observe position components only.
    f.H = np.array([
        [1,0,0,0],
        [0,1,0,0]])
    f.P *= 100  # large initial state covariance (low confidence in x)
    f.R *= 100  # measurement noise covariance
    # Each cycle below: print the one-step prediction, advance the filter,
    # then fold in the next (noisy) position measurement.
    print 'Pre',f.get_prediction()[0]
    f.predict()
    # f.update([2,2])
    # state(f)
    f.update([2,2])
    print 'Pre',f.get_prediction()[0]
    f.predict()
    # f.update([2,2])
    # state(f)
    f.update([3.1,3])
    # state(f)
    print 'Pre',f.get_prediction()[0]
    f.predict()
    f.update([3.9,4])
    print 'Pre',f.get_prediction()[0]
    f.predict()
    f.update([5,5.1])
    print 'Pre',f.get_prediction()[0]
    f.predict()
    f.update([6.2,6])
    print 'Pre',f.get_prediction()[0]
    f.predict()
    # Final prediction after the last measurement cycle.
    print 'Pre',f.get_prediction()[0]
|
{"hexsha": "b18c2b41af1538552eafc0f89c85b8b3368c1f81", "size": 1070, "ext": "py", "lang": "Python", "max_stars_repo_path": "new/filter_demo.py", "max_stars_repo_name": "ZhouYzzz/CTT", "max_stars_repo_head_hexsha": "385b6c7ac2e6633f72b49df7e8a599f40c50188b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-12-19T12:54:38.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-15T05:42:32.000Z", "max_issues_repo_path": "client/filter_demo.py", "max_issues_repo_name": "ZhouYzzz/CTT", "max_issues_repo_head_hexsha": "385b6c7ac2e6633f72b49df7e8a599f40c50188b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "client/filter_demo.py", "max_forks_repo_name": "ZhouYzzz/CTT", "max_forks_repo_head_hexsha": "385b6c7ac2e6633f72b49df7e8a599f40c50188b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-02-07T18:30:15.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-15T05:42:34.000Z", "avg_line_length": 21.4, "max_line_length": 76, "alphanum_fraction": 0.5813084112, "include": true, "reason": "import numpy", "num_tokens": 465}
|
%Template by Mark Jervelund - 2015 - mjerv15@student.sdu.dk
\documentclass[a4paper,10pt,titlepage]{report}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage[english]{babel}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{graphicx}
\usepackage{fancyhdr}
\usepackage{lastpage}
\usepackage{listings}
\usepackage{algorithm}
\usepackage{algpseudocode}
\usepackage[document]{ragged2e}
\usepackage[margin=1in]{geometry}
\usepackage{enumitem}
\usepackage{color}
\usepackage{datenumber}
\usepackage{venndiagram}
\usepackage{chngcntr}
\usepackage{mathtools}
\usepackage{booktabs}
\DeclarePairedDelimiter{\ceil}{\lceil}{\rceil}
%lstlisting ting:
\definecolor{dkgreen}{rgb}{0,0.45,0}
\definecolor{gray}{rgb}{0.5,0.5,0.5}
\definecolor{mauve}{rgb}{0.30,0,0.30}
\lstset{frame=tb,
language=C,
aboveskip=3mm,
belowskip=3mm,
showstringspaces=false,
columns=flexible,
basicstyle={\small\ttfamily},
numbers=left,
numberstyle=\footnotesize,
keywordstyle=\color{dkgreen}\bfseries,
commentstyle=\color{dkgreen},
stringstyle=\color{mauve},
frame=single,
breaklines=true,
  breakatwhitespace=false,
  tabsize=1
}
\renewcommand{\lstlistingname}{Code}
\setdatetoday
\addtocounter{datenumber}{0} %date for dilierry standard is today
\setdatebynumber{\thedatenumber}
\date{}
\setcounter{secnumdepth}{0}
\pagestyle{fancy}
\fancyhf{}
\title{CC - Assignment 2}
\newcommand{\Z}{\mathbb{Z}}
\lhead{Compiler (DM546)}
\rhead{Mjerv15, Trpet15 \& Mojae15}
\rfoot{Page \thepage \, of \pageref{LastPage}}
\counterwithin*{equation}{section}
\begin{document}
\newpage
{%
\centering
\huge
\bfseries
\vspace{5mm}
CC, Spring 2018\\
Exam project, part 2\\
\vspace{5mm}
\begin{tabular}{|l|l|}
\hline
Group & 1 \\ \hline
\end{tabular}
\\
\vspace{10mm}
\begin{tabular}{@{}ll@{}}
\toprule
\multicolumn{1}{|l|}{Name} & \multicolumn{1}{l|}{Mark Wolff Jervelund } \\ \midrule
\multicolumn{1}{|l|}{Birthday} & \multicolumn{1}{l|}{280795} \\ \midrule
\multicolumn{1}{|l|}{Login} & \multicolumn{1}{l|}{mjerv15@student.sdu.dk} \\ \midrule
\multicolumn{1}{|l|}{Signature} & \multicolumn{1}{l|}{\includegraphics[scale=0.3]{mark_sign}} \\ \midrule
& \\ \midrule
\multicolumn{1}{|l|}{Name} & \multicolumn{1}{l|}{Troels Blicher Petersen} \\ \midrule
\multicolumn{1}{|l|}{Birthday} & \multicolumn{1}{l|}{230896} \\ \midrule
\multicolumn{1}{|l|}{Login} & \multicolumn{1}{l|}{trpet15@student.sdu.dk} \\ \midrule
\multicolumn{1}{|l|}{Signature} & \multicolumn{1}{l|}{\includegraphics[scale=0.08]{troels_sign} } \\ \midrule
& \\ \midrule
\multicolumn{1}{|l|}{Name} & \multicolumn{1}{l|}{Morten Kristian Jæger} \\ \midrule
\multicolumn{1}{|l|}{Birthday} & \multicolumn{1}{l|}{030895} \\ \midrule
\multicolumn{1}{|l|}{Login} & \multicolumn{1}{l|}{mojae15@student.sdu.dk} \\ \midrule
\multicolumn{1}{|l|}{Signature} & \multicolumn{1}{l|}{\includegraphics[scale=0.3]{morten_sign}} \\ \midrule
\end{tabular}
\\
\vspace{10mm}
This report contains a total of \underline{ 43 } pages.
}
\begin{titlepage}
\centering
\vspace*{9\baselineskip}
\huge
\bfseries
2. Assignment \\
\normalfont
Mark Jervelund, Troels Blicher Petersen \& Morten Jæger \\
(Mjerv15, Trpet15, Mojae15) \\
\huge
Compiler (DM546) \\[4\baselineskip]
\normalfont
\includegraphics[scale=1]{SDU_logo}
\vfill\
\vspace{5mm}
IMADA \\
\textbf{\datedate} \\[2\baselineskip]
\end{titlepage}
\renewcommand{\thepage}{\roman{page}}% Roman numerals for page counter
\tableofcontents
\newpage
\setcounter{page}{1}
\renewcommand{\thepage}{\arabic{page}}
\newpage
\section{Introduction}
In the second task of the Compiler project, we are tasked with implementing a Scanner and a Parser, using Flex and Bison, and a Pretty printer. This is all to be done in C. As part of the project, all the files needed to complete the project were given beforehand, and only needed to be edited. An essential part of this project, is to implement an Abstract Syntax Tree, to give the compiler a way of understanding the code.
\subsection{How to compile and run}
Besides the main objective of this assignment, the group has also started implementing a more modular project structure, which will be explained in the design section. However, a brief introduction on how to build and run the program will be given here.\\
\vspace{6px}
Since the program is already starting to take modular shape, there are several ways to compile it. The easiest way is to simply run in the directory of the \textsf{Makefile}
\begin{lstlisting}
make all
\end{lstlisting}
\textsf{make all} will, however, build all binaries. Including the ones from Assignment 1. Therefore, for this project it is also possible to simply run
\begin{lstlisting}
make exp
\end{lstlisting}
This will only compile and output the program \textsf{exp} for this assignment. Before running the program \textsf{exp}, it is important to have the input file in the same directory.\\
\vspace{6px}
There are two ways to run the program \textsf{exp}. The first is to simply run
\begin{lstlisting}
./exp
\end{lstlisting}
This will run the program and read an input file \textsf{input.txt}. The other way to run the program is by passing the file to test on the command line, as an argument.
\begin{lstlisting}
./exp <inputfile>
\end{lstlisting}
This makes it a little easier to test several different testcases, without having to change the same file all the time.\\
\vspace{6px}
To remove all object files, run
\begin{lstlisting}
make clean
\end{lstlisting}
To remove all object files and executable binaries, run
\begin{lstlisting}
make clean-all
\end{lstlisting}
\section{Design} %%TODO SKAL NOK OGSÅ VÆRE NOGET OM ABSTRACT SYNTAX TREE
\subsection{Scanner and Parser}
The scanning and parsing is the main subject in this report. It is here the written program (to be compiled) is pulled apart to understand its structure and meaning.
\subsubsection{Scanner}
The scanner needs to be able to recognize the different symbols that are specified in the assignment. This means that it needs to be able to spot and return the different operators we are working with (+, -, /, *, etc.), and the specific words as specified in the syntax. These are words like, "while", "if", "array of", and such. After spotting these symbols, the scanner needs to pass them on to the parser, which will parse the input, and start creating our Abstract Syntax Tree. \\
\vspace{6px}
Another feature of the scanner, is the possibility to be able to weed out some "bad" programs already at this point. To do this, the scanner checks for input not defined in the language, which means that neither the scanner, nor the parser can do anything with it. If this happens, we want to be able to tell the user, that the program is not valid. The scanner should also check whether multi-line comments have been closed by the end on the program, as this is also not legal, according to the grammar of the language.
\subsubsection{Parser}
The parser needs to be able to recognize the input it gets from the scanner, and match that up with the grammar of the language. This means that it needs to be able to match the input from the scanner, with the rules in the grammar. When the parser matches some input with one of the rules, it should create a node in our Abstract Syntax Tree, which we can use later on, e.g. for printing the AST.\\
\vspace{6px}
Another feature of the parser is the possibility to weed out some "bad" programs, like we also did in the scanner phase. To do this, the parser checks if a function has the same name in both the head and the tail. If this is not the case, the user should be notified of the error, just like we did in the scanner phase.
\subsection{Abstract Syntax Tree (AST)}
The abstract syntax tree is a data structure used to keep track of the way the program we read is built. The AST is built from the parser, which, given some input from the scanner, matches the input with the rules of the language, and creates nodes in the AST. An example of how a node could be created, is when the parser reads some input with a "+" between two expressions. The parser would then create a node in the AST, which would be a "+"-node. This node would then have two child nodes, which would be the expressions on each side of the "+" symbol. This is the way we build up our AST when we parse the input.
\subsection{Pretty Printer}
The pretty printer needs to be able to print the Abstract Syntax Tree we get from parsing the input program. This must be done in such a way, that the corresponding output from the pretty printer looks like the original program as much as possible, assuming that the original program is not weeded out at this point. Small changes to the output does happen however, such as indentation, and parentheses.
\subsection{New Project Structure}
As mentioned in the introduction, the group has implemented a new project structure to accommodate for a more modular design. The main goal is to have all modules separate, so that removing one module in theory would not break the rest. It was also found that the makefile should be easy to use, so that adding new files to a module in most cases would not require any changes to this file. To some degree this might not seem important, but as the project grows larger it might turn out to be a very convenient choice.\\
\vspace{6px}
It has been chosen, that each module will have its own include directory instead of one big shared include directory. This makes it easier to separate the modules, and removing a module is as simple as removing its directory.\\
\vspace{6px}
The project will be built in the build folder, and all module objects will reside in their respective folder, to prevent possible collisions of filenames.
\newpage
\section{Implementation}
\subsection{Scanner and Parser}
\subsubsection{Scanner}
\begin{lstlisting}
/* abbreviation of symbols we match on, TO BE EXPANDED */
SYMBOLS [+\-*\/\(\)\[\]{}!\|,\.=;:]
%%
[ \t]+ /* ignore */;
\n lineno++;
{SYMBOLS} return yytext[0];
\end{lstlisting}
Above is a small part of the "exp.l" file, which is the file Flex uses to scan the given program. This code handles the different symbols that we want to match on, when scanning the file. We use a list of all the symbols we can match on, as this was easier than writing each and every symbol out individually.
\begin{lstlisting}
"<=" return LEQ;
">" return GT;
">=" return GEQ;
"if" return IF;
"else" return ELSE;
\end{lstlisting}
Above is another small part, that shows how we return tokens when reading certain words, or symbols.
\begin{lstlisting}
<COMMENT_MULTI>{
\n lineno++;
"(*" nested_comment++;
"*)" { nested_comment--;
if (nested_comment == 0){
BEGIN(0);
}
}
. /* ignore */
<<EOF>> fprintf(stderr, "Comment not closed at the end of the file. Found at line: %i\n", lineno); exit(1);
}
\end{lstlisting}
Above is the part of the scanner that takes care of multi-line comments. The state for multi-line comments check for nested comments, and, as described in the design section, returns an error if the comment is not closed by the end of the file.
\subsubsection{Parser}
\begin{lstlisting}
expression : expression '+' expression
{$$ = make_EXP(exp_PLUS, $1, $3);}
| expression '-' expression
{$$ = make_EXP(exp_MIN, $1, $3);}
| expression '*' expression
{$$ = make_EXP(exp_MULT, $1, $3);}
| expression '/' expression
{$$ = make_EXP(exp_DIV, $1, $3);}
| '(' expression ')'
{$$ = $2;}
\end{lstlisting}
Above is a small part of the "exp.y" file, which is the file Bison uses for parsing the input from the scanner. This part of the code is a part of the code that handles expressions, by making nodes in the AST.
\begin{lstlisting}
function : head body tail
{$$ = make_Func($1, $2, $3);
if (check_Func($1, $3) != 0){
fprintf(stderr, "Function name: %s, at line %i, does not match function name: %s, at line %i\n", $1->id, $1->lineno, $3->id, $3->lineno);
YYABORT;
}}
;
\end{lstlisting}
As described in the design section, we want to check if a function has the same name in the head and the tail. This part of the code checks this, by calling the function "check\_func()". This function checks the "id" of the given head and tail. If they are not equal, we give the user an error message, stop the parsing, since the input program is not valid.
\subsection{Abstract Syntax Tree}
\begin{lstlisting}
expression *make_EXP(EXP_kind kind, expression *left, expression *right){
expression *e;
e = NEW(expression);
e->lineno = lineno;
e->kind = kind;
e->val.ops.left = left;
e->val.ops.right = right;
return e;
}
\end{lstlisting}
Above is a small part of the functions used to create the AST. This particular function is used to create expressions, with an expression on each side of an operator. This operator could be "+", "-", "*", and so on. As described in the design section, when we create this node, we set the left and right side of the operator as the children of the node.
\subsection{Pretty Printer}
\begin{lstlisting}
void prettyEXP(expression *e) {
switch (e->kind) {
case exp_MULT:
prettyEXP(e->val.ops.left);
printf("*");
prettyEXP(e->val.ops.right);
break;
case exp_DIV:
prettyEXP(e->val.ops.left);
printf("/");
prettyEXP(e->val.ops.right);
break;
\end{lstlisting}
The code above is a part of the function used to print expressions. The way this is done, is by checking the expressions "kind", which describes what kind of expression we are working with. When we know what kind of expression we have, we can print the expression with the correct operators
\subsection{New Project Structure}
Implementation of this new structure is fairly simple. However, it was chosen to divide the scanner, parser and pretty printer into separate modules. This approach is a little backwards in terms of the new modular design, since they appear to heavily depend on each other. But, it also means that it is possible to remove one module and replace it with another if need be.
\section{Testing}
The programs used to test the scanner, parser, and pretty printer can be found in the appendix. Some are simple, to just test the basics of the program, and some are more complicated (contain unary minus, expressions with absolute values which looks like "||"). The programs are not meant to be run as actual programs, so they may not make sense.
\section{Results}
\subsection{Test1}
This is just a basic test, to make sure indentation works and such.
\begin{lstlisting}
func test(n : int) : int
return 2;
end test
write test();
\end{lstlisting}
\subsection{Test2}
We now test if we can handle unary minuses.
\begin{lstlisting}
func test(n : int) : int
if (n == 0 || n == 1) then
return -1;
else
return n*factorial(n-1);
end test
write test();
\end{lstlisting}
\subsection{Test3}
We now test if we can handle an absolute value expression that looks like an "or"
\begin{lstlisting}
func test(n : int) : int
if (n == 0 || n == 1 || ||a+b|+c|) then
return -1;
else
return n*factorial(n-1);
end test
write test();
\end{lstlisting}
\subsection{Test4}
We now test if we get an error if the function name is not the same in the head and the tail of the function.
\begin{lstlisting}
Function name: test, at line 2, does not match function name: nottest, at line 4
Segmentation fault (core dumped)
\end{lstlisting}
\subsection{Test5}
We now test if we get an error if we do not close a multi-line comment.
\begin{lstlisting}
Comment not closed at the end of the file. Found at line: 9
\end{lstlisting}
\subsection{Test6}
We now test if we get an error if we read a symbol that is not in our grammar.
\begin{lstlisting}
Unrecognized symbol. Found at line: 7
\end{lstlisting}
\section{Conclusion}
From the tests we have run, we can conclude that our scanner, parser, AST, and pretty printer works as intended on a given program.
\newpage
\section{Appendix}
\subsection{exp.l}
\lstinputlisting{f_exp.txt}
\newpage
\subsection{exp.y}
\lstinputlisting{b_exp.txt}
\newpage
\subsection{kind.h}
\lstinputlisting{kind.h}
\newpage
\subsection{memory.h}
\lstinputlisting{memory.h}
\newpage
\subsection{tree.h}
\lstinputlisting{tree.h}
\newpage
\subsection{tree.c}
\lstinputlisting{tree.c}
\newpage
\subsection{pretty.h}
\lstinputlisting{pretty.h}
\newpage
\subsection{pretty.c}
\lstinputlisting{pretty.c}
\newpage
\subsection{scan\_parse.c}
\lstinputlisting{scan_parse.c}
\newpage
\subsection{Test1.txt}
\lstinputlisting{Test1.txt}
\newpage
\subsection{Test2.txt}
\lstinputlisting{Test2.txt}
\newpage
\subsection{Test3.txt}
\lstinputlisting{Test3.txt}
\newpage
\subsection{Test4.txt}
\lstinputlisting{Test4.txt}
\newpage
\subsection{Test5.txt}
\lstinputlisting{Test5.txt}
\newpage
\subsection{Test6.txt}
\lstinputlisting{Test6.txt}
\end{document}
|
{"hexsha": "0ee7361dcfb4250ac91f8799fc3ea0e615fc7966", "size": 17431, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "report/report 2/main.tex", "max_stars_repo_name": "Doommius/Compiler", "max_stars_repo_head_hexsha": "015e62e6c914cdfff3d77998ea937bd8adb8104b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "report/report 2/main.tex", "max_issues_repo_name": "Doommius/Compiler", "max_issues_repo_head_hexsha": "015e62e6c914cdfff3d77998ea937bd8adb8104b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "report/report 2/main.tex", "max_forks_repo_name": "Doommius/Compiler", "max_forks_repo_head_hexsha": "015e62e6c914cdfff3d77998ea937bd8adb8104b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.1038647343, "max_line_length": 619, "alphanum_fraction": 0.7161952843, "num_tokens": 4713}
|
from utility import *
from numpy import prod
def A200544(n):
    """Return term n of OEIS A200544.

    Sums, over all partitions of n, a product of binomial coefficients
    built from Fibonacci numbers: for each (count, multiplicity) pair
    produced by multiples(partition), the factor is
    C(fib(mult + 1) + count - 1, count).

    Relies on the project helpers partitions(), multiples() and choose()
    from `utility` — presumably partitions(n) yields integer partitions
    and multiples(x) yields (count, value) pairs; confirm against utility.
    """
    if n == 0:
        return 1
    # math.prod keeps exact arbitrary-precision Python ints.  numpy's prod
    # reduces in fixed-width int64 when the inputs fit, silently overflowing
    # for the large fib/choose values that arise here.
    from math import prod
    p = [list(multiples(x)) for x in partitions(n)]
    return sum(
        prod(choose(fib(pair[1] + 1) + pair[0] - 1, pair[0]) for pair in x)
        for x in p
    )
fib_memo = [0, 1]  # module-level cache of Fibonacci numbers, grown on demand

def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Results are memoized in the module-level fib_memo list, so repeated
    calls amortize to O(1) per new index.
    """
    # Extend the cache until it covers index n, then read the answer off it.
    while len(fib_memo) <= n:
        fib_memo.append(fib_memo[-2] + fib_memo[-1])
    return fib_memo[n]
|
{"hexsha": "9ead8fe3f2af7683e57fa320833be5e2f77dc066", "size": 456, "ext": "py", "lang": "Python", "max_stars_repo_path": "oeis/jenga.py", "max_stars_repo_name": "GuySrinivasan/oeis", "max_stars_repo_head_hexsha": "d6e9d11017b613ae64f38cbedb9019c72b594dd0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "oeis/jenga.py", "max_issues_repo_name": "GuySrinivasan/oeis", "max_issues_repo_head_hexsha": "d6e9d11017b613ae64f38cbedb9019c72b594dd0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-01-09T08:53:30.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-11T23:12:50.000Z", "max_forks_repo_path": "oeis/jenga.py", "max_forks_repo_name": "GuySrinivasan/oeis", "max_forks_repo_head_hexsha": "d6e9d11017b613ae64f38cbedb9019c72b594dd0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3333333333, "max_line_length": 102, "alphanum_fraction": 0.5701754386, "include": true, "reason": "from numpy", "num_tokens": 161}
|
# Sanity checks for the named small-graph generators: each graph is built for
# the graph type `G` (or digraph type `DG`) and its vertex count (nv) and edge
# count (ne) are compared against the known values for that classic graph.
# NOTE(review): `G`, `DG` and `TEST` are assumed to be bound by the including
# test harness — confirm against the test runner that includes this file.
@testset "$TEST $G" begin
g = graph(:diamond, G)
@test nv(g) == 4 && ne(g) == 5
# The generator must return exactly the requested graph type.
@test typeof(g) == G
g = graph(:bull, G)
@test nv(g) == 5 && ne(g) == 5
g = graph(:chvatal, G)
@test nv(g) == 12 && ne(g) == 24
g = graph(:cubical, G)
@test nv(g) == 8 && ne(g) == 12
g = graph(:desargues, G)
@test nv(g) == 20 && ne(g) == 30
g = graph(:dodecahedral, G)
@test nv(g) == 20 && ne(g) == 30
g = graph(:frucht, G)
@test nv(g) == 20 && ne(g) == 18
g = graph(:heawood, G)
@test nv(g) == 14 && ne(g) == 21
g = graph(:house, G)
@test nv(g) == 5 && ne(g) == 6
g = graph(:housex, G)
@test nv(g) == 5 && ne(g) == 8
g = graph(:icosahedral, G)
@test nv(g) == 12 && ne(g) == 30
g = graph(:krackhardtkite, G)
@test nv(g) == 10 && ne(g) == 18
g = graph(:moebiuskantor, G)
@test nv(g) == 16 && ne(g) == 24
g = graph(:octahedral, G)
@test nv(g) == 6 && ne(g) == 12
g = graph(:pappus, G)
@test nv(g) == 18 && ne(g) == 27
g = graph(:petersen, G)
@test nv(g) == 10 && ne(g) == 15
g = graph(:sedgewickmaze, G)
@test nv(g) == 8 && ne(g) == 10
g = graph(:tetrahedral, G)
@test nv(g) == 4 && ne(g) == 6
g = graph(:truncatedcube, G)
@test nv(g) == 24 && ne(g) == 36
# The truncated tetrahedron exists in both undirected and directed flavours;
# check that graph() yields an undirected result and digraph() a directed one.
g = graph(:truncatedtetrahedron, G)
@test nv(g) == 12 && ne(g) == 18 && !is_directed(g)
g = digraph(:truncatedtetrahedron, DG)
@test nv(g) == 12 && ne(g) == 18 && is_directed(g)
@test typeof(g) == DG
g = graph(:tutte, G)
@test nv(g) == 46 && ne(g) == 69
# Unknown graph names must raise rather than silently return something.
@test_throws ErrorException g = graph(:nonexistent, G)
end # testset
|
{"hexsha": "60f25b82fe71caa3d88a6082558ef953b1df72bf", "size": 1482, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/generators/smallgraphs.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/Erdos.jl-90d7349d-81aa-5495-813a-883243abfe31", "max_stars_repo_head_hexsha": "2eb248772a05eac35823a07373dd5644913c6dbe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2017-02-24T15:54:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T19:59:23.000Z", "max_issues_repo_path": "test/generators/smallgraphs.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/Erdos.jl-90d7349d-81aa-5495-813a-883243abfe31", "max_issues_repo_head_hexsha": "2eb248772a05eac35823a07373dd5644913c6dbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 76, "max_issues_repo_issues_event_min_datetime": "2017-02-23T09:31:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-27T09:10:31.000Z", "max_forks_repo_path": "test/generators/smallgraphs.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/Erdos.jl-90d7349d-81aa-5495-813a-883243abfe31", "max_forks_repo_head_hexsha": "2eb248772a05eac35823a07373dd5644913c6dbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2017-03-04T21:05:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-03T12:54:44.000Z", "avg_line_length": 19.76, "max_line_length": 54, "alphanum_fraction": 0.5296896086, "num_tokens": 609}
|
module StingerGraphs
# Top-level module: pulls the package together via include().  The order below
# is load-order sensitive (later files use definitions from earlier ones), so
# do not reorder without checking cross-file dependencies.
include("stinger_lib.jl")      # low-level STINGER library bindings
include("stinger_graph.jl")    # graph type wrapping the STINGER handle
include("stinger_config.jl")   # configuration options
include("stinger_core.jl")     # core graph operations
include("fields.jl")           # field accessors
include("traversal.jl")        # edge/vertex traversal utilities
include("algorithms/bfs.jl")          # breadth-first search
include("algorithms/parallelbfs.jl")  # parallel BFS variant
include("algorithms/kcore.jl")        # k-core decomposition
include("generators/kronecker.jl")    # Kronecker graph generator
end # module
|
{"hexsha": "1144b4529c86e9399523bca5bb32bcb3cc5c01a1", "size": 349, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/StingerGraphs.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/StingerGraphs.jl-6b32d12a-ade9-56b9-afe9-2f8a1d6b3202", "max_stars_repo_head_hexsha": "f27b6d61bc3eef8c91b20ac4232f60aed5d2f8a6", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-09-13T03:52:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-07T18:29:50.000Z", "max_issues_repo_path": "src/StingerGraphs.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/StingerGraphs.jl-6b32d12a-ade9-56b9-afe9-2f8a1d6b3202", "max_issues_repo_head_hexsha": "f27b6d61bc3eef8c91b20ac4232f60aed5d2f8a6", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2016-04-26T17:45:59.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-17T17:54:53.000Z", "max_forks_repo_path": "src/StingerGraphs.jl", "max_forks_repo_name": "rohitvarkey/StingerGraphs.jl", "max_forks_repo_head_hexsha": "afb22b8f7cee8b06f5b80c7edaaaee43804ad988", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-09-16T11:07:22.000Z", "max_forks_repo_forks_event_max_datetime": "2018-04-27T10:17:37.000Z", "avg_line_length": 20.5294117647, "max_line_length": 36, "alphanum_fraction": 0.7736389685, "num_tokens": 101}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.