text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
"""
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
The algorithm is tested on the Pendulum-v0 OpenAI gym task
and developed with tflearn + Tensorflow
Author: Vamshi Kumar Kurva
improved upon the original code by Patrick Emami
"""
import tensorflow as tf
import numpy as np
import gym
from gym import wrappers
import tflearn
import argparse
import pprint as pp
import os
import logz
from replay_buffer import ReplayBuffer
# ===========================
# Actor and Critic DNNs
# ===========================
class ActorNetwork(object):
    """
    Input to the network is the state, output is the action
    under a deterministic policy.
    The output layer activation is a tanh to keep the action
    between -action_bound and action_bound
    """

    def __init__(self, sess, state_dim, action_dim, action_bound, learning_rate, tau, batch_size):
        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.action_bound = action_bound
        self.learning_rate = learning_rate
        self.tau = tau
        self.batch_size = batch_size
        # Actor Network
        self.inputs, self.out, self.scaled_out = self.create_actor_network()
        # NOTE(review): assumes this actor is built before any other trainable
        # variables exist in the default graph — tf.trainable_variables()
        # captures everything created so far.
        self.network_params = tf.trainable_variables()
        self.saver = tf.train.Saver(self.network_params, max_to_keep=1)
        # Target Network (second copy of the same architecture)
        self.target_inputs, self.target_out, self.target_scaled_out = self.create_actor_network()
        self.target_network_params = tf.trainable_variables()[
            len(self.network_params):]
        # Op for periodically updating target network with online network
        # weights: theta_target <- tau * theta + (1 - tau) * theta_target
        self.update_target_network_params = \
            [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) +
                                                  tf.multiply(self.target_network_params[i], 1. - self.tau))
             for i in range(len(self.target_network_params))]
        # This gradient will be provided by the critic network
        self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])
        # Combine the gradients here.
        # Negation of action gradients so that we can maximize the Q-value of
        # the critic network (gradient ascent).
        self.unnormalized_actor_gradients = tf.gradients(
            self.scaled_out, self.network_params, -self.action_gradient)
        # Average the summed gradients over the minibatch
        self.actor_gradients = list(map(lambda x: tf.div(x, self.batch_size), self.unnormalized_actor_gradients))
        # Optimization Op: apply the precomputed gradients directly
        self.optimize = tf.train.AdamOptimizer(self.learning_rate). \
            apply_gradients(zip(self.actor_gradients, self.network_params))
        self.num_trainable_vars = len(
            self.network_params) + len(self.target_network_params)

    def create_actor_network(self):
        """Build the 400-300 batch-normalized actor; returns (inputs, out, scaled_out)."""
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        net = tflearn.fully_connected(inputs, 400)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        net = tflearn.fully_connected(net, 300)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        # Final layer weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(
            net, self.a_dim, activation='tanh', weights_init=w_init)
        # Scale output to -action_bound to action_bound
        scaled_out = tf.multiply(out, self.action_bound)
        return inputs, out, scaled_out

    def train(self, inputs, a_gradient):
        """Run one actor update step using the critic-supplied action gradients."""
        self.sess.run(self.optimize, feed_dict={
            self.inputs: inputs,
            self.action_gradient: a_gradient
        })

    def predict(self, inputs):
        """Return scaled actions from the online network."""
        return self.sess.run(self.scaled_out, feed_dict={
            self.inputs: inputs
        })

    def predict_target(self, inputs):
        """Return scaled actions from the target network."""
        return self.sess.run(self.target_scaled_out, feed_dict={
            self.target_inputs: inputs
        })

    def update_target_network(self):
        """Soft-update target parameters toward online parameters."""
        self.sess.run(self.update_target_network_params)

    def get_num_trainable_vars(self):
        """Total trainable variable count (online + target networks)."""
        return self.num_trainable_vars
class CriticNetwork(object):
    """
    Input to the network is the state and action, output is Q(s,a).
    The action must be obtained from the output of the Actor network.
    """

    def __init__(self, sess, state_dim, action_dim, learning_rate, tau, gamma, num_actor_vars):
        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.learning_rate = learning_rate
        self.tau = tau
        self.gamma = gamma
        # Create the critic network
        self.inputs, self.action, self.out = self.create_critic_network()
        # Skip the actor's variables, which were created first
        self.network_params = tf.trainable_variables()[num_actor_vars:]
        self.saver = tf.train.Saver(self.network_params, max_to_keep=1)
        # Target Network (second copy of the same architecture)
        self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
        self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]
        # Op for periodically updating target network with online network
        # weights: theta_target <- tau * theta + (1 - tau) * theta_target
        self.update_target_network_params = \
            [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) \
                + tf.multiply(self.target_network_params[i], 1. - self.tau))
             for i in range(len(self.target_network_params))]
        # Network target (y_i)
        self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])
        # Define loss and optimization Op
        self.loss = tflearn.mean_square(self.predicted_q_value, self.out)
        self.optimize = tf.train.AdamOptimizer(
            self.learning_rate).minimize(self.loss)
        # Get the gradient of the net w.r.t. the action.
        # For each action in the minibatch (i.e., for each x in xs),
        # this will sum up the gradients of each critic output in the minibatch
        # w.r.t. that action. Each output is independent of all
        # actions except for one.
        self.action_grads = tf.gradients(self.out, self.action)

    def create_critic_network(self):
        """Build the 400-300 critic; the action enters at the 2nd hidden layer.

        Returns (inputs, action, out).
        """
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        action = tflearn.input_data(shape=[None, self.a_dim])
        net = tflearn.fully_connected(inputs, 400)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        # Add the action tensor in the 2nd hidden layer
        # Use two temp layers to get the corresponding weights and biases
        # NOTE(review): only t2.b is added; t1.b is unused. This matches the
        # upstream implementation this file is based on — verify intentional.
        t1 = tflearn.fully_connected(net, 300)
        t2 = tflearn.fully_connected(action, 300)
        net = tflearn.activation(
            tf.matmul(net, t1.W) + tf.matmul(action, t2.W) + t2.b, activation='relu')
        # linear layer connected to 1 output representing Q(s,a)
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(net, 1, weights_init=w_init)
        return inputs, action, out

    def train(self, inputs, action, predicted_q_value):
        """One critic SGD step toward the TD targets; returns [q_values, optimize_result]."""
        return self.sess.run([self.out, self.optimize], feed_dict={
            self.inputs: inputs,
            self.action: action,
            self.predicted_q_value: predicted_q_value
        })

    def predict(self, inputs, action):
        """Q(s, a) from the online network."""
        return self.sess.run(self.out, feed_dict={
            self.inputs: inputs,
            self.action: action
        })

    def predict_target(self, inputs, action):
        """Q(s, a) from the target network."""
        return self.sess.run(self.target_out, feed_dict={
            self.target_inputs: inputs,
            self.target_action: action
        })

    def action_gradients(self, inputs, actions):
        """dQ/da evaluated at (inputs, actions); consumed by the actor update."""
        return self.sess.run(self.action_grads, feed_dict={
            self.inputs: inputs,
            self.action: actions
        })

    def update_target_network(self):
        """Soft-update target parameters toward online parameters."""
        self.sess.run(self.update_target_network_params)
# Taken from https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py, which is
# based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
class OrnsteinUhlenbeckActionNoise:
    """Temporally correlated exploration noise (Ornstein-Uhlenbeck process).

    Each call advances the process one step of size `dt` and returns the new
    state; `reset()` returns the process to `x0` (or zeros).
    """

    def __init__(self, mu, sigma=0.3, theta=.15, dt=1e-2, x0=None):
        self.theta = theta
        self.mu = mu
        self.sigma = sigma
        self.dt = dt
        self.x0 = x0
        self.reset()

    def __call__(self):
        # Euler-Maruyama step: mean-reverting drift plus Gaussian diffusion
        drift = self.theta * (self.mu - self.x_prev) * self.dt
        diffusion = self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
        next_x = self.x_prev + drift + diffusion
        self.x_prev = next_x
        return next_x

    def reset(self):
        # Restart from the supplied initial state, defaulting to zeros
        self.x_prev = np.zeros_like(self.mu) if self.x0 is None else self.x0

    def __repr__(self):
        return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
    """Create TensorBoard summary ops for episode reward and average max-Q.

    Returns:
        (summary_ops, summary_vars): the merged summary op and the two
        tf.Variables to assign via feed_dict when evaluating it.
    """
    episode_reward = tf.Variable(0.)
    tf.summary.scalar("Reward", episode_reward)
    episode_ave_max_q = tf.Variable(0.)
    tf.summary.scalar("Qmax Value", episode_ave_max_q)
    summary_vars = [episode_reward, episode_ave_max_q]
    # NOTE(review): merge_all collects every summary in the default graph,
    # not just the two defined above.
    summary_ops = tf.summary.merge_all()
    return summary_ops, summary_vars
# ===========================
# Agent Training
# ===========================
def test(sess, env, args, actor, critic):
    """Restore saved actor/critic checkpoints, roll out one greedy episode
    and save the rendered frames as a GIF.

    Args:
        sess: tf.Session with both networks already built.
        env: gym environment to roll out.
        args: parsed argument dict (unused here beyond the signature).
        actor, critic: ActorNetwork / CriticNetwork with `saver` attributes.

    Fix vs. original: the GIF was written by saving a blank `Image.new` frame
    with every rendered frame appended after it, so the clip started with an
    empty frame. Now the first rendered frame is saved with the rest appended.
    """
    checkpoint_actor_dir = os.path.join(os.curdir, 'Actor_InvertedPendulum')
    if not os.path.exists(checkpoint_actor_dir):
        os.makedirs(checkpoint_actor_dir)
    ckpt_1 = tf.train.get_checkpoint_state(checkpoint_actor_dir)
    checkpoint_critic_dir = os.path.join(os.curdir, 'Critic_InvertedPendulum')
    if not os.path.exists(checkpoint_critic_dir):
        os.makedirs(checkpoint_critic_dir)
    ckpt_2 = tf.train.get_checkpoint_state(checkpoint_critic_dir)
    # Restore whatever checkpoints exist
    if ckpt_1 and tf.train.checkpoint_exists(ckpt_1.model_checkpoint_path):
        print("Reading actor parameters from %s" % ckpt_1.model_checkpoint_path)
        actor.saver.restore(sess, ckpt_1.model_checkpoint_path)
    if ckpt_2 and tf.train.checkpoint_exists(ckpt_2.model_checkpoint_path):
        print("Reading critic parameters from %s" % ckpt_2.model_checkpoint_path)
        critic.saver.restore(sess, ckpt_2.model_checkpoint_path)
    # Initialize only the variables the checkpoints did not cover
    uninitialized_vars = []
    for var in tf.all_variables():
        try:
            sess.run(var)
        except tf.errors.FailedPreconditionError:
            uninitialized_vars.append(var)
    if uninitialized_vars:
        sess.run(tf.variables_initializer(uninitialized_vars))
    # Initialize target network weights
    actor.update_target_network()
    critic.update_target_network()
    s = env.reset()
    done = False
    total_reward = 0
    # NOTE(review): spec.timestep_limit is deprecated in newer gym versions
    # (use spec.max_episode_steps there).
    max_steps = env.spec.timestep_limit
    step = 0
    from PIL import Image  # local import: PIL only needed for this rollout
    frames = []
    while not done:
        frames.append(Image.fromarray(env.render(mode='rgb_array')))
        a = actor.predict(np.reshape(s, (1, actor.s_dim)))
        s2, r, done, _ = env.step(a[0])
        total_reward += r
        step += 1
        s = s2
        if step > max_steps:
            break
    print('total reward: ', total_reward)
    # Save the rendered frames themselves (no leading blank frame); guard
    # against an episode that terminated before any frame was captured.
    if frames:
        with open('InvertedPendulum_gym_before.gif', 'wb') as f:  # change the path if necessary
            frames[0].save(f, save_all=True, append_images=frames[1:])
def train(sess, env, args, actor, critic, actor_noise, logdir):
    """Main DDPG training loop with periodic evaluation and checkpointing.

    Args:
        sess: tf.Session with actor/critic graphs already built.
        env: (possibly Monitor-wrapped) gym environment for rollouts.
        args: dict of hyperparameters parsed in __main__.
        actor, critic: ActorNetwork / CriticNetwork instances.
        actor_noise: callable exploration-noise process (OU noise).
        logdir: directory for logz tabular output.

    NOTE(review): relies on the module-level name `time`, which is only
    imported inside the __main__ block — train() works when the file is run
    as a script but raises NameError if this module is imported and train()
    called directly.
    """
    logz.configure_output_dir(logdir)
    # Record the call parameters; anything not in locals() falls back to None
    locals_ = locals()
    params = {k: locals_[k] if k in locals_ else None for k in args}
    print('params: ', params)
    params['env'] = 'InvertedPendulum'
    params['exp_name'] = '3layer'
    logz.save_params(params)
    # Set up summary Ops
    summary_ops, summary_vars = build_summaries()
    checkpoint_actor_dir = os.path.join(os.curdir, 'Actor_InvertedPendulum')
    if not os.path.exists(checkpoint_actor_dir):
        os.makedirs(checkpoint_actor_dir)
    actor_prefix = os.path.join(checkpoint_actor_dir, "model.ckpt")
    ckpt_1 = tf.train.get_checkpoint_state(checkpoint_actor_dir)
    checkpoint_critic_dir = os.path.join(os.curdir, 'Critic_InvertedPendulum')
    if not os.path.exists(checkpoint_critic_dir):
        os.makedirs(checkpoint_critic_dir)
    critic_prefix = os.path.join(checkpoint_critic_dir, "model.ckpt")
    ckpt_2 = tf.train.get_checkpoint_state(checkpoint_critic_dir)
    # Resume from checkpoints when present
    if ckpt_1 and tf.train.checkpoint_exists(ckpt_1.model_checkpoint_path):
        print("Reading actor parameters from %s" % ckpt_1.model_checkpoint_path)
        actor.saver.restore(sess, ckpt_1.model_checkpoint_path)
    if ckpt_2 and tf.train.checkpoint_exists(ckpt_2.model_checkpoint_path):
        print("Reading critic parameters from %s" % ckpt_2.model_checkpoint_path)
        critic.saver.restore(sess, ckpt_2.model_checkpoint_path)
    # Initialize only the variables the checkpoints did not restore
    uninitialized_vars = []
    for var in tf.all_variables():
        try:
            sess.run(var)
        except tf.errors.FailedPreconditionError:
            uninitialized_vars.append(var)
    if len(uninitialized_vars) > 0:
        init_new_vars_op = tf.variables_initializer(uninitialized_vars)
        sess.run(init_new_vars_op)
    writer = tf.summary.FileWriter(args['summary_dir'], sess.graph)
    # Initialize target network weights
    actor.update_target_network()
    critic.update_target_network()
    # Initialize replay memory
    replay_buffer = ReplayBuffer(int(args['buffer_size']), int(args['random_seed']))
    # Needed to enable BatchNorm.
    # This hurts the performance on Pendulum but could be useful
    # in other environments.
    # tflearn.is_training(True)

    def testing():
        # Greedy (noise-free) evaluation episode on a fresh env instance
        env1 = gym.make(args['env'])
        s = env1.reset()
        done = False
        total_reward = 0
        max_steps = env1.spec.timestep_limit
        step = 0
        while not done:
            a = actor.predict(np.reshape(s, (1, actor.s_dim)))
            s2, r, done, _ = env1.step(a[0])
            total_reward += r
            step += 1
            s = s2
            # env.render()
            if step > max_steps:
                break
        print('total steps: ', step)
        print('total reward: ', total_reward)
        return step, total_reward

    iter = 0  # NOTE(review): shadows the builtin `iter`
    start = time.time()
    best_step, best_rew = testing()
    for i in range(int(args['max_episodes'])):
        s = env.reset()
        ep_reward = 0
        ep_ave_max_q = 0
        for j in range(int(args['max_episode_len'])):
            if args['render_env']:
                env.render()
            # Added exploration noise
            # a = actor.predict(np.reshape(s, (1, 3))) + (1. / (1. + i))
            num = np.random.uniform()
            a = actor.predict(np.reshape(s, (1, actor.s_dim))) + actor_noise()
            s2, r, terminal, info = env.step(a[0])
            replay_buffer.add(np.reshape(s, (actor.s_dim,)), np.reshape(a, (actor.a_dim,)), r,
                              terminal, np.reshape(s2, (actor.s_dim,)))
            # Keep adding experience to the memory until
            # there are at least minibatch size samples
            batch_size = int(args['minibatch_size'])
            # NOTE(review): learning starts only after 100000 stored
            # transitions — a hard-coded warm-up independent of batch_size.
            if replay_buffer.size() > 100000:
                iter += 1
                s_batch, a_batch, r_batch, t_batch, s2_batch = \
                    replay_buffer.sample_batch(batch_size)
                # Calculate targets: y = r (+ gamma * Q'(s2, mu'(s2)) if non-terminal)
                target_q = critic.predict_target(
                    s2_batch, actor.predict_target(s2_batch))
                y_i = []
                for k in range(int(args['minibatch_size'])):
                    if t_batch[k]:
                        y_i.append(r_batch[k])
                    else:
                        y_i.append(r_batch[k] + critic.gamma * target_q[k])
                # Update the critic given the targets
                # critic will be trained to minimise the mean square error of the predicted Q value
                # and the target value.
                predicted_q_value, _ = critic.train(
                    s_batch, a_batch, np.reshape(y_i, (int(args['minibatch_size']), 1)))
                ep_ave_max_q += np.amax(predicted_q_value)
                # Update the actor policy using the sampled gradient
                # gradients of the critic Q value according to the action value --> action gradients
                a_outs = actor.predict(s_batch)
                grads = critic.action_gradients(s_batch, a_outs)  # del_a Q(s,a)
                actor.train(s_batch, grads[0])  # del_a Q(s,a) * del_theta Mu_theta(s) ---> actor gradients
                # directly apply these gradients on actor params. No special loss to minimize
                if iter%20 == 0:
                    new_steps, new_rew = testing()
                    if new_rew > best_rew:
                        best_rew = new_rew
                        actor.saver.save(sess, actor_prefix)
                        critic.saver.save(sess, critic_prefix)
                        print('model saved to disk.')
                    # NOTE(review): restores from ckpt_1/ckpt_2 captured at
                    # startup — raises AttributeError (ckpt is None) if no
                    # checkpoint existed when train() began.
                    actor.saver.restore(sess, ckpt_1.model_checkpoint_path)
                    critic.saver.restore(sess, ckpt_2.model_checkpoint_path)
                    best_step, best_rew = testing()
                    # print('actor model saved to: ', actor_prefix)
                    # print('critic model saved to: ', critic_prefix)
                if iter%10 == 0:
                    new_steps, new_rew = testing()
                    logz.log_tabular("Time", time.time() - start)
                    logz.log_tabular('Iteration', iter/10)
                    logz.log_tabular('Reward', new_rew)
                    logz.log_tabular('Steps', new_steps)
                    logz.dump_tabular()
                # Update target networks
                if iter%50 == 0:
                    replay_buffer.update()
                    print('updating buffer')
                    print('updating target networks..')
                    actor.update_target_network()
                    critic.update_target_network()
            s = s2
            ep_reward += r
            if terminal:
                # NOTE(review): ep_ave_max_q / float(j) divides by zero when
                # the episode terminates on the very first step (j == 0).
                summary_str = sess.run(summary_ops, feed_dict={
                    summary_vars[0]: ep_reward,
                    summary_vars[1]: ep_ave_max_q / float(j)
                })
                writer.add_summary(summary_str, i)
                writer.flush()
                print('| Reward: {:d} | Episode: {:d} | Qmax: {:.4f}'.format(int(ep_reward), \
                    i, (ep_ave_max_q / float(j))))
                break
def main(args, logdir):
    """Build env/actor/critic from args and dispatch to train() or test().

    Args:
        args: parsed argument dict (see the argparse setup in __main__).
        logdir: per-run directory passed through to train() for logz output.
    """
    with tf.Session() as sess:
        env = gym.make(args['env'])
        # Seed numpy, TF and the env for reproducibility
        np.random.seed(int(args['random_seed']))
        tf.set_random_seed(int(args['random_seed']))
        env.seed(int(args['random_seed']))
        state_dim = env.observation_space.shape[0]
        action_dim = env.action_space.shape[0]
        action_bound = env.action_space.high
        # Ensure action bound is symmetric (required by the tanh scaling)
        assert (env.action_space.high == -env.action_space.low)
        # Actor must be built before critic: critic slices
        # tf.trainable_variables() past the actor's variable count.
        actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
                             float(args['actor_lr']), float(args['tau']),
                             int(args['minibatch_size']))
        critic = CriticNetwork(sess, state_dim, action_dim,
                               float(args['critic_lr']), float(args['tau']),
                               float(args['gamma']),
                               actor.get_num_trainable_vars())
        if args['test']:
            test(sess, env, args, actor, critic)
        else:
            actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_dim), sigma=0.4)
            if args['use_gym_monitor']:
                # Only record video when not rendering interactively
                if not args['render_env']:
                    env = wrappers.Monitor(
                        env, args['monitor_dir'], video_callable=False, force=True)
                else:
                    env = wrappers.Monitor(env, args['monitor_dir'], force=True)
            train(sess, env, args, actor, critic, actor_noise, logdir)
            if args['use_gym_monitor']:
                env.monitor.close()
if __name__ == '__main__':
    # Quiet TF's Python and C++ logging before any graph is built
    tf.logging.set_verbosity(tf.logging.ERROR)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    parser = argparse.ArgumentParser(description='provide arguments for DDPG agent')
    # agent parameters
    parser.add_argument('--actor-lr', help='actor network learning rate', default=0.00001)
    parser.add_argument('--critic-lr', help='critic network learning rate', default=0.0001)
    parser.add_argument('--gamma', help='discount factor for critic updates', default=0.99)
    parser.add_argument('--tau', help='soft target update parameter', default=0.999)
    parser.add_argument('--buffer-size', help='max size of the replay buffer', default=1000000)
    parser.add_argument('--minibatch-size', help='size of minibatch for minibatch-SGD', default=1000)
    # run parameters
    parser.add_argument('--env', help='choose the gym env- tested on {Pendulum-v0}', default='InvertedPendulum-v2')
    parser.add_argument('--random-seed', help='random seed for repeatability', default=0)
    parser.add_argument('--max-episodes', help='max num of episodes to do while training', default=50000)
    parser.add_argument('--max-episode-len', help='max length of 1 episode', default=1000)
    parser.add_argument('--render-env', help='render the gym env', action='store_true')
    parser.add_argument('--test', help='set false to train', action='store_true', default=False)
    parser.add_argument('--use-gym-monitor', help='record gym results', action='store_true')
    parser.add_argument('--monitor-dir', help='directory for storing gym results', default='./results_InvertedPendulum/gym_ddpg')
    parser.add_argument('--summary-dir', help='directory for storing tensorboard info', default='./results_InvertedPendulum/tf_ddpg')
    parser.set_defaults(render_env=True)
    parser.set_defaults(use_gym_monitor=True)
    args = vars(parser.parse_args())
    print('test: ', args['test'])
    # NOTE(review): this module-level import is what train() relies on for
    # `time`; move it to the top of the file if train() is ever imported.
    import time
    # Timestamped per-run log directory for logz
    logdir = args['env']+'_'+time.strftime("%d-%m-%Y_%H-%M-%S")
    logdir = os.path.join(os.curdir, logdir)
    # if not (os.path.exists(logdir)):
    #     os.makedirs(logdir)
    pp.pprint(args)
    main(args, logdir)
|
{"hexsha": "e14d019ded81124258dd53232c868d65e9b82cf7", "size": 22821, "ext": "py", "lang": "Python", "max_stars_repo_path": "5_Deep_Deterministic_Policy_Gradients/DDPG/DDPG.py", "max_stars_repo_name": "vaisakh-shaj/DeepReinforcementLearning", "max_stars_repo_head_hexsha": "99f62d9eee6626ac70c3410b72e0a3a151ec375f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2018-06-11T18:26:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-12T07:57:54.000Z", "max_issues_repo_path": "5_Deep_Deterministic_Policy_Gradients/DDPG/DDPG.py", "max_issues_repo_name": "vaisakh-shaj/DeepReinforcementLearning", "max_issues_repo_head_hexsha": "99f62d9eee6626ac70c3410b72e0a3a151ec375f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-06-21T15:17:57.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-12T14:42:05.000Z", "max_forks_repo_path": "5_Deep_Deterministic_Policy_Gradients/DDPG/DDPG.py", "max_forks_repo_name": "vaisakh-shaj/DeepReinforcementLearning", "max_forks_repo_head_hexsha": "99f62d9eee6626ac70c3410b72e0a3a151ec375f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-07-10T10:16:22.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-02T10:48:32.000Z", "avg_line_length": 39.414507772, "max_line_length": 133, "alphanum_fraction": 0.624293414, "include": true, "reason": "import numpy", "num_tokens": 5118}
|
! Read a single command-line argument (the matrix size) and echo it back.
program cmd_args
    implicit none
    character(len=32) :: arg_matrix_size_str ! matrix size as raw argument text
    integer :: arg_matrix_size               ! parsed matrix size

    ! Exactly one argument is expected; abort otherwise.
    if ( command_argument_count() .ne. 1 ) then
        write(*,*) 'Error, only one argument is required for matrix size. Aborting'
        stop
    endif

    ! retrieve the argument and convert the string to an integer
    call get_command_argument(1, arg_matrix_size_str)
    read(arg_matrix_size_str, *) arg_matrix_size

    WRITE(*,'(a,i0)') 'Matrix size is: ', arg_matrix_size
end program cmd_args
|
{"hexsha": "3365c98fbb32a8f1b7c50f757f16d23b60c1f7d7", "size": 569, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "misc/cmd_args.f90", "max_stars_repo_name": "eusojk/fortran-programs", "max_stars_repo_head_hexsha": "60fe727a341615153e044e7ac7deabc435444e39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "misc/cmd_args.f90", "max_issues_repo_name": "eusojk/fortran-programs", "max_issues_repo_head_hexsha": "60fe727a341615153e044e7ac7deabc435444e39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "misc/cmd_args.f90", "max_forks_repo_name": "eusojk/fortran-programs", "max_forks_repo_head_hexsha": "60fe727a341615153e044e7ac7deabc435444e39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4705882353, "max_line_length": 83, "alphanum_fraction": 0.6326889279, "num_tokens": 133}
|
import os
import open3d as o3d
import numpy as np
import copy
import math
# Inlier distance threshold used by get_gt_inliers (same units as the clouds)
NOISE_BOUND = 0.05
# Display colors (RGB components in [0, 1]) for the two fragments and the GT
FRAG1_COLOR = [0, 0.651, 0.929]
FRAG2_COLOR = [1, 0.706, 0]
GT_COLOR = [0, 1, 0]
def load_all_gt_pairs(gt_log_path):
    """Parse a GT .log file into a dict of (frag1, frag2) -> 4x4 np.ndarray.

    A header line has three space-separated fields (frag1, frag2, extra);
    the following four tab-separated lines hold the transformation rows.
    """
    with open(gt_log_path) as f:
        lines = f.readlines()

    def parse_row(row):
        return [float(tok.strip()) for tok in row.split("\t")[:4]]

    pairs = {}
    for idx, line in enumerate(lines):
        fields = [tok.strip() for tok in line.split(" ")]
        if len(fields) != 3:
            continue  # not a header line
        key = (int(fields[0]), int(fields[1]))
        pairs[key] = np.array([parse_row(lines[idx + r]) for r in range(1, 5)])
    return pairs
def load_gt_transformation(fragment1_idx, fragment2_idx, gt_log_path):
    """Load the GT 4x4 transformation for one fragment pair from a .log file.

    Args:
        fragment1_idx, fragment2_idx: pair indices to look up.
        gt_log_path: path to the GT .log file.

    Returns:
        4x4 np.ndarray, or None if the pair is not present.

    Fix vs. original: only lines with at least three space-separated fields
    are treated as pair headers (consistent with load_all_gt_pairs); matrix
    rows can no longer be misread as headers or trigger an IndexError.
    """
    with open(gt_log_path) as f:
        content = f.readlines()
    for i in range(len(content)):
        tokens = [k.strip() for k in content[i].split(" ")]
        if len(tokens) < 3:
            continue  # matrix rows are tab-separated, not a header
        if tokens[0] == str(fragment1_idx) and tokens[1] == str(fragment2_idx):
            def line_to_list(line):
                return [float(k.strip()) for k in line.split("\t")[:4]]
            # The four lines after the header hold the matrix rows
            gt_mat = np.array([line_to_list(content[i+1]),
                               line_to_list(content[i+2]),
                               line_to_list(content[i+3]),
                               line_to_list(content[i+4])])
            return gt_mat
    return None
def get_gt_inliers(fragment1_points, fragment2_points, gt_mat):
    """Return the index set of GT-inlier correspondences.

    fragment2_points is transformed by gt_mat; index i is an inlier when the
    transformed point lies within NOISE_BOUND of fragment1_points[i].
    """
    # Apply the GT transform to fragment 2 via an open3d point cloud
    gt_fragment1 = o3d.geometry.PointCloud()
    gt_fragment1.points = o3d.utility.Vector3dVector(fragment2_points)
    gt_fragment1.transform(gt_mat)
    transformed = np.asarray(gt_fragment1.points)

    inliers_set = set()
    num_inliers = 0
    total = 0
    for idx in range(fragment1_points.shape[0]):
        total += 1
        residual = np.linalg.norm(transformed[idx, :] - fragment1_points[idx, :])
        if residual <= NOISE_BOUND:
            inliers_set.add(idx)
            num_inliers += 1

    print("GT Inliers count:", num_inliers)
    print("GT Inliers:", inliers_set)
    return inliers_set
def compose_mat4_from_teaserpp_solution(solution):
    """Compose a 4x4 homogeneous matrix from a TEASER++ solution.

    Args:
        solution: object with .scale (float), .rotation (3x3), .translation (3,).

    Returns:
        4x4 np.ndarray equal to T @ R (scale == 1) or T @ R @ S otherwise.

    Fix vs. original: removed a dead `M = T.dot(R)` that was computed and then
    unconditionally recomputed inside the if/else.
    """
    s = solution.scale
    T = np.eye(4)
    T[0:3, 3] = solution.translation
    R = np.eye(4)
    R[0:3, 0:3] = solution.rotation
    if s == 1:
        return T.dot(R)
    # Fold the uniform scale in as a diagonal block
    S = np.eye(4)
    S[0:3, 0:3] = np.diag([s, s, s])
    return T.dot(R).dot(S)
def get_angular_error(R_gt, R_est):
    """Return the angular error in degrees between two 3x3 rotation matrices.

    Uses acos((trace(R_gt^T R_est) - 1) / 2), clamped to [-1, 1] to guard
    against floating-point drift.

    Fix vs. original: the except branch containing `import pdb;
    pdb.set_trace()` was unreachable (the clamp already keeps acos in domain)
    and would have dropped production runs into a debugger; removed.
    """
    A = (np.trace(np.dot(R_gt.T, R_est)) - 1) / 2.0
    # Clamp before acos so slightly off-unitary inputs don't raise ValueError
    A = min(1.0, max(-1.0, A))
    return math.degrees(math.fabs(math.acos(A)))
def compute_transformation_diff(est_mat, gt_mat):
    """Return (rotation_error_deg, translation_error) between two 4x4 SE3 matrices.

    Rotation error comes from get_angular_error on the 3x3 blocks; the
    translation error is the Euclidean norm of the last-column difference
    (the shared homogeneous 1 cancels out).
    """
    rot_error = get_angular_error(gt_mat[:3, :3], est_mat[:3, :3])
    trans_error = np.linalg.norm(gt_mat[:, -1] - est_mat[:, -1])
    return rot_error, trans_error
|
{"hexsha": "bd906b31dbc206afb197334e8959a06946482e24", "size": 3670, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/teaser_python_3dsmooth/bench_utils.py", "max_stars_repo_name": "plusk01/TEASER-plusplus", "max_stars_repo_head_hexsha": "0d497521d261b3fa35c4ca29eb86ba7cf9558f9f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 962, "max_stars_repo_stars_event_min_datetime": "2020-01-21T19:08:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T17:28:49.000Z", "max_issues_repo_path": "examples/teaser_python_3dsmooth/bench_utils.py", "max_issues_repo_name": "plusk01/TEASER-plusplus", "max_issues_repo_head_hexsha": "0d497521d261b3fa35c4ca29eb86ba7cf9558f9f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 105, "max_issues_repo_issues_event_min_datetime": "2020-01-24T15:11:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T02:28:52.000Z", "max_forks_repo_path": "examples/teaser_python_3dsmooth/bench_utils.py", "max_forks_repo_name": "plusk01/TEASER-plusplus", "max_forks_repo_head_hexsha": "0d497521d261b3fa35c4ca29eb86ba7cf9558f9f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 234, "max_forks_repo_forks_event_min_datetime": "2020-01-21T12:28:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T08:41:31.000Z", "avg_line_length": 26.4028776978, "max_line_length": 79, "alphanum_fraction": 0.5558583106, "include": true, "reason": "import numpy", "num_tokens": 1019}
|
# Copyright (c) 2017 VisualDL Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
from __future__ import print_function
import numpy as np
from visualdl import LogWriter
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.v2.fluid.framework as framework
from paddle.v2.fluid.initializer import NormalInitializer
from paddle.v2.fluid.param_attr import ParamAttr
# create VisualDL logger and directory
logdir = "./tmp"
logwriter = LogWriter(logdir, sync_cycle=10)  # flush to disk every 10 records

# create 'train' run
with logwriter.mode("train") as writer:
    # create 'loss' scalar tag to keep track of loss function
    loss_scalar = writer.scalar("loss")

with logwriter.mode("train") as writer:
    acc_scalar = writer.scalar("acc")

# number of image samples captured per logging step
num_samples = 4
with logwriter.mode("train") as writer:
    conv_image = writer.image("conv_image", num_samples,
                              1)  # show 4 samples for every 1 step
    input_image = writer.image("input_image", num_samples, 1)

with logwriter.mode("train") as writer:
    param1_histgram = writer.histogram(
        "param1", 100)  # 100 buckets, e.g 100 data sets in a histograms
def vgg16_bn_drop(input):
    """VGG16-style conv stack (batch-norm + dropout) for CIFAR-10.

    Returns (fc2, conv1): the final 512-wide FC output and the first conv
    block's output (fetched later for image visualization).
    """
    def conv_block(src, num_filter, groups, dropouts):
        # One pooled group of `groups` conv+BN layers with per-layer dropout
        return fluid.nets.img_conv_group(
            input=src,
            pool_size=2,
            pool_stride=2,
            conv_num_filter=[num_filter] * groups,
            conv_filter_size=3,
            conv_act='relu',
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type='max')

    conv1 = conv_block(input, 64, 2, [0.3, 0])
    net = conv1
    # Remaining four VGG16 stages: (width, depth, dropout schedule)
    for width, depth, drops in ((128, 2, [0.4, 0]),
                                (256, 3, [0.4, 0.4, 0]),
                                (512, 3, [0.4, 0.4, 0]),
                                (512, 3, [0.4, 0.4, 0])):
        net = conv_block(net, width, depth, drops)

    drop = fluid.layers.dropout(x=net, dropout_prob=0.5)
    fc1 = fluid.layers.fc(input=drop, size=512, act=None)
    bn = fluid.layers.batch_norm(input=fc1, act='relu')
    drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
    fc2 = fluid.layers.fc(input=drop2, size=512, act=None)
    return fc2, conv1
# Network definition: VGG16 backbone -> softmax classifier over 10 classes
classdim = 10
data_shape = [3, 32, 32]  # CIFAR-10 images: 3 channels, 32x32 (CHW)
images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
net, conv1 = vgg16_bn_drop(images)
# Name the classifier weights "param1" so they can be fetched for the histogram
predict = fluid.layers.fc(
    input=net,
    size=classdim,
    act='softmax',
    param_attr=ParamAttr(name="param1", initializer=NormalInitializer()))
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
opts = optimizer.minimize(avg_cost)
accuracy = fluid.evaluator.Accuracy(input=predict, label=label)

BATCH_SIZE = 16
PASS_NUM = 1
# Shuffled CIFAR-10 training reader
train_reader = paddle.batch(
    paddle.reader.shuffle(paddle.dataset.cifar.train10(), buf_size=128 * 10),
    batch_size=BATCH_SIZE)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(place=place, feed_list=[images, label])
exe.run(fluid.default_startup_program())

step = 0
sample_num = 0
# Handle to the named FC weights, fetched each step for the histogram
start_up_program = framework.default_startup_program()
param1_var = start_up_program.global_block().var("param1")
for pass_id in range(PASS_NUM):
    accuracy.reset(exe)
    for data in train_reader():
        loss, conv1_out, param1, acc = exe.run(
            fluid.default_main_program(),
            feed=feeder.feed(data),
            fetch_list=[avg_cost, conv1, param1_var] + accuracy.metrics)
        pass_acc = accuracy.eval(exe)
        # all code below is for VisualDL
        # start picking sample from beginning
        if sample_num == 0:
            input_image.start_sampling()
            conv_image.start_sampling()
        # idx of -1 means this record was not selected for sampling
        idx1 = input_image.is_sample_taken()
        idx2 = conv_image.is_sample_taken()
        assert idx1 == idx2
        idx = idx1
        if idx != -1:
            image_data = data[0][0]
            # reshape the image to 32x32 and 3 channels (HWC for display)
            input_image_data = np.transpose(
                image_data.reshape(data_shape), axes=[1, 2, 0])
            # add sample to VisualDL Image Writer to view input image
            input_image.set_sample(idx, input_image_data.shape,
                                   input_image_data.flatten())
            conv_image_data = conv1_out[0][0]
            # add sample to view conv image
            conv_image.set_sample(idx, conv_image_data.shape,
                                  conv_image_data.flatten())
            sample_num += 1
            # when we have enough samples, call finish_sampling()
            if sample_num % num_samples == 0:
                input_image.finish_sampling()
                conv_image.finish_sampling()
                sample_num = 0
        # add record for loss and accuracy to scalar
        loss_scalar.add_record(step, loss)
        acc_scalar.add_record(step, acc)
        param1_histgram.add_record(step, param1.flatten())
        print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str(
            pass_acc))
        step += 1
# this model is slow, so if we can train two mini batch, we think it works properly.
# exit(0)
exit(1)
|
{"hexsha": "4d1777ca945b54bdb5cacfee3fc4c7e3d1fc9c87", "size": 5812, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo/paddle/paddle_cifar10.py", "max_stars_repo_name": "nepeplwu/VisualDL", "max_stars_repo_head_hexsha": "a6928902ca0802419fa337236b71d2db8e669e13", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-08-23T08:42:44.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-23T08:42:44.000Z", "max_issues_repo_path": "demo/paddle/paddle_cifar10.py", "max_issues_repo_name": "nepeplwu/VisualDL", "max_issues_repo_head_hexsha": "a6928902ca0802419fa337236b71d2db8e669e13", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo/paddle/paddle_cifar10.py", "max_forks_repo_name": "nepeplwu/VisualDL", "max_forks_repo_head_hexsha": "a6928902ca0802419fa337236b71d2db8e669e13", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-29T03:38:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-29T03:38:35.000Z", "avg_line_length": 34.3905325444, "max_line_length": 92, "alphanum_fraction": 0.6519270475, "include": true, "reason": "import numpy", "num_tokens": 1440}
|
module FiniteHorizonPOMDPs
# Finite-horizon (stage-indexed) extensions for POMDPs.jl models.
using POMDPs
using POMDPModelTools
using Random: Random, AbstractRNG
# Interface layer: horizon traits and per-stage state/observation accessors
# (implementations live in interface.jl).
export
    HorizonLength,
    FiniteHorizon,
    InfiniteHorizon,
    horizon,
    stage,
    stage_states,
    stage_stateindex,
    ordered_stage_states,
    stage_observations,
    stage_obsindex,
    ordered_stage_observations
include("interface.jl")
# `fixhorizon` is defined in fixhorizon.jl.
export
    fixhorizon
include("fixhorizon.jl")
end
|
{"hexsha": "3fb56624cdfbf1868eebdf37a98973991136ea58", "size": 409, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/FiniteHorizonPOMDPs.jl", "max_stars_repo_name": "JuliaPOMDP/FiniteHorizonPOMDPs.jl", "max_stars_repo_head_hexsha": "6579a0dcaf95d2e403a48af08465e9fc901be62e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-11-17T02:57:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-20T21:24:35.000Z", "max_issues_repo_path": "src/FiniteHorizonPOMDPs.jl", "max_issues_repo_name": "Omastto1/FiniteHorizonPOMDPs.jl", "max_issues_repo_head_hexsha": "bbb501bfc167b4e29839d3df8b3c89dad6cfee29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2021-01-10T00:51:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-22T08:53:58.000Z", "max_forks_repo_path": "src/FiniteHorizonPOMDPs.jl", "max_forks_repo_name": "Omastto1/FiniteHorizonPOMDPs.jl", "max_forks_repo_head_hexsha": "bbb501bfc167b4e29839d3df8b3c89dad6cfee29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:41:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-04T20:11:27.000Z", "avg_line_length": 14.1034482759, "max_line_length": 33, "alphanum_fraction": 0.7457212714, "num_tokens": 106}
|
# using AutomotiveDrivingModels
# using NearestNeighbors
# import AutomotiveDrivingModels: get_actions!, observe!, action_context, get_name
# import Base.rand
# import PyPlot
# export
# HRHC,
# curveDist,
# wrap_to_π,
# kdProject,
# generateObstacleMap,
# updateObstacleMap!,
# generateMotionMap,
# screenCollision,
# tailgateAvoidance,
# getSuccessorStates,
# loopProjectionKD,
# computeTrajectory,
# screenTrajectory,
# checkCollision,
# calculateObjective,
# plot_stϕ,
# plotHRHCInfo,
# plotObjectiveHorizon,
# plotSplineRoadway
# utility functions
function curveDist(pt1::CurvePt, pt2::CurvePt)
    # Euclidean distance between the positions of two curve points.
    Δx = pt1.pos.x - pt2.pos.x
    Δy = pt1.pos.y - pt2.pos.y
    sqrt(Δx^2 + Δy^2)
end
function wrap_to_π(θ)
    # Wrap a scalar angle (radians) into the interval [-π, π].
    twoπ = 2*Float64(π)
    r = θ - div(θ, twoπ)*twoπ   # remainder with the sign of θ, in (-2π, 2π)
    if r < -π
        r += twoπ
    elseif r > π
        r -= twoπ
    end
    r
end
# Project a single pose onto the track centerline via nearest-neighbor lookup.
# Returns (s, t, ϕ, idxA): arc length, signed lateral offset, heading error,
# and the index of the spline point behind the projection. Some callers
# destructure only (s, t, ϕ).
function kdProject(x,y,θ,tree,roadway,hrhc)
    """
    project single (x,y,Θ) point to roadway spline using kdtree to find the nearest spline point
    """
    curve = roadway.segments[1].lanes[1].curve
    # Δs = roadway.segments[1].lanes[1].curve[2].s - roadway.segments[1].lanes[1].curve[1].s
    idx_list,dist = knn(tree,[x;y],1)
    idx = idx_list[1]
    # bracket the nearest point with its two neighbors, wrapping around the loop
    idxA = idx-1
    idxB = idx+1
    if idx == length(curve)
        idxB = 1 # back to the beginning of the curve
    end
    if idx == 1
        idxA = length(curve)
    end
    # keep the neighbor closer to (x,y); the other side is replaced by idx itself
    dA = sqrt(sum(([curve[idxA].pos.x, curve[idxA].pos.y]-[x,y]).^2))
    dB = sqrt(sum(([curve[idxB].pos.x, curve[idxB].pos.y]-[x,y]).^2))
    if dA < dB
        idxB = idx
    else
        idxA = idx
    end
    # project onto the segment idxA→idxB; idx_t is the normalized position along it
    vec1 = [curve[idxB].pos.x - curve[idxA].pos.x, curve[idxB].pos.y - curve[idxA].pos.y, 0]
    vec2 = [x - curve[idxA].pos.x, y - curve[idxA].pos.y, 0]
    idx_t = dot(vec2, vec1)/norm(vec1)^2
    # interpolate heading and arc length; t's sign comes from the cross product
    pθ = curve[idxA].pos.θ + idx_t*(curve[idxB].pos.θ - curve[idxA].pos.θ)
    s = curve[idxA].s + idx_t*hrhc.Δs
    t = norm(vec2 - idx_t*vec1)*sign(sum(cross(vec1, vec2)))
    ϕ = wrap_to_π(θ - pθ)
    s,t,ϕ,idxA
end
# Hierarchical Receding Horizon Controller: a DriverModel that plans over a
# library of motion primitives (motion_map) and commits the first step of the
# best screened trajectory as its action.
type HRHC <: DriverModel{AccelDesang}
    # action_context::IntegratedContinuous
    car_ID::Int # index of the controlled vehicle in the scene
    v_map # per-velocity-index lookup of the next velocity index for each (accel, steer) command
    δ_map # per-velocity-index lookup of the next steering index for each (accel, steer) command
    motion_map # state changes associated with cmd = (v_command, δ_command) - this is the motion map
    v_cmd::Int # index of v in v_cmds
    δ_cmd::Int # index of δ in δ_cmds
    #car parameters with bicycle geometry model
    car_length::Float64 # wheel base
    wheel_base::Float64
    car_width::Float64
    ellipseA::Float64 # semi-minor axis of the bounding ellipse (collision checks)
    ellipseB::Float64 # semi-major axis of the bounding ellipse (collision checks)
    a_step::Float64 # max acceleration (m/s)
    μ::Float64 # friction coefficient
    v_range # possible velocities
    δ_range # possible steering angles
    successor_states # array of next states
    # current v, current δ
    v::Float64
    δ::Float64
    curve_ind::Int # index of the nearest centerline point
    Δs::Float64 # arc-length spacing of the centerline spline points
    # planning horizon
    h::Int
    Δt::Float64
    # logit level (level-k reasoning depth)
    k::Int
    # maximum deviation from center of track (if |t| > T_MAX, car is out of bounds)
    T_MAX::Float64
    # Action = Next State
    action::VehicleState
    function HRHC(car_ID::Int,roadway;
        car_length::Float64=4.8,
        wheel_base::Float64=4.4,
        car_width::Float64=2.5,
        v::Float64=0.0,
        δ::Float64=0.0,
        h::Int=10,
        Δt::Float64=1.0/24,
        ΔV₊::Float64=1.55,
        ΔV₋::Float64=3.05,
        Δδ::Float64=Float64(π)/12,
        v_min::Float64=0.0,
        v_max::Float64=100.0,
        a_step::Float64=10.0,
        a_range=[-1,0,1],
        μ::Float64=20.0,
        g::Float64=9.81,
        δ_max::Float64=Float64(π)/6,
        δ_step::Float64=Float64(π)/128,
        k::Int=1
        )
        hrhc = new()
        # lateral bound: half the free lane width once the car's width is removed
        hrhc.T_MAX=(roadway.segments[1].lanes[1].width - car_width)/2.0
        hrhc.car_ID = car_ID
        hrhc.car_length=car_length
        hrhc.wheel_base=wheel_base
        hrhc.car_width=car_width
        hrhc.h=h
        hrhc.Δt=Δt
        hrhc.a_step=a_step
        hrhc.μ=μ
        hrhc.k=k
        hrhc.motion_map, hrhc.v_map, hrhc.δ_map, hrhc.v_range, hrhc.δ_range = generateMotionMap(v_min,
            v_max,a_step,δ_max,δ_step,wheel_base,h,μ,Δt,a_range=a_range)
        hrhc.successor_states = zeros(size(hrhc.motion_map[1],1),size(hrhc.motion_map[1],2),3)
        hrhc.v=v
        hrhc.δ=δ
        hrhc.v_cmd = 1
        hrhc.δ_cmd = Int((length(hrhc.δ_range) - 1)/2) # start near straight-ahead
        hrhc.curve_ind=1
        hrhc.Δs=roadway.segments[1].lanes[1].curve[2].s-roadway.segments[1].lanes[1].curve[1].s
        # hrhc.action_context=context
        hrhc.action = VehicleState(VecSE2(0,0,0),0.0)
        # calculate semimajor axes of bounding ellipse with minimal area (for collision checking)
        W = car_width/2.0
        L = car_length/2.0
        # use quadratic formula to find B^2
        a = 2
        b = -4*L - (W^2)*2*(L^2)
        c = 2*L^4
        B = sqrt((-b + sqrt(b^2 - 4*a*c))/(2*a))
        A = sqrt(((W)^2) / (1 - (L/(B))^2))
        hrhc.ellipseA = A
        hrhc.ellipseB = B
        hrhc
    end
end
function generateObstacleMap(scene, models)
    # Allocate one trajectory buffer per (reasoning level, car id).
    # Each buffer row holds a predicted pose/command: x, y, θ, v, δ.
    k = maximum(driver.k for (id, driver) in models)
    n = length(scene)
    h = maximum(driver.h for (id, driver) in models) # h should be the same for all vehicles on the track
    obstacleDict = Dict()
    for level in 0:k
        levelDict = Dict()
        for (id, driver) in models
            levelDict[id] = zeros(h,5) # x,y,θ,v,δ
        end
        obstacleDict[level] = levelDict
    end
    return obstacleDict
end
function updateObstacleMap!(obstacleMap, level, car_ID, trajectory)
    # Copy `trajectory` into the top-left corner of car_ID's buffer at the
    # given reasoning level; unfilled rows/columns keep their previous values.
    rows = size(trajectory, 1)
    cols = size(trajectory, 2)
    obstacleMap[level][car_ID][1:rows, 1:cols] = trajectory
end
# Precompute the motion-primitive library. Returns:
#   motion_map - Dict: v_idx => (accel × steer × horizon step × [Δx,Δy,Δθ])
#                body-frame offsets accumulated over the horizon
#   v_map, δ_map - Dict: v_idx => next velocity/steering index per command,
#                clamped where the friction-circle limit μ*g would be exceeded
#   v_range, δ_range - the discretized velocity and steering values
function generateMotionMap(v_min,v_max,a_step,δ_max,δ_step,wheel_base,h,μ,Δt;g=9.81,a_range=[-1,0,1])
    v_step = a_step*Δt
    v_range = linspace(v_min,v_max,round(v_max/v_step))
    δ_range = linspace(-δ_max,δ_max,2*round((δ_max)/δ_step)+1)
    δ_range = (1/maximum(δ_range))*δ_range.*abs.(δ_range) # concentrate toward the middle
    R_range = wheel_base./tan.(δ_range) # turning radius for each steering angle (bicycle model)
    # a_range = [-1,0,1]
    motion_map = Dict()
    v_map = Dict()
    δ_map = Dict()
    # first pass: command-transition tables, limited by the friction circle
    for v_idx in 1:length(v_range)
        v = v_range[v_idx]
        v_map[v_idx] = zeros(Int,length(a_range),length(δ_range))
        δ_map[v_idx] = zeros(Int,length(a_range),length(δ_range))
        for a_idx in 1:length(a_range)
            Δv_idx = a_range[a_idx]
            # combined lateral (v²/R) and longitudinal (a) demand per steering option
            tire_force = sqrt(((v^2)./abs.(R_range)).^2 + (Δv_idx*a_step)^2)
            if v_idx + Δv_idx >= 1 && v_idx + Δv_idx <= length(v_range)
                v_map[v_idx][a_idx,:] = Int(v_idx + Δv_idx)# v_range[v_idx+Δv_idx]
            else
                v_map[v_idx][a_idx,:] = Int(v_idx) #v_range[v_idx]
            end
            if v_idx > 1
                for j in 1:length(δ_range)
                    if tire_force[j] >= μ*g
                        v_map[v_idx][a_idx,j] = Int(v_idx) - 1# v_range[v_idx-1] # decelerate
                    end
                end
            end
            #δ_map[v_idx][a_idx,:] = δ_range.*(tire_force .< μ*g) +
            #    maximum(abs.(δ_range[tire_force .< μ*g])).*sign(δ_range).*(tire_force .>= μ*g)
            # feasible steering keeps its own index; infeasible steering is clamped
            # to the largest feasible magnitude on its side
            δ_map[v_idx][a_idx,:] = [i for i in 1:length(δ_range)].*(tire_force .< μ*g) +
                indmax(abs.(δ_range).*(tire_force .< μ*g)).*(tire_force .>= μ*g).*-sign(δ_range) +
                (length(δ_range)+1).*(sign(δ_range) .> 0).*(tire_force .>= μ*g)
        end
    end
    # fill motion map: roll the command tables forward h steps, accumulating
    # body-frame displacement via circular-arc integration
    for v_idx in 1:length(v_range)
        motion_map[v_idx] = zeros(length(a_range),length(δ_range),h,3)
        level_v_idx = copy(v_map[v_idx])
        level_δ_idx = copy(δ_map[v_idx])
        Δθ = zeros(length(a_range),length(δ_range))
        for i in 1:h
            radius = wheel_base./tan.(δ_range[level_δ_idx])
            Δs = ((v_range[v_idx] + v_range[level_v_idx])./2.0)*Δt
            Δθ = Δs./radius
            ΔX = abs.(radius) .* sin.(abs.(Δθ))
            # straight-ahead column (infinite radius): move Δs forward, no lateral offset
            ΔX[:,Int((length(δ_range)-1)/2)+1] = Δs[:,Int((length(δ_range)-1)/2)+1]
            ΔY = radius.*(1 - cos.(Δθ))
            ΔY[:,Int((length(δ_range)-1)/2)+1] = 0
            if i == 1
                motion_map[v_idx][:,:,i,1] = ΔX
                motion_map[v_idx][:,:,i,2] = ΔY
                motion_map[v_idx][:,:,i,3] = Δθ
            else
                # rotate this step's arc into the frame reached at step i-1
                motion_map[v_idx][:,:,i,1] = motion_map[v_idx][:,:,i-1,1] +
                    ΔX.*cos.(motion_map[v_idx][:,:,i-1,3]) - ΔY.*sin.(motion_map[v_idx][:,:,i-1,3])
                motion_map[v_idx][:,:,i,2] = motion_map[v_idx][:,:,i-1,2] +
                    ΔX.*sin.(motion_map[v_idx][:,:,i-1,3]) + ΔY.*cos.(motion_map[v_idx][:,:,i-1,3])
                motion_map[v_idx][:,:,i,3] = Δθ + motion_map[v_idx][:,:,i-1,3] # + motion_map[v_idx][:,:,i-1,3]
            end
            for j in 1:length(level_v_idx) # update to next velocity
                level_v_idx[j] = v_map[level_v_idx[j]][j]
                level_δ_idx[j] = δ_map[level_v_idx[j]][j]
            end
        end
    end
    return motion_map, v_map, δ_map, v_range, δ_range
end
# Batched version of kdProject: project every candidate state in
# hrhc.successor_states onto the track centerline in one knn query.
# Returns (s_grid, t_grid, ϕ_grid) with the same 2-D layout as the
# successor-state grid; s is shifted past the lap boundary so it is
# monotone ahead of the car.
function loopProjectionKD(hrhc,scene,roadway,tree)
    """
    projects all points in hrhc.successor_states to the kdtree representing
    the spline points along the centerline of roadway
    """
    curve = roadway.segments[1].lanes[1].curve
    s_grid = zeros(size(hrhc.successor_states,1),size(hrhc.successor_states,2))
    t_grid = zeros(size(s_grid))
    ϕ_grid = zeros(size(s_grid))
    idx_grid = zeros(Int,size(s_grid))
    # stack all candidate (x, y) into a 2×N matrix for a single knn call
    pts = [reshape(hrhc.successor_states[:,:,1],size(hrhc.successor_states[:,:,1],1)*size(hrhc.successor_states[:,:,1],2),1)';
        reshape(hrhc.successor_states[:,:,2],size(hrhc.successor_states[:,:,2],1)*size(hrhc.successor_states[:,:,2],2),1)']
    idxs_list, _ = knn(tree,pts,1)
    idxs=reshape(idxs_list,size(hrhc.successor_states[:,:,2],1),size(hrhc.successor_states[:,:,2],2))
    for i in 1:size(s_grid,1)
        for j in 1:size(s_grid,2)
            # bracket the nearest spline point, wrapping around the closed loop
            idxA = idxs[i,j][1]-1
            idxB = idxs[i,j][1]+1
            if idxs[i,j][1] == length(curve)
                idxB = 1 # wrap to the beginning of the curve
            end
            if idxs[i,j][1] == 1
                idxA = length(curve) # wrap to the end of the curve
            end
            x = hrhc.successor_states[i,j,1]
            y = hrhc.successor_states[i,j,2]
            # keep the closer neighbor; the other side becomes the nearest point itself
            dA = sqrt(sum(([curve[idxA].pos.x, curve[idxA].pos.y]-[x,y]).^2))
            dB = sqrt(sum(([curve[idxB].pos.x, curve[idxB].pos.y]-[x,y]).^2))
            if dA < dB
                idxB = idxs[i,j][1]
            else
                idxA = idxs[i,j][1]
            end
            # project onto segment idxA→idxB; idx_t is the normalized position along it
            vec1 = [curve[idxB].pos.x - curve[idxA].pos.x, curve[idxB].pos.y - curve[idxA].pos.y, 0]
            vec2 = [x - curve[idxA].pos.x, y - curve[idxA].pos.y, 0]
            idx_t = dot(vec2, vec1)/norm(vec1)^2
            s_θ = curve[idxA].pos.θ + idx_t*(curve[idxB].pos.θ - curve[idxA].pos.θ)
            s_grid[i,j] = curve[idxA].s + idx_t*hrhc.Δs
            t_grid[i,j] = norm(vec2 - idx_t*vec1)*sign(sum(cross(vec1, vec2)))
            ϕ_grid[i,j] = wrap_to_π(hrhc.successor_states[i,j,3] - s_θ)
            idx_grid[i,j] = idxA
        end
    end
    # account for wrap-around
    s_grid[s_grid .< scene[hrhc.car_ID].state.posF.s] += curve[end].s + hrhc.Δs
    return s_grid, t_grid, ϕ_grid
end
function screenTrajectory(trajectory, obstacleMap, scene, roadway, hrhc, tree, k_level)
    """
    Return true if `trajectory` (rows of x,y,θ poses) leaves the track, i.e.
    if the signed lateral offset |t| of any pose exceeds hrhc.T_MAX after
    projection onto the roadway centerline; false otherwise.

    Collision screening against the other cars' predicted trajectories is
    handled by `screenCollision`; the large commented-out duplicate of that
    logic (plus an unused `threshold_dist` and a `collision_flag` that was
    never set) has been removed — the observable behavior is unchanged.
    The obstacleMap/scene/k_level parameters are kept for interface
    compatibility with existing callers.
    """
    for i in 1:size(trajectory,1)
        x = trajectory[i,1]
        y = trajectory[i,2]
        θ = trajectory[i,3]
        # project pose i onto the centerline; t is the signed lateral offset
        s,t,ϕ = kdProject(x,y,θ,tree,roadway,hrhc)
        if abs(t) > hrhc.T_MAX
            return true # out of bounds
        end
    end
    return false
end
function getSuccessorStates(ΔXYθ, car_ID, h, scene::Scene)
    """ rotate/translate the motion-primitive offsets at horizon step h into the global frame of car car_ID """
    pos = scene[car_ID].state.posG # current global pose of the car
    sinθ = sin.(pos.θ)
    cosθ = cos.(pos.θ)
    body_x = ΔXYθ[:,:,h,1]
    body_y = ΔXYθ[:,:,h,2]
    successor_states = zeros(size(ΔXYθ[:,:,h,:]))
    # standard 2-D rotation of the body-frame offsets, then translation
    successor_states[:,:,1] = pos.x + body_x*cosθ - body_y*sinθ
    successor_states[:,:,2] = pos.y + body_x*sinθ + body_y*cosθ
    successor_states[:,:,3] = pos.θ + ΔXYθ[:,:,h,3]
    return successor_states
end
function computeTrajectory(ΔXYθ, car_ID, scene, v_cmd, δ_cmd, h)
    # Transform the chosen primitive's body-frame offsets (steps 1:h for the
    # command pair v_cmd, δ_cmd) into global poses starting from the car's
    # current position. Returns an h×3 matrix of (x, y, θ).
    pos = scene[car_ID].state.posG
    body_x = ΔXYθ[v_cmd,δ_cmd,1:h,1]
    body_y = ΔXYθ[v_cmd,δ_cmd,1:h,2]
    body_θ = ΔXYθ[v_cmd,δ_cmd,1:h,3]
    sinθ = sin.(pos.θ)
    cosθ = cos.(pos.θ)
    trajectory = zeros(h,3)
    trajectory[:,1] = pos.x + body_x*cosθ - body_y*sinθ
    trajectory[:,2] = pos.y + body_x*sinθ + body_y*cosθ
    trajectory[:,3] = pos.θ + body_θ
    return trajectory
end
function checkCollisionElliptic(R,Δx,Δy,Δθ,L,W)
    # Elliptic footprint test: both cars are approximated by an ellipse with
    # semi-axes L (along heading) and W (across). They collide when the
    # center distance R is less than the sum of the two ellipse radii along
    # the line of centers, plus a safety buffer of W.
    buffer = W
    bearing = atan2(Δy,Δx)       # direction of ego relative to the opponent
    bearing_local = bearing - Δθ # the same direction in the opponent's frame
    radius_at(a) = W*L/sqrt((L*sin(a))^2 + (W*cos(a))^2) # ellipse radius toward angle a
    return R < radius_at(bearing) + radius_at(bearing_local) + buffer
end
# Mark candidate successor states that would collide with another car's
# level-(k_level-1) predicted trajectory. Returns a matrix shaped like
# hrhc.successor_states[:,:,1] with 1 where a collision is predicted.
function screenCollision(hrhc, obstacleMap, tree, roadway, scene, k_level)
    # padded bounding-ellipse semi-axes used in the elliptic collision test
    L = hrhc.ellipseB+2
    W = hrhc.ellipseA+2
    collisionFlag = zeros(size(hrhc.successor_states[:,:,1])) # stores locations collisions
    threshold_dist = hrhc.car_length*4 # must be at least this close before we care to calculate collision cost
    if k_level >= 1
        # for all other cars...
        for (id,trajectory) in obstacleMap[k_level - 1]
            if id != hrhc.car_ID
                state = scene[hrhc.car_ID].state
                state2 = scene[id].state
                diff = state.posG - state2.posG
                # compare arc-length positions so cars behind us are ignored
                s1,_,_ = kdProject(state.posG.x,state.posG.y,state.posG.θ,tree,roadway,hrhc)
                s2,_,_ = kdProject(state2.posG.x,state2.posG.y,state2.posG.θ,tree,roadway,hrhc)
                if (norm([diff.x, diff.y]) < threshold_dist) && (s1 <= s2) # don't care if opponent is behind us
                    R_idx = zeros(Int, size(hrhc.successor_states,1)*size(hrhc.successor_states,2)) # to store the sorted indices of R
                    for i in 1:hrhc.h
                        pos = VecSE2(trajectory[i,1:3]) # x,y,θ of opponent at time step h
                        successor_states = getSuccessorStates(hrhc.motion_map[hrhc.v_cmd],hrhc.car_ID,i,scene)
                        ΔX = successor_states[:,:,1] - pos.x # Δx, with opponent at origin
                        ΔY = successor_states[:,:,2] - pos.y # Δy with opponent at origin
                        Δθ = successor_states[:,:,3] - pos.θ # Δθ with opponent at origin
                        R = sqrt(ΔX.^2 + ΔY.^2)
                        R_idx = sortperm(reshape(R,size(R,1)*size(R,2),1)[:])
                        # walk candidates in order of increasing distance to the opponent
                        for idx in R_idx
                            if collisionFlag[idx] == 1
                                continue
                            end
                            if R[idx] > hrhc.car_length # no collision, and all other R values are greater
                                break
                            end
                            # R is less than hrhc.car_length
                            if checkCollisionElliptic(R[idx],ΔX[idx],ΔY[idx],Δθ[idx],L,W)
                                collisionFlag[idx] = 1
                            end
                            # ψ = atan2(ΔY[idx],ΔX[idx]) - Δθ[idx]
                            # r = W*L/sqrt((L*sin.(ψ))^2 + (W*cos.(ψ))^2) # radius of ellipse at given angle
                            # if R[idx]-r < W*L/8 # collision
                            # collisionFlag[idx] = 1
                            # end
                        end
                    end
                end
            end
        end
    end
    return collisionFlag
end
# Soft cost that discourages tailgating: for nearby opponents ahead of the
# ego car, returns a per-candidate cost matrix (same layout as the successor
# grid) that grows as a candidate end-state approaches the opponent's
# predicted end-of-horizon pose. Returns scalar 0 when no opponent applies.
function tailgateAvoidance(hrhc, obstacleMap, tree, roadway, scene, k_level)
    L = 2*hrhc.car_length
    W = hrhc.car_width
    collisionFlag = zeros(size(hrhc.successor_states[:,:,1])) # stores locations collisions
    threshold_dist = hrhc.car_length*4 # must be at least this close before we care to calculate collision cost
    R = zeros(size(hrhc.successor_states[:,:,1]))
    ψ = zeros(size(hrhc.successor_states[:,:,1]))
    cost = zeros(size(hrhc.successor_states[:,:,1]))
    if k_level >= 1
        # for all other cars...
        for (id,trajectory) in obstacleMap[k_level - 1]
            # NOTE(review): `id == 3` hard-codes this cost to a single opponent —
            # looks like leftover experiment code; confirm before reuse.
            if id != hrhc.car_ID && id == 3
                state = scene[hrhc.car_ID].state
                state2 = scene[id].state
                diff = state.posG - state2.posG
                s1,_,_ = kdProject(state.posG.x,state.posG.y,state.posG.θ,tree,roadway,hrhc)
                s2,_,_ = kdProject(state2.posG.x,state2.posG.y,state2.posG.θ,tree,roadway,hrhc)
                if (norm([diff.x, diff.y]) < threshold_dist) && (s1 <= s2) # don't care if opponent is behind us
                    pos = VecSE2(trajectory[hrhc.h,1:3]) # x,y,θ of opponent at time step h
                    successor_states = getSuccessorStates(hrhc.motion_map[hrhc.v_cmd],hrhc.car_ID,hrhc.h,scene)
                    ΔX = successor_states[:,:,1] - pos.x # Δx, with opponent at origin
                    ΔY = successor_states[:,:,2] - pos.y # Δy with opponent at origin
                    Δθ = successor_states[:,:,3] - pos.θ # Δθ with opponent at origin
                    R = sqrt(ΔX.^2 + ΔY.^2)
                    ψ = atan2(ΔY,ΔX) - Δθ # bearing to opponent in the opponent's frame
                    # cost decays with distance and is steepest directly behind the opponent
                    cost = (1./(W*R.*cos.(ψ).^2 + 2*L*R.*sin.(ψ).^2 + 1)).*(-cos.(ψ).^3 + 1.1) + 1
                end
            end
        end
    end
    return cost
end
function calculateObjective(car_ID,s₀,s,t,ϕ,T_MAX;ϕ_MAX=Float64(π),s_factor=1.0)
    """
    Combined minimization objective over candidate states (lower is better):
    rewards progress along the track (s relative to the current position s₀)
    and penalizes lateral offset t and heading error ϕ, plus a quadratic
    ϕ–t coupling term. car_ID is unused but kept for interface compatibility.

    Fix: the original body unconditionally reset `s_factor = 1`, silently
    ignoring the `s_factor` keyword argument; the caller-supplied weight is
    now honored. The default (1.0) reproduces the previous behavior exactly.
    """
    s_norm = (s-s₀)/maximum(s) # progress, normalized by the farthest candidate
    t_norm = t/T_MAX           # lateral offset, normalized by the track bound
    ϕ_norm = ϕ/ϕ_MAX           # heading error, normalized
    # costs: sixth powers make penalties steep only near the limits
    t_cost = abs.(t_norm).^6
    ϕ_cost = abs.(ϕ_norm).^6
    s_cost = s_factor*(1-s_norm)
    A = [1 .5; # [ϕ t] [a1 a2] [ϕ]
        .5 0] # [a2 a3] [t]
    tϕ_cost = A[1,1]*(ϕ_norm).^2 + (A[1,2]+A[2,1])*(ϕ_norm).*(t_norm) + A[2,2]*(t_norm).^2
    objective = 1+s_cost+t_cost+ϕ_cost+tϕ_cost
    return objective
end
# Main HRHC planning step: score and screen successor states, shrinking the
# lookahead horizon until a feasible candidate is found, then commit the
# first pose of that trajectory as the next action (only at the driver's own
# reasoning level k).
function AutomotiveDrivingModels.observe!(hrhc::HRHC, scene::Scene, roadway::Roadway, egoid::Int, tree::KDTree, obstacleMap, k_level)
    """
    Observe the current environment and select optimal action to apply at next
    time step
    """
    state = scene[hrhc.car_ID].state
    hrhc.curve_ind = state.posF.roadind.ind.i
    v = state.v # current v
    hrhc.v = v
    trajectory = zeros(hrhc.h,3)
    action_selected = false
    δ_cmd = 1
    a_cmd = 1
    i = 0
    # each iteration shortens the effective horizon (hrhc.h - i) — a fallback
    # when no full-horizon candidate survives screening
    for i in 0:(hrhc.h-1)
        if action_selected
            break # out of for loop
        end
        # calculate successor states
        hrhc.successor_states = getSuccessorStates(hrhc.motion_map[hrhc.v_cmd], hrhc.car_ID, hrhc.h-i, scene)
        # project successor states onto track
        s,t,ϕ = loopProjectionKD(hrhc, scene, roadway, tree)
        # optimization objective
        objective = calculateObjective(hrhc.car_ID, scene[hrhc.car_ID].state.posF.s,
            s, t, ϕ, hrhc.T_MAX)
        tailgateCost = tailgateAvoidance(hrhc, obstacleMap, tree, roadway, scene, k_level)
        objective = objective + tailgateCost
        # predicted collisions with other cars are hard-excluded
        collisionFlag = screenCollision(hrhc, obstacleMap, tree, roadway, scene, k_level)
        objective[collisionFlag .> 0] = Inf
        # greedily try candidates in objective order until one stays on track
        while (action_selected==false) && (minimum(objective) != Inf)
            index = indmin(objective) # find get a better method of optimizing this
            a_cmd, δ_cmd = ind2sub(s, index)
            hrhc.δ_cmd = δ_cmd
            # compute full trajectory up to horizon
            trajectory = computeTrajectory(hrhc.motion_map[hrhc.v_cmd], hrhc.car_ID, scene, a_cmd, δ_cmd, hrhc.h-i)
            # screen trajectory for collisions / validity
            out_of_bounds = screenTrajectory(trajectory, obstacleMap, scene, roadway, hrhc, tree, k_level)
            if out_of_bounds
                objective[index] = Inf
            else
                action_selected=true
                # updateObstacleMap!(obstacleMap, k_level, hrhc.car_ID, trajectory)
                if k_level != hrhc.k # only assign commands for the given k_level
                    return # do not update the action
                end
            end
        end
    end
    # commit the selected command pair and the first pose of the trajectory
    hrhc.δ_cmd = δ_cmd
    # hrhc.v_cmd = max(hrhc.v_cmd + a_cmd - 2,1) # assumes 3 options for acceleration
    hrhc.v_cmd = hrhc.v_map[hrhc.v_cmd][a_cmd,δ_cmd]
    hrhc.δ = hrhc.δ_range[hrhc.δ_cmd] # next δ
    hrhc.v = hrhc.v_range[hrhc.v_cmd] # next v
    next_state = VehicleState(VecSE2(trajectory[1,1:3]),roadway,hrhc.v)
    hrhc.action = next_state # action
end
# Human-readable model name used by the AutomotiveDrivingModels framework.
AutomotiveDrivingModels.get_name(::HRHC) = "HRHC"
# AutomotiveDrivingModels.action_context(driver::HRHC) = driver.action_context # AutomotiveDrivingModels.action_context
# The HRHC policy is deterministic: "sampling" it returns the action chosen in observe!.
Base.rand(hrhc::HRHC) = hrhc.action
# Plotting functions
function plotSplineRoadway(x,y,θ,lane_width)
    # Draw the centerline (x, y) plus the two lane edges, obtained by
    # offsetting each centerline point by ±lane_width/2 perpendicular to
    # the local heading θ.
    half_width = lane_width/2.0
    offset_x = half_width*sin.(θ)
    offset_y = half_width*cos.(θ)
    PyPlot.plot(x,y)
    PyPlot.plot(x + offset_x, y - offset_y, color="green")
    PyPlot.plot(x - offset_x, y + offset_y, color="green")
    PyPlot.axis("equal")
end
# Diagnostic plot: four panels (ϕ, s, t, log objective) of every candidate
# state over the whole horizon, drawn on top of the roadway segment near the
# car, with the chosen trajectory overlaid in red.
# NOTE(review): this calls getLegalMoves!/getSuccessorStates! and a
# hrhc.V_MAX field, none of which are defined in this file, and invokes
# calculateObjective with a different signature than the one defined above —
# this function appears stale; confirm before use.
function plotObjectiveHorizon(hrhc,scene,roadway,tree,trajectory,obstacleMap,xR,yR,θR)
    # window of centerline indices reachable within the horizon
    lo=hrhc.curve_ind
    if :V_MAX in fieldnames(hrhc)
        hi = hrhc.curve_ind + Int(1+div(hrhc.V_MAX*hrhc.Δt*hrhc.h,hrhc.Δs))
    else
        hi = hrhc.curve_ind + Int(1+div(hrhc.v_range[end]*hrhc.Δt*hrhc.h,hrhc.Δs))
    end
    lane_width = roadway.segments[1].lanes[1].width
    x = zeros(hrhc.h,size(hrhc.successor_states,1),size(hrhc.successor_states,2))
    y = zeros(size(x))
    Θ = zeros(size(x))
    s = zeros(size(x))
    t = zeros(size(x))
    ϕ = zeros(size(x))
    objective = zeros(size(x))
    # gather candidate states and their projections for every horizon step
    for i in 1:hrhc.h
        getLegalMoves!(hrhc, scene, h=i)
        getSuccessorStates!(hrhc, scene)
        x[i,:,:] = copy(hrhc.successor_states[:,:,1])
        y[i,:,:] = copy(hrhc.successor_states[:,:,2])
        Θ[i,:,:] = copy(hrhc.successor_states[:,:,3])
        s[i,:,:], t[i,:,:], ϕ[i,:,:] = loopProjectionKD(hrhc,scene,roadway,tree)
        objective[i,:,:] = calculateObjective(hrhc,scene, roadway, tree,s[i,:,:],t[i,:,:],ϕ[i,:,:],obstacleMap,hrhc.k,hrhc.h)
    end
    PyPlot.figure(figsize=[12,4])
    PyPlot.subplot(141) # ϕ
    plotSplineRoadway(xR[lo:hi],yR[lo:hi],θR[lo:hi],lane_width)
    PyPlot.scatter(x,y,c=ϕ,edgecolor="none")
    PyPlot.plot(trajectory[:,1],trajectory[:,2],color="red")
    PyPlot.axis("off")
    PyPlot.title("|phi|")
    PyPlot.subplot(142) # s
    plotSplineRoadway(xR[lo:hi],yR[lo:hi],θR[lo:hi],lane_width)
    PyPlot.scatter(x,y,c=s,edgecolor="none")
    PyPlot.plot(trajectory[:,1],trajectory[:,2],color="red")
    PyPlot.axis("off")
    PyPlot.title("s")
    PyPlot.subplot(143) # t
    plotSplineRoadway(xR[lo:hi],yR[lo:hi],θR[lo:hi],lane_width)
    PyPlot.scatter(x,y,c=t,edgecolor="none")
    PyPlot.plot(trajectory[:,1],trajectory[:,2],color="red")
    PyPlot.axis("off")
    PyPlot.title("t")
    PyPlot.subplot(144) # objective
    plotSplineRoadway(xR[lo:hi],yR[lo:hi],θR[lo:hi],lane_width)
    PyPlot.scatter(x,y,c=log(objective),edgecolor="none")
    PyPlot.plot(trajectory[:,1],trajectory[:,2],color="red")
    PyPlot.axis("off")
    PyPlot.title("log objective")
end
# Diagnostic plot: four panels (|ϕ|, |t|, s, log objective) of the current
# successor-state grid near the car, with the chosen trajectory in red and
# the car's position as a black dot.
# NOTE(review): hrhc.V_MAX is not a field of the HRHC type defined above,
# and the `hi > length(curve)` branch sets lo to the last index while
# recomputing the same (too large) hi — likely a stale/buggy wrap-around
# guard; confirm before use.
function plot_stϕ(hrhc,roadway,scene,x,y,θ,trajectory,s,t,ϕ,objective)
    lo=hrhc.curve_ind
    hi=hrhc.curve_ind + Int(1+2*div(hrhc.V_MAX*hrhc.Δt*hrhc.h,hrhc.Δs))
    if hi > length(roadway.segments[1].lanes[1].curve)
        lo = length(roadway.segments[1].lanes[1].curve)
        hi=hrhc.curve_ind + Int(1+2*div(hrhc.V_MAX*hrhc.Δt*hrhc.h,hrhc.Δs))
    end
    lane_width = roadway.segments[1].lanes[1].width
    PyPlot.figure(figsize=[12,4])
    PyPlot.subplot(141)
    plotSplineRoadway(x[lo:hi],y[lo:hi],θ[lo:hi],lane_width)
    # PyPlot.scatter(Pts[1,:],Pts[2,:],color="red")
    PyPlot.scatter(hrhc.successor_states[:,:,1],hrhc.successor_states[:,:,2],c=abs.(ϕ),edgecolor="none")
    PyPlot.plot(trajectory[:,1],trajectory[:,2],color="red")
    PyPlot.scatter(scene[hrhc.car_ID].state.posG.x, scene[hrhc.car_ID].state.posG.y, c="k", edgecolors="none",s=40)
    PyPlot.axis("off")
    PyPlot.title("|phi|")
    PyPlot.subplot(142)
    plotSplineRoadway(x[lo:hi],y[lo:hi],θ[lo:hi],lane_width)
    # PyPlot.scatter(Pts[1,:],Pts[2,:],color="red")
    PyPlot.scatter(hrhc.successor_states[:,:,1],hrhc.successor_states[:,:,2],c=abs.(t),edgecolor="none")
    PyPlot.plot(trajectory[:,1],trajectory[:,2],color="red")
    PyPlot.scatter(scene[hrhc.car_ID].state.posG.x, scene[hrhc.car_ID].state.posG.y, c="k", edgecolors="none",s=40)
    PyPlot.axis("off")
    PyPlot.title("|t|")
    PyPlot.subplot(143)
    plotSplineRoadway(x[lo:hi],y[lo:hi],θ[lo:hi],lane_width)
    # PyPlot.scatter(Pts[1,:],Pts[2,:],color="red")
    PyPlot.scatter(hrhc.successor_states[:,:,1],hrhc.successor_states[:,:,2],c=s,edgecolor="none")
    PyPlot.scatter(scene[hrhc.car_ID].state.posG.x, scene[hrhc.car_ID].state.posG.y, c="k", edgecolors="none",s=40)
    PyPlot.plot(trajectory[:,1],trajectory[:,2],color="red")
    PyPlot.axis("off")
    PyPlot.title("s")
    PyPlot.subplot(144)
    plotSplineRoadway(x[lo:hi],y[lo:hi],θ[lo:hi],lane_width)
    # PyPlot.scatter(Pts[1,:],Pts[2,:],color="red")
    PyPlot.scatter(hrhc.successor_states[:,:,1],hrhc.successor_states[:,:,2],c=log(objective),edgecolor="none")
    PyPlot.plot(trajectory[:,1],trajectory[:,2],color="red")
    PyPlot.scatter(scene[hrhc.car_ID].state.posG.x, scene[hrhc.car_ID].state.posG.y, c="k", edgecolors="none",s=40)
    PyPlot.axis("off")
    PyPlot.title("objective")
end
# Overview plot: the full raceway with every car and the ego car's motion
# primitives (left), and a zoomed view of the log-objective over the
# successor grid with the selected command highlighted (right).
# NOTE(review): hrhc.V_MAX is not a field of the HRHC type defined above;
# same suspect lo/hi wrap-around guard as in plot_stϕ — confirm before use.
function plotHRHCInfo(hrhc,models,scene,roadway,trajectory,cmd,x,y,Θ,s,t,ϕ,objective)
    lo = hrhc.curve_ind
    hi = hrhc.curve_ind + Int(1+2*div(hrhc.V_MAX*hrhc.Δt*hrhc.h,hrhc.Δs))
    lane_width = roadway.segments[1].lanes[1].width
    if hi > length(roadway.segments[1].lanes[1].curve)
        lo = length(roadway.segments[1].lanes[1].curve)
        hi=hrhc.curve_ind + Int(1+2*div(hrhc.V_MAX*hrhc.Δt*hrhc.h,hrhc.Δs))
    end
    PyPlot.figure(figsize=[12,10])
    # Plot Raceway
    PyPlot.subplot(221)
    # plotSplineRoadway(x[lo:hi],y[lo:hi],θ[lo:hi],lane_width)
    plotSplineRoadway(x,y,Θ,lane_width)
    PyPlot.scatter(hrhc.successor_states[:,:,1],hrhc.successor_states[:,:,2],color="red")
    PyPlot.plot(trajectory[:,1],trajectory[:,2],color="red")
    PyPlot.scatter(roadway.segments[1].lanes[1].curve[hrhc.curve_ind].pos.x, roadway.segments[1].lanes[1].curve[hrhc.curve_ind].pos.y, c="white", s=40)
    # ego car in red, all other cars in blue
    for (id,car) in models
        if id == hrhc.car_ID
            PyPlot.scatter(scene[id].state.posG.x,scene[id].state.posG.y,c="red",s=20)
        else
            PyPlot.scatter(scene[id].state.posG.x,scene[id].state.posG.y,c="blue",s=20)
        end
    end
    PyPlot.axis("off")
    PyPlot.title("Raceway with Motion Primitives")
    PyPlot.subplot(222)
    plotSplineRoadway(x[lo:hi],y[lo:hi],Θ[lo:hi],lane_width)
    PyPlot.scatter(scene[hrhc.car_ID].state.posG.x, scene[hrhc.car_ID].state.posG.y, c="red", edgecolors="none",s=40)
    PyPlot.scatter(hrhc.successor_states[:,:,1], hrhc.successor_states[:,:,2],c=log(objective),edgecolors="none")
    # highlight the selected (accel, steer) command
    PyPlot.scatter(hrhc.successor_states[cmd[1],cmd[2],1], hrhc.successor_states[cmd[1],cmd[2],2],c="white",s=40)
    PyPlot.plot(trajectory[:,1],trajectory[:,2],color="red")
    PyPlot.axis("off")
    PyPlot.title("Log Objective Function")
end
|
{"hexsha": "fa747786bce953096263b4e957791a5c54026bb0", "size": 30390, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/controllers/HierarchicalRecedingHorizonController.jl", "max_stars_repo_name": "kylejbrown17/LevelKRacing.jl", "max_stars_repo_head_hexsha": "2e66f89cbe2afe571f23030ad04bc48de77c5e98", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-06-06T22:43:13.000Z", "max_stars_repo_stars_event_max_datetime": "2017-06-06T22:43:13.000Z", "max_issues_repo_path": "src/controllers/HierarchicalRecedingHorizonController.jl", "max_issues_repo_name": "kylejbrown17/LevelKRacing.jl", "max_issues_repo_head_hexsha": "2e66f89cbe2afe571f23030ad04bc48de77c5e98", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/controllers/HierarchicalRecedingHorizonController.jl", "max_forks_repo_name": "kylejbrown17/LevelKRacing.jl", "max_forks_repo_head_hexsha": "2e66f89cbe2afe571f23030ad04bc48de77c5e98", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3585657371, "max_line_length": 151, "alphanum_fraction": 0.5804211912, "num_tokens": 9564}
|
import os
# set number of threads - this should be optimized for your compute instance
mynt="16"
os.environ["TF_NUM_INTEROP_THREADS"] = mynt
os.environ["TF_NUM_INTRAOP_THREADS"] = mynt
os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = mynt
import os.path
from os import path
import glob as glob
import math
import tensorflow
import ants
import antspynet
import tensorflow as tf
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from superiq import super_resolution_segmentation_per_label
from superiq import ljlf_parcellation
from superiq import images_to_list
from superiq import check_for_labels_in_image
from superiq import sort_library_by_similarity
from superiq import basalforebrain_segmentation
from superiq import native_to_superres_ljlf_segmentation
from superiq import list_to_string
# get data from here https://ndownloader.figshare.com/files/26224727
tdir = "/Users/stnava/data/superiq_data_resources/"  # root of the downloaded data bundle
# fail fast if the data bundle is not present
if ( not path. exists( tdir ) ):
    raise RuntimeError('Failed to find the data directory')
# library images: OASIS30 brains with matching JLF-OR segmentations (sorted so
# the two lists stay index-aligned)
brains = glob.glob(tdir+"segmentation_libraries/OASIS30/Brains/*")
brains.sort()
brainsSeg = glob.glob(tdir+"segmentation_libraries/OASIS30/SegmentationsJLFOR/*")
brainsSeg.sort()
# group template and its DKT label image
templatefilename = tdir + "template/adni_template.nii.gz"
templatesegfilename = tdir + "template/adni_template_dkt_labels.nii.gz"
# parameters for the native-resolution LJLF segmentation pass
seg_params={
    'submask_dilation': 8,
    'reg_iterations': [100, 100, 20],
    'searcher': 1,
    'radder': 2,
    'syn_sampling': 32,
    'syn_metric': 'mattes',
    'max_lab_plus_one': True, 'verbose': False}
# parameters for the super-resolution LJLF pass (mirrors seg_params)
seg_params_sr={
    'submask_dilation': seg_params['submask_dilation']*1,
    'reg_iterations': seg_params['reg_iterations'],
    'searcher': seg_params['searcher'],
    'radder': seg_params['radder'],
    'syn_sampling': seg_params['syn_sampling'],
    'syn_metric': seg_params['syn_metric'],
    'max_lab_plus_one': True, 'verbose': False}
# super-resolution settings: 2x upsampling in each dimension
sr_params={"upFactor": [2,2,2], "dilation_amount": seg_params["submask_dilation"], "verbose":True}
mdl = tf.keras.models.load_model("models/SEGSR_32_ANINN222_3.h5")  # pretrained SR network
# store output data
brainName = []
# the three types of output which we will compute in series
dicevalNativeSeg = []
dicevalSRNativeSeg = []
dicevalSRSeg = []
dicevalSRSeg2 = []
# candidate label sets — only the LAST assignment takes effect (basal forebrain)
wlab = [36,55,57] # for PPMI
wlab = [47,116,122,154,170] # eisai cortex
wlab = [75,76] # basal forebrain
evalfn='./dkt_eval' + list_to_string( wlab ) + '.csv'
print( "Labels:" + list_to_string( wlab ) + " " + evalfn, " : n : ", len( brains ) )
for k in range( len(brainName), len( brains ) ):
localid=os.path.splitext( os.path.splitext( os.path.basename( brains[k]) )[0])[0]
print( str(k) + " " + localid)
brainsLocal=brains.copy()
brainsSegLocal=brainsSeg.copy()
del brainsLocal[k:(k+1)]
del brainsSegLocal[k:(k+1)]
original_image = ants.image_read(brains[k])
sloop = native_to_superres_ljlf_segmentation(
target_image = original_image,
segmentation_numbers = wlab,
template = ants.image_read(templatefilename),
template_segmentation = ants.image_read(templatesegfilename),
library_intensity=images_to_list(brainsLocal),
library_segmentation=images_to_list(brainsSegLocal),
seg_params = seg_params,
seg_params_sr = seg_params_sr,
sr_params = sr_params,
sr_model = mdl )
# first - create a SR version of the image and the ground truth
# NOTE: we binarize the labels
# NOTE: the below call would only be used for evaluation ie when we have GT
nativeGroundTruth = ants.image_read(brainsSeg[k])
nativeGroundTruth = ants.mask_image( nativeGroundTruth, nativeGroundTruth, level = wlab, binarize=False )
gtSR = super_resolution_segmentation_per_label(
imgIn = ants.iMath( original_image, "Normalize"),
segmentation = nativeGroundTruth, # usually, an estimate from a template, not GT
upFactor = sr_params['upFactor'],
sr_model = mdl,
segmentation_numbers = wlab,
dilation_amount = sr_params['dilation_amount'],
verbose = sr_params['verbose']
)
nativeGroundTruthProbSR = gtSR['probability_images'][0]
nativeGroundTruthSR = gtSR['super_resolution_segmentation']
nativeGroundTruthBinSR = ants.mask_image( nativeGroundTruthSR, nativeGroundTruthSR, wlab, binarize=True)
# The full method involves: (GT denotes ground truth)
# [0.0] use template-based mapping to estimate initial labels
# [1.0] run LJLF at native resolution (evaluate this wrt native res GT)
# [1.1] evaluate [1.0] wrt NN-Up-GT
# [2.0] perform local simultaneous SR-Image and SR-Seg based on output of [1.0] (evaluate this wrt SR-GT)
# [2.1] evaluate [2.0] wrt NN-Up-GT
# [3.0] run LJLF at SR based on [2.0] (evaluate this at SR wrt SR-GT)
# [3.1] evaluate [3.0] this wrt NN-Up-GT
mypt = 0.5
srsegLJLF = ants.threshold_image( sloop['srSeg']['probsum'], mypt, math.inf )
nativeOverlapSloop = ants.label_overlap_measures( nativeGroundTruth, sloop['nativeSeg']['segmentation'] )
srOnNativeOverlapSloop = ants.label_overlap_measures( nativeGroundTruthSR, sloop['srOnNativeSeg']['super_resolution_segmentation'] )
srOverlapSloop = ants.label_overlap_measures( nativeGroundTruthSR, sloop['srSeg']['segmentation'] )
srOverlap2 = ants.label_overlap_measures( nativeGroundTruthBinSR, srsegLJLF )
# collect the 3 evaluation results - ready for data frame
brainName.append( localid )
dicevalNativeSeg.append(nativeOverlapSloop["MeanOverlap"][0])
dicevalSRNativeSeg.append( srOnNativeOverlapSloop["MeanOverlap"][0])
dicevalSRSeg.append( srOverlapSloop["MeanOverlap"][0])
dicevalSRSeg2.append( srOverlap2["MeanOverlap"][0])
print( brainName[k] + ": N: " + str(dicevalNativeSeg[k]) + " SRN: " + str(dicevalSRNativeSeg[k])+ " SRN: " + str(dicevalSRSeg[k]) )
################################################################################
dict = {
'name': brainName,
'diceNativeSeg': dicevalNativeSeg,
'diceSRNativeSeg': dicevalSRNativeSeg,
'diceSRSeg': dicevalSRSeg }
df = pd.DataFrame(dict)
df.to_csv( evalfn )
################################################################################
# these are the outputs you would write out, along with label geometry for each segmentation
ants.image_write( sloop['srOnNativeSeg']['super_resolution'], '/tmp/tempI.nii.gz' )
ants.image_write( nativeGroundTruthSR, '/tmp/tempGT.nii.gz' )
ants.image_write( sloop['srSeg']['segmentation'], '/tmp/tempSRSeg.nii.gz' )
ants.image_write( sloop['nativeSeg']['segmentation'], '/tmp/tempORSeg.nii.gz' )
|
{"hexsha": "aeff7eca9c84b69f000d9a72136695d0ace2c84f", "size": 6719, "ext": "py", "lang": "Python", "max_stars_repo_path": "applications/evaluate_DKT_dice_overlap_cv.py", "max_stars_repo_name": "stnava/superiq", "max_stars_repo_head_hexsha": "a13befe5f525bbef02cd095031952db62c5d054e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "applications/evaluate_DKT_dice_overlap_cv.py", "max_issues_repo_name": "stnava/superiq", "max_issues_repo_head_hexsha": "a13befe5f525bbef02cd095031952db62c5d054e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "applications/evaluate_DKT_dice_overlap_cv.py", "max_forks_repo_name": "stnava/superiq", "max_forks_repo_head_hexsha": "a13befe5f525bbef02cd095031952db62c5d054e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.7933333333, "max_line_length": 137, "alphanum_fraction": 0.6992111921, "include": true, "reason": "import numpy", "num_tokens": 1786}
|
import rospy
import cv2
import numpy as np
from sensor_msgs.msg import Image
from std_msgs.msg import String
from cv_bridge import CvBridge
import iamangrynow
def image_callback(data):
    """ROS subscriber callback: convert the incoming Image message to an
    OpenCV BGR frame and pass it to the external digit recognizer."""
    img = bridge.imgmsg_to_cv2(data, 'bgr8')  # OpenCV image
    iamangrynow.recognize_digit(img)
    # Debug-visualization path, currently disabled:
    #imgStack = stackImages(1.0, ([img, imgGray, imgCanny],
    #                       [imgDil, imgContour, imgCenters]))
    #image_pub.publish(bridge.cv2_to_imgmsg(imgStack, 'bgr8'))
# Node wiring: subscribe to the raw camera feed and run digit recognition
# on every frame received.
# NOTE(review): image_pub and maxRect_pub are created but never published
# to in the active code path -- confirm they are still needed.
rospy.init_node('computer_vision_sample')
image_pub = rospy.Publisher('~debug', Image, queue_size=1)
maxRect_pub = rospy.Publisher('maxRect', String, queue_size=1)
bridge = CvBridge()  # converts between ROS Image messages and OpenCV arrays
image_sub = rospy.Subscriber('main_camera/image_raw', Image, image_callback)
rospy.spin()  # block until the node is shut down
|
{"hexsha": "8ce506a482a8bbcfc99f252c348d9630e37671ba", "size": 757, "ext": "py", "lang": "Python", "max_stars_repo_path": "clover_simulation/src/static_test/digit_test.py", "max_stars_repo_name": "SailorTheMan/NTI_PoROSiata", "max_stars_repo_head_hexsha": "2eb2fe56ee67714492cf9c6e7bce258ccf9b9d8b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "clover_simulation/src/static_test/digit_test.py", "max_issues_repo_name": "SailorTheMan/NTI_PoROSiata", "max_issues_repo_head_hexsha": "2eb2fe56ee67714492cf9c6e7bce258ccf9b9d8b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "clover_simulation/src/static_test/digit_test.py", "max_forks_repo_name": "SailorTheMan/NTI_PoROSiata", "max_forks_repo_head_hexsha": "2eb2fe56ee67714492cf9c6e7bce258ccf9b9d8b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.037037037, "max_line_length": 76, "alphanum_fraction": 0.7305151915, "include": true, "reason": "import numpy", "num_tokens": 206}
|
# A Windows filesystem path split into its components: `parts` holds the
# path segments, `drive` the drive prefix (e.g. "C:"), and `root` the
# leading separator for absolute paths.
# NOTE: `immutable` is pre-1.0 Julia syntax, kept consistent with the
# rest of this file.
immutable WindowsPath <: AbstractPath
    parts::Tuple{Vararg{String}}
    drive::String
    root::String
end

# Convenience constructors: empty path, and parts-only (relative) path.
WindowsPath() = WindowsPath(tuple(), "", "")
WindowsPath(parts::Tuple) = WindowsPath(parts, "", "")
# Parse a path string into a WindowsPath, normalising POSIX separators to
# backslashes.  Extended-length ("\\?\") and UNC ("\\server") prefixes are
# not supported and raise an error.
function WindowsPath(str::AbstractString)
    if isempty(str)
        # Empty input is treated as the current directory.
        return WindowsPath(tuple("."), "", "")
    end
    if startswith(str, "\\\\?\\")
        error("The \\\\?\\ prefix is currently not supported.")
    end
    # Accept POSIX-style input by converting to Windows separators.
    str = replace(str, POSIX_PATH_SEPARATOR, WIN_PATH_SEPARATOR)
    if startswith(str, "\\\\")
        error("UNC paths are currently not supported.")
    elseif startswith(str, "\\")
        # Rooted path without a drive (e.g. "\foo\bar").
        tokenized = split(str, WIN_PATH_SEPARATOR)
        return WindowsPath(tuple(WIN_PATH_SEPARATOR, String.(tokenized[2:end])...), "", WIN_PATH_SEPARATOR)
    elseif contains(str, ":")
        # Drive-qualified path (e.g. "C:\foo" or drive-relative "C:foo").
        l_drive, l_path = splitdrive(str)
        tokenized = split(l_path, WIN_PATH_SEPARATOR)
        # A leading empty token means the path is rooted at the drive.
        l_root = isempty(tokenized[1]) ? WIN_PATH_SEPARATOR : ""
        if isempty(tokenized[1])
            tokenized = tokenized[2:end]
        end
        # Fold drive+root into the first component so joining round-trips.
        if !isempty(l_drive) || !isempty(l_root)
            tokenized = tuple(string(l_drive, l_root), tokenized...)
        end
        return WindowsPath(tuple(String.(tokenized)...), l_drive, l_root)
    else
        # Plain relative path.
        tokenized = split(str, WIN_PATH_SEPARATOR)
        return WindowsPath(tuple(String.(tokenized)...), "", "")
    end
end
# Case-insensitive structural equality: two Windows paths match when their
# parts, drive and root all agree up to letter case.
function ==(a::WindowsPath, b::WindowsPath)
    same_parts = lowercase.(parts(a)) == lowercase.(parts(b))
    same_drive = lowercase(drive(a)) == lowercase(drive(b))
    same_root = lowercase(root(a)) == lowercase(root(b))
    return same_parts && same_drive && same_root
end
# Render the path as a plain string by joining its components.
Base.String(path::WindowsPath) = joinpath(parts(path)...)

# Field accessors.
parts(path::WindowsPath) = path.parts
drive(path::WindowsPath) = path.drive
root(path::WindowsPath) = path.root
# Display as a p"..." literal, always rendered with forward slashes.
function Base.show(io::IO, path::WindowsPath)
    if isabs(path)
        # Anchor (drive + root) first, backslashes normalised, then the
        # remaining segments.
        body = string(replace(anchor(path), "\\", "/"),
                      join(parts(path)[2:end], "/"))
    else
        body = join(parts(path), "/")
    end
    print(io, "p\"")
    print(io, body)
    print(io, "\"")
end
# A path is absolute when it carries either a drive or a root component.
isabs(path::WindowsPath) = !(isempty(drive(path)) && isempty(root(path)))
# Tilde expansion is a POSIX concept; Windows paths are returned unchanged.
expanduser(path::WindowsPath) = path
|
{"hexsha": "6853429156ec37c54247c2cdc0da703c5ce56bbe", "size": 2194, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/windows.jl", "max_stars_repo_name": "vtjnash/FilePaths.jl", "max_stars_repo_head_hexsha": "a480e3c1c8b0239acb0f3320486f8be4e9b7a127", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/windows.jl", "max_issues_repo_name": "vtjnash/FilePaths.jl", "max_issues_repo_head_hexsha": "a480e3c1c8b0239acb0f3320486f8be4e9b7a127", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/windows.jl", "max_forks_repo_name": "vtjnash/FilePaths.jl", "max_forks_repo_head_hexsha": "a480e3c1c8b0239acb0f3320486f8be4e9b7a127", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4935064935, "max_line_length": 107, "alphanum_fraction": 0.6235186873, "num_tokens": 532}
|
from paper_1.data.data_loader import load_val_data, load_train_data, sequential_data_loader, random_data_loader
from paper_1.utils import read_parameter_file, create_experiment_directory
from paper_1.evaluation.eval_utils import init_metrics_object
from paper_1.baseline.main import train as baseline_train
from paper_1.model.model_utils import initialize_model
from torch.utils.tensorboard import SummaryWriter
from train import select_splitted_pseudo_labels
from os.path import dirname, abspath
from torch.optim import Adam
import pandas as pd
import numpy as np
import random
import torch
import os
def main(main_params: dict, data_params: dict, metric_params: dict, model_params: dict,
         parent_dir, source_domain: str, target_domain: str):
    """Run curriculum labeling for one source/target domain pair.

    For each quantile threshold, select pseudo labels on the target domain
    with the current model, then retrain a fresh model from scratch on the
    source data plus the selected pseudo labels; the newly trained model
    becomes the labeler for the next quantile.

    Args:
        main_params: training-loop settings (iterations, epochs, quantiles, dirs).
        data_params: data loading / splitting settings.
        metric_params: evaluation settings, incl. the inverse class mapping.
        model_params: model architecture settings.
        parent_dir: repository directory containing the parameter files.
        source_domain: name of the labeled source domain.
        target_domain: name of the unlabeled target domain.
    """
    # clear the cuda memory
    torch.cuda.empty_cache()

    # get the current validation fold (read once; the original read it twice)
    val_fold = data_params['data']['val_fold']

    # read the train params
    num_train_iter = main_params['num_train_iter']
    experiment_id = main_params['experiment_id']
    num_epochs = main_params['num_epochs']
    quantiles = main_params['quantiles']
    model_dir = main_params['model_dir']
    base_dir = main_params['base_dir']

    # get the data loader parameters
    balance_keys = data_params['data_loader']['balance_keys']
    batch_size = data_params['data_loader']['batch_size']

    # load the data
    data_train_src, data_train_tar = load_train_data(data_params, source_domain, target_domain)
    data_list_val = load_val_data(data_params)
    num_val_iter_list = [df.shape[0] for df in data_list_val]
    validation_domains = data_params['data']['validation']['validation_domains']
    val_loader_list = [sequential_data_loader(data_frame) for data_frame in data_list_val]

    # load a previously stored model, which is the init point for curriculum labeling
    model_path = model_dir + source_domain + '/' + 'None' + '/' + str(val_fold) + '/f1_best.pt'
    pretrained_model = torch.load(model_path)
    mapping = metric_params['inverse_class_mapping']

    # initialize the metrics object
    metric_object = init_metrics_object(metric_params)

    # collect parameter/baseline files to snapshot into the experiment directory
    file_names_params = os.listdir(parent_dir + '/parameters/')
    file_names_params = [parent_dir + '/parameters/' + x for x in file_names_params]
    file_names_baseline = os.listdir(parent_dir + '/baseline/')
    file_names_baseline = [parent_dir + '/baseline/' + x for x in file_names_baseline]
    file_names = []
    file_names.extend(file_names_params)
    file_names.extend(file_names_baseline)
    file_names = [x for x in file_names if not os.path.isdir(x)]
    exp_base_dir = create_experiment_directory(base_dir, source_domain, target_domain, val_fold, file_names, experiment_id)

    # select the compute device once for all curriculum stages
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    for quantile in quantiles:
        exp_dir = exp_base_dir + str(quantile) + '/'
        if not os.path.exists(exp_dir):
            os.makedirs(exp_dir)

        # create a tensorboard writer
        writer = SummaryWriter(exp_dir)

        # create data loader with current pseudo labels
        data_frame_pseudo = select_splitted_pseudo_labels(pretrained_model, data_train_tar, quantile, mapping)

        # delete the previously trained model, as it is no longer in use
        del pretrained_model

        # create the train data loader
        data_train = pd.concat([data_train_src, data_frame_pseudo])
        train_loader = random_data_loader(data_train, balance_keys, batch_size)

        # initialize a new model to train it from scratch; .to(device) also
        # works on CPU-only hosts (the original's unconditional model.cuda()
        # would crash there)
        model = initialize_model(model_params, parent_dir, device)
        model.to(device)

        # create an optimizer for the model
        optimizer = Adam(model.parameters(), lr=4e-5, betas=(0.9, 0.999))

        # train the newly created model from scratch
        baseline_train(model, optimizer, metric_object, num_train_iter, metric_params, train_loader, val_loader_list,
                       source_domain, writer, num_val_iter_list, validation_domains, num_epochs, exp_dir)

        # the freshly trained model becomes the next stage's labeler
        pretrained_model = model
        del model
        del optimizer
if __name__ == '__main__':
    # set the seed for reproducibility across python, numpy and torch
    seed_value = 0
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    torch.cuda.manual_seed_all(seed_value)

    # get the current and parent directory
    current_file = abspath(__file__)
    current_dir = dirname(current_file)
    parent_dir = dirname(current_dir)

    # parameter file locations (shared configs live in the parent package)
    metric_param_file = parent_dir + '/parameters/metric_params.yaml'
    model_param_file = parent_dir + '/parameters/model_params.yaml'
    data_param_file = parent_dir + '/parameters/data_params.yaml'
    main_param_file = current_dir + '/main_params.yaml'

    # load the parameters
    metric_params = read_parameter_file(metric_param_file)
    model_params = read_parameter_file(model_param_file)
    main_params = read_parameter_file(main_param_file)
    data_params = read_parameter_file(data_param_file)

    # define the domains, on which the models should be trained
    source_domains = ['Race', 'Religion', 'Sexual Orientation']
    target_domains = ['Race', 'Religion', 'Sexual Orientation']

    # run curriculum training for every cross-domain pair
    for source_domain in source_domains:
        for target_domain in target_domains:
            if source_domain != target_domain:
                main(main_params, data_params, metric_params, model_params, parent_dir, source_domain, target_domain)
|
{"hexsha": "7d6ad190979d6481b1c2985d3daa77d4ce6fbfd1", "size": 5689, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/paper_1/curriculum/main.py", "max_stars_repo_name": "ludwigflo/paper1", "max_stars_repo_head_hexsha": "13202febdb01a76bbf115435ce9676f6b82e1393", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/paper_1/curriculum/main.py", "max_issues_repo_name": "ludwigflo/paper1", "max_issues_repo_head_hexsha": "13202febdb01a76bbf115435ce9676f6b82e1393", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/paper_1/curriculum/main.py", "max_forks_repo_name": "ludwigflo/paper1", "max_forks_repo_head_hexsha": "13202febdb01a76bbf115435ce9676f6b82e1393", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3475177305, "max_line_length": 123, "alphanum_fraction": 0.7317630515, "include": true, "reason": "import numpy", "num_tokens": 1252}
|
# Bizzaro Francesco
# March 2020
#
# This script can generate random
# Symbolic Regression problem instances.
import random
import json
import math
import numpy as np
def f1(x):
    """Benchmark target: 3 + 1/(x+1) + x^2."""
    rational = 1 / (x + 1)
    quadratic = math.pow(x, 2)
    return 3 + rational + quadratic
def f2(x):
    """Benchmark target: x * sin(3x)."""
    return math.sin(3 * x) * x
def f3(x):
    """Benchmark target: cos(sin(x)) + x/2."""
    wave = math.cos(math.sin(x))
    return wave + 0.5 * x
def f4(x):
    """Benchmark target: x^3 - 2x + 3."""
    cubic = math.pow(x, 3)
    return 3 - 2 * x + cubic
def f5(x):
    """Benchmark target: arctan(x)."""
    return math.atan(x)
def f6(x):
    """Constant benchmark target: pi (the argument is ignored)."""
    return math.pi
def f7(x):
    """Benchmark target: sqrt(x); requires x >= 0."""
    return math.sqrt(x)
def f8(x):
    """Linear benchmark target: (11/7) * x."""
    return (11.0/7.0)*x
# Sample each selected target function on [0, 20) in steps of 0.5 and dump
# the (x, y) pairs as JSON problem instances under problems/.
# NOTE(review): f7 and f8 are defined above but excluded from this list --
# confirm that is intentional.
fs = [f1, f2, f3, f4, f5, f6]
for i, fun in enumerate(fs):
    x = [xx for xx in np.arange(0, 20, 0.5)]
    y = [fun(xx) for xx in x]
    p = {"x": x, "y": y}
    with open("problems/approx" + str(i) + ".json", "w") as f:
        json.dump(p, f)
# parenthesized so the script runs under both Python 2 and Python 3
# (the bare `print "done!"` statement was Python-2-only)
print("done!")
|
{"hexsha": "0d1ea2f2b06fe8306629d2ff55e41e7cbe57b745", "size": 744, "ext": "py", "lang": "Python", "max_stars_repo_path": "python-GAs/SymbolicRegression/generator.py", "max_stars_repo_name": "D33pBlue/Study-on-Genetic-Algorithms", "max_stars_repo_head_hexsha": "456f2ac93c307320ddee0ceded7f735f9e8e93a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python-GAs/SymbolicRegression/generator.py", "max_issues_repo_name": "D33pBlue/Study-on-Genetic-Algorithms", "max_issues_repo_head_hexsha": "456f2ac93c307320ddee0ceded7f735f9e8e93a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-24T11:51:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-24T11:51:12.000Z", "max_forks_repo_path": "python-GAs/SymbolicRegression/generator.py", "max_forks_repo_name": "D33pBlue/Study-on-Genetic-Algorithms", "max_forks_repo_head_hexsha": "456f2ac93c307320ddee0ceded7f735f9e8e93a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.5333333333, "max_line_length": 57, "alphanum_fraction": 0.5954301075, "include": true, "reason": "import numpy", "num_tokens": 265}
|
import tensorflow as tf
import numpy as np
# Minimal TF1 distributed-runtime demo: declare a three-worker cluster and
# start (then block on) the gRPC server for worker task 0.
# NOTE(review): workers[1] and workers[2] share host 127.0.0.2 on different
# ports -- confirm that is intended.
workers = ['127.0.0.1:50001', '127.0.0.2:50002', '127.0.0.2:50003']
# the dict key 'workers' is the job name; job_name below must match it
cluster_spec = tf.train.ClusterSpec({'workers': workers})
server = tf.train.Server(cluster_spec, job_name='workers', task_index=0)
server.join()  # blocks forever, serving the other tasks
|
{"hexsha": "e19d2db97302805724d10251ea87870615f765c0", "size": 259, "ext": "py", "lang": "Python", "max_stars_repo_path": "Stage1-SimpleConcept/TF-distribute-server1.py", "max_stars_repo_name": "markliou/DistributedTensorflow", "max_stars_repo_head_hexsha": "5b8c78fea2e8a0e061129a24144289aa55509077", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Stage1-SimpleConcept/TF-distribute-server1.py", "max_issues_repo_name": "markliou/DistributedTensorflow", "max_issues_repo_head_hexsha": "5b8c78fea2e8a0e061129a24144289aa55509077", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Stage1-SimpleConcept/TF-distribute-server1.py", "max_forks_repo_name": "markliou/DistributedTensorflow", "max_forks_repo_head_hexsha": "5b8c78fea2e8a0e061129a24144289aa55509077", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9, "max_line_length": 72, "alphanum_fraction": 0.7181467181, "include": true, "reason": "import numpy", "num_tokens": 81}
|
      double complex function BSYA1fggpppp(e1,p2,p3,e4,za,zb,zab,zba)
      implicit none
C-----Authors: John Campbell and Keith Ellis, March 2012
C---- arXiv:1101.5947 [hep-ph], Eq. (100),fully Badger-compliant
C---- (These are twiddle functions, c.f.arXiv:1101.5947[hep-ph],Eq.(91))
C---- Returns the ++++ helicity amplitude contribution built from the
C---- spinor products za/zb and the sandwich zab, scaled by the overall
C---- factor s23/6 per the normalisation of Eq. (100).
      include 'constants.f'
      include 'masses.f'
      include 'zprods_decl.f'
      include 'sprods_com.f'
      include 'zabprods_decl.f'
      include 'qdef.f'
      double precision s23
      integer e1,p2,p3,e4
C---- invariant s(p2,p3) of the two gluon momenta
      s23=s(p2,p3)
      BSYA1fggpppp=
     & -2d0*mt*(za(e1,e4)*zab(p2,q1,p2)-za(p2,e1)*za(p3,e4)*zb(p2,p3))
     & /(za(p2,p3)**3*zb(p2,p3))*(s23/6d0)
      return
      end
|
{"hexsha": "f698ea5bec083aff158a13156bcd795125f865b4", "size": 684, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "MCFM-JHUGen/src/TopdkBSY/BSYA1fggpppp.f", "max_stars_repo_name": "tmartini/JHUGen", "max_stars_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-06-08T13:09:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-04T19:59:36.000Z", "max_issues_repo_path": "MCFM-JHUGen/src/TopdkBSY/BSYA1fggpppp.f", "max_issues_repo_name": "tmartini/JHUGen", "max_issues_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2015-06-24T15:08:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-25T04:59:32.000Z", "max_forks_repo_path": "MCFM-JHUGen/src/TopdkBSY/BSYA1fggpppp.f", "max_forks_repo_name": "tmartini/JHUGen", "max_forks_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2015-05-04T22:15:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T10:04:40.000Z", "avg_line_length": 29.7391304348, "max_line_length": 72, "alphanum_fraction": 0.6111111111, "num_tokens": 260}
|
# =============================================================================
# IMPORT SCIPY MODULES
# =============================================================================
import numpy as np
from tabulate import tabulate
from numba import jit
class RotationHelper:
    """Utilities for rotating 6x6 compliance matrices between material and
    analysis coordinate frames, plus general 3-D rotation and homogeneous
    translation matrix generators."""
    def transformCompl(self,S,th,**kwargs):
        """Rotate the 6x6 compliance matrix `S` by the angles in `th`
        (degrees), applied about x, then y, then z.

        If the keyword `xsect` is True, the material is first rotated
        -90 degrees about y so the nominal fiber direction aligns with
        the z-axis (cross-sectional analysis convention).
        Returns the rotated compliance matrix.
        """
        xsect = kwargs.pop('xsect',False)
        Sxsect = S
        if xsect:
            # Note: This operation is done because during material property input,
            # the convention is that the fibers of a composite run in the
            # x direction, however for cross-sectional analysis, the nominal fiber
            # angle is parallel to the z-axis, and so before any in plane rotations
            # occur, the fiber must first be rotated about the y axis.
            Rysig, Ryeps = self.genCompRy(-90)
            Sxsect = np.dot(Ryeps,np.dot(Sxsect,np.linalg.inv(Rysig)))
        # Rotate material about x:
        Rxsig, Rxeps = self.genCompRx(th[0])
        Sxsect = np.dot(Rxeps,np.dot(Sxsect,np.linalg.inv(Rxsig)))
        # Rotate material about y:
        Rysig, Ryeps = self.genCompRy(th[1])
        Sxsect = np.dot(Ryeps,np.dot(Sxsect,np.linalg.inv(Rysig)))
        # Rotate material about z:
        Rzsig, Rzeps = self.genCompRz(th[2])
        Sxsect = np.dot(Rzeps,np.dot(Sxsect,np.linalg.inv(Rzsig)))
        return Sxsect
    def rotXYZ(self,th,deg2rad=True):
        """Return the combined 3x3 rotation matrix Rz*Ry*Rx for the
        x/y/z angles in `th` (degrees when `deg2rad` is True)."""
        if deg2rad:
            th = np.deg2rad(th)
        Rx = np.array([[1.,0.,0.],\
                       [0.,np.cos(th[0]),-np.sin(th[0])],\
                       [0.,np.sin(th[0]),np.cos(th[0])]])
        Ry = np.array([[np.cos(th[1]),0.,np.sin(th[1])],\
                       [0.,1.,0.],\
                       [-np.sin(th[1]),0.,np.cos(th[1])]])
        Rz = np.array([[np.cos(th[2]),-np.sin(th[2]),0.],\
                       [np.sin(th[2]),np.cos(th[2]),0.],\
                       [0.,0.,1.]])
        return np.dot(Rz,np.dot(Ry,Rx))
    def genRotMat(self,a,b):
        """Return the 3x3 rotation matrix rotating vector `a` onto `b`
        (Rodrigues-style construction from the cross product).

        NOTE(review): when a and b are antiparallel the cross product
        vanishes (s == 0) and the final division fails -- guard
        upstream if that case can occur.
        """
        if all(a==b):
            return np.eye(3)
        else:
            v = np.cross(a,b)
            s = np.linalg.norm(v)  # sine of the angle between a and b
            c = np.dot(a,b)        # cosine of the angle
            vstar = np.array([[0.,-v[2],v[1]],[v[2],0.,-v[0]],[-v[1],v[0],0.]])
            return np.eye(3)+vstar+np.dot(vstar,vstar)*(1-c)/s**2
    def genCompRx(self,th):
        """Return the 6x6 stress (Rxsig) and strain (Rxeps) Voigt-notation
        transformation matrices for a rotation of `th` degrees about x."""
        th = np.deg2rad(th)
        s = np.sin(th)
        c = np.cos(th)
        Rxsig = np.array([[1.,0.,0.,0.,0.,0.],\
                          [0.,c**2,s**2,2*c*s,0.,0.],\
                          [0.,s**2,c**2,-2*c*s,0.,0.],\
                          [0.,-c*s,c*s,c**2-s**2,0.,0.],\
                          [0.,0.,0.,0.,c,-s],\
                          [0.,0.,0.,0.,s,c]])
        Rxeps = np.array([[1.,0.,0.,0.,0.,0.],\
                          [0.,c**2,s**2,c*s,0.,0.],\
                          [0.,s**2,c**2,-c*s,0.,0.],\
                          [0.,-2*c*s,2*c*s,c**2-s**2,0.,0.],\
                          [0.,0.,0.,0.,c,-s],\
                          [0.,0.,0.,0.,s,c]])
        return Rxsig, Rxeps
    def genCompRy(self,th):
        """Return the 6x6 stress (Rysig) and strain (Ryeps) Voigt-notation
        transformation matrices for a rotation of `th` degrees about y."""
        th = np.deg2rad(th)
        s = np.sin(th)
        c = np.cos(th)
        Rysig = np.array([[c**2,0.,s**2,0.,2*c*s,0.],\
                          [0.,1.,0.,0.,0.,0.],\
                          [s**2,0.,c**2,0.,-2*c*s,0.],\
                          [0.,0.,0.,c,0.,-s],\
                          [-c*s,0.,c*s,0.,c**2-s**2,0.],\
                          [0.,0.,0.,s,0.,c]])
        Ryeps = np.array([[c**2,0.,s**2,0.,c*s,0.],\
                          [0.,1.,0.,0.,0.,0.],\
                          [s**2,0.,c**2,0.,-c*s,0.],\
                          [0.,0.,0.,c,0.,-s],\
                          [-2*c*s,0.,2*c*s,0.,c**2-s**2,0.],\
                          [0.,0.,0.,s,0.,c]])
        return Rysig, Ryeps
    def genCompRz(self,th):
        """Return the 6x6 stress (Rzsig) and strain (Rzeps) Voigt-notation
        transformation matrices for a rotation of `th` degrees about z."""
        th = np.deg2rad(th)
        s = np.sin(th)
        c = np.cos(th)
        Rzsig = np.array([[c**2,s**2,0.,0.,0.,2*c*s],\
                          [s**2,c**2,0.,0.,0.,-2*c*s],\
                          [0.,0.,1.,0.,0.,0.],\
                          [0.,0.,0.,c,s,0.],\
                          [0.,0.,0.,-s,c,0.],\
                          [-c*s,c*s,0.,0.,0.,c**2-s**2]])
        Rzeps = np.array([[c**2,s**2,0.,0.,0.,c*s],\
                          [s**2,c**2,0.,0.,0.,-c*s],\
                          [0.,0.,1.,0.,0.,0.],\
                          [0.,0.,0.,c,s,0.],\
                          [0.,0.,0.,-s,c,0.],\
                          [-2*c*s,2*c*s,0.,0.,0.,c**2-s**2]])
        return Rzsig, Rzeps
    def getEulerAxisRotMat(self,a,th):
        """Return the 3x3 rotation matrix for a rotation of `th` radians
        about the axis `a` (Euler-axis / Rodrigues rotation formula;
        assumes `a` is a unit vector -- TODO confirm at call sites)."""
        astar = np.array([[0.,-a[2],a[1]],[a[2],0.,-a[0]],[-a[1],a[0],0.]])
        a = np.reshape(a,(3,1))
        R = np.cos(th)*np.eye(3)+(1-np.cos(th))*np.dot(a,a.T)+np.sin(th)*astar
        return R
    def getTransMat(self,x):
        """Return the 4x4 homogeneous matrix translating by -x."""
        return np.array([[1.,0.,0.,-x[0]],\
                         [0.,1.,0.,-x[1]],\
                         [0.,0.,1.,-x[2]],\
                         [0.,0.,0.,1.]])
@jit
def genMAC(modes0,modes1,weightMat):
    """Compute the Modal Assurance Criterion (MAC) matrix between two
    sets of mode shapes (stored as columns), weighted by `weightMat`.

    MAC[i, j] compares column j of `modes0` against column i of
    `modes1`.  Both loop bounds use modes0's column count, so modes1 is
    assumed to hold the same number of modes -- TODO confirm.  The cross
    term is squared without taking a magnitude, which presumably assumes
    effectively real mode shapes; verify before using complex input.
    """
    MAC = np.zeros((np.size(modes0,axis=1),np.size(modes0,axis=1)))
    for i in range(0,np.size(modes0,axis=1)):
        for j in range(0,np.size(modes0,axis=1)):
            # weighted cross-projection squared over the product of the
            # weighted self-projections
            MAC[i,j] = np.dot(modes0[:,j],np.dot(weightMat,np.conj(modes1[:,i])))**2/\
                (np.dot(modes0[:,j],np.dot(weightMat,np.conj(modes0[:,j])))\
                *np.dot(modes1[:,i],np.dot(weightMat,np.conj(modes1[:,i]))))
    return MAC
|
{"hexsha": "9b9c31864bf3f39b0f7fa766799ce5fb6d95c771", "size": 5454, "ext": "py", "lang": "Python", "max_stars_repo_path": "AeroComBAT/Utilities.py", "max_stars_repo_name": "bennames/AeroComBAT-Project", "max_stars_repo_head_hexsha": "ddc7194d5ccc0b8bf09b73cc0c2c3d64adf4a472", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2016-01-20T04:42:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T04:49:48.000Z", "max_issues_repo_path": "AeroComBAT/Utilities.py", "max_issues_repo_name": "mldmnn/AeroComBAT-Project", "max_issues_repo_head_hexsha": "ddc7194d5ccc0b8bf09b73cc0c2c3d64adf4a472", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-04-18T14:19:26.000Z", "max_issues_repo_issues_event_max_datetime": "2016-04-18T21:55:25.000Z", "max_forks_repo_path": "AeroComBAT/Utilities.py", "max_forks_repo_name": "mldmnn/AeroComBAT-Project", "max_forks_repo_head_hexsha": "ddc7194d5ccc0b8bf09b73cc0c2c3d64adf4a472", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-05-03T07:45:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-07T08:55:14.000Z", "avg_line_length": 44.7049180328, "max_line_length": 86, "alphanum_fraction": 0.3861386139, "include": true, "reason": "import numpy,from numba", "num_tokens": 1775}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from pathlib import Path
import os
import sys
import time
import cv2
import numpy as np
import pandas as pd
from cova.dnn import infer, metrics
from cova.motion import object_crop as crop
from cova.motion.motion_detector import merge_overlapping_boxes, resize_if_smaller
def read_virat(fn):
    """Read a VIRAT object-annotation file into a DataFrame.

    The file is space-separated with one object instance per line:
    object_id, object_duration, current_frame, xmin, ymin, width,
    height, object_type.  Adds xmax/ymax columns and a textual `label`,
    keeping only person / car / vehicle rows.

    Args:
        fn: path to a `*.viratdata.objects.txt` annotation file.

    Returns:
        pandas.DataFrame with bounding boxes and labels.
    """
    annotations = pd.read_csv(fn, header=None, sep=" ", index_col=False)
    annotations.columns = [
        "object_id",
        "object_duration",
        "current_frame",
        "xmin",
        "ymin",
        "width",
        "height",
        "object_type",
    ]
    # Drop rows without a valid object type; copy so the column
    # assignments below do not raise SettingWithCopyWarning on a view.
    annotations = annotations[annotations.object_type > 0].copy()
    annotations["xmax"] = annotations["xmin"] + annotations["width"]
    annotations["ymax"] = annotations["ymin"] + annotations["height"]
    # object_type is a 1-based index into this label list.
    object_labels = ["person", "car", "vehicle", "object", "bike"]
    annotations["label"] = annotations["object_type"].apply(
        lambda obj: object_labels[obj - 1]
    )
    # The "object" and "bike" classes are not evaluated.
    annotations = annotations[annotations.label != "object"]
    annotations = annotations[annotations.label != "bike"]
    return annotations
def draw_detection(frame, box, label, score, color=(255, 0, 0)):
    """Draw a bounding box with a "label (score%)" caption onto `frame`.

    Args:
        frame: BGR image (numpy array), drawn on in place.
        box: (x1, y1, x2, y2) pixel coordinates.
        label: class name printed above the box.
        score: confidence in [0, 1].
        color: BGR triple used for box and text.
    """
    x1, y1, x2, y2 = box
    try:
        cv2.rectangle(frame, (x1, y1), (x2, y2), color, 1)
    except Exception as e:
        # Malformed coordinates must not abort the evaluation run (the
        # original dropped into an interactive pdb session here); report
        # the failure and skip this detection.
        print(f"draw_detection: could not draw box {box}: {e}")
        return
    cv2.putText(
        frame,
        f"{label} ({int(score*100)}%)",
        (x1, y1 - 10),
        cv2.FONT_HERSHEY_SIMPLEX,
        0.5,
        color,
        1,
    )
def draw_top5(frame, labels, scores, method, color=(255, 0, 0), pos=0):
    """Render a small panel with the method name and its top-5
    label/score pairs near the frame's upper-right corner; panels stack
    leftwards as `pos` grows."""
    if len(labels) < 5:
        print(labels)
        return
    # Light-gray backdrop for the text panel.
    height, width, _ = frame.shape
    left, top = (width - 200 * (pos + 1), 10)
    right, bottom = (width - 200 * pos, 10 + 15 * 7)
    cv2.rectangle(frame, (left, top), (right, bottom), (250, 250, 250), -1)
    cv2.putText(
        frame, method, (left + 10, top + 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2
    )
    # One line per detection, 15 px apart, below the method header.
    for row, (label, score) in enumerate(zip(labels[:5], scores[:5]), start=1):
        cv2.putText(
            frame,
            f"{label} ({int(score*100)}%)",
            (left + 10, top + 10 + 15 * row),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            color,
            1,
        )
def main():
parser = argparse.ArgumentParser(
description="This program evaluates accuracy of a CNN after using different BGS methods."
)
parser.add_argument(
"-v",
"--video",
type=str,
help="Path to a video or a sequence of image.",
default=None,
)
parser.add_argument(
"--algo",
type=str,
help="Background subtraction method (KNN, MOG2).",
default="mog",
)
# parser.add_argument('--gt', type=str, help='Path to ground-truth.')
# parser.add_argument('--bgs', type=str, help='Path to BGS results.')
parser.add_argument(
"--show", default=False, action="store_true", help="Show window with results."
)
parser.add_argument(
"--write", default=False, action="store_true", help="Write results as images."
)
parser.add_argument("--model", default=None, help="Path to CNN model.")
parser.add_argument(
"--min-score",
type=float,
default=0.1,
help="Minimum score to accept a detection.",
)
parser.add_argument(
"--input", default=(300, 300), nargs="+", help="Models input size."
)
args = parser.parse_args()
valid_classes = ["person", "car", "bike"]
cap = cv2.VideoCapture(args.video)
video_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
video_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
# video_width = 1280
# video_height = 720
max_boxes = 100
detection_results = []
columns = [
"frame_id",
"method",
"label",
"score",
"xmin",
"ymin",
"xmax",
"ymax",
"roi_xmin",
"roi_ymin",
"roi_xmax",
"roi_ymax",
]
# gt_fn = os.path.join(Path(args.input).video, '../annotations', Path(args.video).stem + '.viratdata.objects.txt')
# gt = read_virat(gt_fn)
bgs = pd.read_csv(os.path.join(os.getcwd(), f"{Path(args.video).stem}_rois.csv"))
# We don't consider the first 500 frames
# gt = gt[gt.current_frame >= 500]
# bgs = bgs[bgs.frame_id >= 500]
colors = {
"full_frame": (255, 0, 0),
"gt": (0, 255, 0),
"mog": (255, 255, 0),
"mean": (255, 0, 255),
"hybrid": (0, 0, 255),
}
model = infer.Model(
model_dir=args.model,
label_map=None, # Will load MSCOCO
min_score=0.01,
iou_threshold=0.3,
)
frames_with_objects = sorted(bgs[bgs.method == "gt"]["frame_id"].unique())
for frame_id in frames_with_objects:
if frame_id % 10 != 0:
continue
cap.set(1, frame_id)
ret, frame = cap.read()
frame_bgr = frame
frame_rgb = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2RGB)
# import pdb; pdb.set_trace()
composed_frames = [cv2.resize(frame.copy(), (300, 300))]
composed_frames_rgb = [cv2.resize(frame_rgb.copy(), (300, 300))]
cf2method = ["full_frame"]
object_lists = [None]
object_maps = [None]
for method in ["gt", "mog", "mean", "hybrid"]:
regions_proposed = bgs[(bgs.frame_id == frame_id) & (bgs.method == method)][
["xmin", "ymin", "xmax", "ymax"]
].values
if not len(regions_proposed):
continue
rois_proposed = []
for roi in regions_proposed:
if any([r > 1 or r < 0 for r in roi]):
import pdb
pdb.set_trace()
xmin = int(roi[0] * video_width)
ymin = int(roi[1] * video_height)
xmax = int(roi[2] * video_width)
ymax = int(roi[3] * video_height)
# cropped_rois.append(np.array(frame[ymin:ymax, xmin:xmax]))
rois_proposed.append([xmin, ymin, xmax, ymax])
# print(f'roi: {rois_proposed[-1]}')
# draw_detection(frame=frame_bgr, box=rois_proposed[-1], label=method, score=1, color=(255, 0, 0))
combined_width = sum(roi[2] - roi[0] for roi in rois_proposed)
combined_height = sum(roi[3] - roi[1] for roi in rois_proposed)
resize_x, resize_y = (1, 1)
if combined_width < args.input[0]:
resize_x = args.input[0] / combined_width
if combined_height < args.input[1]:
resize_y = args.input[1] / combined_height
# increase width to reach model input's width combined
if resize_x > 1 or resize_y > 1:
print((resize_x, resize_y))
for roi_id, roi in enumerate(rois_proposed):
new_size = (
int((roi[2] - roi[0]) * resize_x),
int((roi[3] - roi[1]) * resize_y),
)
new_box = resize_if_smaller(
roi, max_dims=(video_width, video_height), min_size=new_size
)
# print(f'new roi: {new_box}')
rois_proposed[roi_id] = new_box
# draw_detection(frame=frame_bgr, box=new_box, label=method, score=1, color=(0, 255, 0))
# if method == 'gt':
rois_proposed = merge_overlapping_boxes(rois_proposed)
# Check area covered by RoIs proposed. If > 80% of the frame, just use the whole frame.
area_rois = sum(
[(roi[2] - roi[0]) * (roi[3] - roi[1]) for roi in rois_proposed]
)
if area_rois > (video_width * video_height) * 0.8:
row = [frame_id, method] + [-1] * 10
detection_results.append([row])
print(f"RoIs take more than 80% of the frame. Skipping")
continue
composed_frame = None
object_map = None
objects = []
# import pdb; pdb.set_trace()
# ts0_crop = time.time()
composed_frame, object_map, objects = crop.combine_border(
[frame],
[rois_proposed],
border_size=5,
min_combined_size=(300, 300),
max_dims=(video_width, video_height),
)
composed_frame_rgb, _, _ = crop.combine_border(
[frame_rgb],
[rois_proposed],
border_size=5,
min_combined_size=(300, 300),
max_dims=(video_width, video_height),
)
# import pdb; pdb.set_trace()
composed_frames.append(
cv2.resize(composed_frame, (300, 300)).astype("uint8")
)
composed_frames_rgb.append(
cv2.resize(composed_frame_rgb, (300, 300)).astype("uint8")
)
object_maps.append(object_map)
object_lists.append(objects)
cf2method.append(method)
# regions_proposed = [[0, 0, composed_frame.shape[1]-1, composed_frame.shape[0]-1]]
# ts1_crop = time.time()
# total_crop_time = ts1_crop - ts0_crop
# if args.show:
# for i, method in enumerate(cf2method):
# cv2.imshow(method, composed_frames[i])
# if method != 'full_frame':
# cv2.setWindowTitle(method, f'{method} ({object_maps[i].shape[1]}x{object_map[i].shape[0]})')
# cv2.imshow('Full Frame', frame_bgr)
# key = cv2.waitKey(1) & 0xFF
# if key == ord("q"):
# sys.exit(1)
# time.sleep(2)
# continue
ts0_infer = time.time()
results = model.run(composed_frames_rgb)
ts1_infer = time.time()
infer_latency = ts1_infer - ts0_infer
print(
f"[{frame_id}] Took {infer_latency:.2f} seconds to process {len(composed_frames)} frames ({1/infer_latency:.2f} fps) -> {cf2method}."
)
# total_time_infer += (ts1_infer-ts0_infer)
for method_id, method in enumerate(cf2method):
object_map = object_maps[method_id]
objects = object_lists[method_id]
composed_frame = composed_frames[method_id]
boxes = results[method_id]["boxes"]
scores = results[method_id]["scores"]
labels = results[method_id]["labels"]
draw_top5(
frame_bgr, labels, scores, method, color=colors[method], pos=method_id
)
ts0_decode_infer = time.time()
for i in range(min(len(boxes), max_boxes)):
label = labels[i]
score = scores[i]
if valid_classes is not None and label not in valid_classes:
continue
if score < args.min_score:
continue
ymin, xmin, ymax, xmax = tuple(boxes[i])
# Object/Detection coordinates in merged frame
(infer_left, infer_right, infer_top, infer_bottom) = (
int(xmin * composed_frame.shape[1]),
int(xmax * composed_frame.shape[1]),
int(ymin * composed_frame.shape[0]),
int(ymax * composed_frame.shape[0]),
)
draw_detection(
composed_frame,
[infer_left, infer_top, infer_right, infer_bottom],
label,
score,
)
if method == "full_frame":
(left, right, top, bottom) = (
int(xmin * frame_bgr.shape[1]),
int(xmax * frame_bgr.shape[1]),
int(ymin * frame_bgr.shape[0]),
int(ymax * frame_bgr.shape[0]),
)
detection_results.append(
[
frame_id,
method,
label,
score,
left,
top,
right,
bottom,
infer_left,
infer_top,
infer_right,
infer_bottom,
]
)
draw_detection(frame_bgr, [left, top, right, bottom], label, score)
continue
# Find object id consulting the object map
(composed_left, composed_right, composed_top, composed_bottom) = (
int(xmin * object_map.shape[1]),
int(xmax * object_map.shape[1]),
int(ymin * object_map.shape[0]),
int(ymax * object_map.shape[0]),
)
predicted_box = [
composed_left,
composed_top,
composed_right,
composed_bottom,
]
obj_id = int(
np.median(
object_map[
composed_top:composed_bottom, composed_left:composed_right
]
)
)
if obj_id == 0:
continue
# import pdb; pdb.set_trace()
obj = objects[obj_id - 1]
# Translate to coordinates in original frame from the camera
# roi is in camera frame coordinates
roi = obj.box
# inference box is in merged frame coordinates and includes borders
box_in_inference = obj.inf_box
# Sanity check
assert predicted_box[0] < predicted_box[2]
assert predicted_box[1] < predicted_box[3]
# First, we adjust coordinates within merged frame by making sure borders are taken into account and subtracted
adjusted_coords = [
max(predicted_box[0], box_in_inference[0]),
max(predicted_box[1], box_in_inference[1]),
min(predicted_box[2], box_in_inference[2]),
min(predicted_box[3], box_in_inference[3]),
]
# Check if adjusted coordinates still fall within the RoI box.
if (
adjusted_coords[0] > box_in_inference[2]
or adjusted_coords[1] > box_in_inference[3]
or adjusted_coords[2] < box_in_inference[0]
or adjusted_coords[3] < box_in_inference[1]
):
print("coords out of frame")
print(adjusted_coords)
continue
# Second, we compute the relative object coordinates within RoI by removing box_in_inference coordinates
relative_coords = [
adjusted_coords[0] - box_in_inference[0],
adjusted_coords[1] - box_in_inference[1],
adjusted_coords[2] - box_in_inference[0],
adjusted_coords[3] - box_in_inference[1],
]
# Second, we remove borders such that 0,0 within roi is 0,0
no_border_coords = [
max(0, relative_coords[0] - obj.border[0]),
max(0, relative_coords[1] - obj.border[1]),
min(frame.shape[1], relative_coords[2] - obj.border[0]),
min(frame.shape[0], relative_coords[3] - obj.border[1]),
]
# Now, we can compute the absolute coordinates within the camera frames by adding roi coordinates
obj_coords = [
no_border_coords[0] + roi[0],
no_border_coords[1] + roi[1],
min(frame_bgr.shape[1], no_border_coords[2] + roi[0]),
min(frame_bgr.shape[0], no_border_coords[3] + roi[1]),
]
# if new box does not intersect enough with the original detection, skip it
predicted_coords_origin = [
0,
0,
predicted_box[2] - predicted_box[0],
predicted_box[3] - predicted_box[1],
]
translated_coords_origin = [
0,
0,
obj_coords[2] - obj_coords[0],
obj_coords[3] - obj_coords[1],
]
iou, _ = metrics.get_iou(
predicted_coords_origin, translated_coords_origin
)
if iou < 0.5:
print("new box is too different. Skipping")
continue
# (left, right, top, bottom) = (roi[0] + obj_coords[0], roi[0] + obj_coords[2],
# roi[1] + obj_coords[1], roi[1] + obj_coords[3])
if (
any([c < 0 for c in obj_coords])
or obj_coords[2] > frame_bgr.shape[1]
or obj_coords[3] > frame_bgr.shape[0]
):
import pdb
pdb.set_trace()
(left, top, right, bottom) = obj_coords
detection_results.append(
[
frame_id,
method,
label,
score,
left,
top,
right,
bottom,
infer_left,
infer_top,
infer_right,
infer_bottom,
]
)
draw_detection(
composed_frame,
[infer_left, infer_top, infer_right, infer_bottom],
label,
score,
color=colors[method],
)
draw_detection(
frame_bgr,
[left, top, right, bottom],
label,
score,
color=colors[method],
)
# ts1_decode_infer = time.time()
# total_decode_infer = ts1_decode_infer - ts0_decode_infer
if args.write:
cv2.imwrite(
os.path.join(
os.getcwd(),
"results",
f"{Path(args.video).stem}_{frame_id}_{method}.png",
),
composed_frame,
)
# import pdb; pdb.set_trace()
# composed_frame = cv2.cvtColor(np.float32(composed_frame), cv2.COLOR_RGB2BGR)
if args.show:
cv2.imshow(method, composed_frame)
if method != "full_frame":
cv2.setWindowTitle(
method,
f"{method} ({object_map.shape[1]}x{object_map.shape[0]})",
)
cv2.imshow("Full Frame", frame_bgr)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
sys.exit(1)
# if frame_id == 50:
# import pdb; pdb.set_trace()
# import pdb; pdb.set_trace()
detection_results = pd.DataFrame(detection_results, columns=columns)
detection_results.to_csv(
f"{Path(args.video).stem}_detections.csv", index=False, sep=","
)
if __name__ == "__main__":
main()
|
{"hexsha": "8686d0858527651ace7a73d19f8943ec170c79bd", "size": 20075, "ext": "py", "lang": "Python", "max_stars_repo_path": "apps/bgs_infer.py", "max_stars_repo_name": "danirivas/cova-tuner", "max_stars_repo_head_hexsha": "e7eaf7e75f0c15ce35c449fb67529c9c73386817", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "apps/bgs_infer.py", "max_issues_repo_name": "danirivas/cova-tuner", "max_issues_repo_head_hexsha": "e7eaf7e75f0c15ce35c449fb67529c9c73386817", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "apps/bgs_infer.py", "max_forks_repo_name": "danirivas/cova-tuner", "max_forks_repo_head_hexsha": "e7eaf7e75f0c15ce35c449fb67529c9c73386817", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7206405694, "max_line_length": 145, "alphanum_fraction": 0.4883686177, "include": true, "reason": "import numpy", "num_tokens": 4445}
|
C @(#)swapxai.f 20.3 2/13/96
      subroutine swapxai (i,j)
C
C     This subroutine exchanges two "I" intertie entities,
C     OARCINT(*,I) <-> OARCINT(*,J), together with the matching
C     OARCINP values.
C
      include 'ipfinc/parametr.inc'
      include 'ipfinc/alt_case.inc'
 
      character ctemp*10
      integer k
C
C     Swap both character fields of the intertie pair.
      do k = 1, 2
         ctemp = oarcint(k,i)
         oarcint(k,i) = oarcint(k,j)
         oarcint(k,j) = ctemp
      enddo
C     Swap the associated numeric entry ("temp" is implicitly real).
      temp = oarcinp(i)
      oarcinp(i) = oarcinp(j)
      oarcinp(j) = temp
 
      return
      end
|
{"hexsha": "8208d42fb38bfd62ebfe03724641416c2672840a", "size": 577, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ipf/swapxai.f", "max_stars_repo_name": "mbheinen/bpa-ipf-tsp", "max_stars_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-04-02T15:34:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T08:57:45.000Z", "max_issues_repo_path": "ipf/swapxai.f", "max_issues_repo_name": "cuihantao/bpa-ipf-tsp", "max_issues_repo_head_hexsha": "cb2d0917ae42eff571017e9162f550f87900b83f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-02-08T14:21:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-13T01:27:56.000Z", "max_forks_repo_path": "ipf/swapxai.f", "max_forks_repo_name": "mbheinen/bpa-ipf-tsp", "max_forks_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-02-03T04:26:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T15:04:31.000Z", "avg_line_length": 24.0416666667, "max_line_length": 73, "alphanum_fraction": 0.5285961872, "num_tokens": 203}
|
[STATEMENT]
lemma (in padic_integers) Zp_residue_eq:
assumes "a \<in> carrier Zp"
assumes "b \<in> carrier Zp"
assumes "val_Zp (a \<ominus> b) > k"
shows "(a k) = (b k)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a k = b k
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. a k = b k
[PROOF STEP]
have 0: "(a \<ominus> b) k = a k \<ominus>\<^bsub>Zp_res_ring k\<^esub> b k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (a \<ominus> b) k = a k \<ominus>\<^bsub>residue_ring (p ^ k)\<^esub> b k
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
a \<in> carrier Zp
b \<in> carrier Zp
eint (int k) < val_Zp (a \<ominus> b)
goal (1 subgoal):
1. (a \<ominus> b) k = a k \<ominus>\<^bsub>residue_ring (p ^ k)\<^esub> b k
[PROOF STEP]
by (simp add: residue_of_diff)
[PROOF STATE]
proof (state)
this:
(a \<ominus> b) k = a k \<ominus>\<^bsub>residue_ring (p ^ k)\<^esub> b k
goal (1 subgoal):
1. a k = b k
[PROOF STEP]
have 1: "(a \<ominus> b) k = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (a \<ominus> b) k = 0
[PROOF STEP]
using assms zero_below_val
[PROOF STATE]
proof (prove)
using this:
a \<in> carrier Zp
b \<in> carrier Zp
eint (int k) < val_Zp (a \<ominus> b)
\<lbrakk>prime ?p; ?x \<in> padic_set ?p; int ?n \<le> padic_val ?p ?x\<rbrakk> \<Longrightarrow> ?x ?n = \<zero>\<^bsub>residue_ring (?p ^ ?n)\<^esub>
\<lbrakk>prime ?p; ?x \<in> padic_set ?p; int ?n \<le> padic_val ?p ?x\<rbrakk> \<Longrightarrow> ?x ?n = 0
goal (1 subgoal):
1. (a \<ominus> b) k = 0
[PROOF STEP]
by (smt R.minus_closed Zp_def eint_ord_simps(2) padic_integers.p_res_ring_zero
padic_integers.residue_of_zero(1) padic_integers.val_ord_Zp padic_integers.zero_below_ord padic_integers_axioms)
[PROOF STATE]
proof (state)
this:
(a \<ominus> b) k = 0
goal (1 subgoal):
1. a k = b k
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a k = b k
[PROOF STEP]
apply(cases "k = 0")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. k = 0 \<Longrightarrow> a k = b k
2. k \<noteq> 0 \<Longrightarrow> a k = b k
[PROOF STEP]
apply (metis assms(1) assms(2) p_res_ring_0' residues_closed)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. k \<noteq> 0 \<Longrightarrow> a k = b k
[PROOF STEP]
using 0 1 assms p_residues R_cring Zp_def assms(1) assms(2) cring_def padic_set_res_closed
residues.res_zero_eq ring.r_right_minus_eq
[PROOF STATE]
proof (prove)
using this:
(a \<ominus> b) k = a k \<ominus>\<^bsub>residue_ring (p ^ k)\<^esub> b k
(a \<ominus> b) k = 0
a \<in> carrier Zp
b \<in> carrier Zp
eint (int k) < val_Zp (a \<ominus> b)
0 < ?m \<Longrightarrow> residues (p ^ ?m)
0 < ?m \<Longrightarrow> cring (residue_ring (p ^ ?m))
Zp \<equiv> padic_int p
a \<in> carrier Zp
b \<in> carrier Zp
cring ?R \<equiv> ring ?R \<and> Group.comm_monoid ?R
?f \<in> padic_set ?p \<Longrightarrow> ?f ?m \<in> carrier (residue_ring (?p ^ ?m))
residues ?m \<Longrightarrow> \<zero>\<^bsub>residue_ring ?m\<^esub> = 0
\<lbrakk>ring ?R; ?a \<in> carrier ?R; ?b \<in> carrier ?R\<rbrakk> \<Longrightarrow> (?a \<ominus>\<^bsub>?R\<^esub> ?b = \<zero>\<^bsub>?R\<^esub>) = (?a = ?b)
goal (1 subgoal):
1. k \<noteq> 0 \<Longrightarrow> a k = b k
[PROOF STEP]
by (metis Zp_defs(3) linorder_neqE_nat not_less0 p_res_ring_zero)
[PROOF STATE]
proof (state)
this:
a k = b k
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1585, "file": "Padic_Ints_Padic_Integers", "length": 13}
|
      subroutine cmo_addatt_cmo(imsgin,xmsgin,cmsgin,msgtype,nwds,
     * ierror_return)
C
C
C#######################################################################
C
C     PURPOSE -
C
C        This Routine Adds Attributes to an existing Mesh Object.
C
C     INPUT ARGUMENTS -
C
C        imsgin()  - Integer array of command input tokens
C        xmsgin()  - Real array of command input tokens
C        cmsgin()  - Character array of command input tokens
C        msgtype() - Integer array of command input token types
C        nwds      - Number of command input tokens
C
C     EXPECTED TOKEN LAYOUT (grounded in the indexing below) -
C        cmsgin(3)  - mesh object name ('-def-' selects current mesh)
C        cmsgin(4)  - attribute name
C        cmsgin(5)  - type (VINT/VDOUBLE/VCHAR/INT/REAL/CHARACTER)
C        cmsgin(6)  - rank        cmsgin(7)  - length
C        cmsgin(8)  - interpolation   cmsgin(9) - persistence
C        cmsgin(10) - ioflag
C        token 11   - default value (integer, real or character)
C        imsgin(12) - optional verbosity flag (see below)
C
C     OUTPUT ARGUMENTS -
C
C        ierror_return - Error Return Code (==0 ==> OK, <>0 ==> Error)
C
C     CHANGE HISTORY -
C
C        $Log: cmo_addatt_cmo.f,v $
C        Revision 2.00  2007/11/05 19:45:47  spchu
C        Import to CVS
C
CPVCS    
CPVCS       Rev 1.9   07 Jul 2006 08:49:12   gable
CPVCS    Modified screen output to reduce blank space.
CPVCS    
CPVCS       Rev 1.8   28 Aug 2001 10:51:42   dcg
CPVCS    set persistence to value input via cmsgin (used to be hard wired 'temporary')
CPVCS
CPVCS       Rev 1.7   10 Apr 2001 11:04:08   dcg
CPVCS    shorten too long name
CPVCS
CPVCS       Rev 1.6   17 Feb 2000 20:44:16   jtg
CPVCS    added verbosity flag in 12th position so tone down screen output
CPVCS    for calls from iterative routines like copyatt_mpary_lg if
CPVCS    desired
CPVCS
CPVCS       Rev 1.5   07 Feb 2000 16:45:42   dcg
CPVCS
CPVCS       Rev 1.4   Tue Feb 01 13:40:02 2000   dcg
CPVCS
CPVCS       Rev 1.3   Mon Jan 31 09:54:56 2000   dcg
CPVCS
CPVCS       Rev 1.14   Tue Nov 02 19:08:16 1999   jtg
CPVCS    fixed hardwired character*32 for cmsgin
CPVCS
CPVCS       Rev 1.13   Mon Apr 14 16:39:50 1997   pvcs
CPVCS    No change.
CPVCS
CPVCS       Rev 1.12   Thu Jan 30 19:35:30 1997   het
CPVCS    Refresh the ipcmoatt pointer because the memory was being
CPVCS    changed but the pointer was not updates.
CPVCS
CPVCS       Rev 1.11   11/13/95 16:21:16   dcg
CPVCS    allocate integer arrays for VINT - real for VDOUBLE
CPVCS
CPVCS       Rev 1.10   11/07/95 17:15:34   dcg
CPVCS    change flag to 2 in mmgetblk calls
CPVCS
CPVCS       Rev 1.9   10/16/95 10:21:16   het
CPVCS    Correct sbnloc/sbnstor pointer problem
CPVCS
CPVCS       Rev 1.8   09/15/95 10:32:18   dcg
CPVCS    fix index field for new attribute
CPVCS
CPVCS       Rev 1.6   09/13/95 16:45:34   dcg
CPVCS    replace character literals in argument lists with
CPVCS    character variables
CPVCS
CPVCS       Rev 1.5   09/11/95 14:43:10   het
CPVCS    Change to the storage block based CMO stuff.
CPVCS
CPVCS       Rev 1.4   03/15/95 15:22:14   ejl
CPVCS    Finished installing the defaults.
CPVCS
CPVCS       Rev 1.3   02/16/95 10:22:54   ejl
CPVCS    Put return afer end statement.
CPVCS
CPVCS       Rev 1.2   02/16/95 09:55:36   ejl
CPVCS    Fixed bugs, fixed hole in the Create command.
CPVCS    Added commands MODATT, LENGTH, MEMORY, & COMPRESS.
CPVCS
CPVCS       Rev 1.1   02/10/95 14:06:26   ejl
CPVCS    Fix bugs left from last update.
C
C#######################################################################
C
      implicit none
C
C#######################################################################
C
      include 'cmo_lg.h'
C
C#######################################################################
C
      integer nwds, imsgin(nwds), msgtype(nwds)
      REAL*8 xmsgin(nwds)
      character*(*) cmsgin(nwds)
C
      integer ierror_return
C
C#######################################################################
C
C     LOCAL VARIABLE DEFINITION
C
      character*32 cmo_name, att_name, ctype,crank,clength
      integer j, icmo_index,ierror,len,natts,i,lout,itype,
     *   posname,postype,posrank,poslen,posint,posio,pospers
      integer lentype, length, irank, mmlength, ierr, ier
     *  , icscode, verbosity
C
      character*132 logmess
C
      integer idefault
      real*8 xdefault
      character*32 cdefault
C
C     All three typed views share one pointer: the same memory block is
C     filled as integer, real or character depending on the new
C     attribute's declared type.
      pointer (ipcmo_pointer, icmo_pointer)
      pointer (ipcmo_pointer, xcmo_pointer)
      pointer (ipcmo_pointer, ccmo_pointer)
      integer icmo_pointer(*)
      REAL*8 xcmo_pointer(*)
      character*32 ccmo_pointer(*)
C
      character*32 partname
C
C#######################################################################
C
      integer icharlnf
C
C#######################################################################
C
C
C
C.... Optional 12th token (integer) selects verbosity; default is 1
C.... (warn on screen when the attribute already exists).
      if (nwds.ge.12.and.msgtype(12).eq.1) then
         verbosity=imsgin(12)
      else
         verbosity=1
      endif
C.... NOTE(review): on this early exit (and the cmo_get_index failure
C.... below) ierror_return is returned without having been set -- the
C.... caller may see an undefined value; confirm intended behavior.
      if (nwds.lt.4) goto 9998
      partname='define_cmo_lg'
      cmo_name = cmsgin(3)
      att_name = cmsgin(4)
C
C.... '-def-' means "use the currently active mesh object".
      if((cmo_name(1:icharlnf(cmo_name))) .eq. '-def-') then
         call cmo_get_name(cmo_name, ier)
         if(ier.ne.0) then
            write(logmess,9000) cmo_name(1:icharlnf(cmo_name))
 9000       format(" ADDATT: CMO found bad mesh object: ",a)
            call writloga('default',0,logmess,0,ier)
            ierror_return = 1
            goto 9999
         endif
      endif
C     Check the mesh object name
      call cmo_exist(cmo_name,ier)
      if(ier.ne.0) then
         write(logmess,'(a,a)')
     *      'ADDATT: Not a valid mesh object: ',
     *      cmo_name(1:icharlnf(cmo_name))
         call writloga('default',1,logmess,1,ier)
         ierror_return = 1
         goto 9999
      endif
C
C.... Check if this a new Attribute.
C
C
      call cmo_get_index(cmo_name,icmo_index,ierror)
C
      if(ierror.ne.0) go to 9998
      call mmfindbk('cmo_natts',partname,ipcmo_natts,len,
     *   icscode)
      natts=cmo_natts(icmo_index)
      call mmfindbk('cmo_attlist',cmo_name,ipcmo_attlist,
     *   len,icscode)
C.... Scan the existing attribute list; a duplicate name is not an
C.... error, only a warning (ierror_return = -1 verbose / -2 quiet).
      do i=1,natts
         if(cmo_attlist(number_of_params_per_att*(i-1)+1)
     *      .eq.att_name) then
C
C....       Existing Attribute.
C
C
            if (verbosity.gt.0) then
               ierror_return=-1
               write(logmess,'(a,a,2x,a)')
     *         'CMO_ADDATT warning: attribute already exist: ',
     *         cmo_name(1:icharlnf(cmo_name)),
     *         att_name(1:icharlnf(att_name))
               call writloga('default',0,logmess,0,ierr)
            else
               ierror_return=-2
            endif
C
            go to 9999
         endif
      enddo
C
C.... This is a new attribute.
C
      ierror_return=0
      natts=natts+1
      cmo_natts(icmo_index)=natts
C
C.... See if there is enough space for new attribute
C.... (grow the list in chunks of 20 attribute slots).
C
      call mmfindbk('cmo_attlist',cmo_name,ipcmo_attlist,len,icscode)
      if(len.lt.natts*number_of_params_per_att) then
         call mmincblk('cmo_attlist',cmo_name,ipcmo_attlist,
     *    number_of_params_per_att*20,icscode)
      endif
C
c.... Find postions of name, type, rank and length
C.... NOTE(review): statement label 10 appears to have no matching
C.... goto in this routine.
10    call mmfindbk( 'defcmo_attparam_names',partname,
     *                  ipdefcmo_attparam_names,len,icscode)
      call mmfindbk( 'cmo_attparam_idefault',cmo_name,
     *                  ipcmo_attparam_idefault,len,icscode)
      if(len.lt.natts) call mmincblk( 'cmo_attparam_idefault',
     *      cmo_name,ipcmo_attparam_idefault,20,icscode)
      call mmfindbk( 'cmo_attparam_rdefault',cmo_name,
     *                  ipcmo_attparam_rdefault,len,icscode)
      if(len.lt.natts) call mmincblk( 'cmo_attparam_rdefault',
     *      cmo_name,ipcmo_attparam_rdefault,20,icscode)
      call mmfindbk( 'cmo_attparam_cdefault',cmo_name,
     *                  ipcmo_attparam_cdefault,len,icscode)
      if(len.lt.natts) call mmincblk( 'cmo_attparam_cdefault',
     *      cmo_name,ipcmo_attparam_cdefault,20,icscode)
C.... Map each parameter keyword to its slot offset within one
C.... attribute record of cmo_attlist.
      do i=1,number_of_default_attparam_name
         if(defcmo_attparam_names(i).eq.'name') posname=i
         if(defcmo_attparam_names(i).eq.'type') postype=i
         if(defcmo_attparam_names(i).eq.'rank') posrank=i
         if(defcmo_attparam_names(i).eq.'length') poslen=i
         if(defcmo_attparam_names(i).eq.'interpolation') posint=i
         if(defcmo_attparam_names(i).eq.'persistence') pospers=i
         if(defcmo_attparam_names(i).eq.'ioflag') posio=i
      enddo
C.... Fill the new attribute's record from command tokens 4..10.
      cmo_attlist(number_of_params_per_att*(natts-1)+posname)=att_name
      cmo_attlist(number_of_params_per_att*(natts-1)+postype)=cmsgin(5)
      cmo_attlist(number_of_params_per_att*(natts-1)+posrank)=cmsgin(6)
      cmo_attlist(number_of_params_per_att*(natts-1)+poslen)=cmsgin(7)
      cmo_attlist(number_of_params_per_att*(natts-1)+posint)=cmsgin(8)
      cmo_attlist(number_of_params_per_att*(natts-1)+pospers)=
     * cmsgin(9)
      cmo_attlist(number_of_params_per_att*(natts-1)+posio)=cmsgin(10)
C
C.... Set up the Memory Managed Arrays for the new Attribute.
C
      ctype=cmsgin(5)
      clength=cmsgin(7)
      crank=cmsgin(6)
      lentype=icharlnf(ctype)
C
      if(ctype(1:lentype).eq.'VINT') then
C
C....    Vector of integers: allocate rank*length ints (mmgetblk flag 1)
C....    and fill with the integer default from token 11.
         call cmo_get_info(clength,cmo_name,length,lout,itype,
     *                     ierror_return)
         call cmo_get_info(crank,cmo_name,irank,lout,itype,
     *                     ierror_return)
C
         mmlength=max(irank*length,1)
C
         call mmgetblk(att_name,
     *                 cmo_name,
     *                 ipcmo_pointer,mmlength,
     *                 1,ier)
         idefault=imsgin(11)
         if(msgtype(11).eq.2) idefault=nint(xmsgin(11))
C
         if(ier.ne.0) then
            call cmo_mm_error('cmo_addatt_cmo')
         else
            do j=1,mmlength
               icmo_pointer(j)=idefault
            enddo
         endif
         cmo_attparam_idefault(natts)=idefault
         cmo_attparam_rdefault(natts)=idefault
C
      elseif(ctype(1:lentype).eq.'VDOUBLE') then
C
C....    Vector of reals: allocate rank*length doubles (flag 2) and
C....    fill with the real default from token 11.
         call cmo_get_info(clength,cmo_name,length,lout,itype,
     *                     ierror_return)
         call cmo_get_info(crank,cmo_name,irank,lout,itype,
     *                     ierror_return)
C
         mmlength=max(irank*length,1)
C
         call mmgetblk(att_name,
     *                 cmo_name,
     *                 ipcmo_pointer,mmlength,
     *                 2,ier)
         xdefault=xmsgin(11)
         if(msgtype(11).eq.1) xdefault=imsgin(11)
C
         if(ier.ne.0) then
            call cmo_mm_error('cmo_addatt_cmo')
         else
            do j=1,length*irank
               xcmo_pointer(j)=xdefault
            enddo
         endif
         cmo_attparam_rdefault(natts)=xdefault
         cmo_attparam_idefault(natts)=nint(xdefault)
c
      elseif(ctype(1:lentype).eq.'VCHAR') then
C
C....    Vector of character*32 entries (flag 3), filled with the
C....    character default from token 11.
         call cmo_get_info(clength,cmo_name,length,lout,itype,
     *                     ierror_return)
         call cmo_get_info(crank,cmo_name,irank,lout,itype,
     *                     ierror_return)
         mmlength=max(irank*length,1)
         call mmgetblk(att_name,
     *                 cmo_name,
     *                 ipcmo_pointer,mmlength,
     *                 3,ier)
         cdefault=cmsgin(11)
C
         if(ier.ne.0) then
            call cmo_mm_error('cmo_addatt_cmo')
         else
            do j=1,length*irank
               ccmo_pointer(j)=cdefault
            enddo
         endif
         cmo_attparam_cdefault(natts)=cdefault
C
      elseif(ctype(1:lentype).eq.'INT') then
C....    Scalar attributes store only the default value; no memory
C....    block is allocated.
         idefault=imsgin(11)
         if(msgtype(11).eq.2) idefault=nint(xmsgin(11))
         cmo_attparam_idefault(natts)=idefault
         cmo_attparam_rdefault(natts)=idefault
      elseif(ctype(1:lentype).eq.'REAL') then
         xdefault=xmsgin(11)
         if(msgtype(11).eq.1) xdefault=imsgin(11)
         cmo_attparam_rdefault(natts)=xdefault
         cmo_attparam_idefault(natts)=nint(xdefault)
      elseif(ctype(1:lentype).eq.'CHARACTER') then
         cmo_attparam_cdefault(natts)=cmsgin(11)
C
C
      else
C
C....    Unsupported Type.
C
         ierror_return=1
C
         write(logmess,9060) cmo_name(1:icharlnf(cmo_name)),
     *                       ctype
         call writloga('default',0,logmess,0,ierr)
 9060    format('CMO_ADDATT error: Unsupported Type:',a,a)
C
C
      endif
C
C.... Success path also falls through to 9998.
 9998 continue
 9999 return
      end
|
{"hexsha": "c48737d4297e7bc5f008e59dd2173337085ac772", "size": 12066, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/cmo_addatt_cmo.f", "max_stars_repo_name": "millerta/LaGriT-1", "max_stars_repo_head_hexsha": "511ef22f3b7e839c7e0484604cd7f6a2278ae6b9", "max_stars_repo_licenses": ["CNRI-Python"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2017-02-09T17:54:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T22:22:32.000Z", "max_issues_repo_path": "src/cmo_addatt_cmo.f", "max_issues_repo_name": "millerta/LaGriT-1", "max_issues_repo_head_hexsha": "511ef22f3b7e839c7e0484604cd7f6a2278ae6b9", "max_issues_repo_licenses": ["CNRI-Python"], "max_issues_count": 166, "max_issues_repo_issues_event_min_datetime": "2017-01-26T17:15:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T21:36:28.000Z", "max_forks_repo_path": "src/lg_core/cmo_addatt_cmo.f", "max_forks_repo_name": "daniellivingston/LaGriT", "max_forks_repo_head_hexsha": "decd0ce0e5dab068034ef382cabcd134562de832", "max_forks_repo_licenses": ["Intel"], "max_forks_count": 63, "max_forks_repo_forks_event_min_datetime": "2017-02-08T21:56:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T06:48:36.000Z", "avg_line_length": 32.3485254692, "max_line_length": 86, "alphanum_fraction": 0.5878501575, "num_tokens": 3792}
|
[STATEMENT]
lemma subdegree_minus_commute [simp]:
"subdegree (f-(g::('a::group_add) fps)) = subdegree (g - f)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. subdegree (f - g) = subdegree (g - f)
[PROOF STEP]
proof (-, cases "g-f=0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. g - f = 0 \<Longrightarrow> subdegree (f - g) = subdegree (g - f)
2. g - f \<noteq> 0 \<Longrightarrow> subdegree (f - g) = subdegree (g - f)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
g - f = 0
goal (2 subgoals):
1. g - f = 0 \<Longrightarrow> subdegree (f - g) = subdegree (g - f)
2. g - f \<noteq> 0 \<Longrightarrow> subdegree (f - g) = subdegree (g - f)
[PROOF STEP]
have "\<And>n. (f - g) $ n = -((g - f) $ n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. (f - g) $ n = - ((g - f) $ n)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(f - g) $ ?n = - ((g - f) $ ?n)
goal (2 subgoals):
1. g - f = 0 \<Longrightarrow> subdegree (f - g) = subdegree (g - f)
2. g - f \<noteq> 0 \<Longrightarrow> subdegree (f - g) = subdegree (g - f)
[PROOF STEP]
with True
[PROOF STATE]
proof (chain)
picking this:
g - f = 0
(f - g) $ ?n = - ((g - f) $ ?n)
[PROOF STEP]
have "f - g = 0"
[PROOF STATE]
proof (prove)
using this:
g - f = 0
(f - g) $ ?n = - ((g - f) $ ?n)
goal (1 subgoal):
1. f - g = 0
[PROOF STEP]
by (intro fps_ext) simp
[PROOF STATE]
proof (state)
this:
f - g = 0
goal (2 subgoals):
1. g - f = 0 \<Longrightarrow> subdegree (f - g) = subdegree (g - f)
2. g - f \<noteq> 0 \<Longrightarrow> subdegree (f - g) = subdegree (g - f)
[PROOF STEP]
with True
[PROOF STATE]
proof (chain)
picking this:
g - f = 0
f - g = 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
g - f = 0
f - g = 0
goal (1 subgoal):
1. subdegree (f - g) = subdegree (g - f)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
subdegree (f - g) = subdegree (g - f)
goal (1 subgoal):
1. g - f \<noteq> 0 \<Longrightarrow> subdegree (f - g) = subdegree (g - f)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. g - f \<noteq> 0 \<Longrightarrow> subdegree (f - g) = subdegree (g - f)
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
g - f \<noteq> 0
goal (1 subgoal):
1. g - f \<noteq> 0 \<Longrightarrow> subdegree (f - g) = subdegree (g - f)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. subdegree (f - g) = subdegree (g - f)
[PROOF STEP]
using nth_subdegree_nonzero[OF False]
[PROOF STATE]
proof (prove)
using this:
(g - f) $ subdegree (g - f) \<noteq> (0::'a)
goal (1 subgoal):
1. subdegree (f - g) = subdegree (g - f)
[PROOF STEP]
by (fastforce intro: subdegreeI)
[PROOF STATE]
proof (state)
this:
subdegree (f - g) = subdegree (g - f)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1298, "file": null, "length": 16}
|
\chapter{\ac{GPU} Programs}
There is a broad range of shader languages and \ac{API}s like \ac{GLSL}, \ac{HLSL} and Cg. Therefore, when designing the \ac{GPU} program interfaces for PLRenderer, one design goal was to be able to implement as many \ac{GPU} program backends as possible -- and this without producing too much internal overhead.
In PixelLight, we're using \ac{OpenGL} and \ac{GLSL} terminology if not otherwise mentioned. The reason for this is simple: \ac{OpenGL} and \ac{GLSL} are popular, open, multi-platform standards.
\section{Shader}
\paragraph{Uniforms}
Uniforms are per-program variables that are constant during program execution.
\section{Vertex Shader}
A vertex shader can reference a number of variables as it executes.
\paragraph{Vertex Attributes}
Vertex attributes are the per-vertex values.
\paragraph{Varying Vertex Shader Input}
In the first years of programmable shaders, there was a need to connect the legacy fixed functions with the new programmable shaders. Therefore, within certain shader languages, binding semantic names for varying vertex shader input were introduced. Table~\ref{Table:VaryingVertexShaderInput} gives an overview over the first 16 vertex shader input arguments within \ac{GLSL} and Cg and their corresponding binding semantic names.
\begin{table}[htb]
\centering
\begin{ThreePartTable}
\begin{tabular}{|l|l|p{0.2\textwidth}|l|}
\toprule
			\textbf{Index} & \textbf{\ac{GLSL}\tnote{2}} & \textbf{Cg\tnote{1}} & \textbf{Description}\\
\midrule
\hline
0 & gl\_Vertex & POSITION, ATTR0 & Position\\
\hline
1 & - & BLENDWEIGHT, ATTR1 & Weight\\
\hline
2 & gl\_Normal & NORMAL, ATTR2 & Normal\\
\hline
3 & gl\_Color & COLOR0, DIFFUSE, ATTR3 & Primary color\\
\hline
4 & gl\_SecondaryColor & COLOR1, SPECULAR, ATTR4 & Secondary color\\
\hline
5 & gl\_FogCoord & TESSFACTOR, FOGCOORD, ATTR5 & Fog coordinate\\
\hline
6 & - & PSIZE, ATTR6 & Point size\\
\hline
7 & - & BLENDINDICES, ATTR7 & Blend indices\\
\hline
8 & gl\_MultiTexCoord0 & TEXCOORD0, ATTR8 & Texture coordinate 0\\
\hline
9 & gl\_MultiTexCoord1 & TEXCOORD1, ATTR9 & Texture coordinate 1\\
\hline
10 & gl\_MultiTexCoord2 & TEXCOORD2, ATTR10 & Texture coordinate 2\\
\hline
11 & gl\_MultiTexCoord3 & TEXCOORD3, ATTR11 & Texture coordinate 3\\
\hline
12 & gl\_MultiTexCoord4 & TEXCOORD4, ATTR12 & Texture coordinate 4\\
\hline
			13 & gl\_MultiTexCoord5 & TEXCOORD5, ATTR13 & Texture coordinate 5\\
\hline
14 & gl\_MultiTexCoord6 & TEXCOORD6, TANGENT, ATTR14 & Texture coordinate 6, tangent\\
\hline
15 & gl\_MultiTexCoord7 & TEXCOORD7, BINORMAL, ATTR15 & Texture coordinate 7, binormal\\
\hline
\bottomrule
\end{tabular}
\begin{tablenotes}
\item[1] Cg Users Manual (2.2, Release 1.4 September 2005), \url{http://developer.download.nvidia.com/cg/Cg\_2.2/CgUsersManual.pdf}, Page 299, Table 30. vp20 Varying Input Binding Semantics
\item[2] \ac{OpenGL} Shading Language 3.30.6 Specification (updated March 11, 2010), \url{http://www.opengl.org/registry/doc/GLSLangSpec.3.30.6.clean.pdf} Page 73, 7.3 Compatibility Profile Vertex Shader Built-In Inputs
\end{tablenotes}
\caption{Binding semantic names for varying vertex shader input in \ac{GLSL} and Cg}
\label{Table:VaryingVertexShaderInput}
\end{ThreePartTable}
\end{table}
Nowadays, one should avoid using these binding semantic names because they are often deprecated and, in general, restrict the usage of programmable shaders. Modern graphic \ac{API}s and shader languages no longer demand that, for example, the first vertex shader input argument is the vertex position. One should carefully plan the vertex shader input design. If not otherwise mentioned, we're using within PixelLight the vertex shader input layout as seen within the table~\ref{Table:VaryingVertexShaderInput} above - this way we stay compatible to fixed function stuff and in general, it's less confusing to use a standard layout if possible.
\section{Fragment Shader}
A fragment shader can reference a number of variables as it executes.
|
{"hexsha": "2d0bd419b5a6564afa616718a624968bcfe1de36", "size": 4194, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Docs/PixelLightBase/PLRenderer/GPUPrograms.tex", "max_stars_repo_name": "ktotheoz/pixellight", "max_stars_repo_head_hexsha": "43a661e762034054b47766d7e38d94baf22d2038", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 83, "max_stars_repo_stars_event_min_datetime": "2015-01-08T15:06:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-20T17:07:00.000Z", "max_issues_repo_path": "Docs/PixelLightBase/PLRenderer/GPUPrograms.tex", "max_issues_repo_name": "PixelLightFoundation/pixellight", "max_issues_repo_head_hexsha": "43a661e762034054b47766d7e38d94baf22d2038", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2019-06-18T06:46:07.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-02T11:11:28.000Z", "max_forks_repo_path": "Docs/PixelLightBase/PLRenderer/GPUPrograms.tex", "max_forks_repo_name": "naetherm/PixelLight", "max_forks_repo_head_hexsha": "d7666f5b49020334cbb5debbee11030f34cced56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 40, "max_forks_repo_forks_event_min_datetime": "2015-02-25T18:24:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-06T09:01:48.000Z", "avg_line_length": 49.3411764706, "max_line_length": 642, "alphanum_fraction": 0.7298521698, "num_tokens": 1311}
|
"""
解AVSb的方程
"""
import os
import multiprocessing
import argparse
import numpy
from basics import get_procs_num
from fermi.avsb import shift_kv, get_von_hove_patches
from fermi.avsb import d1_disp, p2_disp
from fermi.avsb import intra_band_u, inter_band_uprime
import flowequ.mulitband_hubbard as hubbard
from helpers.ettriangulated import load_from as triload
from helpers.discretization import load_from as patload
def load_brillouin(args):
    '''Load the Brillouin-zone triangulation and patch data from disk.

    Returns the zone description, the small triangles with their
    adjacency lists, the von Hove patch info per band, and the
    triangle-to-patch assignment per band.
    '''
    brlu, mesh_count, ltris, ladjs = triload('{0}tris.txt'.format(args.prefix))
    if mesh_count != args.mesh:
        raise ValueError('mesh数量对应不上')
    # Locate the von Hove patches for both band dispersions.
    mpinfo = numpy.ndarray((2, args.patches), dtype=object)
    for band_idx, dispersion in enumerate((d1_disp, p2_disp)):
        mpinfo[band_idx, :] = get_von_hove_patches(args.patches, dispersion)
    # Load which patch every small triangle belongs to, per band.
    mlpats = numpy.ndarray((2, len(ltris)), dtype=int)
    mlpats[0, :] = patload('{0}d1pats.txt'.format(args.prefix))
    mlpats[1, :] = patload('{0}p2pats.txt'.format(args.prefix))
    return brlu, ltris, ladjs, mpinfo, mlpats
def slove_equ(args, brlu, ltris, ladjs, mpinfo, mlpats):
    '''Integrate the flow equation for the interaction tensor U.

    Initializes the hubbard module's global state, then Euler-steps the
    flow parameter ``lval`` 1000 times, saving a snapshot of ``hubbard.U``
    to disk after every step.  (Function name looks like a typo of
    ``solve_equ`` but is kept as the public name.)
    '''
    # Initialize U: intra-band U = 5.0, inter-band U' = 4.0
    uval = numpy.zeros((2, 2, 2, 2, args.patches, args.patches, args.patches))
    uval += intra_band_u(5.0, mpinfo)
    uval += inter_band_uprime(4.0, mpinfo)
    # Initialize the hubbard module's global configuration (2 bands)
    hubbard.uinit(uval, args.patches, 2)
    lamb0 = 2.0
    print('lamb0 = ', lamb0)
    hubbard.config_init(
        brlu, ltris, ladjs, mpinfo, mlpats,
        [d1_disp, p2_disp], [None, None],
        shift_kv, lamb0, find_mode=3
    )
    # Output directories
    if not os.path.isdir('heatmap8'):
        os.mkdir('heatmap8')
    rpath = 'heatmap8/avsb'
    if not os.path.isdir(rpath):
        os.mkdir(rpath)
    lval = 0.
    lstep = 0.01
    numpy.save('{0}/{1:.2f}U.npy'.format(rpath, lval), hubbard.U)
    for _ in range(1000):
        # Precompute the momentum-space quantities for the current lval
        hubbard.precompute_contour(lval)
        hubbard.precompute_qpp(lval)
        hubbard.precompute_qfs(lval)
        hubbard.precompute_nqfs(lval)
        hubbard.precompute_qex(lval)
        hubbard.precompute_nqex(lval)
        # Build the worklist: one derivative evaluation per index of U
        data_list = []
        ndit = numpy.nditer(hubbard.U, flags=['multi_index'])
        while not ndit.finished:
            bd1, bd2, bd3, bd4, idx1, idx2, idx3 = ndit.multi_index
            data_list.append((lval, bd1, bd2, bd3, bd4, idx1, idx2, idx3))
            ndit.iternext()
        # Worker pool.
        # KNOWN ISSUE: a Pool created before a global variable is modified
        # will not see the modification inside its worker processes.
        with multiprocessing.Pool(get_procs_num()) as pool:
            result = pool.starmap(hubbard.dl_ec, data_list)
        duval = numpy.reshape(result, hubbard.U.shape)
        # Apply the Euler step.  This must stay separate from the
        # derivative computation above because dl_ec reads hubbard.U.
        hubbard.U += duval * lstep
        lval += lstep
        #
        del data_list, result, duval
        #
        # Leftover consistency check against a saved reference snapshot:
        #uval2 = numpy.load('{0}/{1:.2f}U.chk'.format(rpath, 10.76))
        #if lval == 10.76:
        #    assert numpy.allclose(hubbard.U, uval2)
        numpy.save('{0}/{1:.2f}U.npy'.format(rpath, lval), hubbard.U)
def main():
    '''Command-line entry point: parse arguments, load data, solve.'''
    cli_parser = argparse.ArgumentParser(
        prog='python3 scripts/avsb/solution.py',
        description='compute equation'
    )
    cli_parser.add_argument('-p', '--patches', type=int, required=True, help='patches number')
    cli_parser.add_argument('-m', '--mesh', type=int, default=50, help='triangles number')
    cli_parser.add_argument('--prefix', type=str, default='scripts/avsb/',
                            help='saved file prefix')
    cli_args = cli_parser.parse_args()
    print('patch数量', cli_args.patches)
    print('布里渊区网格数量', cli_args.mesh)
    print('读取自 ', cli_args.prefix)
    loaded = load_brillouin(cli_args)
    slove_equ(cli_args, *loaded)


if __name__ == '__main__':
    main()
|
{"hexsha": "eb464c2d28d4ca8c2410ef2f683a066e615c2166", "size": 3754, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/avsb/solution.py", "max_stars_repo_name": "maryprimary/frg", "max_stars_repo_head_hexsha": "e789439f599eb884a6220ae5b471cf610b0c2b2a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/avsb/solution.py", "max_issues_repo_name": "maryprimary/frg", "max_issues_repo_head_hexsha": "e789439f599eb884a6220ae5b471cf610b0c2b2a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2021-02-04T06:46:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-01T00:43:38.000Z", "max_forks_repo_path": "scripts/avsb/solution.py", "max_forks_repo_name": "maryprimary/frg", "max_forks_repo_head_hexsha": "e789439f599eb884a6220ae5b471cf610b0c2b2a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5178571429, "max_line_length": 90, "alphanum_fraction": 0.6403835908, "include": true, "reason": "import numpy", "num_tokens": 1243}
|
# Read an image (storing it in 'img'), display it, and write a copy to the
# destination folder.
# image location (relative path) -> "res/lena.jpg"
# destination to save images -> "result/*.jpg"
# importing OpenCV, Numpy, Matplotlib.Pyplot
import cv2
import numpy as np
import matplotlib.pyplot as plt

# cv2.imread flag values:
# cv2.IMREAD_COLOR -> 1 --> color
# cv2.IMREAD_GRAYSCALE -> 0 --> black and white
# cv2.IMREAD_UNCHANGED -> -1 --> color + alpha scale
img = cv2.imread("res/lena.jpg", -1)

# cv2.imread returns None (it does not raise) when the file is missing or
# unreadable; fail fast with a clear message instead of letting cv2.imshow
# crash later with an opaque error.
if img is None:
    raise FileNotFoundError("could not read image 'res/lena.jpg'")

cv2.imshow("Image", img)
cv2.imwrite("result/lena_Unchanged.jpg", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Alternative: use matplotlib to display the image.
# cv2.imread -> B,G,R
# plt.imshow -> R,G,B
# so we convert from B,G,R to R,G,B first
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# plt.imshow(img)
# plt.show()
|
{"hexsha": "f3448cc49d67ff16b2de5b5e902c8ba0fba1c927", "size": 853, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/class01.py", "max_stars_repo_name": "sarveswar1/AlgoBook", "max_stars_repo_head_hexsha": "7e1692ee768cc84f581c9f33151869e8d0d18550", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/class01.py", "max_issues_repo_name": "sarveswar1/AlgoBook", "max_issues_repo_head_hexsha": "7e1692ee768cc84f581c9f33151869e8d0d18550", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/class01.py", "max_forks_repo_name": "sarveswar1/AlgoBook", "max_forks_repo_head_hexsha": "7e1692ee768cc84f581c9f33151869e8d0d18550", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5161290323, "max_line_length": 109, "alphanum_fraction": 0.6799531067, "include": true, "reason": "import numpy", "num_tokens": 239}
|
import datetime
import numpy as np
import os
import random
import sys
import time
import torch
import torch.nn as nn
import torchvision.utils as vutils
from torch.backends import cudnn
import utils
from sagan_models import Generator, Discriminator
class Trainer(object):
    """Self-attention GAN trainer.

    Wires up data loading, builds the generator/discriminator pair, and
    runs the alternating adversarial training loop configured by
    ``config`` (an argparse/config namespace).
    """

    def __init__(self, config):
        """Store the configuration, create output folders, build the
        dataloader and the G/D networks."""
        # Images data path & Output path
        self.dataset = config.dataset
        self.data_path = config.data_path
        self.save_path = os.path.join(config.save_path, config.name)

        # Training settings
        self.batch_size = config.batch_size
        self.total_step = config.total_step
        self.d_steps_per_iter = config.d_steps_per_iter
        self.g_steps_per_iter = config.g_steps_per_iter
        self.d_lr = config.d_lr
        self.g_lr = config.g_lr
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.inst_noise_sigma = config.inst_noise_sigma
        self.inst_noise_sigma_iters = config.inst_noise_sigma_iters
        self.start = 0  # Unless using pre-trained model

        # Image transforms
        self.shuffle = not config.dont_shuffle
        self.drop_last = not config.dont_drop_last
        self.resize = not config.dont_resize
        self.imsize = config.imsize
        self.centercrop = config.centercrop
        self.centercrop_size = config.centercrop_size

        # Step size
        self.log_step = config.log_step
        self.sample_step = config.sample_step
        self.model_save_step = config.model_save_step
        self.save_n_images = config.save_n_images
        self.max_frames_per_gif = config.max_frames_per_gif

        # Pretrained model
        self.pretrained_model = config.pretrained_model

        # Misc
        self.manual_seed = config.manual_seed
        self.disable_cuda = config.disable_cuda
        self.parallel = config.parallel
        self.num_workers = config.num_workers

        # Output paths
        self.model_weights_path = os.path.join(self.save_path, config.model_weights_dir)
        self.sample_path = os.path.join(self.save_path, config.sample_dir)

        # Model hyper-parameters
        self.adv_loss = config.adv_loss
        self.z_dim = config.z_dim
        self.g_conv_dim = config.g_conv_dim
        self.d_conv_dim = config.d_conv_dim
        self.lambda_gp = config.lambda_gp

        # Model name
        self.name = config.name

        # Create directories if not exist
        utils.make_folder(self.save_path)
        utils.make_folder(self.model_weights_path)
        utils.make_folder(self.sample_path)

        # Copy files
        utils.write_config_to_file(config, self.save_path)
        utils.copy_scripts(self.save_path)

        # Make dataloader
        self.dataloader, self.num_of_classes = utils.make_dataloader(self.batch_size, self.dataset, self.data_path,
                                                                     self.shuffle, self.num_workers, self.drop_last,
                                                                     self.resize, self.imsize, self.centercrop, self.centercrop_size)

        # Data iterator
        self.data_iter = iter(self.dataloader)

        # Check for CUDA (sets self.device)
        utils.check_for_CUDA(self)

        # Build G and D
        self.build_models()

        # Start with pretrained model (if it exists)
        if self.pretrained_model != '':
            utils.load_pretrained_model(self)

        if self.adv_loss == 'dcgan':
            self.criterion = nn.BCELoss()

    def train(self):
        """Run the alternating D/G adversarial training loop for
        ``self.total_step`` iterations, logging, sampling and
        checkpointing at the configured intervals."""
        # Seed
        np.random.seed(self.manual_seed)
        random.seed(self.manual_seed)
        torch.manual_seed(self.manual_seed)

        # For fast training
        cudnn.benchmark = True

        # For BatchNorm
        self.G.train()
        self.D.train()

        # Fixed noise for sampling from G
        fixed_noise = torch.randn(self.batch_size, self.z_dim, device=self.device)
        if self.num_of_classes < self.batch_size:
            # Cycle through all class labels to fill the batch
            fixed_labels = torch.from_numpy(np.tile(np.arange(self.num_of_classes), self.batch_size//self.num_of_classes + 1)[:self.batch_size]).to(self.device)
        else:
            fixed_labels = torch.from_numpy(np.arange(self.batch_size)).to(self.device)

        # For gan loss.  NOTE: use float fill values — an integer fill
        # makes torch.full return an int64 tensor on recent PyTorch,
        # which breaks nn.BCELoss and torch.normal below.
        label = torch.full((self.batch_size,), 1.0, device=self.device)
        ones = torch.full((self.batch_size,), 1.0, device=self.device)

        # Losses file
        log_file_name = os.path.join(self.save_path, 'log.txt')
        log_file = open(log_file_name, "wt")

        # Init
        start_time = time.time()
        G_losses = []
        D_losses_real = []
        D_losses_fake = []
        D_losses = []
        D_xs = []
        D_Gz_trainDs = []
        D_Gz_trainGs = []

        # Instance noise - make random noise mean (0) and std for injecting
        inst_noise_mean = torch.full((self.batch_size, 3, self.imsize, self.imsize), 0.0, device=self.device)
        inst_noise_std = torch.full((self.batch_size, 3, self.imsize, self.imsize), self.inst_noise_sigma, device=self.device)

        # Start training
        for self.step in range(self.start, self.total_step):

            # Instance noise std is linearly annealed from self.inst_noise_sigma to 0 thru self.inst_noise_sigma_iters
            inst_noise_sigma_curr = 0 if self.step > self.inst_noise_sigma_iters else (1 - self.step/self.inst_noise_sigma_iters)*self.inst_noise_sigma
            inst_noise_std.fill_(inst_noise_sigma_curr)

            # ================== TRAIN D ================== #
            for _ in range(self.d_steps_per_iter):
                # Zero grad
                self.reset_grad()

                # TRAIN with REAL

                # Get real images & real labels
                real_images, real_labels = self.get_real_samples()

                # Get D output for real images & real labels
                inst_noise = torch.normal(mean=inst_noise_mean, std=inst_noise_std).to(self.device)
                d_out_real = self.D(real_images + inst_noise, real_labels)

                # Compute D loss with real images & real labels
                if self.adv_loss == 'hinge':
                    d_loss_real = torch.nn.ReLU()(ones - d_out_real).mean()
                elif self.adv_loss == 'wgan_gp':
                    d_loss_real = -d_out_real.mean()
                else:
                    label.fill_(1)
                    d_loss_real = self.criterion(d_out_real, label)

                # Backward
                d_loss_real.backward()

                # TRAIN with FAKE

                # Create random noise
                z = torch.randn(self.batch_size, self.z_dim, device=self.device)

                # Generate fake images for same real labels
                fake_images = self.G(z, real_labels)

                # Get D output for fake images & same real labels
                inst_noise = torch.normal(mean=inst_noise_mean, std=inst_noise_std).to(self.device)
                d_out_fake = self.D(fake_images.detach() + inst_noise, real_labels)

                # Compute D loss with fake images & real labels
                if self.adv_loss == 'hinge':
                    d_loss_fake = torch.nn.ReLU()(ones + d_out_fake).mean()
                elif self.adv_loss == 'dcgan':
                    label.fill_(0)
                    d_loss_fake = self.criterion(d_out_fake, label)
                else:
                    d_loss_fake = d_out_fake.mean()

                # Backward
                d_loss_fake.backward()

                # If WGAN_GP, compute GP and add to D loss
                if self.adv_loss == 'wgan_gp':
                    d_loss_gp = self.lambda_gp * self.compute_gradient_penalty(real_images, real_labels, fake_images.detach())
                    d_loss_gp.backward()

                # Optimize
                self.D_optimizer.step()

            # ================== TRAIN G ================== #
            for _ in range(self.g_steps_per_iter):
                # Zero grad
                self.reset_grad()

                # Get real images & real labels (only need real labels)
                real_images, real_labels = self.get_real_samples()

                # Create random noise
                z = torch.randn(self.batch_size, self.z_dim).to(self.device)

                # Generate fake images for same real labels
                fake_images = self.G(z, real_labels)

                # Get D output for fake images & same real labels
                inst_noise = torch.normal(mean=inst_noise_mean, std=inst_noise_std).to(self.device)
                g_out_fake = self.D(fake_images + inst_noise, real_labels)

                # Compute G loss with fake images & real labels
                if self.adv_loss == 'dcgan':
                    label.fill_(1)
                    g_loss = self.criterion(g_out_fake, label)
                else:
                    g_loss = -g_out_fake.mean()

                # Backward + Optimize
                g_loss.backward()
                self.G_optimizer.step()

            # Print out log info
            if self.step % self.log_step == 0:
                G_losses.append(g_loss.mean().item())
                D_losses_real.append(d_loss_real.mean().item())
                D_losses_fake.append(d_loss_fake.mean().item())
                D_loss = D_losses_real[-1] + D_losses_fake[-1]
                if self.adv_loss == 'wgan_gp':
                    D_loss += d_loss_gp.mean().item()
                D_losses.append(D_loss)
                D_xs.append(d_out_real.mean().item())
                D_Gz_trainDs.append(d_out_fake.mean().item())
                D_Gz_trainGs.append(g_out_fake.mean().item())
                curr_time = time.time()
                curr_time_str = datetime.datetime.fromtimestamp(curr_time).strftime('%Y-%m-%d %H:%M:%S')
                elapsed = str(datetime.timedelta(seconds=(curr_time - start_time)))
                log = ("[{}] : Elapsed [{}], Iter [{} / {}], G_loss: {:.4f}, D_loss: {:.4f}, D_loss_real: {:.4f}, D_loss_fake: {:.4f}, D(x): {:.4f}, D(G(z))_trainD: {:.4f}, D(G(z))_trainG: {:.4f}".
                       format(curr_time_str, elapsed, self.step, self.total_step,
                              G_losses[-1], D_losses[-1], D_losses_real[-1], D_losses_fake[-1],
                              D_xs[-1], D_Gz_trainDs[-1], D_Gz_trainGs[-1]))
                print(log)
                log_file.write(log)
                log_file.flush()
                utils.make_plots(G_losses, D_losses, D_losses_real, D_losses_fake, D_xs, D_Gz_trainDs, D_Gz_trainGs,
                                 self.log_step, self.save_path)

            # Sample images
            if self.step % self.sample_step == 0:
                fake_images = self.G(fixed_noise, fixed_labels)
                sample_images = utils.denorm(fake_images.detach()[:self.save_n_images])
                # Save batch images
                vutils.save_image(sample_images, os.path.join(self.sample_path, 'fake_{:05d}.png'.format(self.step)))
                # Save gif
                utils.make_gif(sample_images[0].cpu().numpy().transpose(1, 2, 0)*255, self.step,
                               self.sample_path, self.name, max_frames_per_gif=self.max_frames_per_gif)

            # Save model
            if self.step % self.model_save_step == 0:
                utils.save_ckpt(self)

    def build_models(self):
        """Instantiate G and D on ``self.device`` and their Adam optimizers."""
        self.G = Generator(self.z_dim, self.g_conv_dim, self.num_of_classes).to(self.device)
        self.D = Discriminator(self.d_conv_dim, self.num_of_classes).to(self.device)
        if 'cuda' in self.device.type and self.parallel:
            self.G = nn.DataParallel(self.G)
            self.D = nn.DataParallel(self.D)

        # Loss and optimizer
        # self.G_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])
        self.G_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.G.parameters()), self.g_lr, [self.beta1, self.beta2])
        self.D_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.D.parameters()), self.d_lr, [self.beta1, self.beta2])

        # print networks
        print(self.G)
        print(self.D)

    def reset_grad(self):
        """Zero the gradients held by both optimizers."""
        self.G_optimizer.zero_grad()
        self.D_optimizer.zero_grad()

    def get_real_samples(self):
        """Return the next (images, labels) batch, restarting the
        dataloader iterator when it is exhausted."""
        try:
            real_images, real_labels = next(self.data_iter)
        except StopIteration:
            # End of epoch: rewind the dataloader.  (Was a bare
            # ``except:`` which also hid unrelated dataloader errors.)
            self.data_iter = iter(self.dataloader)
            real_images, real_labels = next(self.data_iter)

        real_images, real_labels = real_images.to(self.device), real_labels.to(self.device)
        return real_images, real_labels

    def compute_gradient_penalty(self, real_images, real_labels, fake_images):
        """WGAN-GP gradient penalty: mean squared deviation of the
        discriminator's gradient norm from 1, evaluated at random
        interpolations between real and fake images."""
        # One interpolation coefficient per sample, broadcast over C,H,W.
        # BUG FIX: the original referenced a bare ``device`` name here and
        # below, which raised NameError; it must be ``self.device``.
        alpha = torch.rand(real_images.size(0), 1, 1, 1).expand_as(real_images).to(self.device)
        # requires_grad_ marks the interpolated batch for autograd; the
        # original torch.tensor(existing_tensor, ...) both copied the data
        # and warns on modern PyTorch.
        interpolated = (alpha * real_images + (1 - alpha) * fake_images).requires_grad_(True)
        out = self.D(interpolated, real_labels)

        exp_grad = torch.ones(out.size()).to(self.device)
        grad = torch.autograd.grad(outputs=out,
                                   inputs=interpolated,
                                   grad_outputs=exp_grad,
                                   retain_graph=True,
                                   create_graph=True,
                                   only_inputs=True)[0]
        grad = grad.view(grad.size(0), -1)
        grad_l2norm = torch.sqrt(torch.sum(grad ** 2, dim=1))
        d_loss_gp = torch.mean((grad_l2norm - 1) ** 2)
        return d_loss_gp
|
{"hexsha": "84cc055cc400efa720b5f9c240966bb272530424", "size": 13574, "ext": "py", "lang": "Python", "max_stars_repo_path": "trainer.py", "max_stars_repo_name": "christopher-beckham/self-attention-GAN-pytorch", "max_stars_repo_head_hexsha": "9b10ecfd6957633bc6fde099dc9674acf1c222e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-01T01:07:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-01T01:07:23.000Z", "max_issues_repo_path": "trainer.py", "max_issues_repo_name": "christopher-beckham/self-attention-GAN-pytorch", "max_issues_repo_head_hexsha": "9b10ecfd6957633bc6fde099dc9674acf1c222e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trainer.py", "max_forks_repo_name": "christopher-beckham/self-attention-GAN-pytorch", "max_forks_repo_head_hexsha": "9b10ecfd6957633bc6fde099dc9674acf1c222e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-03-11T16:07:39.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-11T16:07:39.000Z", "avg_line_length": 40.7627627628, "max_line_length": 197, "alphanum_fraction": 0.58479446, "include": true, "reason": "import numpy", "num_tokens": 2892}
|
# ms_mint/io.py
import pandas as pd
import numpy as np
import io
import pymzml
from pathlib import Path as P
from datetime import date
from pyteomics import mzxml, mzml
def ms_file_to_df(fn):
    '''
    Read a mass-spectrometry file and return its scans as a
    pandas.DataFrame with the current column schema
    (scan_time_min / intensity / mz).

    Supported inputs, dispatched on the (case-insensitive) file name:
    .mzXML, .mzML, *hdf, *feather.  Note the hdf/feather checks match the
    bare suffix without a dot, mirroring how files are produced elsewhere
    in this module.

    Raises ValueError for any other file type.
    '''
    fn = str(fn)
    if fn.lower().endswith('.mzxml'):
        df = mzxml_to_df(fn)
    elif fn.lower().endswith('.mzml'):
        df = mzml_to_df(fn)
    elif fn.lower().endswith('hdf'):
        df = pd.read_hdf(fn)
    elif fn.lower().endswith('feather'):
        df = pd.read_feather(fn)
    else:
        # BUG FIX: an unsupported extension used to fall through and hit
        # the rename below with ``df`` undefined, raising a confusing
        # NameError.  Fail explicitly instead.
        raise ValueError('Unsupported file type: {}'.format(fn))
    # Compatibility with old schema
    df = df.rename(columns={
        'retentionTime': 'scan_time_min',
        'intensity array': 'intensity',
        'm/z array': 'mz'})
    return df
def mzxml_to_df(fn):
    '''
    Reads mzXML file and returns a pandas.DataFrame.

    Output columns: scan_id, ms_level, polarity, scan_time_min, mz,
    intensity — one row per (scan, m/z) pair.
    '''
    slices = []
    with mzxml.MzXML( fn ) as ms_data:
        # Pull scans one at a time until the reader is exhausted.
        while True:
            try:
                data = ms_data.next()
                df = pd.DataFrame(data)
                # Fix byteorder issue: pyteomics yields big-endian arrays.
                # NOTE(review): ndarray.newbyteorder was removed in NumPy 2;
                # this line assumes NumPy < 2 — confirm pinned version.
                df.loc[:,:] = df.values.byteswap().newbyteorder()
                df = df[['num', 'msLevel', 'polarity', 'retentionTime', 'm/z array', 'intensity array']]
                slices.append( df )
            except StopIteration as e:
                break
    df = pd.concat(slices)
    # Downcast for memory; intensities are stored as integer counts.
    df['retentionTime'] = df['retentionTime'].astype(np.float32)
    df['m/z array'] = df['m/z array'].astype(np.float32)
    df['intensity array'] = df['intensity array'].astype(int)
    # Rename to the module's current column schema.
    df = df.rename(columns={'num': 'scan_id',
                            'msLevel': 'ms_level',
                            'retentionTime': 'scan_time_min',
                            'm/z array': 'mz',
                            'intensity array': 'intensity'})
    df = df.reset_index(drop=True)
    cols = ['scan_id', 'ms_level', 'polarity',
            'scan_time_min', 'mz', 'intensity']
    df = df[cols]
    return df
def mzml_to_pandas_df_pyteomics(fn):
    '''
    Reads mzML file with pyteomics and returns a pandas.DataFrame
    with columns retentionTime (minutes), 'm/z array' and
    'intensity array'.
    '''
    cols = ['retentionTime', 'm/z array', 'intensity array']
    slices = []
    with mzml.MzML(fn) as ms_data:
        while True:
            try:
                data = ms_data.next()
                # scan time is reported in seconds; convert to minutes
                data['retentionTime'] = data['scanList']['scan'][0]['scan time'] / 60
                del data['scanList']
                slices.append( pd.DataFrame(data) )
            except Exception:
                # ms_data.next() raises StopIteration when the file is
                # exhausted; any other per-scan failure also ends the
                # loop (best-effort, as before).  BUG FIX: the bare
                # ``except:`` used here previously also swallowed
                # KeyboardInterrupt and SystemExit.
                break
    df = pd.concat(slices)[cols]
    df_to_numeric(df)
    df['intensity array'] = df['intensity array'].astype(int)
    df = df.reset_index(drop=True)
    return df
def mzml_to_df(fn, assume_time_unit='seconds', remove_noise=False):
    '''
    Read an mzML file with pymzml and return a pandas.DataFrame with
    columns scan_id, ms_level, polarity, scan_time_min, mz, intensity —
    one row per centroided peak.

    assume_time_unit: 'seconds' or 'minutes'; used only when the file
    itself does not declare a time unit.
    remove_noise: if True, apply pymzml's noise removal per spectrum.
    '''
    with pymzml.run.Reader(fn) as ms_data:
        data = []
        for spectrum in ms_data:
            # Try to convert time units with build-in method
            # some files have no time unit set. Then convert
            # to minutes assuming the time unit is as set
            # by assume_time_unit argument.
            # NOTE(review): if the conversion fails and assume_time_unit
            # is neither 'seconds' nor 'minutes', RT stays unbound and
            # the append below raises NameError — confirm intended.
            try:
                RT = spectrum.scan_time_in_minutes()
            except:
                if assume_time_unit == 'seconds':
                    RT = spectrum.scan_time[0] / 60.
                elif assume_time_unit == 'minutes':
                    RT = spectrum.scan_time[0]
            if remove_noise: spectrum = spectrum.remove_noise()
            peaks = spectrum.peaks("centroided")
            data.append((spectrum.index, spectrum.ms_level, '+' if spectrum["positive scan"] else '-', RT, peaks))
    # Column 4 holds the per-spectrum peak list; explode it so each
    # (mz, intensity) pair becomes its own row.
    ndx_explode = 4
    df = pd.DataFrame(data).explode(ndx_explode)
    df['mz'] = df[ndx_explode].apply(lambda x: x[0])
    df['intensity'] = df[ndx_explode].apply(lambda x: x[1]).astype(int)
    del df[ndx_explode]
    df = df.rename(columns={0: 'scan_id', 1: 'ms_level', 2: 'polarity', 3: 'scan_time_min' })
    df = df.reset_index(drop=True)
    return df
def df_to_numeric(df):
    '''
    Convert each column of *df* to a numeric dtype in place where
    possible; columns that cannot be converted are left unchanged.
    '''
    for col in df.columns:
        # pd.to_numeric(..., errors='ignore') is deprecated (pandas >= 2.2);
        # replicate its keep-on-failure behavior explicitly.
        try:
            converted = pd.to_numeric(df[col])
        except (ValueError, TypeError):
            continue
        df.loc[:, col] = converted
def export_to_excel(mint, fn=None):
    '''
    Export a Mint instance's peaklist, results and metadata to an Excel
    workbook with sheets 'Peaklist', 'Results' and 'Metadata'.

    If *fn* is given, write to that file and return None; otherwise
    return an in-memory io.BytesIO buffer rewound to position 0.
    '''
    date_string = str(date.today())
    if fn is None:
        file_buffer = io.BytesIO()
        writer = pd.ExcelWriter(file_buffer)
    else:
        writer = pd.ExcelWriter(fn)
    # Write into file
    mint.peaklist.to_excel(writer, 'Peaklist', index=False)
    mint.results.to_excel(writer, 'Results', index=False)
    meta = pd.DataFrame({'MINT_version': [mint.version],
                         'Date': [date_string]}).T[0]
    meta.to_excel(writer, 'Metadata', index=True, header=False)
    # Close writer and maybe return file buffer
    writer.close()
    if fn is None:
        # BUG FIX: previously this returned ``file_buffer.seek(0)``,
        # i.e. the new stream position (the int 0), not the buffer.
        file_buffer.seek(0)
        return file_buffer
def convert_ms_file_to_feather(fn, fn_out=None):
    '''Convert a mass-spectrometry file to feather format.

    If *fn_out* is omitted, the output path is *fn* with its suffix
    replaced by ``.feather``.  Returns the output path.
    '''
    source = P(fn)
    if fn_out is None:
        fn_out = source.with_suffix('.feather')
    frame = ms_file_to_df(source).reset_index(drop=True)
    frame.to_feather(fn_out)
    return fn_out
|
{"hexsha": "e9a47a5a0ac3da24b3598e49798067b6bf496938", "size": 4948, "ext": "py", "lang": "Python", "max_stars_repo_path": "ms_mint/io.py", "max_stars_repo_name": "luis-ponce/ms-mint", "max_stars_repo_head_hexsha": "cefd0d455c6658bf8c737160bd7253bb147c9c14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ms_mint/io.py", "max_issues_repo_name": "luis-ponce/ms-mint", "max_issues_repo_head_hexsha": "cefd0d455c6658bf8c737160bd7253bb147c9c14", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ms_mint/io.py", "max_forks_repo_name": "luis-ponce/ms-mint", "max_forks_repo_head_hexsha": "cefd0d455c6658bf8c737160bd7253bb147c9c14", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2080536913, "max_line_length": 114, "alphanum_fraction": 0.5707356508, "include": true, "reason": "import numpy", "num_tokens": 1242}
|
"""
This script was made by Nick at 19/07/20.
To implement code for inference with your model.
"""
from argparse import ArgumentParser, Namespace
import os
import matplotlib.pyplot as plt
import numpy as np
import pytorch_lightning as pl
import torch
from src.utils import Config, get_dataloader
pl.seed_everything(777)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def parse_args() -> Namespace:
    """Parse the dataset and model configuration file paths from the CLI."""
    arg_parser = ArgumentParser(description="Inference Autoencoders")
    arg_parser.add_argument(
        "--cfg-dataset",
        type=str,
        default="./configs/dataset/mnist.yml",
        help="select dataset",
    )
    arg_parser.add_argument(
        "--cfg-model",
        type=str,
        default="./configs/model/AE.yml",
        help="select model",
    )
    return arg_parser.parse_args()
def show_result(input_img, output_img):
    """Display the input image and the model's reconstruction side by side."""
    fig = plt.figure()
    rows = 1
    cols = 2

    ax1 = fig.add_subplot(rows, cols, 1)
    ax1.imshow(input_img)
    ax1.set_title("Input")
    ax1.axis("off")

    ax2 = fig.add_subplot(rows, cols, 2)
    ax2.imshow(output_img)
    # BUG FIX: title was misspelled "Ouput".
    ax2.set_title("Output")
    ax2.axis("off")

    plt.show()
def run(cfg: dict):
    """Load the configured model checkpoint, run it on one validation
    image and display the input next to the reconstruction.

    NOTE(review): despite the ``dict`` annotation, cfg is the project's
    Config object (attribute access: cfg.model.ckpt...) — confirm.
    """
    # Load checkpoint
    checkpoint_path = os.path.join(cfg.model.ckpt.path, cfg.model.ckpt.filename)
    # Resolve the model class by name from the top-level ``src`` package.
    Model = getattr(__import__("src"), cfg.model.name)
    model = Model(cfg.model.params)
    model = model.load_from_checkpoint(
        checkpoint_path=checkpoint_path,
    )
    # Select test image: first image of the first validation batch
    _, val_dataloader = get_dataloader(cfg)
    test_image = None
    for data in val_dataloader:
        images, _ = data
        # unsqueeze keeps a batch dimension of size 1
        test_image = images[0, :, :, :].unsqueeze(0)
        break
    # Inference
    x = torch.Tensor(test_image)
    y = model(x)
    # CHW -> HWC reordering for matplotlib display
    output = np.transpose(y[0].cpu().detach().numpy(), [1, 2, 0])
    test_image = np.transpose(test_image[0, :, :, :].cpu().numpy(), [1, 2, 0])
    show_result(test_image, output)
if __name__ == "__main__":
    # Parse CLI arguments, assemble the config object, and run inference.
    args = parse_args()
    cfg = Config()
    cfg.add_dataset(args.cfg_dataset)
    cfg.add_model(args.cfg_model)
    run(cfg)
|
{"hexsha": "127ff7df37ff42b84b26a441171af0497fd7e3f8", "size": 2098, "ext": "py", "lang": "Python", "max_stars_repo_path": "infer.py", "max_stars_repo_name": "HephaestusProject/seq2seq-att", "max_stars_repo_head_hexsha": "383f72d8bb46bdd4d66f0f7838f39c94eeb069b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "infer.py", "max_issues_repo_name": "HephaestusProject/seq2seq-att", "max_issues_repo_head_hexsha": "383f72d8bb46bdd4d66f0f7838f39c94eeb069b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-11-01T12:29:58.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-07T01:11:49.000Z", "max_forks_repo_path": "infer.py", "max_forks_repo_name": "HephaestusProject/seq2seq-att", "max_forks_repo_head_hexsha": "383f72d8bb46bdd4d66f0f7838f39c94eeb069b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8409090909, "max_line_length": 86, "alphanum_fraction": 0.6549094376, "include": true, "reason": "import numpy", "num_tokens": 537}
|
chapter \<open>Future Work\<close>
theory %invisible Future_Work
imports Main
begin
text \<open>\label{chap:future}\<close>
section \<open>Populating the Framework\<close>
text \<open>\label{sec:populate}\<close>
text \<open>Pop-refinement provides a framework,
which must be populated with re-usable
concepts, methodologies, and theorem prover libraries
for full fruition.
The simple examples in \chapref{chap:exampleI} and \chapref{chap:exampleII},
and the discussion in \chapref{chap:general},
suggest a few initial ideas.
Working out examples of increasing complexity should suggest more ideas.\<close>
section \<open>Automated Transformations\<close>
text \<open>\label{sec:xform}\<close>
text \<open>A pop-refinement step from @{term spec\<^sub>i} can be performed manually,
by writing down \<open>spec\<^sub>i\<^sub>+\<^sub>1\<close> and proving \<open>spec\<^sub>i\<^sub>+\<^sub>1 p \<Longrightarrow> spec\<^sub>i p\<close>.
It is sometimes possible to generate \<open>spec\<^sub>i\<^sub>+\<^sub>1\<close> from @{term spec\<^sub>i},
along with a proof of \<open>spec\<^sub>i\<^sub>+\<^sub>1 p \<Longrightarrow> spec\<^sub>i p\<close>,
using automated transformation techniques like
term rewriting,
application of algorithmic templates,
and term construction by witness finding,
e.g.\ \cite{SmithMarktoberdorf,SpecwareWebSite}.
Automated transformations may require
parameters to be provided and applicability conditions to be proved,
but should generally save effort
and make derivations more robust against changes in requirement specifications.
Extending existing theorem provers with automated transformation capabilities
would be advantageous for pop-refinement.\<close>
section \<open>Other Kinds of Design Objects\<close>
text \<open>\label{sec:otherdesign}\<close>
text \<open>It has been suggested~\cite{LambertPrivate}
that pop-refinement could be used
to develop other kinds of design objects than programs,
e.g.\ protocols, digital circuits, and hybrid systems.
Perhaps pop-refinement could be used to develop
engines, cars, buildings, etc.
So long as these design objects can be described
by languages amenable to formalization,
pop-refinement should be applicable.\<close>
end %invisible
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/Pop_Refinement/Future_Work.thy"}
|
"""
This module provides R style pairs plotting functionality.
"""
import matplotlib.cm as cm
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
# from . import plotting_util
from .plotting_util import (LegendEnum, get_color,
prepare_pairs_data, make_legend)
from ..util import get_module_logger
# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
__all__ = ['pairs_scatter', 'pairs_lines', 'pairs_density']
_logger = get_module_logger(__name__)
def pairs_lines(experiments, outcomes,
                outcomes_to_show=None,
                group_by=None,
                grouping_specifiers=None,
                ylabels=None,
                legend=True,
                **kwargs):
    """
    Generate a `R style pairs <http://www.stat.psu.edu/~dhunter/R/html/graphics/html/pairs.html>`_
    lines multiplot. It shows the behavior of two outcomes over time against
    each other. The origin is denoted with a circle and the end is denoted
    with a '+'.

    Parameters
    ----------
    experiments : DataFrame
    outcomes : dict
    outcomes_to_show : list of str, optional
                       list of outcome of interest you want to plot.
    group_by : str, optional
              name of the column in the cases array to group results by.
              Alternatively, `index` can be used to use indexing arrays as the
              basis for grouping.
    grouping_specifiers : dict, optional
                          dict of categories to be used as a basis for grouping
                          by. Grouping_specifiers is only meaningful if
                          group_by is provided as well. In case of grouping by
                          index, the grouping specifiers should be in a
                          dictionary where the key denotes the name of the
                          group.
    ylabels : dict, optional
              ylabels is a dictionary with the outcome names as keys, the
              specified values will be used as labels for the y axis.
    legend : bool, optional
             if true, and group_by is given, show a legend.

    Returns
    -------
    fig
        the figure instance
    dict
        key is tuple of names of outcomes, value is associated axes
        instance

    """
    # BUG FIX: the defaults used to be mutable ([] and {}), which are
    # shared across calls; use None sentinels and create fresh containers.
    if outcomes_to_show is None:
        outcomes_to_show = []
    if ylabels is None:
        ylabels = {}

    # unravel return from run_experiments
    _logger.debug("making a pars lines plot")

    prepared_data = prepare_pairs_data(
        experiments,
        outcomes,
        outcomes_to_show,
        group_by,
        grouping_specifiers,
        None)
    outcomes, outcomes_to_show, grouping_labels = prepared_data

    grid = gridspec.GridSpec(len(outcomes_to_show), len(outcomes_to_show))
    grid.update(wspace=0.1,
                hspace=0.1)

    # the plotting
    figure = plt.figure()
    axes_dict = {}

    # Every ordered pair of outcomes gets its own subplot.
    combis = [(field1, field2) for field1 in outcomes_to_show
              for field2 in outcomes_to_show]

    for field1, field2 in combis:
        i = list(outcomes_to_show).index(field1)
        j = list(outcomes_to_show).index(field2)
        ax = figure.add_subplot(grid[i, j])
        axes_dict[(field1, field2)] = ax

        if group_by:
            for x, entry in enumerate(grouping_labels):
                data1 = outcomes[entry][field1]
                data2 = outcomes[entry][field2]
                color = get_color(x)
                # Diagonal cells plot an outcome against itself; hide them.
                if i == j:
                    color = 'white'
                simple_pairs_lines(ax, data1, data2, color)
        else:
            data1 = outcomes[field1]
            data2 = outcomes[field2]
            color = 'b'
            if i == j:
                color = 'white'
            simple_pairs_lines(ax, data1, data2, color)
        do_text_ticks_labels(ax, i, j, field1, field2, ylabels,
                             outcomes_to_show)
    if group_by and legend:
        # Attach the legend to the top-left subplot.
        gs1 = grid[0, 0]

        for ax in figure.axes:
            gs2 = ax._subplotspec
            if all((gs1._gridspec == gs2._gridspec,
                    gs1.num1 == gs2.num1,
                    gs1.num2 == gs2.num2)):
                break
        make_legend(grouping_labels, ax, legend_type=LegendEnum.LINE)

    return figure, axes_dict
def simple_pairs_lines(ax, y_data, x_data, color):
    """
    Helper function for generating a simple pairs lines plot.

    Each row of the data arrays is one run; every run is drawn as a line,
    with its start marked by a circle and its end by a plus.

    Parameters
    ----------
    ax : axes
    y_data : ndarray
        2-d array, one row per run, plotted along the y axis
    x_data : ndarray
        2-d array, one row per run, plotted along the x axis
    color : str

    """
    # transpose so that each run (row) becomes a separate line
    ax.plot(x_data.T, y_data.T, c=color)

    # mark the start of each run with a circle ...
    ax.scatter(x_data[:, 0], y_data[:, 0],
               edgecolor=color, facecolor=color,
               marker='o')
    # ... and its end with a plus
    ax.scatter(x_data[:, -1], y_data[:, -1],
               edgecolor=color, facecolor=color,
               marker='+')
def pairs_density(experiments, outcomes,
                  outcomes_to_show=[],
                  group_by=None,
                  grouping_specifiers=None,
                  ylabels={},
                  point_in_time=-1,
                  log=True,
                  gridsize=50,
                  colormap='coolwarm',
                  filter_scalar=True):
    """
    Generate a `R style pairs <http://www.stat.psu.edu/~dhunter/R/html/graphics/html/pairs.html>`_
    hexbin density multiplot. In case of time-series data, the end
    states are used.

    hexbin makes hexagonal binning plot of x versus y, where x, y are 1-D
    sequences of the same length, N. If C is None (the default), this is a
    histogram of the number of occurences of the observations at (x[i],y[i]).
    For further detail see `matplotlib on hexbin <http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.hexbin>`_

    Parameters
    ----------
    experiments : DataFrame
    outcomes : dict
    outcomes_to_show : list of str, optional
        list of outcome of interest you want to plot.
    group_by : str, optional
        name of the column in the cases array to group results by.
        Alternatively, `index` can be used to use indexing arrays as the
        basis for grouping.
    grouping_specifiers : dict, optional
        dict of categories to be used as a basis for grouping
        by. Grouping_specifiers is only meaningful if
        group_by is provided as well. In case of grouping by
        index, the grouping specifiers should be in a
        dictionary where the key denotes the name of the
        group.
    ylabels : dict, optional
        ylabels is a dictionary with the outcome names as keys, the
        specified values will be used as labels for the y axis.
    point_in_time : float, optional
        the point in time at which the scatter is to be made. If
        None is provided (default), the end states are used.
        point_in_time should be a valid value on time
    log: bool, optional
        indicating whether density should be log scaled. Defaults to True.
    gridsize : int, optional
        controls the gridsize for the hexagonal bining. (default = 50)
    colormap : str
        color map that is to be used in generating the hexbin. For details
        on the available maps, see `pylab <http://matplotlib.sourceforge.net/examples/pylab_examples/show_colormaps.html#pylab-examples-show-colormaps>`_.
        (Defaults = coolwarm)
    filter_scalar: bool, optional
        remove the non-time-series outcomes. Defaults to True.

    Returns
    -------
    fig
        the figure instance, or a list of figures when group_by is used
    dict
        key is tuple of names of outcomes, value is associated axes
        instance; nested per group when group_by is used

    """
    _logger.debug("generating pairwise density plot")

    prepared_data = prepare_pairs_data(
        experiments,
        outcomes,
        outcomes_to_show,
        group_by,
        grouping_specifiers,
        point_in_time,
        filter_scalar)
    outcomes, outcomes_to_show, grouping_specifiers = prepared_data

    if group_by:
        # figure out the extents for each combination so that the per-group
        # figures are directly comparable
        extents = determine_extents(outcomes, outcomes_to_show)

        axes_dicts = {}
        figures = []
        for key, value in outcomes.items():
            figure, axes_dict = simple_pairs_density(value, outcomes_to_show,
                                                     log, colormap, gridsize,
                                                     ylabels,
                                                     extents=extents,
                                                     title=key)
            axes_dicts[key] = axes_dict
            figures.append(figure)

        # harmonize the color scaling across figures: every group's hexbin
        # for a given outcome combination gets the same vmax
        combis = [(field1, field2) for field1 in outcomes_to_show
                  for field2 in outcomes_to_show]
        for combi in combis:
            vmax = max((entry[combi].collections[0].norm.vmax
                        for entry in axes_dicts.values()), default=-1)
            for entry in axes_dicts.values():
                ax = entry[combi]
                ax.collections[0].set_clim(vmin=0, vmax=vmax)
        return figures, axes_dicts
    else:
        return simple_pairs_density(outcomes, outcomes_to_show, log, colormap,
                                    gridsize, ylabels)
def determine_extents(outcomes, outcomes_to_show):
    """
    Helper function used by pairs_density to make sure that multiple groups
    share the same axes extent.

    Parameters
    ----------
    outcomes : dict
    outcomes_to_show : list of str

    Returns
    -------
    dict
        tuple of str as key, and 4-tuple with extent

    """
    # first establish the global (min, max) for every outcome across all
    # groups
    limits = {}
    for group_outcomes in outcomes.values():
        for name in outcomes_to_show:
            values = group_outcomes[name]
            lo = np.amin(values)
            hi = np.amax(values)
            if name in limits:
                old_lo, old_hi = limits[name]
                limits[name] = (min(old_lo, lo), max(old_hi, hi))
            else:
                limits[name] = (lo, hi)

    # then combine the per-outcome limits into a 4-tuple extent for every
    # pairwise combination of outcomes
    return {
        (name_1, name_2): limits[name_1] + limits[name_2]
        for name_1 in outcomes_to_show
        for name_2 in outcomes_to_show
    }
def simple_pairs_density(outcomes,
                         outcomes_to_show,
                         log,
                         colormap,
                         gridsize,
                         ylabels,
                         extents=None,
                         title=None):
    """
    Helper function for generating a simple pairs density plot

    Parameters
    ----------
    outcomes : dict
    outcomes_to_show : list of str
    log : bool
    colormap : str
    gridsize : int
    ylabels: dict
    extents : dict, optional
        used to control scaling of plots across figures, If provided, it
        should be a dict with a tuple of outcomes as key and the extent to
        be used as value.
    title : str, optional
        if provided, used as the suptitle of the figure

    Returns
    -------
    fig
        the figure instance
    dict
        key is tuple of names of outcomes, value is associated axes
        instance

    """
    grid = gridspec.GridSpec(len(outcomes_to_show), len(outcomes_to_show))
    grid.update(wspace=0.1,
                hspace=0.1)

    # the plotting
    figure = plt.figure()
    if title is not None:
        # bug fix: the title argument used to be accepted but silently
        # ignored; pairs_density relies on it to label per-group figures
        figure.suptitle(title)

    combis = [(field1, field2) for field1 in outcomes_to_show
              for field2 in outcomes_to_show]
    axes_dict = {}
    for field1, field2 in combis:
        i = list(outcomes_to_show).index(field1)
        j = list(outcomes_to_show).index(field2)
        ax = figure.add_subplot(grid[i, j])
        axes_dict[(field1, field2)] = ax

        y_data = outcomes[field1]
        x_data = outcomes[field2]

        bins = 'log' if log else None
        extent = extents[(field2, field1)] if extents else None

        # text and labels
        if i == j:
            # diagonal cell: draw a fully transparent hexbin so the axes
            # scaling matches; the outcome name itself is placed by
            # do_text_ticks_labels
            ax.hexbin(x_data, y_data, bins=bins, gridsize=gridsize,
                      cmap=cm.__dict__[colormap], alpha=0, edgecolor='white',
                      linewidths=1, extent=extent)
        else:
            ax.hexbin(x_data, y_data, bins=bins, gridsize=gridsize,
                      cmap=cm.__dict__[colormap], edgecolor='black',
                      linewidths=1, extent=extent, mincnt=1)

        do_text_ticks_labels(ax, i, j, field1, field2, ylabels,
                             outcomes_to_show)
    return figure, axes_dict
def pairs_scatter(experiments, outcomes,
                  outcomes_to_show=[],
                  group_by=None,
                  grouping_specifiers=None,
                  ylabels={},
                  legend=True,
                  point_in_time=-1,
                  filter_scalar=False,
                  **kwargs):
    """
    Generate a `R style pairs <http://www.stat.psu.edu/~dhunter/R/html/graphics/html/pairs.html>`_
    scatter multiplot. In case of time-series data, the end states are used.

    Parameters
    ----------
    experiments : DataFrame
    outcomes : dict
    outcomes_to_show : list of str, optional
        list of outcome of interest you want to plot.
    group_by : str, optional
        name of the column in the cases array to group results by.
        Alternatively, `index` can be used to use indexing arrays as the
        basis for grouping.
    grouping_specifiers : dict, optional
        dict of categories to be used as a basis for grouping
        by. Grouping_specifiers is only meaningful if
        group_by is provided as well. In case of grouping by
        index, the grouping specifiers should be in a
        dictionary where the key denotes the name of the
        group.
    ylabels : dict, optional
        ylabels is a dictionary with the outcome names as keys, the
        specified values will be used as labels for the y axis.
    legend : bool, optional
        if true, and group_by is given, show a legend.
    point_in_time : float, optional
        the point in time at which the scatter is to be made. If
        None is provided (default), the end states are used.
        point_in_time should be a valid value on time
    filter_scalar: bool, optional
        remove the non-time-series outcomes. Defaults to False.

    Returns
    -------
    fig : Figure instance
        the figure instance
    axes : dict
        key is tuple of names of outcomes, value is associated axes
        instance

    .. note:: the current implementation is limited to seven different
              categories in case of column, categories, and/or
              discretesize. This limit is due to the colors specified
              in COLOR_LIST.

    """
    _logger.debug("generating pairwise scatter plot")

    prepared_data = prepare_pairs_data(experiments, outcomes,
                                       outcomes_to_show, group_by,
                                       grouping_specifiers, point_in_time,
                                       filter_scalar)
    outcomes, outcomes_to_show, grouping_labels = prepared_data

    grid = gridspec.GridSpec(len(outcomes_to_show), len(outcomes_to_show))
    grid.update(wspace=0.1,
                hspace=0.1)

    # the plotting
    figure = plt.figure()
    axes_dict = {}

    combis = [(field1, field2) for field1 in outcomes_to_show
              for field2 in outcomes_to_show]

    for field1, field2 in combis:
        i = list(outcomes_to_show).index(field1)
        j = list(outcomes_to_show).index(field2)
        ax = figure.add_subplot(grid[i, j])
        axes_dict[(field1, field2)] = ax

        # collect (color index, y, x) per group, or a single entry for the
        # ungrouped case, so both cases share one scatter loop
        if group_by:
            datasets = [(x, outcomes[group][field1], outcomes[group][field2])
                        for x, group in enumerate(grouping_labels)]
        else:
            datasets = [(0, outcomes[field1], outcomes[field2])]

        for x, y_data, x_data in datasets:
            if i == j:
                # the diagonal only shows the outcome name, so make the
                # scatter invisible there
                facecolor = 'white'
                edgecolor = 'white'
            else:
                facecolor = get_color(x)
                edgecolor = 'k'
            ax.scatter(x_data, y_data,
                       facecolor=facecolor, edgecolor=edgecolor)

        do_text_ticks_labels(ax, i, j, field1, field2, ylabels,
                             outcomes_to_show)

    if group_by and legend:
        # locate the axes of the top-left subplot and attach the legend to it
        gs1 = grid[0, 0]
        for ax in figure.axes:
            gs2 = ax._subplotspec
            if all((gs1._gridspec == gs2._gridspec,
                    gs1.num1 == gs2.num1,
                    gs1.num2 == gs2.num2)):
                break
        make_legend(grouping_labels, ax, legend_type=LegendEnum.SCATTER)

    return figure, axes_dict
def do_text_ticks_labels(ax, i, j, field1, field2, ylabels, outcomes_to_show):
    """
    Helper function for setting the tick labels on the axes correctly on and
    off

    Parameters
    ----------
    ax : axes
    i : int
        row index of the subplot in the pairs grid
    j : int
        column index of the subplot in the pairs grid
    field1 : str
        outcome shown on the y axis
    field2 : str
        outcome shown on the x axis
    ylabels : dict, optional
        mapping from outcome name to axis label; outcomes missing from the
        mapping are logged instead of labeled
    outcomes_to_show : str

    """
    # text and labels
    if i == j:
        # only plot the name in the middle
        if ylabels:
            text = ylabels[field1]
        else:
            text = field1
        ax.text(0.5, 0.5, text,
                horizontalalignment='center',
                verticalalignment='center',
                transform=ax.transAxes)

    # are we at the end of the row?
    if i != len(outcomes_to_show) - 1:
        # xaxis off
        ax.set_xticklabels([])
    else:
        if ylabels:
            try:
                # bug fix: this used ylabels.get(field2), which never raises
                # KeyError, so a missing label silently became None; plain
                # indexing makes the logging branch reachable
                ax.set_xlabel(ylabels[field2])
            except KeyError:
                _logger.info("no label specified for " + field2)
        else:
            ax.set_xlabel(field2)

    # are we at the end of the column?
    if j != 0:
        # yaxis off
        ax.set_yticklabels([])
    else:
        if ylabels:
            try:
                ax.set_ylabel(ylabels[field1])
            except KeyError:
                _logger.info("no label specified for " + field1)
        else:
            ax.set_ylabel(field1)
|
{"hexsha": "0cc11918ffc4b7ec1d9204d44385af88a7baff8d", "size": 19001, "ext": "py", "lang": "Python", "max_stars_repo_path": "ema_workbench/analysis/pairs_plotting.py", "max_stars_repo_name": "quaquel/EMAworkbench", "max_stars_repo_head_hexsha": "b16a454d734465bb163ea9ff1c52536cd945563e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 75, "max_stars_repo_stars_event_min_datetime": "2015-01-14T20:39:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T09:28:15.000Z", "max_issues_repo_path": "ema_workbench/analysis/pairs_plotting.py", "max_issues_repo_name": "quaquel/EMAworkbench", "max_issues_repo_head_hexsha": "b16a454d734465bb163ea9ff1c52536cd945563e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 92, "max_issues_repo_issues_event_min_datetime": "2015-01-15T16:12:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T20:46:37.000Z", "max_forks_repo_path": "ema_workbench/analysis/pairs_plotting.py", "max_forks_repo_name": "quaquel/EMAworkbench", "max_forks_repo_head_hexsha": "b16a454d734465bb163ea9ff1c52536cd945563e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 64, "max_forks_repo_forks_event_min_datetime": "2015-02-16T15:07:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T16:17:16.000Z", "avg_line_length": 33.7495559503, "max_line_length": 157, "alphanum_fraction": 0.5595494974, "include": true, "reason": "import numpy", "num_tokens": 4097}
|
import numpy as np
import pandas as pd
# reduction/statistics method names benchmarked below; shared by the frame
# and series benchmark classes as their ASV parameter list
ops = ["mean", "sum", "median", "std", "skew", "kurt", "mad", "prod", "sem", "var"]
class FrameOps:
    """ASV benchmarks for whole-DataFrame reduction ops along either axis."""

    params = [ops, ["float", "int"], [0, 1], [True, False]]
    param_names = ["op", "dtype", "axis", "use_bottleneck"]

    def setup(self, op, dtype, axis, use_bottleneck):
        # toggle bottleneck acceleration; very old pandas has no such option,
        # so fall back to patching nanops directly
        try:
            pd.options.compute.use_bottleneck = use_bottleneck
        except TypeError:
            from pandas.core import nanops

            nanops._USE_BOTTLENECK = use_bottleneck
        frame = pd.DataFrame(np.random.randn(100000, 4)).astype(dtype)
        self.df_func = getattr(frame, op)

    def time_op(self, op, dtype, axis, use_bottleneck):
        self.df_func(axis=axis)
class FrameMultiIndexOps:
    """ASV benchmarks for DataFrame reductions grouped by MultiIndex level."""

    params = ([0, 1, [0, 1]], ops)
    param_names = ["level", "op"]

    def setup(self, level, op):
        # three index levels with 10 * 100 * 100 = 100,000 rows in total
        level_values = [np.arange(10), np.arange(100), np.arange(100)]
        level_codes = [
            np.arange(10).repeat(10000),
            np.tile(np.arange(100).repeat(100), 10),
            np.tile(np.tile(np.arange(100), 100), 10),
        ]
        mi = pd.MultiIndex(levels=level_values, codes=level_codes)
        frame = pd.DataFrame(np.random.randn(len(mi), 4), index=mi)
        self.df_func = getattr(frame, op)

    def time_op(self, level, op):
        self.df_func(level=level)
class SeriesOps:
    """ASV benchmarks for Series reduction ops."""

    params = [ops, ["float", "int"], [True, False]]
    param_names = ["op", "dtype", "use_bottleneck"]

    def setup(self, op, dtype, use_bottleneck):
        # toggle bottleneck acceleration; very old pandas has no such option,
        # so fall back to patching nanops directly
        try:
            pd.options.compute.use_bottleneck = use_bottleneck
        except TypeError:
            from pandas.core import nanops

            nanops._USE_BOTTLENECK = use_bottleneck
        series = pd.Series(np.random.randn(100000)).astype(dtype)
        self.s_func = getattr(series, op)

    def time_op(self, op, dtype, use_bottleneck):
        self.s_func()
class SeriesMultiIndexOps:
    """ASV benchmarks for Series reductions grouped by MultiIndex level."""

    params = ([0, 1, [0, 1]], ops)
    param_names = ["level", "op"]

    def setup(self, level, op):
        # three index levels with 10 * 100 * 100 = 100,000 entries in total
        level_values = [np.arange(10), np.arange(100), np.arange(100)]
        level_codes = [
            np.arange(10).repeat(10000),
            np.tile(np.arange(100).repeat(100), 10),
            np.tile(np.tile(np.arange(100), 100), 10),
        ]
        mi = pd.MultiIndex(levels=level_values, codes=level_codes)
        series = pd.Series(np.random.randn(len(mi)), index=mi)
        self.s_func = getattr(series, op)

    def time_op(self, level, op):
        self.s_func(level=level)
class Rank:
    """ASV benchmarks for DataFrame/Series ranking, with and without pct."""

    params = [["DataFrame", "Series"], [True, False]]
    param_names = ["constructor", "pct"]

    def setup(self, constructor, pct):
        # pick the pandas container by name and fill it with random values
        self.data = getattr(pd, constructor)(np.random.randn(10 ** 5))

    def time_rank(self, constructor, pct):
        self.data.rank(pct=pct)

    def time_average_old(self, constructor, pct):
        # the pre-`pct` idiom: compute ranks and divide by the length
        self.data.rank(pct=pct) / len(self.data)
class Correlation:
    """ASV benchmarks for corr/corrwith across correlation methods."""

    params = [["spearman", "kendall", "pearson"], [True, False]]
    param_names = ["method", "use_bottleneck"]

    def setup(self, method, use_bottleneck):
        # toggle bottleneck acceleration; very old pandas has no such option,
        # so fall back to patching nanops directly
        try:
            pd.options.compute.use_bottleneck = use_bottleneck
        except TypeError:
            from pandas.core import nanops

            nanops._USE_BOTTLENECK = use_bottleneck
        randn = np.random.randn
        self.df = pd.DataFrame(randn(1000, 30))
        self.df2 = pd.DataFrame(randn(1000, 30))
        self.df_wide = pd.DataFrame(randn(1000, 200))
        # knock out ~10% of the wide frame to exercise the NaN-handling paths
        self.df_wide_nans = self.df_wide.where(np.random.random((1000, 200)) < 0.9)
        self.s = pd.Series(randn(1000))
        self.s2 = pd.Series(randn(1000))

    def time_corr(self, method, use_bottleneck):
        self.df.corr(method=method)

    def time_corr_wide(self, method, use_bottleneck):
        self.df_wide.corr(method=method)

    def time_corr_wide_nans(self, method, use_bottleneck):
        self.df_wide_nans.corr(method=method)

    def peakmem_corr_wide(self, method, use_bottleneck):
        self.df_wide.corr(method=method)

    def time_corr_series(self, method, use_bottleneck):
        self.s.corr(self.s2, method=method)

    def time_corrwith_cols(self, method, use_bottleneck):
        self.df.corrwith(self.df2, method=method)

    def time_corrwith_rows(self, method, use_bottleneck):
        self.df.corrwith(self.df2, axis=1, method=method)
class Covariance:
    """ASV benchmarks for Series.cov."""

    params = [[True, False]]
    param_names = ["use_bottleneck"]

    def setup(self, use_bottleneck):
        # toggle bottleneck acceleration; very old pandas has no such option,
        # so fall back to patching nanops directly
        try:
            pd.options.compute.use_bottleneck = use_bottleneck
        except TypeError:
            from pandas.core import nanops

            nanops._USE_BOTTLENECK = use_bottleneck
        self.s, self.s2 = (pd.Series(np.random.randn(100000)) for _ in range(2))

    def time_cov_series(self, use_bottleneck):
        self.s.cov(self.s2)
from .pandas_vb_common import setup # noqa: F401 isort:skip
|
{"hexsha": "ed5ebfa61594ec56483a2881cb36412d9d6f4dd9", "size": 4912, "ext": "py", "lang": "Python", "max_stars_repo_path": "asv_bench/benchmarks/stat_ops.py", "max_stars_repo_name": "LauraCollard/pandas", "max_stars_repo_head_hexsha": "b1c3a9031569334cafc4e8d45d35408421f7dea4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-12-02T11:24:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-28T12:13:54.000Z", "max_issues_repo_path": "asv_bench/benchmarks/stat_ops.py", "max_issues_repo_name": "LauraCollard/pandas", "max_issues_repo_head_hexsha": "b1c3a9031569334cafc4e8d45d35408421f7dea4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-10-31T08:19:49.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-31T08:19:49.000Z", "max_forks_repo_path": "asv_bench/benchmarks/stat_ops.py", "max_forks_repo_name": "LauraCollard/pandas", "max_forks_repo_head_hexsha": "b1c3a9031569334cafc4e8d45d35408421f7dea4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-10-09T07:52:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-12T02:37:59.000Z", "avg_line_length": 30.1349693252, "max_line_length": 83, "alphanum_fraction": 0.6192996743, "include": true, "reason": "import numpy", "num_tokens": 1325}
|
From Coq Require Vector List.
Require Import Rupicola.Lib.Core.
Require Import Rupicola.Lib.Notations.
Require Import Rupicola.Lib.Loops.
Require Export bedrock2.ArrayCasts.
Open Scope list_scope.
Module VectorArray.
(* Fixed-length arrays backed by Coq's Vector.t.  Indices are an abstract
   key type K convertible to nat, and every access carries an in-bounds
   proof, so get/put are total. *)
Section VectorArray.
Context {K: Type}.
Context {Conv: Convertible K nat}.
Open Scope nat_scope.
(* t V n: array of n elements of type V. *)
Definition t V n := Vector.t V n.
(* get a k pr: element at index (cast k); pr proves the index is in bounds. *)
Definition get {V n} (a: t V n) (k: K) (pr: cast k < n) : V :=
Vector.nth_order a pr.
(* put a k pr v: copy of a with the element at index (cast k) replaced by v. *)
Definition put {V n} (a: t V n) (k: K) (pr: cast k < n) (v: V) : t V n :=
Vector.replace_order a pr v.
(* Pointwise map over the array. *)
Definition map {V V' n} (f: V -> V') (a: t V n) : V' :=
Vector.map f a.
(* Left fold over the array with initial accumulator v0. *)
Definition fold_left {V V' n} (f: V' -> V -> V') v0 (a: t V n) : V' :=
Vector.fold_left f v0 a.
(* FIXME needs an accessor that generates a test and returns a default *)
End VectorArray.
Arguments t : clear implicits.
End VectorArray.
Module ListArray.
(* Arrays backed by plain lists.  Indices are a key type K convertible to
   nat; out-of-bounds reads return the type's default element, and
   out-of-bounds writes are no-ops. *)
Section ListArray.
Context {K: Type} {Conv: Convertible K nat}.
Section __.
Context {V: Type} {HD: HasDefault V}.
Open Scope nat_scope.
Definition t := list V.
(* get a k: element at index (cast k), or default when out of bounds. *)
Definition get (a: t) (k: K) : V :=
List.nth (cast k) a default.
(* put a k v: copy of a with index (cast k) set to v (no-op out of bounds). *)
Definition put (a: t) (k: K) (v: V) : t :=
replace_nth (cast k) a v.
(* FIXME needs an accessor that generates a test and returns a default *)
(* put preserves the array length. *)
Lemma put_length (a: t) (k: K) (v: V) :
List.length (put a k v) = List.length a.
Proof. intros; apply replace_nth_length. Qed.
(* Writing at index 0 of a nonempty array replaces its head. *)
Lemma put_0 (a: t) (k: K) (v: V) :
0 < length a -> cast k = 0 ->
put a k v = v :: List.tl a.
Proof.
unfold put; intros ? ->.
destruct a; simpl in *; reflexivity || lia.
Qed.
(* Writing just past a1 in (a1 ++ a2) replaces the head of a2. *)
Lemma put_app_len (a1 a2: t) (k: K) (v: V) :
0 < length a2 -> cast k = length a1 ->
put (a1 ++ a2) k v = a1 ++ v :: List.tl a2.
Proof.
unfold put; intros ? -> ;
rewrite !replace_nth_eqn by (rewrite ?app_length; lia).
rewrite List.firstn_app_l by reflexivity.
change (S ?x) with (1 + x); rewrite <- List.skipn_skipn.
rewrite List.skipn_app_r by reflexivity.
reflexivity.
Qed.
End __.
(* Pointwise map over the array. *)
Definition map {V V'} (f: V -> V') (a: t) :=
List.map f a.
(* Left fold over the array with initial accumulator v0. *)
Definition fold_left {V V'} (f: V' -> V -> V') (a: t) (v0: V') :=
List.fold_left f a v0.
End ListArray.
Arguments t : clear implicits.
End ListArray.
Section with_parameters.
Context {width: Z} {BW: Bitwidth width} {word: word.word width} {memT: map.map word Byte.byte}.
Context {localsT: map.map String.string word}.
Context {env: map.map String.string (list String.string * list String.string * Syntax.cmd)}.
Context {ext_spec: bedrock2.Semantics.ExtSpec}.
Context {word_ok : word.ok word} {mem_ok : map.ok memT}.
Context {locals_ok : map.ok localsT}.
Context {env_ok : map.ok env}.
Context {ext_spec_ok : Semantics.ext_spec.ok ext_spec}.
Section GenericArray.
(* Description of how one array cell is laid out in memory for a given
   access size: the Gallina-level element type, its separation-logic
   predicate, conversions to machine words, a default element, and the
   width in bytes of one cell. *)
Record access_info :=
{ ai_type : Type;
ai_size : access_size.access_size;
ai_repr : word -> ai_type -> memT -> Prop;
ai_to_word : ai_type -> word;
ai_to_truncated_word : ai_type -> word;
ai_default : HasDefault ai_type;
ai_width := Z.of_nat (@Memory.bytes_per width ai_size) }.
(* FIXME it might be better to use Cyclic.ZModulo.ZModulo. for the 16 and 32
cases, since it would avoid the truncation step (and it would make it
possible to recognize which type of array we're working with
unambiguously); otherwise currently the result of a 16 or 32-bits read
does not match the result of the Gallina-level get function. *)
(* Canonical access_info for each bedrock2 access size: bytes for one-byte
   accesses, full words otherwise (reads at sizes two/four are truncated). *)
Local Definition _access_info asize : access_info :=
match asize with
| access_size.one =>
{| ai_size := asize;
ai_type := byte;
ai_repr := scalar8;
ai_default := Byte.x00;
ai_to_word v := word_of_byte v;
ai_to_truncated_word v := word_of_byte v |}
| access_size.two =>
{| ai_size := asize;
ai_type := word;
ai_repr := scalar16;
ai_default := word.of_Z 0;
ai_to_word v := v;
ai_to_truncated_word v := truncate_word access_size.two v |}
| access_size.four =>
{| ai_size := asize;
ai_type := word;
ai_repr := scalar32;
ai_default := word.of_Z 0;
ai_to_word v := v;
ai_to_truncated_word v := truncate_word access_size.four v |}
| access_size.word =>
{| ai_size := access_size.word;
ai_type := word;
ai_repr := scalar;
ai_default := word.of_Z 0;
ai_to_word v := v;
ai_to_truncated_word v := v |}
end.
(* A cell's width in bytes is positive and fits in a machine word. *)
Lemma ai_width_bounded a :
0 < a.(ai_width) < 2 ^ width.
Proof.
unfold ai_width; destruct ai_size; simpl.
all: try (destruct width_cases as [H|H]; rewrite H; simpl; lia).
Qed.
Context (sz: access_size.access_size).
Notation ai := (_access_info sz).
Notation V := ai.(ai_type).
(* FIXME should this be a type class? *)
(* Abstract array interface: any Gallina type A that can be flattened to a
   list of cells, indexed by a key type K convertible to nat, with get/put
   operations and a memory-representation predicate. *)
Context
{A K}
(to_list: A -> list V)
(K_to_nat: Convertible K nat)
(get: A -> K -> V)
(put: A -> K -> V -> A)
(repr: word -> A -> memT -> Prop).
(* Canonical memory layout: the cells of a, laid out contiguously, each
   ai_width bytes wide, starting at a_ptr. *)
Definition array_repr a_ptr a :=
(array ai.(ai_repr) (word.of_Z ai.(ai_width)) a_ptr (to_list a)).
Notation K_to_word x := (word.of_Z (Z.of_nat (K_to_nat x))).
Context (a : A) (a_ptr : word) (a_expr : expr)
(val: V) (val_expr : expr)
(idx : K) (idx_expr : expr).
(* Hget: get agrees with indexing into the flattened list;
   Hrw: repr unfolds to the canonical contiguous layout. *)
Context
(Hget: forall a,
exists default,
get a idx =
List.hd default (List.skipn (K_to_nat idx) (to_list a)))
(Hrw:
Lift1Prop.iff1
(repr a_ptr a)
(array_repr a_ptr a)).
(* Address expression for cell idx: base + width * idx. *)
Definition offset base idx width :=
(expr.op bopname.add base (expr.op bopname.mul width idx)).
(* FIXME this should be an expression lemma, not a statement one *)
(* Compilation lemma: a Gallina read (get a idx) compiles to a load from
   a_ptr + ai_width * idx, provided idx is in bounds and the array is laid
   out in memory.  The destination variable receives the (possibly
   truncated) word view of the element. *)
Lemma compile_array_get {tr mem locals functions}:
let v := get a idx in
forall {P} {pred: P v -> predicate} {k: nlet_eq_k P v} {k_impl : cmd}
R (var : string),
Z.of_nat (K_to_nat idx) < Z.of_nat (Datatypes.length (to_list a)) ->
sep (repr a_ptr a) R mem ->
WeakestPrecondition.dexpr mem locals a_expr a_ptr ->
WeakestPrecondition.dexpr mem locals idx_expr (K_to_word idx) ->
(let v := v in
<{ Trace := tr;
Memory := mem;
Locals := map.put locals var (ai.(ai_to_truncated_word) v);
Functions := functions }>
k_impl
<{ pred (k v eq_refl) }>) ->
<{ Trace := tr;
Memory := mem;
Locals := locals;
Functions := functions }>
cmd.seq
(cmd.set
var
(expr.load (ai.(ai_size))
(offset a_expr idx_expr (expr.literal ai.(ai_width)))))
k_impl
<{ pred (nlet_eq [var] v k) }>.
Proof.
cbn; intros Hlt *. clear put.
pose proof word.unsigned_range (K_to_word idx) as (Hge & _).
destruct (Hget a) as [default Hget0].
exists (ai.(ai_to_truncated_word) (get a idx)); split; cbn; [ | assumption ].
eapply WeakestPrecondition_dexpr_expr; eauto.
eapply WeakestPrecondition_dexpr_expr; eauto.
eexists; split; [ | reflexivity ].
seprewrite_in Hrw H0. (* FIXME seprewrite shouldn't rename *)
(* FIXME: BEDROCK2: Adding an extra "_" at the end shelves an inequality *)
(* split the array at cell idx so that the load target becomes a separate
   scalar in the separation-logic context *)
match goal with
| [ H: (array_repr _ _ ⋆ _) ?mem |- _ ] =>
unfold array_repr in *;
seprewrite_in open_constr:(array_index_nat_inbounds
_ _ (default := default) _ _ (K_to_nat idx)) H
end.
{ lia. }
match goal with
| [ H: context[word.of_Z (_ * _)] |- _ ] =>
rewrite word.ring_morph_mul, !word.of_Z_unsigned in H by assumption
end.
rewrite Hget0.
clear Hget Hrw.
(* dispatch on the access size to pick the matching load lemma *)
destruct sz; cbv [_access_info ai_type ai_size ai_repr ai_to_truncated_word ai_width] in *.
- eapply load_one_of_sep; ecancel_assumption.
- eapply load_two_of_sep; ecancel_assumption.
- eapply load_four_of_sep; ecancel_assumption.
- eapply load_word_of_sep; ecancel_assumption.
Qed.
(* Hput: put rewrites exactly one cell of the flattened list;
   Hgetput: reading back the cell just written returns the new value;
   Hrw_put: repr still unfolds to the canonical layout after a put. *)
Context (Hput:
(K_to_nat idx < List.length (to_list a))%nat ->
to_list (put a idx val) =
List.app
(List.firstn (K_to_nat idx) (to_list a))
(val :: List.skipn (S (K_to_nat idx)) (to_list a)))
(Hgetput:
(K_to_nat idx < List.length (to_list a))%nat ->
get (put a idx val) idx = val)
(Hrw_put:
Lift1Prop.iff1
(repr a_ptr (put a idx val))
(array_repr a_ptr (put a idx val))).
(* One iteration of an in-place map: read cell idx, apply f, write back. *)
Definition map_step tmp_var vars f a (idx: K) :=
(let/n tmp as tmp_var := get a idx in
let/n tmp as tmp_var := f tmp in
nlet vars (put a idx tmp) id).
(* One iteration of a left fold: read cell idx and fold it into acc. *)
Definition foldl_step {A} tmp_var vars (f: A -> V -> A) bs acc (idx: K) :=
(let/n tmp as tmp_var := get bs idx in
nlet vars (f acc tmp) id).
(* put preserves the flattened length, so idx stays in bounds. *)
Local Lemma compile_array_put_length :
(K_to_nat idx < Datatypes.length (to_list a))%nat ->
(K_to_nat idx < List.length (to_list (put a idx val)))%nat.
Proof.
intros; rewrite Hput by assumption.
rewrite List.app_length, List.firstn_length_le by lia.
cbn [List.length]; rewrite List.length_skipn.
lia.
Qed.
(* put leaves the prefix before the written cell unchanged. *)
Local Lemma compile_array_put_firstn :
(K_to_nat idx < Datatypes.length (to_list a))%nat ->
List.firstn (K_to_nat idx) (to_list (put a idx val)) =
List.firstn (K_to_nat idx) (to_list a).
Proof.
intros; rewrite Hput by lia.
rewrite List.firstn_app.
rewrite List.firstn_firstn, Min.min_idempotent.
rewrite List.firstn_length_le by lia.
rewrite Nat.sub_diag; cbn [List.firstn]; rewrite app_nil_r.
reflexivity.
Qed.
(* put leaves the suffix after the written cell unchanged. *)
Local Lemma compile_array_put_skipn :
(K_to_nat idx < Datatypes.length (to_list a))%nat ->
List.skipn (S (K_to_nat idx)) (to_list (put a idx val)) =
List.skipn (S (K_to_nat idx)) (to_list a).
Proof.
intros; rewrite Hput by lia.
change (val :: ?tl) with (List.app [val] tl).
rewrite List.app_assoc, List.skipn_app, List.skipn_all, List.app_nil_l, List.skipn_skipn;
rewrite !List.app_length, List.firstn_length_le, (Nat.add_comm _ 1) by lia.
- rewrite Nat.sub_diag, Nat.add_0_l.
reflexivity.
- reflexivity.
Qed.
(* Compilation lemma: a Gallina write (put a idx val) compiles to a store
   at a_ptr + ai_width * idx.  The continuation runs in any memory where
   the updated array is laid out at a_ptr. *)
Lemma compile_array_put {tr mem locals functions} :
let v := put a idx val in
forall {P} {pred: P v -> predicate} {k: nlet_eq_k P v} {k_impl: cmd}
R (var: string),
(K_to_nat idx < Datatypes.length (to_list a))%nat ->
sep (repr a_ptr a) R mem ->
WeakestPrecondition.dexpr mem locals a_expr a_ptr ->
WeakestPrecondition.dexpr mem locals idx_expr (K_to_word idx) ->
WeakestPrecondition.dexpr mem locals val_expr (ai.(ai_to_word) val) ->
(let v := v in
forall mem',
sep (repr a_ptr v) R mem' ->
<{ Trace := tr;
Memory := mem';
Locals := locals;
Functions := functions }>
k_impl
<{ pred (k v eq_refl) }>) ->
<{ Trace := tr;
Memory := mem;
Locals := locals;
Functions := functions }>
cmd.seq
(cmd.store
(ai.(ai_size))
(offset a_expr idx_expr (expr.literal ai.(ai_width)))
val_expr)
k_impl
<{ pred (nlet_eq [var] v k) }>.
Proof.
cbn; intros Hlt *.
pose proof compile_array_put_length as Hputlen.
pose proof compile_array_put_firstn as Hputfst.
pose proof compile_array_put_skipn as Hputskp.
pose proof word.unsigned_range (K_to_word idx) as (Hge & _).
destruct (Hget (put a idx val)) as [default Hget0].
eexists; split; cbn.
{ repeat (eapply WeakestPrecondition_dexpr_expr; eauto). }
{ eexists; split; cbn.
{ repeat (eapply WeakestPrecondition_dexpr_expr; eauto). }
{ seprewrite_in Hrw H0.
(* split the array at cell idx so the store target is a separate scalar *)
match goal with
| [ H: (array_repr _ _ ⋆ _) ?mem |- _ ] =>
unfold array_repr in *;
seprewrite_in open_constr:(array_index_nat_inbounds
_ _ (default := default) _ _ (K_to_nat idx)) H
end.
{ assumption. }
match goal with
| [ H: context[word.of_Z (_ * _)] |- _ ] =>
rewrite word.ring_morph_mul, !word.of_Z_unsigned in H by assumption
end.
(* dispatch on the access size to pick the matching store lemma *)
destruct sz;
cbv [_access_info ai_type ai_size ai_repr ai_to_word ai_width] in *.
1: eapply store_one_of_sep; [ ecancel_assumption | ].
2: eapply store_two_of_sep; [ ecancel_assumption | ].
3: eapply store_four_of_sep; [ ecancel_assumption | ].
4: eapply store_word_of_sep; [ ecancel_assumption | ].
(* reassemble the updated array around the freshly stored cell *)
all: intros m Hm; apply H4; seprewrite Hrw_put.
all: seprewrite
open_constr:(array_index_nat_inbounds
_ _ (default := default)
_ _ (K_to_nat idx));
[ apply Hputlen; assumption | ].
all: rewrite <- Hget0, Hgetput, !Hputfst, !Hputskp by assumption.
all: repeat rewrite word.ring_morph_mul, !word.of_Z_unsigned by lia.
1: rewrite to_byte_of_byte_nowrap in Hm.
all: try ecancel_assumption.
} }
Qed.
End GenericArray.
Section GenericVectorArray.
(* Instantiation of the generic array lemmas for VectorArray: the length
   is statically known and every index carries an in-bounds proof, so the
   compilation lemmas have no separate bounds side condition. *)
Context (sz: access_size.access_size).
Notation ai := (_access_info sz).
Context {K: Type}
{ConvNat: Convertible K nat}.
Notation to_list := Vector.to_list.
Notation K_to_nat idx := (cast (proj1_sig (P:=fun idx0 : K => (cast idx0 < _)%nat) idx)).
Notation K_to_word x := (word.of_Z (Z.of_nat (K_to_nat x))).
Notation get a idx := (VectorArray.get a (proj1_sig idx) (proj2_sig idx)).
Notation put a idx v := (VectorArray.put a (proj1_sig idx) (proj2_sig idx) v).
(* Memory layout of a vector array: contiguous cells starting at addr. *)
Definition vectorarray_value {n}
(addr: word) (a: VectorArray.t ai.(ai_type) n)
: memT -> Prop :=
array ai.(ai_repr) (word.of_Z ai.(ai_width)) addr (to_list a).
Notation repr := vectorarray_value.
(* get agrees with indexing into the flattened vector. *)
Lemma VectorArray_Hget {n}:
forall (a: VectorArray.t ai.(ai_type) n) idx,
exists default,
get a idx =
List.hd default (List.skipn (K_to_nat idx) (to_list a)).
Proof.
intros. exists ai.(ai_default).
apply Vector_nth_hd_skipn.
rewrite Fin.to_nat_of_nat; reflexivity.
Qed.
(* put rewrites exactly one cell of the flattened vector. *)
Lemma VectorArray_Hput {n} :
forall (a : VectorArray.t ai.(ai_type) n) (idx: {idx : K | (cast idx < n)%nat}) v,
to_list (put a idx v) =
List.app
(List.firstn (K_to_nat idx) (to_list a))
(v :: List.skipn (S (K_to_nat idx)) (to_list a)).
Proof.
unfold put.
destruct idx as [widx pr]; cbn [proj1_sig proj2_sig].
unfold cast in *.
intros.
unfold Vector.replace_order; erewrite Vector_to_list_replace by reflexivity.
rewrite <- replace_nth_eqn by (rewrite Vector_to_list_length; assumption).
rewrite Fin.to_nat_of_nat; reflexivity.
Qed.
(* Reading back the cell just written returns the new value. *)
Lemma VectorArray_Hgetput {n} :
forall (a : VectorArray.t ai.(ai_type) n) (idx: {idx : K | (cast idx < n)%nat}) v,
get (put a idx v) idx = v.
Proof.
unfold get, put, Vector.nth_order, Vector.replace_order.
intros; apply Vector_nth_replace.
Qed.
(* The representation predicate is definitionally the canonical layout. *)
Lemma VectorArray_Hrw {n}:
forall addr (a: VectorArray.t ai.(ai_type) n),
Lift1Prop.iff1
(repr addr a)
(array_repr sz to_list addr a).
Proof. reflexivity. Qed.
(* VectorArray.get compiles to a load; the in-bounds proof pr discharges
   the generic lemma's bounds side condition. *)
Lemma compile_vectorarray_get {n} {tr mem locals functions}
(a: VectorArray.t ai.(ai_type) n) (idx: K) pr:
let v := VectorArray.get a idx pr in
forall {P} {pred: P v -> predicate} {k: nlet_eq_k P v} {k_impl}
R (a_ptr: word) a_expr idx_expr var,
sep (vectorarray_value a_ptr a) R mem ->
WeakestPrecondition.dexpr mem locals a_expr a_ptr ->
WeakestPrecondition.dexpr mem locals idx_expr (word.of_Z (Z.of_nat (cast idx))) ->
(let v := v in
<{ Trace := tr;
Memory := mem;
Locals := map.put locals var (ai.(ai_to_truncated_word) v);
Functions := functions }>
k_impl
<{ pred (k v eq_refl) }>) ->
<{ Trace := tr;
Memory := mem;
Locals := locals;
Functions := functions }>
cmd.seq
(cmd.set
var
(expr.load
(ai.(ai_size))
(offset a_expr idx_expr (expr.literal ai.(ai_width)))))
k_impl
<{ pred (nlet_eq [var] v k) }>.
Proof.
intros.
(* package idx and its proof as a sig value to match the generic lemma *)
change v with
((fun a idx => VectorArray.get a (proj1_sig idx) (proj2_sig idx))
a (exist (fun idx => cast idx < n)%nat idx pr)).
eapply (compile_array_get sz to_list (fun x => K_to_nat x));
eauto using VectorArray_Hget, VectorArray_Hrw.
{ rewrite Vector_to_list_length. simpl; lia. }
Qed.
(* VectorArray.put compiles to a store; the in-bounds proof pr discharges
   the generic lemma's bounds side condition. *)
Lemma compile_vectorarray_put {n} {tr mem locals functions}
(a: VectorArray.t ai.(ai_type) n) (idx: K) pr val:
let v := VectorArray.put a idx pr val in
forall {P} {pred: P v -> predicate} {k: nlet_eq_k P v} {k_impl}
R a_ptr a_expr idx_expr val_expr var,
sep (vectorarray_value a_ptr a) R mem ->
WeakestPrecondition.dexpr mem locals a_expr a_ptr ->
WeakestPrecondition.dexpr mem locals idx_expr (word.of_Z (Z.of_nat (cast idx))) ->
WeakestPrecondition.dexpr mem locals val_expr (ai.(ai_to_word) val) ->
(let v := v in
forall mem',
sep (vectorarray_value a_ptr v) R mem' ->
<{ Trace := tr;
Memory := mem';
Locals := locals;
Functions := functions }>
k_impl
<{ pred (k v eq_refl) }>) ->
<{ Trace := tr;
Memory := mem;
Locals := locals;
Functions := functions }>
cmd.seq
(cmd.store
(ai.(ai_size))
(offset a_expr idx_expr (expr.literal ai.(ai_width)))
val_expr)
k_impl
<{ pred (nlet_eq [var] v k) }>.
Proof.
intros.
(* package idx and its proof as a sig value to match the generic lemma *)
change v with
((fun v idx val => VectorArray.put a (proj1_sig idx) (proj2_sig idx) val)
v (exist (fun idx => cast idx < n)%nat idx pr) val).
eapply (compile_array_put sz
to_list (fun x => K_to_nat x)
(fun a idx => get a idx)
(fun a idx v => put a idx v));
eauto using VectorArray_Hget, VectorArray_Hrw,
VectorArray_Hgetput, VectorArray_Hput.
rewrite Vector_to_list_length; assumption.
Qed.
End GenericVectorArray.
(* Arrays represented as plain Coq lists; the representation predicate does
   not track the length, hence "unsized".  Index bounds must be supplied as
   side conditions by the caller of each compilation lemma. *)
Section GenericListArray.
Context (sz: access_size.access_size).
Context {HD: HasDefault (_access_info sz).(ai_type)}.
Notation ai := (_access_info sz).
Context {K: Type}
{ConvNat: Convertible K nat}.
Notation to_list x := x (only parsing).
Notation K_to_nat idx := (@cast _ _ ConvNat idx).
Notation K_to_word x := (word.of_Z (Z.of_nat (K_to_nat x))).
Notation get a idx := (ListArray.get a idx).
Notation put a idx v := (ListArray.put a idx v).
(* Separation-logic predicate: the list [a] laid out contiguously at [addr],
   one [ai_width]-byte cell per element. *)
Definition listarray_value
(addr: word) (a: ListArray.t ai.(ai_type))
: memT -> Prop :=
array ai.(ai_repr) (word.of_Z ai.(ai_width)) addr a.
Notation repr := listarray_value.
(* [get] is the head of the suffix starting at the index (default-padded). *)
Lemma ListArray_Hget:
forall (a: ListArray.t ai.(ai_type)) idx,
exists default,
get a idx =
List.hd default (List.skipn (K_to_nat idx) (to_list a)).
Proof.
intros. exists default.
unfold get; rewrite <- List.nth_default_eq.
apply List.hd_skipn_nth_default.
Qed.
(* For an in-bounds index, [put] splits the list into
   prefix ++ new value :: suffix. *)
Lemma ListArray_Hput :
forall (a : ListArray.t ai.(ai_type)) (idx: K) v,
(K_to_nat idx < Datatypes.length a)%nat ->
to_list (put a idx v) =
List.app
(List.firstn (K_to_nat idx) (to_list a))
(v :: List.skipn (S (K_to_nat idx)) (to_list a)).
Proof.
unfold put; eauto using replace_nth_eqn.
Qed.
(* get/put round-trip at an in-bounds index. *)
Lemma ListArray_Hgetput :
forall (a : ListArray.t ai.(ai_type)) (idx: K) v,
(K_to_nat idx < Datatypes.length a)%nat ->
get (put a idx v) idx = v.
Proof.
unfold get, put.
intros; apply nth_replace_nth; (assumption || reflexivity).
Qed.
(* The list representation coincides with the generic [array_repr]. *)
Lemma ListArray_Hrw:
forall addr a,
Lift1Prop.iff1
(repr addr a)
(array_repr sz (fun x => to_list x) addr a).
Proof. reflexivity. Qed.
(* Compilation lemma: [ListArray.get] compiles to a load at
   [a_ptr + idx * ai_width]; the in-bounds side condition is explicit. *)
Lemma compile_unsizedlistarray_get {tr mem locals functions}
(a: ListArray.t ai.(ai_type)) (idx: K):
let v := ListArray.get a idx in
forall {P} {pred: P v -> predicate} {k: nlet_eq_k P v} {k_impl}
R (a_ptr: word) a_expr idx_expr var,
sep (listarray_value a_ptr a) R mem ->
WeakestPrecondition.dexpr mem locals a_expr a_ptr ->
WeakestPrecondition.dexpr mem locals idx_expr (word.of_Z (Z.of_nat (cast idx))) ->
Z.of_nat (cast idx) < Z.of_nat (Datatypes.length a) ->
(let v := v in
<{ Trace := tr;
Memory := mem;
Locals := map.put locals var (ai.(ai_to_truncated_word) v);
Functions := functions }>
k_impl
<{ pred (k v eq_refl) }>) ->
<{ Trace := tr;
Memory := mem;
Locals := locals;
Functions := functions }>
cmd.seq
(cmd.set
var
(expr.load
(ai.(ai_size))
(offset a_expr idx_expr (expr.literal ai.(ai_width)))))
k_impl
<{ pred (nlet_eq [var] v k) }>.
Proof.
intros.
eapply (compile_array_get sz id (fun x => K_to_nat x));
eauto using ListArray_Hget, ListArray_Hrw.
Qed.
(* Compilation lemma: [ListArray.put] compiles to a store at
   [a_ptr + idx * ai_width]; the continuation runs in the updated memory. *)
Lemma compile_unsizedlistarray_put {tr mem locals functions}
(a: ListArray.t ai.(ai_type)) (idx: K) val:
let v := ListArray.put a idx val in
forall {P} {pred: P v -> predicate} {k: nlet_eq_k P v} {k_impl}
R (a_ptr: word) a_expr idx_expr val_expr var,
sep (listarray_value a_ptr a) R mem ->
WeakestPrecondition.dexpr mem locals a_expr a_ptr ->
WeakestPrecondition.dexpr mem locals idx_expr (word.of_Z (Z.of_nat (cast idx))) ->
WeakestPrecondition.dexpr mem locals val_expr (ai.(ai_to_word) val) ->
Z.of_nat (K_to_nat idx) < Z.of_nat (List.length a) ->
(let v := v in
forall mem',
sep (listarray_value a_ptr v) R mem' ->
<{ Trace := tr;
Memory := mem';
Locals := locals;
Functions := functions }>
k_impl
<{ pred (k v eq_refl) }>) ->
<{ Trace := tr;
Memory := mem;
Locals := locals;
Functions := functions }>
cmd.seq
(cmd.store
(ai.(ai_size))
(offset a_expr idx_expr (expr.literal ai.(ai_width)))
val_expr)
k_impl
<{ pred (nlet_eq [var] v k) }>.
Proof.
intros.
eapply (compile_array_put sz id (fun x => K_to_nat x));
eauto using ListArray_Hget, ListArray_Hput,
ListArray_Hgetput, ListArray_Hrw.
unfold id; pose proof word.unsigned_range (K_to_word idx).
lia.
Qed.
(* In-place [ListArray.map]: the per-element loop body acts as a
   [replace_nth] on the list. *)
Notation map_step := (map_step sz ListArray.get ListArray.put).
Lemma ListArray_map_step_ok {tmp_var vars f}:
acts_as_replace_nth f (map_step tmp_var vars f).
Proof.
unfold acts_as_replace_nth, map_step, nlet, id, get, put, cast, Convertible_Z_nat.
intros; rewrite Nat2Z.id, replace_nth_app_skip, nth_middle; reflexivity.
Qed.
(* Derived compilation lemma for [ListArray.map]. *)
Definition compile_listarray_map
f (a: ListArray.t ai.(ai_type)) tmp_var vars :=
compile_map f (map_step tmp_var vars f) a (ListArray.map f a) vars
ListArray_map_step_ok eq_refl.
(* Scalar [fold_left] over an array: the loop body reads one element. *)
Notation foldl_step := (foldl_step sz ListArray.get).
Lemma ListArray_foldl_step_ok {A} {tmp_var vars} {f: A -> _ -> A} bs:
acts_as_foldl_step f (foldl_step tmp_var vars f bs) bs.
Proof.
unfold acts_as_foldl_step, foldl_step, nlet, id, get, put, cast, Convertible_Z_nat.
eintros * ->%nth_error_nth; reflexivity.
Qed.
(* Derived compilation lemma for [ListArray.fold_left] with a scalar
   accumulator. *)
Definition compile_listarray_scalar_fold_left {A}
(f: A -> _ -> A) (bs: ListArray.t ai.(ai_type)) a
tmp_var vars :=
compile_scalar_fold_left
f (foldl_step tmp_var vars f bs) bs
a (ListArray.fold_left f bs a) vars
eq_refl (ListArray_foldl_step_ok bs).
End GenericListArray.
(* List-backed arrays whose representation predicate additionally records
   the length ([emp (List.length a = len)]).  This lets the compilation
   lemmas discharge bounds against [len] instead of the list itself. *)
Section GenericSizedListArray.
Context {sz: access_size.access_size}.
Context {HD: HasDefault (_access_info sz).(ai_type)}.
Notation ai := (_access_info sz).
Context {K: Type}
{ConvNat: Convertible K nat}.
Notation to_list x := x (only parsing).
Notation K_to_nat idx := (@cast _ _ ConvNat idx).
Notation K_to_word x := (word.of_Z (Z.of_nat (K_to_nat x))).
Notation get a idx := (ListArray.get a idx).
Notation put a idx v := (ListArray.put a idx v).
(* The unsized predicate plus a pure length fact. *)
Definition sizedlistarray_value
(len: nat) (addr: word) (a: ListArray.t ai.(ai_type))
: memT -> Prop :=
sep (emp (List.length a = len))
(listarray_value sz addr a).
(* Conversions between the sized and unsized predicates. *)
Lemma sizedlistarray_value_of_array {len addr a mem} :
List.length a = len ->
listarray_value sz addr a mem ->
sizedlistarray_value len addr a mem.
Proof. intros; apply sep_emp_l; eauto. Qed.
Lemma array_of_sizedlistarray_value {len addr a mem} :
sizedlistarray_value len addr a mem ->
listarray_value sz addr a mem.
Proof. intros H; apply sep_emp_l in H; intuition. Qed.
(* Extracting the recorded length, with and without a frame [R]. *)
Lemma length_of_sizedlistarray_value {len addr a mem} :
sizedlistarray_value len addr a mem ->
List.length a = len.
Proof. intros H; apply sep_emp_l in H; intuition. Qed.
Lemma length_of_sizedlistarray_value_R {len addr a R mem} :
(sizedlistarray_value len addr a ⋆ R) mem ->
List.length a = len.
Proof.
intros (?&?&H); decompose [and] H.
eauto using length_of_sizedlistarray_value.
Qed.
(* An array that fits in memory cannot span more than the address space. *)
Lemma sizedlistarray_value_max_length {len addr a R mem} :
(sizedlistarray_value len addr a ⋆ R) mem ->
(ai_width ai) * Z.of_nat len <= 2 ^ width.
Proof.
pose proof ai_width_bounded ai.
intros * (<- & Hmem)%sep_assoc%sep_emp_l.
etransitivity; [ | eapply array_max_length ]; eauto.
- rewrite word.unsigned_of_Z_nowrap by lia; reflexivity.
- destruct sz; apply scalar8_no_aliasing || apply scalar_no_aliasing1.
- rewrite word.unsigned_of_Z_nowrap; lia.
Qed.
(* Length of a prefix when the whole concatenation is represented. *)
Lemma sizedlistarray_value_app1_length {len addr a1 a2 R mem} :
(sizedlistarray_value len addr (a1 ++ a2) ⋆ R) mem ->
(length a1 = len - length a2)%nat.
Proof.
intros * Hmem.
apply length_of_sizedlistarray_value_R in Hmem.
rewrite app_length in Hmem; lia.
Qed.
Notation repr := sizedlistarray_value.
(* Under the length hypothesis, the sized predicate coincides with the
   generic [array_repr]. *)
Lemma SizedListArray_Hrw:
forall len addr (a: ListArray.t ai.(ai_type)),
List.length a = len ->
Lift1Prop.iff1
(repr len addr a)
(array_repr sz (fun x => to_list x) addr a).
Proof.
unfold sizedlistarray_value.
red; intros; rewrite sep_emp_l; tauto.
Qed.
(* Framed variant of [length_of_sizedlistarray_value]. *)
Lemma SizedListArray_length :
forall len addr (a: ListArray.t ai.(ai_type)) mem R,
(repr len addr a * R)%sep mem -> List.length a = len.
Proof.
intros * H; unfold repr in H.
eapply proj1. apply sep_emp_l with (m := mem).
ecancel_assumption.
Qed.
(* Compilation lemma: [ListArray.get] compiles to a load; the bound is
   stated against [len] rather than the list's length. *)
Lemma compile_sizedlistarray_get {len} {tr mem locals functions}
(a: ListArray.t ai.(ai_type)) (idx: K):
let v := ListArray.get a idx in
forall {P} {pred: P v -> predicate} {k: nlet_eq_k P v} {k_impl}
R (a_ptr: word) a_expr idx_expr var,
sep (sizedlistarray_value len a_ptr a) R mem ->
WeakestPrecondition.dexpr mem locals a_expr a_ptr ->
WeakestPrecondition.dexpr mem locals idx_expr (word.of_Z (Z.of_nat (cast idx))) ->
Z.of_nat (cast idx) < Z.of_nat len ->
(let v := v in
<{ Trace := tr;
Memory := mem;
Locals := map.put locals var (ai.(ai_to_truncated_word) v);
Functions := functions }>
k_impl
<{ pred (k v eq_refl) }>) ->
<{ Trace := tr;
Memory := mem;
Locals := locals;
Functions := functions }>
cmd.seq
(cmd.set
var
(expr.load
(ai.(ai_size))
(offset a_expr idx_expr (expr.literal ai.(ai_width)))))
k_impl
<{ pred (nlet_eq [var] v k) }>.
Proof.
intros.
eapply (compile_array_get sz id (fun x => K_to_nat idx) _ (fun (addr: word) a => repr len addr a));
eauto using ListArray_Hget.
- eapply SizedListArray_Hrw, SizedListArray_length.
eassumption.
- unfold id.
erewrite SizedListArray_length by eassumption.
assumption.
Qed.
(* Compilation lemma: [ListArray.put] compiles to a store; [put] preserves
   the length, so the sized predicate is re-established for the
   continuation. *)
Lemma compile_sizedlistarray_put {len} {tr mem locals functions}
(a: ListArray.t ai.(ai_type)) (idx: K) val:
let v := ListArray.put a idx val in
forall {P} {pred: P v -> predicate} {k: nlet_eq_k P v} {k_impl}
R (a_ptr: word) a_expr idx_expr val_expr var,
sep (sizedlistarray_value len a_ptr a) R mem ->
WeakestPrecondition.dexpr mem locals a_expr a_ptr ->
WeakestPrecondition.dexpr mem locals idx_expr (word.of_Z (Z.of_nat (cast idx))) ->
WeakestPrecondition.dexpr mem locals val_expr (ai.(ai_to_word) val) ->
Z.of_nat (cast idx) < Z.of_nat len ->
(let v := v in
forall mem',
sep (sizedlistarray_value len a_ptr v) R mem' ->
<{ Trace := tr;
Memory := mem';
Locals := locals;
Functions := functions }>
k_impl
<{ pred (k v eq_refl) }>) ->
<{ Trace := tr;
Memory := mem;
Locals := locals;
Functions := functions }>
cmd.seq
(cmd.store
(ai.(ai_size))
(offset a_expr idx_expr (expr.literal ai.(ai_width)))
val_expr)
k_impl
<{ pred (nlet_eq [var] v k) }>.
Proof.
intros.
eapply (compile_array_put sz id (fun x => K_to_nat x) _ _ (fun (addr: word) a => repr len addr a));
eauto using ListArray_Hget, ListArray_Hput,
ListArray_Hgetput.
- eapply SizedListArray_Hrw, SizedListArray_length.
eassumption.
- eapply SizedListArray_Hrw.
unfold id; rewrite ListArray.put_length.
eapply SizedListArray_length.
eassumption.
- unfold id.
erewrite SizedListArray_length by eassumption.
lia.
Qed.
End GenericSizedListArray.
(* Specialize an array lemma to a concrete access size [sz] and unfold the
   [_access_info] projections in its statement, producing a term whose head
   symbols are concrete so [simple apply] can match it. *)
Ltac prepare_array_lemma lemma sz := (* This makes [simple apply] work *)
pose (lemma sz) as lem;
cbv beta iota delta [_access_info ai_type ai_repr ai_to_word ai_to_truncated_word ai_width ai_size
map_step foldl_step]
in (type of lem);
change (let ai_width := _ in ?x) with x in (type of lem);
cbv beta in (type of lem);
let t := type of lem in
exact (lem: t).
(* Term-level wrapper around the tactic above, for use in [Definition]s. *)
Notation prepare_array_lemma lemma sz :=
ltac:(prepare_array_lemma lemma sz) (only parsing).
(* Size-specialized instances of the generic compilation lemmas, pre-reduced
   with [prepare_array_lemma] so the hint databases below can use
   [simple eapply] on them. *)
(* VectorArray get/put, for each access size. *)
Definition compile_byte_vectorarray_get :=
prepare_array_lemma (@compile_vectorarray_get) access_size.one.
Definition compile_w16_vectorarray_get :=
prepare_array_lemma (@compile_vectorarray_get) access_size.two.
Definition compile_w32_vectorarray_get :=
prepare_array_lemma (@compile_vectorarray_get) access_size.four.
Definition compile_word_vectorarray_get :=
prepare_array_lemma (@compile_vectorarray_get) access_size.word.
Definition compile_byte_vectorarray_put :=
prepare_array_lemma (@compile_vectorarray_put) access_size.one.
Definition compile_w16_vectorarray_put :=
prepare_array_lemma (@compile_vectorarray_put) access_size.two.
Definition compile_w32_vectorarray_put :=
prepare_array_lemma (@compile_vectorarray_put) access_size.four.
Definition compile_word_vectorarray_put :=
prepare_array_lemma (@compile_vectorarray_put) access_size.word.
(* Unsized ListArray get/put. *)
Definition compile_byte_unsizedlistarray_get :=
prepare_array_lemma (@compile_unsizedlistarray_get) access_size.one.
Definition compile_w16_unsizedlistarray_get :=
prepare_array_lemma (@compile_unsizedlistarray_get) access_size.two.
Definition compile_w32_unsizedlistarray_get :=
prepare_array_lemma (@compile_unsizedlistarray_get) access_size.four.
Definition compile_word_unsizedlistarray_get :=
prepare_array_lemma (@compile_unsizedlistarray_get) access_size.word.
Definition compile_byte_unsizedlistarray_put :=
prepare_array_lemma (@compile_unsizedlistarray_put) access_size.one.
Definition compile_w16_unsizedlistarray_put :=
prepare_array_lemma (@compile_unsizedlistarray_put) access_size.two.
Definition compile_w32_unsizedlistarray_put :=
prepare_array_lemma (@compile_unsizedlistarray_put) access_size.four.
Definition compile_word_unsizedlistarray_put :=
prepare_array_lemma (@compile_unsizedlistarray_put) access_size.word.
(* Sized ListArray get/put. *)
Definition compile_byte_sizedlistarray_get :=
prepare_array_lemma (@compile_sizedlistarray_get) access_size.one.
Definition compile_w16_sizedlistarray_get :=
prepare_array_lemma (@compile_sizedlistarray_get) access_size.two.
Definition compile_w32_sizedlistarray_get :=
prepare_array_lemma (@compile_sizedlistarray_get) access_size.four.
Definition compile_word_sizedlistarray_get :=
prepare_array_lemma (@compile_sizedlistarray_get) access_size.word.
Definition compile_byte_sizedlistarray_put :=
prepare_array_lemma (@compile_sizedlistarray_put) access_size.one.
Definition compile_w16_sizedlistarray_put :=
prepare_array_lemma (@compile_sizedlistarray_put) access_size.two.
Definition compile_w32_sizedlistarray_put :=
prepare_array_lemma (@compile_sizedlistarray_put) access_size.four.
Definition compile_word_sizedlistarray_put :=
prepare_array_lemma (@compile_sizedlistarray_put) access_size.word.
(* ListArray map and scalar fold_left. *)
Definition compile_byte_listarray_map :=
prepare_array_lemma (@compile_listarray_map) access_size.one.
Definition compile_w16_listarray_map :=
prepare_array_lemma (@compile_listarray_map) access_size.two.
Definition compile_w32_listarray_map :=
prepare_array_lemma (@compile_listarray_map) access_size.four.
Definition compile_word_listarray_map :=
prepare_array_lemma (@compile_listarray_map) access_size.word.
Definition compile_byte_listarray_scalar_fold_left :=
prepare_array_lemma (@compile_listarray_scalar_fold_left) access_size.one.
Definition compile_w16_listarray_scalar_fold_left :=
prepare_array_lemma (@compile_listarray_scalar_fold_left) access_size.two.
Definition compile_w32_listarray_scalar_fold_left :=
prepare_array_lemma (@compile_listarray_scalar_fold_left) access_size.four.
Definition compile_word_listarray_scalar_fold_left :=
prepare_array_lemma (@compile_listarray_scalar_fold_left) access_size.word.
End with_parameters.
(* Implicit-argument and simplification settings for the array predicates. *)
Arguments sizedlistarray_value {width word memT} sz len addr a _ : assert.
Arguments Arrays._access_info /.
Arguments Arrays.ai_width /.
Import Rupicola.Lib.Invariants Rupicola.Lib.Gensym.
(* Worker tactic: generate fresh loop-variable names from [locals] and apply
   the map compilation lemma [thm] with an inferred loop invariant. *)
Ltac _compile_map locals to thm :=
let idx_v := gensym locals "from" in
let tmp_v := gensym locals "tmp" in
let to_v := gensym locals "to" in
let lp := infer_ranged_for_predicate idx_v to_v to in
eapply thm with (idx_var := idx_v) (to_var := to_v)
(tmp_var := tmp_v) (loop_pred := lp).
(* Dispatch on the element type of the mapped list (bytes vs words) and
   invoke the matching specialized lemma. *)
Ltac compile_map :=
lazymatch goal with
| [ |- WeakestPrecondition.cmd _ _ _ _ ?locals (_ (nlet_eq _ ?v _)) ] =>
lazymatch v with
| (ListArray.map (V := Init.Byte.byte) _ ?l) =>
_compile_map locals (Z.of_nat (List.length l)) compile_byte_listarray_map
| (ListArray.map (V := @word.rep _ _) _ ?l) =>
_compile_map locals (Z.of_nat (List.length l)) compile_word_listarray_map
end
end.
(* Worker tactic for fold_left; mirrors [_compile_map]. *)
Ltac _compile_scalar_fold_left locals to thm :=
let idx_v := gensym locals "from" in
let tmp_v := gensym locals "tmp" in
let to_v := gensym locals "to" in
let lp := infer_ranged_for_predicate idx_v to_v to in
eapply thm with (idx_var := idx_v) (to_var := to_v)
(tmp_var := tmp_v) (loop_pred := lp).
(* Dispatch on the element type of the folded list (bytes vs words) and
   invoke the matching specialized lemma. *)
Ltac compile_fold_left :=
lazymatch goal with
| [ |- WeakestPrecondition.cmd _ _ _ _ ?locals (_ (nlet_eq _ ?v _)) ] =>
lazymatch v with
| (ListArray.fold_left (V := Init.Byte.byte) _ ?l _) =>
_compile_scalar_fold_left
locals (Z.of_nat (List.length l))
compile_byte_listarray_scalar_fold_left
| (ListArray.fold_left (V := @word.rep _ _) _ ?l _) =>
_compile_scalar_fold_left
locals (Z.of_nat (List.length l))
compile_word_listarray_scalar_fold_left
end
end.
(* Compiler hints for VectorArray get/put (byte and word element sizes). *)
Module VectorArrayCompiler.
#[export] Hint Extern 1 (WP_nlet_eq (VectorArray.get _ _ _)) =>
simple eapply (@compile_byte_vectorarray_get); shelve : compiler.
#[export] Hint Extern 1 (WP_nlet_eq (VectorArray.get _ _ _)) =>
simple eapply (@compile_word_vectorarray_get); shelve : compiler.
#[export] Hint Extern 1 (WP_nlet_eq (VectorArray.put _ _ _ _)) =>
simple eapply (@compile_byte_vectorarray_put); shelve : compiler.
#[export] Hint Extern 1 (WP_nlet_eq (VectorArray.put _ _ _ _)) =>
simple eapply (@compile_word_vectorarray_put); shelve : compiler.
End VectorArrayCompiler.
(* Compiler hints for unsized ListArray operations. *)
Module UnsizedListArrayCompiler.
#[export] Hint Extern 1 (WP_nlet_eq (ListArray.get _ _)) =>
simple eapply (@compile_byte_unsizedlistarray_get); shelve : compiler.
#[export] Hint Extern 1 (WP_nlet_eq (ListArray.get _ _)) =>
simple eapply (@compile_word_unsizedlistarray_get); shelve : compiler.
#[export] Hint Extern 1 (WP_nlet_eq (ListArray.put _ _ _)) =>
simple eapply (@compile_byte_unsizedlistarray_put); shelve : compiler.
#[export] Hint Extern 1 (WP_nlet_eq (ListArray.put _ _ _)) =>
simple eapply (@compile_word_unsizedlistarray_put); shelve : compiler.
#[export] Hint Extern 1 (WP_nlet_eq (ListArray.map _ _)) =>
compile_map; shelve : compiler.
#[export] Hint Extern 1 (WP_nlet_eq (ListArray.fold_left _ _ _)) =>
compile_fold_left; shelve : compiler.
End UnsizedListArrayCompiler.
(* Compiler hints for sized ListArray operations. *)
Module SizedListArrayCompiler.
#[export] Hint Extern 1 (WP_nlet_eq (ListArray.get _ _)) =>
simple eapply (@compile_byte_sizedlistarray_get); shelve : compiler.
#[export] Hint Extern 1 (WP_nlet_eq (ListArray.get _ _)) =>
simple eapply (@compile_word_sizedlistarray_get); shelve : compiler.
#[export] Hint Extern 1 (WP_nlet_eq (ListArray.put _ _ _)) =>
simple eapply (@compile_byte_sizedlistarray_put); shelve : compiler.
#[export] Hint Extern 1 (WP_nlet_eq (ListArray.put _ _ _)) =>
simple eapply (@compile_word_sizedlistarray_put); shelve : compiler.
#[export] Hint Extern 1 (WP_nlet_eq (ListArray.map _ _)) =>
compile_map; shelve : compiler.
#[export] Hint Extern 1 (WP_nlet_eq (ListArray.fold_left _ _ _)) =>
compile_fold_left; shelve : compiler.
End SizedListArrayCompiler.
(* Backwards-compatible aliases for the access sizes. *)
Module AccessSizeCompatibilityNotations.
Notation AccessByte := access_size.one (only parsing).
Notation AccessWord := access_size.word (only parsing).
End AccessSizeCompatibilityNotations.
Export AccessSizeCompatibilityNotations.
Export VectorArrayCompiler.
(* UnsizedListArrayCompiler and SizedListArrayCompiler conflict, so don't export them. *)
|
{"author": "mit-plv", "repo": "rupicola", "sha": "3f59b3d2404ce425ddf4fd55ad2314996a573dc3", "save_path": "github-repos/coq/mit-plv-rupicola", "path": "github-repos/coq/mit-plv-rupicola/rupicola-3f59b3d2404ce425ddf4fd55ad2314996a573dc3/src/Rupicola/Lib/Arrays.v"}
|
import os
import glob
from calibrator import Calibrator
import cv2
import numpy as np
try:
import python.modules.tf_calib
except ImportError:
pass
def calibrate(path: str, filter: str, nrows: int, ncols: int):
    """Train the calibrator on chessboard images found under ``path``.

    Args:
        path: Directory containing the calibration images.
        filter: Glob pattern selecting the images (e.g. ``'*.png'``).
            NOTE: shadows the ``filter`` builtin; the name is kept for
            backward compatibility with existing callers.
        nrows: Number of inner chessboard corners per row.
        ncols: Number of inner chessboard corners per column.
    """
    calibrator = Calibrator(1)

    # Object points: the planar (x, y, 0) grid of board corners, board units.
    objp = np.zeros((nrows * ncols, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nrows, 0:ncols].T.reshape(-1, 2)

    # Glob once and reuse (the original re-globbed on every pass).
    image_names = glob.glob(os.path.join(path, filter))
    print(image_names)

    # Termination criteria for the sub-pixel corner refinement.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    counter = 0
    for _ in range(100):  # repeat passes over the data set to train the calibrator
        for imname in image_names:
            counter += 1
            image = cv2.imread(imname, cv2.IMREAD_GRAYSCALE)
            if image is None:
                continue  # unreadable file: skip instead of crashing below
            found, corners = cv2.findChessboardCorners(image, (nrows, ncols), None)
            if not found:
                # Bug fix: the original ignored the found-flag and passed
                # corners=None to cornerSubPix, which raises.
                continue
            cv2.cornerSubPix(image, corners, (11, 11), (-1, -1), criteria)
            print(counter)
            calibrator.train(objp, [np.squeeze(corners.astype(np.float64))])
if __name__ == '__main__':
    # Example run: 8x13 inner-corner chessboard PNGs under ./data
    calibrate('data', '*.png', 8, 13)
|
{"hexsha": "15866c6023d1934d6843b277056b1da1c9a6a806", "size": 1316, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "vahagnIV/tf_calib", "max_stars_repo_head_hexsha": "24088b593c41d9bc2123e39f3d0523b0762a761e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2019-05-16T16:04:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-21T09:22:36.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "vahagnIV/tf_calib", "max_issues_repo_head_hexsha": "24088b593c41d9bc2123e39f3d0523b0762a761e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "vahagnIV/tf_calib", "max_forks_repo_head_hexsha": "24088b593c41d9bc2123e39f3d0523b0762a761e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-05-16T16:58:01.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-18T21:03:13.000Z", "avg_line_length": 33.7435897436, "max_line_length": 93, "alphanum_fraction": 0.6041033435, "include": true, "reason": "import numpy", "num_tokens": 352}
|
*DECK TRBAK1
      SUBROUTINE TRBAK1 (NM, N, A, E, M, Z)
C***BEGIN PROLOGUE  TRBAK1
C***PURPOSE  Form the eigenvectors of real symmetric matrix from
C            the eigenvectors of a symmetric tridiagonal matrix formed
C            by TRED1.
C***LIBRARY   SLATEC (EISPACK)
C***CATEGORY  D4C4
C***TYPE      SINGLE PRECISION (TRBAK1-S)
C***KEYWORDS  EIGENVECTORS OF A REAL SYMMETRIC MATRIX, EISPACK
C***AUTHOR  Smith, B. T., et al.
C***DESCRIPTION
C
C     This subroutine is a translation of the ALGOL procedure TRBAK1,
C     NUM. MATH. 11, 181-195(1968) by Martin, Reinsch, and Wilkinson.
C     HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 212-226(1971).
C
C     This subroutine forms the eigenvectors of a REAL SYMMETRIC
C     matrix by back transforming those of the corresponding
C     symmetric tridiagonal matrix determined by  TRED1.
C
C     On Input
C
C        NM must be set to the row dimension of the two-dimensional
C          array parameters, A and Z, as declared in the calling
C          program dimension statement.  NM is an INTEGER variable.
C
C        N is the order of the matrix.  N is an INTEGER variable.
C          N must be less than or equal to NM.
C
C        A contains information about the orthogonal transformations
C          used in the reduction by  TRED1  in its strict lower
C          triangle.  A is a two-dimensional REAL array, dimensioned
C          A(NM,N).
C
C        E contains the subdiagonal elements of the tridiagonal matrix
C          in its last N-1 positions.  E(1) is arbitrary.  These
C          elements provide the remaining information about the
C          orthogonal transformations.  E is a one-dimensional REAL
C          array, dimensioned E(N).
C
C        M is the number of columns of Z to be back transformed.
C          M is an INTEGER variable.
C
C        Z contains the eigenvectors to be back transformed in its
C          first M columns.  Z is a two-dimensional REAL array,
C          dimensioned Z(NM,M).
C
C     On Output
C
C        Z contains the transformed eigenvectors in its first M columns.
C
C     Note that TRBAK1 preserves vector Euclidean norms.
C
C     Questions and comments should be directed to B. S. Garbow,
C     APPLIED MATHEMATICS DIVISION, ARGONNE NATIONAL LABORATORY
C     ------------------------------------------------------------------
C
C***REFERENCES  B. T. Smith, J. M. Boyle, J. J. Dongarra, B. S. Garbow,
C                 Y. Ikebe, V. C. Klema and C. B. Moler, Matrix Eigen-
C                 system Routines - EISPACK Guide, Springer-Verlag,
C                 1976.
C***ROUTINES CALLED  (NONE)
C***REVISION HISTORY  (YYMMDD)
C   760101  DATE WRITTEN
C   890831  Modified array declarations.  (WRB)
C   890831  REVISION DATE from Version 3.2
C   891214  Prologue converted to Version 4.0 format.  (BAB)
C   920501  Reformatted the REFERENCES section.  (WRB)
C***END PROLOGUE  TRBAK1
C
      INTEGER I,J,K,L,M,N,NM
      REAL A(NM,*),E(*),Z(NM,*)
      REAL S
C
C***FIRST EXECUTABLE STATEMENT  TRBAK1
C     .......... QUICK RETURN IF THERE ARE NO VECTORS OR THE
C                MATRIX IS 1-BY-1 (NO TRANSFORMATIONS STORED) ..........
      IF (M .EQ. 0) GO TO 200
      IF (N .EQ. 1) GO TO 200
C
C     .......... APPLY EACH HOUSEHOLDER REFLECTOR (STORED IN ROW I OF
C                THE STRICT LOWER TRIANGLE OF A, SCALED BY E(I)) TO
C                EVERY ONE OF THE M EIGENVECTOR COLUMNS OF Z ..........
      DO 140 I = 2, N
         L = I - 1
         IF (E(I) .EQ. 0.0E0) GO TO 140
C
         DO 130 J = 1, M
            S = 0.0E0
C
C     .......... S = (ROW I OF A) DOT (COLUMN J OF Z) ..........
            DO 110 K = 1, L
  110       S = S + A(I,K) * Z(K,J)
C     .......... DIVISOR BELOW IS NEGATIVE OF H FORMED IN TRED1.
C                DOUBLE DIVISION AVOIDS POSSIBLE UNDERFLOW ..........
            S = (S / A(I,L)) / E(I)
C
            DO 120 K = 1, L
  120       Z(K,J) = Z(K,J) + S * A(I,K)
C
  130    CONTINUE
C
  140 CONTINUE
C
  200 RETURN
      END
|
{"hexsha": "00b8ac94afd762068f2c1159c69e776ce29b7123", "size": 3566, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "slatec/src/trbak1.f", "max_stars_repo_name": "andremirt/v_cond", "max_stars_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "slatec/src/trbak1.f", "max_issues_repo_name": "andremirt/v_cond", "max_issues_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slatec/src/trbak1.f", "max_forks_repo_name": "andremirt/v_cond", "max_forks_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9607843137, "max_line_length": 72, "alphanum_fraction": 0.6194615816, "num_tokens": 1103}
|
#!/usr/bin/env python
import os
import subprocess
# import science modules
import numpy as np
import astropy.units as u
from astropy.time import Time
from numpy.linalg import norm
from scipy.interpolate import interp1d
from scipy.optimize import leastsq
from astropy.coordinates import SkyCoord, EarthLocation, \
get_body_barycentric, solar_system_ephemeris, \
ITRS, ICRS, HCRS, GCRS, PrecessedGeocentric
# Define constants
# Physical constants (SI units).
MU_EARTH = 3.986005000e14  # Earth's standard gravitational parameter (m^3/s^2)
MU_SUN = 1.32712440018e20  # Sun's standard gravitational parameter (m^3/s^2)
OMEGA_EARTH = 7.2921158553e-5  # Earth's rotation rate (rad/s)
class PoorTriangulationResiduals(Exception):
    '''
    Raised when StraightLineLeastSquares produces a poor SLLS residual.
    '''
    pass
class TriangulationError(Exception):
    '''
    Raised when triangulation fails.
    '''
    pass
class TriangulationOutOfRange(Exception):
    '''
    Raised when triangulation fails (out-of-range case, per the class name;
    the original docstring duplicated TriangulationError's).
    '''
    pass
class TriangulationInvalidInput(Exception):
    '''
    Raised when not enough (or incorrect) data is passed to the trajectory
    solver.
    '''
    pass
#------------------------------------------------------------------------------
# Determine Earths parameters
#------------------------------------------------------------------------------
def grav_params():
    """Return ``(G, M)``: the gravitational constant and Earth's mass."""
    gravitational_constant = 6.6726e-11  # N m^2 / kg^2
    earth_mass = 5.98e24                 # kg
    return gravitational_constant, earth_mass
def Gravity(ECEF):
    """Point-mass gravitational acceleration at an ECEF position.

    Args:
        ECEF: [x, y, z] position in the Earth-Centred Earth-Fixed frame (m).

    Returns:
        tuple: ``(gravity, lat, lon, height)`` where ``gravity`` is the
        acceleration vector in ECEF (m/s^2, directed towards the geocentre)
        and ``lat``/``lon``/``height`` are the geodetic coordinates of the
        position as returned by ``ECEF2LLH``.
    """
    # Newtonian point-mass model: g = -mu * r / |r|^3.
    # (A superseded ENU-based formulation was removed; it computed the same
    # magnitude via an ENU->ECEF rotation.)
    gravity = - MU_EARTH * np.asarray(ECEF) / (np.linalg.norm(ECEF)**3)
    [lat, lon, height] = ECEF2LLH(ECEF)
    return gravity, lat, lon, height
def WGS84_params():
    """Return the WGS84 ellipsoid axes ``(a, b)`` in metres."""
    semi_major = 6378137.0                        # a: equatorial radius
    flattening = 1 / 298.257223563                # f: WGS84 defining flattening
    semi_minor = semi_major * (1 - flattening)    # b: polar radius
    return semi_major, semi_minor
def EarthRadius(lat):
    """Radius of the WGS84 ellipsoid at geodetic latitude ``lat`` (radians).

    See http://en.wikipedia.org/wiki/Earth_radius#Radii_with_location_dependence
    """
    a, b = WGS84_params()
    cos_lat = np.cos(lat)
    sin_lat = np.sin(lat)
    numerator = (a**2 * cos_lat)**2 + (b**2 * sin_lat)**2
    denominator = (a * cos_lat)**2 + (b * sin_lat)**2
    return np.sqrt(numerator / denominator)
def gravity_vector(pos):
    """Effective gravity vector at ``pos`` (ECEF, or ECI with a tiny error).

    Normal gravity on the WGS84 ellipsoid surface is evaluated at the
    position's geodetic latitude (constants from
    http://walter.bislins.ch/bloge/index.asp?page=Earth+Gravity+Calculator),
    then corrected for altitude with an inverse-square term.  Returns a
    3x1 column vector directed downwards along the local ellipsoid normal.
    """
    lat, lon, height = ECEF2LLH(pos)
    a, b = WGS84_params()

    # Normal gravity at the equator and at the poles (m/s^2).
    g_equator = 9.7803253359
    g_pole = 9.8321849378

    k = (b * g_pole - a * g_equator) / (a * g_equator)
    e2 = 1 - (b / a)**2
    sin2_lat = np.sin(lat)**2

    # Gravity on the ellipsoid surface at this latitude.
    surface_g = g_equator * (1 + k * sin2_lat) / np.sqrt(1 - e2 * sin2_lat)

    # Inverse-square correction for altitude above the surface.
    R = EarthRadius(lat)
    total_g = surface_g + MU_EARTH * (1 / (R + height)**2 - 1 / R**2)

    # Direction: opposite the outward geodetic normal.
    down = np.vstack((np.cos(lat) * np.cos(lon),
                      np.cos(lat) * np.sin(lon),
                      np.sin(lat)))
    return -total_g * down
#########################################################
# Timing offset calculator
#########################################################
def calculate_timing_offsets(pos_all, t_jd_all, cam_no_all, cleared_cameras=None):
    """Estimate per-camera timing offsets (seconds) along a shared trajectory.

    The camera with the most observations is the master (zero offset); every
    other camera's offset is found by least-squares aligning its along-track
    lengths against the master's length-vs-time profile.

    Args:
        pos_all: (3, N) array of trajectory positions (metres).
        t_jd_all: (N,) array of observation times in Julian days.
        cam_no_all: (N,) array of camera identifiers, one per observation.
        cleared_cameras: optional iterable of camera identifiers trusted to
            have absolute timing; if given, the zero-offset reference is
            chosen among them so the L1 norm of all offsets is minimal.

    Returns:
        dict: ``{camera_id: offset_seconds}``.
    """
    # Times relative to the earliest observation, converted days -> seconds.
    t_rel_all = (t_jd_all - np.min(t_jd_all)) * 24*60*60

    # The master camera is the one with the most observations.
    [orig_cams, cam_counts] = np.unique(cam_no_all, return_counts=True)
    master = orig_cams[np.argmax(cam_counts)]

    # With a single camera there is nothing to align.
    if len(orig_cams) == 1: return {master: 0.}

    # Length along the trajectory, measured from the point of largest
    # geocentric distance (the top of the atmosphere).
    idx = np.argmax(norm(pos_all, axis=0))
    length_all = norm(pos_all - pos_all[:,idx:idx+1], axis=0)

    # Separate the master camera's samples from the rest.
    length_m = length_all[cam_no_all == master]
    t_rel_m = t_rel_all[cam_no_all == master]
    length = length_all[cam_no_all != master]
    t_rel = t_rel_all[cam_no_all != master]
    cam_no = cam_no_all[cam_no_all != master]

    # Drop samples outside the master's length range so interp1d behaves.
    min_len = np.min(length_m); max_len = np.max(length_m)
    out_of_bounds = np.where((length < min_len) + (length > max_len))[0]
    length = np.delete(length, out_of_bounds)
    t_rel = np.delete(t_rel, out_of_bounds)
    cam_no = np.delete(cam_no, out_of_bounds)

    # No overlap with the master: every offset defaults to zero.
    # Bug fix: the original returned dict(enumerate(zeros)), keying the
    # result by indices 0..n-1 instead of the actual camera identifiers.
    if len(cam_no) == 0: return {cam: 0. for cam in orig_cams}

    [remaning_cams, cam_counts] = np.unique(cam_no, return_counts=True)

    # Residual: master-profile length at offset-adjusted times vs observed.
    def displacement(offsets):
        offset_vect = np.hstack([[offset]*count
                        for offset, count in zip(offsets, cam_counts)])
        adjusted_times = t_rel + offset_vect
        length_est = interp1d(t_rel_m, length_m,
                              fill_value='extrapolate')(adjusted_times)
        return length_est - length

    # Solve for one offset per non-master camera.
    offsets_est = np.zeros(len(cam_counts))
    offsets = leastsq(displacement, offsets_est, full_output=True)[0]

    # Prepend the master's zero offset.
    offsets_all = np.hstack((0., offsets))
    unique_cams = np.hstack((master, remaning_cams))

    if cleared_cameras is None:
        cleared_cameras = unique_cams
    else:
        cleared_cameras = [obs for obs in cleared_cameras if obs in list(unique_cams)]

    # If the master camera is not on the cleared list, re-zero on the cleared
    # camera that minimises the L1 norm of the resulting offsets.
    if len(cleared_cameras) != 0:
        offset_combos = np.zeros((len(cleared_cameras), len(offsets_all)))
        for i, cam in enumerate(cleared_cameras):
            idx = np.where(unique_cams == cam)[0][0]
            offset_combos[i] = offsets_all - offsets_all[idx]
        # min_sum_offset = np.argmin(norm(offset_combos, axis=1)) # L2-norm
        min_sum_offset = np.argmin(np.sum(np.abs(offset_combos), axis=1)) # L1-norm
        offsets_corrected = list(offset_combos[min_sum_offset])
    else:
        offsets_corrected = list(offsets_all)

    # Keyed by camera number (not telescope) so the same camera may appear
    # across multiple images.
    offset_dict = dict(zip(unique_cams, offsets_corrected))
    print(offset_dict)
    return offset_dict
#########################################################
# Trajectory maths / transforms
#########################################################
def get_zenith_and_bearing(table, segment):
    '''
    Zenith and bearing of the trajectory over the chosen part of the table.
    Choose the part of the trajectory to calculate: 'all', 'beg', or 'end'.
    Returns (zenith, bearing) as astropy Quantities in degrees.
    '''
    if segment == 'all':
        X_beg = np.vstack((table['X_geo'][ 0], table['Y_geo'][ 0], table['Z_geo'][ 0]))
        X_end = np.vstack((table['X_geo'][-1], table['Y_geo'][-1], table['Z_geo'][-1]))
        X_mid = (X_beg + X_end) / 2.0
    elif segment == 'beg':
        X_beg = np.vstack((table['X_geo'][0], table['Y_geo'][0], table['Z_geo'][0]))
        X_end = np.vstack((table['X_geo'][1], table['Y_geo'][1], table['Z_geo'][1]))
        X_mid = X_beg
    elif segment == 'end':
        X_beg = np.vstack((table['X_geo'][-2], table['Y_geo'][-2], table['Z_geo'][-2]))
        X_end = np.vstack((table['X_geo'][-1], table['Y_geo'][-1], table['Z_geo'][-1]))
        X_mid = X_end
    else:
        raise NameError("Must choose between segment of either 'all', 'beg', or 'end'.")
    [[X],[Y],[Z]] = X_mid
    # Table columns may be raw floats or astropy Quantities
    try:
        X_mid_EL = EarthLocation(x=X.value*u.m, y=Y.value*u.m, z=Z.value*u.m)
    except AttributeError:
        X_mid_EL = EarthLocation(x=X*u.m, y=Y*u.m, z=Z*u.m)
    X_mid_LLH = X_mid_EL.to_geodetic()
    lon = np.deg2rad(X_mid_LLH[0].value)
    lat = np.deg2rad(X_mid_LLH[1].value)
    # Rotation matrix from ECEF (geo) coordinates to local ENU
    trans = ECEF2ENU(lon, lat)
    X_ENU = np.dot(trans, X_end - X_beg)
    [[E],[N],[U]] = X_ENU
    # Convert the cartesian direction to spherical coordinates
    try:
        zenith = np.rad2deg(np.arctan2(np.sqrt(E**2 + N**2), abs(U))).value * u.deg
    except AttributeError:
        zenith = np.rad2deg(np.arctan2(np.sqrt(E**2 + N**2), abs(U))) * u.deg
    # The original bearing try/except had byte-identical branches (with a
    # bare except), so the exception handler was redundant and is removed
    bearing = (np.arctan2(E, N) * u.rad).to(u.deg) % (360 * u.deg)
    return zenith, bearing
def ShortestMidPoint(obs_ECEF_all, UV_ECEF_all):
    ''' Returns the point halfway along the shortest segment joining two rays.

    Inputs are lists of [3,1] column vectors: observer positions
    (obs_ECEF_all) and the corresponding look-directions (UV_ECEF_all).
    '''
    gs_a, gs_b = obs_ECEF_all[0], obs_ECEF_all[1]
    uv_a, uv_b = UV_ECEF_all[0], UV_ECEF_all[1]
    # Mutual perpendicular of the two look-directions
    perp = np.cross(uv_a, uv_b, axis=0)
    # Normals of the planes containing each ray and the mutual perpendicular
    n_a = np.cross(perp, uv_a, axis=0)
    n_b = np.cross(perp, uv_b, axis=0)
    # Distances along each ray to its closest-approach point
    d_a = n_b.T.dot(gs_b - gs_a) / n_b.T.dot(uv_a)
    d_b = n_a.T.dot(gs_a - gs_b) / n_a.T.dot(uv_b)
    closest_a = gs_a + uv_a * d_a
    closest_b = gs_b + uv_b * d_b
    # Midpoint of the two closest points
    return (closest_a + closest_b) / 2
def TotalAngSep(x, obs_ECEF, UV_ECEF, scaling=1.0):
    """Root-sum-square of angular separations between point x and each line of sight.

    Args:
        x: [3] candidate point (flat array), multiplied by `scaling`.
        obs_ECEF, UV_ECEF: lists of [3,1] observer positions / look-directions.
        scaling: multiplier applied to x (e.g. for unit conversion in optimisers).

    Returns:
        Scalar norm of the per-observer angular separations (radians).
    """
    x = np.vstack((x * scaling))
    theta = np.zeros(len(obs_ECEF))
    for i in range(len(obs_ECEF)):
        obs = obs_ECEF[i]
        UV = UV_ECEF[i]
        # .item() extracts the scalar from the [1,1] dot product; assigning a
        # 1x1 array into theta[i] is an error on modern NumPy (>=1.25)
        cos_sep = (x - obs).T.dot(UV).item() / (norm(x - obs) * norm(UV))
        theta[i] = np.arccos(cos_sep)
    return norm(theta)
def angular_difference(reference_angle, target_angle):
    ''' Signed difference between two angles, wrapped onto (-pi, pi]. '''
    raw = reference_angle - target_angle
    # atan2 of (sin, cos) folds the raw difference back onto the circle
    return np.arctan2(np.sin(raw), np.cos(raw))
def angular_difference_2d(point1, point2, input_type='uv'): #[n,2or3],[n,2or3]
    ''' Angular separation between paired directions.

    input_type='ang': points are [n,2] (ra, dec) pairs (haversine formula).
    input_type='uv':  points are [n,3] vectors.
    Returns an [n] array of separations in radians.
    '''
    if input_type == 'ang':
        # Haversine formula on (ra, dec) pairs
        p1 = point1.reshape((1,2)) if point1.ndim == 1 else point1
        p2 = point2.reshape((1,2)) if point2.ndim == 1 else point2
        ra1, dec1 = p1[:,0], p1[:,1]
        ra2, dec2 = p2[:,0], p2[:,1]
        hav = np.sin((dec1-dec2)/2)**2 \
            + np.cos(dec1) * np.cos(dec2) * np.sin((ra1-ra2)/2)**2
        ang_diff = 2 * np.arctan2(np.sqrt(hav), np.sqrt(1-hav)) #[n]
    elif input_type == 'uv':
        p1 = point1.reshape((1,3)) if point1.ndim == 1 else point1
        p2 = point2.reshape((1,3)) if point2.ndim == 1 else point2
        # atan2(|u x v|, u.v) is stable for both tiny and near-pi angles
        ang_diff = np.arctan2(norm(np.cross(p1, p2, axis=1), axis=1),
                              np.sum(p1 * p2, axis=1)) #[n]
    else:
        print('Not a valid input_type, soz.'); exit()
    return ang_diff #[n]
def track_errors(Pos, Vel, Obs, UV_obs, eci=True): #[3,n],[3,n],[3,m],[3,m]
    """Along-track (ATE) and cross-track (CTE) angular errors of observed lines
    of sight against a modelled trajectory.

    Pos, Vel: modelled positions and velocities, one column per observation.
    Obs: observer positions; UV_obs: observed unit look-directions.
    eci: if True, subtract the ground velocity induced by Earth's rotation
         (approximated about the z-axis).
    Returns (ATE, CTE), each an [n] array of angles in radians.
    """
    # Relative pos/vel needed to determine the rotation matrix
    Pos_rel = Pos - Obs; ground_rot = 0 * Obs
    if eci: ground_rot[2] = OMEGA_EARTH # Slight error here... Earth's axis != z-axis
    Vel_rel = Vel - np.cross(ground_rot, Obs, axis=0)
    # Estimated LOS based on model fit
    UV_est = Pos_rel / norm(Pos_rel, axis=0)
    # Find the velocity components w.r.t. UV_est
    v_para = np.sum(Vel_rel * UV_est, axis=0) * UV_est
    v_perp = Vel_rel - v_para
    # Construct the ECI-to-body rotation matrix:
    # x along the estimated LOS, y along the perpendicular velocity component
    x_body = UV_est #[3,n]
    y_body = v_perp / norm(v_perp, axis=0) #[3,n]
    z_body = np.cross(x_body, y_body, axis=0) #[3,n]
    C_ECI2BODY = np.vstack((x_body.flatten('f'),
        y_body.flatten('f'), z_body.flatten('f'))) #[3,3n] or [3,3]
    # Convert UV_obs into the body frame and determine the track errors
    # UV_obs_block = block_diag(*np.hsplit(UV_obs, len(Obs[0]))) #[3n,n] or [3,m]
    # UV_body = C_ECI2BODY.dot(UV_obs_block) #[3,n] or [3,m]
    # Each column gets its own 3x3 slice of the stacked rotation matrices
    UV_body = np.hstack([C_ECI2BODY[:,3*i:3*i+3].dot(
        UV_obs[:,i:i+1]) for i in range(len(UV_obs[0]))])
    ATE = np.arctan2(UV_body[1], UV_body[0]) #[n] or [m]
    CTE = np.arctan2(UV_body[2], UV_body[0]) #[n] or [m]
    return ATE, CTE #[n],[n]
def track_errors_radec_jac(Pos, Vel, Obs, UV_obs, eci=True): #[3,n],[3,n],[3,n],[3,n]
    ''' Jacobian of the track errors w.r.t. (ra, dec), by central differencing. '''
    # Work in (ra, dec) space
    z = uv2ang(UV_obs.T) #[n,2]
    # Perturbation applied to each angular component in turn
    step = np.diag([1e-7, 1e-7])
    jac = np.zeros((len(z), 2, 2))
    for i, s in enumerate(step):
        # Evaluate the track errors either side of the nominal angles
        te_plus = np.vstack((track_errors(Pos, Vel, Obs, ang2uv(z + s).T, eci))).T #[n,2]
        te_minus = np.vstack((track_errors(Pos, Vel, Obs, ang2uv(z - s).T, eci))).T #[n,2]
        jac[:,i,:] = (te_plus - te_minus) / (2 * s[i])
    return jac #[n,2,2]
def altaz2radec(altaz, C_ENU2ECI): #[n,2],[n,3,3]
    ''' Convert (alt, az) pairs to (ra, dec) via per-row ENU->ECI matrices. '''
    alt, azi = altaz.T
    # Unit vectors in the local ENU frame
    UV_enu = np.vstack((np.sin(azi) * np.cos(alt),
        np.cos(azi) * np.cos(alt), np.sin(alt))) #[3,n]
    # Rotate each column by its own ENU->ECI matrix
    UV_eci = np.matmul(C_ENU2ECI, UV_enu.T.reshape((len(altaz), 3, 1))) #[n,3,1]
    return uv2ang(UV_eci.reshape((len(altaz), 3))) #[n,2]
def altaz2radec_jac(altaz, C_ENU2ECI):
    ''' Jacobian of altaz2radec w.r.t. (alt, az), by central differencing. '''
    # Perturbation applied to each angular component in turn
    step = np.diag([1e-7, 1e-7])
    jac = np.zeros((len(altaz), 2, 2))
    for i, s in enumerate(step):
        plus = altaz2radec(altaz + s, C_ENU2ECI) #[n,2]
        minus = altaz2radec(altaz - s, C_ENU2ECI) #[n,2]
        jac[:,i,:] = (plus - minus) / (2 * s[i])
    return jac #[n,2,2]
# ''' Spherical to Cartesian functions '''
def ang2uv(mean_ang): #[n,2]
    ''' Convert (ra, dec) pairs into unit vectors [n,3]. '''
    if mean_ang.ndim == 1: mean_ang = mean_ang.reshape((1,2))
    ra, dec = mean_ang[:,0], mean_ang[:,1]
    cos_dec = np.cos(dec)
    return np.vstack((cos_dec * np.cos(ra),
                      cos_dec * np.sin(ra), np.sin(dec))).T #[n,3]
def uv2ang(mean_uv): #[n,3]
    ''' Convert vectors [n,3] into (ra, dec) pairs; input need not be unit length. '''
    if mean_uv.ndim == 1: mean_uv = mean_uv.reshape((1,3))
    r = norm(mean_uv, axis=1)
    right_asc = np.arctan2(mean_uv[:,1], mean_uv[:,0])
    declination = np.arcsin(mean_uv[:,2] / r)
    return np.vstack((right_asc, declination)).T #[n,2]
# def ang2uv_jac(mean_ang): #[n,2]
# if mean_ang.ndim == 1: mean_ang = mean_ang.reshape((1,2))
# # Setup the state step - move by one arcsec
# dang = np.deg2rad(1./3600) * np.eye(2)
# n = len(mean_ang); DANG = np.tile(dang,(n,1))
# MEAN_ang = np.hstack([mean_ang]*2).reshape((2*n,2)) #[2n,2]
# # Compute the jacobian
# uv_pos = ang2uv(MEAN_ang + DANG) #[2n,3]
# uv_neg = ang2uv(MEAN_ang - DANG) #[2n,3]
# step = np.tile(np.vstack(np.diag(dang)),(n,3)) #[2n,3]
# jac = (uv_pos - uv_neg) / (2 * step)
# jac = jac.reshape((n,2,3))
# return jac #[n,2,3]
# def convert_angle_to_uv(ra, dec, ra_err, dec_err):
# # Calculate the uv means
# mean_ang = np.vstack((ra,dec)).T #[n,2]
# mean_uv = ang2uv_jac(mean_ang) #[n,3]
# # Construct the angular covariance matrix
# cov_ang = np.zeros((len(mean_ang),2,2)) #[n,2,2]
# cov_ang[:,0,0] = ra_err**2; cov_ang[:,1,1] = dec_err**2
# # Determine the uv covariance matrix
# thi = ang2uv_jac(mean_ang) #[n,2,3]
# cov_uv = np.matmul(np.transpose(thi,(0,2,1)), \
# np.matmul(cov_ang, thi)) #[n,3,3]
# ### Might need to flatten here if n=1
# return mean_uv, cov_uv #[n,3], [n,3,3]
# def ECEF2LLH(ECEF):
# '''
# Converts coords Earth Centered Earth Fixed (ECEF=[rx;ry;rz])
# into LLH=[longitude;latitude;height] coords.
# '''
# # WGS84 Defining Parameters
# a_earth = 6378137.0 # Semi-major axis
# f_earth = 1 / 298.257223563 # Flattening
# b_earth = a_earth * (1 - f_earth) # Semi-minor axis
# e_earth = np.sqrt(1 - (b_earth**2) / (a_earth**2)) # Eccentricity
# e_prime_earth = np.sqrt((a_earth**2) / (b_earth**2) - 1) # Second eccentricity
# [G, M] = grav_params()
# # Separate the variables
# X = ECEF[0]
# Y = ECEF[1]
# Z = ECEF[2]
# # Auxiliary values
# p = np.sqrt(X**2 + Y**2)
# theta = np.arctan2(Z * a_earth, p * b_earth)
# # Calculate the LLH coords
# Lat = np.arctan2(Z + (e_prime_earth**2) * b_earth * (np.sin(theta))**3,
# p - (e_earth**2) * a_earth * (np.cos(theta))**3)
# Long = np.arctan2(Y, X)
# H = p / np.cos(Lat) - a_earth / np.sqrt(1 - (e_earth**2) * (np.sin(Lat))**2)
# LLH = np.vstack((Lat, Long, H))
# return Lat, Long, H #LLH
# def LLH2ECEF(LLH):
# '''
# Converts geodetic LLH=[longitude;latitude;height] coords to
# Earth Centered Earth Fixed (ECEF=[rx;ry;rz]) coords.
# '''
# # WGS84 Defining Parameters
# a_earth = 6378137.0 # Semi-major axis
# f_earth = 1 / 298.257223563 # Flattening
# b_earth = a_earth * (1 - f_earth) # Semi-minor axis
# e_earth = np.sqrt(1 - (b_earth**2) / (a_earth**2)) # Eccentricity
# e_prime_earth = np.sqrt((a_earth**2) / (b_earth**2) - 1) # Second eccentricity
# [G, M] = grav_params()
# # Separate the variables
# Lat = LLH[0]
# Long = LLH[1]
# H = LLH[2]
# # Calculate the ECEF coords
# N = a_earth / np.sqrt(1 - e_earth**2 * (np.sin(Lat))**2)
# rx = (N + H) * np.cos(Lat) * np.cos(Long)
# ry = (N + H) * np.cos(Lat) * np.sin(Long)
# rz = (N * (1 - e_earth**2) + H) * np.sin(Lat)
# # Construct the ECEF vector
# ECEF = np.vstack((rx, ry, rz))
# return ECEF
def enu_matrix(obs_LLH, t_jd):
    """Per-observation ENU->ECI rotation matrices for ground observers.

    obs_LLH: [3,n] geodetic positions ([lat; lon; height], per LLH2ECEF).
    t_jd: [n] observation times (Julian date).
    Returns (C_ENU2ECI [n,3,3], obs_ECI [3,n]).
    """
    # Determine the ECI coordinates
    # A second point 100 m above each observer fixes the local Up direction
    obs_LLH_plus = np.hstack((obs_LLH, obs_LLH + np.vstack((0,0,100)))) #[3,2n]
    t_jd_plus = np.hstack((t_jd, t_jd)); n = len(t_jd)
    obs_ECI_plus = ECEF2ECI_pos(LLH2ECEF(obs_LLH_plus), t_jd_plus) #[3,2n]
    obs_ECI = obs_ECI_plus[:,:n]; obs_ECI_plus = obs_ECI_plus[:,n:]
    # Compute the transformation matrix
    # East: perpendicular to the position's projection onto the xy-plane
    E = np.vstack((-obs_ECI[1], obs_ECI[0], np.zeros(n))) \
        / np.sqrt(obs_ECI[1]**2 + obs_ECI[0]**2) #[3,n]
    # Up: direction from the observer to the elevated point; North completes
    # the right-handed triad
    U = (obs_ECI_plus - obs_ECI) / norm(obs_ECI_plus - obs_ECI, axis=0) #[3,n]
    N = np.cross(U, E, axis=0) / norm(np.cross(U, E, axis=0), axis=0) #[3,n]
    C_ENU2ECI = np.zeros((n,3,3)); C_ENU2ECI[:,:,0] = E.T
    C_ENU2ECI[:,:,1] = N.T; C_ENU2ECI[:,:,2] = U.T
    return C_ENU2ECI, obs_ECI #[n,3,3],[3,n]
def ECEF2ENU(lon, lat): # slightly inaccurate -> should use enu_matrix(obs_LLH, t_jd)?
    """
    Rotation matrix from ECEF (geo) coordinates to local ENU coordinates.
    Reference: "Transformations between ECEF and ENU coordinates",
    J. Sanz Subirana, J.M. Juan Zornoza and M. Hernandez-Pajares,
    Technical University of Catalonia, Spain, 2011.
    http://www.navipedia.net/index.php/Transformations_between_ECEF_and_ENU_coordinates
    Longitude rotates the east-axis onto x (clockwise about z);
    latitude then rotates z up to the zenith.
    """
    sin_lon, cos_lon = np.sin(lon), np.cos(lon)
    sin_lat, cos_lat = np.sin(lat), np.cos(lat)
    return np.array([
        [-sin_lon,            cos_lon,            0      ],
        [-cos_lon * sin_lat, -sin_lon * sin_lat,  cos_lat],
        [ cos_lon * cos_lat,  sin_lon * cos_lat,  sin_lat]])
def ENU2ECEF(lon, lat): # slightly inaccurate -> should use enu_matrix(obs_LLH, t_jd)?
    """Rotation matrix from local ENU coordinates to ECEF coordinates."""
    lon = float(lon); lat = float(lat)
    sin_lon, cos_lon = np.sin(lon), np.cos(lon)
    sin_lat, cos_lat = np.sin(lat), np.cos(lat)
    return np.array([
        [-sin_lon, -sin_lat * cos_lon, cos_lat * cos_lon],
        [ cos_lon, -sin_lat * sin_lon, cos_lat * sin_lon],
        [ 0.0,      cos_lat,           sin_lat          ]])
def LLH2ECEF(Pos_LLH):
    ''' Geodetic [lat; lon; height] (rad, rad, m) to ECEF [x; y; z] (m). '''
    locations = EarthLocation(lat=Pos_LLH[0] * u.rad, lon=Pos_LLH[1] * u.rad, height=Pos_LLH[2] * u.m)
    ecef = np.vstack((locations.x.value, locations.y.value, locations.z.value))
    # Columns with NaN height propagate through as fully-NaN columns
    valid = ~np.isnan(ecef[2])
    Pos_ECEF = np.full(np.shape(ecef), np.nan)
    Pos_ECEF[:, valid] = ecef[:, valid]
    return Pos_ECEF
def ECEF2LLH(Pos_ECEF):
    ''' ECEF [x; y; z] (m) to geodetic [lat; lon; height] (rad, rad, m). '''
    locations = EarthLocation(x=Pos_ECEF[0] * u.m, y=Pos_ECEF[1] * u.m, z=Pos_ECEF[2] * u.m)
    llh = np.vstack((locations.lat.rad, locations.lon.rad, locations.height.value))
    # Columns with NaN height propagate through as fully-NaN columns
    valid = ~np.isnan(llh[2])
    Pos_LLH = np.full(np.shape(llh), np.nan)
    Pos_LLH[:, valid] = llh[:, valid]
    return Pos_LLH
def ECI2ECEF_pos(Pos_ECI, t):
    ''' ECI (GCRS) cartesian positions [3,n] (m) to ECEF (ITRS) at times t (JD). '''
    T = Time(t, format='jd', scale='utc')
    # Express the cartesian input as spherical GCRS coordinates
    r = norm(Pos_ECI, axis=0)
    ra = np.arctan2(Pos_ECI[1], Pos_ECI[0])
    dec = np.arcsin(Pos_ECI[2] / r)
    eci_coords = GCRS(ra=ra * u.rad, dec=dec * u.rad,
        distance=r * u.m, obstime=T)
    # Rotate into the Earth-fixed ITRS frame
    ecef_coords = eci_coords.transform_to(ITRS(obstime=T))
    return np.vstack(ecef_coords.cartesian.xyz.value)
def ECI2ECEF(Pos_ECI, Vel_ECI, t):
    ''' Inertial position/velocity to Earth-fixed, removing Earth-rotation velocity. '''
    Pos_ECEF = ECI2ECEF_pos(Pos_ECI, t)
    # Transform the tip of the velocity vector, then subtract both the
    # position and the frame-rotation term omega x r
    tip_ECEF = ECI2ECEF_pos(Pos_ECI + Vel_ECI, t)
    omega = np.vstack((0, 0, OMEGA_EARTH))
    Vel_ECEF = tip_ECEF - Pos_ECEF - np.cross(omega, Pos_ECEF, axis=0)
    return Pos_ECEF, Vel_ECEF
def ECEF2ECI_pos(Pos_ECEF, t):
    ''' ECEF (ITRS) cartesian positions [3,n] (m) to ECI (GCRS) at times t (JD). '''
    T = Time(t, format='jd', scale='utc')
    ecef_coords = ITRS(x=Pos_ECEF[0] * u.m, y=Pos_ECEF[1] * u.m,
        z=Pos_ECEF[2] * u.m, obstime=T)
    # Rotate into the inertial GCRS frame
    eci_coords = ecef_coords.transform_to(GCRS(obstime=T))
    return np.vstack(eci_coords.cartesian.xyz.value)
def ECEF2ECI(Pos_ECEF, Vel_ECEF, t):
    ''' Earth-fixed position/velocity to inertial, adding back Earth rotation. '''
    Pos_ECI = ECEF2ECI_pos(Pos_ECEF, t)
    omega = np.vstack((0, 0, OMEGA_EARTH))
    # Tip of the velocity vector including the frame-rotation term omega x r
    tip_ECEF = Pos_ECEF + Vel_ECEF + np.cross(omega, Pos_ECEF, axis=0)
    Vel_ECI = ECEF2ECI_pos(tip_ECEF, t) - Pos_ECI
    return Pos_ECI, Vel_ECI
def ECI2TEME_pos(Pos_ECI, t):
    ''' ECI (GCRS) positions [3,n] (m) to the precessed-geocentric frame at t (JD). '''
    T = Time(t, format='jd', scale='utc')
    target_frame = PrecessedGeocentric(equinox=T, obstime=T)
    # Express the cartesian input as spherical GCRS coordinates
    r = norm(Pos_ECI, axis=0)
    ra = np.arctan2(Pos_ECI[1], Pos_ECI[0])
    dec = np.arcsin(Pos_ECI[2] / r)
    eci_coords = GCRS(ra=ra * u.rad, dec=dec * u.rad,
        distance=r * u.m, obstime=T)
    return np.vstack(eci_coords.transform_to(target_frame).cartesian.xyz.value)
def ECI2TEME(Pos_ECI, Vel_ECI, t):
    ''' Inertial position/velocity into the precessed-geocentric frame. '''
    Pos_TEME = ECI2TEME_pos(Pos_ECI, t)
    # Differencing the transformed velocity tip gives the rotated velocity
    Vel_TEME = ECI2TEME_pos(Pos_ECI + Vel_ECI, t) - Pos_TEME
    return Pos_TEME, Vel_TEME
def TEME2ECI_pos(Pos_TEME, t):
    ''' Precessed-geocentric positions [3,n] (m) back to ECI (GCRS) at t (JD). '''
    T = Time(t, format='jd', scale='utc')
    # Express the cartesian input as spherical coordinates
    r = norm(Pos_TEME, axis=0)
    ra = np.arctan2(Pos_TEME[1], Pos_TEME[0])
    dec = np.arcsin(Pos_TEME[2] / r)
    teme_coords = PrecessedGeocentric(ra=ra * u.rad, dec=dec * u.rad,
        distance=r * u.m, equinox=T, obstime=T)
    return np.vstack(teme_coords.transform_to(GCRS(obstime=T)).cartesian.xyz.value)
def TEME2ECI(Pos_TEME, Vel_TEME, t):
    ''' Precessed-geocentric position/velocity back to the inertial frame. '''
    Pos_ECI = TEME2ECI_pos(Pos_TEME, t)
    # Differencing the transformed velocity tip gives the rotated velocity
    Vel_ECI = TEME2ECI_pos(Pos_TEME + Vel_TEME, t) - Pos_ECI
    return Pos_ECI, Vel_ECI
''' There is a milli-arcsec difference between ICRS and J2000, but it
is at least three orders of magnitude smaller than the error on
our best orbital prediction. Hence, it has been deemed negligible.
See the following references for material:
https://www.aanda.org/articles/aa/full/2004/02/aa3851/aa3851.html
http://cdsads.u-strasbg.fr/cgi-bin/nph-bib_query?1998A&A...331L..33F
https://www.iers.org/IERS/EN/Science/ICRS/ICRS.html
'''
# eps = np.deg2rad(np.array([-5.1, 17.2, -78.0]) / (3600 * 1000))
# R_x = np.array([[ 1, 0 , 0 ],
# [ 0, np.cos(eps[0]),-np.sin(eps[0])],
# [ 0, np.sin(eps[0]), np.cos(eps[0])]])
# R_y = np.array([[ np.cos(eps[1]), 0, np.sin(eps[1])],
# [ 0 , 1, 0 ],
# [-np.sin(eps[1]), 0, np.cos(eps[1])]])
# R_z = np.array([[ np.cos(eps[2]),-np.sin(eps[2]), 0],
# [ np.sin(eps[2]), np.cos(eps[2]), 0],
# [ 0 , 0 , 1]])
# ICRS2J2000 = R_x.dot(R_y).dot(R_z)
# Obliquity at J2000 (Chapront et al. | 2002)
# http://hpiers.obspm.fr/eop-pc/models/constants.html
# Mean obliquity of the ecliptic at J2000 (Chapront et al., 2002), radians
e = np.deg2rad(23 + 26/60 + 21.4119/3600)
# Rotation about the x-axis by the obliquity: equatorial -> ecliptic frame
C_EQ2ECLIP = np.array([[1,  0.0,        0.0      ],
                       [0,  np.cos(e),  np.sin(e)],
                       [0, -np.sin(e),  np.cos(e)]])
def HCRS2HCI(HCRS):
    ''' Rotate an HCRS (equatorial) vector into the HCI (ecliptic) frame. '''
    return C_EQ2ECLIP.dot(HCRS)
def HCI2HCRS(HCI):
    ''' Rotate an HCI (ecliptic) vector back into the HCRS (equatorial) frame. '''
    return C_EQ2ECLIP.T.dot(HCI)
def ECI2HCI_pos(Pos_ECI, t):
    ''' Geocentric ECI positions [3,n] (m) to heliocentric-inertial at t (JD). '''
    T = Time(t, format='jd', scale='utc')
    # Express the cartesian input as spherical GCRS coordinates
    r = norm(Pos_ECI, axis=0)
    ra = np.arctan2(Pos_ECI[1], Pos_ECI[0])
    dec = np.arcsin(Pos_ECI[2] / r)
    eci_coords = GCRS(ra=ra * u.rad, dec=dec * u.rad,
        distance=r * u.m, obstime=T)
    # Shift to the heliocentric equatorial frame, then rotate to the ecliptic
    Pos_HCRS = np.vstack(eci_coords.transform_to(HCRS(obstime=T)).cartesian.xyz.value)
    return HCRS2HCI(Pos_HCRS)
def ECI2HCI(Pos_ECI, Vel_ECI, t):
    ''' Geocentric inertial position/velocity to heliocentric-inertial. '''
    Pos_HCI = ECI2HCI_pos(Pos_ECI, t)
    # Velocity: difference of transformed points plus Earth's heliocentric motion
    Vel_HCI = ECI2HCI_pos(Pos_ECI + Vel_ECI, t) - Pos_HCI + EarthVelocity(t)
    return Pos_HCI, Vel_HCI
def HCI2ECI_pos(Pos_HCI, t):
    ''' Heliocentric-inertial positions [3,n] (m) to geocentric ECI at t (JD). '''
    T = Time(t, format='jd', scale='utc')
    # Rotate from the ecliptic back to the heliocentric equatorial frame
    Pos_HCRS = HCI2HCRS(Pos_HCI)
    # TODO. HARD: had to remove it
    #Vel_HCRS_rel = HCI2HCRS(Vel_HCI - EarthVelocity(t))
    hcrs_coords = HCRS(x=Pos_HCRS[0]*u.m, y=Pos_HCRS[1]*u.m, z=Pos_HCRS[2]*u.m,
        representation_type='cartesian', obstime=T)
    return np.vstack(hcrs_coords.transform_to(GCRS(obstime=T)).cartesian.xyz.value)
def HCI2ECI(Pos_HCI, Vel_HCI, t):
    ''' Heliocentric-inertial position/velocity to geocentric inertial. '''
    Pos_ECI = HCI2ECI_pos(Pos_HCI, t)
    # Remove Earth's heliocentric motion before differencing the velocity tip
    Vel_ECI = HCI2ECI_pos(Pos_HCI + Vel_HCI - EarthVelocity(t), t) - Pos_ECI
    return Pos_ECI, Vel_ECI
def PosVel2OrbitalElements(Pos, Vel, OrbitBody, Type):
    """Convert cartesian state vectors into orbital elements.

    Pos, Vel: [3,n] position and velocity column vectors.
    OrbitBody: 'Sun' or 'Earth' - selects the gravitational parameter mu.
    Type: 'Classical' -> rows (a, e, i, omega, Omega, theta);
          'Equinoctial' -> rows (p, f, g, h, k, L).
    Returns a [6,n] array; angles in radians, wrapped to [0, 2*pi).
    """
    if Pos.ndim == 1: Pos = np.vstack(Pos)
    if Vel.ndim == 1: Vel = np.vstack(Vel)
    if OrbitBody == 'Sun':
        mu = MU_SUN
    elif OrbitBody == 'Earth':
        mu = MU_EARTH
    else:
        # NOTE(review): exits the process rather than raising
        print('Not valid OrbitBody: PosVel2OrbitalElements')
        exit()
    # Pre-calculations
    # w_hat: orbit normal; n_hat: ascending-node line; e_vec: eccentricity vector
    dim = np.shape(Pos)[1]
    w_hat = np.cross(Pos, Vel, axis=0) / norm(np.cross(Pos, Vel, axis=0), axis=0)
    i_hat = np.vstack((np.ones((1, dim)), np.zeros((2, dim))))
    k_hat = np.vstack((np.zeros((2, dim)), np.ones((1, dim))))
    n_hat = np.cross(k_hat, w_hat, axis=0) / norm(np.cross(k_hat, w_hat, axis=0), axis=0)
    e_vec = ((norm(Vel, axis=0)**2 - mu / norm(Pos, axis=0)) * Pos - np.diag(np.dot(Pos.T, Vel)) * Vel) / mu
    # Classical Orbital Elements
    a = mu * norm(Pos, axis=0) / (2 * mu - norm(Pos, axis=0) * norm(Vel, axis=0)**2)
    e = norm(e_vec, axis=0)
    i = np.arccos(np.diag(np.dot(w_hat.T, k_hat)))
    omega = np.arctan2(np.diag(np.dot(np.cross(n_hat, e_vec, axis=0).T, w_hat)),
        np.diag(np.dot(n_hat.T, e_vec))) % (2 * np.pi)
    Omega = np.arctan2(np.diag(np.dot(np.cross(i_hat, n_hat, axis=0).T, k_hat)),
        np.diag(np.dot(i_hat.T, n_hat))) % (2 * np.pi)
    theta = np.arctan2(np.diag(np.dot(np.cross(e_vec, Pos, axis=0).T, w_hat)),
        np.diag(np.dot(e_vec.T, Pos))) % (2 * np.pi)
    if Type == 'Classical':
        COE = np.vstack((a, e, i, omega, Omega, theta))
        return COE
    elif Type == 'Equinoctial':
        # Convert to equinoctial orbital elements
        p = a * (1 - e**2)
        f = e * np.cos(omega + Omega)
        g = e * np.sin(omega + Omega)
        h = np.tan(i / 2) * np.cos(Omega)
        k = np.tan(i / 2) * np.sin(Omega)
        L = (Omega + omega + theta) % (2 * np.pi)
        EOE = np.vstack((p, f, g, h, k, L))
        return EOE
    else:
        # NOTE(review): exits the process rather than raising
        print('Not valid Type: PosVel2OrbitalElements')
        exit()
def OrbitalElements2PosVel(OE, OrbitBody, Type):
    """Convert orbital elements [6,n] back into cartesian position/velocity.

    OE rows: 'Classical' -> (a, e, i, omega, Omega, theta);
             'Equinoctial' -> (p, f, g, h, k, L).
    OrbitBody: 'Sun' or 'Earth' - selects the gravitational parameter mu.
    Returns (Pos, Vel) as [3,n] arrays.
    """
    if OrbitBody == 'Sun':
        mu = MU_SUN
    elif OrbitBody == 'Earth':
        mu = MU_EARTH
    else:
        # NOTE(review): exits the process rather than raising
        print('Not valid OrbitBody: OrbitalElements2PosVel')
        exit()
    if Type == 'Classical':
        # Assigning the elements
        a = OE[0]; e = OE[1]; i = OE[2]; omega = OE[3]; Omega = OE[4]; theta = OE[5]
        # Convert to equinoctial orbital elements
        p = a * (1 - e**2)
        f = e * np.cos(omega + Omega)
        g = e * np.sin(omega + Omega)
        h = np.tan(i / 2) * np.cos(Omega)
        k = np.tan(i / 2) * np.sin(Omega)
        L = Omega + omega + theta
    elif Type == 'Equinoctial':
        # Assigning the elements
        p = OE[0]; f = OE[1]; g = OE[2]; h = OE[3]; k = OE[4]; L = OE[5]
    else:
        # NOTE(review): exits the process rather than raising
        print('Not valid Type: OrbitalElements2PosVel')
        exit()
    # Auxiliary equinoctial quantities
    w = 1 + f * np.cos(L) + g * np.sin(L)
    s2 = 1 + h**2 + k**2
    alpha2 = h**2 - k**2
    r = p / w
    # Positions
    Pos = r / s2 * np.vstack((np.cos(L) + alpha2 * np.cos(L) + 2 * h * k * np.sin(L),
        np.sin(L) - alpha2 * np.sin(L) + 2 * h * k * np.cos(L),
        2 * (h * np.sin(L) - k * np.cos(L))))
    # Velocities
    Vel = -1 / s2 * np.sqrt(mu / p) * np.vstack((np.sin(L) + alpha2 * np.sin(L) - 2 * h * k * np.cos(L) + g - 2 * f * h * k + alpha2 * g,
        -np.cos(L) + alpha2 * np.cos(L) + 2 * h * k * np.sin(L) - f + 2 * g * h * k + alpha2 * f,
        -2 * (h * np.cos(L) + k * np.sin(L) + f * h + g * k)))
    return Pos, Vel
#########################################################
# Earth motion
#########################################################
def EarthPosition(t, ephem='builtin'):
    '''
    The position of Earth at time, t, w.r.t. the Sun
    using astropy.coordinates.solar_system_ephemeris.
    t is a Julian date; returns the position rotated into the
    heliocentric-inertial (ecliptic) frame via HCRS2HCI, in metres.
    '''
    # logger = logging.getLogger()
    # logger.critical('As of 1.3, astropy has changed the behavior of some functions, giving diverging answers from previous versions')
    T = Time(t, format='jd', scale='utc')
    # pos_bary, vel_bary = get_body_barycentric_posvel('earth', time=T)
    # Barycentric position from the chosen ephemeris
    with solar_system_ephemeris.set(ephem):
        pos_bary = get_body_barycentric('earth', time=T)
        pos_ICRS = ICRS(pos_bary)
        hcrs_frame = HCRS(obstime=T)
    # Re-express as heliocentric equatorial, then rotate to the ecliptic frame
    Pos_HCRS = np.vstack((SkyCoord(pos_ICRS).transform_to(hcrs_frame).
        cartesian.xyz.to(u.meter).value))
    Pos_HCI = HCRS2HCI(Pos_HCRS)
    return Pos_HCI
def EarthVelocity(t, ephem='builtin'):
    '''
    The velocity of Earth at time t w.r.t. the Sun, estimated by central
    differencing of EarthPosition over a +/- 1 hour window.
    '''
    # TODO when astropy ends up implementing velocity transformations between frames, use that instead
    half_step_s = 1.0 * 60 * 60              # one hour, in seconds
    half_step_jd = half_step_s / (24 * 60 * 60)  # ...and in days (t is a JD)
    pos_before = EarthPosition(t - half_step_jd, ephem)
    pos_after = EarthPosition(t + half_step_jd, ephem)
    return (pos_after - pos_before) / (2 * half_step_s)
#########################################################
# File queries
#########################################################
def find_events_folder(datadir, event_pattern="DN*"):
    """Return the event folders directly inside `datadir` matching `event_pattern`.

    Uses the system `find` utility; only directories one level below
    `datadir` are considered.
    """
    # -maxdepth is a global option and must precede tests like -type/-name,
    # otherwise GNU find prints a warning to stderr
    find_comm = ['find',
                 datadir,
                 '-maxdepth', '1',
                 '-type', 'd',
                 '-name', event_pattern]
    list_results = subprocess.check_output(find_comm).decode().split('\n')
    return [it for it in list_results if it != '']
def find_file(path_to_event):
    """Return the most recent trajectory folder for an event, or None if absent.

    (The previous version computed an unused `event_codename` local; removed.)
    """
    try:
        return get_most_recent_traj_folder(path_to_event)
    except FileNotFoundError:
        return None
def get_most_recent_traj_folder(event_folder):
    '''
    Locate the most recent trajectory folder inside `event_folder`.
    Candidate folders must be named starting with 'trajectory_20'; the most
    recent is the lexicographically largest name.
    Parameters:
        event_folder: folder where to look
    Returns:
        the directory where the most recent full trajectory data is stored
    Raises:
        FileNotFoundError: when no trajectory folder exists
    '''
    candidates = [entry for entry in os.listdir(event_folder)
                  if entry.startswith('trajectory_20')]
    if not candidates:
        raise FileNotFoundError('Cannot find trajectory data for that event')
    # max() of the names is the lexicographic latest, i.e. sorted()[-1]
    return os.path.join(event_folder, max(candidates))
|
{"hexsha": "7c414addef2ca601c5565b43689a4c1e5e338a5f", "size": 33786, "ext": "py", "lang": "Python", "max_stars_repo_path": "trajectory_utilities.py", "max_stars_repo_name": "desertfireballnetwork/DFN_darkflight", "max_stars_repo_head_hexsha": "f41d2a2b82ce96f380f26acfe278c0afa536b9cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-19T15:13:09.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-19T15:13:09.000Z", "max_issues_repo_path": "trajectory_utilities.py", "max_issues_repo_name": "desertfireballnetwork/DFN_darkflight", "max_issues_repo_head_hexsha": "f41d2a2b82ce96f380f26acfe278c0afa536b9cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trajectory_utilities.py", "max_forks_repo_name": "desertfireballnetwork/DFN_darkflight", "max_forks_repo_head_hexsha": "f41d2a2b82ce96f380f26acfe278c0afa536b9cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.267223382, "max_line_length": 144, "alphanum_fraction": 0.5945657965, "include": true, "reason": "import numpy,from numpy,from scipy,import astropy,from astropy", "num_tokens": 10820}
|
import numba
from numba import deferred_type
from numba.experimental import jitclass
from morpyneural.Genetic.JitElementClass import JitElement, JitElementListType
@jitclass([
    ('elements', JitElementListType)
])
class JitPopulation(object):
    """Numba-jitted container holding a population of JitElement networks."""
    def __init__(self):
        # Seed the list with one element and pop it again, leaving an empty
        # list whose numba element type is fixed to JitElement
        self.elements = [JitElement()]
        self.elements.pop()
    def build(self, layers_configuration, max_elements):
        """
        Building new set of Elements
        :param layers_configuration: network layout forwarded to each JitElement
        :param max_elements: number of elements to create
        :return: self, for chaining
        """
        for i in range(max_elements):
            new_element = JitElement().build(layers_configuration)
            # insert at len(...) is an append; presumably chosen for numba
            # list support - TODO confirm append() works equally here
            self.elements.insert(len(self.elements), new_element)
        return self
    def feed_forward(self, inputs):
        """
        Feed forwarding all Elements
        :param inputs: input vector forwarded to every element
        :return: list with one result per element, in population order
        """
        results = []
        for element in self.elements:
            results.append(element.feed_forward(inputs))
        return results
    def evolve(self, parent_a, parent_b, learning_rate=0.001, low=-1.0, high=1.0):
        """
        Evolve each Element (the two parents are left untouched)
        :param parent_a: first parent element, excluded from evolution
        :param parent_b: second parent element, excluded from evolution
        :param learning_rate: forwarded to each element's evolve()
        :param low: forwarded lower bound
        :param high: forwarded upper bound
        :return: self, for chaining
        """
        for element in self.elements:
            if element != parent_a and element != parent_b:
                element.evolve(parent_a, parent_b, learning_rate=learning_rate, low=low, high=high)
        return self
"""
Define Customs Types
"""
# Deferred numba type, so JitPopulation can be referenced before compilation
JitPopulationType = deferred_type()
JitPopulationType.define(JitPopulation.class_type.instance_type)
# Typed list of JitPopulation instances, usable in other jitclass specs
JitPopulationListType = numba.types.List(JitPopulation.class_type.instance_type)
|
{"hexsha": "2ba32dcf358f5425f7bb1de55f8f5e5e8e5f2dc0", "size": 1716, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/morpyneural/Genetic/JitPopulationClass.py", "max_stars_repo_name": "Morgiver/neural-network", "max_stars_repo_head_hexsha": "b5c4a600bfe8032bc7ad859bb7286efdac90e74d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-15T15:28:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-15T15:28:08.000Z", "max_issues_repo_path": "src/morpyneural/Genetic/JitPopulationClass.py", "max_issues_repo_name": "Morgiver/neural-network", "max_issues_repo_head_hexsha": "b5c4a600bfe8032bc7ad859bb7286efdac90e74d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/morpyneural/Genetic/JitPopulationClass.py", "max_forks_repo_name": "Morgiver/neural-network", "max_forks_repo_head_hexsha": "b5c4a600bfe8032bc7ad859bb7286efdac90e74d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2380952381, "max_line_length": 99, "alphanum_fraction": 0.641025641, "include": true, "reason": "import numba,from numba", "num_tokens": 370}
|
using DSP
"""
highlow_butterworth_filter(data,sampling_rate; low_pass=30, high_pass=1, bw_n_pole=5, offset=true)
Applies high- and low-pass filters of Butterworth design (default 5th order). To alter
the filter thresholds, use the keyword arguments `low_pass` for the low-pass cut-off
(default=30) and `high_pass` for the high-pass cut-off (default=1). To change the order
of the Butterworth filter, use the keyword `bw_n_pole` (default=5). High-pass filtering
removes the signal's component at 0 Hz, i.e. the mean of the time series; setting
`offset=false` manually adds the mean back.
Returns filtered data
"""
function highlow_butterworth_filter(
    data, sampling_rate;
    low_pass=30,
    high_pass=1,
    bw_n_pole=5,
    offset = true,
)
    # One Butterworth design of the requested order, shared by both stages
    design = Butterworth(bw_n_pole)
    # Low-pass stage, applied forward-backward (zero phase) with filtfilt
    lp_filter = digitalfilter(Lowpass(low_pass, fs=sampling_rate), design)
    lowpassed = filtfilt(lp_filter, data)
    # High-pass stage on the low-passed signal
    hp_filter = digitalfilter(Highpass(high_pass, fs=sampling_rate), design)
    filtered_data = filtfilt(hp_filter, lowpassed)
    # High-pass filtering removes the 0 Hz component (the mean of the
    # series); optionally restore it
    if offset == false
        filtered_data = filtered_data .+ mean(data, dims=1)
    end
    return filtered_data
end
"""
highlow_butterworth_filter(data::Dict, sampling_rate; low_pass=30, high_pass=1, bw_n_pole=5)
Data contains all conditions and is a Dict (subject). Applies high- and low-pass
filters of Butterworth design (default 5th order) to every condition. To alter the
filter thresholds, use the keyword arguments `low_pass` for the low-pass cut-off
(default=30) and `high_pass` for the high-pass cut-off (default=1). To change the
order of the Butterworth filter, use the keyword `bw_n_pole` (default=5).
Returns filtered data as a Dict
"""
function highlow_butterworth_filter(
    data::Dict, sampling_rate;
    low_pass=30,
    high_pass=1,
    bw_n_pole=5,
    offset=true
)
    # Filter every condition independently, preserving the Dict layout
    filtered_data = Dict()
    for condition in keys(data)
        filtered_data[condition] = highlow_butterworth_filter(
            data[condition],
            sampling_rate,
            low_pass=low_pass,
            high_pass=high_pass,
            bw_n_pole=bw_n_pole,
            offset=offset,
        )
    end
    return filtered_data
end
|
{"hexsha": "e9bfd6aaa008b08e86e8fb5a6e10e69c9dde8d3b", "size": 2546, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/filters.jl", "max_stars_repo_name": "ElectronicTeaCup/MegTools", "max_stars_repo_head_hexsha": "fef50fdcc6261fc645fee54c847d51b6c05d7f6f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/filters.jl", "max_issues_repo_name": "ElectronicTeaCup/MegTools", "max_issues_repo_head_hexsha": "fef50fdcc6261fc645fee54c847d51b6c05d7f6f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2020-03-08T21:42:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-17T14:05:13.000Z", "max_forks_repo_path": "src/filters.jl", "max_forks_repo_name": "ElectronicTeaCup/MegTools", "max_forks_repo_head_hexsha": "fef50fdcc6261fc645fee54c847d51b6c05d7f6f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2278481013, "max_line_length": 139, "alphanum_fraction": 0.7293794187, "num_tokens": 637}
|
import numpy as np
def cross_entropy(y, y_net):
    """Mean binary cross-entropy between targets ``y`` and predictions ``y_net``.

    Averages over axis 0 (the sample axis). ``y_net`` entries must lie
    strictly inside (0, 1), otherwise ``np.log`` yields -inf/nan.
    """
    n_samples = y.shape[0]
    per_sample = y * np.log(y_net) + (1 - y) * np.log(1 - y_net)
    return -1 / n_samples * per_sample.sum(axis=0)
def sigmoid(x):
    """Numerically stable elementwise logistic function 1 / (1 + exp(-x)).

    The original form exp(x) / (1 + exp(x)) overflows for large positive x
    (exp(710.) == inf in float64) and returned nan. Here only exp(-|x|) is
    evaluated, which at worst underflows harmlessly to 0.

    Returns a numpy array (0-d for scalar input, where the original
    returned a plain float; the value is unchanged).
    """
    x = np.asarray(x, dtype=float)
    z = np.exp(-np.abs(x))
    # x >= 0:  1 / (1 + e^-x);   x < 0:  e^x / (1 + e^x)  -- same function.
    return np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
class NeuralNet:
    """Two-layer perceptron whose weights live in one flat "dna" vector.

    ``W1`` and ``W2`` are *views* into ``dna`` (the last row of each holds
    the bias), so mutating the genome in place also updates the network,
    and vice versa.
    """

    def __init__(self, in_size, hl_size, out_size, dna=None):
        n_hidden = (in_size + 1) * hl_size
        n_output = (hl_size + 1) * out_size
        fresh = dna is None
        self.dna = np.random.randn(n_hidden + n_output) if fresh else dna
        # Views into the genome: rows 0..k-1 are weights, the final row is the bias.
        self.W1 = self.dna[:n_hidden].reshape((in_size + 1, hl_size))
        self.W2 = self.dna[n_hidden:].reshape((hl_size + 1, out_size))
        if fresh:
            # 1/sqrt(fan_in) scaling; in-place division also rescales dna.
            self.W1 /= np.sqrt(self.W1.shape[0])
            self.W2 /= np.sqrt(self.W2.shape[0])

    def forward(self, x1):
        """Return softmax class probabilities for input ``x1``."""
        hidden = np.tanh(x1 @ self.W1[:-1] + self.W1[-1])
        logits = hidden @ self.W2[:-1] + self.W2[-1]
        # Shift by the row maximum for numerical stability before exp().
        expd = np.exp(logits - logits.max(axis=-1, keepdims=True))
        return expd / expd.sum(axis=-1, keepdims=True)

    def decide(self, x1):
        """Index of the most probable class for input ``x1``."""
        return np.argmax(self.forward(x1))

    def __getstate__(self):
        # Pickle only the two weight views; dna itself is not serialised.
        return {"W1": self.W1, "W2": self.W2}

    def __setstate__(self, state):
        self.__dict__.update(state)
|
{"hexsha": "175ce68d72ed111345600f8e5f493a2d757dd08c", "size": 1270, "ext": "py", "lang": "Python", "max_stars_repo_path": "snakipy/neuro.py", "max_stars_repo_name": "gab50000/PySnake", "max_stars_repo_head_hexsha": "22ec382d7aa3d957897d6f85ce65b2e52a05b863", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "snakipy/neuro.py", "max_issues_repo_name": "gab50000/PySnake", "max_issues_repo_head_hexsha": "22ec382d7aa3d957897d6f85ce65b2e52a05b863", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "snakipy/neuro.py", "max_forks_repo_name": "gab50000/PySnake", "max_forks_repo_head_hexsha": "22ec382d7aa3d957897d6f85ce65b2e52a05b863", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5348837209, "max_line_length": 86, "alphanum_fraction": 0.5535433071, "include": true, "reason": "import numpy", "num_tokens": 403}
|
Job Board Job hunting can be a royal pain in the youknowwhat. This page may help streamline that process. Note that many places that have online applications also have paper applications available instore.
Retail
Bookstores
Bring in resume:
Avid Reader
OffCampus Books
Newsbeat
Other bookstores (please indicate the type of application process for any of these, if you know):
Bizarro World
Davis Christian Bookroom
Bicycle Shops and Repair
Apply Online:
Kens Bike & Ski (http://kensbikeski.com/page.cfm?pageID222 Apply Here)
Print application, then submit instore:
B&L Bike Shop (http://blbikeshop.com/merchant/504/files/EmploymentApplication.pdf Print Here)
Apply in person:
APEX Cycles Come talk to Aaron and see if you are a fit for the shop. 12 week probationary period required
Other Bicycle Shops (please indicate the type of application process for any of these, if you know):
Davis Bike Exchange
Freewheeler
Wheelworks
Mobile Phones Cell Phone Retailers
Other cell phone retailers (please indicate the type of application process for any of these, if you know):
Absolute Cellular
AT&T Wireless
Heron Technologies is now Davis Computer
Mobile Connections
Parrot Cellular
Children ChildOriented
Apply online:
Gap Gap Kids (http://www.gapinc.com/public/Careers/car_jobsearch.shtml Apply Here)
Other childoriented stores (please indicate the type of application process for any of these, if you know):
Mother and Baby Source
Teach Your Children
Clothing Stores
Apply Online:
The Gap (http://www.gapinc.com/public/Careers/car_jobsearch.shtml Apply Here)
Pick up application instore:
The Wardrobe
Other clothing stores (please indicate the type of application process for any of these, if you know):
Ground Zero
Nina and Tom
Preeti Girl
Pinkadot
Renew Denim
Riki
Other
Apply Online:
Petco (http://www.petco.com/petco_Page_PC_retailstoreopps_Nav_117.aspx Apply Here) Online only
Cost Plus
Office Max
Print application, then submit instore:
Dollar Tree (http://www.dollartree.com/downloads/career/Sales_Associate_Application.pdf Print Here)
Pick up an application instore:
Ace Hardware
Davis Home Trends
The Paint Chip
Submit resume:
Ink Monkey Graphics
Other places (please indicate the type of application process for any of these, if you know):
Bath and Body Works
Big 5 Sporting Goods
H2O to go
Davis Academy Driving School
Davis Shoe Shop & Pedorthic
Fleet Feet
Hibbert Lumber
Nestware
Rivers to Reef
Tibet Nepal
Valley Wine Company
Volleys Tennis Shop
Food and Restaurants
Casual Dining
Apply online:
Noahs Bagels (http://www.noahs.com/#/careers/ Apply Here)
Print application, then submit instore:
Beach Hut Deli (http://www.beachhutdeli.com/ Print here)
Dos Coyotes (http://www.doscoyotes.net/dosapp.pdf Print here)
Pick up application instore:
Village Bakery
Submit resume:
Delta of Venus
Other casual dining (please indicate the type of application process for any of these, if you know):
Ali Baba
Chuys Taqueria
El Mariachi
El Toro Bravo
Golden Sun
Hot Dogger
Konditorei
Mr. Pickles Sandwich Shop
Taqueria Davis
Taqueria El Burrito
Taqueria Guadalajara
The Posh Bagel
The Graduate
Coffee Shops
Apply online:
Starbucks (http://www.starbucks.com/aboutus/job_search_pop.asp Apply for a Corporate Position Here)
Print application, then submit instore:
Common Grounds (http://www.commongroundsdavis.com/files/Application.pdf Print Here)
Peets Coffee (http://www.peets.com/careers/job_postings.asp?cm_refooter_career_text&cm_spcareer_footer_text Print Here)
Starbucks (http://media.starbucks.com.edgesuite.net/dotcom/media/pdf/Starbucks_Application.pdf Print Retail Application Here)
Pick up application instore:
Cloud Forest Cafe
Mocha Joes
Bring in resume:
Mishkas
Other coffee shops (please indicate the type of application process for any of these, if you know):
3rd & U Cafe
Barista Brew
Dinein
Submit resume online
Cafe Bernardo (http://www.paragarys.com/go/prg/careers/submityourresume/index.cfm Submit here, but you can also apply inperson)
Print application, then submit instore:
Bistro 33 (http://www.bistro33.com/bistro33_davis/SRO_Job_App.pdf Print here)
Caffe Italia (http://www.dancingtomato.com/employment.html Print here)
Plutos (http://www.plutosfreshfood.com/documents/PlutosEmpApp_10.19.06.pdf Print here)
Sushi Unlimited (http://www.sushiunlimited.net/empApp.pdf Print Here)
Pick up application instore:
Applebees
Submit resume instore:
Osteria Fasulo
Tucos Wine Market and Cafe
Other dinein restaurants (please indicate the type of application process for any of these, if you know):
Black Bear Diner
Burgers and Brew
Café Méditerranée
Cindys
Crepeville
Davis Noodle City
Davis Oshio Cafe
Davis Sushi Buffet Japanese Restaurant
Ding How
Farmers Kitchen Cafe
Fuji Chef
Froggys
G Street Pub now G St. Wunderbar
Great Wall of China II
Hoa Viet
Hometown Chinese Food
House of Chang
Hunan
IHOP
Jade Garden
Jusco
Kathmandu Kitchen
KetMoRee
Little Prague
Moshi Moshi
Nobu Hiro
Old Teahouse
Plainfield Station
Rajas
Red Orchid Restaurant
Seasons
Sams Mediterranean
Shanghai Town
Silver Dragon
Sophias Thai Kitchen
Sudwerk
Sunrise Restaurant
Symposium
Thai Nakorn
Thai Recipes
Mustard Seed The Mustard Seed
Wildhorse Grill
Wok of Flame
Zen Toro
Fast Food
Apply online:
Burger King (http://www.bk.com/companyinfo/careers/rmp.aspx Apply Here)
Chipotle (http://www4.recruitingcenter.net/clients/chipotle/publicjobs/controller.cfm?jbactionApplyToJob Apply Here)
Del Taco (http://www.deltaco.com/page42.html Apply Here)
McDonalds (http://www.mcstate.com/careers/ Apply Here)
Jamba Juice (http://www.jambajuice.com/jobs_careers#/job_listings/ Apply Here)
Panda Express (http://www.pandacareers.com/index.asp Apply Here)
Taco Bell (http://www.yumcareers.com/jobs_framesetup.html Apply Here)
Print application, then return instore:
InnOut (http://www.innout.com/inoapp.pdf Print Here)
Ohana Hawaiian BBQ (http://www.ohanabbqcorp.com/pdf/ohib_jobapp.pdf Print Here)
Pick up application instore:
Carls Jr.
Chipotle
Habit Burger
Other fast food restaurants (please indicate the type of application process for any of these, if you know):
Dairy Queen
Jack in the Box
KFC
Quickly (http://www.quicklyusa.com/jobopportunity.html Print Here)
Redrum Burger
Pizza
Apply online:
Papa Murphys (http://www.selfmgmt.com/clients/papamurphys/ Apply Here)
Pizza Guys (https://www.pizzaguys.com/home/jobs.cfm Apply Here)
Round Table Pizza (http://www.roundtablepizza.com/RTP/HI/ Apply Here)
Print application, then return instore:
Cenarios Pizza (http://www.cenariospizza.com/ Print Here, click downloads and scroll down)
Woodstocks Pizza (http://woodstocksdavis.com/pg/Jobs.htm Print Here)
Pick up an application instore:
Lamppost Pizza
Email resume:
Uncle Vitos Slice of N.Y. (email to jobs@paesanos.biz)
Other pizza places (please indicate the type of application process for any of these, if you know):
Dominos Pizza
Little Caesars
Original Steves
Sandwiches
Apply online:
Subway (http://www.subway.com/subwayroot/AboutSubway/Employment/Local/index.aspx Apply Here)
Print application, then return instore:
Beach Hut Deli (http://www.beachhutdeli.com/BHD_emp_ap.pdf Print Here)
Pick up an application instore:
Other sandwich places (please indicate the type of application process for any of these, if you know):
Quiznos
Togos
Zias Delicatessen
Ice Cream and Dessert
Print application, then return instore:
Cultive Frozen Yogurt (http://cultiveyogurt.com/images/cultive_application.pdf Print Here)
Pick up an application instore:
Baskin Robbins
Email Resume:
Yolo Berry Yogurt (email to yoloberry@gmail.com)
Other ice cream and dessert places (please indicate the type of application process for any of these, if you know):
Ciocolat
Fluffy Donuts
IceKrimski cafe
Let them eat Cake
SugarPlum
The Candy House of Davis
Tutti Frutti Frozen Yogurt
Grocery, Convenience and Drug Stores
Apply Online:
7Eleven (http://www.7eleven.com/Careers/SalesAssociate/tabid/143/Default.aspx Apply Here)
Nugget Market (https://jobsearch.unicru.com/JLohome.aspx?rscid{8eb07c836aff42b0e543ed3a6af31c61}&RPCR Apply Here) Online only
Safeway (http://www.safeway.com/Employment/#iframetop Apply Here) Online applications only
Savemart (http://www.savemart.com/storelevel.php Apply Here)
Print application, then submit instore:
Davis Food Coop (http://www.davisfood.coop/employment.html Print Here)
Pick up application instore:
Circle K
Rite Aid
Other Grocery, Convenience and Drug Stores (please indicate the type of application process for any of these, if you know):
AM/PM
CVS Pharmacy
El Macero Pharmacy
Fast and Easy Mart
Kims Mart
Quick Shop Market
The Olive Drive Market
Services
Copies
Apply Online:
FedEx Office (formerly known as Kinkos) (http://fedex.hodesiq.com/careers/job_search.aspx Apply Here)
Pick up an application instore:
Davis Copy Shop
Other copy places (please indicate the type of application process for any of these, if you know):
Copyland
Gyms and Fitness
Other gyms (please indicate the type of application process for any of these, if you know):
Davis Athletic Club
Davis Swim and Fitness Club
Peak Performance
Physical Edge
Shipping
Pick up application instore:
Parcel Dispatch PDQ
Submit Resume:
PostMarks
Other shipping places (please indicate the type of application process for any of these, if you know): UPS Store
Tutoring
Apply Online:
Kaplan (http://www.kaptest.com/Teach_for_Kaplan/Overview/CS_teach_about.html Apply Here)
The Princeton Review (http://www.princetonreview.com/teachfortheprincetonreview.aspx?uidbadge Apply Here)
Club Z! InHome Tutoring (http://www.clubztutoring.com/tutorform.php Apply or inquire here)
Print application, then submit instore:
OneonOne Tutoring and Educational Mapping (http://www.oneononedavis.com/index5.html Print Here)
Submit resume:
Partners in Learning
Other tutoring venues (please indicate the type of application process for any of these, if you know):
Kumon Math & Reading Centers
LINK Tutoring Services
Research
Evaluating Grain Products Study Another way to earn money (and score free tasty food) is by participating in studies like this one, where food is given out free, and participants are paid for their time!
Dietary Fiber Study Free food for 3 weeks and participants are paid handsomely for their time!
Entertainment
Theaters
Regal Cinemas Davis Holiday 6
Regal Cinemas Davis Stadium 5
Varsity Theater
Music
Submit resume:
Armadillo Music
Other music places (please indicate the type of application process for any of these, if you know): Watermelon Music
Video Rental
Other video rental places (please indicate the type of application process for any of these, if you know):
Stonegate Video
|
{"hexsha": "6c161255e2b5d7faf364be47ef507ef2fbd3021b", "size": 11071, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Job_Applications.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Job_Applications.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Job_Applications.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0112044818, "max_line_length": 207, "alphanum_fraction": 0.7760816548, "num_tokens": 2887}
|
""" Subclass from Abstract Base Class featureExtractor that outputs features of the raw data that are required for machine learning models """
import numpy as np
from prosi3d.meta.featureExtractor import FeatureExtractor
class Nircamera (FeatureExtractor):
    """Feature extractor for near-infrared (NIR) camera data.

    Concrete subclass of ``FeatureExtractor``; every pipeline step below
    is currently an unimplemented stub (each body is ``pass``).
    Attributes: none defined yet -- TODO document once implemented.
    """
    def get_data(self):
        """Acquire the raw camera data (stub, not yet implemented)."""
        pass
    def process(self):
        """Process raw data into an intermediate form (stub, not yet implemented)."""
        pass
    def write(self):
        """Persist the processed data (stub, not yet implemented)."""
        pass
    def get_feature(self):
        """Return extracted features for the ML models (stub, not yet implemented)."""
        pass
|
{"hexsha": "22e0e3a56a81b9f4f3cd5e954dff822dd60c27ed", "size": 598, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/prosi3d/sensors/nircamera.py", "max_stars_repo_name": "pzimbrod/prosi-3d", "max_stars_repo_head_hexsha": "6eaa5b9cdb7192f542417429b1775c3e61a9bc60", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/prosi3d/sensors/nircamera.py", "max_issues_repo_name": "pzimbrod/prosi-3d", "max_issues_repo_head_hexsha": "6eaa5b9cdb7192f542417429b1775c3e61a9bc60", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-11-11T07:32:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-23T15:42:26.000Z", "max_forks_repo_path": "src/prosi3d/sensors/nircamera.py", "max_forks_repo_name": "pzimbrod/prosi-3d", "max_forks_repo_head_hexsha": "6eaa5b9cdb7192f542417429b1775c3e61a9bc60", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.2903225806, "max_line_length": 142, "alphanum_fraction": 0.5969899666, "include": true, "reason": "import numpy", "num_tokens": 123}
|
[STATEMENT]
lemma expands_to_powr_nat_0_0:
assumes "eventually (\<lambda>x. f x = 0) at_top" "eventually (\<lambda>x. g x = 0) at_top"
"basis_wf basis" "length basis = expansion_level TYPE('a :: multiseries)"
shows "((\<lambda>x. powr_nat (f x) (g x)) expands_to (const_expansion 1 :: 'a)) basis"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>x. powr_nat (f x) (g x)) expands_to const_expansion 1) basis
[PROOF STEP]
proof (rule expands_to_cong [OF expands_to_const])
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. basis_wf basis
2. length basis = expansion_level TYPE('a)
3. \<forall>\<^sub>F x in at_top. 1 = powr_nat (f x) (g x)
[PROOF STEP]
from assms(1,2)
[PROOF STATE]
proof (chain)
picking this:
\<forall>\<^sub>F x in at_top. f x = 0
\<forall>\<^sub>F x in at_top. g x = 0
[PROOF STEP]
show "eventually (\<lambda>x. 1 = powr_nat (f x) (g x)) at_top"
[PROOF STATE]
proof (prove)
using this:
\<forall>\<^sub>F x in at_top. f x = 0
\<forall>\<^sub>F x in at_top. g x = 0
goal (1 subgoal):
1. \<forall>\<^sub>F x in at_top. 1 = powr_nat (f x) (g x)
[PROOF STEP]
by eventually_elim (simp add: powr_nat_def)
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F x in at_top. 1 = powr_nat (f x) (g x)
goal (2 subgoals):
1. basis_wf basis
2. length basis = expansion_level TYPE('a)
[PROOF STEP]
qed fact+
|
{"llama_tokens": 595, "file": null, "length": 5}
|
# histogramPlotter.py
# Input is a file containing a single column of data (going to be using this for BLEU scores)
# Output is a histogram of the data.
#
# Expects 2 arguments:
# --input_data /path/to/test/dataset.csv
# --output_file /path/to/output/file.jpg
#
# Dylan Auty, 31/05/16
import argparse, json
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# Use argparse to fetch the input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--input_data')  # path to a single-column CSV of scores (e.g. BLEU)
parser.add_argument('--output_file')  # path for the output histogram image (e.g. .jpg)
args = parser.parse_args()
if __name__ == '__main__':
    # Read in the single-column CSV (one score per line).
    data = np.genfromtxt(args.input_data, delimiter=",")
    # Manual mean kept alongside ndarray.mean() as a sanity cross-check,
    # matching the original script's two debug prints.
    mean = sum(data) / float(len(data))
    # repr() replaces the Python-2-only backtick syntax (`x`), which is a
    # SyntaxError on Python 3; the printed text is identical.
    print("Data mean = " + repr(mean))
    print("data.mean() = " + repr(data.mean()))
    fig1 = plt.figure()
    resultGraph = plt.hist(data, bins=100, color='blue', alpha=0.75)
    # Dashed vertical line marking the mean of the distribution.
    plt.axvline(data.mean(), color='b', linestyle='dashed', linewidth=2)
    plt.xlabel("BLEU Score")
    plt.ylabel("Frequency")
    plt.savefig(args.output_file)
    plt.close(fig1)
    print("Done.")
|
{"hexsha": "b385d0a8417346b0b2bacc11bb327ab735c8ebd2", "size": 1175, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/evaluation/histogramPlotter.py", "max_stars_repo_name": "DylanAuty/torch-rnn-constrained", "max_stars_repo_head_hexsha": "49ac085ca5dc3ef68741b8fbabe4804eb6e19fc2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2016-06-27T11:32:14.000Z", "max_stars_repo_stars_event_max_datetime": "2018-01-08T14:29:56.000Z", "max_issues_repo_path": "scripts/evaluation/histogramPlotter.py", "max_issues_repo_name": "DylanAuty/torch-rnn-constrained", "max_issues_repo_head_hexsha": "49ac085ca5dc3ef68741b8fbabe4804eb6e19fc2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2016-02-23T19:15:58.000Z", "max_issues_repo_issues_event_max_datetime": "2016-06-09T13:15:04.000Z", "max_forks_repo_path": "scripts/evaluation/histogramPlotter.py", "max_forks_repo_name": "DylanAuty/torch-rnn-constrained", "max_forks_repo_head_hexsha": "49ac085ca5dc3ef68741b8fbabe4804eb6e19fc2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3255813953, "max_line_length": 93, "alphanum_fraction": 0.6876595745, "include": true, "reason": "import numpy", "num_tokens": 305}
|
import warnings
import pickle
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import sys
import os
import math
import bisect
import tensorflow as tf
import warnings
# if you run python inside the folder, then:
sys.path.insert(0, '../lib')
print(sys.path)
from cde.data_collector import ParquetDataset
from cde.density_estimator import NoNaNGPDExtremeValueMixtureDensityNetwork
from cde.density_estimator import MixtureDensityNetwork
from cde.evaluation.empirical_eval import evaluate_models_save_plots
# Paths: trained models/data are read from path_train, plots are written to path_valid.
path_train = '../training/saves/'
path_valid = 'saves/'
warnings.filterwarnings('ignore')
# Experiment grid: one evaluation per (predictor, batch size, replica, model type).
predictor_nums = [1,2,3]
model_strs = ['emm','gmm']
batch_sizes = [10000]
replica_nums = [0]
#most common: ([1,1,2],[1,1],[1])
network_states = ([1,4,2],[4,2],[2])
models = []
for idx,predictor_num in enumerate(predictor_nums):
    # Import the test dataset into a NumPy array.
    file_addr = ['../data/sim3hop_1_dataset_06_Sep_2021_11_20_40.parquet']
    batch_size_test = 80000000
    test_dataset = ParquetDataset(file_addresses=file_addr,predictor_num=predictor_num)
    test_data = test_dataset.get_data_unshuffled(batch_size_test)
    ndim_x_test = len(test_data[0])-1
    #print(np.shape(train_data))
    print('Predictor-%d dataset loaded from ' % predictor_num, file_addr,'. Rows: %d ' % len(test_data[:,0]), ' Columns: %d ' % len(test_data[0,:]), ' ndim_x: %d' % ndim_x_test)
    models_type = []
    for batch_size in batch_sizes:
        # Load the training data saved by the training stage.
        FILE_NAME = 'traindata_p'+str(predictor_num)+'_'+str(int(batch_size/1000))+'k.npz'
        npzfile = np.load(path_train + FILE_NAME)
        train_data = npzfile['arr_0']
        meta_info = npzfile['arr_1']
        batch_size = int(meta_info[0])
        ndim_x = int(meta_info[1])
        predictor_num = int(meta_info[2])
        n_replicas = int(meta_info[3])
        print('Predictor-%d training data loaded from .npz file. Rows: %d ' %(predictor_num,len(train_data[:,0,0])) , 'with Batch size: %dk ' % int(batch_size/1000) , ' Columns: %d ' % len(train_data[0,:,0]), ' Replicas: %d' % len(train_data[0,0,:]) , ' ndim_x: %d' % ndim_x)
        # currently only one batch size so no model array
        for replica_num in replica_nums:
            # BUGFIX: slice into a new name instead of overwriting train_data.
            # The original `train_data = train_data[:,:,replica_num]` turned
            # the 3-D array into 2-D, breaking every replica after the first.
            replica_data = train_data[:,:,replica_num]
            for model_str in model_strs:
                # Load the trained emm/gmm models.
                FILE_NAME = 'trained_'+model_str+'_p'+str(predictor_num)+'_s'+str(int(batch_size/1000))+'_r'+str(replica_num)+'.pkl'
                # NOTE(review): pickle.load executes arbitrary code -- only
                # load checkpoint files from trusted sources.
                with open(path_train + 'trained_models/' + FILE_NAME, 'rb') as model_file:
                    # '==' replaces 'is': identity comparison with a string
                    # literal is implementation-dependent (SyntaxWarning on 3.8+).
                    if model_str == 'emm':
                        model = NoNaNGPDExtremeValueMixtureDensityNetwork(name=model_str+str(predictor_num), ndim_x=4-predictor_num, ndim_y=1)
                    else:
                        model = MixtureDensityNetwork(name=model_str+str(predictor_num), ndim_x=4-predictor_num, ndim_y=1)
                    model._setup_inference_and_initialize()
                    model = pickle.load(model_file)
                models_type.append(model)
                print(model)
    network_state = network_states[predictor_num-1]
    plt.style.use('plot_style.txt')
    # replica_data is the last replica's 2-D slice -- identical to what the
    # original (overwritten) train_data held at this point.
    evaluate_models_save_plots(models=models_type,model_names=["EMM prediction","GMM prediction"],train_data=replica_data,cond_state=network_state,test_dataset=test_data,quantiles=[1-1e-1,1-1e-2,1-1e-3,1-1e-4,1-1e-5],save_fig_addr=path_valid+'dual_test_')
    models.append(models_type)
print('models shape: ' + str([len(models),len(models[0])]))
#evaluate_models_singlestate(models=[model],model_names=["EMM"],train_data=train_data,cond_state=[1],test_dataset=test_data,quantiles=[1-1e-1,1-1e-2,1-1e-3,1-1e-5])
#evaluate_models_singlestate(models=[model],model_names=["EMM"],train_data=train_data,cond_state=[2],test_dataset=test_data,quantiles=[1-1e-1,1-1e-2,1-1e-3,1-1e-5])
#evaluate_models_singlestate(models=[model],model_names=["EMM"],train_data=train_data,cond_state=[10],test_dataset=test_data,quantiles=[1-1e-1,1-1e-2,1-1e-3,1-1e-5])
#unique_states,_,_ = get_most_common_unique_states(test_data[1000000:5000000,:],ndim_x=1,N=30,plot=True,save_fig_addr=path)
|
{"hexsha": "268cd10e1ef2a4a624cf1c3425d6977fabc73ea5", "size": 4222, "ext": "py", "lang": "Python", "max_stars_repo_path": "latency_prediction/validation/validate_model_dual.py", "max_stars_repo_name": "samiemostafavi/data-driven-dvp-prediction", "max_stars_repo_head_hexsha": "a6f4ac16f047f677dca532ba1303521628a053fe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "latency_prediction/validation/validate_model_dual.py", "max_issues_repo_name": "samiemostafavi/data-driven-dvp-prediction", "max_issues_repo_head_hexsha": "a6f4ac16f047f677dca532ba1303521628a053fe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "latency_prediction/validation/validate_model_dual.py", "max_forks_repo_name": "samiemostafavi/data-driven-dvp-prediction", "max_forks_repo_head_hexsha": "a6f4ac16f047f677dca532ba1303521628a053fe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.914893617, "max_line_length": 275, "alphanum_fraction": 0.6882993842, "include": true, "reason": "import numpy", "num_tokens": 1187}
|
# Parse human-readable numeric strings by stripping the separator character(s)
# (e.g. "1,000,000") before handing the cleaned string to Base.parse.
parse_readable(::Type{T}, s::String, ch::Char) where {T <: Union{Integer, AbstractFloat}} =
    Base.parse(T, filter(c -> c != ch, s))
parse_readable(::Type{T}, s::String, ch1::Char, ch2::Char) where {T <: AbstractFloat} =
    Base.parse(T, filter(c -> c != ch1 && c != ch2, s))
"""
how many times does char c occur in string s
"""
function count_char(s::String, c::Char)
r = (c=='.') ? Regex("\\.") : Regex(string(c))
return length( matchall(r,s) )
end
|
{"hexsha": "a96fd88d980b0050320f178599f1fc99382dd718", "size": 487, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/parse.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/ReadableNumbers.jl-7774933c-dd73-5de8-a8c3-ca082e6dff1c", "max_stars_repo_head_hexsha": "16e65bed68cad3d1674db547a40ef1b174e870e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/parse.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/ReadableNumbers.jl-7774933c-dd73-5de8-a8c3-ca082e6dff1c", "max_issues_repo_head_hexsha": "16e65bed68cad3d1674db547a40ef1b174e870e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/parse.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/ReadableNumbers.jl-7774933c-dd73-5de8-a8c3-ca082e6dff1c", "max_forks_repo_head_hexsha": "16e65bed68cad3d1674db547a40ef1b174e870e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6470588235, "max_line_length": 91, "alphanum_fraction": 0.6283367556, "num_tokens": 141}
|
#=
Copyright 2020 INSIGNEO Institute for in silico Medicine
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=#
__precompile__(true)
module openBF
export Vessel, Heart, Blood, runSimulation
using YAML
using ArgParse
using StaticArrays
using DelimitedFiles
using LinearAlgebra
using Printf
"""
Heart type
"""
struct Heart
inlet_type :: String
cardiac_T :: Float64
input_data :: Array{Float64,2}
inlet_number :: Int
end
"""
Blood type
"""
struct Blood
mu :: Float64
rho :: Float64
rho_inv :: Float64
end
"""
Vessel type
"""
mutable struct Vessel
label :: String
#Topology
ID :: Int
sn :: Int
tn :: Int
#Inlet
inlet :: Bool
heart :: Heart
#Numerical constants
M :: Int
dx :: Float64
invDx :: Float64
halfDx :: Float64
#Physical constants
beta :: Array{Float64,1}
gamma :: Array{Float64,1}
s_15_gamma :: Array{Float64,1}
gamma_ghost :: Array{Float64,1}
A0 :: Array{Float64,1}
s_A0 :: Array{Float64,1}
inv_A0 :: Array{Float64,1}
s_inv_A0 :: Array{Float64,1}
Pext :: Float64
viscT :: Float64
wallE :: Array{Float64,1}
wallVa :: Array{Float64,1}
wallVb :: Array{Float64,1}
#Iterative solution
A :: Array{Float64,1}
Q :: Array{Float64,1}
u :: Array{Float64,1}
c :: Array{Float64,1}
P :: Array{Float64,1}
A_t :: Array{Float64,2}
Q_t :: Array{Float64,2}
u_t :: Array{Float64,2}
c_t :: Array{Float64,2}
P_t :: Array{Float64,2}
A_l :: Array{Float64,2}
Q_l :: Array{Float64,2}
u_l :: Array{Float64,2}
c_l :: Array{Float64,2}
P_l :: Array{Float64,2}
#Riemann invariants
W1M0 :: Float64
W2M0 :: Float64
#Ghost cells
U00A :: Float64
U00Q :: Float64
U01A :: Float64
U01Q :: Float64
UM1A :: Float64
UM1Q :: Float64
UM2A :: Float64
UM2Q :: Float64
#Result file names
last_P_name :: String
last_Q_name :: String
last_A_name :: String
last_c_name :: String
last_u_name :: String
out_P_name :: String
out_Q_name :: String
out_A_name :: String
out_c_name :: String
out_u_name :: String
#Saving nodes
node2 :: Int
node3 :: Int
node4 :: Int
#Peripheral boundary condition
Rt :: Float64
R1 :: Float64
R2 :: Float64
Cc :: Float64
Pc :: Float64
#Slope
slope :: Array{Float64,1}
#MUSCLArrays
flux :: Array{Float64,2}
uStar :: Array{Float64,2}
vA :: Array{Float64,1}
vQ :: Array{Float64,1}
dU :: Array{Float64,2}
slopesA :: Array{Float64,1}
slopesQ :: Array{Float64,1}
Al :: Array{Float64,1}
Ar :: Array{Float64,1}
Ql :: Array{Float64,1}
Qr :: Array{Float64,1}
Fl :: Array{Float64,2}
Fr :: Array{Float64,2}
#Outlet type
outlet :: String
end
include("initialise.jl")
include("boundary_conditions.jl")
include("solver.jl")
include("junctions.jl")
include("conjunctions.jl")
include("bifurcations.jl")
include("anastomosis.jl")
include("IOutils.jl")
include("check_convergence.jl")
include("program.jl")
end
|
{"hexsha": "a178f1b514c39722403d8cf4c633d17bb8742f22", "size": 4257, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/openBF.jl", "max_stars_repo_name": "ibenemerito88/openBF_workshop", "max_stars_repo_head_hexsha": "a63a6fbd1ef8528890fb1072730124e054875008", "max_stars_repo_licenses": ["Zlib", "Apache-2.0"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2017-07-11T10:11:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T06:11:06.000Z", "max_issues_repo_path": "src/openBF.jl", "max_issues_repo_name": "ibenemerito88/openBF_workshop", "max_issues_repo_head_hexsha": "a63a6fbd1ef8528890fb1072730124e054875008", "max_issues_repo_licenses": ["Zlib", "Apache-2.0"], "max_issues_count": 39, "max_issues_repo_issues_event_min_datetime": "2017-07-06T12:57:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-05T16:05:29.000Z", "max_forks_repo_path": "src/openBF.jl", "max_forks_repo_name": "ibenemerito88/openBF_workshop", "max_forks_repo_head_hexsha": "a63a6fbd1ef8528890fb1072730124e054875008", "max_forks_repo_licenses": ["Zlib", "Apache-2.0"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2017-08-18T15:04:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T08:52:47.000Z", "avg_line_length": 22.5238095238, "max_line_length": 72, "alphanum_fraction": 0.5407564012, "num_tokens": 1167}
|
# coding: utf-8
# ************************************
# Author: Ziqin Wang
# Email: ziqin.wang.edu@gmail.com
# Github: https://github.com/Storife
# ************************************
import argparse
from math import log10
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import time
import numpy as np
import os
from RANet_lib import *
from RANet_lib.RANet_lib import *
from RANet_model import RANet as Net
import os
import os.path as osp
from glob import glob
# ---------------------------------------------------------------------------
# Command-line configuration and global model construction for RANet
# inference.  NOTE: this runs at import time (argument parsing, CUDA device
# selection, working-folder creation, and model instantiation).
# ---------------------------------------------------------------------------
net_name = 'RANet'
parser = argparse.ArgumentParser(description='RANet')
parser.add_argument('--deviceID', default=[0], help='device IDs')
parser.add_argument('--threads', type=int, default=16, help='number of threads for data loader to use')
parser.add_argument('--workfolder', default='../models/')
parser.add_argument('--savePName', default=net_name)
parser.add_argument('--net_type', default='single_object')
parser.add_argument('--fp16', default=False)
print('===> Setting ......')
opt = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
# BUG FIX: the original bare `except:` swallowed *every* error (permission
# denied, bad parent path, even KeyboardInterrupt) and misreported it as the
# folder already existing.  Only an already-existing folder is benign here.
try:
    os.mkdir(opt.workfolder)
    print('build working folder: ' + opt.workfolder)
except FileExistsError:
    print(opt.workfolder + ' exists')  # fixed missing space in message
print('using device ID: {}'.format(opt.deviceID))
print('===> Building model')
model = Net(pretrained=False, type=opt.net_type)
# Populated by predict_SVOS() once the model has been moved to the GPU.
model_cuda = None
def predict_SVOS(model_cuda=None, params='', add_name='', dataset='16val', save_root='./test/'):
    """Run video object segmentation inference on a DAVIS split.

    Loads the checkpoint named ``params`` from ``opt.workfolder`` into the
    module-level ``model``, moves it to the GPU(s), and writes the predicted
    masks under ``save_root`` via ``fitpredict17``.

    Args:
        model_cuda: Ignored on input; reassigned internally to the CUDA model.
        params (str): Checkpoint filename relative to ``opt.workfolder``.
        add_name (str): Suffix forwarded to ``fitpredict17``.
        dataset (str): DAVIS split key; one of '16val', '16trainval',
            '16all', '17val', '17test_dev'.
        save_root (str): Output directory for the predictions.

    Raises:
        ValueError: If ``dataset`` is not a supported split key.
    """
    # Padded network input size (height, width) used by the transforms below.
    inSize1 = 480
    inSize2 = 864
    print('save root = ' + save_root)
    if dataset in ['16val', '16trainval', '16all']:
        #model.set_type('single_object')
        year = '2016'
    elif dataset in ['17val', '17test_dev']:
        #model.set_type('multi_object')
        year = '2017'
    else:
        # BUG FIX: the original `assert('dataset error')` always passed (a
        # non-empty string is truthy), so `year` was left unbound and the
        # code crashed later with a NameError.  Fail fast and explicitly.
        raise ValueError('dataset error: unsupported dataset {!r}'.format(dataset))
    DAVIS = dict(reading_type='SVOS',
                 year=year,
                 root='../datasets/DAVIS/',
                 subfolder=['', '', ''],
                 mode=dataset,
                 tar_mode='rep',
                 train=0, val=0, test=0, predict=1,
                 length=None,
                 init_folder=None,
                 )
    dataset = DAVIS2017_loader(
        [DAVIS], mode='test',
        transform=[PAD_transform([inSize1, inSize2], random=False),
                   PAD_transform([inSize1, inSize2], random=False)],
        rand=Rand_num())
    checkpoint_load(opt.workfolder + params, model)
    if opt.deviceID == [0]:
        # Single device: plain CUDA model.
        model_cuda = model.cuda()
    else:
        # Multiple devices: wrap in DataParallel.
        model_cuda = nn.DataParallel(model).cuda()
    if opt.fp16:
        model_cuda = model_cuda.half()
        model_cuda.fp16 = True
    fitpredict17(dataset, model_cuda, add_name=add_name, threads=1, batchSize=1, save_root=save_root)
if __name__ == '__main__':
    # Run inference on the DAVIS-2017 validation split with the single-object
    # video checkpoint.  The commented lines are alternative split/checkpoint
    # combinations kept for convenience.
    #predict_SVOS(params='RANet_video_single.pth', dataset='16val', save_root='../predictions/RANet_Video_16val')
    #predict_SVOS(params='RANet_image_single.pth', dataset='16all', save_root='../predictions/RANet_Image_16all')
    predict_SVOS(params='RANet_video_single.pth', dataset='17val', save_root='../predictions/RANet_Video_17val')
    # predict_SVOS(params='RANet_video_multi.pth', dataset='17test_dev', save_root='../predictions/RANet_Video_17test_dev')
|
{"hexsha": "78f6e231536513fd6fab6ef5e9bcab6d6a418969", "size": 3376, "ext": "py", "lang": "Python", "max_stars_repo_path": "codes/RANet.py", "max_stars_repo_name": "cvmlarun/RANet", "max_stars_repo_head_hexsha": "3f67a3f36aaacd9cc7fb98ec79f77db8f1ebdc60", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-05T05:40:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-05T05:40:45.000Z", "max_issues_repo_path": "codes/RANet.py", "max_issues_repo_name": "cvmlarun/RANet", "max_issues_repo_head_hexsha": "3f67a3f36aaacd9cc7fb98ec79f77db8f1ebdc60", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "codes/RANet.py", "max_forks_repo_name": "cvmlarun/RANet", "max_forks_repo_head_hexsha": "3f67a3f36aaacd9cc7fb98ec79f77db8f1ebdc60", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-05T05:41:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-05T05:41:01.000Z", "avg_line_length": 30.9724770642, "max_line_length": 123, "alphanum_fraction": 0.6531398104, "include": true, "reason": "import numpy", "num_tokens": 843}
|
! mimic NWChem tgt_sd_t_s1_1 kernel
! RL: do not redefine simd clause to be schedule(static, 1)
! RL: make the schedule clause usage be explicit
! Driver: all six loop dimensions run over 1..24; the bounds are passed
! explicitly to the offload kernel below.
implicit integer (a-z)
l1 = 1; l2 = 1; l3 = 1; l4 = 1; l5 = 1; l6 = 1;
u1 = 24; u2 = 24; u3 = 24; u4 = 24; u5 = 24; u6 = 24;
call tgt_sd_t_s1_1(l1,l2,l3,l4,l5,l6, u1,u2,u3,u4,u5,u6)
end
subroutine tgt_sd_t_s1_1(l1,l2,l3,l4,l5,l6, u1,u2,u3,u4,u5,u6)
! Offload smoke-test kernel: computes b(i6..i1) = a(i6..i1) + i3 over a
! 24^6 array on the target device, with all six loops collapsed and an
! explicit schedule(static,1) clause (the point of this test -- see the
! RL notes in the driver above).
implicit integer (a-z)
real a(24,24,24,24,24,24)
real b(24,24,24,24,24,24)
! Constant input and zeroed output.
a=3.0
b=0.0
!$omp target teams distribute parallel do schedule(static,1) collapse(6)
do i1 = l1, u1
 do i2 = l2, u2
  do i3 = l3, u3
   do i4 = l4, u4
    do i5 = l5, u5
     do i6 = l6, u6
      b(i6,i5,i4,i3,i2,i1) = a(i6,i5,i4,i3,i2,i1) + i3
     end do
    end do
   end do
  end do
 end do
end do
!$omp end target teams distribute parallel do
! write(6,*) b(1,1,1,1,1,1)
! write(6,*) a(1,1,1,1,1,1)
! Print a 4x4 slice at i3=1: every element should be 3.0 + 1 = 4.0.
write(6,*) ((b(k,j,1,1,1,1),j=1,4),k=1,4)
return
end
|
{"hexsha": "dc9392db449392194738161bf6684d1652acc438", "size": 996, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/smoke-fails/nwchem-s1_1/nwchem-s1_1.f90", "max_stars_repo_name": "raramakr/aomp", "max_stars_repo_head_hexsha": "9a224fe01ca8eff4209b8b79aa1fa15a18da65db", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/smoke-fails/nwchem-s1_1/nwchem-s1_1.f90", "max_issues_repo_name": "raramakr/aomp", "max_issues_repo_head_hexsha": "9a224fe01ca8eff4209b8b79aa1fa15a18da65db", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/smoke-fails/nwchem-s1_1/nwchem-s1_1.f90", "max_forks_repo_name": "raramakr/aomp", "max_forks_repo_head_hexsha": "9a224fe01ca8eff4209b8b79aa1fa15a18da65db", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.1333333333, "max_line_length": 72, "alphanum_fraction": 0.5763052209, "num_tokens": 480}
|
"""
Core module for methods related to flat fielding.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import inspect
import copy
import os
import numpy as np
from scipy import interpolate, ndimage
from matplotlib import pyplot as plt
from IPython import embed
from pypeit import msgs
from pypeit.core import parse
from pypeit.core import pixels
from pypeit.core import tracewave
from pypeit import utils
from pypeit.core import pydl
# TODO: Put this in utils
def linear_interpolate(x1, y1, x2, y2, x):
    r"""
    Interpolate or extrapolate between two points.

    Given a line defined by two points, :math:`(x_1,y_1)` and
    :math:`(x_2,y_2)`, return the :math:`y` value of a new point on
    the line at coordinate :math:`x`.

    This function is meant for speed. No type checking is performed and
    the only check is that the two provided ordinate coordinates are not
    numerically identical. By definition, the function will extrapolate
    without any warning.

    Args:
        x1 (:obj:`float`):
            First abscissa position
        y1 (:obj:`float`):
            First ordinate position
        x2 (:obj:`float`):
            Second abscissa position
        y2 (:obj:`float`):
            Second ordinate position
        x (:obj:`float`):
            Abscissa for new value

    Returns:
        :obj:`float`: Interpolated/extrapolated value of ordinate at
        :math:`x`.
    """
    # When the two abscissae coincide, avoid the zero division and return y1.
    return y1 if np.isclose(x1,x2) else y1 + (x-x1)*(y2-y1)/(x2-x1)
# TODO: Make this function more general and put it in utils.
def sorted_flat_data(data, coo, gpm=None):
    """
    Sort a set of data by the provided coordinates.

    Args:
        data (`numpy.ndarray`_):
            Data array with arbitrary shape and data type.
        coo (`numpy.ndarray`_):
            Relevant coordinate array. Shape must match ``data``.
        gpm (`numpy.ndarray`_, optional):
            Good-pixel mask for array. Used to select data (where
            ``gpm`` is True) to sort and return. Shape must match
            ``data``. If None, all data is used.

    Returns:
        tuple: Four `numpy.ndarray`_ objects: (1) the boolean mask of
        pixels used in the sort (the input ``gpm`` itself when provided,
        otherwise an all-True mask matching ``data``); (2) the indices
        that sort the flattened good coordinate values; (3) the sorted
        coordinates; and (4) the data sorted to match.  The good-pixel
        data can be reconstructed via::

            _data = np.zeros(data.shape, dtype=data.dtype)
            _data[gpm] = srt_data[np.argsort(srt)]
    """
    # Without a mask, every pixel participates.
    if gpm is None:
        gpm = np.ones(data.shape, dtype=bool)

    # Boolean indexing already yields 1D arrays; ravel() keeps the result
    # explicitly flat regardless of how the mask was supplied.
    good_coo = coo[gpm].ravel()
    srt = np.argsort(good_coo)
    return gpm, srt, good_coo[srt], data[gpm].ravel()[srt]
def illum_filter(spat_flat_data_raw, med_width):
    """
    Filter the flat data to produce the empirical illumination
    profile.

    This is primarily a convenience method for
    :func:`construct_illum_profile`. The method first median filters
    with a window set by ``med_width`` and then Gaussian-filters the
    result with a kernel sigma set to be the maximum of 0.5 or
    ``med_width``/20.

    Args:
        spat_flat_data_raw (`numpy.ndarray`_):
            Raw flat data collapsed along the spectral direction.
        med_width (:obj:`int`):
            Width of the median filter window.

    Returns:
        `numpy.ndarray`_: Returns the filtered spatial profile of the
        flat data.
    """
    # Median filter the data
    spat_flat_data = utils.fast_running_median(spat_flat_data_raw, med_width)
    # Gaussian filter the data with a kernel that is 1/20th of the
    # median-filter width (or at least 0.5 pixels where here a "pixel"
    # is just the index of the data to fit).
    # FIX: call through the public scipy.ndimage namespace; the
    # scipy.ndimage.filters submodule is deprecated (removed in SciPy 2.0).
    return ndimage.gaussian_filter1d(spat_flat_data, np.fmax(med_width/20.0, 0.5),
                                     mode='nearest')
def construct_illum_profile(norm_spec, spat_coo, slitwidth, spat_gpm=None, spat_samp=5,
                            illum_iter=0, illum_rej=None, debug=False):
    """
    Construct the slit illumination profile.

    Provided an image with the spectral response normalized out, this
    iteratively filters and rejects the flat-field data to construct
    the empirical slit illumination profile. The data are collapsed
    spectrally using the provided coordinate array to construct a 1D
    profile. Nominally, the provided spatial coordinates and
    good-pixel mask should be for a single slit.

    The iterations involve constructing the illumination profile
    using :func:`illum_profile` and then rejecting deviant residuals.
    Each rejection iteration recomputes the standard deviation and
    pixels to reject from the full input set (i.e., rejected pixels
    are not kept between iterations). Rejection iterations are only
    performed if ``illum_iter > 0 and illum_rej is not None``.

    Args:
        norm_spec (`numpy.ndarray`_):
            Flat-field image (2D array) with the spectral response
            normalized out.
        spat_coo (`numpy.ndarray`_):
            An image with the slit spatial coordinates, expected to
            be with respect to a single slit and span the full image
            region selected by the good-pixel mask (``spat_gpm``).
            Shape must match ``norm_spec``.
        slitwidth (:obj:`float`):
            Fiducial slit width used to set the median-filter window
            size.
        spat_gpm (`numpy.ndarray`_, optional):
            The good-pixel mask that selects the pixels to include in
            the slit illumination profile calculation. If None, **all
            pixels in the provided images are used**. For virtually
            all practical purposes, this array should be provided.
        spat_samp (:obj:`int`, :obj:`float`, optional):
            Spatial sampling for slit illumination function. This is
            the width of the median filter in detector pixels used to
            determine the slit illumination function, and thus sets
            the minimum scale on which the illumination function will
            have features.
        illum_iter (:obj:`int`, optional):
            Iteratively construct the slit illumination profile and
            reject outliers. To include rejection iterations, this
            must be larger than 0, and you have to provide the sigma
            threshold (``illum_rej``); otherwise, no iterations are
            performed.
        illum_rej (:obj:`float`, optional):
            Sigma rejection threshold for iterations. If None, no
            rejection iterations will be performed, regardless of the
            value of ``illum_iter``.
        debug (:obj:`bool`, optional):
            Construct plots output to the screen that show the result
            of each iteration. Regardless of this flag, no plots are
            shown if there are no iterations.

    Returns:
        tuple: Five `numpy.ndarray`_ objects are returned:

            - A boolean array with the pixels used in the
              construction of the illumination profile. Shape is
              identical to ``norm_spec``.
            - A vector with the length of the number of good pixels
              (sum of the first returned object) with the indices
              that sorts the flattened list of good coordinate
              values.
            - A vector with the sorted coordinate data.
            - A vector with the data sorted by the respective
              coordinates.
            - A vector with the slit illumination profile.

        To construct the empirical 2D illumination profile::

            illum = np.zeros(norm_spec.shape, dtype=float)
            illum[_spat_gpm] = profile[np.argsort(srt)]

        where ``norm_spec`` is the input array, ``_spat_gpm`` is the
        first returned object, ``srt`` is the second returned object,
        and ``profile`` is the last returned object.
    """
    if illum_rej is None and illum_iter > 0:
        msgs.warn('Cannot use iterative rejection to construct the illumination function if the '
                  'rejection is not provided. Continuing without iteration.')

    # Work on a local copy of the mask so the caller's array is never
    # modified and the None case has a concrete boolean array.
    _spat_gpm = np.ones(norm_spec.shape, dtype=bool) if spat_gpm is None else np.copy(spat_gpm)
    _spat_gpm, spat_srt, spat_coo_data, spat_flat_data_raw \
            = sorted_flat_data(norm_spec, spat_coo, gpm=_spat_gpm)
    spat_gpm_data_raw = np.ones(spat_flat_data_raw.size, dtype=bool)

    # Assume the density of samples in any given spatial coordinate is
    # roughly the same at all spatial positions. Calculate the fraction
    # of the slit width for the median filter as set by the
    # ``spat_samp`` parameter.
    # BUG FIX: sum the local mask; the original summed ``spat_gpm``,
    # which fails when the caller leaves it as None.
    med_width = int(np.ceil(np.sum(_spat_gpm) * spat_samp / slitwidth))

    # Construct the filtered illumination profile (iteratively if requested)
    for i in range(illum_iter+1):
        spat_flat_data = illum_filter(spat_flat_data_raw[spat_gpm_data_raw], med_width)

        if illum_iter == 0 or illum_rej is None:
            # No iterations so we're done (skips debug plot).
            # BUG FIX: return the local mask; the original returned the
            # ``spat_gpm`` argument, which is None when not provided.
            return _spat_gpm, spat_srt, spat_coo_data, spat_flat_data_raw, spat_flat_data

        if i == illum_iter:
            # Don't perform the rejection on the last iteration
            break

        # Iteration does not keep previous rejections. NOTE: Rejections
        # at either end of the data array would cause the interpolation
        # below to fault, which is why I set bound_error to False. This
        # may be a problem though because I set the fill value to 0...
        interp = interpolate.interp1d(spat_coo_data[spat_gpm_data_raw], spat_flat_data,
                                      bounds_error=False, fill_value=0.0, assume_sorted=True)
        resid = spat_flat_data_raw - interp(spat_coo_data)
        sigma = np.std(resid)
        spat_gpm_data_raw = np.absolute(resid) < illum_rej*sigma

    # TODO: Provide a report?

    if debug:
        plt.clf()
        ax = plt.gca()
        ax.scatter(spat_coo_data[spat_gpm_data_raw], spat_flat_data_raw[spat_gpm_data_raw],
                   marker='.', lw=0, s=10, color='k', zorder=1, label='used data')
        ax.scatter(spat_coo_data[np.invert(spat_gpm_data_raw)],
                   spat_flat_data_raw[np.invert(spat_gpm_data_raw)],
                   marker='.', lw=0, s=10, color='C3', zorder=2, label='rejected data')
        ax.plot(spat_coo_data[spat_gpm_data_raw], spat_flat_data, color='C2', zorder=3,
                label='filtered profile')
        ax.legend()
        ax.set_title('Optimized slit illumination profile')
        ax.set_xlabel('Spatial coordinate')
        ax.set_ylabel('Spectrally collapsed, normalized flux')
        plt.show()

    # Include the rejected data in the full image good-pixel mask
    _spat_gpm[_spat_gpm] = spat_gpm_data_raw[np.argsort(spat_srt)]

    # Recreate the illumination profile data
    _spat_gpm, spat_srt, spat_coo_data, spat_flat_data_raw \
            = sorted_flat_data(norm_spec, spat_coo, gpm=_spat_gpm)

    return _spat_gpm, spat_srt, spat_coo_data, spat_flat_data_raw, \
                illum_filter(spat_flat_data_raw, med_width)
# TODO: See pypeit/deprecated/flat.py for the previous version. We need
# to continue to vet this algorithm to make sure there are no
# unforeseen corner cases that cause errors.
def tweak_slit_edges(left, right, spat_coo, norm_flat, thresh=0.93, maxfrac=0.1, debug=False):
    r"""
    Adjust slit edges based on the normalized slit illumination profile.

    Args:
        left (`numpy.ndarray`_):
            Array with the left slit edge for a single slit. Shape is
            :math:`(N_{\rm spec},)`.
        right (`numpy.ndarray`_):
            Array with the right slit edge for a single slit. Shape
            is :math:`(N_{\rm spec},)`.
        spat_coo (`numpy.ndarray`_):
            Spatial pixel coordinates in fractions of the slit width
            at each spectral row for the provided normalized flat
            data. Coordinates are relative to the left edge (with the
            left edge at 0.). Shape is :math:`(N_{\rm flat},)`.
            Function assumes the coordinate array is sorted.
        norm_flat (`numpy.ndarray`_):
            Normalized flat data that provide the slit illumination
            profile. Shape is :math:`(N_{\rm flat},)`.
        thresh (:obj:`float`, optional):
            Threshold of the normalized flat profile at which to
            place the two slit edges.
        maxfrac (:obj:`float`, optional):
            The maximum fraction of the slit width that the slit edge
            can be adjusted by this algorithm. If ``maxfrac = 0.1``,
            this means the maximum change in the slit width (either
            narrowing or broadening) is 20% (i.e., 10% for either
            edge).
        debug (:obj:`bool`, optional):
            Show flow interrupting plots that show illumination
            profile in the case of a failure and the placement of the
            tweaked edge for each side of the slit regardless.

    Returns:
        tuple: Returns six objects:

            - The threshold used to set the left edge
            - The fraction of the slit that the left edge is shifted to
              the right
            - The adjusted left edge
            - The threshold used to set the right edge
            - The fraction of the slit that the right edge is shifted to
              the left
            - The adjusted right edge
    """
    # Check input
    nspec = len(left)
    if len(right) != nspec:
        msgs.error('Input left and right traces must have the same length!')
    # Median slit width
    slitwidth = np.median(right - left)
    # Setup the masked array for finding the continuous left and right
    # regions
    masked_flat = np.ma.MaskedArray(norm_flat)
    # ------------------------------------------------------------------
    # Adjust the left edge
    # Get the maximum to the left of the center
    # TODO: Set a parameter for this
    ileft = (spat_coo > 0.1) & (spat_coo < 0.4)
    if not np.any(ileft):
        msgs.error('No coordinates toward the left of the slit center. Slit boundaries are '
                   'likely in error, and you probably have a bad (very short) slit. Slit center '
                   'at center row is {0:.1f}.'.format((left[nspec//2] + right[nspec//2])/2))
    left_thresh = thresh * np.amax(norm_flat[ileft])
    # Find the data that are less than the provided threshold and
    # within the limits set by the offset
    masked_flat[(spat_coo >= maxfrac) | (norm_flat >= left_thresh)] = np.ma.masked
    # To tweak, there must be at least one pixel that meet the above
    # criteria
    left_shift = 0.
    new_left = np.copy(left)
    if not np.all(masked_flat.mask):
        # Find the last index of the first contiguous region
        contiguous_region = np.ma.flatnotmasked_contiguous(masked_flat)[0]
        if contiguous_region.stop is None:
            if debug:
                plt.scatter(spat_coo[masked_flat.mask], norm_flat[masked_flat.mask], marker='.',
                            s=10, color='C3', lw=0)
                plt.scatter(spat_coo[np.invert(masked_flat.mask)],
                            norm_flat[np.invert(masked_flat.mask)], marker='.', s=10, color='k',
                            lw=0)
                plt.show()
            msgs.error('Tweak left edge has failed!  Bad continuous region.')
        i = contiguous_region.stop-1
        # NOTE(review): when i == 0 the test below reads norm_flat[-1], which
        # wraps to the *last* element; the guard likely intended i >= 1 --
        # confirm before relying on this warning.
        if i >= 0 and norm_flat[i-1] > norm_flat[i]:
            msgs.warn('When adjusting left edge, found noisy illumination profile structure.')
        if debug:
            plt.scatter(spat_coo[masked_flat.mask], norm_flat[masked_flat.mask], marker='.', s=10,
                        color='C3', lw=0)
            plt.scatter(spat_coo[np.invert(masked_flat.mask)],
                        norm_flat[np.invert(masked_flat.mask)], marker='.', s=10, color='k', lw=0)
            plt.scatter(spat_coo[i], norm_flat[i], marker='o', facecolor='none', s=50, color='C1')
            plt.show()
        # NOTE(review): assumes i+1 is in bounds; if the unmasked region
        # reaches the end of the array this raises IndexError -- confirm the
        # masking above always prevents that.
        if norm_flat[i+1] < left_thresh:
            msgs.warn('Left slit boundary tweak limited by maximum allowed shift: {:.1f}%'.format(
                100*maxfrac))
            left_shift = maxfrac
        else:
            left_shift = linear_interpolate(norm_flat[i], spat_coo[i], norm_flat[i+1],
                                            spat_coo[i+1], left_thresh)
        # NOTE(review): this message renders '%' twice ("...% % (...)") --
        # cosmetic, but probably unintended.
        msgs.info('Tweaking left slit boundary by {0:.1f}%'.format(100*left_shift) +
                  ' % ({0:.2f} pixels)'.format(left_shift*slitwidth))
        new_left += left_shift * slitwidth
    # ------------------------------------------------------------------
    # Adjust the right edge
    # Get the maximum to the right of the center
    # TODO: Set a parameter for this
    iright = (spat_coo > 0.6) & (spat_coo < 0.9)
    if not np.any(iright):
        msgs.error('No coordinates toward the right of the slit center. Slit boundaries are '
                   'likely in error, and you probably have a bad (very short) slit. Slit center '
                   'at center row is {0:.1f}.'.format((left[nspec//2] + right[nspec//2])/2))
    right_thresh = thresh * np.amax(norm_flat[iright])
    # Find the data that are less than the provided threshold and
    # within the limits set by the offset
    masked_flat.mask = np.ma.nomask
    masked_flat[(spat_coo <= 1 - maxfrac) | (norm_flat >= right_thresh)] = np.ma.masked
    # To tweak, there must be at least one pixel that meets the above
    # criteria
    right_shift = 0.
    new_right = np.copy(right)
    if not np.all(masked_flat.mask):
        # Find the first index of the last contiguous region
        contiguous_region = np.ma.flatnotmasked_contiguous(masked_flat)[-1]
        if contiguous_region.start is None:
            if debug:
                plt.scatter(spat_coo[masked_flat.mask], norm_flat[masked_flat.mask], marker='.',
                            s=10, color='C3', lw=0)
                plt.scatter(spat_coo[np.invert(masked_flat.mask)],
                            norm_flat[np.invert(masked_flat.mask)], marker='.', s=10, color='k',
                            lw=0)
                plt.show()
            msgs.error('Tweak right edge has failed!  Bad continuous region.')
        i = contiguous_region.start
        if i < norm_flat.size-1 and norm_flat[i+1] > norm_flat[i]:
            msgs.warn('When adjusting right edge, found noisy illumination profile structure.')
        if debug:
            plt.scatter(spat_coo[masked_flat.mask], norm_flat[masked_flat.mask], marker='.', s=10,
                        color='C3', lw=0)
            plt.scatter(spat_coo[np.invert(masked_flat.mask)],
                        norm_flat[np.invert(masked_flat.mask)], marker='.', s=10, color='k', lw=0)
            plt.scatter(spat_coo[i], norm_flat[i], marker='o', facecolor='none', s=50, color='C1')
            plt.show()
        # NOTE(review): when i == 0 this reads norm_flat[-1] (wraps to the
        # last element) -- confirm the masking keeps i away from index 0.
        if norm_flat[i-1] < right_thresh:
            msgs.warn('Right slit boundary tweak limited by maximum allowed shift: {:.1f}%'.format(
                100*maxfrac))
            right_shift = maxfrac
        else:
            right_shift = 1-linear_interpolate(norm_flat[i-1], spat_coo[i-1], norm_flat[i],
                                               spat_coo[i], right_thresh)
        # NOTE(review): same doubled '%' as in the left-edge message above.
        msgs.info('Tweaking right slit boundary by {0:.1f}%'.format(100*right_shift) +
                  ' % ({0:.2f} pixels)'.format(right_shift*slitwidth))
        new_right -= right_shift * slitwidth

    return left_thresh, left_shift, new_left, right_thresh, right_shift, new_right
# TODO: How much of the rest of this is used?
def flatfield(sciframe, flatframe, bpix, illum_flat=None, snframe=None, varframe=None):
    """
    Flat field the input image.

    Parameters
    ----------
    sciframe : `numpy.ndarray`_
        2D science image.
    flatframe : `numpy.ndarray`_
        2D flat-field image.
    bpix : `numpy.ndarray`_
        Bad-pixel mask.  Modified in place: pixels where the final flat
        is not positive are set to 1.0.
    illum_flat : `numpy.ndarray`_, optional
        Slit illumination profile image; multiplied into the flat when
        it is not identically unity.
    snframe : `numpy.ndarray`_, optional
        Signal-to-noise image.  Mutually exclusive with ``varframe``.
    varframe : `numpy.ndarray`_, optional
        Variance image.  Mutually exclusive with ``snframe``.

    Returns
    -------
    `numpy.ndarray`_ or tuple
        The flat-fielded image.  If ``varframe`` is provided, also
        returns the updated variance image; if ``snframe`` is provided,
        also returns the updated error image.
    """
    # The two noise descriptions are mutually exclusive.  (Was a bitwise
    # `&` on booleans; use the idiomatic logical `and`.)
    if varframe is not None and snframe is not None:
        msgs.error("Cannot set both varframe and snframe")

    # Fold in the slit profile
    final_flat = flatframe.copy()
    if illum_flat is not None:
        if np.any(illum_flat != 1.0):
            msgs.info('Applying illumination flat')
            final_flat *= illum_flat  # Previous code was modifying flatframe!

    # New image: pixels with a non-positive flat are left at zero and
    # flagged in the bad-pixel mask.
    retframe = np.zeros_like(sciframe)
    w = np.where(final_flat > 0.0)
    retframe[w] = sciframe[w]/final_flat[w]
    if w[0].size != final_flat.size:
        ww = np.where(final_flat <= 0.0)
        bpix[ww] = 1.0

    # Variance?
    if varframe is not None:
        # This is risky -- Be sure your flat is well behaved!!
        retvar = np.zeros_like(sciframe)
        retvar[w] = varframe[w]/final_flat[w]**2
        return retframe, retvar

    # Error image
    if snframe is None:
        return retframe
    # Convert the signal-to-noise image into a 1-sigma error image.
    errframe = np.zeros_like(sciframe)
    wnz = np.where(snframe > 0.0)
    errframe[wnz] = retframe[wnz]/snframe[wnz]
    return retframe, errframe
|
{"hexsha": "7e79d680bfbaa3bc1a117b445bacdd05bff384bc", "size": 22312, "ext": "py", "lang": "Python", "max_stars_repo_path": "pypeit/core/flat.py", "max_stars_repo_name": "ykwang1/PypeIt", "max_stars_repo_head_hexsha": "a96cff699f1284905ce7ef19d06a9027cd333c63", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pypeit/core/flat.py", "max_issues_repo_name": "ykwang1/PypeIt", "max_issues_repo_head_hexsha": "a96cff699f1284905ce7ef19d06a9027cd333c63", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pypeit/core/flat.py", "max_forks_repo_name": "ykwang1/PypeIt", "max_forks_repo_head_hexsha": "a96cff699f1284905ce7ef19d06a9027cd333c63", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1566731141, "max_line_length": 99, "alphanum_fraction": 0.6259411976, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5294}
|
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook tsa_arma_0.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Autoregressive Moving Average (ARMA): Sunspots data
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.graphics.api import qqplot
# ## Sunspots Data
# Yearly sunspot activity, re-indexed with annual dates from 1700 to 2008.
print(sm.datasets.sunspots.NOTE)
dta = sm.datasets.sunspots.load_pandas().data
dta.index = pd.Index(sm.tsa.datetools.dates_from_range('1700', '2008'))
del dta["YEAR"]
dta.plot(figsize=(12, 8))
# ACF/PACF of the raw series to guide order selection.
fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(dta.values.squeeze(), lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(dta, lags=40, ax=ax2)
# Fit ARMA(2,0) and ARMA(3,0) and compare information criteria.
arma_mod20 = sm.tsa.ARMA(dta, (2, 0)).fit(disp=False)
print(arma_mod20.params)
arma_mod30 = sm.tsa.ARMA(dta, (3, 0)).fit(disp=False)
print(arma_mod20.aic, arma_mod20.bic, arma_mod20.hqic)
print(arma_mod30.params)
print(arma_mod30.aic, arma_mod30.bic, arma_mod30.hqic)
# * Does our model obey the theory?
# Durbin-Watson statistic of the ARMA(3,0) residuals.
sm.stats.durbin_watson(arma_mod30.resid.values)
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
ax = arma_mod30.resid.plot(ax=ax)
resid = arma_mod30.resid
# Normality test and Q-Q plot of the residuals.
stats.normaltest(resid)
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
fig = qqplot(resid, line='q', ax=ax, fit=True)
fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(resid.values.squeeze(), lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(resid, lags=40, ax=ax2)
# Residual autocorrelations with Ljung-Box Q statistics (qstat=True).
r, q, p = sm.tsa.acf(resid.values.squeeze(), qstat=True)
data = np.c_[range(1, 41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
print(table.set_index('lag'))
# * This indicates a lack of fit.
# * In-sample dynamic prediction. How good does our model do?
predict_sunspots = arma_mod30.predict('1990', '2012', dynamic=True)
print(predict_sunspots)
fig, ax = plt.subplots(figsize=(12, 8))
ax = dta.loc['1950':].plot(ax=ax)
fig = arma_mod30.plot_predict(
    '1990', '2012', dynamic=True, ax=ax, plot_insample=False)
def mean_forecast_err(y, yhat):
    """Return the mean forecast error (average of y - yhat)."""
    return (y - yhat).mean()
mean_forecast_err(dta.SUNACTIVITY, predict_sunspots)
# ### Exercise: Can you obtain a better fit for the Sunspots model? (Hint:
# sm.tsa.AR has a method select_order)
# ### Simulated ARMA(4,1): Model Identification is Difficult
from statsmodels.tsa.arima_process import ArmaProcess
np.random.seed(1234)
# include zero-th lag
arparams = np.array([1, .75, -.65, -.55, .9])
maparams = np.array([1, .65])
# Let's make sure this model is estimable.
arma_t = ArmaProcess(arparams, maparams)
arma_t.isinvertible
arma_t.isstationary
# * What does this mean?
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
ax.plot(arma_t.generate_sample(nsample=50))
# Try a different set of AR coefficients (stationarity re-checked below).
arparams = np.array([1, .35, -.15, .55, .1])
maparams = np.array([1, .65])
arma_t = ArmaProcess(arparams, maparams)
arma_t.isstationary
# Simulate 500 observations after a 250-sample burn-in.
arma_rvs = arma_t.generate_sample(nsample=500, burnin=250, scale=2.5)
fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(arma_rvs, lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(arma_rvs, lags=40, ax=ax2)
# * For mixed ARMA processes the Autocorrelation function is a mixture of
# exponentials and damped sine waves after (q-p) lags.
# * The partial autocorrelation function is a mixture of exponentials and
# dampened sine waves after (p-q) lags.
# Fit an under-specified ARMA(1,1) and inspect the residual Q statistics.
arma11 = sm.tsa.ARMA(arma_rvs, (1, 1)).fit(disp=False)
resid = arma11.resid
r, q, p = sm.tsa.acf(resid, qstat=True)
data = np.c_[range(1, 41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
print(table.set_index('lag'))
# Fit ARMA(4,1) -- the order used to simulate the data -- for comparison.
arma41 = sm.tsa.ARMA(arma_rvs, (4, 1)).fit(disp=False)
resid = arma41.resid
r, q, p = sm.tsa.acf(resid, qstat=True)
data = np.c_[range(1, 41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
print(table.set_index('lag'))
# ### Exercise: How good of in-sample prediction can you do for another
# series, say, CPI
macrodta = sm.datasets.macrodata.load_pandas().data
macrodta.index = pd.Index(
    sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3'))
cpi = macrodta["cpi"]
# #### Hint:
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
ax = cpi.plot(ax=ax)
ax.legend()
# P-value of the unit-root test, resoundingly rejects the null of a unit-
# root.
print(sm.tsa.adfuller(cpi)[1])
|
{"hexsha": "e1f31f28b4b6252c3c15f8b0b26b60d702ce1e7b", "size": 4589, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/python/tsa_arma_0.py", "max_stars_repo_name": "madhushree14/statsmodels", "max_stars_repo_head_hexsha": "04f00006a7aeb1c93d6894caa420698400da6c33", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-13T15:45:38.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-01T14:41:04.000Z", "max_issues_repo_path": "examples/python/tsa_arma_0.py", "max_issues_repo_name": "madhushree14/statsmodels", "max_issues_repo_head_hexsha": "04f00006a7aeb1c93d6894caa420698400da6c33", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-06-16T04:16:40.000Z", "max_issues_repo_issues_event_max_datetime": "2015-06-16T04:16:40.000Z", "max_forks_repo_path": "examples/python/tsa_arma_0.py", "max_forks_repo_name": "madhushree14/statsmodels", "max_forks_repo_head_hexsha": "04f00006a7aeb1c93d6894caa420698400da6c33", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-04-07T00:06:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-17T15:11:36.000Z", "avg_line_length": 26.6802325581, "max_line_length": 74, "alphanum_fraction": 0.7095227718, "include": true, "reason": "import numpy,from scipy,import statsmodels,from statsmodels", "num_tokens": 1472}
|
# Affine coupling layer from Dinh et al. (2017)
# Includes 1x1 convolution from in Putzky and Welling (2019)
# Author: Philipp Witte, pwitte3@gatech.edu
# Date: January 2020
export LearnedCouplingLayerSLIM
"""
CS = LearnedCouplingLayerSLIM(nx1, nx2, nx_in, ny1, ny2, ny_in, n_hidden, batchsize;
logdet::Bool=false, permute::Bool=false, k1=3, k2=3, p1=1, p2=1, s1=1, s2=1)
Create an invertible SLIM coupling layer with a learned data-to-image-space map.
*Input*:
- `nx1`, `nx2`, `nx_in`: spatial dimensions and no. of channels of input image
- `ny1`, `ny2`, `ny_in`: spatial dimensions and no. of channels of input data
- `n_hidden`: number of hidden units in conditional residual block
 - `logdet`: bool to indicate whether to return the logdet (default is `false`)
- `permute`: bool to indicate whether to apply a channel permutation (default is `false`)
- `k1`, `k2`: kernel size of convolutions in residual block. `k1` is the kernel of the first and third
operator, `k2` is the kernel size of the second operator
- `p1`, `p2`: padding for the first and third convolution (`p1`) and the second convolution (`p2`)
- `s1`, `s2`: stride for the first and third convolution (`s1`) and the second convolution (`s2`)
*Output*:
- `CS`: Invertible SLIM coupling layer with learned data-to-image map
*Usage:*
- Forward mode: `Y, logdet = CS.forward(X, D, A)` (if constructed with `logdet=true`)
- Inverse mode: `X = CS.inverse(Y, D, A)`
- Backward mode: `ΔX, X = CS.backward(ΔY, Y, D, A)`
- where `A` is a linear forward modeling operator and `D` is the observed data.
*Trainable parameters:*
 - None in `CS` itself
 - Trainable parameters in residual block `CS.RB` and 1x1 convolution layer `CS.C`
See also: [`Conv1x1`](@ref), [`ResidualBlock`](@ref), [`get_params`](@ref), [`clear_grad!`](@ref)
"""
struct LearnedCouplingLayerSLIM <: NeuralNetLayer
    C::Union{Conv1x1, Nothing}   # optional 1x1 channel permutation (nothing when permute=false)
    RB::ConditionalResidualBlock # conditional residual block (learned data-to-image map)
    logdet::Bool                 # whether forward() also returns the log-determinant
end
@Flux.functor LearnedCouplingLayerSLIM
# Constructor from input dimensions
function LearnedCouplingLayerSLIM(nx1, nx2, nx_in, ny1, ny2, ny_in, n_hidden, batchsize;
    k1=3, k2=3, p1=1, p2=1, s1=1, s2=1, logdet::Bool=false, permute::Bool=false)

    # Optional 1x1 convolution for channel permutation
    C = permute ? Conv1x1(nx_in) : nothing

    # Conditional residual block operating on half the image channels
    RB = ConditionalResidualBlock(nx1, nx2, Int(nx_in/2), ny1, ny2, ny_in, n_hidden, batchsize;
        k1=k1, k2=k2, p1=p1, p2=p2, s1=s1, s2=s2)

    return LearnedCouplingLayerSLIM(C, RB, logdet)
end
# Forward pass: Input X, Output Y
# (Removed the unused destructuring of size(X); none of the values were read.)
function forward(X::AbstractArray{Float32, 4}, D, CS::LearnedCouplingLayerSLIM)

    # Optional channel permutation via the 1x1 convolution
    isnothing(CS.C) ? (X_ = copy(X)) : (X_ = CS.C.forward(X))
    X1_, X2_ = tensor_split(X_)

    # Additive coupling: X1 passes through, X2 is shifted by RB(X1, D)
    Y1_ = copy(X1_)
    Y2_ = X2_ + CS.RB.forward(X1_, D)[1]
    Y_ = tensor_cat(Y1_, Y2_)

    # Undo the channel permutation
    isnothing(CS.C) ? (Y = copy(Y_)) : (Y = CS.C.inverse(Y_))

    # Additive coupling has a unit Jacobian, hence logdet is identically zero
    CS.logdet ? (return Y, 0f0) : (return Y)
end
# Inverse pass: Input Y, Output X
# (Removed the unused destructuring of size(Y); none of the values were read.)
function inverse(Y::AbstractArray{Float32, 4}, D, CS::LearnedCouplingLayerSLIM; save=false)

    # Apply the channel permutation (its inverse is applied on the way out)
    isnothing(CS.C) ? (Y_ = copy(Y)) : (Y_ = CS.C.forward(Y))
    Y1_, Y2_ = tensor_split(Y_)

    # Invert the additive coupling: subtract the residual-block shift
    X1_ = copy(Y1_)
    X2_ = Y2_ - CS.RB.forward(X1_, D)[1]
    X_ = tensor_cat(X1_, X2_)
    isnothing(CS.C) ? (X = copy(X_)) : (X = CS.C.inverse(X_))

    # save=true additionally returns the permuted state X_ for reuse in backward()
    save ? (return X, X_) : (return X)
end
# Backward pass: Input (ΔY, Y), Output (ΔX, X)
# (Removed the unused destructuring of size(X); none of the values were read.)
function backward(ΔY::AbstractArray{Float32, 4}, Y::AbstractArray{Float32, 4}, D, CS::LearnedCouplingLayerSLIM)

    # Recompute forward states from the output (invertibility avoids storing activations)
    X, X_ = inverse(Y, D, CS; save=true)

    # Backpropagate through the (optional) 1x1 convolution
    isnothing(CS.C) ? (ΔY_ = copy(ΔY)) : (ΔY_ = CS.C.forward((ΔY, Y))[1])
    ΔY1_, ΔY2_ = tensor_split(ΔY_)

    # Coupling gradients: identity branch plus residual-block backprop
    ΔX2_ = copy(ΔY2_)
    ΔX1_, ΔD = CS.RB.backward(ΔY2_, D.*0f0, tensor_split(X_)[1], D)[1:2]
    ΔX1_ += ΔY1_
    ΔX_ = tensor_cat(ΔX1_, ΔX2_)
    isnothing(CS.C) ? (ΔX = copy(ΔX_)) : (ΔX = CS.C.inverse((ΔX_, X_))[1])

    return ΔX, ΔD, X
end
# Clear gradients of the residual block and, if present, the 1x1 convolution
function clear_grad!(CS::LearnedCouplingLayerSLIM)
    if !isnothing(CS.C)
        clear_grad!(CS.C)
    end
    clear_grad!(CS.RB)
end
# Get parameters: residual-block parameters first, then (optionally) the 1x1 convolution's
function get_params(CS::LearnedCouplingLayerSLIM)
    params = get_params(CS.RB)
    if !isnothing(CS.C)
        params = vcat(params, get_params(CS.C))
    end
    return params
end
# Put parameters: the first five entries feed the residual block, the rest the 1x1 convolution
function put_params!(CS::LearnedCouplingLayerSLIM, Params::Array{Any,1})
    put_params!(CS.RB, Params[1:5])
    return !isnothing(CS.C) && put_params!(CS.C, Params[6:end])
end
|
{"hexsha": "c71889406f44dabb7d5d1a75b754f60a4e179380", "size": 4765, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/layers/invertible_layer_slim_learned.jl", "max_stars_repo_name": "alisiahkoohi/InvertibleNetworks.jl", "max_stars_repo_head_hexsha": "719f788bcd12909496dfd5322d9b8b953996fc57", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-24T15:23:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-24T15:23:53.000Z", "max_issues_repo_path": "src/layers/invertible_layer_slim_learned.jl", "max_issues_repo_name": "alisiahkoohi/InvertibleNetworks.jl", "max_issues_repo_head_hexsha": "719f788bcd12909496dfd5322d9b8b953996fc57", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/layers/invertible_layer_slim_learned.jl", "max_forks_repo_name": "alisiahkoohi/InvertibleNetworks.jl", "max_forks_repo_head_hexsha": "719f788bcd12909496dfd5322d9b8b953996fc57", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5562913907, "max_line_length": 137, "alphanum_fraction": 0.6642182581, "num_tokens": 1658}
|
import numpy as np
import radvel.kepler
def timetrans_to_timeperi(tc, per, ecc, omega):
    """Convert Time of Transit to Time of Periastron Passage.

    Args:
        tc (float): time of transit
        per (float): period [days]
        ecc (float): eccentricity
        omega (float): longitude of periastron (radians)

    Returns:
        float: time of periastron passage
    """
    # Unbound (ecc >= 1) orbits are passed through unchanged. Truth-testing a
    # multi-element ecc array raises ValueError; in that case fall through to
    # the vectorized computation below.
    try:
        if ecc >= 1:
            return tc
    except ValueError:
        pass

    f_transit = np.pi / 2 - omega  # true anomaly during transit
    ecc_anom = 2 * np.arctan(
        np.tan(f_transit / 2) * np.sqrt((1 - ecc) / (1 + ecc))
    )  # eccentric anomaly
    # Kepler's equation gives the mean anomaly; subtract its time offset.
    return tc - per / (2 * np.pi) * (ecc_anom - ecc * np.sin(ecc_anom))
def timeperi_to_timetrans(tp, per, ecc, omega, secondary=False):
    """
    Convert Time of Periastron to Time of Transit.

    Args:
        tp (float): time of periastron
        per (float): period [days]
        ecc (float): eccentricity
        omega (float): argument of peri (radians)
        secondary (bool): calculate time of secondary eclipse instead

    Returns:
        float: time of inferior conjunction (time of transit if system is transiting)
    """
    # Unbound (ecc >= 1) orbits are passed through unchanged. Truth-testing a
    # multi-element ecc array raises ValueError; in that case fall through to
    # the vectorized computation below.
    try:
        if ecc >= 1:
            return tp
    except ValueError:
        pass

    if secondary:
        f = 3*np.pi/2 - omega                                       # true anomaly during secondary eclipse
        ee = 2 * np.arctan(np.tan(f/2) * np.sqrt((1-ecc)/(1+ecc)))  # eccentric anomaly

        # ensure that ee is between 0 and 2*pi (always the eclipse AFTER tp)
        if isinstance(ee, np.float64):
            # NOTE(review): this scalar branch adds 2*pi unconditionally, even
            # when ee >= 0, unlike the array branch below — confirm intended.
            ee = ee + 2 * np.pi
        else:
            # BUG FIX: the previous code assigned the *full* array (ee + 2*pi)
            # to the boolean-masked subset, raising a shape-mismatch ValueError
            # whenever only some elements were negative. Shift only the
            # negative entries.
            ee[ee < 0.0] = ee[ee < 0.0] + 2 * np.pi
    else:
        f = np.pi/2 - omega                                         # true anomaly during transit
        ee = 2 * np.arctan(np.tan(f/2) * np.sqrt((1-ecc)/(1+ecc)))  # eccentric anomaly

    tc = tp + per/(2*np.pi) * (ee - ecc*np.sin(ee))                 # time of conjunction

    return tc
def true_anomaly(t, tp, per, e):
    """Calculate the true anomaly for a given time, period, eccentricity.

    Args:
        t (array): array of times in JD
        tp (float): time of periastron, same units as t
        per (float): orbital period in days
        e (float): eccentricity

    Returns:
        array: true anomaly at each time
    """
    # Mean anomaly wrapped into [0, 2*pi) (f in Murray and Dermott p. 27).
    phase = (t - tp) / per
    mean_anom = 2 * np.pi * (phase - np.floor(phase))

    # Solve Kepler's equation for the eccentric anomaly.
    eccarr = np.zeros(t.size) + e
    ecc_anom = radvel.kepler.kepler(mean_anom, eccarr)

    # Convert eccentric anomaly to true anomaly.
    ratio = ((1.0 + e) / (1.0 - e)) ** 0.5
    return 2.0 * np.arctan(ratio * np.tan(ecc_anom / 2.0))
|
{"hexsha": "281ea81f45d87d6eb914e6ce2bc35f0d17c23bce", "size": 2607, "ext": "py", "lang": "Python", "max_stars_repo_path": "radvel/orbit.py", "max_stars_repo_name": "spencerhurt/radvel", "max_stars_repo_head_hexsha": "05a1a1e020d239bf7cba8575b68a6d83ec0b3a5c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 49, "max_stars_repo_stars_event_min_datetime": "2016-12-16T15:24:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T10:36:55.000Z", "max_issues_repo_path": "radvel/orbit.py", "max_issues_repo_name": "spencerhurt/radvel", "max_issues_repo_head_hexsha": "05a1a1e020d239bf7cba8575b68a6d83ec0b3a5c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 211, "max_issues_repo_issues_event_min_datetime": "2016-08-03T20:08:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T23:13:19.000Z", "max_forks_repo_path": "radvel/orbit.py", "max_forks_repo_name": "spencerhurt/radvel", "max_forks_repo_head_hexsha": "05a1a1e020d239bf7cba8575b68a6d83ec0b3a5c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 53, "max_forks_repo_forks_event_min_datetime": "2016-08-25T20:54:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T05:08:49.000Z", "avg_line_length": 27.4421052632, "max_line_length": 107, "alphanum_fraction": 0.5485232068, "include": true, "reason": "import numpy", "num_tokens": 759}
|
MODULE m_emp_init
contains
SUBROUTINE emp_init (EMP_0_coef)
! ----------------------------------------------------------------------
! SUBROUTINE: emp_init
! ----------------------------------------------------------------------
! Purpose: Initialize the EMP model
! ----------------------------------------------------------------------
! Input arguments:
! - EMP_0_coef: Array for the initialization of the EMP model
! ----------------------------------------------------------------------
! Author : Tzupang Tseng, Geoscience Australia
!
! Created: 10-02-2021
!
! Changes:
! ----------------------------------------------------------------------
USE mdl_precision
USE mdl_num
USE mdl_param
USE mdl_config
! NOTE(review): mdl_param is USE'd twice (also above); redundant but harmless
USE mdl_param
use pod_yaml
IMPLICIT NONE
! ----------------------------------------------------------------------
! Dummy arguments declaration
! ----------------------------------------------------------------------
! IN
REAL (KIND = prec_q), DIMENSION(:), INTENT(OUT), ALLOCATABLE :: EMP_0_coef
! ----------------------------------------------------------------------
INTEGER (KIND = prec_int8) :: i, ii, PD_Param_ID
INTEGER (KIND = prec_int2) :: AllocateStatus
! ----------------------------------------------------------------------
CHARACTER (LEN=100) :: fname
CHARACTER (LEN=50) :: fname_id
CHARACTER (LEN=100) :: param_id
CHARACTER (LEN=500) :: param_value
logical found
! ----------------------------------------------------------------------
! Initial conditions for empirical model
! ----------------------------------------------------------------------
! NOTE(review): allocation status is not checked (no STAT= / error handling)
ALLOCATE (EMP_0_coef(EMPNUM))
EMP_0_coef = 0.d0
! When initial conditions come from an IC file, the EMP coefficients occupy
! elements 9:8+EMPNUM of the global IC vector
IF (yml_ic_input_format == IC_FILE) THEN
EMP_0_coef = IC_sat_glb (9:8+EMPNUM)
END IF
!print*,'EMP_0_coef=',EMP_0_coef
! Without a YAML config, write the coefficients to the legacy input file;
! otherwise store them in the per-PRN YAML override entry (creating it if absent)
if (.not. yaml_found) then
fname = 'emp_est.in'
param_id = 'EMP'
write (param_value, *) EMP_0_coef
!Call write_prmfile (fname, fname_id, param_id, param_value)
CALL write_prmfile_init0 (fname, param_id, param_value)
else
found = .false.
do i=1, prn_override_count
if (.not. found .and. yml_prn_overrides(i)%name .eq. trim(PRN)) then
yml_prn_overrides(i)%integ%emp_init_values(1:EMPNUM) = EMP_0_coef(1:EMPNUM)
yml_prn_overrides(i)%integ%emp_parameters_used = EMPNUM
found = .true.
end if
end do
if (.not. found) then
call new_prn_override(PRN)
i = prn_override_count
yml_prn_overrides(i)%integ%emp_init_values(1:EMPNUM) = EMP_0_coef(1:EMPNUM)
yml_prn_overrides(i)%integ%emp_parameters_used = EMPNUM
end if
end if
END SUBROUTINE
END MODULE
|
{"hexsha": "58edf866a983d97fbcce25596addd45da346c798", "size": 2798, "ext": "f03", "lang": "FORTRAN", "max_stars_repo_path": "src/fortran/m_emp_init.f03", "max_stars_repo_name": "RodrigoNaves/ginan-bitbucket-update-tests", "max_stars_repo_head_hexsha": "4bd5cc0a9dd0e94b1c2d8b35385e128404009b0c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2021-07-08T23:35:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:17:58.000Z", "max_issues_repo_path": "src/fortran/m_emp_init.f03", "max_issues_repo_name": "RodrigoNaves/ginan-bitbucket-update-tests", "max_issues_repo_head_hexsha": "4bd5cc0a9dd0e94b1c2d8b35385e128404009b0c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-09-27T14:27:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T23:50:02.000Z", "max_forks_repo_path": "src/fortran/m_emp_init.f03", "max_forks_repo_name": "RodrigoNaves/ginan-bitbucket-update-tests", "max_forks_repo_head_hexsha": "4bd5cc0a9dd0e94b1c2d8b35385e128404009b0c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 39, "max_forks_repo_forks_event_min_datetime": "2021-07-12T05:42:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T15:15:34.000Z", "avg_line_length": 33.3095238095, "max_line_length": 84, "alphanum_fraction": 0.4624731951, "num_tokens": 638}
|
import warnings
warnings.filterwarnings("ignore")
import sys
sys.path.append("./")
from pathlib import Path
from multiprocessing import Pool
import numpy as np
import pandas as pd
import statistics as st
import cProfile
from matplotlib import pyplot as plt
from numpy import array, polyfit, poly1d
from corems.mass_spectra.input.andiNetCDF import ReadAndiNetCDF
from corems import get_dirname, get_filename
from corems.mass_spectra.factory.GC_Class import GCMSBase
from corems.mass_spectra.calc.LF_Targeted import LossFinderTargeted
from corems.mass_spectrum.factory.MassSpectrumClasses import MassSpecBase
import glob
def run_targetedLF(file_path, ref_file):
    """Run the targeted loss finder on one Andi NetCDF GC-MS file.

    Args:
        file_path: path to the Andi NetCDF input file.
        ref_file: path to the CSV reference neutral-loss list.

    Returns:
        tuple: (offset_hits, mz_count) — the peak-offset hits matched against
        the reference losses, and the m/z count tracked by the finder.
    """
    loss_finder = LossFinderTargeted()

    # Parse the NetCDF file and build the GC-MS object.
    reader_gcms = ReadAndiNetCDF(file_path)
    reader_gcms.run()
    gcms = reader_gcms.get_gcms_obj()
    gc_ms = gcms._ms  # NOTE(review): reaches into a private attribute of the GCMS object

    # Analysis settings (redundant float() casts removed — literals are already floats).
    loss_finder.noise_cutoff = 0.85
    loss_finder.tolerance = 1.0
    loss_finder.ref_file = ref_file

    # Targeted neutral-loss search pipeline.
    mz_dict, abund = loss_finder.ms_info_get(gc_ms)
    range_ref = loss_finder.loss_ref_get(ref_file, loss_finder.tolerance)
    mz_filtered, abund_filtered = loss_finder.threshold_filter(
        mz_dict, abund, loss_finder.noise_cutoff)
    offset_hits = loss_finder.findpeakoffset(range_ref, mz_filtered, abund_filtered)

    out = loss_finder.plot_offset()
    print(out)

    return offset_hits, loss_finder.mz_count
if __name__ == '__main__':
    # get_filename() comes from corems — presumably prompts for / resolves the
    # input file; verify against the corems API.
    file_path = get_filename()
    # NOTE(review): hard-coded, machine-specific reference path; consider
    # taking this from the command line instead.
    ref_file = '/mnt/c/ubuntu_home/loss_finder/NeutralLossList.csv'
    output, mz_count = run_targetedLF(file_path, ref_file)
|
{"hexsha": "9ac4bb7ea5ccd7a8a5cf9a8fea4764f40825b111", "size": 1992, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/scripts/Loss_Finder.py", "max_stars_repo_name": "Kzra/CoreMS", "max_stars_repo_head_hexsha": "88bef42e3cf5d11c04ad13b4c58d8a366f7844a7", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-29T18:13:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-29T18:13:14.000Z", "max_issues_repo_path": "examples/scripts/Loss_Finder.py", "max_issues_repo_name": "Kzra/CoreMS", "max_issues_repo_head_hexsha": "88bef42e3cf5d11c04ad13b4c58d8a366f7844a7", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/scripts/Loss_Finder.py", "max_forks_repo_name": "Kzra/CoreMS", "max_forks_repo_head_hexsha": "88bef42e3cf5d11c04ad13b4c58d8a366f7844a7", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5925925926, "max_line_length": 108, "alphanum_fraction": 0.7304216867, "include": true, "reason": "import numpy,from numpy", "num_tokens": 477}
|
[STATEMENT]
lemma systemIN_noOUT:
assumes "systemIN x i"
shows "\<not> systemOUT x i"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> systemOUT x i
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
systemIN x i
goal (1 subgoal):
1. \<not> systemOUT x i
[PROOF STEP]
by (simp add: systemIN_def systemOUT_def)
|
{"llama_tokens": 140, "file": "ComponentDependencies_DataDependencies", "length": 2}
|
# -*- coding: utf-8 -*-
""" Tests for the `CRA` module."""
import pytest
from pytest import approx
import numpy as np
import scipy.sparse as sp
from deepburn.CRAM import CRA, CRAC, cras_literature, CRA_ODEsolver
def test_init():
    # A freshly constructed solver must carry a CRA instance internally.
    solver = CRA_ODEsolver()
    assert isinstance(solver._cra, CRA)
def test_basicsolve_zerodim():
    # Smoke test: solving an empty (0x0) system must run without raising.
    # NOTE(review): the result N is never asserted on — consider checking
    # e.g. N.size == 0 so regressions in the empty-system path are caught.
    A = np.zeros((0,0))
    N0 = np.zeros(0)
    crasolver = CRA_ODEsolver()
    N = crasolver._solveCRA(A, N0)
def test_basicsolve_trivial():
    # dN/dt = -N with N(0) = 1, evaluated at t = 1, gives exp(-1).
    system = np.asarray([[-1.0]])
    initial = np.asarray([1.0])
    result = CRA_ODEsolver()._solveCRA(system, initial)
    assert result == approx(0.367879441)
def test_basicsolve_polonium_dense():
    # Three-species decay chain (polonium test case); rates converted to 1/year.
    seconds_per_year = 365.25 * 24 * 3600
    A = seconds_per_year * np.array([
        [-1.83163e-12, 0.0, 0.0],
        [+1.83163e-12, -1.60035e-6, 0.0],
        [0.0, +1.60035e-6, -5.79764e-8],
    ])
    N0 = np.array([6.95896e-4, 0.0, 0.0])
    Y = CRA_ODEsolver()._solveCRA(A, N0)
    print(Y)
    assert Y[0] == approx(6.958557771e-04)
    assert Y[1] == approx(7.964206428e-10)
    assert Y[2] == approx(1.832378200e-08)
def test_basicsolve_polonium_sparse():
    # Same three-species chain as above, fed to the solver as a CSC sparse matrix.
    seconds_per_year = 365.25 * 24 * 3600
    dense = seconds_per_year * np.array([
        [-1.83163e-12, 0.0, 0.0],
        [+1.83163e-12, -1.60035e-6, 0.0],
        [0.0, +1.60035e-6, -5.79764e-8],
    ])
    Asp = sp.csc_matrix(dense)
    N0 = np.array([6.95896e-4, 0.0, 0.0])
    Y = CRA_ODEsolver()._solveCRA(Asp, N0)
    assert Y[0] == approx(6.9585577708845012e-04)
    assert Y[1] == approx(7.9642064281967071e-10)
    assert Y[2] == approx(1.8323781965287107e-08)
|
{"hexsha": "be2da162909230db52fe40925209f2909e60124d", "size": 1508, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/CRAM/test_CRAsolve.py", "max_stars_repo_name": "gvdeynde/deepburn", "max_stars_repo_head_hexsha": "1af3d62ec0e70b82250bce31342326adcf561002", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/CRAM/test_CRAsolve.py", "max_issues_repo_name": "gvdeynde/deepburn", "max_issues_repo_head_hexsha": "1af3d62ec0e70b82250bce31342326adcf561002", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/CRAM/test_CRAsolve.py", "max_forks_repo_name": "gvdeynde/deepburn", "max_forks_repo_head_hexsha": "1af3d62ec0e70b82250bce31342326adcf561002", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-21T07:40:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T07:40:52.000Z", "avg_line_length": 29.568627451, "max_line_length": 102, "alphanum_fraction": 0.6478779841, "include": true, "reason": "import numpy,import scipy", "num_tokens": 593}
|
import sys
import numpy as np
class Graph(object):
    """Undirected weighted graph loaded from CSV assets, with Dijkstra search.

    Nodes come from ``assets/nodes.csv``. Edge weights come from the adjacency
    matrix in ``assets/AM.csv``, where 1000 (the genfromtxt fill value) marks
    "no edge" and any weight other than 1 is doubled.
    """

    def __init__(self):
        nodes = np.loadtxt("assets/nodes.csv", dtype=str, delimiter=',')
        matrix = np.genfromtxt("assets/AM.csv", delimiter=',', filling_values=1000)
        init_graph = {node: {} for node in nodes}
        for i in range(len(nodes)):
            for j in range(len(nodes)):
                weight = matrix[i][j]
                if weight == 1000:
                    continue  # sentinel: no edge
                # Weights other than 1 are doubled (original cost model).
                init_graph[nodes[i]][nodes[j]] = weight if weight == 1 else weight * 2
        self.nodes = nodes
        self.graph = self.construct_graph(nodes, init_graph)

    def construct_graph(self, nodes, init_graph):
        """Return a symmetric copy of ``init_graph``.

        If there is a path from node A to B with value V, ensure the reverse
        edge B -> A exists with the same value.
        """
        graph = {node: {} for node in nodes}
        graph.update(init_graph)
        for node, edges in graph.items():
            for adjacent_node, value in edges.items():
                # BUG FIX: the previous `.get(node, False) == False` check
                # treated a stored weight of 0 as "missing" (0 == False) and
                # clobbered it; a membership test is exact. Also removed a
                # leftover debug print of the graph keys.
                if node not in graph[adjacent_node]:
                    graph[adjacent_node][node] = value
        return graph

    def get_nodes(self):
        """Return the nodes of the graph."""
        return self.nodes

    def get_outgoing_edges(self, node):
        """Return the neighbors of a node."""
        # Membership test instead of `!= False` so 0-weight edges are kept.
        return [out_node for out_node in self.nodes if out_node in self.graph[node]]

    def value(self, node1, node2):
        """Return the value of the edge between two nodes."""
        return self.graph[node1][node2]

    def dijkstra_algorithm(self, start_node):
        """Run Dijkstra's algorithm from ``start_node``.

        Returns:
            tuple: (previous_nodes, shortest_path) where previous_nodes maps
            each reached node to its predecessor on the best path and
            shortest_path maps every node to its best known distance
            (sys.maxsize if unreachable).
        """
        unvisited_nodes = list(self.get_nodes())
        # "Infinity" placeholder for nodes not yet reached.
        shortest_path = {node: sys.maxsize for node in unvisited_nodes}
        shortest_path[start_node] = 0
        previous_nodes = {}
        while unvisited_nodes:
            # Pick the unvisited node with the smallest tentative distance.
            current_min_node = min(unvisited_nodes, key=lambda n: shortest_path[n])
            # Relax all outgoing edges of that node.
            for neighbor in self.get_outgoing_edges(current_min_node):
                tentative = shortest_path[current_min_node] + self.value(current_min_node, neighbor)
                if tentative < shortest_path[neighbor]:
                    shortest_path[neighbor] = tentative
                    previous_nodes[neighbor] = current_min_node
            # Mark the node as visited.
            unvisited_nodes.remove(current_min_node)
        return previous_nodes, shortest_path

    def print_result(self, start_node, target_node):
        """Return the best path from start_node to target_node (printing its cost)."""
        previous_nodes, shortest_path = self.dijkstra_algorithm(start_node)
        path = []
        node = target_node
        while node != start_node:
            path.append(node)
            node = previous_nodes[node]
        # Add the start node manually.
        path.append(start_node)
        print("We found the following best path with a value of {}.".format(shortest_path[target_node]))
        path.reverse()
        return path
|
{"hexsha": "6c98082f5d714e13c0ff621e3628b4913de276f7", "size": 4412, "ext": "py", "lang": "Python", "max_stars_repo_path": "Graph.py", "max_stars_repo_name": "bitsPleaseHacked22/UofaPathfinder", "max_stars_repo_head_hexsha": "0676b667cc88c9435e0658e7aa7e5609d8691d41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Graph.py", "max_issues_repo_name": "bitsPleaseHacked22/UofaPathfinder", "max_issues_repo_head_hexsha": "0676b667cc88c9435e0658e7aa7e5609d8691d41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Graph.py", "max_forks_repo_name": "bitsPleaseHacked22/UofaPathfinder", "max_forks_repo_head_hexsha": "0676b667cc88c9435e0658e7aa7e5609d8691d41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-16T01:56:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-16T01:56:18.000Z", "avg_line_length": 35.8699186992, "max_line_length": 192, "alphanum_fraction": 0.5825022665, "include": true, "reason": "import numpy", "num_tokens": 884}
|
MODULE m_pulses_init
! ----------------------------------------------------------------------
! MODULE: m_pulses_init
! ----------------------------------------------------------------------
! Purpose:
!  Module for calling the pulses_force subroutine
! ----------------------------------------------------------------------
! Author : Dr. Thomas Papanikolaou at Geoscience Australia
! Created: 17 August 2020
! ----------------------------------------------------------------------
IMPLICIT NONE
!SAVE
Contains
SUBROUTINE pulses_init (mjd_t_epoch0, t_sec_epoch0, PULSES_Array_corr)
! ----------------------------------------------------------------------
! SUBROUTINE: pulses_init.f03
! ----------------------------------------------------------------------
! Purpose:
!  Acceleration vector and Partial derivatives of pseudo-stochastic pulses (Velocity changes)
! ----------------------------------------------------------------------
! Input arguments:
! - rsat: Satellite Position vector (m) in inertial frame (ICRF)
! - vsat: Satellite Velocity vector (m/s) in inertial frame (ICRF)
! - mjd_t: MJD of current Epoch
! - t_sec: Seconds since start of day of current Epoch
! - delta_v: Pulse value
! - mjd_ti: MJD of pulse epoch
! - ti_sec: Seconds since start of the day of pulse' epoch
! - dir: Pulse' direction e.g. Radial, Tangential, Normal directions, XYZ directions
!
! Output arguments:
! - Fpulse: Acceleration vector cartesian components in inertial frame (ICRF)
! - PDr: Partial derivatives matrix of the acceleration w.r.t. the position vector in ICRF
! - PDv: Partial derivatives matrix of the acceleration w.r.t. the velocity vector in ICRF
! - PD_param: Partial derivatives matrix of the acceleration w.r.t. the (force-related) unknown parameters in ICRF
! ----------------------------------------------------------------------
! Author : Dr. Thomas Papanikolaou at Geoscience Australia
! Created: 17 August 2020
! ----------------------------------------------------------------------
USE mdl_precision
USE mdl_num
USE mdl_param
USE mdl_config
USE m_pd_pulses
use pod_yaml
IMPLICIT NONE
! ----------------------------------------------------------------------
! Dummy arguments declaration
! ----------------------------------------------------------------------
! IN
REAL (KIND = prec_d), INTENT(IN) :: mjd_t_epoch0, t_sec_epoch0
! ----------------------------------------------------------------------
! OUT
! REAL (KIND = prec_d), DIMENSION(3), INTENT(OUT) :: SFpulses
REAL (KIND = prec_q), DIMENSION(:,:), ALLOCATABLE :: PULSES_Array_corr
! ----------------------------------------------------------------------
! ---------------------------------------------------------------------
! Local variables declaration
! ----------------------------------------------------------------------
!INTEGER (KIND = prec_int2) :: N_param_pulses
INTEGER (KIND = prec_int2) :: AllocateStatus, DeAllocateStatus
!REAL (KIND = prec_q) :: delta_v_corr(3)
!REAL (KIND = prec_q), DIMENSION(:), ALLOCATABLE :: delta_v_corr
!REAL (KIND = prec_q), DIMENSION(:,:), ALLOCATABLE :: PULSES_Array_corr
!INTEGER (KIND = prec_int8) :: PULSE_param, N_PULSE_param
INTEGER (KIND = prec_int8) :: i_pulse, N_pulse_param_glb
INTEGER (KIND = prec_int8) :: i1_pulse, i2_pulse
REAL (KIND = prec_q) :: offset
! ----------------------------------------------------------------------
! ----------------------------------------------------------------------
! Number of pulses directions
!PULSE_dir_glb = PULSE_dir_vec(1) + PULSE_dir_vec(2) + PULSE_dir_vec(3)
PULSE_dir_glb = yml_pulse_parameter_count
! Number of pulses to be estimated
N_PULSE_param_glb = PULSE_dir_glb * yml_pulse_epoch_number
! ----------------------------------------------------------------------
! Allocate arrays of Pulses values
! NOTE(review): AllocateStatus is captured but never inspected afterwards
ALLOCATE (PULSES_Array_apriori_glb(yml_pulse_epoch_number, 2+PULSE_dir_glb), STAT = AllocateStatus)
ALLOCATE (PULSES_Array_aposteriori_glb(yml_pulse_epoch_number, 2+PULSE_dir_glb), STAT = AllocateStatus)
ALLOCATE (PULSES_Array_corr(yml_pulse_epoch_number, PULSE_dir_glb), STAT = AllocateStatus)
!ALLOCATE (delta_v_corr(N_PULSE_epochs * PULSE_dir_glb), STAT = AllocateStatus)
PULSES_Array_apriori_glb = 0.0D0
PULSES_Array_aposteriori_glb = 0.0D0
PULSES_Array_corr = 0.0D0
!delta_v_corr = 0.0D0
! ----------------------------------------------------------------------
! Pulses array values initialisation
! Each pulse epoch row holds: col 1 = MJD, col 2 = seconds of day,
! cols 3..2+PULSE_dir_glb = pulse values (a-priori 1.0D-8 per direction)
! ----------------------------------------------------------------------
DO i1_pulse = 1 , yml_pulse_epoch_number
offset = yml_pulse_offset
!print *,"offset", offset
PULSES_Array_aposteriori_glb(i1_pulse, 1) = mjd_t_epoch0 + offset/86400.0D0 + (i1_pulse-1) * (yml_pulse_interval/86400.0D0)
PULSES_Array_aposteriori_glb(i1_pulse, 2)=(PULSES_Array_aposteriori_glb(i1_pulse,1)-&
       & INT(PULSES_Array_aposteriori_glb(i1_pulse, 1)))*86400.d0
!print *,"mjd_t_epoch0, pulse_offest, yml_pulse_interval", mjd_t_epoch0, offset/86400.0D0, (i1_pulse-1)*(yml_pulse_interval/86400.0D0)
!print*,'i1_pulse, offset, yml_pulse_interval =', i1_pulse, offset, yml_pulse_interval
!print*,'i1_pulse, offset, yml_pulse_interval, mjd_t_epoch0, t_sec_epoch0 =', i1_pulse, offset, yml_pulse_interval, mjd_t_epoch0, t_sec_epoch0
!print*,'mjd_t , t_sec =', PULSES_Array_aposteriori_glb(i1_pulse, 1), PULSES_Array_aposteriori_glb(i1_pulse, 2)
DO i2_pulse = 1 , PULSE_dir_glb
PULSES_Array_aposteriori_glb(i1_pulse,i2_pulse+2) = 1.0D-8
END DO
! When an IC file supplies initial conditions, overwrite the epoch times and
! pulse values of this row from the global IC pulse array
IF (yml_ic_input_format == IC_FILE) THEN
!      offset = IC_sat_pulse_glb(1,2)
PULSES_Array_aposteriori_glb(i1_pulse, 1) = mjd_t_epoch0 + offset/86400.0D0 + (i1_pulse-1) * (yml_pulse_interval/86400.0D0)
PULSES_Array_aposteriori_glb(i1_pulse, 2) = t_sec_epoch0 + i1_pulse * yml_pulse_interval !offset + (i1_pulse-1) * yml_pulse_interval
DO i2_pulse = 1 , PULSE_dir_glb
PULSES_Array_aposteriori_glb(i1_pulse,i2_pulse+2) = IC_sat_pulse_glb(i1_pulse,i2_pulse+2)
END DO
!      print*,'offset, yml_pulse_interval =', offset, yml_pulse_interval
!      print*,'PULSES_Array_aposteriori_glb, i1_pulse = ', i1_pulse, PULSES_Array_aposteriori_glb(i1_pulse,:)
END IF
END DO
! ----------------------------------------------------------------------
END SUBROUTINE
END Module
|
{"hexsha": "4188865640a9cb1c511595ba36a42c66d8a5e9d9", "size": 6574, "ext": "f03", "lang": "FORTRAN", "max_stars_repo_path": "src/fortran/m_pulses_init.f03", "max_stars_repo_name": "RodrigoNaves/ginan-bitbucket-update-tests", "max_stars_repo_head_hexsha": "4bd5cc0a9dd0e94b1c2d8b35385e128404009b0c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2021-07-08T23:35:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:17:58.000Z", "max_issues_repo_path": "src/fortran/m_pulses_init.f03", "max_issues_repo_name": "RodrigoNaves/ginan-bitbucket-update-tests", "max_issues_repo_head_hexsha": "4bd5cc0a9dd0e94b1c2d8b35385e128404009b0c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-09-27T14:27:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T23:50:02.000Z", "max_forks_repo_path": "src/fortran/m_pulses_init.f03", "max_forks_repo_name": "RodrigoNaves/ginan-bitbucket-update-tests", "max_forks_repo_head_hexsha": "4bd5cc0a9dd0e94b1c2d8b35385e128404009b0c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 39, "max_forks_repo_forks_event_min_datetime": "2021-07-12T05:42:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T15:15:34.000Z", "avg_line_length": 45.972027972, "max_line_length": 143, "alphanum_fraction": 0.5524794646, "num_tokens": 1668}
|
using VirulenceEvolution
using VirulenceEvolution:getindex, setindex!
using Test
# ind2sub: converting linear index 6 back to subscripts must address the value 6
t = reshape(1:8, 2, 2, 2)
@test t[VirulenceEvolution.ind2sub(axes(t), 6)...] == 6
# Dynamics: the constructor records the initial (time, value) pair; record! appends
d = Dynamics(0, 1)
@test d.history == [(0, 1)]
record!(d, 1, 2)
@test d.history == [(0, 1), (1, 2)]
# zeroatdiag!: zeroes the diagonal of a matrix in place
m = [1 2
     3 4]
VirulenceEvolution.zeroatdiag!(m)
@test m[1, 1] == m[2, 2] == 0
# @apply: applies the given function to each listed variable, rebinding it
x = 1
y = 2
VirulenceEvolution.@apply x->x+1 x y
@test x == 2
@test y == 3
# slicecol: the column-slice helper must match standard column indexing
@test m[:, 2] == VirulenceEvolution.slicecol(m, 2)
# getindex by tuple behaves like multi-dimensional indexing
@test m[(1, 1)] == m[1, 1]
# setindex by tuple behaves like multi-dimensional assignment
m[(1, 2)] = 5.0
@test m[1, 2] == 5
# Smoke tests: both Gillespie simulators must run to completion
# (f supplies a random rate/mutation value of the element type — no asserted output)
f(x::T) where {T} = rand(T)
gillespie_single(f, 50; S=1000, I=[1], R=[0], v=[0.3],
                 β=0.01, b=0.2, d=0.1, r=0.1, μ=0.1, m=0.01, maxepoch=30_000)
gillespie_meta(f, 50; S=[1000, 100], I=[1 0], R=[0 0], v=[0.3],
               β=0.01, b=0.2, d=0.1, r=0.1, μ=0.1, mt=0.01, mg=0.01, maxepoch=50_000)
|
{"hexsha": "10e9345d709912c8fc4594a85aa63d11aa101b48", "size": 982, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "wangl-cc/VirulenceEvolution.jl", "max_stars_repo_head_hexsha": "a765e3e17e7c40e2900b1606dfdce6c0470f83d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-09-15T07:21:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T08:04:05.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "wangl-cc/VirulenceEvolution.jl", "max_issues_repo_head_hexsha": "a765e3e17e7c40e2900b1606dfdce6c0470f83d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "wangl-cc/VirulenceEvolution.jl", "max_forks_repo_head_hexsha": "a765e3e17e7c40e2900b1606dfdce6c0470f83d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.347826087, "max_line_length": 74, "alphanum_fraction": 0.5936863544, "num_tokens": 462}
|
module Plot

import PyPlot; plt = PyPlot
plt.svg(true)
using PyCall

export plt, make_bar_plot

"""
    make_bar_plot(lookup_value, groups, keys; ...)

Draw a grouped bar chart: one cluster per entry of `groups`, one bar per entry
of `keys`.  Each bar height is supplied by `lookup_value(group, key)`.
Entries of `groups` and `keys` may be `label => value` pairs, in which case the
label is shown on the axis/legend and the value is what `lookup_value` sees.

Keyword arguments: `colors` and `patterns` style the bars per key; `ax` reuses
an existing axes; `width`/`gap` set the total cluster width and spacing;
`label_rotation`/`label_anchor` control the x tick labels.

Returns `(fig, ax)` when a new figure was created, `nothing` otherwise.

# Examples
```julia
fig, ax = make_bar_plot(
    ["A", "B", "C", "D", "E"], ["1", "2", "3", "4"],
) do group, key
    rand()
end
ax.set_title("...")
ax.set_xlabel("...")
ax.set_ylabel("...")
```
```julia
fig, ax = make_bar_plot(
    ["A"=>1, "B"=>2, "C"=>3, "D"=>4, "E"=>5], [1, 2, 3, 4, 5],
    label_rotation=45, label_anchor="right",
) do group, key
    group * key
end
```
"""
function make_bar_plot(lookup_value::Function, groups, keys;
        colors=nothing, patterns=nothing, ax=nothing,
        width::Real=0.7, gap::Real=0.2, label_rotation::Real=0.0,
        label_anchor=nothing)
    # An axes coming from "latextools" takes pgfplots-style bar arguments.
    is_latex = ax isa PyObject && startswith(ax.__module__, "latextools")
    n_groups = length(groups)
    n_bars = length(keys)
    # Split the requested cluster width/gap evenly between the bars of a key.
    bar_w = width / n_bars
    stride = bar_w + gap / n_bars
    centers = collect(1:n_groups)
    # Shift so the bars of one cluster are centered on its integer position.
    shift0 = -(stride * (n_bars + 1)) / 2
    created_fig = nothing
    if ax === nothing
        created_fig, ax = plt.subplots()
    end
    # Separate display labels from the values handed to lookup_value.
    group_labels = [g isa Pair ? g[1] : string(g) for g in groups]
    group_vals = [g isa Pair ? g[2] : g for g in groups]
    for (bar_idx, raw_key) in enumerate(keys)
        key_label = raw_key isa Pair ? raw_key[1] : string(raw_key)
        key = raw_key isa Pair ? raw_key[2] : raw_key
        heights = [lookup_value(g isa Pair ? g[2] : g, key) for g in group_vals]
        c = (colors !== nothing && bar_idx <= length(colors)) ? colors[bar_idx] : nothing
        p = (patterns !== nothing && bar_idx <= length(patterns)) ? patterns[bar_idx] : nothing
        if is_latex
            ax.bar(group_labels, heights, color=c, pattern=p, legend=key_label,
                   extra_config="pattern color=$c,")
        else
            ax.bar(centers .+ (shift0 + bar_idx * stride), heights, bar_w,
                   color=c, label=key_label)
        end
    end
    if !is_latex
        ax.set_xticks(collect(1:n_groups))
        ax.set_xticklabels(group_labels, rotation=label_rotation, ha=label_anchor)
        ax.legend()
    end
    created_fig === nothing ? nothing : (created_fig, ax)
end

end
|
{"hexsha": "a41254deaae0b9498dfc54a66566f04568684585", "size": 2238, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Plot.jl", "max_stars_repo_name": "cduck/Tweaks.jl", "max_stars_repo_head_hexsha": "c9dc3fdb866d5a3a05c23e9bfff1839c5565d6d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Plot.jl", "max_issues_repo_name": "cduck/Tweaks.jl", "max_issues_repo_head_hexsha": "c9dc3fdb866d5a3a05c23e9bfff1839c5565d6d1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Plot.jl", "max_forks_repo_name": "cduck/Tweaks.jl", "max_forks_repo_head_hexsha": "c9dc3fdb866d5a3a05c23e9bfff1839c5565d6d1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.724137931, "max_line_length": 80, "alphanum_fraction": 0.5504915103, "num_tokens": 656}
|
import numpy as np
import pdb
def get_run_info(runtimes_info_path):
    """Parse a file of concatenated LSF job reports into runtime/RAM arrays.

    Scans the text for each "CPU time :" / "Max Memory :" record pair.  The
    value is expected to occupy the 9 characters immediately before the
    " sec." / " MB" unit marker; the first of those 9 characters must be a
    space, otherwise the value was wider than expected and we bail out.

    Parameters
    ----------
    runtimes_info_path : str
        Path to the text file with the job summaries.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        CPU times in seconds (float) and peak memory in MB (int),
        one entry per record found.
    """
    # Use a context manager so the file handle is closed (was leaked before).
    with open(runtimes_info_path, "r") as info_file:
        runtimes_info = info_file.read()
    max_index = len(runtimes_info) - 1
    next_index = 0
    runtime_vals = []
    max_ram_vals = []
    while next_index <= max_index:
        try:
            pos_before_index = runtimes_info.index("CPU time :", next_index)
            pos_after_index = runtimes_info.index(" sec.", pos_before_index)
            runtime_val = runtimes_info[(pos_after_index - 9):pos_after_index]
            if runtime_val[0] == " ":
                runtime_vals.append(float(runtime_val[1:]))
            else:
                print("ERROR: runtime much longer than expected")
                exit()
            pos_before_index = runtimes_info.index("Max Memory :", next_index)
            pos_after_index = runtimes_info.index(" MB", pos_before_index)
            max_ram_val = runtimes_info[(pos_after_index - 9):pos_after_index]
            # BUGFIX: this guard previously re-tested runtime_val, so an
            # over-wide memory value was never detected.
            if max_ram_val[0] == " ":
                max_ram_vals.append(int(max_ram_val[1:]))
            else:
                print("ERROR: max_ram much longer than expected")
                exit()
            next_index = pos_after_index
        except ValueError:
            # str.index found no further record: stop scanning.
            next_index = max_index + 1
    return(np.array(runtime_vals), np.array(max_ram_vals))
def bootstrap_improvement_ratio(triadsim_vals, regens_vals, N):
    """Bootstrap a 95% CI for the ratio mean(triadsim_vals)/mean(regens_vals).

    Draws N bootstrap resamples (with replacement) of each array, computes the
    ratio of the resample means, and reports the 2.5th/97.5th percentiles of
    those ratios around the point estimate.

    Parameters
    ----------
    triadsim_vals, regens_vals : numpy.ndarray
        Measured values (runtimes or peak-RAM) for the two tools.
    N : int
        Number of bootstrap resamples.

    Returns
    -------
    list
        [low, mean, high]: lower CI bound, point estimate, upper CI bound.
    """
    n_t = len(triadsim_vals)
    n_r = len(regens_vals)
    # np.int was removed from NumPy (1.24+); the builtin int gives the same
    # truncation semantics for the uniform-index trick below.
    resampled_vals_triadsim = triadsim_vals[(np.random.rand(N, n_t) * n_t).astype(int)]
    resampled_vals_regens = regens_vals[(np.random.rand(N, n_r) * n_r).astype(int)]
    resampled_improvement_ratios = (np.mean(resampled_vals_triadsim, axis=1)
                                    / np.mean(resampled_vals_regens, axis=1))
    mean = np.mean(triadsim_vals) / np.mean(regens_vals)
    low = np.percentile(resampled_improvement_ratios, 2.5)
    high = np.percentile(resampled_improvement_ratios, 97.5)
    return([low, mean, high])
# Compare TriadSim vs. regens resource usage parsed from LSF job reports.
triadsim_runtimes, triadsim_max_rams = get_run_info("triadsim_runtimes.out")
regens_runtimes, regens_max_rams = get_run_info("regens_runtime.out")
# Print 95% bootstrap CIs for the runtime and peak-RAM improvement ratios.
print(bootstrap_improvement_ratio(triadsim_runtimes, regens_runtimes, 1000000))
print(bootstrap_improvement_ratio(triadsim_max_rams, regens_max_rams, 1000000))
|
{"hexsha": "b01e896c4a0b2b29eec4cd0db304292ac71ee66d", "size": 2247, "ext": "py", "lang": "Python", "max_stars_repo_path": "runtime_testing/triadsim_runtimes.py", "max_stars_repo_name": "greggj2016/regens", "max_stars_repo_head_hexsha": "763413891f41068830b5e711ad3f16917e7771cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "runtime_testing/triadsim_runtimes.py", "max_issues_repo_name": "greggj2016/regens", "max_issues_repo_head_hexsha": "763413891f41068830b5e711ad3f16917e7771cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "runtime_testing/triadsim_runtimes.py", "max_forks_repo_name": "greggj2016/regens", "max_forks_repo_head_hexsha": "763413891f41068830b5e711ad3f16917e7771cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.8571428571, "max_line_length": 118, "alphanum_fraction": 0.6800178015, "include": true, "reason": "import numpy", "num_tokens": 584}
|
# -*- coding: utf-8 -*-
import numpy
import scipy.linalg
import sklearn.cross_decomposition
import sklearn.metrics
class LinearCCA(object):
    """Linear Canonical Correlation Analysis.

    Finds projection directions wx, wy that maximize the correlation between
    the projections of two paired data sets X and Y, by solving the
    generalized symmetric eigenproblem
    (Cxy Cyy^-1 Cyx) wx = eig * Cxx wx.
    """

    def __init__(self, n_components):
        # Number of canonical component pairs to keep.
        self._n_components = n_components
        self._wx = None
        self._wy = None

    def fit(self, X, Y):
        """Fit the model on paired observations.

        Parameters
        ----------
        X : numpy.ndarray, shape (n_samples, dx)
        Y : numpy.ndarray, shape (n_samples, dy)
            Rows of X and Y must correspond.  Note: the data is NOT
            centered here; covariances are computed as raw cross products.

        Returns
        -------
        None
        """
        assert X.shape[0] == Y.shape[0],\
            "number of samples of X and Y should be the same"
        # calculate (uncentered) covariance matrices
        Cxx = numpy.dot(X.T, X)
        Cxy = numpy.dot(X.T, Y)
        Cyy = numpy.dot(Y.T, Y)
        Cyy_inv = numpy.linalg.inv(Cyy)
        # solve generalized eigenvalue problem A wx = eig Cxx wx
        A = Cxy.dot(Cyy_inv).dot(Cxy.T)
        eig, wx = scipy.linalg.eigh(A, Cxx)
        eig = numpy.real(eig)
        # keep only strictly positive eigenvalues
        wx = wx[:, eig > 0]
        eig = eig[eig > 0]
        # Sort descending.  BUGFIX: the eigenvalues must be selected with the
        # same (descending) index order as the eigenvectors; previously the
        # unsorted leading entries (eigh returns ascending order) were paired
        # with the sorted vectors, so _wy was scaled by the wrong eigenvalues.
        idx = numpy.argsort(eig)[::-1]
        eig = eig[idx[:self._n_components]]
        self._wx = wx[:, idx[:self._n_components]]
        self._wy = Cyy_inv.dot(Cxy.T).dot(self._wx)
        self._wy /= numpy.sqrt(eig)
        # canonical correlations of the training data, largest first
        self._correlation = numpy.diag(self._wx.T.dot(Cxy).dot(self._wy))
        return

    def fit_transform(self, X, Y):
        """Fit the model, then return the projections of X and Y."""
        self.fit(X, Y)
        return self.predict(X, Y)

    def predict(self, X, Y):
        """Project X and Y onto the fitted canonical directions."""
        return self._wx.T.dot(X.T), self._wy.T.dot(Y.T)

    @property
    def correlation_(self):
        # Canonical correlations found during fit, largest first.
        return self._correlation
class KernelCCA(object):
    """Kernel Canonical Correlation Analysis (KCCA).

    Regularized KCCA solved as a generalized eigenvalue problem on the Gram
    matrices of a random (Nystroem-style) subsample of the training data.
    """

    def __init__(self, n_components, kernel='linear', kernel_params=(),
                 nystrom_approximation_ratio=1.0, reg_param=0.1):
        """Configure the model.

        Parameters
        ----------
        n_components : int
            Number of canonical component pairs to keep.
        kernel : str or callable
            'linear', 'rbf', or a callable invoked as
            ``kernel(x, y, *kernel_params)``.
        kernel_params : sequence, default ()
            Extra positional arguments for the kernel (e.g. ``[sigma]`` for
            'rbf').  The default was a mutable ``[]``; an empty tuple is
            behaviorally identical and avoids the shared-default pitfall.
        nystrom_approximation_ratio : float, default 1.0
            Fraction of the rows randomly subsampled for fitting.
        reg_param : float, default 0.1
            Ridge regularization added to the Gram matrices.

        Raises
        ------
        ValueError
            If `kernel` is neither a recognized name nor callable.
        """
        self._n_components = n_components
        self._reg_param = reg_param
        self._alpha = None
        self._beta = None
        self._X = None
        self._Y = None
        self._nystrom_approximation_ratio = nystrom_approximation_ratio
        if kernel == 'linear':
            self._kernel = linear_kernel
        elif kernel == 'rbf':
            self._kernel = lambda x, y: rbf(x, y, *kernel_params)
        elif callable(kernel):
            self._kernel = lambda x, y: kernel(x, y, *kernel_params)
        else:
            # Previously an unrecognized kernel left self._kernel unset and
            # only failed later inside fit() with AttributeError; fail fast.
            raise ValueError("unknown kernel: %r" % (kernel,))

    def fit(self, X, Y):
        """Fit on a random subsample of the paired rows of X and Y.

        Note: uses numpy's global RNG for the subsample, so results vary
        between calls unless the seed is fixed by the caller.
        """
        sample_idx = numpy.random.choice(
            X.shape[0], int(X.shape[0] * self._nystrom_approximation_ratio),
            replace=False)
        X = X[sample_idx]
        Y = Y[sample_idx]
        num_samples = X.shape[0]
        self._X = X
        self._Y = Y
        # Gram matrices: pairwise_distances is (ab)used here as a generic
        # pairwise-apply of the kernel callable.
        Kx = sklearn.metrics.pairwise_distances(X, metric=self._kernel)
        Ky = sklearn.metrics.pairwise_distances(Y, metric=self._kernel)
        # solve generalized eigenvalue problem A w = eig B w
        Z = numpy.zeros(shape=(num_samples, num_samples))
        A = numpy.block([[Z, Kx.dot(Ky)], [Ky.dot(Kx), Z]])
        B = numpy.block([
            [Kx.dot(Kx) + self._reg_param * Kx, Z],
            [Z, Ky.dot(Ky) + self._reg_param * Ky]])
        eig, coef = scipy.linalg.eig(A, B)
        # nan, negative eigenvalues and imaginary part of
        # eigenvalues and eigenvectors are ignored
        eig = numpy.real(eig)
        coef = numpy.real(coef)
        valid_idx = (eig > 0)
        coef = coef[:, valid_idx]
        eig = eig[valid_idx]
        # take top-k eigenvalues (k=self._n_components)
        idx = numpy.argsort(eig)[::-1]
        eig = eig[idx[:self._n_components]]
        # first half of each eigenvector weights X, second half weights Y
        self._alpha = coef[:num_samples, idx[:self._n_components]]
        self._beta = coef[num_samples:, idx[:self._n_components]]
        # normalized canonical correlations on the training subsample
        corr_xy = numpy.diag(
            self._alpha.T.dot(Kx).dot(Ky).dot(self._beta))
        corr_xx = numpy.diag(
            self._alpha.T.dot(Kx).dot(Kx).dot(self._alpha))
        corr_yy = numpy.diag(
            self._beta.T.dot(Ky).dot(Ky).dot(self._beta))
        self._correlation = corr_xy / numpy.sqrt(corr_xx * corr_yy)
        return

    def fit_transform(self, X, Y):
        """Fit the model, then return the projections of X and Y."""
        self.fit(X, Y)
        return self.predict(X, Y)

    def predict(self, X, Y):
        """Project new data onto the learned canonical directions."""
        Kx = sklearn.metrics.pairwise_distances(
            self._X, X, metric=self._kernel)
        Ky = sklearn.metrics.pairwise_distances(
            self._Y, Y, metric=self._kernel)
        # normalize each projection by its own kernel-space norm
        corr_xx = numpy.diag(
            self._alpha.T.dot(Kx).dot(Kx.T).dot(self._alpha))
        corr_yy = numpy.diag(
            self._beta.T.dot(Ky).dot(Ky.T).dot(self._beta))
        return self._alpha.T.dot(Kx).T / numpy.sqrt(corr_xx),\
            self._beta.T.dot(Ky).T / numpy.sqrt(corr_yy)

    @property
    def correlation_(self):
        # Canonical correlations from the last fit, largest eigenvalue first.
        return self._correlation
def linear_kernel(x, y):
    """Inner-product kernel: k(x, y) = <x, y>."""
    return numpy.dot(x, y)
def rbf(x, y, sigma):
    """Gaussian RBF kernel: exp(-||x - y||^2 / (2 * sigma^2))."""
    diff = x - y
    return numpy.exp(-numpy.sum(diff * diff) / (2 * sigma**2))
if __name__ == '__main__':
    # Demo: two 2-D views sharing a weak common signal u, each with its own
    # internally anti-correlated noise.
    numpy.random.seed(0)
    num_samples = 400
    x_dim = 2
    y_dim = 2
    noise1 = numpy.random.normal(size=num_samples)
    noise2 = numpy.random.normal(size=num_samples)
    u = numpy.arange(num_samples)
    u = (u % 80) / 80  # shared sawtooth signal with period 80
    # u = numpy.repeat(numpy.array([0, 1, 2, 1, 0, 1, 2, 1]), 50)
    X = numpy.zeros(shape=(num_samples, x_dim))
    X[:, 0] = noise1 + u * 0.1
    X[:, 1] = -noise1 + u * 0.1
    Y = numpy.zeros(shape=(num_samples, y_dim))
    Y[:, 0] = noise2 + u * 0.1
    Y[:, 1] = -noise2 + u * 0.1
    # RBF-kernel CCA (sigma = 0.1) fitted on a 70% random subsample.
    model = KernelCCA(n_components=20, kernel='rbf', kernel_params=[0.1, ],
                      nystrom_approximation_ratio=0.7)
    X2, Y2 = model.fit_transform(X, Y)
    print(model.correlation_)
|
{"hexsha": "aacc454bd71c3eb4f9b97cf3493d4dfb77cb8423", "size": 5663, "ext": "py", "lang": "Python", "max_stars_repo_path": "cca.py", "max_stars_repo_name": "t-aritake/KernelCCA.py", "max_stars_repo_head_hexsha": "68b219d096b344409f5d8bc7ff97102304565664", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cca.py", "max_issues_repo_name": "t-aritake/KernelCCA.py", "max_issues_repo_head_hexsha": "68b219d096b344409f5d8bc7ff97102304565664", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cca.py", "max_forks_repo_name": "t-aritake/KernelCCA.py", "max_forks_repo_head_hexsha": "68b219d096b344409f5d8bc7ff97102304565664", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1907216495, "max_line_length": 76, "alphanum_fraction": 0.5700158926, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1549}
|
from pylab import *
####################################
# Load of Input Data (Daten laden) #
####################################
def load_RKI(filename, LandkreisID, state_name ='Bavaria'):
    '''
    Reads file of the RKI database and selects the relevant data for the specific county.

    Input
    =====
    filename : str
        path to '*.csv' file downloaded from the RKI.
    LandkreisID : str
        String with 5 number entries to select the specific county.
    state_name : str, default = 'Bavaria'
        Name of the state.

    return
    ======
    dic : dictionary {'fall': array, 'tod':array, 'gesund': array}
        Array of cumulative number of cases
    uday : np.array
        Individual days of notification to the RKI
    umonth : np.array
        Individual month of notification to the RKI
    region_name : str
        Name of specific region
    dic_LK : dict={'name', 'ID'}
        Alphabetical sorted names of counties and corresponding IDs
    state : list [num_state, dat_state, state_name]
        Cumulative state-wide case numbers, the corresponding dates and
        the state name, in exactly this order.
    '''
    import numpy as np
    # Column selection from the RKI CSV; presumably (county ID, county name,
    # report date, cases, deaths, recovered) -- TODO confirm against the CSV
    # header, the RKI file layout has changed over time.
    daten_RKI = np.loadtxt(filename,
                           skiprows=1,
                           delimiter=',',
                           usecols=(9,3,8,6,7,-3),
                           dtype={'names': ('lkID', 'lk_name', 'datum', 'fall', 'tod', 'gesund'), 'formats': ( 'S6', 'S40', 'S10', 'i4', 'i4', 'i4')})
    # state average: sum daily case counts over ALL counties per report date
    dat_state = np.unique(daten_RKI['datum'])
    case_state = []
    #tod_state = []
    #gesund_state = []
    for dat in dat_state:
        case_state_dat = daten_RKI['fall'][daten_RKI['datum'] == dat]
        #tod_state_dat = daten_RKI['tod'][daten_RKI['datum'] == dat]
        #gesund_state_dat = daten_RKI['gesund'][daten_RKI['datum'] == dat]
        case_state.append(np.sum(case_state_dat))#[case_state_dat > 0]))
    # cumulative state-wide case numbers over the report dates
    num_state = np.cumsum(np.array(case_state))
    # select unique region: one (ID, name) entry per county
    indexes = np.unique(daten_RKI['lkID'], return_index=True)[1]
    uID = daten_RKI['lkID'][indexes]
    u_index_name = daten_RKI['lk_name'][indexes]
    # sort counties alphabetically by name and put to dic
    sort_index = np.argsort(u_index_name)
    dic_LK = {'name': u_index_name[sort_index], 'ID': uID[sort_index]}
    # rename umlaut: replace UTF-8 (and double-encoded) umlaut byte sequences
    # with ASCII transliterations (oe/ae/ue/ss)
    for i in range(len(dic_LK['name'])):
        dic_LK['name'][i] = dic_LK['name'][i].replace('\xc3\xb6', 'oe')
        dic_LK['name'][i] = dic_LK['name'][i].replace('\xc3\x83\xc2\xb6', 'ae')
        dic_LK['name'][i] = dic_LK['name'][i].replace('\xc3\xa4', 'ae')
        dic_LK['name'][i] = dic_LK['name'][i].replace('\xc3\xbcr', 'ue')
        dic_LK['name'][i] = dic_LK['name'][i].replace('\xc3\x83\xc2\xbc', 'ue')
        dic_LK['name'][i] = dic_LK['name'][i].replace('\xc3\xbc', 'ue')
        dic_LK['name'][i] = dic_LK['name'][i].replace('\xc3\x9f', 'ss')
        dic_LK['name'][i] = dic_LK['name'][i].replace('\xc3\x83\xc5\xb8', 'ss')
    # restrict all further processing to the requested county
    daten_RKI = daten_RKI[daten_RKI['lkID'] == LandkreisID]
    region_name = dic_LK['name'][dic_LK['ID'] == LandkreisID][0]
    print 'region_name loaded', region_name
    # splitting date strings (format year/month/day) into integer parts
    year = np.zeros(len(daten_RKI['datum'])).astype('int')
    month = np.zeros(len(daten_RKI['datum'])).astype('int')
    day = np.zeros(len(daten_RKI['datum'])).astype('int')
    dat_sting = daten_RKI['datum']
    for i in range(len(dat_sting)):
        li = dat_sting[i].split('/')
        year[i] = int(li[0])
        month[i] = int(li[1])
        day[i] = int(li[2])
    # sum up unique dates: several rows can share a report date (age/sex strata)
    from astropy.table import Table
    RKI_tab = Table([daten_RKI['datum'], year, month, day, daten_RKI['fall'], daten_RKI['tod'], daten_RKI['gesund']],
                    names=('datum', 'year', 'month', 'day', 'fall', 'tod', 'gesund'))
    #print RKI_tab
    udate = np.unique(RKI_tab['datum'])
    uday = np.zeros(shape=udate.shape)
    umonth = np.zeros(shape=udate.shape)
    uyear = np.zeros(shape=udate.shape)
    ufall = np.zeros(shape=udate.shape)
    utod = np.zeros(shape=udate.shape)
    ugesund = np.zeros(shape=udate.shape)
    #print RKI_tab['datum']
    for i in range(len(udate)):
        cond = (RKI_tab['datum'] == udate[i])
        fall_day = RKI_tab['fall'][cond]
        #print fall_day[fall_day < 0]
        ufall[i] = np.sum(fall_day)#[fall_day > 0])
        tod_day = RKI_tab['tod'][cond]
        utod[i] = np.sum(tod_day)#[tod_day > 0])
        #print tod_day[tod_day < 0]
        gesund_day = RKI_tab['gesund'][cond]
        ugesund[i] = np.sum(gesund_day)#[gesund_day > 0])
        #print gesund_day[gesund_day < 0]
        uday[i] = RKI_tab['day'][cond][0]
        umonth[i] = RKI_tab['month'][cond][0]
        uyear[i] = RKI_tab['year'][cond][0]
    # return cumulative series for the county plus the state-wide summary
    return {'fall': np.cumsum(ufall), 'tod': np.cumsum(utod), 'gesund':np.cumsum(ugesund)}, uday, umonth, region_name, dic_LK, [num_state, dat_state, state_name]#, uyear, udate
##################################################################################################
# Logarithmic Plot of Cumulative Cases (Logarithmische Darstellung der aufsummierten Fallzahlen) #
##################################################################################################
def get_people_of_county(asked_ID, filename = 'data_RKI/12411-001.csv'):
    '''
    Looks up the population of one county in the census table.

    The table is semicolon-separated; the first column holds the county ID
    and the last column the population count.  The first character of
    asked_ID is dropped before matching (the census IDs omit it).
    '''
    import numpy as np
    ids, populations = np.loadtxt(filename, delimiter=';',
                                  usecols=(0, -1), unpack=True,
                                  dtype={'names': ('EW_ID', 'EW'), 'formats': ( 'S10', 'i4')})
    # drop the leading character so the RKI-style ID matches the census IDs
    wanted_id = asked_ID[1:]
    return populations[ids == wanted_id][0]
def plot_corona(num_dic, day, month, name, ID, geraet_min=None, geraet_max=None, anteil_beatmung=0.05):
'''
Plots cumulative case numbers against time for the specific county. Fits for any dataset
larger than eight a exponential function and estimates the doubling time. For the fit only
the last eight data points are used to get a recent description of the evolution. Estimates the
the doubling time from every fit by taking the fitting constants b of the ``y = a * exp(b * x)``
underlying theory into account::
``DT = ln(2)/b``
Input
=====
num_dic : dictionary {'fall': array, 'tod':array, 'gesund': array}
Array of cumulative number of cases
day : np.array
String with 5 number entries to select the specific county.
name : str
Name of specific region
ID : str
ID of county in Germany e.g. '09182' for LK Miesbach
geraet_min : int, None
Minimum capacity of intensive care
geraet_max : int, default = None
Maximum capacity of intensive care
anteil_beatmung : float, default = 0.05
Approximated fraction of demand for intensive care
return
======
DT : list
Contains name of county, day array and DT array containing the 8 last data points
where fit was possible
name : str
Name of specific region
day : np.array
Array of 8 days before and including the fit day
val_DT : list
list containing the calculated day-depending doubling times
rates : list
list containing rates (mortality rate, recovery rate, still ill rate)
pass_all : dict
dictionary of daily values of new cases, deaths and recovered cases
'''
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
print '-' * 30
print name
print '-' * 30
num = num_dic['fall'][num_dic['fall'] > 0]
num_tod = num_dic['tod'][num_dic['fall'] > 0]
num_gesund = num_dic['gesund'][num_dic['fall'] > 0]
day = day[num_dic['fall'] > 0]
month = month[num_dic['fall'] > 0]
day_max = 150.
day_ticks = [14, 18, 22, 26, 30, 3, 7, 11, 15, 19, 23, 27, 1, 5, 9, 13, 17, 21, 25, 29, 2, 6, 10, 14, 18, 22, 26, 30, 3, 7, 11, 15, 19, 23, 27]
fig, ax = plt.subplots(4, figsize=(10,22), gridspec_kw={'height_ratios': [3, 1, 1, 1]})
ax[0].set_title(name + ' (#' + ID +')')
ax[0].axis([13, day_max, 0.9, 1e5])
ax[0].set_title('Abb. 1', loc='right', fontsize=8)
ax[1].axis([13, day_max, 0.8, 1e3])
ax[1].set_title('Abb. 2', loc='right', fontsize=8)
ax[2].set_xlim([13, day_max])
ax[2].set_title('Abb. 3', loc='right', fontsize=8)
ax[3].set_xlim([13, day_max])
ax[3].set_title('Abb. 4', loc='right', fontsize=8)
####
# move to March time frame
#########
day_real = np.copy(day)
day[month == 2] = day[month == 2] - 29
day[month == 1] = day[month == 1] - 29 - 31
day[month == 4] = day[month == 4] + 31
day[month == 5] = day[month == 5] + 31 + 30
day[month == 6] = day[month == 6] + 31 + 30 + 31
day[month == 7] = day[month == 7] + 31 + 30 + 31 + 30
#print 'day now', day
#print 'day_real', day_real
#########
# fit
#########
def func(x, a, b):#, c):
#return a * np.exp(b * x)# + c #
#return a * b**(c*x)
return np.log(a) + b * x #log-linear
x = np.arange(10,day_max,0.5)
# fit only when there are more than 6 data points and cases every day.
data_points = range(8, len(day)+1)
DTs = []
Ntot_today = []
Ntot_week = []
R4s = []
col = 30
colapp = []
print 'Tag DTs R4'
for cut in data_points:
#print day[cut-8:cut]
#print num[cut-8:cut]
#########
### changed 8 fit
#########
#popt, pcov = curve_fit(func, day[:cut], num[:cut])
popt, pcov = curve_fit(func, day[cut-8:cut], np.log(num[cut-8:cut]))
#print len(day[cut-8:cut])
#####
# loglog
cond_week = day[cut-8:cut] > day[cut-1] - 7
num_week = num[cut-8:cut][cond_week]
Ntot_today.append(num[cut-8:cut][-1])
Ntot_week.append(num_week[-1] - num_week[0])
########
# Reproduction number for Rtime notification days (4 days strict)
#%%%%%%%%#daytoday = int(day[cut-1])
#%%%%%%%%#dayminus4 = int(daytoday - 5)
#%%%%%%%%#dayminus8 = int(daytoday - 9)
#%%%%%%%%#print daytoday, dayminus4, dayminus8
#%%%%%%%%#
#%%%%%%%%#print num[day == daytoday], num[day == dayminus4], num[day == dayminus8]
#%%%%%%%%#
#%%%%%%%%#R4_row = (num[day == daytoday] - num[day == dayminus4]) / (num[day == dayminus4] - num[day == dayminus8])
#%%%%%%%%#print R4_row
#%%%%%%%%#
#%%%%%%%%#raise Exception('stop')
########
# Reproduction number for Rtime notification days
#print num[cut-8:cut]
if cut-9 < 0:
num_before_int = num[cut-8:cut][0]
else: num_before_int = num[cut-8] - num[cut-9]
num_diff_8 = np.append(num_before_int, np.diff(num[cut-8:cut]))
#print num_diff_8
R4 = np.sum(num_diff_8[4:]) / np.sum(num_diff_8[0:4])
R4s.append(R4)
#print num_diff_8[0:4], num_diff_8[4:]
#########
# doubling time
#########
DT = np.round(np.log(2) / popt[1],2)
#print DT#, popt
DTs.append(DT)
#print popt, np.log(2) / popt[1]
print '%02d'%int(day_real[cut-1]) + '.' + '%02d'%int(month[cut-1]), '%6.2f'%DT#, '%6.2f'%R4
#print("a =", popt[0], "+/-", pcov[0,0]**0.5)
#print("b =", popt[1], "+/-", pcov[1,1]**0.5)
#print("c =", popt[2], "+/-", pcov[2,2]**0.5)
##########
# plot
##########
########
# plot fit
#########
day_label = 'Fit am ' + '%02d'%int(day_real[cut-1]) + '.' + '%02d'%int(month[cut-1]) + '; VZ: ' + '%6.2f'%DT + ' d'
ax[0].semilogy(x, np.exp(func(x, *popt)), '-', color=plt.cm.viridis(int(col)),
label=day_label)
colapp.append(int(col))
col = col + 256 / len(data_points)
########
# plot Beatmungsbedarf
#########
if cut == data_points[-1]:
# Beatmungsampel
bedarf = anteil_beatmung * np.exp(func(x, *popt))
ax[0].semilogy(x, bedarf,
'-', lw=3, color=plt.cm.Reds(200),
label='Ampel (Beatmungsbedarf ca. ' + str(int(anteil_beatmung*100)) + '%)')
ax[0].semilogy(x[(bedarf < geraet_max)],bedarf[(bedarf < geraet_max)],
'-', lw=3, color=plt.cm.jet(180),
label='Ampel (Beatmungsbedarf ca. ' + str(int(anteil_beatmung*100)) + '%)')
ax[0].semilogy(x[bedarf < geraet_min],bedarf[bedarf < geraet_min],
'-', lw=3, color=plt.cm.Greens(200),
label='Ampel (Beatmungsbedarf ca. ' + str(int(anteil_beatmung*100)) + '%)')
# Fehler
ax[0].semilogy(x, 0.05*np.exp(func(x,
popt[0] - pcov[0,0]**0.5,
popt[1] - pcov[1,1]**0.5)),
#popt[2] - pcov[2,2]**0.5),
'--', color='k', alpha=0.5, label='Unsicherheiten')
ax[0].semilogy(x, 0.05*np.exp(func(x,
popt[0] + pcov[0,0]**0.5,
popt[1] + pcov[1,1]**0.5)),
#popt[2] + pcov[2,2]**0.5),
'--', color='k', alpha=0.5)
#print 'day now2', day
####
# Beatmungs Kapazitaet
#########
ax[0].plot([x[0], x[-1]], [geraet_min]*2, 'k:', label="Kapazitaet Beatmungsapparate")
ax[0].plot([x[0], x[-1]], [geraet_max]*2, 'k:')
####
# gemeldete Fallzahlen
#########
ax[0].semilogy(day, num, 'k+', label="COVID19 erkrankt")
ax[0].semilogy(day, num_tod, 'k*', label="davon verstorben")
ax[0].semilogy(day, num_gesund, 'ko', alpha=0.3, label="davon genesen")
#print 'day now3', day
print '+' * 30
print 'Sterberate (%): ', np.round(num_tod[-1] / num[-1] * 100, 2)
print 'Gesunde (%): ', np.round(num_gesund[-1] / num[-1] * 100, 2)
print '+' * 30
#############
# formating
#############
from matplotlib.ticker import ScalarFormatter
for axis in [ax[0].xaxis, ax[0].yaxis]:
axis.set_major_formatter(ScalarFormatter())
ax[0].grid(True, which="both")
ax[0].set_xticks(np.arange(14, day_max, 4))
ax[0].set_xticklabels(day_ticks)
ax[0].text(13, 0.5, 'Mar')
ax[0].text(31, 0.5, 'Apr')
ax[0].text(31+30, 0.5, 'Mai')
ax[0].text(31+30+31, 0.5, 'Juni')
ax[0].text(31+30+31+30, 0.5, 'Juli')
ax[0].annotate('Ausgangssperre', ha='center', xy=(21, ax[0].get_ylim()[0]), xytext=(21, 0.4),
arrowprops=dict(arrowstyle= '-|>', color='grey', lw=2, ls='-'), alpha=0.6)
ax[0].annotate('Ostern', ha='center', xy=(31+12, ax[0].get_ylim()[0]), xytext=(31+12, 0.4),
arrowprops=dict(arrowstyle= '-|>', color='grey', lw=2, ls='-'), alpha=0.6)
ax[0].annotate('Ende Ferien', ha='center', xy=(31+20, ax[0].get_ylim()[0]), xytext=(31+20, 0.4),
arrowprops=dict(arrowstyle= '-|>', color='grey', lw=2, ls='-'), alpha=0.6)
# credit bar
credit = 'Christine Greif\nhttp://www.usm.uni-muenchen.de/~koepferl\nThis work is licensed under CC-BY-SA 4.0\nData: NPGEO-DE; VZ = Verdopplungszeit'
loc_label = ax[0].get_xlim()[1] * 1.12
link = ax[0].text(loc_label, 9e4, credit, fontsize=8, va='top')
link.set_url('http://www.usm.uni-muenchen.de/~koepferl')
# label
ax[0].set_ylabel('Gesamte Fallzahlen')
lgd = ax[0].legend(loc='best', bbox_to_anchor=(1.12, 0.93))
# percent
#########
EW_county = get_people_of_county(asked_ID=ID)
axi = ax[0].twinx()
#print 'lim', ax.get_ylim(), ax.get_ylim()[0] / EW_county * 100, ax.get_ylim()[1] / EW_county * 100, EW_county
axi.set_ylim(ax[0].get_ylim()[0] / EW_county * 100 , ax[0].get_ylim()[1] / EW_county * 100 )
axi.set_yscale('log')
import matplotlib.ticker as mtick
import matplotlib.ticker as ticker
axi.yaxis.set_major_formatter(ScalarFormatter())
axi.yaxis.set_major_formatter(ticker.FuncFormatter(lambda y,pos: ('{{:.{:1d}f}}%'.format(int(np.maximum(-np.log10(y),0)))).format(y)))
#axi.yaxis.set_major_formatter(FormatStrFormatter('%4d'))
#axi.yaxis.set_major_formatter(mtick.PercentFormatter())
axi.set_ylabel('Prozentualer Anteil zur Gesamteinwohnerzahl (' + str(EW_county) + ') im Kreis')
###########
# plot 2
###########
pass_all = {'day': day, 'fall': np.append(num[0], np.diff(num)), 'gesund': np.append(num_gesund[0], np.diff(num_gesund)), 'tod': np.append(num_tod[0], np.diff(num_tod))}
ax[1].set_ylabel('Taeglich gemeldete Fallzahlen')
# gemittelt ueber 7 Tage
all_smooth = np.zeros(shape = day[6:].shape)
tod_smooth = np.zeros(shape = day[6:].shape)
gesund_smooth = np.zeros(shape = day[6:].shape)
for z in range(7):
stop = -6+z
#print z, stop
if stop != 0:
all_smooth = all_smooth + 1/7. * pass_all['fall'][z:stop]
tod_smooth = tod_smooth + 1/7. * pass_all['tod'][z:stop]
gesund_smooth = gesund_smooth + 1/7. * pass_all['gesund'][z:stop]
else:
all_smooth = all_smooth + 1/7. * pass_all['fall'][z:]
tod_smooth = tod_smooth + 1/7. * pass_all['tod'][z:]
gesund_smooth = gesund_smooth + 1/7. * pass_all['gesund'][z:]
#all_smooth = 0.25 * (pass_all['fall'][0:-3] + pass_all['fall'][1:-2] + pass_all['fall'][2:-1] + pass_all['fall'][3:0])
#tod_smooth = 0.25 * (pass_all['tod'][0:-3] + pass_all['tod'][1:-2] + pass_all['tod'][2:-1] + pass_all['tod'][3:0])
#gesund_smooth = 0.25 * (pass_all['gesund'][0:-3] + pass_all['gesund'][1:-2] + pass_all['gesund'][2:-1] + pass_all['gesund'][3:0])
ax[1].semilogy(day[6:], all_smooth, 'r-', label='neu erkrankt (7-Tagesmittel)')
ax[1].semilogy(day[6:], tod_smooth, 'k-', label='verstorben (7-Tagesmittel)')
ax[1].semilogy(day[6:], gesund_smooth, 'k-', alpha=0.3, label='genesen (7-Tagesmittel)')
# box
import matplotlib.patches as patches
for b in range(len(day)):
if b == 0:
label_fall = "neu erkrankt"
label_gesund = 'genesen'
label_tod = 'verstorben'
else:
label_fall = None
label_gesund = None
label_tod = None
ax[1].add_patch(patches.Rectangle((day[b]-0.45,0.),0.9,pass_all['fall'][b],
linewidth=1,edgecolor='k',facecolor=plt.cm.jet(180), label=label_fall))
ax[1].add_patch(patches.Rectangle((day[b]-0.45,0.),0.9,pass_all['gesund'][b],
linewidth=1,edgecolor='k',facecolor='None', label=label_gesund, hatch='////'))
ax[1].add_patch(patches.Rectangle((day[b]-0.45,0.),0.9,pass_all['tod'][b],
linewidth=1,edgecolor='k',facecolor='w', label=label_tod))
#h = ax[1].hist(pass_all['fall'], bins=np.append(day - 0.5, day[-1] + 0.5))
#print h
#print pass_all['fall']
#ax[1].hist(pass_all['tod'], bins=day)
#ax[1].hist(pass_all['gesund'], bins=day)
from matplotlib.ticker import ScalarFormatter
for axis in [ax[1].xaxis, ax[1].yaxis]:
axis.set_major_formatter(ScalarFormatter())
ax[1].set_axisbelow(True)
ax[1].grid(True, which="both")
ax[1].set_xticks(np.arange(14, day_max, 4))
ax[1].set_xticklabels(day_ticks)
tx1 = ax[1].text(13, 0.3, 'Mar')
tx2 = ax[1].text(31, 0.3, 'Apr')
tx3 = ax[1].text(31+30, 0.3, 'Mai')
tx4 = ax[1].text(31+30+31, 0.3, 'Juni')
tx5 = ax[1].text(31+30+31+30, 0.3, 'Juli')
ax[1].annotate('Ausgangssperre', ha='center', xy=(21, ax[1].get_ylim()[0]), xytext=(21, 0.2),
arrowprops=dict(arrowstyle= '-|>', color='grey', lw=2, ls='-'), alpha=0.6)
ax[1].annotate('Ostern', ha='center', xy=(31+12, ax[1].get_ylim()[0]), xytext=(31+12, 0.2),
arrowprops=dict(arrowstyle= '-|>', color='grey', lw=2, ls='-'), alpha=0.6)
ax[1].annotate('Ende Ferien', ha='center', xy=(31+20, ax[1].get_ylim()[0]), xytext=(31+20, 0.2),
arrowprops=dict(arrowstyle= '-|>', color='grey', lw=2, ls='-'), alpha=0.6)
ax[1].legend(loc='best')
###########
# plot 3
###########
ax[2].set_ylabel('Verdopplungszeiten in Tage')
ax[2].plot(ax[2].get_xlim(), [10,10], ':', lw =2, color='grey', label='VZ = 10')
ax[2].plot(day[7:], np.array(DTs), 'k.-')
#from scipy.signal import find_peaks
#peaks, _ = find_peaks(np.array(DTs))
#print 'peaks', peaks
#ax[2].plot(peaks, np.array(DTs)[peaks], "Hb")
#ax[2].plot(day[7:], np.gradient(DTs, day[7:]), 'ko-')
diff = np.diff(DTs)
ax[2].plot(day[7:][1:][diff < 0], np.array(DTs)[1:][diff < 0], '^', color=plt.cm.Reds(200), label='Achtung: VZ faellt (!!!)')
#ax[2].plot(day[7:][1:], diff, 'k^-')
#print 'gradient', gradient
#print 'diff', np.diff(DTs)
ax[2].grid(True, which="both")
ax[2].set_xticks(np.arange(14, day_max, 4))
ax[2].set_xticklabels(day_ticks)
ax[2].legend(loc='best')
offset = ax[2].get_ylim()[0] - (ax[2].get_ylim()[1] - ax[2].get_ylim()[0]) / 5.
tx1 = ax[2].text(13, offset, 'Mar')
tx2 = ax[2].text(31, offset, 'Apr')
tx3 = ax[2].text(31+30, offset, 'Mai')
tx4 = ax[2].text(31+30+31, offset, 'Juni')
tx5 = ax[2].text(31+30+31+30, offset, 'Juli')
ax[2].annotate('Lock-down', ha='center', xy=(21, ax[2].get_ylim()[0]), xytext=(21, offset),
arrowprops=dict(arrowstyle= '-|>', color='grey', lw=2, ls='-'), alpha=0.6)
ax[2].annotate('Ostern', ha='center', xy=(31+12, ax[2].get_ylim()[0]), xytext=(31+12, offset),
arrowprops=dict(arrowstyle= '-|>', color='grey', lw=2, ls='-'), alpha=0.6)
ax[2].annotate('Ende Ferien', ha='center', xy=(31+20, ax[2].get_ylim()[0]), xytext=(31+20, offset),
arrowprops=dict(arrowstyle= '-|>', color='grey', lw=2, ls='-'), alpha=0.6)
###########
# plot 4
###########
########
# Reproduction number for Rtime notification days (4 days interpoliert)
minus8 = np.interp(day-8-1, day, num)
minus4 = np.interp(day-4-1, day, num)
minus_all = np.interp(np.arange(day[0], day[-1]), day, num)
#print day - 8 - 1
#print day
#print num
#print day - 4 - 1
#for i in range(len(np.arange(day[0], day[-1]))):
# print np.arange(day[0], day[-1])[i], minus_all[i]
R_number_interp = (num - minus4) / (minus4 - minus8)
R_number_interp[day-8-1 < day[0]] = np.nan # before 1st case
R_number_interp[day-4-1 < day[0]] = np.nan # before 1st case
#for da in day:
# if da-4 < day[0]:
# new_minus4 = 0
# else: new_minus4 = np.interp(da-4, day, num)
# if da-8 < day[0]:
# new_minus8 = 0
# else:
# new_minus8 = np.interp(da-8, day, num)
ax[3].set_ylabel('Interpolierte Reproduktionszahl')
ax[3].plot(ax[3].get_xlim(), [1,1], ':', lw =2, color='grey', label='R = 1')
ax[3].plot(day, R_number_interp, 'k.-')
ax[3].plot(day[R_number_interp >= 1], R_number_interp[R_number_interp >= 1], '^', color=plt.cm.Reds(200), label='Achtung: R > 1 (!!!)')
ax[3].grid(True, which="both")
ax[3].set_xticks(np.arange(14, day_max, 4))
ax[3].set_xticklabels(day_ticks)
ax[3].legend(loc='best')
offset = ax[3].get_ylim()[0] - (ax[3].get_ylim()[1] - ax[3].get_ylim()[0]) / 5.
tx1 = ax[3].text(13, offset, 'Mar')
tx2 = ax[3].text(31, offset, 'Apr')
tx3 = ax[3].text(31+30, offset, 'Mai')
tx4 = ax[3].text(31+30+31, offset, 'Juni')
tx5 = ax[3].text(31+30+31+30, offset, 'Juli')
ax[3].annotate('Lock-down', ha='center', xy=(21, ax[3].get_ylim()[0]), xytext=(21, offset),
arrowprops=dict(arrowstyle= '-|>', color='grey', lw=2, ls='-'), alpha=0.6)
ax[3].annotate('Ostern', ha='center', xy=(31+12, ax[3].get_ylim()[0]), xytext=(31+12, offset),
arrowprops=dict(arrowstyle= '-|>', color='grey', lw=2, ls='-'), alpha=0.6)
ax[3].annotate('Ende Ferien', ha='center', xy=(31+20, ax[3].get_ylim()[0]), xytext=(31+20, offset),
arrowprops=dict(arrowstyle= '-|>', color='grey', lw=2, ls='-'), alpha=0.6)
ax[3].text(ax[3].get_xlim()[1] * 1.02,
ax[3].get_ylim()[1] * 1.,
'zu Abb. 1: \nBei Kreisen mit sehr kurzen Verdopplungszeiten wird \nder Verlauf nicht/kaum flacher; mit sehr langen \nVerdopplungszeiten (wenigen Neuerkrankten) \nist der Verlauf fast horizontal. \n(Ziel: horizontale Linie).')
ax[3].text(ax[3].get_xlim()[1] * 1.02,
ax[3].get_ylim()[1] * 0.75,
'zu Abb. 2: \nBalkendiagramm der taeglich gemeldeten Fallzahlen. \n(Ziel: keine gelben und weissen Balken.)'
)
ax[3].text(ax[3].get_xlim()[1] * 1.02,
ax[3].get_ylim()[1] * 0.35,
'zu Abb. 3: \nVerdopplungszahl gibt die Zeit an in der sich die \nFallzahlen verdoppeln. Verdopplungszeiten kleiner \nals 10 oder abnehmend sind bedenklich. \n(Ziel: keine verkuerzenden Verdopplungszeiten \nund viel groesser als 10).'
)
ax[3].text(ax[3].get_xlim()[1] * 1.02,
ax[3].get_ylim()[0],
'zu Abb. 4: \nReproduktionszahl gibt die Anzahl der \nWeiteransteckungen durch einen Infizierten an. \nReproduktionszahl groesser als 1 ist bedenklich. \n(Ziel: Reproduktionszahl viel kleiner als 1).'
)
fig.savefig('expert/' + name + '_expert.pdf', dpi=300, overwrite=True, bbox_inches='tight', bbox_extra_artists=(lgd, tx1, tx2, tx3, tx4, tx5))
##################
# save plot ax[0]
##################
ax[1].remove()
ax[2].remove()
ax[3].remove()
#for a in ax:
# print 'ax'
#fig.set_size_inches(10, 8)
#ax[0].set_aspect(aspect=0.5)
#fig.set_gridspec_kw({'height_ratios': [1, 0, 0, ]})
#fig.gridspec_kw({'height_ratios': [1]})
#extent = ax[0].get_window_extent().transformed(fig.dpi_scale_trans.inverted())
# Pad the saved area by 10% in the x-direction and 20% in the y-direction
fig.savefig('plots/' + name + '.pdf', dpi=300, overwrite=True, bbox_extra_artists=(lgd, tx1, tx2, tx3, tx4, tx5), bbox_inches='tight')
rates = {'death_rate': num_tod / num * 100,
'recover_rate': num_gesund / num * 100,
'ill_rate': (num - num_gesund - num_tod) / num * 100,
'day': day}
return [name, day[7:], DTs, Ntot_today, Ntot_week, rates, R4s, pass_all]
#####################################
# Doubeling Time (Verdopplungszeit) #
#####################################
def plot_DT(DT, state, ncol=4, nrow=3):
    '''
    Plots day-dependent doubling time against time for the selected counties.

    Produces four multi-panel figures (8 counties per panel, panels filled
    row by row) and saves each as a PDF named after the state:
    doubling times ('DT_*'), log-log growth ('loglog_*'),
    death rates ('rate_*') and estimated reproduction numbers ('R_*').

    input
    ======
    DT : dict
        Maps county id -> per-county record list: [name, day array,
        doubling-time array, total cases, weekly cases, rates dict,
        R estimates, ...] (see the county plotting routine's return value).
    state : list
        Output from load_RKI; state[0] are cumulative state-wide case
        numbers, state[1] the matching 'y/m/d' date strings, state[2]
        the state name used in labels and file names.
    ncol : int
        Number of columns in plot (should not be changed for now.)
    nrow : int
        Number of rows in plot (should not be changed for now.)

    returns
    =======
    Saves diagrams as PDF.
    '''
    ######################################
    # DT for state
    #######################
    ####
    # move to March time frame
    #########
    day_max = 150.
    day_ticks = [14, 18, 22, 26, 30, 3, 7, 11, 15, 19, 23, 27, 1, 5, 9, 13, 17, 21, 25, 29, 2, 6, 10, 14, 18, 22, 26, 30, 3, 7, 11, 15, 19, 23, 27]
    dat_states = state[1]
    state_day = []
    # Convert 'y/m/d' date strings to day numbers counted from 1 March
    # (January/February map to negative/small values).
    for i in range(len(dat_states)):
        y, m, d = dat_states[i].split('/')
        if m == '01': state_day.append(int(d) - 29 - 31)
        if m == '02': state_day.append(int(d) - 29)
        if m == '03': state_day.append(int(d))
        if m == '04': state_day.append(int(d) + 31)
        if m == '05': state_day.append(int(d) + 31 + 30)
        if m == '06': state_day.append(int(d) + 31 + 30 + 31)
        if m == '07': state_day.append(int(d) + 31 + 30 + 31 + 30)
    #########
    # fit
    #########
    # Linear fit in log space: log(N) = log(a) + b * x  (exponential growth).
    def func(x, a, b):#, c):
        #return a * np.exp(b * x)# + c
        #return a * b**(c*x)
        return np.log(a) + b * x
    x = np.arange(10,day_max,0.5)
    # fit only when there are more than 8 data points and cases every day.
    data_points = range(8, len(state_day)+1)
    state_day = np.array(state_day)
    state_num = np.array(state[0])
    DTs_state = []
    from scipy.optimize import curve_fit
    # Sliding 8-day window fit over the state-wide cumulative case numbers;
    # each window yields one doubling-time value.
    for cut in data_points:
        popt, pcov = curve_fit(func, state_day[cut-8:cut], np.log(state_num[cut-8:cut]))
        #########
        # doubling time
        #########
        # doubling time DT = ln(2) / growth rate b
        DT_state = np.round(np.log(2) / popt[1],2)
        DTs_state.append(DT_state)
    # One figure per quantity: DT, log-log growth, death rate, R estimate.
    fig, axs = plt.subplots(nrow, ncol, figsize=(28,21))
    fig2, axs2 = plt.subplots(nrow, ncol, figsize=(28,21))
    fig3, axs3 = plt.subplots(nrow, ncol, figsize=(28,21))
    fig4, axs4 = plt.subplots(nrow, ncol, figsize=(28,21))
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.1, hspace=0.1)
    axs[0,0].set_title('Entwicklung der Verdopplungszeiten auf Kreisebene')
    axs[0,1].set_title('Evolution of the Doubeling Time for German Counties')
    axs[0,2].set_title('Entwicklung der Verdopplungszeiten auf Kreisebene')
    axs[0,3].set_title('Evolution of the Doubling Time for German Counties')
    print len(DT.keys())
    # sort after Landkreis name not ID
    DT_ids = DT.keys()
    DT_names = []
    for key in DT_ids:
        DT_names.append(DT[key][0])
        print key, DT[key][0]
    DT_ids = np.array(DT_ids)
    DT_names = np.array(DT_names)
    DT_index = np.argsort(DT_names)
    DT_ids = DT_ids[DT_index]
    DT_names = DT_names[DT_index]
    #print '*' * 10
    #print DT_names[95], DT_ids[95]
    sorted_keys = DT_ids
    from matplotlib.cm import get_cmap
    cmap = get_cmap('inferno') # type: matplotlib.colors.ListedColormap
    # Place 8 counties per panel; line_col picks a distinct colormap index
    # per county so colors fade within each panel.
    for i in range(len(sorted_keys)):
        if i in range(0, 8):
            ax = axs[0,0]
            ax2 = axs2[0,0]
            ax3 = axs3[0,0]
            ax4 = axs4[0,0]
            line_col = 20 + 30 * (8 - i)
        if i in range(8, 16):
            ax = axs[0,1]
            ax2 = axs2[0,1]
            ax3 = axs3[0,1]
            ax4 = axs4[0,1]
            line_col = 20 + 30 * (16 - i)
        if i in range(16,24):
            ax = axs[0,2]
            ax2 = axs2[0,2]
            ax3 = axs3[0,2]
            ax4 = axs4[0,2]
            line_col = 20 + 30 * (24 - i)
        if i in range(24,32):
            ax = axs[0,3]
            ax2 = axs2[0,3]
            ax3 = axs3[0,3]
            ax4 = axs4[0,3]
            line_col = 20 + 30 * (32 - i)
        if i in range(32,40):
            ax = axs[1,0]
            ax2 = axs2[1,0]
            ax3 = axs3[1,0]
            ax4 = axs4[1,0]
            line_col = 20 + 30 * (40 - i)
        if i in range(40,48):
            ax = axs[1,1]
            ax2 = axs2[1,1]
            ax3 = axs3[1,1]
            ax4 = axs4[1,1]
            line_col = 20 + 30 * (48 - i)
        if i in range(48,56):
            ax = axs[1,2]
            ax2 = axs2[1,2]
            ax3 = axs3[1,2]
            ax4 = axs4[1,2]
            line_col = 20 + 30 * (56 - i)
        if i in range(56,64):
            ax = axs[1,3]
            ax2 = axs2[1,3]
            ax3 = axs3[1,3]
            ax4 = axs4[1,3]
            line_col = 20 + 30 * (64 - i)
        if i in range(64,72):
            ax = axs[2,0]
            ax2 = axs2[2,0]
            ax3 = axs3[2,0]
            ax4 = axs4[2,0]
            line_col = 20 + 30 * (72 - i)
        if i in range(72,80):
            ax = axs[2,1]
            ax2 = axs2[2,1]
            ax3 = axs3[2,1]
            ax4 = axs4[2,1]
            line_col = 20 + 30 * (80 - i)
        if i in range(80,88):
            ax = axs[2,2]
            ax2 = axs2[2,2]
            ax3 = axs3[2,2]
            ax4 = axs4[2,2]
            line_col = 20 + 30 * (88 - i)
        if i in range(88,96):
            ax = axs[2,3]
            ax2 = axs2[2,3]
            ax3 = axs3[2,3]
            ax4 = axs4[2,3]
            line_col = 20 + 30 * (96 - i)
        key = sorted_keys[i]
        # The first county of each panel also draws the state-wide average.
        if i in [0, 8 ,16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96]:
            ax.semilogy(state_day[7:], DTs_state, '.:k', label= state[2] + ' average')
            print '-' * 20
            print state[2], DTs_state[-1], int(state_day[7:][-1])
            print '-' * 20
        ax.semilogy(DT[key][1], DT[key][2], '.-', c = cmap(line_col), label=DT[key][0])
        ax2.loglog(DT[key][3], DT[key][4], '.-', c = cmap(line_col), label=DT[key][0])
        print DT[key][2][-1], int(DT[key][1][-1]), DT[key][0]
        ax3.plot(DT[key][5]['day'], DT[key][5]['death_rate'], '*-', c = cmap(line_col), label=DT[key][0])
        ax4.plot(DT[key][1], DT[key][6], '.-', c = cmap(line_col), label=DT[key][0])
        #ax3.plot(DT[key][5]['day'], DT[key][5]['recover_rate'], 'o-', c = cmap(line_col), label=DT[key][0])
        #ax3.plot(DT[key][5]['day'], DT[key][5]['ill_rate'], 'x-', c = cmap(line_col), label=DT[key][0])
    ######
    # axis
    #factor_1 = 100/60.
    # Credit text goes in the lower-right panel of each figure.
    x_pos = 57
    credit2 = 'Christine Greif\nhttp://www.usm.uni-muenchen.de/~koepferl\nThis work is licensed under CC-BY-SA 4.0\nData: NPGEO-DE'
    link = axs[2,3].text(x_pos, 0.7, credit2, fontsize=8, va = 'top')
    link = axs3[2,3].text(x_pos, -2, credit2, fontsize=8)
    link = axs4[2,3].text(x_pos, -1., credit2, fontsize=8)
    link = axs2[2,3].text(3.5, 0.5, credit2, fontsize=8, va='top')
    link.set_url('http://www.usm.uni-muenchen.de/~koepferl')
    axs[0,0].set_ylabel('Verdopplungszeiten (Tage)')
    axs[1,0].set_ylabel('Doubling Time DT (days)')
    axs[2,0].set_ylabel('Verdopplungszeiten (Tage)')
    # Common axis cosmetics for the doubling-time figure.
    for ax in axs.reshape(-1):
        ax.set_ylim(1.5,500.9)
        ax.set_xlim(13,day_max)
        from matplotlib.ticker import ScalarFormatter
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())
        ax.grid(True, which="both")
        ax.set_xticks(np.arange(14, day_max, 4))
        ax.set_xticklabels(day_ticks)
        ax.legend(loc='upper left')
        #if ax in [axs[2,0], axs[2,1], axs[2,2], axs[2,3]]:
        ax.text(13, 0.8, 'Maerz/March')
        ax.text(31, 0.8, 'April')
        ax.text(31+30, 0.8, 'Mai/May')
        ax.text(31+30+31, 0.8, 'Juni/June')
        ax.text(31+30+31+30, 0.8, 'Juli/July')
    # Log-log figure: total cases vs. new cases last week.
    for ax2 in axs2.reshape(-1):
        from matplotlib.ticker import ScalarFormatter
        for axis in [ax2.xaxis, ax2.yaxis]:
            axis.set_major_formatter(ScalarFormatter())
        ax2.grid(True, which="both")
        ax2.set_ylim(1.5,2000)
        ax2.set_xlim(1.5,10000)
        ax2.legend(loc='upper left')
        if ax2 in [axs2[2,0], axs2[2,1], axs2[2,2], axs2[2,3]]:
            ax2.set_xlabel('Totale Fallzahlen (total number of cases)')
        if ax2 in [axs2[0,0], axs2[1,0], axs2[2,0]]:
            ax2.set_ylabel('Fallzahlen in der letzten Woche (number of new cases last week)')
    # Death-rate figure.
    for ax3 in axs3.reshape(-1):
        ax3.set_ylim(0,20.9)
        ax3.set_xlim(13,day_max)
        ax3.grid(True, which="both")
        ax3.set_xticks(np.arange(14, day_max, 4))
        ax3.set_xticklabels(day_ticks)
        if ax3 in [axs3[0,0], axs3[1,0], axs3[2,0]]:
            ax3.set_ylabel('Sterberaten in %')
        ax3.legend(loc='upper left')
        #if ax in [axs[2,0], axs[2,1], axs[2,2], axs[2,3]]:
        # month labels are drawn slightly below the axis
        offset3 = - 0.07 * ax3.get_ylim()[1]
        ax3.text(13, offset3, 'Maerz/March')
        ax3.text(31, offset3, 'April')
        ax3.text(31+30, offset3, 'Mai/May')
        ax3.text(31+30+31, offset3, 'Juni/June')
        ax3.text(31+30+31+30, offset3, 'Juli/July')
    # Reproduction-number figure.
    for ax4 in axs4.reshape(-1):
        ax4.set_ylim(0,4.9)
        ax4.set_xlim(13,day_max)
        ax4.grid(True, which="both")
        ax4.set_xticks(np.arange(14, day_max, 4))
        ax4.set_xticklabels(day_ticks)
        ax4.legend(loc='upper left')
        #if ax in [axs[2,0], axs[2,1], axs[2,2], axs[2,3]]:
        offset4 = - 0.07 * ax4.get_ylim()[1]
        ax4.text(13, offset4, 'Maerz/March')
        ax4.text(31, offset4, 'April')
        ax4.text(31+30, offset4, 'Mai/May')
        ax4.text(31+30+31, offset4, 'Juni/June')
        ax4.text(31+30+31+30, offset4, 'Juli/July')
        if ax4 in [axs4[1,0]]:
            ax4.set_ylabel('geschaetzte Reproduktionszahl R (Anzahl letzten 4 Meldungen / Anzahl der letzten 4 Meldungen davor)')
    #plt.show()
    fig.savefig('DT_' + state[2] + '.pdf', dpi=300, overwrite=True, bbox_inches='tight')
    fig2.savefig('loglog_' + state[2] + '.pdf', dpi=300, overwrite=True, bbox_inches='tight')
    fig3.savefig('rate_' + state[2] + '.pdf', dpi=300, overwrite=True, bbox_inches='tight')
    fig4.savefig('R_' + state[2] + '.pdf', dpi=300, overwrite=True, bbox_inches='tight')
def docu(LK_ID, DT):
    '''
    Print an English and a German console summary listing the five counties
    with the lowest and the five with the highest latest doubling times.

    input
    ======
    LK_ID : iterable
        County ids; each must be a key of DT.
    DT : dict
        Maps county id -> [name, day array, doubling-time array, ...];
        only the last entry of the day and DT arrays is reported.
    '''
    print '*' * 30
    print 'English Documentation'
    print '*' * 30
    day_print = []
    DT_print = []
    name_print = []
    # Collect the most recent doubling time, its day number, and the
    # county name for every requested county.
    for lkid in LK_ID:
        #print DT[lkid]
        #print DT[lkid][2]
        DT_print.append(DT[lkid][2][-1])
        day_print.append(DT[lkid][1][-1])
        name_print.append(DT[lkid][0])
    # sort
    asort = np.argsort(DT_print)
    DT_print = np.array(DT_print)[asort]
    day_print = np.array(day_print)[asort]
    name_print = np.array(name_print)[asort]
    # Report only the first five and last five entries of the sorted list.
    # Day numbers count from 1 March, so day-31 <= 30 falls in April,
    # larger values in May.
    for i in range(len(name_print)):
        if i == 0:
            #print ' * Bavaria average is ' + str(DT['Bavaria'][2][-1]) + 'd'
            print ' * 5 counties with lowest DTS (the larger the better):'
        if i == len(name_print) - 6:
            print ' * 5 counties with highest DTs (the larger the better):'
        if (i < 5) or (i > len(name_print) - 6) :
            if day_print[i]-31 <= 30: #April
                print ' *', '%6.2f'%DT_print[i], str(int(day_print[i]-31)) + '.4', name_print[i]
            if day_print[i]-31 > 30: #Mai
                print ' *', '%6.2f'%DT_print[i], str(int(day_print[i]-31 - 30)) + '.5', name_print[i]
    print '*' * 30
    print 'German Documentation'
    print '*' * 30
    # Same report again, German wording.
    for i in range(len(name_print)):
        if i == 0:
            #print ' * Bayerischer Durchschnitt mit ' + str(DT['Bavaria'][2][-1]) + 'd'
            print ' * 5 Kreise mit den niedriger Verdopplungszeiten (umso groesser desto besser):'
        if i == len(name_print) - 6:
            print ' * 5 Kreise mit den hoechsten Verdopplungszeiten (umso groesser desto besser):'
        if (i < 5) or (i > len(name_print) - 6) :
            if day_print[i]-31 <= 30: #April
                print ' *', '%6.2f'%DT_print[i], str(int(day_print[i]-31)) + '.4', name_print[i]
            if day_print[i]-31 > 30: #Mai
                print ' *', '%6.2f'%DT_print[i], str(int(day_print[i]-31 - 30)) + '.5', name_print[i]
|
{"hexsha": "d5fb0db1a53baa222fb521a565df296367ee6885", "size": 40508, "ext": "py", "lang": "Python", "max_stars_repo_path": "cov19_local.py", "max_stars_repo_name": "koepferl/CoV19Dahoam", "max_stars_repo_head_hexsha": "0c4ea2db3d1cfc759b53f3e7a3dc84eb4a551c0f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-06T01:00:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-06T01:00:20.000Z", "max_issues_repo_path": "cov19_local.py", "max_issues_repo_name": "koepferl/CoV19Dahoam", "max_issues_repo_head_hexsha": "0c4ea2db3d1cfc759b53f3e7a3dc84eb4a551c0f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-04-04T06:10:32.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-18T21:58:57.000Z", "max_forks_repo_path": "cov19_local.py", "max_forks_repo_name": "koepferl/COVID19Dahoam", "max_forks_repo_head_hexsha": "0c4ea2db3d1cfc759b53f3e7a3dc84eb4a551c0f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1292392301, "max_line_length": 249, "alphanum_fraction": 0.5242668115, "include": true, "reason": "import numpy,from scipy,from astropy", "num_tokens": 13171}
|
from cv2 import cv2
import numpy as np
from matplotlib import pyplot as plt
# Grayscale conversion
def rgb2gray(src):
    """Convert a BGR image (as loaded by ``cv2.imread``) to 8-bit grayscale.

    Uses the ITU-R BT.601 luma weights (0.299 R + 0.587 G + 0.114 B) in
    integer arithmetic, with +500 before the /1000 for round-to-nearest.

    BUGFIX(review): the original unpacked ``cv2.split`` of a BGR image into
    names (red, green, blue) — so the red weight was applied to the blue
    channel — and used 144 instead of the standard 114 for blue.  Both are
    corrected here, and the O(H*W) per-pixel Python loop is vectorized.

    Parameters
    ----------
    src : np.ndarray
        H x W x 3 image in BGR channel order.

    Returns
    -------
    np.ndarray
        H x W grayscale image with the same dtype as the input channels.
    """
    # Widen to uint32 so the weighted sum (max ~255 * 1000) cannot overflow.
    blue = src[:, :, 0].astype(np.uint32)
    green = src[:, :, 1].astype(np.uint32)
    red = src[:, :, 2].astype(np.uint32)
    gray = (red * 299 + green * 587 + blue * 114 + 500) // 1000
    return gray.astype(src.dtype)
# Histogram display + fixed-threshold segmentation
def cacle_hist(src):
    """Show the gray-level histogram of *src*, then binarize it.

    Pixels with value <= 125 become 0; all others become 255.

    Side effect: displays a 256-bin histogram via matplotlib and blocks
    until the window is closed (``plt.show()``), exactly as the original.

    The original per-pixel Python loop (O(H*W)) is replaced by a single
    vectorized ``np.where``; the dead ``else: pass`` branch is removed.

    Parameters
    ----------
    src : np.ndarray
        Grayscale image.

    Returns
    -------
    np.ndarray
        Binary image of the same shape and dtype as *src*.
    """
    plt.hist(src.ravel(), 256)
    plt.show()
    # Vectorized threshold, same result as looping over every pixel.
    return np.where(src <= 125, 0, 255).astype(src.dtype)
# Crop away unneeded foreground first, keeping only the digit block
def find_aera(src):
    """Crop the binarized image to the digit region and label its components.

    Scans rows from the top and from the bottom; the first row that is
    entirely white (no 0-valued pixel) marks a crop boundary, padded by
    5 rows inward.  The cropped region is contour-traced, inverted, and
    passed through connected-component labeling; per-component geometry
    is printed and the label image is displayed.

    NOTE(review): if no all-white row is found, ``start``/``end`` stay -1
    and the slice ``src[-1:-1, :]`` is empty — presumably inputs always
    contain blank margins; confirm against the calling pipeline.

    Parameters
    ----------
    src : np.ndarray
        Binary (0/255) grayscale image.

    Returns
    -------
    np.ndarray
        The cropped (and contour-annotated) region of interest.
    """
    height = src.shape[0]
    width = src.shape[1]
    start = -1
    end = -1
    # Top-down scan: first fully-white row (+5 padding) starts the crop.
    for h in range(height):
        for w in range(width):
            if src[h][w] == 0:
                break
            elif w == width - 1:
                start = h + 5
        if start != -1:
            break
    # Bottom-up scan: first fully-white row (-5 padding) ends the crop.
    for h in range(height - 1, 0, -1):
        for w in range(width):
            if src[h][w] == 0:
                break
            elif w == width - 1:
                end = h - 5
        if end != -1:
            break
    ROI = src[start:end, :]
    contours, hierarchy = cv2.findContours(ROI, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(ROI, contours, -1, color=255, thickness=1)
    # Components are detected on the inverted image (digits become white).
    ROI_inv = cv2.bitwise_not(ROI)
    retval, labels, stats, centroids = cv2.connectedComponentsWithStats(ROI_inv, labels=None, stats=None, centroids=None, connectivity=8, ltype=None)
    print(stats[:, 0])
    # Report components ordered by their left x coordinate.
    stats_ = np.argsort(stats[:, 0])
    print(stats_)
    for i in stats_:
        # BUGFIX: the "lenth" (perimeter) of a w x h box is 2*(w + h);
        # the original printed 2*(w * h), i.e. twice the area.
        print('The rectangular box coordinates are (', stats[i][0], stats[i][1], ')\t(',
              stats[i][0] + stats[i][2], stats[i][1] + stats[i][3], ')\t\t',
              'The area are ', stats[i][4], '\t\t', 'The centroid', centroids[i], '\t\t', 'The lenth are', 2 * (stats[i][2] + stats[i][3]))
    plt.imshow(labels)
    plt.show()
    return ROI
# Read the source image (cv2.imread loads it in BGR channel order)
source_image = cv2.imread(r'img.png')
# Pipeline: grayscale -> histogram-based threshold -> crop/label digit block
gray_image = rgb2gray(source_image)
thresh_image = cacle_hist(gray_image)
ROI = find_aera(thresh_image)
# Show the cropped region until a key is pressed
cv2.imshow('ROI', ROI)
cv2.waitKey(0)
|
{"hexsha": "049ad30b2bfb884fb8fdfce14b8e8ee00f3b9bb2", "size": 2858, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_4.py", "max_stars_repo_name": "Believas/ISBN-barcode-recognition", "max_stars_repo_head_hexsha": "69ab87ec9faa114666d8a651980913d9ee6402ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-30T08:24:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T08:24:35.000Z", "max_issues_repo_path": "test_4.py", "max_issues_repo_name": "Believas/ISBN-barcode-recognition", "max_issues_repo_head_hexsha": "69ab87ec9faa114666d8a651980913d9ee6402ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_4.py", "max_forks_repo_name": "Believas/ISBN-barcode-recognition", "max_forks_repo_head_hexsha": "69ab87ec9faa114666d8a651980913d9ee6402ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4807692308, "max_line_length": 150, "alphanum_fraction": 0.5188943317, "include": true, "reason": "import numpy", "num_tokens": 800}
|
/*
* Copyright (c) 2020 International Business Machines
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* Authors: Kornilios Kourtis (kou@zurich.ibm.com, kornilios@gmail.com)
*
*/
// vim: set expandtab softtabstop=4 tabstop:4 shiftwidth:4:
#ifndef TRT_SYNC_ABSTRACT_H__
#define TRT_SYNC_ABSTRACT_H__
// Some thoughts on synchronization
//
// When scheduling, there is a tradeoff between synchronization across different
// cores and load-balancing.
//
// Cilk-5, for example, uses work stealing and the THE protocol. If a core does
// not have work, it will try to steal from other schedulers. The task queue
// uses an algorithm that does not require a lock in the fast path, i.e.,
// pushing/popping tasks out of the local queue. It does require locking when
// stealing though (to protect against other potential stealers). Cilk's
// implementation is based on the assumption that there is ample parallel
// slackness so stealing is infrequent.
//
// Accept-affinity, arrakis, and other works have shown that you need to
// minimize synchronization when dealing with network servers. If possible, the
// path of request processing should never change cores. Unfortunately, this is
// not possible to do for a single (standard) TCP socket because of accept().
//
// The initial thought for trt was to use something closer to Cilk's work
// stealing, but since we target network servers, parallel slackness is not
// really a property that we can assume. I think it actually makes more sense to
// target something like the second approach.
//
// Two designs come to mind as a first approach
// (we can probably do better with something like DPDK):
// 1. single queue for all schedulers. Scheduler that has accept puts tasks
// into this queue.
// 2. one queue per scheduler that does not do accept, accept task places new
// tasks into this queue in a round-robin (or whatever) fashion.
//
// (2) makes more sense to me.
//
// The discussion above is concerned mostly with task balancing. However, there
// are other tradeoffs, such as synchronization for tasks that wait on one
// another (e.g., wait_() and friends). Initially, I assumed that a task on one
// core might have to wait for a task on another core. We might, however, want
// to make the argument that all processing of a single network request happens
// on a single core so there is no need to synchronize for these things. Since,
// the atomic operations used in these function start to appear on profiling,
// this might not be a bad idea.
//
// Is there a compromise where we could support both (i.e., waiting from
// different threads and waiting on the same thread)? There are other (special)
// cases where we can have more efficient implementations: e.g., waitsets with
// one waiter.
// Can we build a common interface for:
// - AsyncObj
// - Future
// - Waitset
// - Task
// that the scheduler can use?
// The primitives above are used inside tasks, so we will need an abstract task
// as well.
#include <cstdio>
#include <boost/intrusive/list.hpp>
namespace bi = boost::intrusive;
#include "trt/common.hh"
namespace trt {
//
// Base types for synchronization
//
class TaskBase;
class AsyncObjBase;
class FutureBase;
class WaitsetBase;
// A waitset groups futures so that a single task can block until any one
// of them becomes ready (select()-like waiting over futures).
class WaitsetBase {
public:
    // called when one of the async objects pointed by the futures of this wait
    // set becomes available.
    //
    // NB: called in Scheduler::notify_
    virtual TaskBase *set_ready() = 0;
    // used by the user. it will either return directly or try to wait in the
    // scheduler
    virtual FutureBase *wait() = 0;
    // used by scheduler to make the transition to the waiting state
    // return value:
    //  true:  transition successful, do nothing
    //  false: transition failed, please reschedule task
    //
    // NB: used in scheduler when handling Cmd::WAIT
    virtual bool try_set_state_to_waiting(void) = 0;
};
// Abstract future: the consumer-side handle on the result of an
// asynchronous object (AsyncObjBase).
class FutureBase {
protected:
    // A future is kept into two intrusive lists:
    // - The async object keeps the futures it needs to wakeup
    // - The waitset keeps the futures it waits on
    bi::list_member_hook<> f_lnode_ao_;
    bi::list_member_hook<> f_lnode_ws_;
    // Atomically checks if future is ready and it schedules it for notification
    // if it is not
    //
    // Used from within waitsets.
    virtual bool is_ready_or_register() = 0;
    // notify the future its wait has been completed (i.e., it is no longer a
    // member of a waitset).
    //
    // Used from within waitsets.
    virtual void wait_completed() = 0;
public:
    // Is the value of this Future ready?
    virtual bool is_ready() = 0;
    // mark the future ready; returns the task to wake up (if any)
    virtual TaskBase *set_ready() = 0;
    virtual RetT get_val() = 0;  // should be called only if is_ready() == true
    virtual void *get_ctx() = 0; // get user context
    virtual void drop_ref() = 0; // drop reference to asynchronous object
    // list type for AsyncObj (hooks through f_lnode_ao_)
    using AoList =
        bi::list<FutureBase,
                 bi::member_hook<FutureBase,
                                 bi::list_member_hook<>,
                                 &FutureBase::f_lnode_ao_>>;
    // list type for Waitset (hooks through f_lnode_ws_)
    using WsList =
        bi::list<FutureBase,
                 bi::member_hook<FutureBase,
                                 bi::list_member_hook<>,
                                 &FutureBase::f_lnode_ws_>>;
    // Sanity check: a future must be unlinked from both lists before it is
    // destroyed; warn loudly on stderr otherwise.
    virtual ~FutureBase() {
        if (f_lnode_ao_.is_linked())
            fprintf(stderr, "%s: %p ->f_lnode_ao_ is still linked\n", __PRETTY_FUNCTION__, this);
        if (f_lnode_ws_.is_linked())
            fprintf(stderr, "%s: %p ->f_lnode_ws_ is still linked\n", __PRETTY_FUNCTION__, this);
    }
};
// Abstract asynchronous object: the producer side observed by futures.
class AsyncObjBase {
public:
    // move the state to ready, and return the list of futures that need to be
    // set to ready (and notified).
    virtual FutureBase::AoList set_ready(RetT val) = 0;
};
} // end trt namespace
#endif /* ifndef TRT_SYNC_ABSTRACT_H__ */
|
{"hexsha": "6a5daa03b2970c0b9b41826c602c3d50f01182ba", "size": 6013, "ext": "hh", "lang": "C++", "max_stars_repo_path": "trt/src/trt/sync_base_types.hh", "max_stars_repo_name": "nik-io/uDepot", "max_stars_repo_head_hexsha": "06b94b7f2438b38b46572ede28072e24997e40c6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "trt/src/trt/sync_base_types.hh", "max_issues_repo_name": "nik-io/uDepot", "max_issues_repo_head_hexsha": "06b94b7f2438b38b46572ede28072e24997e40c6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trt/src/trt/sync_base_types.hh", "max_forks_repo_name": "nik-io/uDepot", "max_forks_repo_head_hexsha": "06b94b7f2438b38b46572ede28072e24997e40c6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4055555556, "max_line_length": 97, "alphanum_fraction": 0.6870114751, "num_tokens": 1458}
|
#include <string.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <boost/thread/mutex.hpp>
#include "./../Game/cmdtypes.h"
#include "./../Game/log.h"
#include "./../utils/stringbuilder.hpp"
#include "./../Game/getdefinevalue.h"
#include "./../server.h"
#include "./../Monster/monster.h"
#include "diskdbmanager.h"
using namespace std;
// Default constructor: the connection is established later via Connect().
DiskDBManager::DiskDBManager()
{
}
// Destructor: intentionally empty — the connection is not closed here;
// callers must invoke Disconnect() explicitly.
DiskDBManager::~DiskDBManager()
{
}
// Open a PostgreSQL connection from the supplied configuration.
// Returns true on success; prints a diagnostic and returns false otherwise.
bool DiskDBManager::Connect(Configuration con)
{
    m_PGconn = PQsetdbLogin(con.ip, con.port, NULL, NULL,con.dbName, con.user, con.password);
    if (PQstatus(m_PGconn) == CONNECTION_OK)
        return true;
    printf("PQconnectdb error\n");
    return false;
}
// Connect using a built-in connection string.
// SECURITY NOTE(review): database host, port, user and password are
// hard-coded in source (and shipped in version control); move them to
// configuration — the Connect(Configuration) overload already exists.
bool DiskDBManager::Connect()
{
    // m_PGconn = PQconnectdb("hostaddr = 127.0.0.1 port = 5432 dbname = my_database user = postgres password = postgres connect_timeout = 1");
    m_PGconn = PQconnectdb("hostaddr = 139.196.165.107 port = 5433 dbname = game user = game password = houfang2015 connect_timeout = 1");
    // m_PGconn = PQconnectdb("host = localhost dbname = my_database user = postgres password = postgres");
    if(PQstatus(m_PGconn) != CONNECTION_OK)
    {
        printf("PQconnectdb error\n");
        return false;
    }
    else
        return true;
}
// Close the libpq connection. Always reports success.
bool DiskDBManager::Disconnect()
{
    PQfinish(m_PGconn);
    return true;
}
// Fetch queried data from the database.
// Placeholder: no retrieval is implemented — callers always receive NULL.
void* DiskDBManager::Get(const char* str)
{
    return NULL;
}
// Execute a command that returns no rows (UPDATE / INSERT / DELETE / MOVE).
// Returns the number of affected rows, or -1 on failure.
// NOTE(review): declared variadic ("...") but the extra arguments are
// never consumed — the statement must already be fully formatted.
hf_int32 DiskDBManager::Set(const char *str,...)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_ExecStatusType = PQresultStatus(t_PGresult);
    hf_int32 t_ret;
    if(t_ExecStatusType != PGRES_COMMAND_OK) // command did not complete cleanly
    {
        printf("%d\n", t_ExecStatusType);
        printf("PQexec error\n");
        t_ret = -1;
    }
    else
    {
        // PQcmdTuples yields the affected-row count as a string
        t_ret = atoi(PQcmdTuples(t_PGresult));
    }
    PQclear(t_PGresult); // BUGFIX: the PGresult was leaked on every call
    return t_ret;
}
// Execute a data-returning command (SELECT) and report the row count.
// Returns the number of rows, or -1 on failure.
// NOTE: the PGresult (and therefore the data) is freed before returning,
// so this routine is only useful for counting rows.
hf_int32 DiskDBManager::GetSqlResult(const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    hf_int32 t_rows = -1;
    if(PQresultStatus(t_PGresult) == PGRES_TUPLES_OK) // query returned data
    {
        t_rows = PQntuples(t_PGresult);
    }
    PQclear(t_PGresult); // BUGFIX: the PGresult was leaked on every call
    return t_rows;
}
// Fetch a player's login record (user name + password) into *user.
// Expects the query to select (userName, password); if several rows match,
// the last one wins. Returns the row count, or -1 on failure.
hf_int32 DiskDBManager::GetPlayerUserId(STR_PlayerLoginUserId* user,const char *str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_ExecStatusType = PQresultStatus(t_PGresult);
    StringBuilder sbd;
    sbd<<"Function GetPlayerUserID :" << str;
    Logger::GetLogger()->Debug(sbd.str());
    hf_int32 t_row = -1;
    if(t_ExecStatusType != PGRES_TUPLES_OK) // PGRES_TUPLES_OK = query returned data
    {
        printf("PQexec error:%d\n", t_ExecStatusType);
    }
    else
    {
        t_row = PQntuples(t_PGresult); // row count
        for(int i = 0; i < t_row; i++)
        {
            memcpy(user->userName, PQgetvalue(t_PGresult, i, 0), PQgetlength(t_PGresult, i, 0));
            memcpy(user->password, PQgetvalue(t_PGresult, i, 1), PQgetlength(t_PGresult, i, 1));
        }
    }
    PQclear(t_PGresult); // BUGFIX: the PGresult was leaked on every call
    return t_row;
}
// Fetch the character list for an account and append one STR_RoleBasicInfo
// per result row to RoleList->m_Role. The query must select the 13 columns
// in the order unpacked below. Returns the row count, or -1 on failure.
hf_int32 DiskDBManager::GetPlayerRoleList(ResRoleList* RoleList, const char *str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    hf_int32 t_row = -1;
    if(PQresultStatus(t_PGresult) != PGRES_TUPLES_OK) // PGRES_TUPLES_OK = query returned data
    {
        printf("PQexec error\n");
    }
    else
    {
        t_row = PQntuples(t_PGresult); // row count
        STR_RoleBasicInfo t_RoleInfo;
        // pack one record per row
        for(int i = 0; i < t_row; i++)
        {
            memset(&t_RoleInfo, 0, sizeof(t_RoleInfo));
            memcpy(t_RoleInfo.Nick, PQgetvalue(t_PGresult, i, 0), PQgetlength(t_PGresult, i, 0));
            t_RoleInfo.RoleID = atoi(PQgetvalue(t_PGresult, i, 1));
            t_RoleInfo.Profession = atoi(PQgetvalue(t_PGresult, i, 2));
            t_RoleInfo.Level = atoi(PQgetvalue(t_PGresult, i, 3));
            t_RoleInfo.Sex = atoi(PQgetvalue(t_PGresult, i, 4));
            t_RoleInfo.Figure = atoi(PQgetvalue(t_PGresult, i, 5));
            t_RoleInfo.FigureColor = atoi(PQgetvalue(t_PGresult, i, 6));
            t_RoleInfo.Face = atoi(PQgetvalue(t_PGresult, i, 7));
            t_RoleInfo.Eye = atoi(PQgetvalue(t_PGresult, i, 8));
            t_RoleInfo.Hair = atoi(PQgetvalue(t_PGresult, i, 9));
            t_RoleInfo.HairColor = atoi(PQgetvalue(t_PGresult, i, 10));
            t_RoleInfo.ModeID = atoi(PQgetvalue(t_PGresult, i, 11));
            t_RoleInfo.SkirtID = atoi(PQgetvalue(t_PGresult, i, 12));
            RoleList->m_Role.push_back(t_RoleInfo);
        }
    }
    PQclear(t_PGresult); // BUGFIX: the PGresult was leaked on every call
    return t_row;
}
// Fetch the basic info of a newly registered role into *t_RoleInfo.
// Data is copied only when exactly one row matches; the query must select
// the 13 columns in the order unpacked below.
// Returns the row count, or -1 on failure.
hf_int32 DiskDBManager::GetPlayerRegisterRoleInfo(STR_RoleBasicInfo* t_RoleInfo, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    hf_int32 t_row = -1;
    if(PQresultStatus(t_PGresult) != PGRES_TUPLES_OK) // PGRES_TUPLES_OK = query returned data
    {
        printf("PQexec error\n");
    }
    else
    {
        t_row = PQntuples(t_PGresult); // row count
        // only fill the output for an unambiguous single-row result
        if(t_row == 1)
        {
            memcpy(t_RoleInfo->Nick, PQgetvalue(t_PGresult, 0, 0), PQgetlength(t_PGresult, 0, 0));
            t_RoleInfo->RoleID = atoi(PQgetvalue(t_PGresult, 0, 1));
            t_RoleInfo->Profession = atoi(PQgetvalue(t_PGresult, 0, 2));
            t_RoleInfo->Level = atoi(PQgetvalue(t_PGresult, 0, 3));
            t_RoleInfo->Sex = atoi(PQgetvalue(t_PGresult, 0, 4));
            t_RoleInfo->Figure = atoi(PQgetvalue(t_PGresult, 0, 5));
            t_RoleInfo->FigureColor = atoi(PQgetvalue(t_PGresult, 0, 6));
            t_RoleInfo->Face = atoi(PQgetvalue(t_PGresult, 0, 7));
            t_RoleInfo->Eye = atoi(PQgetvalue(t_PGresult, 0, 8));
            t_RoleInfo->Hair = atoi(PQgetvalue(t_PGresult, 0, 9));
            t_RoleInfo->HairColor = atoi(PQgetvalue(t_PGresult, 0, 10));
            t_RoleInfo->ModeID = atoi(PQgetvalue(t_PGresult, 0, 11));
            t_RoleInfo->SkirtID = atoi(PQgetvalue(t_PGresult, 0, 12));
        }
    }
    PQclear(t_PGresult); // BUGFIX: the PGresult was leaked on every call
    return t_row;
}
// Report whether the libpq connection is currently healthy.
bool DiskDBManager::IsConnected()
{
    return PQstatus(m_PGconn) == CONNECTION_OK;
}
// Fetch a player's initial position into *pos. Fills the output only when
// exactly one row matches; the query must select
// (Pos_x, Pos_y, Pos_z, Direct, MapID, ActID) in that order.
// Returns the row count, or -1 on failure / no connection.
hf_int32 DiskDBManager::GetPlayerInitPos(STR_PackPlayerPosition *pos, const char *sql)
{
    if ( ! IsConnected()) return -1;
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, sql);
    m_mtx.unlock();
    hf_int32 t_row = -1;
    if(PQresultStatus(t_PGresult) != PGRES_TUPLES_OK) // PGRES_TUPLES_OK = query returned data
    {
        std::ostringstream os;
        os<<"SQL:"<<sql<<" Execute error";
        Logger::GetLogger()->Error(os.str().c_str());
    }
    else
    {
        t_row = PQntuples(t_PGresult); // row count
        if(t_row == 1)
        {
            // numeric fields are parsed from their text representation
            istringstream( PQgetvalue(t_PGresult,0,0) ) >> pos->Pos_x;
            istringstream( PQgetvalue(t_PGresult,0,1) ) >> pos->Pos_y;
            istringstream( PQgetvalue(t_PGresult,0,2) ) >> pos->Pos_z;
            istringstream( PQgetvalue(t_PGresult,0,3) ) >> pos->Direct;
            istringstream( PQgetvalue(t_PGresult,0,4) ) >> pos->MapID;
            pos->ActID = atoi(PQgetvalue(t_PGresult, 0, 5));
        }
    }
    PQclear(t_PGresult); // BUGFIX: the PGresult was leaked on every call
    return t_row;
}
// Load every monster spawn entry from T_MonsterSpawns into *monsterSpawns,
// keyed by SpawnsPosID. Returns the row count on success, -1 on query failure.
hf_int32 DiskDBManager:: GetMonsterSpawns(umap_monsterSpawns* monsterSpawns)
{
    const hf_char* str = "select * from T_MonsterSpawns;";
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_ExecStatusType = PQresultStatus(t_PGresult);
    if(t_ExecStatusType != PGRES_TUPLES_OK) // a data-returning query did not succeed
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    int t_row = PQntuples(t_PGresult); // row count
    STR_MonsterSpawns t_monsterSpawns;
    // Pack the data row by row
    for(int i = 0; i < t_row; i++)
    {
        t_monsterSpawns.MonsterTypeID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_monsterSpawns.SpawnsPosID = atoi(PQgetvalue(t_PGresult, i, 1));
        t_monsterSpawns.Pos_x = atof(PQgetvalue(t_PGresult, i, 2));
        t_monsterSpawns.Pos_y = atof(PQgetvalue(t_PGresult, i, 3));
        t_monsterSpawns.Pos_z = atof(PQgetvalue(t_PGresult, i, 4));
        t_monsterSpawns.Boundary = atof(PQgetvalue(t_PGresult, i, 5));
        t_monsterSpawns.MapID = atoi(PQgetvalue(t_PGresult, i, 6));
        t_monsterSpawns.Amount = atoi(PQgetvalue(t_PGresult, i, 7));
        (*monsterSpawns)[t_monsterSpawns.SpawnsPosID] = t_monsterSpawns;
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load all monster attribute records from T_MonsterType into *monsterType,
// keyed by MonsterTypeID. Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetMonsterType(umap_monsterType* monsterType)
{
    const hf_char* str = "select * from T_MonsterType;";
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        Logger::GetLogger()->Error("select MosnterType error");
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    int t_row = PQntuples(t_PGresult);
    STR_MonsterType t_monsterType;
    // Pack the data row by row
    for(int i = 0; i < t_row; i++)
    {
        memset(&t_monsterType, 0, sizeof(STR_MonsterType));
        t_monsterType.MonsterTypeID = atoi(PQgetvalue(t_PGresult, i, 0));
        memcpy(t_monsterType.MonsterName, PQgetvalue(t_PGresult, i, 1), PQgetlength(t_PGresult, i, 1));
        t_monsterType.HP = atoi(PQgetvalue(t_PGresult, i, 2));
        t_monsterType.PhysicalAttack = atoi(PQgetvalue(t_PGresult, i, 3));
        t_monsterType.MagicAttack = atoi(PQgetvalue(t_PGresult, i, 4));
        t_monsterType.PhysicalDefense = atoi(PQgetvalue(t_PGresult, i, 5));
        t_monsterType.MagicDefense = atoi(PQgetvalue(t_PGresult, i, 6));
        t_monsterType.Attackrate = atoi(PQgetvalue(t_PGresult, i, 7));
        t_monsterType.MoveRate = atoi(PQgetvalue(t_PGresult, i, 8));
        t_monsterType.Crit_Rate = atof(PQgetvalue(t_PGresult, i, 9));
        t_monsterType.Dodge_Rate = atof(PQgetvalue(t_PGresult, i, 10));
        t_monsterType.Hit_Rate = atof(PQgetvalue(t_PGresult, i, 11));
        t_monsterType.RankID = atoi(PQgetvalue(t_PGresult, i, 12));
        t_monsterType.Level = atoi(PQgetvalue(t_PGresult, i, 13));
        t_monsterType.AttackTypeID = atoi(PQgetvalue(t_PGresult, i, 14));
        (*monsterType)[t_monsterType.MonsterTypeID] = t_monsterType;
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load all task profiles from t_taskprofile, keyed by TaskID.
// Each entry starts with Status = 1 ("not accepted").
// Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetTaskProfile(umap_taskProfile TaskProfile)
{
    const hf_char* str = "select * from t_taskprofile;";
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        Logger::GetLogger()->Error("select t_taskprofile error");
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    int t_row = PQntuples(t_PGresult);
    STR_TaskProfile t_profile;
    for(int i = 0; i < t_row; i++)
    {
        memset(&t_profile, 0, sizeof(STR_TaskProfile));
        t_profile.TaskID = atoi(PQgetvalue(t_PGresult, i, 0));
        memcpy(t_profile.TaskName, PQgetvalue(t_PGresult, i, 1), PQgetlength(t_PGresult, i, 1));
        t_profile.StartNPCID = atoi(PQgetvalue(t_PGresult, i, 2));
        t_profile.FinishNPCID = atoi(PQgetvalue(t_PGresult, i, 3));
        t_profile.AcceptModeID = atoi(PQgetvalue(t_PGresult, i, 4));
        t_profile.FinishModeID = atoi(PQgetvalue(t_PGresult, i, 5));
        t_profile.Status = 1; // not yet accepted
        (*TaskProfile)[t_profile.TaskID] = t_profile;
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load start/finish dialogue text for every task from t_taskdialogue.
// Lengths are stored including a NUL terminator (+1). Multiple entries per
// TaskID are allowed (multimap-style insert). Returns rows or -1 on failure.
hf_int32 DiskDBManager::GetTaskDialogue(umap_dialogue* TaskDialogue)
{
    const hf_char* str = "select * from t_taskdialogue;";
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        Logger::GetLogger()->Error("select t_taskDialogue error");
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    int t_row = PQntuples(t_PGresult);
    STR_TaskDlg t_dialogue;
    for(int i = 0; i < t_row; i++)
    {
        memset(&t_dialogue, 0, sizeof(STR_TaskDlg));
        t_dialogue.TaskID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_dialogue.StartLen = PQgetlength(t_PGresult, i, 1) + 1;  // +1 for the NUL
        t_dialogue.FinishLen = PQgetlength(t_PGresult, i, 2) + 1;
        memcpy(t_dialogue.StartDialogue, PQgetvalue(t_PGresult, i, 1), t_dialogue.StartLen - 1);
        memcpy(t_dialogue.FinishDialogue, PQgetvalue(t_PGresult, i, 2), t_dialogue.FinishLen - 1);
        (*TaskDialogue).insert(make_pair(t_dialogue.TaskID, t_dialogue));
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load per-aim "execute" dialogue lines from t_taskexedialogue, grouping them
// into a vector per TaskID. Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetTaskExeDialogue(umap_exeDialogue* TaskExeDialogue)
{
    const hf_char* str = "select * from t_taskexedialogue;";
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        // fix: log message previously named the wrong table (t_taskDialogue)
        Logger::GetLogger()->Error("select t_taskexedialogue error");
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    int t_row = PQntuples(t_PGresult);
    STR_TaskExeDlg t_dialogue;
    for(int i = 0; i < t_row; i++)
    {
        memset(&t_dialogue, 0, sizeof(STR_TaskExeDlg));
        t_dialogue.TaskID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_dialogue.AimID = atoi(PQgetvalue(t_PGresult, i, 1));
        t_dialogue.ExeLen = PQgetlength(t_PGresult, i, 2) + 1;  // +1 for the NUL
        memcpy(t_dialogue.ExeDialogue, PQgetvalue(t_PGresult, i, 2), t_dialogue.ExeLen - 1);
        umap_exeDialogue::iterator it = TaskExeDialogue->find(t_dialogue.TaskID);
        if(it == TaskExeDialogue->end())
        {
            vector<STR_TaskExeDlg> t_exeDlg;
            t_exeDlg.push_back(t_dialogue);
            (*TaskExeDialogue)[t_dialogue.TaskID] = t_exeDlg;
        }
        else
        {
            it->second.push_back(t_dialogue);
        }
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load task descriptions from t_taskdescription; multiple rows may share a
// TaskID (multimap-style insert). Returns the row count or -1 on failure.
hf_int32 DiskDBManager::GetTaskDescription(umap_taskDescription* TaskDesc)
{
    const hf_char* str = "select * from t_taskdescription;";
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        Logger::GetLogger()->Error("select t_taskdescrition error");
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_PackTaskDescription t_desc;
    for(int i = 0; i < t_row; i++)
    {
        // Zero only the text buffer; scalar fields are overwritten below.
        memset(t_desc.Description, 0, sizeof(t_desc.Description));
        t_desc.TaskID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_desc.TaskPropsID = atoi(PQgetvalue(t_PGresult, i, 1));
        t_desc.Time = atoi(PQgetvalue(t_PGresult, i, 2));
        memcpy(t_desc.Description, PQgetvalue(t_PGresult, i, 3), PQgetlength(t_PGresult, i, 3));
        (*TaskDesc).insert(make_pair(t_desc.TaskID, t_desc));
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load task aims from t_taskaim, grouped as a vector per TaskID.
// Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetTaskAim(umap_taskAim* TaskAim)
{
    const hf_char* str = "select * from t_taskaim;";
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        Logger::GetLogger()->Error("select t_taskaim error");
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_TaskAim t_Aim;
    for(int i = 0; i < t_row; i++)
    {
        t_Aim.TaskID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_Aim.AimID = atoi(PQgetvalue(t_PGresult, i, 1));
        t_Aim.Amount = atoi(PQgetvalue(t_PGresult, i, 2));
        t_Aim.ExeModeID = atoi(PQgetvalue(t_PGresult, i, 3));
        umap_taskAim::iterator it = TaskAim->find(t_Aim.TaskID);
        if(it == TaskAim->end())
        {
            vector<STR_TaskAim> vecAim;
            vecAim.push_back(t_Aim);
            (*TaskAim)[t_Aim.TaskID] = vecAim;
        }
        else
        {
            it->second.push_back(t_Aim);
        }
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load task rewards from t_taskreward, keyed by TaskID (multimap-style
// insert allows multiple rewards per task). Returns rows or -1 on failure.
hf_int32 DiskDBManager::GetTaskReward(umap_taskReward* TaskReward)
{
    const char* str = "select * from t_taskreward;";
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        // fix: log message previously named the wrong table (t_taskaim)
        Logger::GetLogger()->Error("select t_taskreward error");
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_TaskReward t_reward;
    for(int i = 0; i < t_row; i++)
    {
        t_reward.TaskID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_reward.Experience = atoi(PQgetvalue(t_PGresult, i, 1));
        t_reward.Money = atoi(PQgetvalue(t_PGresult, i, 2));
        t_reward.SkillID = atoi(PQgetvalue(t_PGresult, i, 3));
        t_reward.TitleID = atoi(PQgetvalue(t_PGresult, i, 4));
        t_reward.Attribute = atoi(PQgetvalue(t_PGresult, i, 5));
        (*TaskReward).insert(make_pair(t_reward.TaskID, t_reward));
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load item rewards from t_GoodsReward, grouped as a vector per TaskID.
// Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetGoodsReward(umap_goodsReward* GoodsReward)
{
    const char* str = "select * from t_GoodsReward;";
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        Logger::GetLogger()->Error("select t_GoodsReward error");
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_GoodsReward t_good;
    vector<STR_GoodsReward> v_goodReward;
    for(int i = 0; i < t_row; i++)
    {
        v_goodReward.clear();
        hf_uint32 TaskID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_good.GoodsID = atoi(PQgetvalue(t_PGresult, i, 1));
        t_good.Count = atoi(PQgetvalue(t_PGresult, i, 2));
        t_good.Type = atoi(PQgetvalue(t_PGresult, i, 3));
        v_goodReward.push_back(t_good);
        umap_goodsReward::iterator it = GoodsReward->find(TaskID);
        if(it != GoodsReward->end())
        {
            it->second.push_back(t_good);
        }
        else
        {
            (*GoodsReward)[TaskID] = v_goodReward;
        }
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load task prerequisites from t_taskPremise, keyed by TaskID
// (multimap-style insert). Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetTaskPremise(umap_taskPremise* t_TaskPremise)
{
    const char* str = "select * from t_taskPremise;";
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_TaskPremise t_taskPremise;
    for(int i = 0; i < t_row; i++)
    {
        t_taskPremise.TaskID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_taskPremise.PreTaskID = atoi(PQgetvalue(t_PGresult, i, 1));
        t_taskPremise.CrmtTaskID = atoi(PQgetvalue(t_PGresult, i, 2));
        t_taskPremise.GoodsID = atoi(PQgetvalue(t_PGresult, i, 3));
        t_taskPremise.TitleID = atoi(PQgetvalue(t_PGresult, i, 4));
        t_taskPremise.DungeonID = atoi(PQgetvalue(t_PGresult, i, 5));
        t_taskPremise.GenderID = atoi(PQgetvalue(t_PGresult, i, 6));
        t_taskPremise.Level = atoi(PQgetvalue(t_PGresult, i, 7));
        t_taskPremise.JobID = atoi(PQgetvalue(t_PGresult, i, 8));
        (*t_TaskPremise).insert(make_pair(t_taskPremise.TaskID, t_taskPremise));
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load a player's task progress via the caller-supplied SQL, grouped as a
// vector per TaskID. Column 0 is skipped (presumably the role id — TODO
// confirm against the query). Returns the row count or -1 on failure.
hf_int32 DiskDBManager::GetPlayerTaskProcess(umap_taskProcess TaskProcess, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_TaskProcess t_taskProcess;
    for(int i = 0; i < t_row; i++)
    {
        memset(&t_taskProcess, 0, sizeof(STR_TaskProcess));
        t_taskProcess.TaskID = atoi(PQgetvalue(t_PGresult, i, 1));
        t_taskProcess.AimID = atoi(PQgetvalue(t_PGresult, i, 2));
        t_taskProcess.FinishCount = atoi(PQgetvalue(t_PGresult, i, 3));
        t_taskProcess.AimAmount = atoi(PQgetvalue(t_PGresult, i, 4));
        t_taskProcess.ExeModeID = atoi(PQgetvalue(t_PGresult, i, 5));
        _umap_taskProcess::iterator it = TaskProcess->find(t_taskProcess.TaskID);
        if(it != TaskProcess->end())
        {
            it->second.push_back(t_taskProcess);
        }
        else
        {
            vector<STR_TaskProcess> vec_process;
            vec_process.push_back(t_taskProcess);
            (*TaskProcess)[t_taskProcess.TaskID] = vec_process;
        }
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load a role's combat attributes via the caller-supplied SQL. Fills
// *roleinfo only when exactly one row comes back.
// Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetRoleInfo(STR_RoleInfo* roleinfo, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    if(t_row == 1)
    {
        roleinfo->MaxHP = atoi(PQgetvalue(t_PGresult, 0, 1));
        roleinfo->HP = atoi(PQgetvalue(t_PGresult, 0, 2));
        roleinfo->MaxMagic = atoi(PQgetvalue(t_PGresult, 0, 3));
        roleinfo->Magic = atoi(PQgetvalue(t_PGresult, 0, 4));
        roleinfo->PhysicalDefense = atoi(PQgetvalue(t_PGresult, 0, 5));
        roleinfo->MagicDefense = atoi(PQgetvalue(t_PGresult, 0, 6));
        roleinfo->PhysicalAttack = atoi(PQgetvalue(t_PGresult, 0, 7));
        roleinfo->MagicAttack = atoi(PQgetvalue(t_PGresult, 0, 8));
        roleinfo->Crit_Rate = atof(PQgetvalue(t_PGresult, 0, 9));
        roleinfo->Dodge_Rate = atof(PQgetvalue(t_PGresult, 0, 10));
        roleinfo->Hit_Rate = atof(PQgetvalue(t_PGresult, 0, 11));
        roleinfo->Resist_Rate = atof(PQgetvalue(t_PGresult, 0, 12));
        roleinfo->Caster_Speed = atoi(PQgetvalue(t_PGresult, 0, 13));
        roleinfo->Move_Speed = atoi(PQgetvalue(t_PGresult, 0, 14));
        roleinfo->Hurt_Speed = atoi(PQgetvalue(t_PGresult, 0, 15));
        roleinfo->Small_Universe = atoi(PQgetvalue(t_PGresult, 0, 16));
        roleinfo->maxSmall_Universe = atoi(PQgetvalue(t_PGresult, 0, 17));
        roleinfo->RecoveryLife_Percentage = atof(PQgetvalue(t_PGresult, 0, 18));
        roleinfo->RecoveryLife_value = atoi(PQgetvalue(t_PGresult, 0, 19));
        roleinfo->RecoveryMagic_Percentage = atof(PQgetvalue(t_PGresult, 0, 20));
        roleinfo->RecoveryMagic_value = atoi(PQgetvalue(t_PGresult, 0, 21));
        roleinfo->MagicHurt_Reduction = atof(PQgetvalue(t_PGresult, 0, 22));
        roleinfo->PhysicalHurt_Reduction = atof(PQgetvalue(t_PGresult, 0, 23));
        roleinfo->CritHurt = atof(PQgetvalue(t_PGresult, 0, 24));
        roleinfo->CritHurt_Reduction = atof(PQgetvalue(t_PGresult, 0, 25));
        roleinfo->Magic_Pass = atof(PQgetvalue(t_PGresult, 0, 26));
        roleinfo->Physical_Pass = atof(PQgetvalue(t_PGresult, 0, 27));
        roleinfo->Rigorous = atoi(PQgetvalue(t_PGresult, 0, 28));
        roleinfo->Will = atoi(PQgetvalue(t_PGresult, 0, 29));
        roleinfo->Wise = atoi(PQgetvalue(t_PGresult, 0, 30));
        // fix: Mentality previously read column 21 (RecoveryMagic_value's
        // column); 31 follows the 28..32 sequence — verify against the schema.
        roleinfo->Mentality = atoi(PQgetvalue(t_PGresult, 0, 31));
        roleinfo->Physical_fitness = atoi(PQgetvalue(t_PGresult, 0, 32));
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load a player's level and current experience via the caller-supplied SQL,
// and derive the experience needed for the next level. Fills *RoleExp only
// when exactly one row comes back. Returns rows or -1 on failure.
hf_int32 DiskDBManager::GetRoleExperience(STR_PackRoleExperience* RoleExp, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    if(t_row == 1)
    {
        RoleExp->Level = atoi(PQgetvalue(t_PGresult, 0, 0));
        RoleExp->CurrentExp = atoi(PQgetvalue(t_PGresult, 0, 1));
        RoleExp->UpgradeExp = GetUpgradeExprience(RoleExp->Level);
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load a player's friend list via the caller-supplied SQL, keyed by RoleID.
// Every friend starts as Status = 2 (offline by default).
// Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetFriendList(umap_friendList t_friendList, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_FriendInfo t_friendInfo;
    for(hf_int32 i = 0; i < t_row; i++)
    {
        memset(&t_friendInfo, 0, sizeof(STR_FriendInfo));
        t_friendInfo.RoleID = atoi(PQgetvalue(t_PGresult, i, 0));
        memcpy(t_friendInfo.Nick, PQgetvalue(t_PGresult, i, 1), PQgetlength(t_PGresult, i, 1));
        t_friendInfo.Status = 2; // offline by default
        (*t_friendList)[t_friendInfo.RoleID] = t_friendInfo;
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Look up the role id for a nickname via the caller-supplied SQL. Writes
// *roleid only when exactly one row matches. Returns rows or -1 on failure.
hf_int32 DiskDBManager::GetNickRoleid(hf_uint32* roleid, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    if(t_row == 1)
    {
        *roleid = atoi(PQgetvalue(t_PGresult, 0, 0));
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load pending friend requests via the caller-supplied SQL, appending one
// STR_AddFriend per row. Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetAskAddFriend(vector<STR_AddFriend>& addFriend, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_AddFriend t_addFriend;
    for(hf_int32 i = 0; i < t_row; i++)
    {
        memset(&t_addFriend, 0, sizeof(STR_AddFriend));
        t_addFriend.RoleID = atoi(PQgetvalue(t_PGresult, i, 0));
        memcpy(t_addFriend.Nick, PQgetvalue(t_PGresult, i, 1), PQgetlength(t_PGresult, i, 1));
        addFriend.push_back(t_addFriend);
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load all NPC placements from t_npcinfo into *npcInfo, keyed by NpcID.
// Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetNPCInfo(umap_npcInfo* npcInfo)
{
    const hf_char* str = "select * from t_npcinfo;";
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    NPCInfo t_NPCInfo;
    for(hf_int32 i = 0; i < t_row; i++)
    {
        t_NPCInfo.NpcID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_NPCInfo.Mapid = atoi(PQgetvalue(t_PGresult, i, 1));
        t_NPCInfo.Pos_x = atof(PQgetvalue(t_PGresult, i, 2));
        t_NPCInfo.Pos_y = atof(PQgetvalue(t_PGresult, i, 3));
        t_NPCInfo.Pos_z = atof(PQgetvalue(t_PGresult, i, 4));
        (*npcInfo)[t_NPCInfo.NpcID] = t_NPCInfo;
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load monster loot tables from t_monsterloot, grouped as a vector per
// MonsterTypeID. Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetMonsterLoot(umap_monsterLoot* monsterLoot)
{
    const hf_char* str = "select * from t_monsterloot;";
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_MonsterLoot t_monsterLoot;
    vector<STR_MonsterLoot> v_monsterLoot;
    for(hf_int32 i = 0; i < t_row; i++)
    {
        v_monsterLoot.clear();
        t_monsterLoot.MonsterTypeID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_monsterLoot.PreCondition = atoi(PQgetvalue(t_PGresult, i, 1));
        t_monsterLoot.LootGoodsID = atoi(PQgetvalue(t_PGresult, i, 2));
        t_monsterLoot.LootProbability = atof(PQgetvalue(t_PGresult, i, 3));
        t_monsterLoot.Count = atoi(PQgetvalue(t_PGresult, i, 4));
        v_monsterLoot.push_back(t_monsterLoot);
        umap_monsterLoot::iterator it = monsterLoot->find(t_monsterLoot.MonsterTypeID);
        if(it != monsterLoot->end())
        {
            it->second.push_back(t_monsterLoot);
        }
        else
        {
            (*monsterLoot)[t_monsterLoot.MonsterTypeID] = v_monsterLoot;
        }
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load all skill definitions from t_skillinfo into *skillInfo, keyed by
// SkillID. Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetSkillInfo(umap_skillInfo* skillInfo)
{
    const hf_char* str = "select * from t_skillinfo;";
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_PackSkillInfo t_skill;
    for(hf_int32 i = 0; i < t_row; i++)
    {
        t_skill.SkillID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_skill.UseGoodsID = atoi(PQgetvalue(t_PGresult, i, 1));
        t_skill.UseMagic = atoi(PQgetvalue(t_PGresult, i, 2));
        t_skill.CoolTime = atof(PQgetvalue(t_PGresult, i, 3));
        t_skill.LeadTime = atof(PQgetvalue(t_PGresult, i, 4));
        t_skill.PhysicalHurt = atoi(PQgetvalue(t_PGresult, i, 5));
        t_skill.PhyPlus = atof(PQgetvalue(t_PGresult, i, 6));
        t_skill.MagicHurt = atoi(PQgetvalue(t_PGresult, i, 7));
        t_skill.MagPlus = atof(PQgetvalue(t_PGresult, i, 8));
        t_skill.UseGoodsCount = atoi(PQgetvalue(t_PGresult, i, 9));
        t_skill.FarDistance = atoi(PQgetvalue(t_PGresult, i, 10));
        t_skill.NearlyDistance = atoi(PQgetvalue(t_PGresult, i, 11));
        t_skill.TriggerID = atoi(PQgetvalue(t_PGresult, i, 12));
        t_skill.SkillRangeID = atoi(PQgetvalue(t_PGresult, i, 13));
        t_skill.UseAnger = atoi(PQgetvalue(t_PGresult, i, 14));
        t_skill.CastingTime = atoi(PQgetvalue(t_PGresult, i, 15));
        t_skill.CasterNumber = atoi(PQgetvalue(t_PGresult, i, 16));
        (*skillInfo)[t_skill.SkillID] = t_skill;
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load a player's currency balances via the caller-supplied SQL, keyed by
// currency TypeID. Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetPlayerMoney(umap_roleMoney playerMoney, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_PlayerMoney t_money;
    for(hf_int32 i = 0; i < t_row; i++)
    {
        t_money.Count = atoi(PQgetvalue(t_PGresult, i, 0));
        t_money.TypeID = atoi(PQgetvalue(t_PGresult, i, 1));
        (*playerMoney)[t_money.TypeID] = t_money;
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load a player's inventory via the caller-supplied SQL. Rows whose TypeID
// falls in [EquTypeMinValue, EquTypeMaxValue] are routed to *playerEqu as
// equipment; everything else is grouped into *playerGoods per GoodsID.
// Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetPlayerGoods(umap_roleGoods playerGoods, umap_roleEqu playerEqu, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_Goods t_goods;
    STR_PlayerEqu t_equ;
    t_goods.Source = Source_Bag; // everything loaded here lives in the bag
    vector<STR_Goods> t_vec;
    for(hf_int32 i = 0; i < t_row; i++)
    {
        t_goods.GoodsID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_goods.TypeID = atoi(PQgetvalue(t_PGresult, i, 1));
        t_goods.Count = atoi(PQgetvalue(t_PGresult, i, 2));
        t_goods.Position = atoi(PQgetvalue(t_PGresult, i, 3));
        if(EquTypeMinValue <= t_goods.TypeID && t_goods.TypeID <= EquTypeMaxValue) // equipment
        {
            memcpy(&t_equ.goods, &t_goods, sizeof(STR_Goods));
            (*playerEqu)[t_goods.GoodsID] = t_equ;
            continue;
        }
        _umap_roleGoods::iterator it = playerGoods->find(t_goods.GoodsID);
        if(it != playerGoods->end())
        {
            it->second.push_back(t_goods);
        }
        else
        {
            t_vec.clear();
            t_vec.push_back(t_goods);
            (*playerGoods)[t_goods.GoodsID] = t_vec;
        }
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load per-equipment attribute rows via the caller-supplied SQL and copy each
// into the matching (*playerEqu)[EquID].equAttr slot (column 0 is skipped —
// presumably the role id; verify against the query).
// Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetPlayerEqu(umap_roleEqu playerEqu, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_EquipmentAttr t_equAttr;
    for(hf_int32 i = 0; i < t_row; i++)
    {
        t_equAttr.EquID = atoi(PQgetvalue(t_PGresult, i, 1));
        t_equAttr.TypeID = atoi(PQgetvalue(t_PGresult, i, 2));
        t_equAttr.Crit_Rate = atof(PQgetvalue(t_PGresult, i, 3));
        t_equAttr.Dodge_Rate = atof(PQgetvalue(t_PGresult, i, 4));
        t_equAttr.Hit_Rate = atof(PQgetvalue(t_PGresult, i, 5));
        t_equAttr.Resist_Rate = atof(PQgetvalue(t_PGresult, i, 6));
        t_equAttr.Caster_Speed = atof(PQgetvalue(t_PGresult, i, 7));
        t_equAttr.Move_Speed = atof(PQgetvalue(t_PGresult, i, 8));
        t_equAttr.Hurt_Speed = atof(PQgetvalue(t_PGresult, i, 9));
        t_equAttr.RecoveryLife_Percentage = atof(PQgetvalue(t_PGresult, i, 10));
        t_equAttr.RecoveryLife_value = atoi(PQgetvalue(t_PGresult, i, 11));
        t_equAttr.RecoveryMagic_Percentage = atof(PQgetvalue(t_PGresult, i, 12));
        t_equAttr.RecoveryMagic_value = atoi(PQgetvalue(t_PGresult, i, 13));
        t_equAttr.MagicHurt_Reduction = atof(PQgetvalue(t_PGresult, i, 14));
        t_equAttr.PhysicalHurt_Reduction = atof(PQgetvalue(t_PGresult, i, 15));
        t_equAttr.CritHurt = atof(PQgetvalue(t_PGresult, i, 16));
        t_equAttr.CritHurt_Reduction = atof(PQgetvalue(t_PGresult, i, 17));
        t_equAttr.Magic_Pass = atof(PQgetvalue(t_PGresult, i, 18));
        t_equAttr.Physical_Pass = atof(PQgetvalue(t_PGresult, i, 19));
        t_equAttr.SuitSkillID = atoi(PQgetvalue(t_PGresult, i, 20));
        t_equAttr.HP = atoi(PQgetvalue(t_PGresult, i, 21));
        t_equAttr.Magic = atoi(PQgetvalue(t_PGresult, i, 22));
        t_equAttr.PhysicalDefense = atoi(PQgetvalue(t_PGresult, i, 23));
        t_equAttr.MagicDefense = atoi(PQgetvalue(t_PGresult, i, 24));
        t_equAttr.PhysicalAttack = atoi(PQgetvalue(t_PGresult, i, 25));
        t_equAttr.MagicAttack = atoi(PQgetvalue(t_PGresult, i, 26));
        t_equAttr.Rigorous = atoi(PQgetvalue(t_PGresult, i, 27));
        t_equAttr.Will = atoi(PQgetvalue(t_PGresult, i, 28));
        t_equAttr.Wise = atoi(PQgetvalue(t_PGresult, i, 29));
        t_equAttr.Mentality = atoi(PQgetvalue(t_PGresult, i, 30));
        t_equAttr.Physical_fitness = atoi(PQgetvalue(t_PGresult, i, 31));
        t_equAttr.JobID = atoi(PQgetvalue(t_PGresult, i, 32));
        t_equAttr.BodyPos = atoi(PQgetvalue(t_PGresult, i, 33));
        t_equAttr.Grade = atoi(PQgetvalue(t_PGresult, i, 34));
        t_equAttr.Level = atoi(PQgetvalue(t_PGresult, i, 35));
        t_equAttr.StrengthenLevel = atoi(PQgetvalue(t_PGresult, i, 36));
        t_equAttr.MaxDurability = atoi(PQgetvalue(t_PGresult, i, 37));
        t_equAttr.Durability = atoi(PQgetvalue(t_PGresult, i, 38));
        memcpy(&((*playerEqu)[t_equAttr.EquID].equAttr), &t_equAttr, sizeof(STR_EquipmentAttr));
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load positions of dropped-but-unpicked loot via the caller-supplied SQL,
// stamping each record with the current wall-clock time so expiry can be
// computed against continueTime. Returns rows or -1 on failure.
hf_int32 DiskDBManager::GetNotPickGoodsPosition(umap_lootPosition lootPosition, const hf_char* str)
{
    time_t timep;
    time(&timep); // snapshot once; all rows share the same load time
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    LootPositionTime t_loot;
    for(hf_int32 i = 0; i < t_row; i++)
    {
        t_loot.timep = (hf_uint32)timep;
        t_loot.continueTime = atoi(PQgetvalue(t_PGresult, i, 0));
        t_loot.goodsPos.GoodsFlag = atoi(PQgetvalue(t_PGresult, i, 1));
        t_loot.goodsPos.Pos_x = atoi(PQgetvalue(t_PGresult, i, 2));
        t_loot.goodsPos.Pos_y = atoi(PQgetvalue(t_PGresult, i, 3));
        t_loot.goodsPos.Pos_z = atoi(PQgetvalue(t_PGresult, i, 4));
        t_loot.goodsPos.MapID = atoi(PQgetvalue(t_PGresult, i, 5));
        (*lootPosition)[t_loot.goodsPos.GoodsFlag] = t_loot;
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load the contents of unpicked loot drops via the caller-supplied SQL,
// grouped as a vector per loot id (column 0).
// Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetNotPickGoods(umap_lootGoods lootGoods, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_LootGoods t_goods;
    vector<STR_LootGoods> vec;
    for(hf_int32 i = 0; i < t_row; i++)
    {
        hf_uint32 lootID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_goods.LootGoodsID = atoi(PQgetvalue(t_PGresult, i, 1));
        t_goods.Count = atoi(PQgetvalue(t_PGresult, i, 2));
        _umap_lootGoods::iterator it = lootGoods->find(lootID);
        if(it != lootGoods->end())
        {
            it->second.push_back(t_goods);
        }
        else
        {
            vec.clear();
            vec.push_back(t_goods);
            (*lootGoods)[lootID] = vec;
        }
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load buy/sell prices via the caller-supplied SQL, keyed by GoodsID.
// Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetGoodsPrice(umap_goodsPrice* goodsPrice, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_GoodsPrice t_goodsPrice;
    for(hf_int32 i = 0; i < t_row; i++)
    {
        t_goodsPrice.GoodsID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_goodsPrice.BuyPrice = atoi(PQgetvalue(t_PGresult, i, 1));
        t_goodsPrice.SellPrice = atoi(PQgetvalue(t_PGresult, i, 2));
        (*goodsPrice)[t_goodsPrice.GoodsID] = t_goodsPrice;
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load consumable item attributes via the caller-supplied SQL, keyed by
// GoodsID. Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetConsumableAttr(umap_consumable* consumable, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_Consumable t_consumable;
    for(hf_int32 i = 0; i < t_row; i++)
    {
        t_consumable.GoodsID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_consumable.HP = atoi(PQgetvalue(t_PGresult, i, 1));
        t_consumable.Magic = atoi(PQgetvalue(t_PGresult, i, 2));
        t_consumable.ColdTime = atoi(PQgetvalue(t_PGresult, i, 3));
        t_consumable.StackNumber = atoi(PQgetvalue(t_PGresult, i, 4));
        t_consumable.PersecondHP = atoi(PQgetvalue(t_PGresult, i, 5));
        t_consumable.PersecondMagic = atoi(PQgetvalue(t_PGresult, i, 6));
        t_consumable.UserLevel = atoi(PQgetvalue(t_PGresult, i, 7));
        t_consumable.ContinueTime = atoi(PQgetvalue(t_PGresult, i, 8));
        t_consumable.Type = atoi(PQgetvalue(t_PGresult, i, 9));
        (*consumable)[t_consumable.GoodsID] = t_consumable;
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Load equipment templates via the caller-supplied SQL, keyed by TypeID.
// New items start at full durability (Durability = MaxDurability).
// Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetEquAttr(umap_equAttr* equAttr, const hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    STR_EquipmentAttr t_equAttr;
    for(hf_int32 i = 0; i < t_row; i++)
    {
        // NOTE(review): column 1 is skipped here (template tables appear to
        // keep a name column there) — confirm against the schema.
        t_equAttr.TypeID = atoi(PQgetvalue(t_PGresult, i, 0));
        t_equAttr.Crit_Rate = atof(PQgetvalue(t_PGresult, i, 2));
        t_equAttr.Dodge_Rate = atof(PQgetvalue(t_PGresult, i, 3));
        t_equAttr.Hit_Rate = atof(PQgetvalue(t_PGresult, i, 4));
        t_equAttr.Resist_Rate = atof(PQgetvalue(t_PGresult, i, 5));
        t_equAttr.Caster_Speed = atof(PQgetvalue(t_PGresult, i, 6));
        t_equAttr.Move_Speed = atof(PQgetvalue(t_PGresult, i, 7));
        t_equAttr.Hurt_Speed = atof(PQgetvalue(t_PGresult, i, 8));
        t_equAttr.RecoveryLife_Percentage = atof(PQgetvalue(t_PGresult, i, 9));
        t_equAttr.RecoveryLife_value = atoi(PQgetvalue(t_PGresult, i, 10));
        t_equAttr.RecoveryMagic_Percentage = atof(PQgetvalue(t_PGresult, i, 11));
        t_equAttr.RecoveryMagic_value = atoi(PQgetvalue(t_PGresult, i, 12));
        t_equAttr.MagicHurt_Reduction = atof(PQgetvalue(t_PGresult, i, 13));
        t_equAttr.PhysicalHurt_Reduction = atof(PQgetvalue(t_PGresult, i, 14));
        t_equAttr.CritHurt = atof(PQgetvalue(t_PGresult, i, 15));
        t_equAttr.CritHurt_Reduction = atof(PQgetvalue(t_PGresult, i, 16));
        t_equAttr.Magic_Pass = atof(PQgetvalue(t_PGresult, i, 17));
        t_equAttr.Physical_Pass = atof(PQgetvalue(t_PGresult, i, 18));
        t_equAttr.SuitSkillID = atoi(PQgetvalue(t_PGresult, i, 19));
        t_equAttr.HP = atoi(PQgetvalue(t_PGresult, i, 20));
        t_equAttr.Magic = atoi(PQgetvalue(t_PGresult, i, 21));
        t_equAttr.PhysicalDefense = atoi(PQgetvalue(t_PGresult, i, 22));
        t_equAttr.MagicDefense = atoi(PQgetvalue(t_PGresult, i, 23));
        t_equAttr.PhysicalAttack = atoi(PQgetvalue(t_PGresult, i, 24));
        t_equAttr.MagicAttack = atoi(PQgetvalue(t_PGresult, i, 25));
        t_equAttr.Rigorous = atoi(PQgetvalue(t_PGresult, i, 26));
        t_equAttr.Will = atoi(PQgetvalue(t_PGresult, i, 27));
        t_equAttr.Wise = atoi(PQgetvalue(t_PGresult, i, 28));
        t_equAttr.Mentality = atoi(PQgetvalue(t_PGresult, i, 29));
        t_equAttr.Physical_fitness = atoi(PQgetvalue(t_PGresult, i, 30));
        t_equAttr.JobID = atoi(PQgetvalue(t_PGresult, i, 31));
        t_equAttr.BodyPos = atoi(PQgetvalue(t_PGresult, i, 32));
        t_equAttr.Grade = atoi(PQgetvalue(t_PGresult, i, 33));
        t_equAttr.Level = atoi(PQgetvalue(t_PGresult, i, 34));
        t_equAttr.StrengthenLevel = atoi(PQgetvalue(t_PGresult, i, 35));
        t_equAttr.MaxDurability = atoi(PQgetvalue(t_PGresult, i, 36));
        t_equAttr.Durability = t_equAttr.MaxDurability; // templates start undamaged
        (*equAttr)[t_equAttr.TypeID] = t_equAttr;
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Return the largest equipment id currently stored, clamped below by the
// EquipMentID base constant (also used when the table is empty — max() on an
// empty table yields one NULL row, which PQgetvalue renders as "" → atoi 0).
// Returns -1 on query failure.
hf_int32 DiskDBManager::GetEquIDMaxValue()
{
    const hf_char* str = "select max(equid) from t_playerequattr;";
    Logger::GetLogger()->Debug(str);
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    hf_int32 t_row = PQntuples(t_PGresult);
    if(t_row == 0)
    {
        PQclear(t_PGresult); // fix: result set was leaked on this path
        return EquipMentID;
    }
    hf_uint32 equid = atoi(PQgetvalue(t_PGresult, 0, 0));
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    if(equid < EquipMentID)
    {
        return EquipMentID;
    }
    return equid;
}
// Load worn-equipment records via the caller-supplied SQL and pack one
// STR_BodyEquipment per row into buff, after a STR_PackHead-sized header the
// caller fills in. The caller must size buff for all returned rows.
// Returns the row count on success, -1 on failure.
hf_int32 DiskDBManager::GetUserBodyEqu(hf_char* buff, hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult); // fix: result set was leaked on the error path
        return -1;
    }
    STR_BodyEquipment bodyEqu;
    hf_int32 t_row = PQntuples(t_PGresult);
    for(hf_int32 i = 0; i < t_row; i++)
    {
        // fix: every PQgetvalue previously read row 0, so all packed
        // records duplicated the first row; index with i instead.
        bodyEqu.roleid = atoi(PQgetvalue(t_PGresult, i, 0));
        bodyEqu.Head = atoi(PQgetvalue(t_PGresult, i, 1));
        bodyEqu.HeadType = atoi(PQgetvalue(t_PGresult, i, 2));
        bodyEqu.UpperBody = atoi(PQgetvalue(t_PGresult, i, 3));
        bodyEqu.UpperBodyType = atoi(PQgetvalue(t_PGresult, i, 4));
        bodyEqu.Pants = atoi(PQgetvalue(t_PGresult, i, 5));
        bodyEqu.PantsType = atoi(PQgetvalue(t_PGresult, i, 6));
        bodyEqu.Shoes = atoi(PQgetvalue(t_PGresult, i, 7));
        bodyEqu.ShoesType = atoi(PQgetvalue(t_PGresult, i, 8));
        bodyEqu.Belt = atoi(PQgetvalue(t_PGresult, i, 9));
        bodyEqu.BeltType = atoi(PQgetvalue(t_PGresult, i, 10));
        bodyEqu.Neaklace = atoi(PQgetvalue(t_PGresult, i, 11));
        bodyEqu.NeaklaceType = atoi(PQgetvalue(t_PGresult, i, 12));
        bodyEqu.Bracelet = atoi(PQgetvalue(t_PGresult, i, 13));
        bodyEqu.BraceletType = atoi(PQgetvalue(t_PGresult, i, 14));
        bodyEqu.LeftRing = atoi(PQgetvalue(t_PGresult, i, 15));
        bodyEqu.LeftRingType = atoi(PQgetvalue(t_PGresult, i, 16));
        bodyEqu.RightRing = atoi(PQgetvalue(t_PGresult, i, 17));
        bodyEqu.RightRingType = atoi(PQgetvalue(t_PGresult, i, 18));
        bodyEqu.Phone = atoi(PQgetvalue(t_PGresult, i, 19));
        bodyEqu.PhoneType = atoi(PQgetvalue(t_PGresult, i, 20));
        bodyEqu.Weapon = atoi(PQgetvalue(t_PGresult, i, 21));
        bodyEqu.WeaponType = atoi(PQgetvalue(t_PGresult, i, 22));
        memcpy(buff + sizeof(STR_PackHead) + i*sizeof(STR_BodyEquipment), &bodyEqu, sizeof(STR_BodyEquipment));
    }
    PQclear(t_PGresult); // fix: result set was leaked on the success path
    return t_row;
}
// Query the equipment worn by a single role and fill `bodyEqu`.
// The struct is populated only when the query in `str` returns exactly
// one row. Returns the row count, or -1 on query failure.
hf_int32 DiskDBManager::GetRoleBodyEqu(STR_BodyEquipment* bodyEqu, hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        PQclear(t_PGresult);   // BUG FIX: result was leaked on the error path
        return -1;
    }
    else
    {
        hf_int32 t_row = PQntuples(t_PGresult);
        if(t_row == 1)
        {
            bodyEqu->roleid = atoi(PQgetvalue(t_PGresult, 0, 0));
            bodyEqu->Head = atoi(PQgetvalue(t_PGresult, 0, 1));
            bodyEqu->HeadType = atoi(PQgetvalue(t_PGresult, 0, 2));
            bodyEqu->UpperBody = atoi(PQgetvalue(t_PGresult, 0, 3));
            bodyEqu->UpperBodyType = atoi(PQgetvalue(t_PGresult, 0, 4));
            bodyEqu->Pants = atoi(PQgetvalue(t_PGresult, 0, 5));
            bodyEqu->PantsType = atoi(PQgetvalue(t_PGresult, 0, 6));
            bodyEqu->Shoes = atoi(PQgetvalue(t_PGresult, 0, 7));
            bodyEqu->ShoesType = atoi(PQgetvalue(t_PGresult, 0, 8));
            bodyEqu->Belt = atoi(PQgetvalue(t_PGresult, 0, 9));
            bodyEqu->BeltType = atoi(PQgetvalue(t_PGresult, 0, 10));
            bodyEqu->Neaklace = atoi(PQgetvalue(t_PGresult, 0, 11));
            bodyEqu->NeaklaceType = atoi(PQgetvalue(t_PGresult, 0, 12));
            bodyEqu->Bracelet = atoi(PQgetvalue(t_PGresult, 0, 13));
            bodyEqu->BraceletType = atoi(PQgetvalue(t_PGresult, 0, 14));
            bodyEqu->LeftRing = atoi(PQgetvalue(t_PGresult, 0, 15));
            bodyEqu->LeftRingType = atoi(PQgetvalue(t_PGresult, 0, 16));
            bodyEqu->RightRing = atoi(PQgetvalue(t_PGresult, 0, 17));
            bodyEqu->RightRingType = atoi(PQgetvalue(t_PGresult, 0, 18));
            bodyEqu->Phone = atoi(PQgetvalue(t_PGresult, 0, 19));
            bodyEqu->PhoneType = atoi(PQgetvalue(t_PGresult, 0, 20));
            bodyEqu->Weapon = atoi(PQgetvalue(t_PGresult, 0, 21));
            bodyEqu->WeaponType = atoi(PQgetvalue(t_PGresult, 0, 22));
        }
        PQclear(t_PGresult);   // BUG FIX: result was leaked on the success path
        return t_row;
    }
}
// Query job (profession) base attributes; one result row is written to
// each successive element of the `jobAttr` array.
// Returns the number of rows read, or -1 on query failure.
hf_int32 DiskDBManager::GetJobAttribute(STR_RoleJobAttribute* jobAttr, hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_status = PQresultStatus(t_PGresult);
    if(t_status != PGRES_TUPLES_OK)
    {
        // NOTE(review): t_PGresult is never PQclear()ed in this function,
        // so the result leaks on both paths — confirm and fix.
        return -1;
    }
    else
    {
        // NOTE(review): this pre-increment skips jobAttr[0]; rows are
        // written starting at jobAttr[1]. Presumably index 0 is reserved
        // by the caller — verify against call sites.
        jobAttr++;
        hf_int32 t_row = PQntuples(t_PGresult);
        for(hf_int32 i = 0; i < t_row; i++)
        {
            jobAttr->MaxHP = atoi(PQgetvalue(t_PGresult, i, 0));;
            jobAttr->MaxMagic = atoi(PQgetvalue(t_PGresult, i, 1));;
            jobAttr->PhysicalDefense = atoi(PQgetvalue(t_PGresult, i, 2));;
            jobAttr->MagicDefense = atoi(PQgetvalue(t_PGresult, i, 3));;
            jobAttr->PhysicalAttack = atoi(PQgetvalue(t_PGresult, i, 4));
            // NOTE(review): PhysicalDefense is assigned twice (columns 2
            // and 5); column 5 most likely belongs to a different field
            // (e.g. a magic-attack stat) — verify against the table schema.
            jobAttr->PhysicalDefense = atoi(PQgetvalue(t_PGresult, i, 5));
            jobAttr->Rigorous = atoi(PQgetvalue(t_PGresult, i, 6));
            jobAttr->Will = atoi(PQgetvalue(t_PGresult, i, 7));
            jobAttr->Wise = atoi(PQgetvalue(t_PGresult, i, 8));
            jobAttr->Mentality = atoi(PQgetvalue(t_PGresult, i, 9));
            jobAttr->Physical_fitness = atoi(PQgetvalue(t_PGresult, i, 10));
            jobAttr++;
        }
        return t_row;
    }
}
// Query a role's basic profile (nick, profession, appearance, ...) and
// fill `roleInfo`. The struct is populated only when the query in `str`
// returns exactly one row.
// Returns the row count, or -1 on query failure.
hf_int32 DiskDBManager::GetRoleBasicInfo(STR_RoleBasicInfo* roleInfo, hf_char* str)
{
    m_mtx.lock();
    PGresult* t_PGresult = PQexec(m_PGconn, str);
    m_mtx.unlock();
    ExecStatusType t_ExecStatusType = PQresultStatus((t_PGresult));
    if(t_ExecStatusType != PGRES_TUPLES_OK)
    {
        printf("PQexec error\n");
        // NOTE(review): t_PGresult is never PQclear()ed in this function,
        // so the result leaks on both paths — confirm and fix.
        return -1;
    }
    else
    {
        hf_int32 t_row = PQntuples(t_PGresult); // number of result rows
        if(t_row == 1)
        {
            // NOTE(review): copies exactly PQgetlength bytes without
            // appending a NUL terminator — assumes roleInfo->Nick was
            // zero-initialized by the caller. TODO confirm.
            memcpy(roleInfo->Nick, PQgetvalue(t_PGresult, 0, 1), PQgetlength(t_PGresult, 0, 1));
            roleInfo->RoleID = atoi(PQgetvalue(t_PGresult, 0, 2));
            roleInfo->Profession = atoi(PQgetvalue(t_PGresult, 0, 3));
            roleInfo->Level = atoi(PQgetvalue(t_PGresult, 0, 4));
            roleInfo->Sex = atoi(PQgetvalue(t_PGresult, 0, 5));
            roleInfo->Figure = atoi(PQgetvalue(t_PGresult, 0, 6));
            roleInfo->FigureColor = atoi(PQgetvalue(t_PGresult, 0, 7));
            roleInfo->Face = atoi(PQgetvalue(t_PGresult, 0, 8));
            roleInfo->Eye = atoi(PQgetvalue(t_PGresult, 0, 9));
            roleInfo->Hair = atoi(PQgetvalue(t_PGresult, 0, 10));
            roleInfo->HairColor = atoi(PQgetvalue(t_PGresult, 0, 11));
            // NOTE(review): column 12 is intentionally (?) skipped here —
            // verify against the SELECT column list.
            roleInfo->ModeID = atoi(PQgetvalue(t_PGresult, 0, 13));
            roleInfo->SkirtID = atoi(PQgetvalue(t_PGresult, 0, 14));
        }
        return t_row;
    }
}
// Load the ids of tasks the player has already completed.
// Every id returned by the query in `str` is inserted into the map
// pointed to by `completeTask`, keyed by itself.
// Returns the number of rows read, or -1 on query failure.
hf_int32 DiskDBManager::GetPlayerCompleteTask(umap_completeTask completeTask, hf_char* str)
{
    m_mtx.lock();
    PGresult* result = PQexec(m_PGconn, str);
    m_mtx.unlock();

    // Guard clause: bail out early on any non-tuple result.
    if(PQresultStatus(result) != PGRES_TUPLES_OK)
    {
        printf("PQexec error\n");
        return -1;
    }

    hf_int32 rows = PQntuples(result);
    for(hf_int32 row = 0; row < rows; row++)
    {
        hf_uint32 taskid = atoi(PQgetvalue(result, row, 0));
        (*completeTask)[taskid] = taskid;
    }
    return rows;
}
|
{"hexsha": "d690da9bd844b711a0e20d942bc8f6e092ab3a16", "size": 53523, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "memManage/diskdbmanager.cpp", "max_stars_repo_name": "ycsoft/FatCat-Server", "max_stars_repo_head_hexsha": "fe01d3278927437c04977f3009154537868cc354", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 30.0, "max_stars_repo_stars_event_min_datetime": "2015-08-31T04:25:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-19T17:39:23.000Z", "max_issues_repo_path": "memManage/diskdbmanager.cpp", "max_issues_repo_name": "Mark0518/FatCat-Server", "max_issues_repo_head_hexsha": "fe01d3278927437c04977f3009154537868cc354", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "memManage/diskdbmanager.cpp", "max_forks_repo_name": "Mark0518/FatCat-Server", "max_forks_repo_head_hexsha": "fe01d3278927437c04977f3009154537868cc354", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 35.0, "max_forks_repo_forks_event_min_datetime": "2015-08-31T10:19:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-18T07:37:00.000Z", "avg_line_length": 35.9697580645, "max_line_length": 142, "alphanum_fraction": 0.6092707808, "num_tokens": 15917}
|
import numpy as np
from openvino.inference_engine import IECore
import cv2
import sys
import time
import argparse
from decode_np import Decode
def build_argparser():
    """Parse the detector's command-line options.

    Returns:
        argparse.Namespace with fields ``tiny`` (bool), ``device`` (str),
        ``source`` (str) and ``path`` (str).

    Raises:
        AssertionError: if ``--device`` or ``--source`` receive values
            outside their supported sets.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("-t", "--tiny", action="store_true",
                        help='store_true: if model is v4 or v4tiny, default: v4')
    parser.add_argument("-d", "--device", type=str, default='VPU',
                        help='str: use CPU or VPU, default=\'VPU\'')
    parser.add_argument("-s", "--source", type=str, default='camera',
                        help='str: detect from camera or file, default=\'camera\'')
    parser.add_argument("-p", "--path", type=str, default='image1.jpg',
                        help='str: path to file if detect from files, default=\'image1.jpg\'')
    args = parser.parse_args()
    # BUG FIX: the original checks were `args.device is 'CPU' or 'VPU'`,
    # which parses as `(args.device is 'CPU') or 'VPU'` and is always
    # truthy, so invalid values were never rejected. Use membership tests.
    assert args.device in ('CPU', 'VPU'), 'parser error! use \'-h\' for help'
    assert args.source in ('camera', 'file'), 'parser error! use \'-h\' for help'
    return args
if __name__ == "__main__":
    # Entry point: run YOLOv4/v4-tiny face detection on a camera stream or
    # a single image file via OpenVINO, annotating each frame with model,
    # device, latency and FPS overlays.
    args = build_argparser()
    # When reading from a file, load it up front so a bad path fails fast.
    if args.source == 'file':
        image_origin = cv2.imread(args.path)
        assert image_origin is not None, 'Image is not found, No such file or directory'
    print("\nDetect initing...")
    print('=' * 30)
    # load network (IR files produced by the OpenVINO model optimizer)
    if args.tiny:
        print('model: v4tiny')
        model_xml = './IR_FP16/yolov4-tiny.xml'
        model_bin = './IR_FP16/yolov4-tiny.bin'
    else:
        print('model: v4')
        model_xml = './IR_FP16/yolov4.xml'
        model_bin = './IR_FP16/yolov4.bin'
    ie = IECore()
    net = ie.read_network(model=model_xml, weights=model_bin)
    print("inputs number: " + str(len(net.input_info.keys())))
    for input_key in net.input_info:
        print("input shape: " + str(net.input_info[input_key].input_data.shape))
        # Remember NCHW dims of the (last) 4-D input.
        if len(net.input_info[input_key].input_data.layout) == 4:
            n, c, h, w = net.input_info[input_key].input_data.shape
    print('=' * 30)
    # build net: 'VPU' maps to OpenVINO's 'MYRIAD' plugin, otherwise CPU.
    print("Loading model to the device...")
    exec_net = ie.load_network(network=net, device_name='MYRIAD' if args.device == 'VPU' else 'CPU')
    print("Creating infer request and starting inference...")
    # The tiny model gets a lower confidence threshold.
    if args.tiny:
        conf_thresh = 0.25
    else:
        conf_thresh = 0.5
    nms_thresh = 0.60
    input_shape = (416, 416)
    all_classes = ['face']
    # ---------------------------------------------- camera --------------------------------------------------
    _decode = Decode(conf_thresh, nms_thresh, input_shape, all_classes, exec_net, iftiny=args.tiny)
    if args.source == 'camera':
        print('detect from camera...')
        cam = cv2.VideoCapture(0)
        # One warm-up read to verify the camera is usable before looping.
        ifsuccess, frame_origin = cam.read()
        assert ifsuccess is True, 'camera error'
        while 1:
            ifsuccess, frame_origin = cam.read()
            time_start = time.time()  # start
            image, boxes, scores, classes = _decode.detect_image(frame_origin, draw_image=True)
            time_stop = time.time()  # stop
            cost_time = time_stop - time_start
            # print(args.device, 'fps: ', 1 / cost_time)
            image = cv2.putText(image, 'Model: {}'.format('YOLOv4-tiny' if args.tiny else 'YOLOv4'), (10, 25),
                                cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 23, 255), 1)
            image = cv2.putText(image, 'Device: {}'.format(args.device), (10, 50),
                                cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 23, 255), 1)
            # NOTE(review): cost_time is in seconds but the label says
            # 'ms' — confirm whether a *1000 conversion was intended.
            image = cv2.putText(image, 'Cost: {:2.2f} ms'.format(cost_time),
                                (10, 75), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 23, 255), 1)
            image = cv2.putText(image,
                                'FPS: {:2.2f}'.format(1 / cost_time) if cost_time > 0 else 'FPS: --',
                                (10, 100), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 23, 255), 1)
            cv2.imshow("capture", image)
            # 'q' quits the preview loop.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cam.release()
        cv2.destroyAllWindows()
    elif args.source == 'file':
        print('detect from file: {}...'.format(args.path))
        time_start = time.time()  # start
        image, boxes, scores, classes = _decode.detect_image(image_origin, draw_image=True)
        time_stop = time.time()  # stop
        cost_time = time_stop - time_start
        # print(args.device, 'fps: ', 1 / cost_time)
        image = cv2.putText(image, 'Model: {}'.format('YOLOv4-tiny' if args.tiny else 'YOLOv4'), (10, 25),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 23, 255), 1)
        image = cv2.putText(image, 'Device: {}'.format(args.device), (10, 50),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 23, 255), 1)
        # NOTE(review): same seconds-vs-ms label mismatch as above.
        image = cv2.putText(image, 'Cost: {:2.2f} ms'.format(cost_time),
                            (10, 75), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 23, 255), 1)
        image = cv2.putText(image,
                            'FPS: {:2.2f}'.format(1 / cost_time) if cost_time > 0 else 'FPS: --',
                            (10, 100), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 23, 255), 1)
        # Persist the annotated result, then show it until a key is pressed.
        cv2.imwrite('result.jpg', image)
        cv2.imshow("capture", image)
        cv2.waitKey()
        cv2.destroyAllWindows()
|
{"hexsha": "84f444ab36f55a8f88f01b2712ab27e6fbb90205", "size": 5323, "ext": "py", "lang": "Python", "max_stars_repo_path": "detect.py", "max_stars_repo_name": "PieceZhang/face_detect_yolov4_yolov4tiny_ssd_openvino", "max_stars_repo_head_hexsha": "7e55ca610862b7c2dd1552be007a39153a8c20dc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-15T12:48:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T12:48:02.000Z", "max_issues_repo_path": "detect.py", "max_issues_repo_name": "PieceZhang/face_detect_yolov4_yolov4tiny_ssd_openvino", "max_issues_repo_head_hexsha": "7e55ca610862b7c2dd1552be007a39153a8c20dc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "detect.py", "max_forks_repo_name": "PieceZhang/face_detect_yolov4_yolov4tiny_ssd_openvino", "max_forks_repo_head_hexsha": "7e55ca610862b7c2dd1552be007a39153a8c20dc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.4957264957, "max_line_length": 110, "alphanum_fraction": 0.5641555514, "include": true, "reason": "import numpy", "num_tokens": 1391}
|
"""
Example use of vixutil to plot the term structure.
Be sure to run vixutil -r first to download the data.
"""
import vixutil as vutil
import pandas as pd
import logging as logging
import asyncio
import sys
pd.set_option('display.max_rows', 10)
#need over two months
pd.set_option('display.min_rows', 10)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
logger=logging.getLogger()
logger.setLevel(logging.INFO)
vutils=vutil.VixUtilsApi()
weights=vutils.get_vix_futures_constant_maturity_weights()
constant_maturity_term_structure = vutils.get_vix_futures_constant_maturity_term_structure()
cash_vix = vutils.get_cash_vix_term_structure()
futures_term_structure = vutils.get_vix_futures_term_structure()
wide_vix_calendar=vutils.get_vix_futures_constant_maturity_weights()
sep_lines = "_"*25+"\n"
constant_maturity_weights=vutils.get_vix_futures_constant_maturity_weights()
try:
import matplotlib.pyplot as plt
import scipy.stats as bc
except Exception as e:
logging.warning(f"""Exception {e} while trying to plot. matplotlip and scipy.stats
are required to run the plots in this example. Install them into your environment if you want to
see the graphs.""")
sys.exit(-3)
# the nine month has some bad data in it
#futures_term_structure = futures_term_structure.swaplevel(0,1,axis=1).drop(columns=[9]).swaplevel(0, 1, axis=1)
#futures_term_structure.drop(level=1,columns=[9,8],inplace=True)
futures_term_structure[['Close']].plot()
# futures_term_structure[['VIX1M_SPVIXSTR','Close']].plot()
plt.show()
constant_maturity_term_structure[['Close']].plot()
print(f"Constant maturity term structure {constant_maturity_term_structure}")
plt.show()
print(f"Cash vix {cash_vix}")
b=cash_vix['Close'][['VIX3M','VIX','VIX9D']]
b.plot()
plt.show()
#plot the term structure for Feb 16, 2021
day_of_interest = '2021-02-16'
s1 = futures_term_structure.loc[day_of_interest][["Close", "Settlement Date"]]
s2 = constant_maturity_term_structure.loc[day_of_interest][["Close", "Settlement Date"]]
s1.index = pd.Index([ (a,f"{b}") for a,b in s1.index])
s3=pd.concat([s1,s2])
one_day_ts = pd.DataFrame(s3).unstack(0)
iii=one_day_ts.columns.droplevel(0)
one_day_ts.columns=iii
one_day_ts.sort_values("Settlement Date",inplace=True)
print(f"{one_day_ts}")
one_day_ts.plot(x="Settlement Date", y="Close", kind = 'scatter', use_index=True)
plt.show()
|
{"hexsha": "8eca75cca8e499dbc8934aea4a8f4519b449250b", "size": 2492, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/vix_utils/example_plot_vix_term_structure.py", "max_stars_repo_name": "MichaelWS/vix_utils", "max_stars_repo_head_hexsha": "c7a73a0c4013f7eb2329cfe27eb012028fa31cdd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-02-23T03:11:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T19:27:03.000Z", "max_issues_repo_path": "src/vix_utils/example_plot_vix_term_structure.py", "max_issues_repo_name": "MichaelWS/vix_utils", "max_issues_repo_head_hexsha": "c7a73a0c4013f7eb2329cfe27eb012028fa31cdd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-02-20T22:57:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T10:06:45.000Z", "max_forks_repo_path": "src/vix_utils/example_plot_vix_term_structure.py", "max_forks_repo_name": "MichaelWS/vix_utils", "max_forks_repo_head_hexsha": "c7a73a0c4013f7eb2329cfe27eb012028fa31cdd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-17T18:32:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-20T12:20:42.000Z", "avg_line_length": 31.15, "max_line_length": 116, "alphanum_fraction": 0.7588282504, "include": true, "reason": "import scipy", "num_tokens": 650}
|
import six
import itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
from matplotlib.collections import LineCollection
from move_direction import angle_clockwise
from data_to_segments import angle_to_segments
from patterns import find_substr_idx
from helpers import shift_x, chunks_from_origin
from data_to_char import code_factors_in_chars
# Pool of (name, hex) colour pairs taken from matplotlib's named colours.
# NOTE(review): each element is a (name, hex) tuple, so `':' in color`
# tests tuple membership and is always False — the filter is a no-op;
# presumably `':' in color[0]` was intended. TODO confirm.
COLORS = [color for color in list(six.iteritems(mcolors.cnames)) if not ':' in color]
# Fixed seed so example plots are reproducible across runs.
np.random.seed(42)
def xy_to_segments(xy):
    """Pair each point with its successor: [p0, p1, p2] -> [(p0, p1), (p1, p2)]."""
    starts = xy[:-1]
    ends = xy[1:]
    return [pair for pair in zip(starts, ends)]
def plot_path(xy_arr, segments_weights=None, clip=True, figsize=(6, 4), title='',
              change_width=True, change_color=True, screen_lims=False, colorbar=True,
              weight_threshold=None, show_joints=False,
              feed_lines=False, width_mult=3, **kwargs):
    """Plot trajectories on a screen and highlight segments with color and linewidth.

    xy_arr is either an (N, 2) array of points (converted to consecutive
    segments) or, when feed_lines=True, an already-segmented array.
    segments_weights, when given, drives the colormap and line widths.
    Returns the created matplotlib Figure.
    """
    # Reshape xy_arr to a sequence of segments [[(x0,y0),(x1,y1)],[(x1,y1),(x2,y2)],...]
    if clip:
        xy_arr = np.clip(xy_arr, 0, 1)
    if feed_lines:
        segments = xy_arr
    else:
        segments = np.array(xy_to_segments(xy_arr))
    sw = np.array(segments_weights) if segments_weights is not None else None
    cmap = None
    linewidths = None
    # Show weights with color and linewidth
    if (sw is not None) and change_color:
        cmap = plt.get_cmap('plasma')
    if (sw is not None) and change_width:
        # NOTE(review): division binds tighter than subtraction here, so this
        # computes sw - (min/(max-min)) rather than the usual min-max
        # normalization (sw-min)/(max-min) — confirm whether parentheses
        # around (sw - min(sw)) were intended.
        linewidths = (1 + (sw-min(sw)/(max(sw)-min(sw) + 1e-16)) * width_mult)
    # Plot only segments where weight is higher than zero:
    if (sw is not None) and (weight_threshold is not None):
        segments = segments[sw > weight_threshold]
        sw = sw[sw > weight_threshold]
    fig, ax = plt.subplots(1, 1, figsize=figsize)
    # Convert data to LineCollection
    line_segments = LineCollection(segments, linewidths=linewidths, linestyles='solid',
                                   cmap=cmap, **kwargs)#, norm=plt.Normalize(min(fw), max(fw)))
    line_segments.set_array(sw)
    ax.add_collection(line_segments)
    if show_joints:
        # x, y start segment points + last x, y point from segments ends
        x = np.concatenate([segments[:, 0][:, 0], [segments[:, 1][-1, 0]]])
        y = np.concatenate([segments[:, 0][:, 1], [segments[:, 1][-1, 1]]])
        ax.scatter(x, y, s=10)
    # Add colorbar for weights
    if (sw is not None) and colorbar:
        axcb = fig.colorbar(line_segments)
        axcb.set_label('Activation')
        plt.sci(line_segments)  # this allows interactive changing of the colormap.
    # Set plot limits
    if screen_lims:
        ax.set_ylim([-0.05, 1.05])
        ax.set_xlim([-0.05, 1.05])
    else:
        ax.set_xlim((np.amin(xy_arr[:, 0] - 0.05), np.amax(xy_arr[:, 0]) + 0.05))
        ax.set_ylim((np.amin(xy_arr[:, 1] - 0.05), np.amax(xy_arr[:, 1]) + 0.05))
    ax.set_title(title, fontsize=12, y=1.1)
    ax.invert_yaxis()  # invert y axis according to the eye-tracking data
    ax.xaxis.tick_top()
    return fig
def plot_dir_segments(n_segments=12, n=1000):
    """Plot example of N segments.

    Draws random points in the unit disc, computes each point's clockwise
    angle from (0, 1), buckets the angles into n_segments, and scatters
    the points coloured by bucket. Returns the matplotlib Figure.
    """
    np.random.seed(48)  # fixed seed so the example is reproducible
    p = 2 * np.random.rand(3, n*2) - 1
    # Keep only points that fall inside the unit circle.
    p = p[:n, np.power(np.sum(p * p, 0), 0.5) <= 1]
    x, y = p[0], p[1]
    move_dir = angle_clockwise(x1 = 0, y1 = 1, x2 = x.T, y2 = y.T)
    dir_segm = angle_to_segments(move_dir, n_segments, segment_ranges=False)
    fig = plt.figure(figsize=(7, 7))
    _ = plt.scatter(x, y, c=dir_segm, cmap=plt.cm.tab20_r, s=15)
    return fig
def plot_distr_segments(move_len, factor_len):
    """Plot distribution lengths into N segments.

    Histograms move_len and overlays one shaded vertical band per unique
    (start, end) range found in factor_len. Returns the matplotlib Figure.
    """
    labels = list(set(factor_len))
    colors = plt.cm.tab20_r
    fig, ax = plt.subplots(1, 1, figsize=(14, 4))
    p = plt.hist(move_len, bins=100)
    for i, (l_s, l_e) in enumerate(labels):
        # COLORS entries are (name, hex) pairs; [1] selects the hex value.
        ax.axvspan(l_s, l_e, facecolor=COLORS[i][1],
                   label='{:.3f}-{:.3f}'.format(l_s, l_e), alpha=0.2)
    ax.legend(loc='upper right')
    return fig
def plot_chunks_example(n_segments=12, n=10):
    """Illustrate character-coded direction chunks on a small random path.

    Random in-disc points are bucketed by movement direction, the bucket
    sequence is coded into characters, and each character's chunks are
    drawn as coloured polylines over the numbered points.
    Returns the matplotlib Figure.
    """
    p = 2 * np.random.rand(3, n) - 1
    # Keep only points that fall inside the unit circle.
    p = p[:, np.power(np.sum(p * p, 0), 0.5) <= 1]
    x, y = p[0], p[1]
    move_dir = angle_clockwise(x1 = 0, y1 = 1, x2 = x.T, y2 = y.T)
    dir_segm = angle_to_segments(move_dir, n_segments)
    fig = plt.figure(figsize=(7, 7))
    seq, codes = code_factors_in_chars([dir_segm])
    for i, char in enumerate(sorted(codes.keys())):
        mask = [c == char for c in seq]
        mask = shift_x(mask, shift=-1) | mask # to include previous x
        x_chunks = chunks_from_origin(x[mask], step=2, origin_value=0.)
        y_chunks = chunks_from_origin(y[mask], step=2, origin_value=0.)
        plt.plot(x_chunks, y_chunks, color=COLORS[i][1], alpha=0.5)
    plt.scatter(x, y)
    # Annotate each point with its index along the path.
    for i in range(len(x)):
        plt.annotate(str(i), (x[i], y[i]+0.02), fontsize=12)
    return fig
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues, figsize=(6, 6)):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    cm is a square count matrix; classes labels its rows/columns (true on
    the y axis, predicted on the x axis). Returns the matplotlib Figure.
    """
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fig, ax = plt.subplots(1, 1, figsize=figsize)
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar(im, fraction=0.046, pad=0.04) # magic to equlize height of colorbar with the image height
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    # Flip text colour past mid-scale so it stays readable on dark cells.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    return fig
|
{"hexsha": "ffb786cbacac0e91da2a36ed18dddec6489b6f18", "size": 6157, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/plot_helpers.py", "max_stars_repo_name": "taneta/patterns", "max_stars_repo_head_hexsha": "b00e5e3466e467992795f183f12b3a0101bd238b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-01-05T08:59:11.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-07T08:34:59.000Z", "max_issues_repo_path": "utils/plot_helpers.py", "max_issues_repo_name": "taneta/patterns", "max_issues_repo_head_hexsha": "b00e5e3466e467992795f183f12b3a0101bd238b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/plot_helpers.py", "max_forks_repo_name": "taneta/patterns", "max_forks_repo_head_hexsha": "b00e5e3466e467992795f183f12b3a0101bd238b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-01T07:30:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-01T07:30:01.000Z", "avg_line_length": 36.2176470588, "max_line_length": 106, "alphanum_fraction": 0.6178333604, "include": true, "reason": "import numpy", "num_tokens": 1730}
|
/// \file \brief This file disables some warnings produced by the library
///
/// \warning This file has no include guards (it is supposed to be included
/// multiple times) and should always be paired with a:
///
///     #include <range/v3/detail/re_enable_warnings.hpp>
///
/// which pops the diagnostic state pushed here.
///
/// The following warnings are disabled by this file:
/// -Wshadow
///
/// \note Works with GCC and Clang
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wshadow"
|
{"hexsha": "c87dd8f5daf36eb281237c0676be3f02350b6292", "size": 453, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/range/v3/detail/disable_warnings.hpp", "max_stars_repo_name": "CornedBee/range-v3", "max_stars_repo_head_hexsha": "99a9f5f70e65dfcf6bbc8894bf2a22d8f5d4552a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/range/v3/detail/disable_warnings.hpp", "max_issues_repo_name": "CornedBee/range-v3", "max_issues_repo_head_hexsha": "99a9f5f70e65dfcf6bbc8894bf2a22d8f5d4552a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/range/v3/detail/disable_warnings.hpp", "max_forks_repo_name": "CornedBee/range-v3", "max_forks_repo_head_hexsha": "99a9f5f70e65dfcf6bbc8894bf2a22d8f5d4552a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2, "max_line_length": 75, "alphanum_fraction": 0.7152317881, "num_tokens": 103}
|
#! Demonstrates a failing test suite initializer.
#! The suite initializer asserts .false., so the framework must mark every
#! test in the suite as skipped/failed and never run the finalizer.
#:include 'fytest.fypp'
#:block TEST_SUITE('failing_suite')
  use mymath
  implicit none
#:contains
  #! Using the test suite initializer to initialize suite.
  #! Since it will fail, none of the tests in the suite will be run.
  #:block TEST_SUITE_INIT
    call random_seed()
    @:ASSERT(.false.)
    print *, "Error: this line should be never reached"
    stop
  #:endblock
  #! The finalizer must be skipped too, because initialization failed.
  #:block TEST_SUITE_FINAL
    print *, 'Error: This should be never reached, since init has failed'
    stop
  #:endblock
  #! Testing for various special factorial values
  #:block TEST_FIXTURE('random', RENDERER='render')
    #! Variables defined here can be accessed by each unit within the fixture
    integer :: curval
  #:contains
    #! Test fixture initializer is called each time before a test starts.
    #! A separate scope is created for each test.
    #:block TEST_FIXTURE_INIT
      real :: rand
      call random_number(rand)
      curval = int(rand * 10.0) + 1
    #:endblock
    #! Tests can access the fixture scope
    #:block TEST('recursion_up')
      @:ASSERT(factorial(curval) * (curval + 1) == factorial(curval + 1))
    #:endblock
    #:block TEST('recursion_down')
      @:ASSERT(factorial(curval) == curval * factorial(curval - 1))
    #:endblock
    #! We define a renderer to show the random number used in a given fixture
    #! A renderer must have no arguments and return a string containing a human
    #! readable representation of the fixture.
    function render() result(str)
      character(:), allocatable :: str
      character(10) :: buffer
      write(buffer, "(A,I0)") 'curval=', curval
      str = trim(buffer)
    end function render
  #:endblock TEST_FIXTURE
#:endblock TEST_SUITE
#! Generate the default test driver (no extra suites registered here).
#:block TEST_DRIVER()
#:endblock TEST_DRIVER
|
{"hexsha": "0727fe8f3d33befd601e7d05ce464eace0783444", "size": 1823, "ext": "fpp", "lang": "FORTRAN", "max_stars_repo_path": "examples/serial/test/test_failing_suite.fpp", "max_stars_repo_name": "aradi/fytest", "max_stars_repo_head_hexsha": "9133d5dab5b582161f4fb4c4b127d7f97133e3e7", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-01-13T23:34:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-19T11:25:29.000Z", "max_issues_repo_path": "examples/serial/test/test_failing_suite.fpp", "max_issues_repo_name": "aradi/fyunit", "max_issues_repo_head_hexsha": "9133d5dab5b582161f4fb4c4b127d7f97133e3e7", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-23T15:59:07.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-23T15:59:07.000Z", "max_forks_repo_path": "examples/serial/test/test_failing_suite.fpp", "max_forks_repo_name": "aradi/fyunit", "max_forks_repo_head_hexsha": "9133d5dab5b582161f4fb4c4b127d7f97133e3e7", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-02T18:31:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T18:31:41.000Z", "avg_line_length": 24.3066666667, "max_line_length": 79, "alphanum_fraction": 0.6736149205, "num_tokens": 462}
|
import numpy as np
from sklearn.decomposition import PCA
def identity(data):
    """Identity normalization: return the input untouched."""
    return data
def l2(data):
    """Divide each column by its (regularized) Euclidean norm along axis 0."""
    arr = np.asarray(data)
    column_norms = np.linalg.norm(arr, axis=0)
    return arr / (column_norms + 1e-4)
def l1(data):
    """Divide each column by its (regularized) L1 norm along axis 0."""
    arr = np.asarray(data)
    column_norms = np.linalg.norm(arr, axis=0, ord=1)
    return arr / (column_norms + 1e-4)
def pca_whitening_30d(data):
    """Project `data` onto at most 30 whitened principal components.

    Args:
        data: 2-D array of shape (n_samples, n_features).

    Returns:
        Whitened, reduced array of shape (n_samples, dim) where
        dim = min(30, n_samples, n_features).
    """
    # BUG FIX: sklearn's PCA requires n_components <= min(n_samples,
    # n_features); the original capped only by n_samples and raised on
    # inputs with fewer than 30 features.
    dim = min(30, data.shape[0], data.shape[1])
    model = PCA(whiten=True, n_components=dim)
    reduced_data = model.fit_transform(data)
    return reduced_data
|
{"hexsha": "55bcfe92dc287bb42567f2965ee01d7760871353", "size": 504, "ext": "py", "lang": "Python", "max_stars_repo_path": "Animator/normalization_methods.py", "max_stars_repo_name": "oronnir/CAST", "max_stars_repo_head_hexsha": "c2b095a516e5ad0cdfec8b13196045549cbd3f4c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2022-01-20T12:40:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T15:11:18.000Z", "max_issues_repo_path": "Animator/normalization_methods.py", "max_issues_repo_name": "oronnir/CAST", "max_issues_repo_head_hexsha": "c2b095a516e5ad0cdfec8b13196045549cbd3f4c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-10T03:01:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T03:01:47.000Z", "max_forks_repo_path": "Animator/normalization_methods.py", "max_forks_repo_name": "oronnir/CAST", "max_forks_repo_head_hexsha": "c2b095a516e5ad0cdfec8b13196045549cbd3f4c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.0, "max_line_length": 72, "alphanum_fraction": 0.6805555556, "include": true, "reason": "import numpy", "num_tokens": 141}
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
def get_output_shape(attrs, in_shape, img_real_size):
    """Compute the per-sample im2sequence output height and width.

    Args:
        attrs: dict with 'paddings' (4 ints), 'kernels' (2 ints),
            'strides' (2 ints) and, when real sizes are given,
            'out_stride' (2 ints).
        in_shape: input tensor shape (batch, channels, height, width).
        img_real_size: empty array, or a (batch, 2) array of real
            per-image (height, width) sizes.

    Returns:
        (output_height, output_width): two int32 arrays of shape (1, batch).
    """
    batchsize = in_shape[0]
    img_height = in_shape[2]
    img_width = in_shape[3]
    paddings = np.array(attrs['paddings']).astype("int32")
    kernels = np.array(attrs['kernels']).astype("int32")
    strides = np.array(attrs['strides']).astype("int32")
    output_height = np.zeros((1, batchsize)).astype("int32")
    output_width = np.zeros((1, batchsize)).astype("int32")
    if len(img_real_size):
        out_stride = np.array(attrs['out_stride']).astype("int32")
        imgreal_h = 0
        imgreal_w = 0
        for index in range(batchsize):
            # Ceil-divide the real size by out_stride. Note: `//` replaces
            # the py2-era `/` so the arithmetic stays integral on Python 3.
            if img_real_size[index, 0] % out_stride[0] == 0:
                imgreal_h = img_real_size[index, 0] // out_stride[0]
            else:
                imgreal_h = img_real_size[index, 0] // out_stride[0] + 1
            # BUG FIX: the width branch tested and divided the *height*
            # column (img_real_size[index, 0]); use column 1, the width.
            if img_real_size[index, 1] % out_stride[1] == 0:
                imgreal_w = img_real_size[index, 1] // out_stride[1]
            else:
                imgreal_w = img_real_size[index, 1] // out_stride[1] + 1
            output_height[0,index] = \
                1 + \
                (imgreal_h + paddings[0] + paddings[2] - kernels[0] + strides[0] - 1) // \
                strides[0]
            output_width[0,index] = \
                1 + \
                (imgreal_w + paddings[1] + paddings[3] - kernels[1] + strides[1] - 1) // \
                strides[1]
    else:
        # No real sizes supplied: every sample uses the nominal input size.
        for index in range(batchsize):
            output_height[0,index] = \
                1 + \
                (img_height + paddings[0] + paddings[2] - kernels[0] + strides[0] - 1) // \
                strides[0]
            output_width[0,index] = \
                1 + \
                (img_width + paddings[1] + paddings[3] - kernels[1] + strides[1] - 1) // \
                strides[1]
    return output_height, output_width
def im2col(attrs, im, col):
    """
    Reference im2col: copy kernel-sized patches of `im` into `col`.

    im: {CHW} single image (channels, height, width)
    col: pre-allocated output, filled in place:
    {outputHeight, outputWidth, inputChannels, filterHeight, filterWidth}

    attrs supplies 'strides' (2 ints) and 'paddings' (at least top/left at
    indices 0 and 1). Positions that fall in the padded border are
    written as 0.0.
    """
    input_channels, input_height, input_width = im.shape
    output_height, output_width, _, filter_height, filter_width = col.shape
    stride_height, stride_width = attrs['strides']
    padding_height, padding_width = attrs['paddings'][0:2]
    for col_row_idx in range(0, output_height):
        for col_col_idx in range(0, output_width):
            for channel in range(0, input_channels):
                for filter_row_idx in range(0, filter_height):
                    for filter_col_idx in range(0, filter_width):
                        # Source pixel this filter tap reads, after
                        # removing the padding offset.
                        im_row_offset = col_row_idx * stride_height \
                            + filter_row_idx - padding_height
                        im_col_offset = col_col_idx * stride_width \
                            + filter_col_idx - padding_width
                        if (im_row_offset < 0 or
                                im_row_offset >= input_height or
                                im_col_offset < 0 or
                                im_col_offset >= input_width):
                            # Out of bounds -> padding region, zero-fill.
                            col[col_row_idx][col_col_idx][channel][\
                                filter_row_idx][filter_col_idx] = 0.0
                        else:
                            im_offset = (channel * input_height + im_row_offset \
                                         ) * input_width + im_col_offset
                            col[col_row_idx][col_col_idx][channel][\
                                filter_row_idx][filter_col_idx] = im[channel][ \
                                im_row_offset][im_col_offset]
def Im2Sequence(inputs, img_real_size, attrs):
    """Reference im2sequence: im2col each image and stack the row blocks.

    inputs is an NCHW batch; the result is the per-image im2col matrices
    (rows = output positions, cols = channels*kh*kw) concatenated along
    axis 0.
    """
    out_h, out_w = get_output_shape(attrs, inputs.shape, img_real_size)
    n_batch = inputs.shape[0]
    n_channels = inputs.shape[1]
    k_h, k_w = attrs['kernels'][0], attrs['kernels'][1]
    pieces = []
    for idx in range(n_batch):
        # Allocate this sample's column buffer, fill it, then flatten the
        # patch dimensions into one row per output position.
        col = np.zeros([
            out_h[0, idx], out_w[0, idx], n_channels, k_h, k_w
        ]).astype("float32")
        im2col(attrs, inputs[idx], col)
        pieces.append(col.reshape([
            out_h[0, idx] * out_w[0, idx], n_channels * k_h * k_w
        ]))
    return np.concatenate(pieces, axis=0)
class TestBlockExpandOp(OpTest):
    """Base test for the im2sequence op against the NumPy reference Im2Sequence.

    Subclasses override config() to vary batch/image geometry and op attributes.
    """

    def config(self):
        # Default geometry: one 3-channel 4x4 image, 2x2 kernel, stride 1,
        # padding 1 on every side.
        self.batch_size = 1
        self.img_channels = 3
        self.img_height = 4
        self.img_width = 4
        self.attrs = {
            'kernels': [2, 2],
            'strides': [1, 1],
            'paddings': [1, 1, 1, 1],
        }

    def setUp(self):
        self.config()
        self.op_type = "im2sequence"
        x = np.random.uniform(0.1, 1, [
            self.batch_size, self.img_channels, self.img_height, self.img_width
        ]).astype("float32")
        # Empty real_size array -- presumably selects the fixed-size code
        # path in get_output_shape; TODO confirm against the operator.
        real_size = np.array([]).astype("float32")
        out = Im2Sequence(x, real_size, self.attrs)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out')
class TestBlockExpandOpCase2(TestBlockExpandOp):
    """Batch of 2, 3-channel 4x5 images, 2x1 kernel, stride 2x1, padding 2/1."""

    def config(self):
        self.batch_size = 2
        self.img_channels = 3
        self.img_height = 4
        self.img_width = 5
        self.attrs = {
            'kernels': [2, 1],
            'strides': [2, 1],
            'paddings': [2, 1, 2, 1],
        }
class TestBlockExpandOpCase3(TestBlockExpandOp):
    """Single-channel 4x5 images, height-only padding (2, 0, 2, 0)."""

    def config(self):
        self.batch_size = 2
        self.img_channels = 1
        self.img_height = 4
        self.img_width = 5
        self.attrs = {
            'kernels': [2, 1],
            'strides': [2, 1],
            'paddings': [2, 0, 2, 0],
        }
class TestBlockExpandOpCase4(TestBlockExpandOp):
    """2-channel 3x3 images, 2x2 kernel, no padding."""

    def config(self):
        self.batch_size = 2
        self.img_channels = 2
        self.img_height = 3
        self.img_width = 3
        self.attrs = {
            'kernels': [2, 2],
            'strides': [1, 1],
            'paddings': [0, 0, 0, 0],
        }
class TestBlockExpandOpCase5(OpTest):
    """im2sequence with a real-size input 'Y' and an 'out_stride' attribute.

    Derives from OpTest directly (not TestBlockExpandOp): it feeds the op a
    second input 'Y' and skips the gradient check.
    """

    def config(self):
        self.batch_size = 1
        self.img_channels = 3
        self.img_height = 4
        self.img_width = 5
        self.attrs = {
            'kernels': [2, 1],
            'strides': [2, 1],
            'paddings': [2, 1, 2, 1],
            'out_stride': [2, 2],
        }

    def setUp(self):
        self.config()
        self.op_type = "im2sequence"
        x = np.random.uniform(0.1, 1, [
            self.batch_size, self.img_channels, self.img_height, self.img_width
        ]).astype("float32")
        # NOTE(review): real_size has 2 rows while batch_size is 1 -- confirm
        # the intended per-image (height, width) pairs for this case.
        real_size = np.array([[8, 10], [5, 8]]).astype("float32")
        out = np.array(Im2Sequence(x, real_size, self.attrs))
        self.inputs = {'X': x, 'Y': real_size}  # 'Y': per-image real sizes
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()
class TestBlockExpandOpCase6(OpTest):
    """im2sequence with real-size input 'Y': 3 single-channel 4x5 images, no padding."""

    def config(self):
        self.batch_size = 3
        self.img_channels = 1
        self.img_height = 4
        self.img_width = 5
        self.attrs = {
            'kernels': [2, 1],
            'strides': [1, 1],
            'paddings': [0, 0, 0, 0],
            'out_stride': [1, 1],
        }

    def setUp(self):
        self.config()
        self.op_type = "im2sequence"
        x = np.random.uniform(0.1, 1, [
            self.batch_size, self.img_channels, self.img_height, self.img_width
        ]).astype("float32")
        # One (height, width) row per image in the batch.
        real_size = np.array([[8, 10], [5, 8], [5, 8]]).astype("float32")
        out = np.array(Im2Sequence(x, real_size, self.attrs))
        self.inputs = {'X': x, 'Y': real_size}  # 'Y': per-image real sizes
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()
class TestBlockExpandOpCase7(OpTest):
    """im2sequence with real-size input 'Y': 2-channel 3x3 images, height padding only."""

    def config(self):
        self.batch_size = 2
        self.img_channels = 2
        self.img_height = 3
        self.img_width = 3
        self.attrs = {
            'kernels': [2, 2],
            'strides': [1, 1],
            'paddings': [1, 0, 1, 0],
            'out_stride': [2, 2],
        }

    def setUp(self):
        self.config()
        self.op_type = "im2sequence"
        x = np.random.uniform(0.1, 1, [
            self.batch_size, self.img_channels, self.img_height, self.img_width
        ]).astype("float32")
        # One (height, width) row per image in the batch.
        real_size = np.array([[6, 6], [4, 4]]).astype("float32")
        out = np.array(Im2Sequence(x, real_size, self.attrs))
        self.inputs = {'X': x, 'Y': real_size}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()
if __name__ == '__main__':
    # unittest is presumably imported at the top of the file (outside this
    # chunk) -- TODO confirm.
    unittest.main()
# vim: set shiftwidth=4 expandtab tabstop=4:
|
{"hexsha": "13bc5768740ece00bbe285a0b47d82bb8a42d2c7", "size": 9502, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/paddle/fluid/tests/unittests/test_im2sequence_op.py", "max_stars_repo_name": "jerrywgz/Paddle", "max_stars_repo_head_hexsha": "85c4912755b783dd7554a9d6b9dae4a7e40371bc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-08-03T03:33:52.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-03T03:33:52.000Z", "max_issues_repo_path": "python/paddle/fluid/tests/unittests/test_im2sequence_op.py", "max_issues_repo_name": "jerrywgz/Paddle", "max_issues_repo_head_hexsha": "85c4912755b783dd7554a9d6b9dae4a7e40371bc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-07-15T14:20:08.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-06T03:16:54.000Z", "max_forks_repo_path": "python/paddle/fluid/tests/unittests/test_im2sequence_op.py", "max_forks_repo_name": "jerrywgz/Paddle", "max_forks_repo_head_hexsha": "85c4912755b783dd7554a9d6b9dae4a7e40371bc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-07-20T07:13:31.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-20T07:13:31.000Z", "avg_line_length": 33.695035461, "max_line_length": 88, "alphanum_fraction": 0.5451483898, "include": true, "reason": "import numpy", "num_tokens": 2483}
|
[STATEMENT]
lemma double_swap_qSwap:
assumes "good X"
shows "qGood (((pick X) #[[x \<and> y]]_zs) #[[x' \<and> y']]_zs') \<and>
((X #[x \<and> y]_zs) #[x' \<and> y']_zs') = asTerm (((pick X) #[[x \<and> y]]_zs) #[[x' \<and> y']]_zs')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. qGood (pick X #[[x \<and> y]]_zs #[[x' \<and> y']]_zs') \<and> X #[x \<and> y]_zs #[x' \<and> y']_zs' = asTerm (pick X #[[x \<and> y]]_zs #[[x' \<and> y']]_zs')
[PROOF STEP]
by (simp add: asTerm_qSwap_swap assms
good_imp_qGood_pick local.swap_def qSwap_preserves_qGood1)
|
{"llama_tokens": 267, "file": "Binding_Syntax_Theory_Transition_QuasiTerms_Terms", "length": 1}
|
library(fitdistrplus)  # distribution fitting
######################################################################################
library(httk) # High-Throughput Toxicokinetics
library(sensitivity) # Sensitivity Analysis
######################################################################################
# devtools::install_github("nanhung/pksensi")
library(pksensi) # Global Sensitivity Analysis in Pharmacokinetic Modeling, Hsieh 2018
library(PK) # Basic Non-Compartmental Pharmacokinetics https://cran.r-project.org/web/packages/PK/index.html
#devtools::install_github("dpastoor/PKPDmisc")
library(PKPDmisc) # Pharmacokinetic and Pharmacodynamic Data Management Functions
######################################################################################
# http://pkpdsim.ronkeizer.com/ https://github.com/InsightRX/PKPDsim
# devtools::install_github("InsightRX/PKPDsim")
library(PKPDsim) # Simulate dose regimens for PKPD models described by ODE or equation
#devtools::install_github("ronkeizer/PKPDsimShiny")
library(PKPDsimShiny) # Shiny front-end for PKPDsim
######################################################################################
# helper plotting functions (defines e.g. lowry_plot used below)
source('https://alfcrisci.github.io/Mychif/plot_senstivity_aux.r')
######################################################################################
# Distribution analysis
######################################################################################
# check which of the mycotoxins of interest are covered by the httk data sets
mycotoxins_data=read.csv("https://alfcrisci.github.io/Mychif/mycotoxins.csv")
mycoCAS <- mycotoxins_data$CAS
chem.lists$Tox21[which(chem.lists$Tox21$CAS %in% mycoCAS ==T),] # Zearalenone
chem.lists$ExpoCast[which(chem.lists$ExpoCast$CAS %in% mycoCAS ==T),] # Zearalenone ZEA 807, Aflatoxin B1 6469
chem.physical_and_invitro.data[which(chem.physical_and_invitro.data$CAS %in% mycoCAS ==T),] # OTA, Aflatoxin B1
#############################################################################################
# Non-compartmental analysis
#############################################################################################
# set seed for reproducibility
set.seed(34534)
#############################################################################################
# One-compartment flip-flop kinetics (FFPK) PK model, used below for sensitivity
# analysis via the pksensi package. Inputs are generally considered for bolus regimes:
#   F  bioavailability: fraction of the administered dose reaching general circulation
#   KA first-order absorption rate constant (1/time)
#   KE first-order elimination rate constant (1/time)
#   V  distribution volume, see https://en.wikipedia.org/wiki/Volume_of_distribution
# Volume of distribution is a "primary pharmacokinetic parameter": it depends on the
# physiologic properties of the body and the physiochemical properties of the drug;
# it is not derived from other PK parameters but is used to estimate "secondary" ones.
# For a bolus regime Vd/F equals 2.01, hence Vd = 2.01*0.75.
#############################################################################################
# toxicokinetics
#############################################################################################
# po DON, Saint-Cyr et al. 2015, doi:10.3390/toxins7124873
D=100 # microL/kgbw
params <- c(F = 0.75, KA = 3.72, KE = 0.29, V = 1.5074)
t <- seq(0, 24, 0.01)
C <-pksensi::FFPK(params = params, time = t,dose=D)
plot(t, C, type = "l", xlab = "Hours", ylab = "DON concentration")
PKPDmisc::nca(t, C, dose=100, last_times = c(3, 4, 5), digits = 2)
#############################################################################################
# po NDON control, Paulick et al. 2015, doi:10.3390/toxins7114622
D=75 # microL/kgbw
params <- c(F = 0.98, KA = 0.61, KE = 0.12, V = 1.68)
t <- seq(0, 24, 0.01)
C <-FFPK(params = params, time = t,dose=D)
plot(t, C, type = "l", xlab = "Hours", ylab = "DON concentration")
PKPDmisc::nca(t, C, dose=75, last_times = c(3, 4, 5), digits = 2)
# sulfite DON
D=71 # microL/kgbw
params <- c(F = 0.893, KA = 0.99, KE = 0.13, V = 1.98)
t <- seq(0, 24, 0.01)
C <-FFPK(params = params, time = t,dose=D)
plot(t, C, type = "l", xlab = "Hours", ylab = "DON concentration")
PKPDmisc::nca(t, C, dose=71, last_times = c(3, 4, 5), digits = 2)
########################################################################################################
# Sensitivity analysis, DON in pigs (eFAST via pksensi::rfast99)
# reference parameter sets from the runs above:
# params <- c(F = 0.893, KA = 0.99, KE = 0.13, V = 1.98)
# params <- c(F = 0.75, KA = 3.72, KE = 0.29, V = 1.5074)
q <- "qunif"
params <- c("F","KA","KE","V")
# uniform ranges for F, KA, KE, V respectively
q.arg <- list(list(min = 0.6, max = 1.0),
              list(min = 0.5, max = 4),
              list(min = 0.02, max = 0.3),
              list(min = 1, max = 3))
x <- rfast99(params = params, n = 200, q = q, q.arg = q.arg, rep = 20)
time <- seq(0.1, 24, 0.1)
y <- solve_fun(x, model = FFPK, time = time, vars = "output")
tell2(x,y) # link decoupled simulation results back to the design
check(x)
heat_check(x)
heat_check(x, index = "CI")
test=x
# NOTE(review): the typo "analisys" below is inside a runtime plot label
# string, so it is left unchanged here.
dimnames(test$y)[[4]]=" Fast99 sensitivity analisys - Pigs"
plot(x)
##################################################################################################
# Sobol sensitivity analysis: same uniform ranges (F, KA, KE, V) as for eFAST above
q.arg <- list(list(min = 0.6, max = 1.0),
              list(min = 0.5, max = 4),
              list(min = 0.02, max = 0.3),
              list(min = 1, max = 3))
sobolSim <- function(r, n){
  # Draw an n-by-length(r) design matrix of independent uniform samples,
  # rescaling column m to the [min, max] interval given in r[[m]].
  # A single runif(M * n) call fills the matrix, then each column is
  # shifted/scaled in place.
  M <- length(r)
  X <- data.frame(matrix(runif(M * n), nrow = n))
  for (m in seq_len(M)) {
    bounds <- r[[m]]
    X[, m] <- X[, m] * (bounds$max - bounds$min) + bounds$min
  }
  X
}
# Two independent design matrices for the Sobol-Jansen scheme.
# dose, time and n_boot are read as globals by the FFPK* response
# functions defined below.
X1 <- sobolSim(q.arg,n=200)
X2 <- sobolSim(q.arg,n=200)
dose=100
time=seq(0.1, 24, 0.1)
n_boot=200
FFPKTmax=function(X) {
  # Time of peak concentration (Tmax) for one FFPK parameter draw.
  # X is one row of the design matrix in column order (F, KA, KE, V);
  # time and dose are globals set above.
  X=as.numeric(X)
  # BUG FIX: KE was taken from X[4] (the V column); KE is the 3rd column.
  params <- c(F = X[1], KA = X[2], KE = X[3], V = X[4])
  as.numeric(time[which.max(as.numeric(FFPK(params,time=time,dose=dose)))])
}
FFPKCmax=function(X) {
  # Peak concentration (Cmax) for one FFPK parameter draw.
  # X is one row of the design matrix in column order (F, KA, KE, V).
  X=as.numeric(X)
  # BUG FIX: KE was taken from X[4] (the V column); KE is the 3rd column.
  params <- c(F = X[1], KA = X[2], KE = X[3], V = X[4])
  max(as.numeric(FFPK(params,time=time,dose=dose)))
}
FFPKauc=function(X) {
  # Area under the curve (rectangle-sum over the fixed time grid) for one
  # FFPK parameter draw. X is one design row in column order (F, KA, KE, V).
  X=as.numeric(X)
  # BUG FIX: KE was taken from X[4] (the V column); KE is the 3rd column.
  params <- c(F = X[1], KA = X[2], KE = X[3], V = X[4])
  sum(as.numeric(FFPK(params,time=time,dose=dose)))
}
# Build the Sobol-Jansen design and evaluate the three PK endpoints on it.
sa <- soboljansen(model = NULL, X1, X2, nboot = n_boot, conf = 0.95)
# BUG FIX: the matrix below was created with no left-hand-side name
# ("<- matrix(...)"), which is a syntax error; it must be assigned to
# SimRes, the object filled and passed to tell() below.
SimRes <- matrix(NA, nrow = nrow(sa$X), ncol = 3)
SimRes[,1]=apply(sa$X,1,FFPKTmax)
SimRes[,2]=apply(sa$X,1,FFPKCmax)
SimRes[,3]=apply(sa$X,1,FFPKauc)
colnames(SimRes) <- c("Tmax","Cmax","AUC")
tell(x = sa, y = SimRes, nboot = n_boot, conf = 0.95)
#################################################################################################################
# Collect first-order and total Sobol indices for each endpoint and plot them.
par_sim=c("Tmax","Cmax","AUC")
n=length(par_sim)
np=length(c("F","KA","KE","V"))
FOI = TI = TI.borninf= TI.bornsup= matrix(NA, nrow = np, ncol = n)
rownames(FOI)= rownames(TI)= rownames(TI.borninf) = rownames(TI.bornsup) =c("F","Kabs","Ke","Vd")
par(mfrow=c(2,2), las=2, cex=0.7)
for(i in 1:n){
  print(i)
  # re-tell sa with a single endpoint column to get its indices
  tell(x = sa, y = SimRes[,i], nboot = n_boot, conf = 0.95)
  FOI[,i] = sa$S[,1] #First order indices
  TI[,i] = sa$T[,1] #Total indices
  TI.borninf[,i] = sa$T[,4] #Lower CL total indices
  TI.bornsup[,i] = sa$T[,5] #Upper CL total indices
  plot(sa, main=as.character(par_sim[i]))
}
dev.off()
#############################################################################################
# Lowry plots
# NOTE(review): FOI built above has only length(par_sim)=3 columns, so i <- 20
# looks out of range here (possibly carried over from a time-resolved
# analysis with a t_A matrix) -- verify before running.
i <- 20 # repeat for each point in time (column of t_A)
# clip negative first-order indices to zero; their sum should not exceed 1
for (j in 1:nrow(FOI)) {
  if (FOI[j,i]<0) {
    FOI[j,i] <- 0
  }
}
if (sum(FOI[,i])>1) {
  FOI[,i] <- FOI[,i]/sum(FOI[,i])
}
# Create data frame for lowry, with first order and interaction effects.
# NOTE(review): Names_var is not defined in this file -- presumably provided
# by the sourced plot_senstivity_aux.r script; confirm.
lowry <- data.frame(Parameter=Names_var,Interaction=TI[,i]-FOI[,i],Main.Effect=FOI[,i])
# Make sure that interaction indices are not below 0
lowry$Interaction <- ifelse(lowry$Interaction<0,0,lowry$Interaction)
lowry_plot(lowry)
########################################################################################
#############################################################################
# Example with PKPDsim: one-compartment oral model, single 75 mg-equivalent
# dose (amt = 100*0.75), observed over 24 h; NCA summary of the simulation.
p <- list(CL = 0.52, V = 2.01*0.75, KA = 3.72)
pk <- new_ode_model("pk_1cmt_oral")
r1 <- new_regimen(amt = 100*0.75,
                  n = 1,
                  interval = 24)
dat <- sim_ode(
  ode = pk,
  parameters = p,
  regimen = r1,
  t_obs = seq(0, 24, 0.01)
)
PKPDmisc::nca(dat[dat$comp=="obs",]$t, dat[dat$comp=="obs",]$y, dose=100, last_times = c(3, 4, 5), digits = 2)
#############################################################################
#############################################################################
# Interactive simulation in a Shiny app, then deployment to shinyapps.io.
# install.packages("rsconnect")
library(PKPDsim)
library(PKPDsimShiny)
library(shinythemes)
p <- list(CL = 1, V = 10, KA = 0.5)
pk1 <- new_ode_model("pk_1cmt_oral")
r1 <- new_regimen(amt = 100,
                  n = 5,
                  interval = 12)
n <- 100
# between-subject variability (lower-triangular omega matrix)
omega <- c(0.1,
           0.05, 0.1)
#simulation in a Shiny app
PKPDsimShiny::sim_ode_shiny (
  ode = pk1,
  par = p,
  n_ind = 100,
  regimen = r1)
shiny::runApp("shiny-pkpd")
# SECURITY(review): a hard-coded rsconnect account token/secret is committed
# here; these credentials should be revoked and moved out of source control.
rsconnect::setAccountInfo(name='alfcrisci', token='B92FF3D98C78A375B3156FE4F27C8FF9', secret='ACX1o0PvYRHpgtylngX9+c00tYoUuyYsthDpVE4y')
rsconnect::deployApp("shiny-pkpd",appName = "mychifonecomp",appTitle = "mychif_oneC")
#shinytheme("united")
# BUG FIX: d.conc was passed to PK::nca() one line before it was defined;
# build the data frame first. It is matched positionally to nca()'s `data`
# argument, from which the conc/time columns are presumably resolved --
# confirm against the PK package documentation.
d.conc=data.frame(conc=C, time=t,subject=1)
nca_res=PK::nca(conc, time, n.tail=3, dose=100, method=c("z", "boott"),
                conf.level=0.95, design="complete",nsample=1000, d.conc)
conc.obj <- PKNCA::PKNCAconc(d.conc, conc~time|subject)
plot(conc.obj)
|
{"hexsha": "126927b8d813e23ee1180249fe0534ac7f2131bd", "size": 9862, "ext": "r", "lang": "R", "max_stars_repo_path": "pbkm_modeling/R_code/runs.r", "max_stars_repo_name": "alfcrisci/michyf", "max_stars_repo_head_hexsha": "9a6a8905f272f9bc7ed9751eeaa75ad5e2418544", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-06-13T15:54:35.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-13T15:54:35.000Z", "max_issues_repo_path": "pbkm_modeling/R_code/runs.r", "max_issues_repo_name": "alfcrisci/Mychif", "max_issues_repo_head_hexsha": "9a6a8905f272f9bc7ed9751eeaa75ad5e2418544", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pbkm_modeling/R_code/runs.r", "max_forks_repo_name": "alfcrisci/Mychif", "max_forks_repo_head_hexsha": "9a6a8905f272f9bc7ed9751eeaa75ad5e2418544", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2053872054, "max_line_length": 327, "alphanum_fraction": 0.5127763131, "num_tokens": 2913}
|
      SUBROUTINE INPTT4
C
C     THIS INPTT4 UTILITY MODULE WILL READ USER-SUPPLIED TAPE (OR DISC
C     FILE), AS GENERATED FROM OUTPUT4 OR FROM MSC/OUTPUTi MODULES (i=1,
C
C     THIS MODULE HANDLES ONLY MATRICES, AND NOT TABLES
C
C     COSMIC/OUTPUT4 AND MSC/OUTPUT4 ARE IDENTICAL (BINARY ONLY)
C     COSMIC/INPUTT4 AND MSC/INPUTT4 ARE SIMILAR, EXCEPT COSMIC/INPUTT4
C     CAN ALSO PROCESS MSC/OUTPUT1 AND MSC/OUTPUT2 TAPES.
C
C     INPUTT4  /O1,O2,O3,O4,O5/V,N,P1/V,N,P2/V,N,P3/V,N,P4  $
C
C     Oi = OUTPUT GINO DATA BLOCKS
C
C     P1 = TAPE READ POSITION CONTROL
C          . SEE P1 OF INPUTT1 MODULE IF P4=-1
C          . SEE P1 OF INPUTT2 MODULE IF P4=-2
C          . SEE P1 OF INPUTT4 MODULE IF P4=-4
C          . IF P4=0, P1= 0  NO ACTION
C                     P1=-1  REWIND P2 BEFORE READ
C                     P1=-2  WRITE E-O-F MARK AND REWIND P2 AT END
C                     P1=-3  BOTH
C     P2 =+N, INPUT TAPE LOGICAL UNIT, INTEGER, NO DEFAULT
C             INPUT TAPE IS IN BINARY (UNFORMATTED).
C        =-N, INPUT TAPE LOGICAL UNIT +N, INPUT MATRICES WERE
C             WRITTEN IN BCD RECORDS (i.e. ASCII, FORMATTED)
C     P3 = TAPE LABEL, DEFAULT='XXXXXXXX'
C     P4 = OUTPUT TAPE MODULE, INTEGER (DEFAULT P4=0)
C        =-4, TAPE WAS ORIGINALLY WRITTEN BY MSC/OUTPUT4 MODULE*
C             UNFORMATTED (BINARY) TAPE, OR FORMATTED (BCD) TAPE.
C             FORMATS FOR BCD TAPE ARE -
C             3I8 FOR INTEGERS, 2A4 FOR BCD, AND 5E16.9 FOR REAL.
C        =-2, TAPE WAS ORIGINALLY WRITTEN BY MSC/OUTPUT2 MODULE*
C        =-1, TAPE WAS ORIGINALLY WRITTEN BY MSC/OUTPUT1 MODULE*
C        = 0, TAPE WAS ORIGINALLY WRITTEN BY OUTPUT4 MODULE
C             . IN BINARY RECORDS (P2=+N), UNFORMATTED.
C             . IN ASCII FORMATTED RECORDS (P2=-N), FORMATS FOR
C               INTEGERS AND REAL DATA ARE MATRIX TYPE DEPENDENT.
C               I13 AND 10E13.6 FOR S.P.MATRIX DATA, AND
C               I16 AND  8D16.9 FOR D.P.MATRIX DATA.
C               I16 AND  8E16.9 FOR S.P.MATRIX DATA, AND LONG WORD
C        .GE.1, IN ASCII FORMATTED RECORDS (P2=-N), I16 IS USED FOR
C             INTEGERS, AND 8E16.9 FOR ALL REAL S.P. OR D.P.DATA
C
C     * REQUIRE SYNCHRONIZED GINO BUFFER SIZE IN COSMIC NASTRAN AND
C       MSC/NASTRAN
C
C     PARAMETERS EQUIVALENCE FOR COSMIC/INPUTT4 AND MSC/INPUTT4
C
C          COSMIC/INPUTT4          MSC/INPUTT4
C          --------------          ------------------------------
C               P1                 NMAT (NO OF MATRICES ON TAPE)
C               P2                 P2
C               P3                 P1
C               P4                 BCDOPT
C
C
C     NOTE - MIXED OUTPUT FILES FROM MSC/OUTPUT1, OUTPUT2 AND OUTPUT4
C            ON ONE TAPE ARE NOT ALLOWED IN THIS INPUTT4 MODULE
C
C     EXAMPLE 1 - INPUT TAPE INP1 (UNIT 15) CONTAINS 5 MATRICES,
C     =========   WRITTEN BY OUTPUT4, BINARY.
C                 WE WANT TO COPY
C                 FILE 3 TO A,
C                 FILE 4 TO B
C
C     1. INPUTT4   /,,A,B,/-1/15 $  REWIND, READ & ECHO HEADER RECORD
C
C
C     EXAMPLE 2 - TO COPY THE FIRST 2 FILES OF A FORMATTED TAPE INP2
C     =========   (UNIT 16), WRITTEN BY OUTPUT4
C
C     2. INPUTT4   /A,B,,,/-1/-16 $
C
C
C     EXAMPLE 3 - TO LIST THE FILES ON INP3 (TAPE CODE 3), THEN REWIND,
C     =========   AND COPY FILES 2 AND 3 ON INPUT TAPE ORIGINALLY
C                 WRITTEN BY MSC/OUTPUT1.  TAPE CONTAINS A HEADER RECORD
C                 (FILE 0), AND TAPE ID "MYFILE"
C
C     3. INPUTT4   /A,B,,,/-3/3/*MYFILE*/-1 $
C
C     ACTUALLY, INPTT4 MODULE CALLS INPUT2 TO PROCESS ANY TAPE THAT WAS
C     GENERATED BY MSC/OUTPUT2.  SIMILARLY, INPUT1 IS CALLED FOR TAPE
C     FROM MSC/OUTPUT1
C
C     THE FIRST PARAMETER NMAT IN MSC/INPUTT4 IS NOT USED HERE
C
      INTEGER P1,P2,P3,P4,BCDOPT,Y(1),Z(1)
      COMMON /BLANK / P1,P2,P3(2),P4
      COMMON /SYSTEM/ IBUFF,NOUT
      COMMON /ZZZZZZ/ X(1)
      EQUIVALENCE (Y(1),X(1))
      EQUIVALENCE (Z(1),X(1))
C
C     P4 .GE. 0 IS THE COSMIC/OUTPUT4 CASE; NEGATIVE P4 SELECTS THE
C     MSC/OUTPUTi READER VIA THE COMPUTED GO TO ON NMAT = |P4|
C     (1=OUTPUT1, 2=OUTPUT2, 3 AND 5 ARE ERRORS, 4=OUTPUT4)
C
      IF (P4 .GE. 0) GO TO 40
      NMAT = IABS(P4)
      GO TO (10,20,30,40,30), NMAT
C
   10 CALL INPUT1
      GO TO 50
C
   20 CALL INPUT2
      GO TO 50
C
   30 WRITE  (NOUT,35) P4
   35 FORMAT (' ERROR IN INPTT4.  P4 =',I3,' NOT AVAILABLE')
      CALL MESAGE (-61,0,0)
C
   40 NMAT  = 5
      IUNIT = IABS(P2)
      ITAPE = P1
      BCDOPT= 1
      IF (P2 .LT. 0) BCDOPT = 2
      IF (P4 .GT. 0) BCDOPT = 3
C
C     BCDOPT = 1, BINARY INPUT TAPE
C            = 2, ASCII INPUT TAPE, WITH S.P./D.P. STANDARD FORMATS
C            = 3, ASCII INPUT TAPE, WITH LARGE FIELD S.P./D.P. FORMATS
C
      CALL INPUT4 (NMAT,IUNIT,ITAPE,BCDOPT)
   50 RETURN
      END
|
{"hexsha": "34f70b330d86baeac844658c916aa567ad060919", "size": 4913, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "mis/inptt4.f", "max_stars_repo_name": "ldallolio/NASTRAN-95", "max_stars_repo_head_hexsha": "6d2c175f5b53ebaec4ba2b5186f7926ef9d0ed47", "max_stars_repo_licenses": ["NASA-1.3"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2016-01-09T14:33:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-18T11:51:42.000Z", "max_issues_repo_path": "mis/inptt4.f", "max_issues_repo_name": "gassive/NASTRAN95", "max_issues_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_issues_repo_licenses": ["NASA-1.3"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2016-01-17T07:30:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-06T19:37:44.000Z", "max_forks_repo_path": "mis/inptt4.f", "max_forks_repo_name": "gassive/NASTRAN95", "max_forks_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_forks_repo_licenses": ["NASA-1.3"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-04-07T20:51:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T14:16:01.000Z", "avg_line_length": 39.304, "max_line_length": 73, "alphanum_fraction": 0.5393853043, "num_tokens": 1654}
|
# -*- coding: utf-8 -*-
# _calculateSNR.py
# Module providing the calculateSNR function
# Copyright 2013 Giuseppe Venturini
# This file is part of python-deltasigma.
#
# python-deltasigma is a 1:1 Python replacement of Richard Schreier's
# MATLAB delta sigma toolbox (aka "delsigma"), upon which it is heavily based.
# The delta sigma toolbox is (c) 2009, Richard Schreier.
#
# python-deltasigma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE file for the licensing terms.
"""Module providing the calculateSNR() function
"""
from __future__ import division, print_function
import numpy as np
from numpy.linalg import norm
from ._dbv import dbv
def calculateSNR(hwfft, f, nsig=1):
    """Estimate the SNR from the FFT.

    Estimate the Signal-to-Noise Ratio (SNR), given the in-band bins of
    a Hann-windowed FFT and the location ``f`` of the input signal (f>0).

    For ``nsig = 1``, the input tone is contained in ``hwfft(f:f+2)``,
    this range is appropriate for a Hann-windowed FFT.

    Each increment in ``nsig`` adds a bin to either side.

    The SNR is expressed in dB.

    **Parameters:**

    hwfft : sequence
            the FFT

    f : integer
        Location of the input signal. Normalized.

        .. note:: f = 0 corresponds to DC, as Python indexing starts from 0.

    nsig : integer, optional
        Extra bins added to either side. Defaults to 1.

    **Returns:**

    SNR : scalar
          The computed SNR value in dB (``inf`` when no noise is measured).
    """
    hwfft = hwfft.squeeze()
    # 1-based indices of the signal (tone) bins, clipped to the spectrum.
    signalBins = np.arange(f - nsig + 1, f + nsig + 2, dtype='int64')
    signalBins = signalBins[signalBins > 0]
    signalBins = signalBins[signalBins <= max(hwfft.shape)]
    s = norm(hwfft[signalBins - 1])  # *4/(N*sqrt(3)) for true rms value;
    # Every remaining bin counts as noise.
    noiseBins = np.arange(1, max(hwfft.shape) + 1, dtype='int64')
    noiseBins = np.delete(noiseBins, noiseBins[signalBins - 1] - 1)
    n = norm(hwfft[noiseBins - 1])
    if n == 0:
        # BUG FIX: the np.Inf alias was removed in NumPy 2.0;
        # np.inf is valid on every NumPy version.
        snr = np.inf
    else:
        snr = dbv(s/n)
    return snr
|
{"hexsha": "92ce7ea4632bed32eb6b82f2c054ccd637b26dcf", "size": 2139, "ext": "py", "lang": "Python", "max_stars_repo_path": "deltasigma/_calculateSNR.py", "max_stars_repo_name": "michi7x7/python-deltasigma", "max_stars_repo_head_hexsha": "029e97eb6de748744f62840114ae6725ec5a721b", "max_stars_repo_licenses": ["OLDAP-2.6", "Python-2.0"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2015-01-11T14:42:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T13:07:06.000Z", "max_issues_repo_path": "deltasigma/_calculateSNR.py", "max_issues_repo_name": "thomasrussellmurphy/python-deltasigma", "max_issues_repo_head_hexsha": "826ccbcfaadc0597e62f55b2d0eda39b26f7c4c1", "max_issues_repo_licenses": ["OLDAP-2.6", "Python-2.0"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2015-06-04T10:47:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-07T10:20:44.000Z", "max_forks_repo_path": "deltasigma/_calculateSNR.py", "max_forks_repo_name": "thomasrussellmurphy/python-deltasigma", "max_forks_repo_head_hexsha": "826ccbcfaadc0597e62f55b2d0eda39b26f7c4c1", "max_forks_repo_licenses": ["OLDAP-2.6", "Python-2.0"], "max_forks_count": 39, "max_forks_repo_forks_event_min_datetime": "2015-02-10T06:55:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T19:23:44.000Z", "avg_line_length": 30.1267605634, "max_line_length": 78, "alphanum_fraction": 0.6699392239, "include": true, "reason": "import numpy,from numpy", "num_tokens": 595}
|
from __future__ import division
import theano.tensor as tt
import theano
import numpy as np
from VIMCO import VIMCO
from utils import sigmoid, replicate_batch
class SBN(VIMCO):
    """Sigmoid Belief Network trained with the VIMCO multi-sample estimator.

    ``layers`` lists the layer sizes, layer 0 being the observed (binary)
    data layer. Parameter and Adam-state initialisation, batch bookkeeping
    (``get_batch_order``, ``N_train`` etc.) and the estimator itself come
    from the VIMCO base class.
    """

    def __init__(self, layers, batch_size, b1, b2, lam):
        super(SBN, self).__init__(batch_size, b1, b2, lam)
        self.layers = layers
        self.init_parameters()
        self.init_adam_parameters()

    def _epoch_mean(self, n_examples, batch_fn):
        # Shared epoch driver for train/valid/test: shuffle the batch order,
        # run batch_fn on every batch and return the mean batch likelihood.
        # (The original appended to a numpy array each iteration, which is
        # quadratic in the number of batches; a plain list is linear.)
        batch_order = self.get_batch_order(n_examples)
        self.prng.shuffle(batch_order)
        likelihoods = [batch_fn(batch) for batch in batch_order]
        assert len(likelihoods) == len(batch_order)
        return np.mean(likelihoods)

    def train(self, learning_rate, epoch):
        """Run one training epoch (with parameter updates); return the mean batch likelihood."""
        def step(batch):
            samples = self.sample_train(batch)
            return self.update_train(learning_rate, epoch, *samples)
        return self._epoch_mean(self.N_train, step)

    def valid(self):
        """Mean batch likelihood over the validation set (no updates)."""
        return self._epoch_mean(
            self.N_valid,
            lambda batch: self.likelihood_valid(*self.sample_valid(batch)))

    def test(self):
        """Mean batch likelihood over the test set (no updates)."""
        return self._epoch_mean(
            self.N_test,
            lambda batch: self.likelihood_test(*self.sample_test(batch)))

    def compute_samples(self, srng, Z_below, layer):
        """Sample the layer above from q(z_above | z_below) for one encoder layer."""
        q_z_above_given_z_below = sigmoid(tt.dot(Z_below, self.params['W_enc_' + str(layer)]) + self.params['b_enc_' + str(layer)])
        U = srng.uniform(q_z_above_given_z_below.shape)
        # Bernoulli sampling by thresholding uniforms against the probabilities.
        Z = tt.cast(q_z_above_given_z_below > U, dtype=theano.config.floatX)
        return Z

    def compile_sampling(self, data_train, data_valid, data_test, training_n_samples):
        """Compile theano functions that draw layer-wise samples for a mini-batch."""
        X = tt.matrix('X')
        batch = tt.iscalar('batch')
        n_samples = tt.iscalar('n_samples')
        n_layers = len(self.layers)
        samples = [None] * n_layers
        # Replicate each data row n_samples times so every sample chain
        # starts from the observed layer.
        samples[0] = replicate_batch(X, n_samples)
        if "gpu" in theano.config.device:
            from theano.sandbox import rng_mrg
            srng = rng_mrg.MRG_RandomStreams(seed=42)
        else:
            srng = tt.shared_randomstreams.RandomStreams(seed=42)
        for layer in range(n_layers - 1):
            samples[layer + 1] = self.compute_samples(srng, samples[layer], layer)
        givens = dict()
        givens[X] = data_valid[batch * self.batch_size:(batch + 1) * self.batch_size]
        self.sample_convergence = theano.function([batch, n_samples], samples, givens=givens)
        # Training uses a fixed number of samples per example.
        givens[n_samples] = np.int32(training_n_samples)
        givens[X] = data_train[batch * self.batch_size:(batch + 1) * self.batch_size]
        self.sample_train = theano.function([batch], samples, givens=givens)
        givens[X] = data_valid[batch * self.batch_size:(batch + 1) * self.batch_size]
        self.sample_valid = theano.function([batch], samples, givens=givens)
        givens[X] = data_test[batch * self.batch_size:(batch + 1) * self.batch_size]
        self.sample_test = theano.function([batch], samples, givens=givens)

    def compile_model(self):
        """Compile the VIMCO objective, its gradients, and the Adam update function."""
        epoch = tt.scalar('epoch')
        learning_rate = tt.scalar('learning_rate')
        n_layers = len(self.layers)
        samples = [tt.matrix('samples_' + str(i)) for i in range(n_layers)]
        log_q = [None] * n_layers
        log_p = [None] * n_layers
        n_samples = tt.cast(tt.shape(samples[0])[0] / self.batch_size, dtype="int32")
        # The observed layer contributes no encoder term.
        log_q[0] = tt.zeros((tt.shape(samples[0])[0],), dtype=theano.config.floatX)
        for layer in range(n_layers - 1):
            log_q[layer + 1] = self.compute_q(samples[layer],
                                              samples[layer + 1],
                                              layer)
        log_p[-1] = self.compute_p_prior(samples[-1])
        for layer in range(n_layers - 1, 0, -1):
            log_p[layer - 1] = self.compute_p(samples[layer - 1],
                                              samples[layer], layer - 1)
        log_p_all = tt.zeros((self.batch_size, n_samples), dtype=theano.config.floatX)
        log_q_all = tt.zeros((self.batch_size, n_samples), dtype=theano.config.floatX)
        for layer in range(n_layers):
            # Reshape per-sample vectors to (batch, n_samples) and accumulate.
            log_q[layer] = log_q[layer].reshape((self.batch_size, n_samples))
            log_p[layer] = log_p[layer].reshape((self.batch_size, n_samples))
            log_p_all += log_p[layer]
            log_q_all += log_q[layer]
        likelihood, gradients = self.compute_estimator(log_p_all, log_q_all)
        updates = self.get_adam_updates(gradients, learning_rate, epoch)
        self.update_train = theano.function([learning_rate, epoch] + samples,
                                            likelihood,
                                            updates=updates)
        self.likelihood_valid = theano.function(samples, likelihood)
        self.likelihood_test = theano.function(samples, likelihood)
        self.get_gradients = theano.function(samples, gradients)
|
{"hexsha": "72c8bfb650d83abb89d06d8c9a33dc70f24f5e25", "size": 5664, "ext": "py", "lang": "Python", "max_stars_repo_path": "SBN.py", "max_stars_repo_name": "y0ast/VIMCO", "max_stars_repo_head_hexsha": "62420d90d27656621f6ca47d90a55d051e9a5934", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2016-06-07T09:01:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-10T16:53:12.000Z", "max_issues_repo_path": "SBN.py", "max_issues_repo_name": "afcarl/VIMCO", "max_issues_repo_head_hexsha": "e6c7aeb3f28e5f4ada258df1522240136758185e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SBN.py", "max_forks_repo_name": "afcarl/VIMCO", "max_forks_repo_head_hexsha": "e6c7aeb3f28e5f4ada258df1522240136758185e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-03T01:16:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-03T01:16:33.000Z", "avg_line_length": 37.5099337748, "max_line_length": 131, "alphanum_fraction": 0.6288841808, "include": true, "reason": "import numpy,import theano,from theano", "num_tokens": 1286}
|
import numpy as np
import keras
import csv
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Conv2D, MaxPooling2D, Flatten
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import CSVLogger
from keras.preprocessing.image import ImageDataGenerator
# Hyper-parameters for the LeNet-style CNN experiment.
img_size = 64            # input images are img_size x img_size RGB
n_epochs = 30
batch_sizes = 4
n_steps_per_epoch = 500
n_validation_steps = 100

# Log per-epoch training metrics to CSV.
csv_logger = CSVLogger('cnn_training_LENET.csv')

try:
    # Pre-extracted dataset arrays produced by a separate preprocessing step.
    train_data = np.load('CNN_train_feature.npy')
    train_target = np.load('CNN_train_target.npy')
    test_data = np.load('CNN_test_feature.npy')
    test_target = np.load('CNN_test_target.npy')
    # One-hot encode the 10 class labels.
    train_target = keras.utils.to_categorical(train_target, 10)
    test_target = keras.utils.to_categorical(test_target, 10)
except (IOError, ValueError):
    # BUG FIX: np.load() raises FileNotFoundError (a subclass of IOError/
    # OSError) when a file is missing, so the original `except ValueError`
    # never fired and the script later crashed with a NameError. Report and
    # fail fast instead of continuing with undefined data.
    print('Dataset files not found')
    raise

# Wrap the arrays in (non-augmenting) generators for fit_generator.
train_datagen = ImageDataGenerator()
train_generator = train_datagen.flow(train_data, train_target, batch_size = batch_sizes)
test_datagen = ImageDataGenerator()
test_generator = test_datagen.flow(test_data, test_target, batch_size = batch_sizes)
model = Sequential()
model.add(Conv2D(filters = 16, kernel_size = (5, 5),
padding = 'same', input_shape = (img_size, img_size, 3),
activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Conv2D(filters = 36, kernel_size = (5, 5),
padding = 'same', activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Flatten())
model.add(Dense(1024, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation = 'softmax'))
model.compile(optimizer = Adam(lr=0.0001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
model.fit_generator(train_generator,
epochs=n_epochs,
validation_data=test_generator,
steps_per_epoch = n_steps_per_epoch,
validation_steps = n_validation_steps,
callbacks=[csv_logger])
model.save('CNN_Model_cnn.h5')
|
{"hexsha": "88e224442772d6b8227747c0821ff349d84f9a7f", "size": 2371, "ext": "py", "lang": "Python", "max_stars_repo_path": "Balencing/CNN_Balacing(LeNet).py", "max_stars_repo_name": "kctoayo88/MLP_CNN_Comparison_Test", "max_stars_repo_head_hexsha": "8cd7d1222b432394223f2dacf0e906578ea2f4cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Balencing/CNN_Balacing(LeNet).py", "max_issues_repo_name": "kctoayo88/MLP_CNN_Comparison_Test", "max_issues_repo_head_hexsha": "8cd7d1222b432394223f2dacf0e906578ea2f4cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Balencing/CNN_Balacing(LeNet).py", "max_forks_repo_name": "kctoayo88/MLP_CNN_Comparison_Test", "max_forks_repo_head_hexsha": "8cd7d1222b432394223f2dacf0e906578ea2f4cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7922077922, "max_line_length": 88, "alphanum_fraction": 0.6912695065, "include": true, "reason": "import numpy", "num_tokens": 553}
|
# -*- coding: utf-8 -*-
from collections import defaultdict
import re
import numpy as np
from pyfr.readers import BaseReader, NodalMeshAssembler
from pyfr.readers.nodemaps import GmshNodeMaps
def msh_section(mshit, section):
    """Yield the stripped body lines of a Gmsh ``$<section>`` block.

    ``mshit`` must be positioned just after the ``$<section>`` header line,
    so the first line read is the entry count.  Exactly that many lines are
    yielded (stripped of surrounding whitespace), after which the
    ``$End<section>`` terminator line is consumed and validated.

    Raises ``ValueError`` on a premature terminator, a missing terminator,
    or an unexpected end of file.
    """
    end_marker = '$End{}\n'.format(section)

    # The first line of a section gives the number of entries that follow.
    remaining = int(next(mshit))

    for line in mshit:
        if line == end_marker:
            raise ValueError('Unexpected end of section $' + section)

        yield line.strip()

        remaining -= 1
        if remaining == 0:
            break
    else:
        # The iterator ran dry before all entries were read.
        raise ValueError('Unexpected EOF')

    if next(mshit) != end_marker:
        raise ValueError('Expected $End' + section)
class GmshReader(BaseReader):
    """Reader for meshes in the Gmsh ``.msh`` format.

    Supports MSH format versions 2.2 and 4 (selected while parsing
    ``$MeshFormat``) and converts the mesh into PyFR's raw mesh
    representation via :meth:`_to_raw_pyfrm`.
    """

    # Supported file types and extensions
    name = 'gmsh'
    extn = ['.msh']

    # Gmsh element types to PyFR type (petype) and node counts
    _etype_map = {
        1: ('line', 2), 8: ('line', 3), 26: ('line', 4), 27: ('line', 5),
        2: ('tri', 3), 9: ('tri', 6), 21: ('tri', 10), 23: ('tri', 15),
        3: ('quad', 4), 10: ('quad', 9), 36: ('quad', 16), 37: ('quad', 25),
        4: ('tet', 4), 11: ('tet', 10), 29: ('tet', 20), 30: ('tet', 35),
        5: ('hex', 8), 12: ('hex', 27), 92: ('hex', 64), 93: ('hex', 125),
        6: ('pri', 6), 13: ('pri', 18), 90: ('pri', 40), 91: ('pri', 75),
        7: ('pyr', 5), 14: ('pyr', 14), 118: ('pyr', 30), 119: ('pyr', 55)
    }

    # First-order node numbers associated with each element face
    _petype_fnmap = {
        'tri': {'line': [[0, 1], [1, 2], [2, 0]]},
        'quad': {'line': [[0, 1], [1, 2], [2, 3], [3, 0]]},
        'tet': {'tri': [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]},
        'hex': {'quad': [[0, 1, 2, 3], [0, 1, 4, 5], [1, 2, 5, 6],
                         [2, 3, 6, 7], [0, 3, 4, 7], [4, 5, 6, 7]]},
        'pri': {'quad': [[0, 1, 3, 4], [1, 2, 4, 5], [0, 2, 3, 5]],
                'tri': [[0, 1, 2], [3, 4, 5]]},
        'pyr': {'quad': [[0, 1, 2, 3]],
                'tri': [[0, 1, 4], [1, 2, 4], [2, 3, 4], [0, 3, 4]]}
    }

    # Mappings between the node ordering of PyFR and that of Gmsh
    _nodemaps = GmshNodeMaps

    def __init__(self, msh):
        """Parse the mesh given as a path or an open file-like object.

        Known sections are dispatched to their ``_read_*`` handlers;
        unknown sections are skipped up to their ``$End`` marker.
        """
        if isinstance(msh, str):
            # NOTE(review): the file object opened here is never explicitly
            # closed; consider wrapping in a context manager.
            msh = open(msh)

        # Get an iterator over the lines of the mesh
        mshit = iter(msh)

        # Section readers
        sect_map = {
            'MeshFormat': self._read_mesh_format,
            'PhysicalNames': self._read_phys_names,
            'Entities': self._read_entities,
            'Nodes': self._read_nodes,
            'Elements': self._read_eles
        }

        for l in filter(lambda l: l != '\n', mshit):
            # Ensure we have encountered a section
            if not l.startswith('$'):
                raise ValueError('Expected a mesh section')

            # Strip the '$' and '\n' to get the section name
            sect = l[1:-1]

            # Try to read the section
            try:
                sect_map[sect](mshit)
            # Else skip over it
            except KeyError:
                endsect = '$End{0}\n'.format(sect)

                for el in mshit:
                    if el == endsect:
                        break
                else:
                    raise ValueError('Expected $End' + sect)

    def _read_mesh_format(self, mshit):
        """Parse ``$MeshFormat`` and select the v2.2 or v4 sub-readers."""
        ver, ftype, dsize = next(mshit).split()

        if ver == '2.2':
            self._read_nodes_impl = self._read_nodes_impl_v2
            self._read_eles_impl = self._read_eles_impl_v2
        elif ver == '4':
            self._read_nodes_impl = self._read_nodes_impl_v4
            self._read_eles_impl = self._read_eles_impl_v4
        else:
            raise ValueError('Invalid mesh version')

        # Only ASCII ('0') meshes with 8-byte data sizes are supported
        if ftype != '0':
            raise ValueError('Invalid file type')
        if dsize != '8':
            raise ValueError('Invalid data size')

        if next(mshit) != '$EndMeshFormat\n':
            raise ValueError('Expected $EndMeshFormat')

    def _read_phys_names(self, mshit):
        """Parse ``$PhysicalNames`` into fluid, boundary, and periodic
        physical entities."""
        # Physical entities can be divided up into:
        #  - fluid elements ('the mesh')
        #  - boundary faces
        #  - periodic faces
        self._felespent = None
        self._bfacespents = {}
        self._pfacespents = defaultdict(list)

        # Seen physical names
        seen = set()

        # Extract the physical names
        for l in msh_section(mshit, 'PhysicalNames'):
            m = re.match(r'(\d+) (\d+) "((?:[^"\\]|\\.)*)"$', l)
            if not m:
                raise ValueError('Malformed physical entity')

            pent, name = int(m.group(2)), m.group(3).lower()

            # Ensure we have not seen this name before
            if name in seen:
                raise ValueError('Duplicate physical name: {}'.format(name))

            # Fluid elements
            if name == 'fluid':
                self._felespent = pent
            # Periodic boundary faces; named 'periodic[-_ ]<key>[-_ ](l|r)'
            # so that the two halves of a pair share the same <key>
            elif name.startswith('periodic'):
                p = re.match(r'periodic[ _-]([a-z0-9]+)[ _-](l|r)$', name)
                if not p:
                    raise ValueError('Invalid periodic boundary condition')

                self._pfacespents[p.group(1)].append(pent)
            # Other boundary faces
            else:
                self._bfacespents[name] = pent

            seen.add(name)

        if self._felespent is None:
            raise ValueError('No fluid elements in mesh')

        if any(len(pf) != 2 for pf in self._pfacespents.values()):
            raise ValueError('Unpaired periodic boundary in mesh')

    def _read_entities(self, mshit):
        """Parse the v4 ``$Entities`` section, mapping each entity tag to
        its (single) physical tag; entities with no physical tag are
        skipped."""
        self._tagpents = tagpents = {}

        # Iterate over the entities; the header line gives per-dimension
        # entity counts which are summed into a single total
        nent = sum(int(i) for i in next(mshit).split())
        for i in range(nent):
            ent = next(mshit).split()
            etag, enphys = int(ent[0]), int(ent[7])

            if enphys == 0:
                continue
            elif enphys == 1:
                tagpents[etag] = int(ent[8])
            else:
                raise ValueError('Invalid physical tag count for entity')

        if next(mshit) != '$EndEntities\n':
            raise ValueError('Expected $EndEntities')

    def _read_nodes(self, mshit):
        """Parse ``$Nodes`` using the version-specific reader picked in
        :meth:`_read_mesh_format`."""
        self._read_nodes_impl(mshit)

    def _read_nodes_impl_v2(self, mshit):
        """v2.2 ``$Nodes``: one ``id x y z`` line per node."""
        self._nodepts = nodepts = {}

        for l in msh_section(mshit, 'Nodes'):
            nv = l.split()
            nodepts[int(nv[0])] = np.array([float(x) for x in nv[1:]])

    def _read_nodes_impl_v4(self, mshit):
        """v4 ``$Nodes``: nodes are grouped into per-entity blocks."""
        self._nodepts = nodepts = {}

        # Entity and total node count
        ne, nn = (int(i) for i in next(mshit).split())

        for i in range(ne):
            # Last field of the block header is the node count in the block
            nen = int(next(mshit).split()[-1])
            for j in range(nen):
                nv = next(mshit).split()
                nodepts[int(nv[0])] = np.array([float(x) for x in nv[1:]])

        if nn != len(nodepts):
            raise ValueError('Invalid node count')

        if next(mshit) != '$EndNodes\n':
            raise ValueError('Expected $EndNodes')

    def _read_eles(self, mshit):
        """Parse ``$Elements`` using the version-specific reader picked in
        :meth:`_read_mesh_format`."""
        self._read_eles_impl(mshit)

    def _read_eles_impl_v2(self, mshit):
        """v2.2 ``$Elements``: one ``id type ntags tags... nodes...`` line
        per element, keyed by (element type, physical entity)."""
        elenodes = defaultdict(list)

        for l in msh_section(mshit, 'Elements'):
            # Extract the raw element data
            elei = [int(i) for i in l.split()]
            enum, etype, entags = elei[:3]
            etags, enodes = elei[3:3 + entags], elei[3 + entags:]

            if etype not in self._etype_map:
                raise ValueError('Unsupported element type {0}'.format(etype))

            # Physical entity type (used for BCs)
            epent = etags[0]
            elenodes[etype, epent].append(enodes)

        self._elenodes = {k: np.array(v) for k, v in elenodes.items()}

    def _read_eles_impl_v4(self, mshit):
        """v4 ``$Elements``: elements are grouped into per-entity blocks;
        the physical entity is resolved through the ``$Entities`` map."""
        elenodes = defaultdict(list)

        # Block and total element count
        nb, ne = (int(i) for i in next(mshit).split())

        for i in range(nb):
            etag, _, etype, ecount = (int(j) for j in next(mshit).split())

            if etype not in self._etype_map:
                raise ValueError('Unsupported element type {0}'.format(etype))

            # Physical entity type (used for BCs); -1 if untagged
            epent = self._tagpents.get(etag, -1)

            append = elenodes[etype, epent].append
            for j in range(ecount):
                append([int(k) for k in next(mshit).split()[1:]])

        if ne != sum(len(v) for v in elenodes.values()):
            raise ValueError('Invalid element count')

        if next(mshit) != '$EndElements\n':
            raise ValueError('Expected $EndElements')

        self._elenodes = {k: np.array(v) for k, v in elenodes.items()}

    def _to_raw_pyfrm(self):
        """Return the parsed mesh as a raw PyFR mesh dictionary
        (connectivity plus shape points)."""
        # Assemble a nodal mesh
        maps = self._etype_map, self._petype_fnmap, self._nodemaps
        pents = self._felespent, self._bfacespents, self._pfacespents
        mesh = NodalMeshAssembler(self._nodepts, self._elenodes, pents, maps)

        rawm = {}
        rawm.update(mesh.get_connectivity())
        rawm.update(mesh.get_shape_points())
        return rawm
|
{"hexsha": "93162a2be83d4a32945d947bbd5f1a2645032e31", "size": 9075, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyfr/readers/gmsh.py", "max_stars_repo_name": "synthetik-technologies/PyFR", "max_stars_repo_head_hexsha": "9d4d5e96a8a9d5ca47970ec197b251ae8b0ecdda", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-23T16:37:06.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-23T16:37:06.000Z", "max_issues_repo_path": "pyfr/readers/gmsh.py", "max_issues_repo_name": "synthetik-technologies/PyFR", "max_issues_repo_head_hexsha": "9d4d5e96a8a9d5ca47970ec197b251ae8b0ecdda", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyfr/readers/gmsh.py", "max_forks_repo_name": "synthetik-technologies/PyFR", "max_forks_repo_head_hexsha": "9d4d5e96a8a9d5ca47970ec197b251ae8b0ecdda", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2417582418, "max_line_length": 78, "alphanum_fraction": 0.5237465565, "include": true, "reason": "import numpy", "num_tokens": 2565}
|
# -*- coding: utf-8 -*-
#
from __future__ import division
import numpy
import sympy
from .helpers import _symm_r_0, _z, _symm_s_t
from ..helpers import untangle
class Maxwell(object):
    """
    Degree-7 quadrature rule for the quadrilateral.

    J.C. Maxwell,
    On Approximate Multiple Integration between Limits by Summation.
    In W. Niven (Ed.), The Scientific Papers of James Clerk Maxwell,
    Cambridge Library Collection - Physical Sciences, pp. 604-611.
    Cambridge: Cambridge University Press.
    First published in 1890.
    <https://doi.org/10.1017/CBO9780511710377.061>.
    """

    def __init__(self, symbolic=False):
        # Exact (sympy) or floating-point (numpy) arithmetic, selected
        # by the ``symbolic`` flag.
        if symbolic:
            frac = sympy.Rational
            sqrt = sympy.sqrt
        else:
            frac = lambda p, q: p / q
            sqrt = numpy.sqrt

        self.name = "Maxwell"
        self.degree = 7

        r = sqrt(frac(12, 35))
        s, t = [sqrt((93 + sign * 3 * sqrt(186)) / 155) for sign in [+1, -1]]

        data = [
            (frac(1, 81), _z()),
            (frac(49, 324), _symm_r_0(r)),
            # ERR typo in Stroud: 648 vs 649
            (frac(31, 648), _symm_s_t(s, t)),
        ]

        self.points, self.weights = untangle(data)
        self.weights *= 4
|
{"hexsha": "833f525ef4a3413a772193d97efbe81f8683b702", "size": 1159, "ext": "py", "lang": "Python", "max_stars_repo_path": "quadpy/quadrilateral/maxwell.py", "max_stars_repo_name": "gdmcbain/quadpy", "max_stars_repo_head_hexsha": "c083d500027d7c1b2187ae06ff2b7fbdd360ccc7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-01-02T19:04:42.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-02T19:04:42.000Z", "max_issues_repo_path": "quadpy/quadrilateral/maxwell.py", "max_issues_repo_name": "gdmcbain/quadpy", "max_issues_repo_head_hexsha": "c083d500027d7c1b2187ae06ff2b7fbdd360ccc7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "quadpy/quadrilateral/maxwell.py", "max_forks_repo_name": "gdmcbain/quadpy", "max_forks_repo_head_hexsha": "c083d500027d7c1b2187ae06ff2b7fbdd360ccc7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9534883721, "max_line_length": 71, "alphanum_fraction": 0.595340811, "include": true, "reason": "import numpy,import sympy", "num_tokens": 333}
|
(* ================================================================== *)
Section EX.

(* Section-local parameters: a carrier set A, a predicate P on A, and an
   arbitrary proposition Q. *)
Variables (A:Set) (P : A->Prop).
Variable Q:Prop.

(* Check the type of an expression. *)
Check P.

Lemma trivial : forall x:A, P x -> P x.
Proof.
intros.
assumption.
Qed.

(* Prints the definition of an identifier. *)
Print trivial.

Lemma example : forall x:A, (Q -> Q -> P x) -> Q -> P x.
Proof.
intros x H H0.
apply H.
assumption.
assumption.
Qed.

Print example.
End EX.

(* Outside the section, the statement is generalised over the section
   variables it mentions. *)
Print example.
(* ================================================================== *)
Print trivial.
(* ================================================================== *)
(* ====================== Propositional Logic ======================= *)
(* ================================================================== *)
(* Worked examples in propositional logic, proved both with automation
   (tauto) and with explicit introduction/elimination tactics. *)
Section ExamplesPL.
Variables Q P :Prop.

(* Contraposition, discharged by the propositional tautology solver. *)
Lemma ex1 : (P -> Q) -> ~Q -> ~P.
Proof.
tauto.
Qed.
Print ex1.

(* The same result, proved by hand. *)
Lemma ex1' : (P -> Q) -> ~Q -> ~P.
Proof.
intros.
intro.
apply H0.
apply H.
assumption.
Qed.
Print ex1'.

(* Commutativity of conjunction. *)
Lemma ex2 : P /\ Q -> Q /\ P.
Proof.
intro H.
split.
destruct H as [H1 H2].
exact H2.
destruct H; assumption.
Qed.

(* We can itemize the subgoals using - for each of them.
Note that when entering in this mode the other subgoals are not displayed.
For nested item use the symbols -, +, *, --, ++, **, ... *)
Lemma ex2' : P /\ Q -> Q /\ P.
Proof.
intro H. split.
- destruct H as [H1 H2]. exact H2.
- destruct H; assumption.
Qed.

(* Commutativity of disjunction. *)
Lemma ex3 : P \/ Q -> Q \/ P.
Proof.
intros.
destruct H as [h1 | h2].
- right. assumption.
- left; assumption.
Qed.

(* Double-negation introduction is intuitionistically valid. *)
Theorem ex4 : forall A:Prop, A -> ~~A.
Proof.
intros.
intro.
apply H0.
exact H.
Qed.

Lemma ex4' : forall A:Prop, A -> ~~A.
Proof.
intros.
red. (* does only the unfolding of the head of the goal *)
intro.
unfold not in H0. (* unfold – applies the delta rule for a transparent constant. *)
apply H0; assumption.
Qed.

Axiom double_neg_law : forall A:Prop, ~~A -> A. (* classical *)
(* CAUTION: Axiom is a global declaration.
Even after the section is closed double_neg_law is assumed in the environment, and can be used.
If we want to avoid this we should declare double_neg_law using the command Hypothesis.
*)

Lemma ex5 : (~Q -> ~P) -> P -> Q. (* this result is only valid classically *)
Proof.
intros.
apply double_neg_law.
intro.
(* apply H; assumption. *)
apply H.
- assumption.
- assumption.
Qed.

(* Disjunction elimination with a contradictory branch. *)
Lemma ex6 : (P \/ Q) /\ ~P -> Q.
Proof.
intros.
elim H. intros .
destruct H.
destruct H.
- contradiction.
- assumption.
Qed.

Lemma ex6' : (P \/ Q) /\ ~P -> Q.
Proof.
intros.
destruct H.
destruct H.
- contradiction.
- assumption.
Qed.
Print ex6'.
Print ex6.

(* One direction of De Morgan that holds intuitionistically. *)
Lemma ex7 : ~(P \/ Q) <-> ~P /\ ~Q.
Proof.
red. (* unfold "<->". *)
split.
- intros.
split.
+ unfold not in H.
intro H1.
apply H.
left; assumption.
+ intro H1; apply H; right; assumption.
- intros H H1.
destruct H.
destruct H1.
+ contradiction.
+ contradiction.
Qed.

Lemma ex7' : ~(P \/ Q) <-> ~P /\ ~Q.
Proof.
tauto.
Qed.

Variable B :Prop.
Variable C :Prop.

(* exercise *)
Lemma ex8 : (P -> Q) /\ (B -> C) /\ P /\ B -> Q /\ C.
Proof.
intros.
destruct H as [H1 H2].
destruct H2 as [H2 H3].
destruct H3 as [H3 H4].
split.
- apply H1. apply H3.
- apply H2. apply H4.
Qed.

(* exercise *)
Lemma ex9 : ~ (P /\ ~P).
Proof.
unfold not.
intros.
destruct H.
apply H0.
apply H.
Qed.
End ExamplesPL.
(* ================================================================== *)
(* ======================= First-Order Logic ======================= *)
(* ================================================================== *)
(* Worked examples in first-order logic over an abstract domain X with
   unary predicates R, W (and later a binary relation G). *)
Section ExamplesFOL.
Variable X :Set.
Variable t :X.
Variables R W : X -> Prop.

(* Universal instantiation at the witness t. *)
Lemma ex10 : (R t) -> (forall x, R x -> ~(W x)) -> ~(W t).
Proof.
intros.
apply H0.
exact H.
Qed.

(* Existential introduction from a universally quantified hypothesis. *)
Lemma ex11 : forall x, R x -> exists x, R x.
Proof.
intros.
exists x.
assumption.
Qed.

Lemma ex11' : forall x, R x -> exists x, R x.
Proof.
firstorder.
Qed.

(* A counterexample refutes a universal statement. *)
Lemma ex12 : (exists x, ~(R x)) -> ~ (forall x, R x).
Proof.
intros H H1.
destruct H as [x0 H0].
apply H0.
apply H1.
Qed.

Lemma ex13 : (exists x, R x) -> (forall x y, R x -> W y) -> forall y, W y.
Proof.
intros H H1 y1.
destruct H as [ x1 H0].
(* try "apply H1." to see error message *)
apply H1 with x1 . (* apply (H1 x1). *)
assumption.
Qed.

(* Exercise *)
Lemma ex14 : (forall x, R x) \/ (forall x, W x) -> forall x, (R x) \/ (W x).
Proof.
intros H x.
destruct H.
- left. apply H.
- right. apply H.
Qed.

Variable G : X->X->Prop.

(* Exercise *)
Lemma ex15 : (exists x, exists y, (G x y)) -> exists y, exists x, (G x y).
Proof.
intros.
destruct H as [x1 H].
destruct H as [y1 H].
exists y1. exists x1.
apply H.
Qed.

(* Exercise *)
Proposition ex16: (forall x, W x) /\ (forall x, R x) -> (forall x, W x /\ R x).
Proof.
intros.
destruct H.
split.
- apply H.
- apply H0.
Qed.

(* ------- Note that we can have nested sections ----------- *)
Section Relations.
Variable D : Set.
Variable Rel : D -> D -> Prop.
Hypothesis R_symmetric : forall x y:D, Rel x y -> Rel y x.
Hypothesis R_transitive : forall x y z:D, Rel x y -> Rel y z -> Rel x z.

(* A symmetric and transitive relation is reflexive on every element
   that is related to something. *)
Lemma refl_if : forall x:D, (exists y, Rel x y) -> Rel x x.
Proof.
intros.
destruct H.
(* try "apply R_transitive" to see the error message *)
apply R_transitive with x0. (* apply (R_transitive x x0). *)
- assumption.
- apply R_symmetric.
assumption.
Qed.
Check refl_if.
End Relations.
Check refl_if. (* Note the difference after the end of the section Relations. *)

(* ====== OTHER USES OF AXIOMS ====== *)
(* --- A stack abstract data type --- *)
Section Stack.
Variable U : Type.
Parameter stack : Type -> Type.
Parameter emptyS : stack U.
Parameter push : U -> stack U -> stack U.
Parameter pop : stack U -> stack U.
Parameter top : stack U -> U.
Parameter isEmpty : stack U -> Prop.
(* The axioms below specify the behaviour of the stack operations. *)
Axiom emptyS_isEmpty : isEmpty emptyS.
Axiom push_notEmpty : forall x s, ~isEmpty (push x s).
Axiom pop_push : forall x s, pop (push x s) = s.
Axiom top_push : forall x s, top (push x s) = x.
End Stack.
Check pop_push.
(* Now we can make use of stacks in our formalisation!!! *)

(* A NOTE OF CAUTION!!! *)
(* The capability to extend the underlying theory with arbitrary axioms
is a powerful but dangerous mechanism. We must avoid inconsistency.
*)
Section Caution.
Check False_ind.
Hypothesis ABSURD : False.
(* From a false hypothesis anything follows, including a contradiction. *)
Theorem oops : forall (P:Prop), P /\ ~P.
elim ABSURD.
Qed.
End Caution. (* We have declared ABSURD as a hypothesis to avoid its use outside this section. *)
|
{"author": "melpereira7", "repo": "VF_2122", "sha": "cbac6daa9e4640a095cfadc06ad5fa5722d4bbfd", "save_path": "github-repos/coq/melpereira7-VF_2122", "path": "github-repos/coq/melpereira7-VF_2122/VF_2122-cbac6daa9e4640a095cfadc06ad5fa5722d4bbfd/Exerc\u00edcios/Coq/lesson1.v"}
|
Require Import Coq.Program.Equality.
Require Import List.
Import ListNotations.
Require Import IFOL.Util.List_index.
Require Import IFOL.Util.HVec.
Require Import IFOL.Util.Witness.
(* [RHVec Y xs]: a heterogeneous vector over the index list [xs],
   represented as a right-nested tuple with one [Y x] component per
   element [x] of [xs], terminated by [unit]. *)
Fixpoint RHVec {X} (Y : X -> Type) (xs : list X) : Type :=
  match xs with
  | [] => unit
  | x :: xs' => Y x * RHVec Y xs'
  end.

(* Project out the component selected by the witness [w : witness x xs]. *)
Fixpoint rhv_proj {X} {Y : X -> Type} {xs}
  {x} (w : witness x xs) {struct xs}
  : RHVec Y xs -> Y x.
Proof.
  destruct xs.
  - destruct w.
  - destruct w.
    + destruct e.
      exact fst.
    + intros [_ tl].
      exact (rhv_proj _ _ _ _ w tl).
Defined.

(* Apply [f] pointwise to every component of the vector. *)
Fixpoint rhv_map {X} {Y Z : X -> Type} (f : forall x, Y x -> Z x)
  {xs : list X} : RHVec Y xs -> RHVec Z xs :=
  match xs with
  | [] => fun _ => tt
  | x :: xs' => fun '(hd, tl) => (f x hd, rhv_map f tl)
  end.

(* Convert an inductively defined [HVec] into the nested-pair
   representation [RHVec]. *)
Fixpoint of_hvec {X} {Y : X -> Type} {xs} (v : HVec Y xs)
  : RHVec Y xs :=
  match v with
  | hvnil => tt
  | hvcons y ys => (y, of_hvec ys)
  end.
(* Insert a component of type [Y x] at the position described by the
   partial witness [pw : part_witness x xs' xs], turning a vector over
   [xs'] into one over [xs]. *)
Fixpoint rhv_insert {X} {Y : X -> Type}
  {xs xs'} {x} (pw : part_witness x xs' xs) {struct xs} :
  Y x -> RHVec Y xs' -> RHVec Y xs.
Proof.
  destruct xs.
  - destruct pw.
  - destruct pw.
    destruct p.
    destruct e,e0.
    + exact pair.
    + destruct xs'.
      * destruct y.
      * destruct y.
        destruct e.
        intros y1 [y2 tl].
        apply (pair y2).
        eapply rhv_insert.
        ** exact p.
        ** exact y1.
        ** exact tl.
Defined.

(* [rhv_map] composed with [of_hvec] coincides with the direct
   [HVec_rect] fold that maps [f] while rebuilding the nested pairs. *)
Lemma HVec_rect_map {X} {Y Z : X -> Type} {xs} (f : forall x, Y x -> Z x) :
  forall ys : HVec Y xs,
    rhv_map f (of_hvec ys) =
    HVec_rect X Y (fun xs0 _ => RHVec Z xs0) tt
      (fun (x : X) (xs0 : list X) (y : Y x)
         (_ : HVec Y xs0) (IHys : RHVec Z xs0) =>
       pair (f x y) IHys) xs ys.
Proof.
  induction ys.
  - reflexivity.
  - simpl.
    now rewrite IHys.
Defined.
(* Replace the component selected by [w] with a new value, leaving the
   other components unchanged. *)
Fixpoint rhv_replace {X} {Y : X -> Type}
  {xs} {x} (w : witness x xs) {struct xs} : Y x -> RHVec Y xs -> RHVec Y xs.
Proof.
  destruct xs.
  - destruct w.
  - destruct w.
    + destruct e.
      intros y [_ tl].
      exact (y, tl).
    + intros y1 [y2 tl].
      apply (pair y2).
      eapply rhv_replace.
      exact w.
      exact y1.
      exact tl.
Defined.

(* [of_hvec] commutes with replacement. *)
Lemma of_hvec_replace {X} {Y : X -> Type} {xs} {x}
  (w : witness x xs) (y : Y x) (v : HVec Y xs) :
  of_hvec (hvec_replace w y v) = rhv_replace w y (of_hvec v).
Proof.
  induction v.
  - destruct w.
  - destruct w.
    + now destruct e.
    + simpl.
      now rewrite IHv.
Defined.

(* [rhv_map] commutes with replacement, mapping the new value too. *)
Lemma rhv_map_replace {X} {Y Z : X -> Type} {xs} {x}
  (f : forall x, Y x -> Z x) (w : witness x xs) (y : Y x)
  (ys : RHVec Y xs) :
  rhv_map f (rhv_replace w y ys) =
  rhv_replace w (f x y) (rhv_map f ys).
Proof.
  induction xs.
  - destruct w.
  - simpl.
    destruct w.
    + destruct e.
      now destruct ys.
    + destruct ys.
      now rewrite IHxs.
Defined.
(* Projection out of the converted vector agrees with [hvec_proj]. *)
Lemma rhv_proj_of_hvec {X} {Y : X -> Type} {xs} {x}
  (w : witness x xs) (ys : HVec Y xs) :
  rhv_proj w (of_hvec ys) = hvec_proj ys w.
Proof.
  induction ys.
  - destruct w.
  - simpl.
    destruct w.
    + now destruct e.
    + apply IHys.
Defined.

(* Projection commutes with [rhv_map]. *)
Lemma rhv_proj_map {X} {Y Z : X -> Type} {xs} {x} (f : forall x, Y x -> Z x)
  (ys : RHVec Y xs) (w : witness x xs) :
  rhv_proj w (rhv_map f ys) =
  f x (rhv_proj w ys).
Proof.
  induction xs.
  - destruct w.
  - destruct w.
    + simpl.
      destruct e.
      now destruct ys.
    + destruct ys.
      simpl.
      apply IHxs.
Defined.

(* Projecting at the position just replaced yields the new value. *)
Lemma rhv_proj_replace {X} {Y : X -> Type} {xs} {x}
  (w : witness x xs) (ys : RHVec Y xs) (y : Y x) :
  rhv_proj w (rhv_replace w y ys) = y.
Proof.
  induction xs.
  - destruct w.
  - destruct w.
    + destruct e.
      now destruct ys.
    + destruct ys; simpl.
      apply IHxs.
Defined.
(* Projecting at a position other than the replaced one is unaffected
   by the replacement; positions are compared via [nat_of_wit]. *)
Lemma rhv_proj_replace_neq {X} {Y : X -> Type} {xs} {x x'}
  (w : witness x xs) (w' : witness x' xs) (ys : RHVec Y xs) (y : Y x') :
  nat_of_wit w <> nat_of_wit w' ->
  rhv_proj w (rhv_replace w' y ys) = rhv_proj w ys.
Proof.
  intro Hneq.
  induction xs.
  - destruct w.
  - destruct w.
    + destruct e.
      destruct w'.
      * destruct e.
        now destruct ys.
      * now destruct ys.
    + destruct w'.
      * destruct e.
        now destruct ys.
      * simpl.
        destruct ys.
        apply IHxs.
        intro Heq.
        apply Hneq.
        simpl.
        now destruct Heq.
Defined.

(* Replacing a component by its own projection is the identity. *)
Lemma rhv_replace_proj {X} {Y : X -> Type} {xs} {x}
  (w : witness x xs) (ys : RHVec Y xs) :
  rhv_replace w (rhv_proj w ys) ys = ys.
Proof.
  induction xs.
  - destruct w.
  - destruct w.
    + destruct e.
      now destruct ys.
    + simpl.
      destruct ys.
      now rewrite IHxs.
Defined.
(* Restrict a vector over [ys] to one over a suffix [xs], dropping the
   leading components indicated by the suffix witness. *)
Fixpoint rhv_suff {X} {Y : X -> Type} {xs} {ys}
  (w : suffix_wit xs ys) : RHVec Y ys -> RHVec Y xs :=
  match w with
  | sw_refl => fun v => v
  | sw_cons w' => fun '(_, tl) => rhv_suff w' tl
  end.

(* Project the component at a positional index [i : list_index xs];
   the result type is computed by [list_proj]. *)
Fixpoint rhv_index_proj {X} {Y : X -> Type}
  {xs : list X} (i : list_index xs)
  (v : RHVec Y xs) {struct xs} : Y (list_proj xs i).
Proof.
  destruct xs.
  - destruct i.
  - simpl in *.
    destruct i.
    + exact (fst v).
    + exact (rhv_index_proj X Y xs l (snd v)).
Defined.
(* Projecting through a weakened witness out of a vector with a freshly
   inserted component is the same as projecting out of the original
   vector: the insertion is invisible at weakened positions. *)
Fixpoint rhv_proj_rhv_insert_witness_weak
  {X} {Y : X -> Type}
  {xs xs'} {x x'}
  (pw : part_witness x xs' xs)
  (w : witness x' xs')
  (y : Y x) (v : RHVec Y xs') {struct xs} :
  rhv_proj (witness_weak pw w) (rhv_insert pw y v) =
  rhv_proj w v.
Proof.
  destruct xs.
  - destruct pw.
  - destruct pw.
    + destruct xs'.
      * destruct w.
      * destruct w.
        ** simpl.
           destruct p.
           destruct e0.
           destruct e1.
           now destruct e.
        ** simpl.
           destruct p.
           destruct e.
           destruct e0.
           now destruct v.
    + destruct xs'.
      * destruct y0.
      * simpl.
        destruct w.
        ** destruct e.
           destruct y0.
           destruct e.
           now destruct v.
        ** destruct y0.
           destruct e.
           destruct v.
           apply rhv_proj_rhv_insert_witness_weak.
Defined.
(* If inverting the witness [w] against the insertion point [pw] lands
   in the [inr] case (i.e. [w] does not point at the inserted slot),
   then projecting after insertion equals projecting the original
   vector with the inverted witness [w']. *)
Fixpoint rhv_proj_rhv_insert_invert {X} {Y : X -> Type}
  {xs xs'} {x x'} (pw : part_witness x' xs' xs) (w : witness x xs)
  (w' : witness x xs') (v : RHVec Y xs') (a : Y x') {struct xs} :
  witness_invert pw w = inr w' ->
  rhv_proj w' v =
  rhv_proj w (rhv_insert pw a v).
Proof.
  intro H.
  destruct xs.
  - destruct w.
  - destruct w.
    + simpl.
      destruct e.
      destruct pw.
      * destruct p.
        destruct e.
        destruct e0.
        simpl in *.
        destruct xs'.
        ** destruct w'.
        ** discriminate.
      * simpl in *.
        destruct xs'.
        ** destruct y.
        ** destruct y.
           destruct e.
           destruct v.
           now inversion H.
    + simpl.
      destruct pw.
      * destruct p.
        destruct e.
        destruct e0.
        simpl in H.
        destruct xs'.
        ** destruct w.
        ** now inversion H.
      * destruct xs'.
        destruct y.
        destruct y.
        destruct e.
        destruct v.
        simpl in *.
        destruct witness_invert eqn:G.
        ** discriminate.
        ** inversion H.
           eapply rhv_proj_rhv_insert_invert.
           exact G.
Defined.
(* Companion to the previous lemma: if inverting [w] against [pw] lands
   in the [inl] case (i.e. [w] points exactly at the inserted slot, up
   to the equality proof [pf]), then projecting after insertion yields
   the inserted value [a] transported along [pf]. *)
Fixpoint rhv_proj_rhv_insert_invert2 {X} {Y : X -> Type}
  {xs xs'} {x x'} (pw : part_witness x xs' xs) (w : witness x' xs)
  (pf : x' = x)
  (v : RHVec Y xs') (a : Y x) {struct xs} :
  witness_invert pw w = inl pf
  ->
  rhv_proj w (rhv_insert pw a v)
  = match eq_sym pf with
    | eq_refl => a
    end.
Proof.
  intro.
  destruct xs.
  - destruct w.
  - destruct w.
    + simpl.
      destruct e.
      destruct pw.
      * destruct p.
        destruct e,e0.
        simpl in H.
        destruct xs'.
        ** assert (eq_refl = pf).
           { eapply inl_inj. exact H. }
           destruct H0.
           reflexivity.
        ** assert (eq_refl = pf).
           { eapply inl_inj. exact H. }
           destruct H0.
           reflexivity.
      * simpl in H.
        destruct xs'.
        ** destruct y.
        ** destruct y.
           discriminate.
    + simpl.
      destruct pw.
      * simpl in H.
        destruct xs'.
        ** destruct p.
           destruct e,e0.
           destruct w.
        ** destruct p.
           destruct e, e0.
           discriminate.
      * destruct xs'.
        ** destruct y.
        ** destruct y.
           simpl in H.
           destruct witness_invert eqn:G.
           *** destruct e.
               destruct v.
               eapply rhv_proj_rhv_insert_invert2.
               rewrite G.
               now rewrite (inl_inj _ _ H).
           *** discriminate.
Defined.
|
{"author": "emarzion", "repo": "IFOL", "sha": "135d188c9c899350e8726f97891101b46d8a7c2f", "save_path": "github-repos/coq/emarzion-IFOL", "path": "github-repos/coq/emarzion-IFOL/IFOL-135d188c9c899350e8726f97891101b46d8a7c2f/src/Util/RHVec.v"}
|
using Libdl

# Path to the compiled OCaml shared library under test.
const shared_lib = "./ocaml.so"

# Load the shared library and initialise the embedded OCaml runtime.
function start_ocaml()
    # NOTE(review): the dlopen handle is discarded; presumably loading the
    # library is enough to keep it resident for the later ccall/globals.
    lib = Libdl.dlopen(shared_lib)
    ccall(("ocaml_jl_start", shared_lib), Cvoid, ())
end

start_ocaml()

# Functions registered by the OCaml side appear as globals in Main.
fn = Main.mycaml_fn

# Keyword-argument call.
Main.mycaml_fn(x=1, y=2)

# Single-tuple call vs. repeated positional calls.
println(fn((1, "foo", [1.2, "bar"])))
for i in 1:3
    println(fn(i, "foo", [1.2, "bar"]))
end

fn2 = Main.myother_fn
println(fn2(4))
println(fn2(2; y="test"))

# A function returning an indexable value; inspect its type and index it.
fn2 = Main.yetanother_fn
println(fn2(4))
println(typeof(fn2(4)))
println(fn2(4)[2])

# Pass a Julia closure for the OCaml side to call back into.
println(Main.with_julia_callback_fn(42, x -> 2*x))
|
{"hexsha": "6aa337aa20aef798621907a8bce5445531498d3d", "size": 518, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/generic_test.jl", "max_stars_repo_name": "LaurentMazare/ocaml.jl", "max_stars_repo_head_hexsha": "1be77de8caa1da5610afd8d6c49d3359b5cbed25", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-07-11T20:11:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-21T01:49:30.000Z", "max_issues_repo_path": "test/generic_test.jl", "max_issues_repo_name": "ejhill24/ocaml.jl", "max_issues_repo_head_hexsha": "1be77de8caa1da5610afd8d6c49d3359b5cbed25", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/generic_test.jl", "max_forks_repo_name": "ejhill24/ocaml.jl", "max_forks_repo_head_hexsha": "1be77de8caa1da5610afd8d6c49d3359b5cbed25", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-21T01:49:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-21T01:49:32.000Z", "avg_line_length": 18.5, "max_line_length": 52, "alphanum_fraction": 0.6640926641, "num_tokens": 185}
|
//Copyright (c) 2013 Singapore-MIT Alliance for Research and Technology
//Licensed under the terms of the MIT License, as described in the file:
// license.txt (http://opensource.org/licenses/MIT)
#pragma once
#include "util/LangHelpers.hpp"
#include "metrics/Length.hpp"
#include <map>
#include <vector>
#include <boost/unordered_map.hpp>
#include <boost/utility.hpp>
#include "StreetDirectory.hpp"
namespace sim_mob {

// NOTE(review): the entire grid-based StreetDirectory implementation below
// has been commented out, leaving this namespace empty.  It is kept verbatim
// as a reference should the grid lookup ever be revived.

//class CoordinateTransform;
//class RoadRunnerRegion;
//class Point;
//
//class GridStreetDirectoryImpl : public StreetDirectory::Impl {
//public:
//    GridStreetDirectoryImpl(const RoadNetwork& network, centimeter_t gridWidth, centimeter_t gridHeight);
//    virtual ~GridStreetDirectoryImpl() {}
//
//protected:
//    //virtual std::pair<sim_mob::RoadRunnerRegion, bool> getRoadRunnerRegion(const sim_mob::RoadSegment* seg);
//
//    //virtual std::vector<const sim_mob::RoadSegment*> getSegmentsFromRegion(const sim_mob::RoadRunnerRegion& region);
//
//    virtual const BusStop* getBusStop(const Point& position) const;
//
//    virtual const Node* getNode(const int id) const;
//
//    virtual StreetDirectory::LaneAndIndexPair getLane(const Point& position) const;
//
//    //virtual const MultiNode* GetCrossingNode(const Crossing* cross) const;
//
//    virtual std::vector<StreetDirectory::RoadSegmentAndIndexPair> closestRoadSegments(const Point& point, centimeter_t halfWidth, centimeter_t halfHeight) const;
//
//    /**
//     * return a road segment from an aimsun-id
//     * @param id is a given aimsun id
//     * return a pointer to the associated road segment
//     */
//    virtual const sim_mob::RoadSegment* getRoadSegment(const unsigned int id);
//
//private:
//    // Partition the road network into a rectangular grid.
//    void partition(const RoadNetwork& network);
//
//    // Partition the list of road segments into a rectangular grid.
//    // If <isForward> is true, then vehicle traffic on the road segments flows from their start
//    // to end points.
//    void partition(const std::vector<RoadSegment*>& segments, bool isForward);
//
//    // Partition the road segment into a rectangular grid.
//    // If <isForward> is true, then vehicle traffic on the road segment flows from its start
//    // to end points.
//    void partition(const RoadSegment& segment, bool isForward);
//
//    // Return true if the stretch of the road segment is inside the specified grid cell.
//    // The stretch is specified by <p1>, <p2>, and <halfWidth>; the line from <p1> to <p2>
//    // traces the middle of the stretch.  <m> and <n> specify the (m, n) grid cell of a
//    // rectangular grid of gridWidth_ and gridHeight_.
//    bool checkGrid(int m, int n, const Point& p1, const Point& p2, centimeter_t halfWidth) const;
//
//    // Called once for each unique RoadSegment
//    //void buildLookups(const std::vector<RoadSegment*>& roadway, std::set<const Crossing*>& completed, const std::map<int, sim_mob::RoadRunnerRegion>& roadRunnerRegions, sim_mob::CoordinateTransform* coords);
//
//private:
//    centimeter_t gridWidth_;
//    centimeter_t gridHeight_;
//
//    // The following custom hash and equality functions were taken
//    // from the boost::unordered documentation.
//    struct Hash2D : private std::unary_function<Point, std::size_t> {
//        size_t operator()(const Point& key) const {
//            std::size_t seed = 0;
//            boost::hash_combine(seed, key.getX());
//            boost::hash_combine(seed, key.getY());
//            return seed;
//        }
//    };
//    struct Equal2D : private std::binary_function<Point, Point, bool> {
//        bool operator()(const Point& p1, const Point& p2) const {
//            return p1.getX() == p2.getX() && p1.getY() == p2.getY();
//        }
//    };
//
//    //Map of Crossings->MultiNode.  May not contain all crossings.
//    //std::map<const Crossing*, const MultiNode*> crossings_to_multinodes;
//
//    // map< key, vector<value> > is used for GridType instead of multimap<key, value>.
//    typedef std::vector<StreetDirectory::RoadSegmentAndIndexPair> RoadSegmentSet;
//    typedef boost::unordered_map<Point, RoadSegmentSet, Hash2D, Equal2D> GridType;
//
//    GridType grid_;
//    std::map<std::string, const RoadSegment*> roadSegments_;
//    std::set<const BusStop*> busStops_;
//    std::set<const Node*> nodes;
//    std::map<const RoadSegment*, RoadRunnerRegion> rrRegionLookup;
//    std::map<int, std::vector<const RoadSegment*> > rrRegionRevLookup; ///<Indexed by Region ID
//    std::map<const unsigned int, const sim_mob::RoadSegment*> segmentByAimsunID;
//
//};

}
|
{"hexsha": "d4e313b288df39a6e72780e678d0c6b8ae37a420", "size": 4617, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "dev/Basic/shared/geospatial/streetdir/GridStreetDirectoryImpl.hpp", "max_stars_repo_name": "gusugusu1018/simmobility-prod", "max_stars_repo_head_hexsha": "d30a5ba353673f8fd35f4868c26994a0206a40b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 50.0, "max_stars_repo_stars_event_min_datetime": "2018-12-21T08:21:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T09:47:59.000Z", "max_issues_repo_path": "dev/Basic/shared/geospatial/streetdir/GridStreetDirectoryImpl.hpp", "max_issues_repo_name": "gusugusu1018/simmobility-prod", "max_issues_repo_head_hexsha": "d30a5ba353673f8fd35f4868c26994a0206a40b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2018-12-19T13:42:47.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-13T04:11:45.000Z", "max_forks_repo_path": "dev/Basic/shared/geospatial/streetdir/GridStreetDirectoryImpl.hpp", "max_forks_repo_name": "gusugusu1018/simmobility-prod", "max_forks_repo_head_hexsha": "d30a5ba353673f8fd35f4868c26994a0206a40b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27.0, "max_forks_repo_forks_event_min_datetime": "2018-11-28T07:30:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-05T02:22:26.000Z", "avg_line_length": 40.8584070796, "max_line_length": 211, "alphanum_fraction": 0.6907082521, "num_tokens": 1146}
|
#!/usr/bin/env python
# AUTHOR: Shane Gordon
# ROLE: TODO (some explanation)
# CREATED: 2015-06-06 13:12:10
import os
import re
import sys
import logging
import argparse
import subprocess
from datetime import datetime
import numpy as np
import shutil
import seaborn as sns
import time
import matplotlib.pyplot as plt
import pandas as pd
import more_itertools
from mathops import sum_squares_residuals
from mathops import r_squared
from mathops import rms_error
from readers import read_protss
from readers import read_continll
from readers import read_cdsstr
# Citation/acknowledgement notice printed at startup and written to the log.
notes = (
    "\n"
    "CDGo is research software. If you use CDGo in your research, please \n"
    "acknowledge it appropriately. E.g.:\n"
    "\t...using the software package CDPro (Sreerama and Woody, 2000) as \n"
    "\timplemented by the software package CDGo \n"
    '\t(available from https://github.com/s-gordon/CDGo)\n\n'
    'Any use of CDGo in research should also cite the software package\n'
    'CDPro (found http://sites.bmb.colostate.edu/sreeram/CDPro/):\n'
    '\tSreerama, N., & Woody, R. W. (2000). Estimation of protein \n'
    '\tsecondary structure from circular dichroism spectra: comparison of \n'
    '\tCONTIN, SELCON, and CDSSTR methods with an expanded reference set. \n'
    '\tAnalytical biochemistry, 287(2), 252-260.\n\n'
    'Use of individual protein reference databases should also be credited\n'
    'appropriately. For a full listing of each database and the \n'
    'appropriate citation, please use the following link:\n'
    '\thttp://sites.bmb.colostate.edu/sreeram/CDPro/\n'
)
# Timestamp for the run log written by logfile().
now = datetime.now()
# print() call form works under both Python 2 and 3; the original
# `print notes` statement was a SyntaxError on Python 3.
print(notes)
def allowed_ibasis_val(x):
    """argparse type-checker: validate a CDPro ibasis number.

    :x: command-line token for an ibasis value
    :returns: the value as an int, when it lies in [1, 10]
    :raises argparse.ArgumentTypeError: for values outside [1, 10]
    """
    x = int(x)
    # BUG FIX: the original tested `1 < x > 10` (i.e. x > 1 AND x > 10),
    # which accepted 0 and negative values; reject anything outside 1..10.
    if x < 1 or x > 10:
        raise argparse.ArgumentTypeError(
            "Acceptable ibases are between 1 and 10"
        )
    return x
def parse_num_list(string):
    """argparse type-checker: parse '2' or '0-5' into a list of ints.

    :string: a single integer ('2') or an inclusive range ('0-5')
    :returns: list of integers covering the range
    :raises argparse.ArgumentTypeError: when the token matches neither form
    """
    match = re.match(r'(\d+)(?:-(\d+))?$', string)
    if match is None:
        raise argparse.ArgumentTypeError(
            "'" + string +
            "' is not a range of number. Expected forms like '0-5' or '2'.")
    lo = int(match.group(1), 10)
    # A bare integer has no second group; treat it as a one-element range.
    hi = int(match.group(2), 10) if match.group(2) else lo
    return list(range(lo, hi + 1))
# Argparse
class MyParser(argparse.ArgumentParser):
    """ArgumentParser variant that prints the full help text on error."""

    def error(self, message):
        # Report the argparse error, then show usage before exiting with
        # the conventional CLI-error status.
        sys.stderr.write('error: {0}\n'.format(message))
        self.print_help()
        sys.exit(2)
# Command-line interface; MyParser shows the full help on argument errors.
parser = MyParser(description='Run CDPro automatically.',
                  formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-C', action="store", dest="cdpro_dir",
                    default="/Users/sgordon/.wine/drive_c/Program Files/CDPro",
                    help="CDPro executable directory")
# BUG FIX: the -i help text previously duplicated the -C description
# ("CDPro executable directory"); -i is the input data file.
parser.add_argument('-i', action="store", dest="cdpro_input",
                    required=True, help="CDPro input file")
parser.add_argument('--mol_weight', action="store", required=True,
                    type=float,
                    help="Molecular weight (Da)")
parser.add_argument('--number_residues', action="store", required=True,
                    type=int, help="Residues")
parser.add_argument('--concentration', action="store", required=True,
                    type=float, help="Concentration (mg/ml)")
parser.add_argument('--buffer', action="store", required=False,
                    dest="buffer", help="Buffer file for blank.")
parser.add_argument('--cdsstr', action="store_true", required=False,
                    help="Use CDSSTR algorithm for fitting.")
parser.add_argument('--db_range', type=parse_num_list,
                    default="1-10", help="""
                    CDPro ibasis range to use. Accepted values are
                    between 1 and 10 inclusive.
                    Acceptable values are ranges (e.g. 2-5) or integers
                    (e.g. 2).
                    """)
parser.add_argument('--continll', action="store_true", required=False,
                    help="""
                    Use CONTINLL algorithm for fitting.
                    If you use CONTINLL for your work please cite the
                    following:
                    [1] Sreerama, N., & Woody, R. W. (2000). Estimation of
                    protein secondary structure from circular dichroism
                    spectra: comparison of CONTIN, SELCON, and CDSSTR methods
                    with an expanded reference set. Analytical biochemistry,
                    287(2), 252-260.
                    [2]
                    """)
parser.add_argument('-v', '--verbose', action="store_true",
                    help="Increase verbosity")
# Parsed at import time; `result` is read by the rest of the module.
result = parser.parse_args()
"""
If verbosity set, change logging to debug.
Else leave at info
"""
if result.verbose:
logging.basicConfig(format='%(levelname)s:\t%(message)s',
level=logging.DEBUG)
else:
logging.basicConfig(format='%(levelname)s:\t%(message)s',
level=logging.INFO)
def logfile(fname, parser):
    """Write a plain-text log of this CDGo run's arguments.

    :fname: output logfile name
    :parser: argparse Namespace holding the parsed command-line arguments
    :returns: None
    """
    # BUG FIX: the original referenced cdgo.__version__ without importing
    # cdgo, which raised NameError at runtime. Import defensively so a
    # missing/irregular install still produces a log.
    try:
        import cdgo
        version = cdgo.__version__
    except (ImportError, AttributeError):
        version = 'unknown'
    with open(fname, 'w') as f:
        f.write(
            "Logfile for CDGo. See below details of the arguments provided." +
            "\n\n")
        f.write(notes + "\n\n")
        f.write('Date: {}\n'.format(now.strftime("%Y-%m-%d %H:%M")))
        f.write('CDGo Version: {}\n'.format(version))
        f.write('CDPro path: {}\n'.format(parser.cdpro_dir))
        f.write('Input file: {}\n'.format(parser.cdpro_input))
        f.write('Protein molecular weight: {}\n'.format(parser.mol_weight))
        f.write('Number of residues: {}\n'.format(parser.number_residues))
        f.write('Protein concentration: {}\n'.format(parser.concentration))
        f.write('Buffer file: {}\n'.format(parser.buffer))
        f.write('iBasis range: {}\n'.format(parser.db_range))
        f.write('CONTINLL?: {}\n'.format(parser.continll))
        f.write('CDSSTR?: {}\n'.format(parser.cdsstr))
def read_line(f, line_no):
    """Return one line of a text file by 0-based index.

    :f: file name
    :line_no: single (0-based) line number to read
    :returns: the line including its trailing newline, or None when the
        file has fewer lines (the original raised NameError in that case)
    """
    with open(f) as fp:
        for i, line in enumerate(fp):
            if i == line_no:
                # Return immediately; no need to scan the rest of the file.
                return line
    return None
def read_aviv(f, save_line_no=False, last_line_no=False):
    """Read a raw Aviv CD wavelength-scan file into a DataFrame.

    :f: path to the Aviv data file
    :save_line_no: kept for backward compatibility (see note below)
    :last_line_no: if given, use this row index as the end of the data
        block instead of searching for the $ENDDATA marker
    :returns: tuple ``(df, line_no)`` — DataFrame indexed by wavelength
        (dynode > 600 rows removed) and the data-block end row index

    NOTE(review): the original ``return df, line_no if save_line_no is
    True else df`` parsed as ``(df, line_no if ... else df)`` and thus
    always returned a 2-tuple; callers index the result with ``[0]``.
    Returning ``(df, line_no)`` unconditionally preserves that calling
    convention while fixing the accidental ``(df, df)`` tuple.
    """
    # The second line of the file summary names the experiment type;
    # only wavelength scans are supported.
    exp_type = read_line(f, 1)
    # delimit with colon + space
    exp_type = exp_type.split(': ')[1]
    # remove trailing newlines and carriage returns
    exp_type = exp_type.rstrip("\r\n")
    if exp_type != "Wavelength":
        logging.error(
            ("The experiment type for one or more of input files is {e}.\n"
             "Only wavelength experiments are allowed at this time. Please\n"
             "check your inputs and try again."
             ).format(e=exp_type)
        )
        sys.exit(2)
    else:
        logging.debug(
            "Experiment type for file {f} is {e}.".format(f=f, e=exp_type)
        )
    # Data table starts after an 18-line header.
    df = pd.read_csv(f, sep=' ', skiprows=18, header=0, engine='python')
    if last_line_no is False:
        # Locate the $ENDDATA sentinel terminating the data block
        # (raw string avoids the Py3 invalid-escape warning).
        line_no = df[df['X'].str.contains(r"\$ENDDATA")].index.tolist()
        line_no = line_no[0]
    else:
        line_no = last_line_no
    df = df.iloc[0:line_no]
    # Subsample the resulting dataframe to exclude irrelevant cols
    df = df[['X', 'CD_Signal', 'CD_Dynode']]
    # Set row names (indices) to col X (i.e. wavelength)
    df = df.set_index('X')
    # Throw away data when the dynode voltage peaks beyond 600
    df = df[(df.CD_Dynode < 600)]
    return df, line_no
def replace_input(input, output, ibasis):
    """Rewrite the '# PRINT' stanza of a CDPro input file to select *ibasis*.

    :input: path of the CDPro input file to read
    :output: path to write the edited file to (may equal *input*)
    :ibasis: reference-database number substituted into the stanza
    :returns: None
    """
    pattern = r'# PRINT(.*\n)\s+(\S+)(.*)'
    replace = '# PRINT IBasis\n 0 {}'.format(ibasis)
    # Use context managers so handles are closed even on error; the
    # original also wrote the result one character at a time.
    with open(input, 'r') as f:
        lines = f.read()
    r = re.sub(pattern, replace, lines)
    with open(output, 'w') as o:
        o.write(r)
def set_style():
    """
    Set the global style for seaborn plots.

    Applies seaborn's "darkgrid" theme process-wide, so every figure
    created afterwards inherits it.
    """
    sns.set(style="darkgrid")
def cdpro_input_header(firstvalue, lastvalue, factor):
    """Return a multiline string mimicking a CDPro input-file header.

    :firstvalue: one wavelength bound (written to the WL_End column)
    :lastvalue: the other wavelength bound (written to the WL_Begin
        column — CDPro data runs long->short wavelength, hence the swap)
    :factor: scaling factor for the Factor column. GENERALIZED: the
        original accepted but ignored this parameter and hard-coded
        1.0000; output is unchanged for the existing call site, which
        passes 1.
    :returns: header string
    """
    firstvalue = '{:0.4f}'.format(firstvalue)
    lastvalue = '{:0.4f}'.format(lastvalue)
    factor = '{:0.4f}'.format(factor)
    header = ("# \n"
              "# PRINT IBasis \n"
              " 1 0\n"
              "# \n"
              "# ONE Title Line \n"
              " Title\n"
              "# \n"
              "# WL_Begin WL_End Factor \n"
              " {first} {last} {factor}\n"
              "# \n"
              "# CDDATA (Long->Short Wavelength; 260 - 178 LIMITS \n"
              ).format(first=lastvalue, last=firstvalue, factor=factor)
    return header
def cdpro_input_footer():
    """Return the fixed trailer block of a CDPro input file.

    :returns: multiline string mimicking the CDPro input footer
    """
    trailer_lines = (
        "# \n",
        "# IGuess Str1 Str2 Str3 Str4 Str5 Str6\n",
        " 0 \n",
    )
    return ''.join(trailer_lines)
def check_dir(dir):
    """Abort the program unless directory *dir* exists.

    :dir: path expected to be an existing directory
    :returns: None; exits the process when the directory is missing
    """
    if os.path.isdir(dir):
        return
    logging.error('Path %s not found', dir)
    logging.error('Aborting')
    sys.exit()
def delete_dir(dir):
    """Ensure *dir* exists and is empty.

    Any existing tree at *dir* is removed before the directory is
    recreated.

    :dir: directory path to reset
    :returns: None
    """
    # Wipe whatever is currently there, then start from a clean slate.
    if os.path.exists(dir):
        shutil.rmtree(dir)
    os.makedirs(dir)
def check_cmd(*exes):
    """Verify that each named executable is accessible.

    Each argument is invoked as ``<exe> --version`` through the shell; a
    non-zero exit status aborts the program with status 2. (The varargs
    parameter was misleadingly named ``*kwargs`` in the original.)

    :exes: absolute paths to executables, or names resolvable via PATH
    :returns: None
    """
    for exe in exes:
        try:
            # shell=True so PATH lookup and the >/dev/null redirection
            # behave as in the original invocation
            subprocess.check_call(['%s --version>/dev/null' % exe], shell=True)
        except subprocess.CalledProcessError:
            logging.error('Command %s not found or not in path' % exe)
            sys.exit(2)
def make_dir(dir):
    """Create directory *dir*, doing nothing when it already exists."""
    if os.path.exists(dir):
        return
    os.makedirs(dir)
def cd_output_style(style_1, style_2, algorithm):
    """Return whichever of two candidate output filenames exists.

    CDPro binaries write their report under slightly different names
    depending on build; try each spelling in order.

    :style_1: first candidate filename (checked first)
    :style_2: second candidate filename
    :algorithm: algorithm name, used only for the debug message
    :returns: the existing filename; exits with status 2 when neither exists
    """
    for candidate in (style_1, style_2):
        if os.path.isfile(candidate):
            logging.debug('{algorithm} style is {style}'.format(
                algorithm=algorithm, style=candidate))
            return candidate
    sys.exit(2)
def chunks(l, n):
    """
    Yield successive n-sized chunks from l.

    The final chunk may be shorter when len(l) is not a multiple of n.
    (``range`` replaces the Python-2-only ``xrange``; behavior is
    identical on both interpreters.)
    """
    for i in range(0, len(l), n):
        yield l[i:i + n]
def list_params(df):
    """Summarise a dataframe's index.

    :df: pandas dataframe (or series) with a float-convertible index
    :returns: tuple ``(min, max, step)`` of the index, where step is the
        spacing between the last two entries

    NOTE(review): main() unpacks this as ``max, min, step`` — the names
    there are swapped relative to this return order; confirm before
    changing either side.
    """
    idx = df.index.astype(float)
    try:
        lo = idx.min()
        hi = idx.max()
        step = idx[-1] - idx[-2]
    except IndexError:
        # Fewer than two index entries means the file held no usable data.
        logging.error(
            "Bad input data. Please check that data is correctly formatted"
        )
        sys.exit(2)
    return lo, hi, step
def single_line_scatter(datafile, fit_label, exp_label, ax,
                        flip=True, x_col_name='WaveL',
                        calc_col='CalcCD', xlabel='Wavelength (nm)',
                        ylabel='$\Delta\epsilon$ ($M^{-1}{\cdot}cm^{-1}$)'):
    """Plot one experimental CD series plus its calculated fit onto *ax*.

    :datafile: whitespace-delimited CDPro output file
    :fit_label: legend label for the calculated (fit) curve
    :exp_label: legend label for the experimental points
    :ax: matplotlib axis to draw onto
    :flip: reverse row order to undo CDPro's long->short wavelength output
    :x_col_name: column holding the x (wavelength) values
    :calc_col: column holding the calculated CD values
    :xlabel: x-axis label
    :ylabel: y-axis label
    :returns: None
    """
    df = pd.read_table(datafile, skipinitialspace=True, sep=r"\s*",
                       engine='python')
    # Invert data vertically to compensate for CDPro output
    if flip is True:
        df = df.iloc[::-1]
    # CONTINLL output names the experimental column 'ExpCD' while CDSSTR
    # output uses 'Exptl' (see main()); try both before giving up.
    try:
        df.plot(x=x_col_name, y='ExpCD', style='.', ax=ax, label=exp_label)
    except KeyError:
        try:
            df.plot(x=x_col_name, y='Exptl', style='.', ax=ax, label=exp_label)
        except KeyError as e:
            # Neither column present: log and still draw the fit curve.
            logging.error(e)
    df.plot(x=x_col_name, y=calc_col, style='-', ax=ax, label=fit_label)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
def cdpro_input_writer(body, head, fname='input'):
    """Write a complete CDPro input file: header, data rows, footer.

    :body: CDPro input body text. Contains n rows of length 10, where the
        final line may be up to 10 items
    :head: CDPro input header information (see cdpro_input_header)
    :fname: output filename
    :returns: None
    """
    # `with` guarantees the handle is closed even if a write fails
    # (the original left the file open on error).
    with open(fname, 'w') as f:
        f.write(head)
        for line in body:
            # Separate list items by two spaces and append newline
            f.write(' ' + ' '.join(str(x) for x in line) + '\n')
        f.write(cdpro_input_footer())
def double_line_scatter(datafile1, datafile2, fit_label1, fit_label2,
                        exp_label,
                        df1_headers, df2_headers, outfile='output.png',
                        flip=True, xlabel='Wavelength (nm)',
                        ylabel='$\Delta\epsilon$ ($M^{-1}{\cdot}cm^{-1}$)'):
    """Overlay one experimental series with two calculated fits and save.

    :datafile1: first whitespace-delimited CDPro output file (supplies
        both the experimental points and the first fit)
    :datafile2: second CDPro output file (supplies the second fit only)
    :fit_label1: legend label for the fit from datafile1
    :fit_label2: legend label for the fit from datafile2
    :exp_label: legend label for the experimental points
    :df1_headers: columns to read from datafile1
    :df2_headers: columns to read from datafile2
    :outfile: output image path
    :flip: reverse row order to undo CDPro's long->short wavelength output
    :xlabel: x-axis label
    :ylabel: y-axis label
    :returns: None
    """
    fig, ax = plt.subplots(nrows=1, ncols=1)
    df1 = pd.read_table(datafile1, skipinitialspace=True, sep=r"\s*",
                        engine='python', usecols=df1_headers)
    df2 = pd.read_table(datafile2, skipinitialspace=True, sep=r"\s*",
                        engine='python', usecols=df2_headers)
    # Invert data vertically to compensate for CDPro output
    if flip is True:
        df1 = df1.iloc[::-1]
        df2 = df2.iloc[::-1]
    df1.plot(x='WaveL', y='ExpCD', style='o', ax=ax, label=exp_label)
    df1.plot(x='WaveL', y='CalcCD', style='-', ax=ax, label=fit_label1)
    df2.plot(x='WaveL', y='CalcCD', style='-', ax=ax, label=fit_label2)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    plt.savefig(outfile, bbox_inches='tight')
def drop_indices(df):
    """Drop rows whose index sits on a half-integer wavelength.

    CDPro discards non-integer wavelengths, so rows indexed at
    159.5, 160.5, ..., 259.5 are removed; integer-indexed rows survive.

    :df: pandas dataframe with a float-based index
    :returns: subsampled dataframe
    """
    # Mask of rows to KEEP: everything not on the half-integer grid.
    keep_mask = ~df.index.isin(np.arange(159.5, 260.5, 1))
    return df[keep_mask]
def millidegrees_to_epsilon(df, mrc):
    """Convert a CD signal from millidegrees to delta-epsilon strings.

    df: single column pandas dataframe (or series) of millidegree values
    mrc: mean residue concentration conversion factor
    returns: series of values formatted to three decimal places
    """
    converted = df * mrc / 3298
    return converted.map(lambda value: '%1.3f' % value)
def better_alg_eval(df):
    """Placeholder for comparing algorithm fits (not yet implemented).

    :df: fit-summary dataframe; currently unused
    :returns: None — stub, intentionally does nothing
    """
    pass
def dec_to_percent(n):
    """Convert a fraction to a percentage.

    :n: float value or list-like fraction
    :returns: the input scaled by 100
    """
    return 100 * n
def format_val(v):
    """Render a number as a percentage string with one decimal place.

    :v: float
    :returns: str, e.g. 12.3 -> '12.3%'
    """
    return '%.1f%%' % v
def find_line(fname, pattern):
    """Scan a CDPro ProtSS-style report for secondary-structure stats.

    :fname: file name to scan
    :pattern: substring identifying the secondary-structure line; its
        tokens from position 2 onward are parsed as floats
    :returns: tuple ``(o, db, rmsd)`` — structure fractions, reference
        database id, and RMSD formatted to 3 decimal places. Any element
        whose line was not found is None (the original raised NameError
        in that case).
    """
    o = None
    db = None
    rmsd = None
    with open(fname) as search:
        for line in search:
            line = line.strip()  # remove '\n' at EOL
            if pattern in line:
                # output ss values as a list of floats
                o = [float(i) for i in line.split()[2:]]
            if 'Ref. Prot. Set' in line:
                db = line.split()[3]  # split db to get int
            # exclude the NRMSD line, which also contains 'RMSD(Exp-Calc)'
            if 'RMSD(Exp-Calc)' in line and 'NRMSD(Exp-Calc)' not in line:
                rmsd = '{:.3f}'.format(float(line.split()[1]))
    return o, db, rmsd
def best_fit(df, col, ax):
    """Plot the best (lowest-RMSD) fit for one algorithm onto *ax*.

    :df: summary DataFrame with columns 'alg' and 'rmsd', indexed by ibasis
    :col: algorithm key, 'continll' or 'cdsstr'
    :ax: matplotlib axis to draw on
    :returns: None; exits with status 2 on an unknown algorithm key
    """
    # BUG FIX: compare string values with '==' — the original used 'is',
    # which relies on CPython string interning and is not guaranteed.
    if col == 'continll':
        fit_fname = "CONTIN.CD"
    elif col == 'cdsstr':
        fit_fname = 'reconCD.out'
    else:
        logging.error("Unknown algorithm reference {} supplied.".format(col))
        sys.exit(2)
    # select only rows with alg value equal to col
    df = df.loc[df['alg'] == col]
    # select row with lowest rmsd value as top ('.ix' was removed from
    # pandas; '.loc' is the supported label-based equivalent)
    top = df.loc[df['rmsd'].idxmin()]
    logging.info('best ibasis for {a}: {i}'.format(a=col, i=top.name))
    # full file name and path for plot file
    fname = '{a}-ibasis{i}/{f}'.format(a=col, i=top.name, f=fit_fname)
    # plot label for matplotlib
    flab = '{alg} ibasis {ib} (RMSD: {rmsd})'.format(
        alg=col, ib=top.name, rmsd=top['rmsd'])
    # exp label for matplotlib
    elab = '{} exp'.format(col)
    # plot on supplied axis
    single_line_scatter(fname, flab, elab, ax)
def main():
    """Run the CDGo pipeline end to end.

    Reads the sample and buffer Aviv CD scans named on the command line,
    blank-subtracts and unit-converts the signal, writes a CDPro input
    file, runs CONTINLL and/or CDSSTR (via wine) for each requested
    ibasis, and writes a secondary-structure summary plus fit overlays.

    :returns: None
    """
    # shell commands for continll and cdsstr; the '|| echo' fallback keeps
    # the pipeline going when a CDPro binary crashes under wine
    continll_cmd = ('echo | WINEDEBUG=-all wine Continll.exe > stdout || '
                    'echo -n "(crashed)"')
    cdsstr_cmd = ('echo | WINEDEBUG=-all wine CDSSTR.EXE > stdout || '
                  '"echo -n (crashed)"')
    # read in data files for dataset (dat) and reference buffer for subtraction
    # (buf)
    dat, lline = read_aviv(result.cdpro_input, save_line_no=True)
    buf = read_aviv(result.buffer, save_line_no=False, last_line_no=lline)[0]
    # subtract signal for reference from sample
    df = (dat - buf).dropna()
    # convert into units of mre
    pep_bonds = result.number_residues - 1
    mrc = result.mol_weight / (pep_bonds * result.concentration)
    # Convert from the input units of millidegrees to the standard delta
    # epsilon
    epsilon = millidegrees_to_epsilon(df['CD_Signal'], mrc)
    # NOTE(review): list_params returns (min, max, step), so these names
    # are swapped; cdpro_input_header swaps its first two arguments back,
    # so the emitted header is correct. Confirm before renaming.
    max, min, step = list_params(epsilon)
    # Remap the df index to floats. Required for drop_indices
    epsilon.index = epsilon.index.map(float)
    # drop bad datapoints
    epsilon = drop_indices(epsilon)
    # force inverse sorting
    epsilon = epsilon.sort_index(ascending=False)  # force inverse sorting
    head = cdpro_input_header(max, min, 1)
    # CDPro's fixed-width format takes rows of up to 10 values
    body = list(more_itertools.chunked(epsilon, 10))
    cdpro_input_writer(body, head)
    check_cmd('wine')
    check_dir(result.cdpro_dir)
    base_dir = os.path.dirname(os.path.realpath(result.cdpro_input))
    cdpro_out_dir = "%s/%s-CDPro" % (base_dir, result.cdpro_input)
    delete_dir(cdpro_out_dir)
    logging.debug('Processing %s into %s' % (result.cdpro_input,
                                             cdpro_out_dir))
    # log args into to logfile lname
    lname = '{p}/input.log'.format(p=cdpro_out_dir)
    logfile(lname, result)
    # CDPro must run from its own directory with 'input' beside it
    shutil.copy("input", "%s/input" % (result.cdpro_dir))
    os.chdir(result.cdpro_dir)
    ss_assign = pd.DataFrame()
    for ibasis in result.db_range:
        logging.info('ibasis %s', ibasis)
        replace_input('input', 'input', ibasis)
        ss_col_head = ['ibasis', 'alg', 'ahelix', 'bstrand', 'turn', 'unord',
                       'rmsd', 'ss_res', 'r2']
        if result.continll is True:
            """
            if continll switch is True, run the continll algorithm and pull
            secondary structure assignments
            """
            logging.debug('Running CONTINLL')
            subprocess.call([continll_cmd], shell=True)
            continll_outdir = ('%s/continll-ibasis%s' % (cdpro_out_dir,
                                                         ibasis))
            continll_out = cd_output_style("CONTINLL.OUT", "continll.out",
                                           "continll")
            make_dir(continll_outdir)
            for f in ["CONTIN.CD", "CONTIN.OUT", continll_out,
                      "BASIS.PG", "ProtSS.out", "SUMMARY.PG", "stdout"]:
                shutil.move(f, "%s/" % (continll_outdir))
            if os.path.isfile("input"):
                shutil.copy("input", "%s/" % (continll_outdir))
            # read in fit values and stats
            # NOTE(review): 'int' shadows the builtin of the same name for
            # the rest of this loop iteration.
            db, int, ss = read_protss(
                '{}/ProtSS.out'.format(continll_outdir))
            """
            read in continll output
            returns stats about fit such as rms error, sum-of-squares
            residuals, etc
            """
            p = read_continll('{}/CONTIN.CD'.format(continll_outdir))
            ss_res = sum_squares_residuals(p['CalcCD'], p['ExpCD'])
            r2 = r_squared(p['CalcCD'], p['ExpCD'])
            rmsd = rms_error(p['CalcCD'], p['ExpCD'])
            # define new dataframe with output from read_protss
            df = pd.DataFrame(
                [[db, 'continll', ss['ahelix'], ss['bstrand'], ss['turn'],
                  ss['unord'], rmsd, ss_res, r2]],
                index=[ibasis]
            )
            # append fit values and stats to dataframe
            ss_assign = ss_assign.append(df)
        if result.cdsstr is True:
            """
            if cdsstr switch is True, run the cdsstr algorithm and pull
            secondary structure assignments
            """
            logging.debug('Running CDSSTR')
            subprocess.call([cdsstr_cmd], shell=True)
            cdsstr_outdir = ('%s/cdsstr-ibasis%s' % (cdpro_out_dir, ibasis))
            make_dir(cdsstr_outdir)
            cdsstr_out = cd_output_style("CDsstr.out", "cdsstr.out", "CDSSTR")
            for f in ["reconCD.out", "ProtSS.out", cdsstr_out,
                      "stdout"]:
                shutil.move(f, "%s/" % (cdsstr_outdir))
            if os.path.isfile("input"):
                shutil.copy("input", "%s/" % (cdsstr_outdir))
            # read in fit values and stats
            db, int, ss = read_protss(
                '{}/ProtSS.out'.format(cdsstr_outdir))
            """
            read in continll output
            returns stats about fit such as rms error, sum-of-squares
            residuals, etc
            """
            p = read_cdsstr('{}/reconCD.out'.format(cdsstr_outdir))
            ss_res = sum_squares_residuals(p['CalcCD'], p['Exptl'])
            r2 = r_squared(p['CalcCD'], p['Exptl'])
            rmsd = rms_error(p['CalcCD'], p['Exptl'])
            df = pd.DataFrame(
                [[db, 'cdsstr', ss['ahelix'], ss['bstrand'], ss['turn'],
                  ss['unord'], rmsd, ss_res, r2]], index=[ibasis]
            )
            # append fit values and stats to dataframe
            ss_assign = ss_assign.append(df)
    os.chdir(cdpro_out_dir)
    set_style()
    # assign column headings
    ss_assign.columns = ss_col_head
    # round floats to 3 decimal places for certain columns
    ss_assign.rmsd = ss_assign.rmsd.round(3)
    ss_assign.ss_res = ss_assign.ss_res.round(3)
    ss_assign.r2 = ss_assign.r2.round(3)
    # Print the matplotlib overlay
    logging.debug('Plotting fit overlays')
    outfile = 'CDSpec-{}-{}-Overlay.png'.format(
        result.cdpro_input, time.strftime("%Y%m%d"))
    fig, ax = plt.subplots(nrows=1, ncols=1)
    if result.continll is True:
        best_fit(ss_assign, 'continll', ax)
    if result.cdsstr is True:
        best_fit(ss_assign, 'cdsstr', ax)
    ax.legend()
    plt.savefig(outfile, bbox_inches='tight')
    ss_assign.to_csv(
        '{}/secondary_structure_summary.csv'.format(cdpro_out_dir)
    )
    logging.info('\n{}\n'.format(ss_assign))


if __name__ == '__main__':
    main()
|
{"hexsha": "86a3c9df4bf0bb47f31b835dda5651f4edd77693", "size": 23702, "ext": "py", "lang": "Python", "max_stars_repo_path": "cdgo/__main__.py", "max_stars_repo_name": "s-gordon/CDGo", "max_stars_repo_head_hexsha": "7bd1b3a6780f70f1237a7f0cac5e112c6b804100", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-01-24T20:52:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-24T20:52:19.000Z", "max_issues_repo_path": "cdgo/__main__.py", "max_issues_repo_name": "s-gordon/CDGo", "max_issues_repo_head_hexsha": "7bd1b3a6780f70f1237a7f0cac5e112c6b804100", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2015-06-18T06:09:37.000Z", "max_issues_repo_issues_event_max_datetime": "2017-09-07T02:48:44.000Z", "max_forks_repo_path": "cdgo/__main__.py", "max_forks_repo_name": "s-gordon/CDGo", "max_forks_repo_head_hexsha": "7bd1b3a6780f70f1237a7f0cac5e112c6b804100", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9865047233, "max_line_length": 79, "alphanum_fraction": 0.5759007679, "include": true, "reason": "import numpy", "num_tokens": 5974}
|
[STATEMENT]
lemma sphere_cball [simp,intro]: "sphere z r \<subseteq> cball z r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sphere z r \<subseteq> cball z r
[PROOF STEP]
by force
|
{"llama_tokens": 73, "file": null, "length": 1}
|
# Multiple linear regression on student semester marks via the normal
# equation, cross-checked against scikit-learn's LinearRegression.
import pandas
# import scipy
import numpy
from numpy import array
from numpy.linalg import inv
from sklearn.preprocessing import Normalizer
# BUG FIX: the names below were used without being imported in the
# original script, raising NameError at runtime.
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt

# load CSV using Pandas
filename = 'mockData.csv'
names = ['name', 'sem1', 'sem2', 'sem3', 'sem4', 'sem5', 'sem6', 'sem7', 'sem8', 'dist', 'hour', 'tuition', 'hobby', 'gender', 'sep_room', 'competitive', 'higher_stud', 'campus', 'extra', 'cet_score']
data = pandas.read_csv(filename, names=names)
# print(data.shape)
# separate array into input and output components (columns 1-7: sem1..sem7;
# column 8: sem8), skipping the header row
X = data.values[1:, 1:8]
Y = data.values[1:, 8]
X = X.astype(float)
Y = Y.astype(float)
# Ordinary least squares via the normal equation: b = (X'X)^-1 X'y
b = inv(X.T.dot(X)).dot(X.T).dot(Y)
print('co-efficients: ', b)
x = array([7.42, 8.31, 7.48, 7.04, 6.68, 7.35, 8.46])
# x = x.reshape((len(x), 1))
# predict using coefficients
y = x.dot(b)
print('predicted pointer: ', y)
# CROSS-VALIDATION
X_train, X_test, y_train, y_test = train_test_split(X, Y, train_size = 0.85)
regression = LinearRegression()
r2 = regression.fit(X_train, y_train)
y_pred = regression.predict(X_test)
print(y_pred)
#print(y_test)
accuracy = r2_score(y_test, y_pred)*100
print("Accuracy is :" , accuracy)
scores = cross_val_score(regression, X_train, y_train, cv=4, scoring='neg_mean_squared_error')
print(scores)
print("The mean score is: " , scores.mean())
plt.scatter(y_pred, y_test , s=30, c='r', marker='+', zorder=10)
plt.xlabel("Predicted Values")
plt.ylabel("Actual Values")
plt.show()
|
{"hexsha": "76a84dea44db88e1db5052d1d64286fe0e87cda4", "size": 1526, "ext": "py", "lang": "Python", "max_stars_repo_path": "linear regression/linearRegression.py", "max_stars_repo_name": "harmitsampat96/Machine-Learning", "max_stars_repo_head_hexsha": "f7b6bf1ae07a9bc53cdb79660068011452eb1731", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "linear regression/linearRegression.py", "max_issues_repo_name": "harmitsampat96/Machine-Learning", "max_issues_repo_head_hexsha": "f7b6bf1ae07a9bc53cdb79660068011452eb1731", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "linear regression/linearRegression.py", "max_forks_repo_name": "harmitsampat96/Machine-Learning", "max_forks_repo_head_hexsha": "f7b6bf1ae07a9bc53cdb79660068011452eb1731", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7924528302, "max_line_length": 201, "alphanum_fraction": 0.6782437746, "include": true, "reason": "import numpy,from numpy,import scipy", "num_tokens": 448}
|
'''
'''
import numpy as np
import fusilib.io.spikes2 as iospikes
time_locked_events_matrix = iospikes.time_locked_spike_matrix
def time_locked_delay_matrix(event_ids,
                             oldtimes,
                             newtimes,
                             dt,
                             delay_window=(0, 2),
                             verbose=False):
    '''Build a stack of time-locked event matrices, one per delay.

    Parameters
    ----------
    event_ids (1D np.ndarray): Vector containing unique `nevents` (n,)
    oldtimes (1D np.ndarray) : Event onsets (n,)
    newtimes (1D np.ndarray) : New sample times (n_newtimes,)
    dt (scalar) : New sample rate
    delay_window (tuple) : Delays for ``newtimes``: (delay_min, delay_max)
        If ``0`` or ``None``, no delays are applied.

    Returns
    -------
    delay_times (1D np.ndarray) : Delays in seconds (ndelays,)
    delay_matrix (3D np.ndarray): Delay matrix of shape (ndelays, n_newtimes, nevents)
    '''
    # Treat 0/None as "a single zero delay" spanning one dt-wide bin.
    if (delay_window == 0) or (delay_window is None):
        delay_window = (0, dt)
    lo = int(min(delay_window) / dt)
    hi = int(max(delay_window) / dt)
    delay_steps = np.arange(lo, hi)
    delay_times = delay_steps * dt
    ndelays = len(delay_steps)
    if verbose:
        print('Delays [sec]:', delay_times)
    stacked = []
    for ddx, tdelay in enumerate(delay_times):
        if verbose:
            print('Working on delay %2i/%i: %10.05f[sec]' % (ddx + 1, ndelays, tdelay))
        # Shift the sample grid back by this delay and re-bin the events.
        stacked.append(time_locked_events_matrix(oldtimes,
                                                 event_ids,
                                                 newtimes - tdelay,
                                                 duration=dt,
                                                 dtype=np.float32))
    return delay_times, np.asarray(stacked)
class EventsFromTrain(object):
    '''Easy computation of events responses.

    Wraps parallel (event_times, event_ids) vectors in the spike-train
    machinery from ``fusilib.io.spikes2`` so that PSTHs and delay
    (design) matrices can be computed for arbitrary event trains.

    >>> events_object = EventsFromTrain(times_vector, evid_vector)
    '''
    def __init__(self, event_times,
                 event_ids,
                 dt=0.05,
                 nevents=None,
                 event_values=None):
        '''
        Parameters
        ----------
        event_times : 1D np.ndarray
            Onset times in seconds, one per event occurrence.
        event_ids : 1D np.ndarray
            Identity of the event at each onset (same length).
        dt : scalar
            Default sample rate in seconds.
        nevents : int, optional
            Number of unique event types; inferred from ``event_ids``
            when None.
        event_values : 1D np.ndarray, optional
            Per-onset magnitudes (treated like spike counts).
        '''
        assert isinstance(event_times, np.ndarray)
        assert isinstance(event_ids, np.ndarray)
        # sort in time (all parallel vectors are reordered together)
        order = np.argsort(event_times)
        event_times = event_times[order]
        event_ids = event_ids[order]
        if event_values is not None:
            event_values = event_values[order]
        if nevents is None:
            nevents = len(np.unique(event_ids))
        # under the hood, this uses the probe spikes machinery ;)
        self.view = iospikes.ProbeSpikes(event_times,
                                         event_ids,
                                         nclusters=nevents,
                                         spike_counts=event_values)
        self.event_times = event_times
        self.event_ids = event_ids
        self.nevents = nevents
        self.nonsets = len(event_times)
        self.dt = dt
        # get sparse matrix representation of data
        self.sparse_matrix = self.view.get_sparse_spike_matrix()
        # size of the sparse data payload in mebibytes (for __repr__)
        self.MBsize = self.view.sparse_matrix.data.nbytes/(2**20)
    def __call__(self, event_times, duration, dt=None):
        '''Sample the events at the requested times and durations
        Parameters
        ----------
        event_times : Vector of timestamps in seconds
        duration : Duration after event onset
        dt : Bin size in seconds (defaults to ``self.dt``)
        Returns
        -------
        times (ntimepoints, ndelays) :
        delay_matrix (ntimepoints, ndelays, nevents) :
        '''
        dt = self.dt if dt is None else dt
        times, delays = self.view.psth(event_times, dt=dt, duration=duration)
        delays = delays.transpose(1,0,2) # (ntimepoints, ndelays, nevents)
        times = times.T # (ntimepoints, ndelays)
        return times, delays
    def __repr__(self):
        # Summarise onset/event counts and memory footprint for debugging.
        info = (__name__, type(self).__name__, self.nonsets, self.nevents, self.MBsize)
        return '<%s.%s (nonsets=%i, nevents=%i) [%0.02fMB]>'%info
    def get_delay_matrix(self, newtimes, delay_window=(0,2), dt=None, verbose=False):
        '''Construct a delay matrix for these events
        Horizontally stacking the delays yields a design matrix.
        Parameters
        ----------
        newtimes (1D np.ndarray):
            Sample the onsets at these times (ntimepoints,)
        delay_window (tuple):
            Delays in time [sec]: (delay_min, delay_max)
            If ``0`` or ``None``, no delays are applied.
        dt (scalar, optional): New sample rate [sec]
            Defaults to ``self.dt``
        Returns
        -------
        delay_times (1D np.ndarray) : Delays in seconds (ndelays,)
        delay_matrix (3D np.ndarray): Delay matrix (ndelays, ntimepoints, nevents)
        '''
        dt = self.dt if dt is None else dt
        if verbose: print('Sample rate: %f[sec]'%dt)
        # Delegate to the module-level helper, which loops over delays.
        delay_times, delay_matrix = time_locked_delay_matrix(self.event_ids,
                                                             self.event_times,
                                                             newtimes,
                                                             dt=dt,
                                                             delay_window=delay_window,
                                                             verbose=verbose)
        return delay_times, delay_matrix
class Events(EventsFromTrain):
    '''Easy computation of events responses.

    Convenience constructor taking a list of per-event onset sequences
    rather than parallel (times, ids) vectors.
    '''
    def __init__(self, event_onsets, dt=0.05, event_values=None):
        '''
        Parameters
        ----------
        event_onsets : list of array-like
            One sequence of onset times [sec] per event type. A single
            flat sequence of numbers is treated as one event type.
        dt : scalar
            Default sample rate in seconds.
        event_values : list of array-like, optional
            Per-onset magnitudes, parallel to ``event_onsets``.
        '''
        if isinstance(event_onsets[0], (float, int)):
            # a single event as a list or array
            event_onsets = [event_onsets]
        event_times = []
        event_ids = []
        event_vals = []
        # Flatten per-event lists into parallel times/ids(/values) vectors.
        for edx, onsets in enumerate(event_onsets):
            event_times.append(np.asarray(onsets))
            event_ids.append(np.ones(len(onsets))*edx)
            if event_values is not None:
                event_vals.append(np.asarray(event_values[edx]))
        event_ids = np.hstack(event_ids)
        event_times = np.hstack(event_times)
        nevents = len(event_onsets)
        if event_values is not None:
            event_vals = np.hstack(event_vals)
        counts = None if event_values is None else event_vals
        # BUG FIX: the original called super(type(self), self), which
        # recurses infinitely when this class is itself subclassed;
        # name the class explicitly (Python-2 compatible form).
        super(Events, self).__init__(event_times,
                                     event_ids,
                                     dt=dt,
                                     nevents=nevents,
                                     event_values=counts)
|
{"hexsha": "fed41b160b0cb50339d2cbb80cdad1869c8ac2fa", "size": 6944, "ext": "py", "lang": "Python", "max_stars_repo_path": "fusilib/io/events.py", "max_stars_repo_name": "anwarnunez/fusi", "max_stars_repo_head_hexsha": "c15ea2567e9fca92b1a6a1130eb396825d0f76cf", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fusilib/io/events.py", "max_issues_repo_name": "anwarnunez/fusi", "max_issues_repo_head_hexsha": "c15ea2567e9fca92b1a6a1130eb396825d0f76cf", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fusilib/io/events.py", "max_forks_repo_name": "anwarnunez/fusi", "max_forks_repo_head_hexsha": "c15ea2567e9fca92b1a6a1130eb396825d0f76cf", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2022-02-14T02:50:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T08:23:48.000Z", "avg_line_length": 36.1666666667, "max_line_length": 99, "alphanum_fraction": 0.5375864055, "include": true, "reason": "import numpy", "num_tokens": 1482}
|
[STATEMENT]
lemma bv_sub_length: "length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
proof (cases "bv_to_int w2 = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. bv_to_int w2 = 0 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
2. bv_to_int w2 \<noteq> 0 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
assume p: "bv_to_int w2 = 0"
[PROOF STATE]
proof (state)
this:
bv_to_int w2 = 0
goal (2 subgoals):
1. bv_to_int w2 = 0 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
2. bv_to_int w2 \<noteq> 0 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
proof (simp add: bv_sub_def bv_sadd_def bv_uminus_def p)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. length (norm_signed w1) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
have "length (norm_signed w1) \<le> length w1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (norm_signed w1) \<le> length w1
[PROOF STEP]
by (rule norm_signed_length)
[PROOF STATE]
proof (state)
this:
length (norm_signed w1) \<le> length w1
goal (1 subgoal):
1. length (norm_signed w1) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
length (norm_signed w1) \<le> length w1
goal (1 subgoal):
1. length (norm_signed w1) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
have "... \<le> max (length w1) (length w2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length w1 \<le> max (length w1) (length w2)
[PROOF STEP]
by (rule max.cobounded1)
[PROOF STATE]
proof (state)
this:
length w1 \<le> max (length w1) (length w2)
goal (1 subgoal):
1. length (norm_signed w1) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
length w1 \<le> max (length w1) (length w2)
goal (1 subgoal):
1. length (norm_signed w1) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
have "... \<le> Suc (max (length w1) (length w2))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. max (length w1) (length w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
by arith
[PROOF STATE]
proof (state)
this:
max (length w1) (length w2) \<le> Suc (max (length w1) (length w2))
goal (1 subgoal):
1. length (norm_signed w1) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
length (norm_signed w1) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
show "length (norm_signed w1) \<le> Suc (max (length w1) (length w2))"
[PROOF STATE]
proof (prove)
using this:
length (norm_signed w1) \<le> Suc (max (length w1) (length w2))
goal (1 subgoal):
1. length (norm_signed w1) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
length (norm_signed w1) \<le> Suc (max (length w1) (length w2))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
goal (1 subgoal):
1. bv_to_int w2 \<noteq> 0 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. bv_to_int w2 \<noteq> 0 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
assume "bv_to_int w2 \<noteq> 0"
[PROOF STATE]
proof (state)
this:
bv_to_int w2 \<noteq> 0
goal (1 subgoal):
1. bv_to_int w2 \<noteq> 0 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
hence "0 < length w2"
[PROOF STATE]
proof (prove)
using this:
bv_to_int w2 \<noteq> 0
goal (1 subgoal):
1. 0 < length w2
[PROOF STEP]
by (cases w2,simp_all)
[PROOF STATE]
proof (state)
this:
0 < length w2
goal (1 subgoal):
1. bv_to_int w2 \<noteq> 0 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
hence lmw: "0 < max (length w1) (length w2)"
[PROOF STATE]
proof (prove)
using this:
0 < length w2
goal (1 subgoal):
1. 0 < max (length w1) (length w2)
[PROOF STEP]
by arith
[PROOF STATE]
proof (state)
this:
0 < max (length w1) (length w2)
goal (1 subgoal):
1. bv_to_int w2 \<noteq> 0 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
let ?Q = "bv_to_int w1 - bv_to_int w2"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. bv_to_int w2 \<noteq> 0 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
have "0 < ?Q \<or> ?Q = 0 \<or> ?Q = -1 \<or> ?Q < -1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < bv_to_int w1 - bv_to_int w2 \<or> bv_to_int w1 - bv_to_int w2 = 0 \<or> bv_to_int w1 - bv_to_int w2 = - 1 \<or> bv_to_int w1 - bv_to_int w2 < - 1
[PROOF STEP]
by arith
[PROOF STATE]
proof (state)
this:
0 < bv_to_int w1 - bv_to_int w2 \<or> bv_to_int w1 - bv_to_int w2 = 0 \<or> bv_to_int w1 - bv_to_int w2 = - 1 \<or> bv_to_int w1 - bv_to_int w2 < - 1
goal (1 subgoal):
1. bv_to_int w2 \<noteq> 0 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
0 < bv_to_int w1 - bv_to_int w2 \<or> bv_to_int w1 - bv_to_int w2 = 0 \<or> bv_to_int w1 - bv_to_int w2 = - 1 \<or> bv_to_int w1 - bv_to_int w2 < - 1
goal (1 subgoal):
1. length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
proof safe
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. 0 < bv_to_int w1 - bv_to_int w2 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
2. bv_to_int w1 - bv_to_int w2 = 0 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
3. bv_to_int w1 - bv_to_int w2 = - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
4. bv_to_int w1 - bv_to_int w2 < - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
assume "?Q = 0"
[PROOF STATE]
proof (state)
this:
bv_to_int w1 - bv_to_int w2 = 0
goal (4 subgoals):
1. 0 < bv_to_int w1 - bv_to_int w2 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
2. bv_to_int w1 - bv_to_int w2 = 0 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
3. bv_to_int w1 - bv_to_int w2 = - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
4. bv_to_int w1 - bv_to_int w2 < - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
bv_to_int w1 - bv_to_int w2 = 0
goal (1 subgoal):
1. length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
by (simp add: bv_sub_def bv_sadd_def bv_uminus_def)
[PROOF STATE]
proof (state)
this:
length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
goal (3 subgoals):
1. 0 < bv_to_int w1 - bv_to_int w2 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
2. bv_to_int w1 - bv_to_int w2 = - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
3. bv_to_int w1 - bv_to_int w2 < - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. 0 < bv_to_int w1 - bv_to_int w2 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
2. bv_to_int w1 - bv_to_int w2 = - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
3. bv_to_int w1 - bv_to_int w2 < - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
assume "?Q = -1"
[PROOF STATE]
proof (state)
this:
bv_to_int w1 - bv_to_int w2 = - 1
goal (3 subgoals):
1. 0 < bv_to_int w1 - bv_to_int w2 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
2. bv_to_int w1 - bv_to_int w2 = - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
3. bv_to_int w1 - bv_to_int w2 < - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
bv_to_int w1 - bv_to_int w2 = - 1
goal (1 subgoal):
1. length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
by (simp add: bv_sub_def bv_sadd_def bv_uminus_def)
[PROOF STATE]
proof (state)
this:
length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
goal (2 subgoals):
1. 0 < bv_to_int w1 - bv_to_int w2 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
2. bv_to_int w1 - bv_to_int w2 < - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. 0 < bv_to_int w1 - bv_to_int w2 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
2. bv_to_int w1 - bv_to_int w2 < - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
assume p: "0 < ?Q"
[PROOF STATE]
proof (state)
this:
0 < bv_to_int w1 - bv_to_int w2
goal (2 subgoals):
1. 0 < bv_to_int w1 - bv_to_int w2 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
2. bv_to_int w1 - bv_to_int w2 < - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
apply (simp add: bv_sub_def bv_sadd_def bv_uminus_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (int_to_bv (bv_to_int w1 - bv_to_int w2)) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
apply (rule length_int_to_bv_upper_limit_gt0)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. 0 < bv_to_int w1 - bv_to_int w2
2. bv_to_int w1 - bv_to_int w2 \<le> 2 ^ (Suc (max (length w1) (length w2)) - 1) - 1
[PROOF STEP]
apply (rule p)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bv_to_int w1 - bv_to_int w2 \<le> 2 ^ (Suc (max (length w1) (length w2)) - 1) - 1
[PROOF STEP]
proof simp
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. bv_to_int w1 - bv_to_int w2 < 2 ^ max (length w1) (length w2)
[PROOF STEP]
from bv_to_int_lower_range [of w2]
[PROOF STATE]
proof (chain)
picking this:
- (2 ^ (length w2 - 1)) \<le> bv_to_int w2
[PROOF STEP]
have v2: "- bv_to_int w2 \<le> 2 ^ (length w2 - 1)"
[PROOF STATE]
proof (prove)
using this:
- (2 ^ (length w2 - 1)) \<le> bv_to_int w2
goal (1 subgoal):
1. - bv_to_int w2 \<le> 2 ^ (length w2 - 1)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
- bv_to_int w2 \<le> 2 ^ (length w2 - 1)
goal (1 subgoal):
1. bv_to_int w1 - bv_to_int w2 < 2 ^ max (length w1) (length w2)
[PROOF STEP]
have "bv_to_int w1 + - bv_to_int w2 < (2 ^ (length w1 - 1)) + (2 ^ (length w2 - 1))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bv_to_int w1 + - bv_to_int w2 < 2 ^ (length w1 - 1) + 2 ^ (length w2 - 1)
[PROOF STEP]
apply (rule add_less_le_mono)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. bv_to_int w1 < 2 ^ (length w1 - 1)
2. - bv_to_int w2 \<le> 2 ^ (length w2 - 1)
[PROOF STEP]
apply (rule bv_to_int_upper_range [of w1])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - bv_to_int w2 \<le> 2 ^ (length w2 - 1)
[PROOF STEP]
apply (rule v2)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
bv_to_int w1 + - bv_to_int w2 < 2 ^ (length w1 - 1) + 2 ^ (length w2 - 1)
goal (1 subgoal):
1. bv_to_int w1 - bv_to_int w2 < 2 ^ max (length w1) (length w2)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
bv_to_int w1 + - bv_to_int w2 < 2 ^ (length w1 - 1) + 2 ^ (length w2 - 1)
goal (1 subgoal):
1. bv_to_int w1 - bv_to_int w2 < 2 ^ max (length w1) (length w2)
[PROOF STEP]
have "... \<le> 2 ^ max (length w1) (length w2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 2 ^ (length w1 - 1) + 2 ^ (length w2 - 1) \<le> 2 ^ max (length w1) (length w2)
[PROOF STEP]
apply (rule adder_helper)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < max (length w1) (length w2)
[PROOF STEP]
apply (rule lmw)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
2 ^ (length w1 - 1) + 2 ^ (length w2 - 1) \<le> 2 ^ max (length w1) (length w2)
goal (1 subgoal):
1. bv_to_int w1 - bv_to_int w2 < 2 ^ max (length w1) (length w2)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
bv_to_int w1 + - bv_to_int w2 < 2 ^ max (length w1) (length w2)
[PROOF STEP]
show "?Q < 2 ^ max (length w1) (length w2)"
[PROOF STATE]
proof (prove)
using this:
bv_to_int w1 + - bv_to_int w2 < 2 ^ max (length w1) (length w2)
goal (1 subgoal):
1. bv_to_int w1 - bv_to_int w2 < 2 ^ max (length w1) (length w2)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
bv_to_int w1 - bv_to_int w2 < 2 ^ max (length w1) (length w2)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
goal (1 subgoal):
1. bv_to_int w1 - bv_to_int w2 < - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. bv_to_int w1 - bv_to_int w2 < - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
assume p: "?Q < -1"
[PROOF STATE]
proof (state)
this:
bv_to_int w1 - bv_to_int w2 < - 1
goal (1 subgoal):
1. bv_to_int w1 - bv_to_int w2 < - 1 \<Longrightarrow> length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
apply (simp add: bv_sub_def bv_sadd_def bv_uminus_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (int_to_bv (bv_to_int w1 - bv_to_int w2)) \<le> Suc (max (length w1) (length w2))
[PROOF STEP]
apply (rule length_int_to_bv_upper_limit_lem1)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. bv_to_int w1 - bv_to_int w2 < - 1
2. - (2 ^ (Suc (max (length w1) (length w2)) - 1)) \<le> bv_to_int w1 - bv_to_int w2
[PROOF STEP]
apply (rule p)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - (2 ^ (Suc (max (length w1) (length w2)) - 1)) \<le> bv_to_int w1 - bv_to_int w2
[PROOF STEP]
proof simp
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. - (2 ^ max (length w1) (length w2)) \<le> bv_to_int w1 - bv_to_int w2
[PROOF STEP]
have "(2 ^ (length w1 - 1)) + 2 ^ (length w2 - 1) \<le> (2::int) ^ max (length w1) (length w2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 2 ^ (length w1 - 1) + 2 ^ (length w2 - 1) \<le> 2 ^ max (length w1) (length w2)
[PROOF STEP]
apply (rule adder_helper)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < max (length w1) (length w2)
[PROOF STEP]
apply (rule lmw)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
2 ^ (length w1 - 1) + 2 ^ (length w2 - 1) \<le> 2 ^ max (length w1) (length w2)
goal (1 subgoal):
1. - (2 ^ max (length w1) (length w2)) \<le> bv_to_int w1 - bv_to_int w2
[PROOF STEP]
hence "-((2::int) ^ max (length w1) (length w2)) \<le> - (2 ^ (length w1 - 1)) + -(2 ^ (length w2 - 1))"
[PROOF STATE]
proof (prove)
using this:
2 ^ (length w1 - 1) + 2 ^ (length w2 - 1) \<le> 2 ^ max (length w1) (length w2)
goal (1 subgoal):
1. - (2 ^ max (length w1) (length w2)) \<le> - (2 ^ (length w1 - 1)) + - (2 ^ (length w2 - 1))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
- (2 ^ max (length w1) (length w2)) \<le> - (2 ^ (length w1 - 1)) + - (2 ^ (length w2 - 1))
goal (1 subgoal):
1. - (2 ^ max (length w1) (length w2)) \<le> bv_to_int w1 - bv_to_int w2
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
- (2 ^ max (length w1) (length w2)) \<le> - (2 ^ (length w1 - 1)) + - (2 ^ (length w2 - 1))
goal (1 subgoal):
1. - (2 ^ max (length w1) (length w2)) \<le> bv_to_int w1 - bv_to_int w2
[PROOF STEP]
have "- (2 ^ (length w1 - 1)) + -(2 ^ (length w2 - 1)) \<le> bv_to_int w1 + -bv_to_int w2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - (2 ^ (length w1 - 1)) + - (2 ^ (length w2 - 1)) \<le> bv_to_int w1 + - bv_to_int w2
[PROOF STEP]
apply (rule add_mono)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. - (2 ^ (length w1 - 1)) \<le> bv_to_int w1
2. - (2 ^ (length w2 - 1)) \<le> - bv_to_int w2
[PROOF STEP]
apply (rule bv_to_int_lower_range [of w1])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - (2 ^ (length w2 - 1)) \<le> - bv_to_int w2
[PROOF STEP]
using bv_to_int_upper_range [of w2]
[PROOF STATE]
proof (prove)
using this:
bv_to_int w2 < 2 ^ (length w2 - 1)
goal (1 subgoal):
1. - (2 ^ (length w2 - 1)) \<le> - bv_to_int w2
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
- (2 ^ (length w1 - 1)) + - (2 ^ (length w2 - 1)) \<le> bv_to_int w1 + - bv_to_int w2
goal (1 subgoal):
1. - (2 ^ max (length w1) (length w2)) \<le> bv_to_int w1 - bv_to_int w2
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
- (2 ^ max (length w1) (length w2)) \<le> bv_to_int w1 + - bv_to_int w2
[PROOF STEP]
show "- (2^max (length w1) (length w2)) \<le> ?Q"
[PROOF STATE]
proof (prove)
using this:
- (2 ^ max (length w1) (length w2)) \<le> bv_to_int w1 + - bv_to_int w2
goal (1 subgoal):
1. - (2 ^ max (length w1) (length w2)) \<le> bv_to_int w1 - bv_to_int w2
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
- (2 ^ max (length w1) (length w2)) \<le> bv_to_int w1 - bv_to_int w2
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
length (bv_sub w1 w2) \<le> Suc (max (length w1) (length w2))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 8809, "file": "RSAPSS_Word", "length": 84}
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import json
import numpy as np
import os
import torch
import datasets.registry
from foundations import paths
from foundations.step import Step
from lottery.runner import LotteryRunner
import models.registry
from pruning.mask import Mask
from testing import test_case
class TestRunner(test_case.TestCase):
    """Integration tests for LotteryRunner: file layout, masks, sparsity reports,
    and weight consistency across pruning levels, with and without pretraining."""

    def setUp(self):
        super(TestRunner, self).setUp()
        self.desc = models.registry.get_default_hparams('cifar_resnet_8_2')

    @staticmethod
    def _load_state(path):
        """Load a saved model state dict and convert each tensor to a numpy array."""
        return {k: v.numpy() for k, v in torch.load(path).items()}

    @staticmethod
    def _load_sparsity_report(level_root):
        """Parse the JSON sparsity report written for a pruning level."""
        with open(paths.sparsity_report(level_root)) as fp:
            return json.load(fp)

    def to_step(self, s):
        """Convert a step string (e.g. '2it') into a Step for the current dataset."""
        return Step.from_str(s, datasets.registry.iterations_per_epoch(self.desc.dataset_hparams))

    def assertLevelFilesPresent(self, level_root, start_step, end_step, masks=False):
        """Assert that a level directory contains the expected artifacts.

        Checks for the start/end model checkpoints and the logger; when
        ``masks`` is True, also checks for the mask and sparsity report.
        """
        with self.subTest(level_root=level_root):
            self.assertTrue(os.path.exists(paths.model(level_root, start_step)))
            self.assertTrue(os.path.exists(paths.model(level_root, end_step)))
            self.assertTrue(os.path.exists(paths.logger(level_root)))
            if masks:
                self.assertTrue(os.path.exists(paths.mask(level_root)))
                self.assertTrue(os.path.exists(paths.sparsity_report(level_root)))

    def test_level0_2it(self):
        """Level 0 run: mask must be all ones and the report must show no pruning."""
        self.desc.training_hparams.training_steps = '2it'
        LotteryRunner(replicate=2, levels=0, desc=self.desc, verbose=False).run()
        level_root = self.desc.run_path(2, 0)

        # Ensure the important files are there.
        self.assertLevelFilesPresent(level_root, self.to_step('0it'), self.to_step('2it'))

        # Ensure that the mask is all 1's.
        mask = Mask.load(level_root)
        for v in mask.numpy().values():
            self.assertTrue(np.all(np.equal(v, 1)))

        sparsity_report = self._load_sparsity_report(level_root)
        self.assertEqual(sparsity_report['unpruned'] / sparsity_report['total'], 1)

    def test_level3_2it(self):
        """Three pruning levels: each level keeps ~0.8^level of the weights and
        starts from a masked copy of the level-0 initialization."""
        self.desc.training_hparams.training_steps = '2it'
        LotteryRunner(replicate=2, levels=3, desc=self.desc, verbose=False).run()

        level0_weights = self._load_state(
            paths.model(self.desc.run_path(2, 0), self.to_step('0it')))

        for level in range(0, 4):
            level_root = self.desc.run_path(2, level)
            self.assertLevelFilesPresent(level_root, self.to_step('0it'), self.to_step('2it'))

            # Check the mask density against the expected pruning fraction.
            pct = 0.8**level
            mask = Mask.load(level_root).numpy()
            total, total_present = 0.0, 0.0
            for v in mask.values():
                total += v.size
                total_present += np.sum(v)
            self.assertTrue(np.allclose(pct, total_present / total, atol=0.01))

            # Check the sparsity report.
            sparsity_report = self._load_sparsity_report(level_root)
            self.assertTrue(np.allclose(pct, sparsity_report['unpruned'] / sparsity_report['total'], atol=0.01))

            # Ensure that the initial weights are a masked version of the level 0 weights.
            level_weights = self._load_state(paths.model(level_root, self.to_step('0it')))
            self.assertStateEqual(level_weights, {k: v * mask.get(k, 1) for k, v in level0_weights.items()})

    def test_level3_4it_pretrain2it(self):
        """With pretraining: level-0 training starts from the pretrained weights,
        and each level starts from a masked copy of them."""
        self.desc.pretrain_dataset_hparams = copy.deepcopy(self.desc.dataset_hparams)
        self.desc.pretrain_training_hparams = copy.deepcopy(self.desc.training_hparams)
        self.desc.pretrain_training_hparams.training_steps = '2it'
        self.desc.training_hparams.training_steps = '4it'
        LotteryRunner(replicate=2, levels=3, desc=self.desc, verbose=False).run()

        # Check that the pretrain weights are present.
        pretrain_root = self.desc.run_path(2, 'pretrain')
        self.assertLevelFilesPresent(pretrain_root, self.to_step('0it'), self.to_step('2it'), masks=False)

        # Load the pretrain and level0 start weights to ensure they're the same.
        pretrain_end_weights = self._load_state(
            paths.model(self.desc.run_path(2, 'pretrain'), self.desc.pretrain_end_step))
        level0_weights = self._load_state(
            paths.model(self.desc.run_path(2, 0), self.desc.train_start_step))
        self.assertStateEqual(pretrain_end_weights, level0_weights)

        # Evaluate each of the pruning levels.
        for level in range(0, 2):
            level_root = self.desc.run_path(2, level)
            self.assertLevelFilesPresent(level_root, self.to_step('2it'), self.to_step('4it'))

            # Ensure that the initial weights are a masked version of the level 0 weights
            # (which are identical to the weights at the end of pretraining).
            mask = Mask.load(level_root).numpy()
            level_weights = self._load_state(paths.model(level_root, self.desc.train_start_step))
            self.assertStateEqual(level_weights, {k: v * mask.get(k, 1) for k, v in level0_weights.items()})

    def test_level3_4it_pretrain2it_different_output_size(self):
        """Pretraining with a different label space: all weights except the output
        layers must carry over from pretraining to level 0."""
        self.desc.pretrain_dataset_hparams = copy.deepcopy(self.desc.dataset_hparams)
        self.desc.pretrain_training_hparams = copy.deepcopy(self.desc.training_hparams)
        self.desc.pretrain_training_hparams.training_steps = '2it'
        self.desc.pretrain_dataset_hparams.unsupervised_labels = 'rotation'
        self.desc.training_hparams.training_steps = '4it'
        LotteryRunner(replicate=2, levels=3, desc=self.desc, verbose=False).run()

        # Check that the pretrain weights are present.
        pretrain_root = self.desc.run_path(2, 'pretrain')
        self.assertLevelFilesPresent(pretrain_root, self.to_step('0it'), self.to_step('2it'), masks=False)

        # Load the pretrain and level0 start weights to ensure they're the same.
        pretrain_end_weights = self._load_state(
            paths.model(self.desc.run_path(2, 'pretrain'), self.desc.pretrain_end_step))
        level0_weights = self._load_state(
            paths.model(self.desc.run_path(2, 0), self.desc.train_start_step))

        # All weights should be identical except for the output layers.
        output_layer_names = models.registry.get(self.desc.model_hparams).output_layer_names
        self.assertStateEqual({k: v for k, v in pretrain_end_weights.items() if k not in output_layer_names},
                              {k: v for k, v in level0_weights.items() if k not in output_layer_names})

        # Evaluate each of the pruning levels.
        for level in range(0, 2):
            level_root = self.desc.run_path(2, level)
            self.assertLevelFilesPresent(level_root, self.to_step('2it'), self.to_step('4it'))

            # Ensure that the initial weights are a masked version of the level 0 weights
            # (which are identical to the weights at the end of pretraining).
            mask = Mask.load(level_root).numpy()
            level_weights = self._load_state(paths.model(level_root, self.desc.train_start_step))
            self.assertStateEqual(level_weights, {k: v * mask.get(k, 1) for k, v in level0_weights.items()})
# Discover and run this module's test cases when it is executed directly.
test_case.main()
|
{"hexsha": "0ea598a00282904dd5382783896916a98f270f10", "size": 7967, "ext": "py", "lang": "Python", "max_stars_repo_path": "lottery/test/test_runner.py", "max_stars_repo_name": "sbam13/open_lth", "max_stars_repo_head_hexsha": "d8c8d450cc8229afed54b26f77b91c3fe0c3f339", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lottery/test/test_runner.py", "max_issues_repo_name": "sbam13/open_lth", "max_issues_repo_head_hexsha": "d8c8d450cc8229afed54b26f77b91c3fe0c3f339", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lottery/test/test_runner.py", "max_forks_repo_name": "sbam13/open_lth", "max_forks_repo_head_hexsha": "d8c8d450cc8229afed54b26f77b91c3fe0c3f339", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.4, "max_line_length": 113, "alphanum_fraction": 0.6646165432, "include": true, "reason": "import numpy", "num_tokens": 1816}
|
[STATEMENT]
lemma no_repetition_list:
assumes "set ws \<subseteq> {a,b}"
and not_per: "\<not> ws \<le>p [a,b] \<cdot> ws" "\<not> ws \<le>p [b,a] \<cdot> ws"
and not_square: "\<not> [a,a] \<le>f ws" and "\<not> [b,b] \<le>f ws"
shows False
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. False
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
set ws \<subseteq> {a, b}
\<not> ws \<le>p [a, b] \<cdot> ws
\<not> ws \<le>p [b, a] \<cdot> ws
\<not> [a, a] \<le>f ws
\<not> [b, b] \<le>f ws
goal (1 subgoal):
1. False
[PROOF STEP]
proof (induction ws, simp)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>aa ws. \<lbrakk>\<lbrakk>set ws \<subseteq> {a, b}; \<not> ws \<le>p [a, b] \<cdot> ws; \<not> ws \<le>p [b, a] \<cdot> ws; \<not> [a, a] \<le>f ws; \<not> [b, b] \<le>f ws\<rbrakk> \<Longrightarrow> False; set (aa # ws) \<subseteq> {a, b}; \<not> aa # ws \<le>p [a, b] \<cdot> aa # ws; \<not> aa # ws \<le>p [b, a] \<cdot> aa # ws; \<not> [a, a] \<le>f aa # ws; \<not> [b, b] \<le>f aa # ws\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
case (Cons d ws)
[PROOF STATE]
proof (state)
this:
\<lbrakk>set ws \<subseteq> {a, b}; \<not> ws \<le>p [a, b] \<cdot> ws; \<not> ws \<le>p [b, a] \<cdot> ws; \<not> [a, a] \<le>f ws; \<not> [b, b] \<le>f ws\<rbrakk> \<Longrightarrow> False
set (d # ws) \<subseteq> {a, b}
\<not> d # ws \<le>p [a, b] \<cdot> d # ws
\<not> d # ws \<le>p [b, a] \<cdot> d # ws
\<not> [a, a] \<le>f d # ws
\<not> [b, b] \<le>f d # ws
goal (1 subgoal):
1. \<And>aa ws. \<lbrakk>\<lbrakk>set ws \<subseteq> {a, b}; \<not> ws \<le>p [a, b] \<cdot> ws; \<not> ws \<le>p [b, a] \<cdot> ws; \<not> [a, a] \<le>f ws; \<not> [b, b] \<le>f ws\<rbrakk> \<Longrightarrow> False; set (aa # ws) \<subseteq> {a, b}; \<not> aa # ws \<le>p [a, b] \<cdot> aa # ws; \<not> aa # ws \<le>p [b, a] \<cdot> aa # ws; \<not> [a, a] \<le>f aa # ws; \<not> [b, b] \<le>f aa # ws\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. False
[PROOF STEP]
proof (rule "Cons.IH")
[PROOF STATE]
proof (state)
goal (5 subgoals):
1. set ws \<subseteq> {a, b}
2. \<not> ws \<le>p [a, b] \<cdot> ws
3. \<not> ws \<le>p [b, a] \<cdot> ws
4. \<not> [a, a] \<le>f ws
5. \<not> [b, b] \<le>f ws
[PROOF STEP]
show "set ws \<subseteq> {a,b}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set ws \<subseteq> {a, b}
[PROOF STEP]
using \<open>set (d # ws) \<subseteq> {a, b}\<close>
[PROOF STATE]
proof (prove)
using this:
set (d # ws) \<subseteq> {a, b}
goal (1 subgoal):
1. set ws \<subseteq> {a, b}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
set ws \<subseteq> {a, b}
goal (4 subgoals):
1. \<not> ws \<le>p [a, b] \<cdot> ws
2. \<not> ws \<le>p [b, a] \<cdot> ws
3. \<not> [a, a] \<le>f ws
4. \<not> [b, b] \<le>f ws
[PROOF STEP]
have "ws \<noteq> \<epsilon>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ws \<noteq> \<epsilon>
[PROOF STEP]
using "Cons.IH" Cons.prems
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>set ws \<subseteq> {a, b}; \<not> ws \<le>p [a, b] \<cdot> ws; \<not> ws \<le>p [b, a] \<cdot> ws; \<not> [a, a] \<le>f ws; \<not> [b, b] \<le>f ws\<rbrakk> \<Longrightarrow> False
set (d # ws) \<subseteq> {a, b}
\<not> d # ws \<le>p [a, b] \<cdot> d # ws
\<not> d # ws \<le>p [b, a] \<cdot> d # ws
\<not> [a, a] \<le>f d # ws
\<not> [b, b] \<le>f d # ws
goal (1 subgoal):
1. ws \<noteq> \<epsilon>
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
ws \<noteq> \<epsilon>
goal (4 subgoals):
1. \<not> ws \<le>p [a, b] \<cdot> ws
2. \<not> ws \<le>p [b, a] \<cdot> ws
3. \<not> [a, a] \<le>f ws
4. \<not> [b, b] \<le>f ws
[PROOF STEP]
from hd_tl[OF this]
[PROOF STATE]
proof (chain)
picking this:
[hd ws] \<cdot> tl ws = ws
[PROOF STEP]
have "hd ws \<noteq> d"
[PROOF STATE]
proof (prove)
using this:
[hd ws] \<cdot> tl ws = ws
goal (1 subgoal):
1. hd ws \<noteq> d
[PROOF STEP]
using Cons.prems(1,4-5) hd_pref[OF \<open>ws \<noteq> \<epsilon>\<close>]
[PROOF STATE]
proof (prove)
using this:
[hd ws] \<cdot> tl ws = ws
set (d # ws) \<subseteq> {a, b}
\<not> [a, a] \<le>f d # ws
\<not> [b, b] \<le>f d # ws
[hd ws] \<le>p ws
goal (1 subgoal):
1. hd ws \<noteq> d
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
hd ws \<noteq> d
goal (4 subgoals):
1. \<not> ws \<le>p [a, b] \<cdot> ws
2. \<not> ws \<le>p [b, a] \<cdot> ws
3. \<not> [a, a] \<le>f ws
4. \<not> [b, b] \<le>f ws
[PROOF STEP]
thus "\<not> [a, a] \<le>f ws" and "\<not> [b, b] \<le>f ws"
[PROOF STATE]
proof (prove)
using this:
hd ws \<noteq> d
goal (1 subgoal):
1. \<not> [a, a] \<le>f ws &&& \<not> [b, b] \<le>f ws
[PROOF STEP]
using Cons.prems(4-5)
[PROOF STATE]
proof (prove)
using this:
hd ws \<noteq> d
\<not> [a, a] \<le>f d # ws
\<not> [b, b] \<le>f d # ws
goal (1 subgoal):
1. \<not> [a, a] \<le>f ws &&& \<not> [b, b] \<le>f ws
[PROOF STEP]
unfolding sublist_code(3)
[PROOF STATE]
proof (prove)
using this:
hd ws \<noteq> d
\<not> ([a, a] \<le>p d # ws \<or> [a, a] \<le>f ws)
\<not> ([b, b] \<le>p d # ws \<or> [b, b] \<le>f ws)
goal (1 subgoal):
1. \<not> [a, a] \<le>f ws &&& \<not> [b, b] \<le>f ws
[PROOF STEP]
by blast+
[PROOF STATE]
proof (state)
this:
\<not> [a, a] \<le>f ws
\<not> [b, b] \<le>f ws
goal (2 subgoals):
1. \<not> ws \<le>p [a, b] \<cdot> ws
2. \<not> ws \<le>p [b, a] \<cdot> ws
[PROOF STEP]
show "\<not> ws \<le>p [a, b] \<cdot> ws"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> ws \<le>p [a, b] \<cdot> ws
[PROOF STEP]
proof (rule notI)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. ws \<le>p [a, b] \<cdot> ws \<Longrightarrow> False
[PROOF STEP]
assume "ws \<le>p [a, b] \<cdot> ws"
[PROOF STATE]
proof (state)
this:
ws \<le>p [a, b] \<cdot> ws
goal (1 subgoal):
1. ws \<le>p [a, b] \<cdot> ws \<Longrightarrow> False
[PROOF STEP]
from pref_hd_eq[OF this \<open>ws \<noteq> \<epsilon>\<close>]
[PROOF STATE]
proof (chain)
picking this:
hd ws = hd ([a, b] \<cdot> ws)
[PROOF STEP]
have "hd ws = a"
[PROOF STATE]
proof (prove)
using this:
hd ws = hd ([a, b] \<cdot> ws)
goal (1 subgoal):
1. hd ws = a
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
hd ws = a
goal (1 subgoal):
1. ws \<le>p [a, b] \<cdot> ws \<Longrightarrow> False
[PROOF STEP]
hence "d = b"
[PROOF STATE]
proof (prove)
using this:
hd ws = a
goal (1 subgoal):
1. d = b
[PROOF STEP]
using \<open>set (d # ws) \<subseteq> {a, b}\<close> \<open>hd ws \<noteq> d\<close>
[PROOF STATE]
proof (prove)
using this:
hd ws = a
set (d # ws) \<subseteq> {a, b}
hd ws \<noteq> d
goal (1 subgoal):
1. d = b
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
d = b
goal (1 subgoal):
1. ws \<le>p [a, b] \<cdot> ws \<Longrightarrow> False
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. False
[PROOF STEP]
using \<open>ws \<le>p [a, b] \<cdot> ws\<close> \<open>\<not> d # ws \<le>p [b, a] \<cdot> d # ws\<close>[unfolded \<open>d = b\<close>]
[PROOF STATE]
proof (prove)
using this:
ws \<le>p [a, b] \<cdot> ws
\<not> b # ws \<le>p [b, a] \<cdot> b # ws
goal (1 subgoal):
1. False
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<not> ws \<le>p [a, b] \<cdot> ws
goal (1 subgoal):
1. \<not> ws \<le>p [b, a] \<cdot> ws
[PROOF STEP]
show "\<not> ws \<le>p [b, a] \<cdot> ws"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> ws \<le>p [b, a] \<cdot> ws
[PROOF STEP]
proof (rule notI)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. ws \<le>p [b, a] \<cdot> ws \<Longrightarrow> False
[PROOF STEP]
assume "ws \<le>p [b, a] \<cdot> ws"
[PROOF STATE]
proof (state)
this:
ws \<le>p [b, a] \<cdot> ws
goal (1 subgoal):
1. ws \<le>p [b, a] \<cdot> ws \<Longrightarrow> False
[PROOF STEP]
from pref_hd_eq[OF this \<open>ws \<noteq> \<epsilon>\<close>]
[PROOF STATE]
proof (chain)
picking this:
hd ws = hd ([b, a] \<cdot> ws)
[PROOF STEP]
have "hd ws = b"
[PROOF STATE]
proof (prove)
using this:
hd ws = hd ([b, a] \<cdot> ws)
goal (1 subgoal):
1. hd ws = b
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
hd ws = b
goal (1 subgoal):
1. ws \<le>p [b, a] \<cdot> ws \<Longrightarrow> False
[PROOF STEP]
hence "d = a"
[PROOF STATE]
proof (prove)
using this:
hd ws = b
goal (1 subgoal):
1. d = a
[PROOF STEP]
using \<open>set (d # ws) \<subseteq> {a, b}\<close> \<open>hd ws \<noteq> d\<close>
[PROOF STATE]
proof (prove)
using this:
hd ws = b
set (d # ws) \<subseteq> {a, b}
hd ws \<noteq> d
goal (1 subgoal):
1. d = a
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
d = a
goal (1 subgoal):
1. ws \<le>p [b, a] \<cdot> ws \<Longrightarrow> False
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. False
[PROOF STEP]
using \<open>ws \<le>p [b, a] \<cdot> ws\<close> \<open>\<not> d # ws \<le>p [a, b] \<cdot> d # ws\<close>[unfolded \<open>d = a\<close>]
[PROOF STATE]
proof (prove)
using this:
ws \<le>p [b, a] \<cdot> ws
\<not> a # ws \<le>p [a, b] \<cdot> a # ws
goal (1 subgoal):
1. False
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<not> ws \<le>p [b, a] \<cdot> ws
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4442, "file": "Combinatorics_Words_Equations_Basic", "length": 47}
|
from os import listdir, path
import numpy as np
import sklearn.neighbors as neighbors
import vtk
from vtk.util.numpy_support import vtk_to_numpy
def extract_line(filename):
    """Load a VTP file and return its surface line as [x, y, z] arrays.

    The polydata is passed through vtkStripper so that the many small
    independent line elements are joined into contiguous polylines before
    the point coordinates are extracted.
    """
    # Read the polygon data from the VTP file
    vtp_reader = vtk.vtkXMLPolyDataReader()
    vtp_reader.SetFileName(filename)
    vtp_reader.Update()
    # Join contiguous segments; otherwise the geometry consists of many
    # tiny line elements instead of one connected surface line.
    stripper = vtk.vtkStripper()
    stripper.SetInputData(vtp_reader.GetOutput())
    stripper.SetJoinContiguousSegments(True)
    stripper.Update()
    stripped = stripper.GetOutput()
    # Pull out the point coordinates and the line connectivity array
    coords = vtk_to_numpy(stripped.GetPoints().GetData())
    connectivity = vtk_to_numpy(stripped.GetLines().GetData())
    # connectivity[0] is the point count; the remaining entries are point
    # indices. Transpose so we return separate x, y and z arrays.
    return [np.array(axis) for axis in coords[connectivity[1:]].T]
def line_to_distance(tx, ty, x, y):
    """Distance from each query point (tx, ty) to the line sampled by (x, y).

    Fits a 1-nearest-neighbor index on the reference points and returns
    the Euclidean distance from every query point to its closest
    reference point.
    """
    reference_points = np.vstack([x, y]).T
    query_points = np.vstack([tx, ty]).T
    index = neighbors.NearestNeighbors(n_neighbors=1, metric="euclidean")
    index.fit(reference_points)
    distances, _ = index.kneighbors(query_points)
    return distances
|
{"hexsha": "034fb574066f26bd308342ca2b24ad09de33c961", "size": 1238, "ext": "py", "lang": "Python", "max_stars_repo_path": "trench_automation/util.py", "max_stars_repo_name": "yozoon/TrenchDepositionAutomation", "max_stars_repo_head_hexsha": "4eb1dd9fbabe7a782aa2070de144240616c00472", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "trench_automation/util.py", "max_issues_repo_name": "yozoon/TrenchDepositionAutomation", "max_issues_repo_head_hexsha": "4eb1dd9fbabe7a782aa2070de144240616c00472", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trench_automation/util.py", "max_forks_repo_name": "yozoon/TrenchDepositionAutomation", "max_forks_repo_head_hexsha": "4eb1dd9fbabe7a782aa2070de144240616c00472", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1363636364, "max_line_length": 85, "alphanum_fraction": 0.7084006462, "include": true, "reason": "import numpy", "num_tokens": 295}
|
#include <StdInc.h>
#include "TrType.h"
#include "TrTopLevel.h"
#include "Module.h"
#include "GlobalContext.h"
#include "Nest/Utils/Diagnostic.hpp"
#include "Nest/Utils/cppif/StringRef.hpp"
#include "Nest/Utils/cppif/Type.hpp"
#include "Nest/Api/Type.h"
#include "Nest/Api/Node.h"
#include "Feather/Api/Feather.h"
#include "Feather/Utils/FeatherUtils.hpp"
#include "Feather/Utils/cppif/FeatherTypes.hpp"
#include <boost/bind.hpp>
#include <algorithm>
using namespace LLVMB;
using namespace LLVMB::Tr;
using namespace Nest;
using namespace Feather;
namespace {
// Translates the Feather Void type into LLVM's void type.
llvm::Type* transformVoid(VoidType /*type*/, GlobalContext& ctx) {
    return llvm::Type::getVoidTy(ctx.targetBackend_.llvmContext());
}
// Translates a Feather data type: the underlying class becomes an LLVM
// struct, wrapped in one pointer level per reference on the type.
llvm::Type* transformDataType(DataType type, GlobalContext& ctx) {
    // Call the translation method for the class declaration
    auto cls = Feather_classDecl(type);
    ASSERT(cls);
    // TODO (backend): Sometimes we can generate only opaque structs. No need for fields.
    llvm::Type* t = Tr::translateClass(cls, ctx);
    for (size_t i = 0; i < type.numReferences(); ++i)
        t = llvm::PointerType::get(t, 0);
    return t;
}
// Const/mutable/temp category types all lower to a pointer to the
// translated base type; the category itself has no LLVM representation.
llvm::Type* transformConstType(ConstType type, GlobalContext& ctx) {
    return llvm::PointerType::get(getLLVMType(type.base(), ctx), 0);
}
llvm::Type* transformMutableType(MutableType type, GlobalContext& ctx) {
    return llvm::PointerType::get(getLLVMType(type.base(), ctx), 0);
}
llvm::Type* transformTempType(TempType type, GlobalContext& ctx) {
    return llvm::PointerType::get(getLLVMType(type.base(), ctx), 0);
}
// Fixed-size array type: element type + element count map directly to LLVM.
llvm::Type* transformArrayType(ArrayType type, GlobalContext& ctx) {
    return llvm::ArrayType::get(getLLVMType(type.unitType(), ctx), type.count());
}
// Translates a function type to an LLVM (non-variadic) function type.
// ignoreArg: index of one parameter to skip (-1 to keep all parameters).
llvm::Type* transformFunctionType(FunctionType type, int ignoreArg, GlobalContext& ctx) {
    vector<llvm::Type*> llvmParamTypes;
    llvmParamTypes.reserve(type.numParams() + 1);
    llvm::Type* resultType = Tr::getLLVMType(type.result(), ctx);
    for (size_t i = 0; i < type.numParams(); ++i) {
        if (int(i) == ignoreArg)
            continue;
        Type t = type[i];
        llvmParamTypes.push_back(Tr::getLLVMType(t, ctx));
    }
    return llvm::FunctionType::get(resultType, llvmParamTypes, false);
}
} // namespace
// Translates a Feather type into the corresponding LLVM type, caching the
// result in the target backend so each type is translated only once.
// Reports an internal error (and returns nullptr) for unknown type kinds,
// and rejects CT-only types when compiling for run-time.
llvm::Type* Tr::getLLVMType(Type type, GlobalContext& ctx) {
    if (!type)
        REP_INTERNAL(NOLOC, "Invalid type to translate to LLVM");
    ASSERT(type);
    if (!ctx.targetBackend_.isCt() && !type.canBeUsedAtRt())
        REP_INTERNAL(NOLOC, "Cannot use CT-only type at run-time (%1%)") % type.description();

    // First check or cache of translated types; if we don't have a value there, make sure to set it
    // Note: we keep a reference into the cache so the assignment below
    // writes the translated type straight into it.
    llvm::Type*& llvmType = ctx.targetBackend_.translatedTypes_[type];
    if (llvmType)
        return llvmType;

    // Dispatch on the type kind to the matching translation function.
    if (type.kind() == typeKindVoid)
        llvmType = transformVoid(VoidType(type), ctx);
    else if (type.kind() == typeKindData)
        llvmType = transformDataType(DataType(type), ctx);
    else if (type.kind() == typeKindConst)
        llvmType = transformConstType(ConstType(type), ctx);
    else if (type.kind() == typeKindMutable)
        llvmType = transformMutableType(MutableType(type), ctx);
    else if (type.kind() == typeKindTemp)
        llvmType = transformTempType(TempType(type), ctx);
    else if (type.kind() == typeKindArray)
        llvmType = transformArrayType(ArrayType(type), ctx);
    else if (type.kind() == typeKindFunction)
        llvmType = transformFunctionType(FunctionType(type), -1, ctx);
    else {
        REP_INTERNAL(NOLOC, "Don't know how to translate type '%1%'") % type;
        return nullptr;
    }
    return llvmType;
}
// Maps a native type name ("double", "float", "i<bits>", "u<bits>") onto the
// corresponding LLVM type. Reports an error for unknown lowercase names;
// returns nullptr when no native type matches.
llvm::Type* Tr::getNativeLLVMType(
        const Location& loc, StringRef nativeName, llvm::LLVMContext& llvmContext) {
    // Native names start with a lowercase letter. Cast to unsigned char before
    // calling islower(): passing a plain (possibly negative) char is undefined
    // behavior per the C standard.
    if (nativeName.size() > 1 && islower(static_cast<unsigned char>(nativeName.begin[0]))) {
        if (nativeName == "double")
            return llvm::Type::getDoubleTy(llvmContext);
        else if (nativeName == "float")
            return llvm::Type::getFloatTy(llvmContext);
        else if (nativeName.size() > 1 &&
                 (nativeName.begin[0] == 'i' || nativeName.begin[0] == 'u')) {
            // Integer types: 'i'/'u' followed by the bit count (e.g., i32, u8).
            // LLVM integer types carry no signedness, so both map the same way.
            // NOTE(review): lexical_cast reads from a raw char pointer -- assumes
            // nativeName is null-terminated; verify StringRef guarantees this.
            try {
                auto noBits = boost::lexical_cast<int>(nativeName.begin + 1);
                return llvm::IntegerType::get(llvmContext, noBits);
            } catch (...) {
                // Not a valid bit count; fall through and report as unknown.
            }
        }
        REP_ERROR(loc, "Unknown native type name: %1%") % nativeName;
    }
    return nullptr;
}
// Returns the LLVM function type for a function declaration node.
// ignoreArg: index of one parameter to omit from the signature (-1 keeps all);
// presumably used when a parameter is handled specially by the caller -- confirm
// at call sites.
llvm::Type* Tr::getLLVMFunctionType(Node* funDecl, int ignoreArg, GlobalContext& ctx) {
    ASSERT(funDecl);
    Type t = funDecl->type;
    ASSERT(t && t.kind() == typeKindFunction);
    return transformFunctionType(FunctionType(t), ignoreArg, ctx);
}
|
{"hexsha": "337c246d645ce5d428612641f608a47f69789a1c", "size": 4723, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/LLVMBackend/Tr/TrType.cpp", "max_stars_repo_name": "CristianDragu/sparrow", "max_stars_repo_head_hexsha": "49844c2329ac001c3a0779baae7a2f02743c4494", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/LLVMBackend/Tr/TrType.cpp", "max_issues_repo_name": "CristianDragu/sparrow", "max_issues_repo_head_hexsha": "49844c2329ac001c3a0779baae7a2f02743c4494", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/LLVMBackend/Tr/TrType.cpp", "max_forks_repo_name": "CristianDragu/sparrow", "max_forks_repo_head_hexsha": "49844c2329ac001c3a0779baae7a2f02743c4494", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7803030303, "max_line_length": 100, "alphanum_fraction": 0.660808808, "num_tokens": 1216}
|
\documentclass[11pt]{article}
\usepackage[english]{babel}
\usepackage{a4}
\usepackage{latexsym}
\usepackage[
colorlinks,
pdftitle={IGV solutions week 10},
pdfsubject={Werkcollege Inleiding Gegevensverwerking week 10},
pdfauthor={Laurens Bronwasser, Martijn Vermaat}
]{hyperref}
\title{IGV solutions week 10}
\author{
Laurens Bronwasser\footnote{E-mail: lmbronwa@cs.vu.nl, homepage: http://www.cs.vu.nl/\~{}lmbronwa/}
\and
Martijn Vermaat\footnote{E-mail: mvermaat@cs.vu.nl, homepage: http://www.cs.vu.nl/\~{}mvermaat/}
}
\date{8th March 2003}
\begin{document}
\maketitle
\begin{abstract}
In this document we present the solutions to exercises 1 through 4 of the assignment for werkcollege \emph{\mbox{Inleiding Gegevensverwerking}} \mbox{week 10}.
\end{abstract}
\tableofcontents
\newpage
\section{Exercise 1}
\begin{quote}
CREATE database TABLEs for veterinarians and for appointments to treat animals during the consultation hours.
\end{quote}
\subsection*{Solution}
First we have to create a table for veterinarians. A veterinarian needs a name, an address and a unique identifier number. A \verb|VARCHAR| column with length 20 seems to be good enough to store a veterinarian's name and address. For the unique identifier, which will be the \verb|PRIMARY KEY|, we use the \verb|BIGINT| type\footnote{While a $BIGINT$ type may seem to be a bit much for a simple $PRIMARY\ KEY$, we use it anyway, just to keep in line with the other identifiers in the database (a $BIGINT$ value can range from -9223372036854775808 to 9223372036854775807, it doesn't seem likely there will ever fit that many cows and sheep on the entire earth)}. So the table definition becomes\footnote{With the solutions we list the output as printed in the \emph{MySQL} terminal; some lines of this output may be omitted in case of a very large resultset}:
\begin{verbatim}
mysql> CREATE TABLE Veterinarian (
-> vetid BIGINT PRIMARY KEY,
-> vetname VARCHAR(20),
-> address VARCHAR(20)
-> );
Query OK, 0 rows affected (0.17 sec)
\end{verbatim}
Now we'd like to be able to store some appointments. An appointment tells us which animal is seeing a veterinarian, which veterinarian this will be, and when all this will take place. The unique identifier for any appointment could be composed from the animal identifier and the veterinarian identifier, but that will render the situation of an animal seeing a particular veterinarian twice impossible\footnote{Something similar is already the case for the given $Contaminated$ table: one animal cannot suffer from more than one type of contamination}. Therefore, we don't use a compound \verb|PRIMARY KEY| for our \verb|Appointment| table, and, a bit straightforward, our \verb|CREATE TABLE| statement will look like this:
\begin{verbatim}
mysql> CREATE TABLE Appointment (
-> appid BIGINT PRIMARY KEY,
-> animalid BIGINT NOT NULL REFERENCES Animal(animalid),
-> vetid BIGINT NOT NULL REFERENCES Veterinarian(vetid),
-> dateandtime DATETIME
-> );
Query OK, 0 rows affected (0.05 sec)
\end{verbatim}
\newpage
\section{Exercise 2}
\begin{quote}
Study the \verb|CREATE INDEX| statement. It is covered by the MySQL manual while its discussion is omitted in the reader chapter. Use this statement to optimise the database for frequent queries that are ordered by the price of animals.
\end{quote}
\subsection*{Solution}
In the \emph{MySQL Reference Manual} we find the syntax for the \verb|CREATE INDEX| statement\footnote{Section 6.5.7 CREATE INDEX Syntax, available online at $http://www.mysql.com$}:
\begin{verbatim}
CREATE [UNIQUE|FULLTEXT] INDEX index_name
ON tbl_name (col_name[(length)],... )
\end{verbatim}
We simply want to create an index on the \verb|price| column of the \verb|Animal| table to speed up sorting on that column, so a simple statement of the following form will do:
\begin{verbatim}
mysql> CREATE INDEX price ON Animal (price);
Query OK, 52 rows affected (0.25 sec)
Records: 52 Duplicates: 0 Warnings: 0
\end{verbatim}
\newpage
\section{Exercise 3}
\begin{quote}
Recall Exercise 3 from Week 9. Alter the database schema in a way to remove the described redundancy.
\end{quote}
\subsection*{Solution}
The redundancy comes from the fact that a farm for any given contaminated animal can be derived in two different ways. One of these has to be eliminated. We choose to remove the \verb|farm| column from the \verb|Contaminated| table:
\begin{verbatim}
mysql> ALTER TABLE Contaminated DROP farm;
Query OK, 43 rows affected (0.20 sec)
Records: 43 Duplicates: 0 Warnings: 0
\end{verbatim}
The obvious consequence of this action is that all information stored in the \verb|farm| column is now lost. But that's just what we want.
\newpage
\section{Exercise 4}
\begin{quote}
Attempt an alternative solution for the removal of redundancy. Instead of altering the existing \verb|Contaminated| table, create a new table \verb|NonredundantContaminated| which does not suffer from the redundancy. Be sure to use the \verb|CREATE TABLE| syntax which involves a \verb|SELECT| statement as explained at the end of Sec. 4.1 in the reader chapter on SQL and MySQL. Once you have created the new table, you can drop the old table. This is all very simple.
\end{quote}
\subsection*{Solution}
We execute a \verb|CREATE TABLE| statement in which we use a \verb|SELECT| statement. We select the columns \verb|animal| and \verb|type| from the \verb|Contaminated| table to create the new table \verb|NonredundantContaminated|. We would also like to have a \verb|PRIMARY KEY| for this table, so we choose \verb|animal| for this\footnote{Downside of this approach is that one animal can never suffer from more than one contamination, but that was also the case in the old $Contaminated$ table}.
\begin{verbatim}
mysql> CREATE TABLE NonredundantContaminated
-> (PRIMARY KEY (animal))
-> SELECT animal, type FROM Contaminated;
Query OK, 43 rows affected (0.05 sec)
Records: 43 Duplicates: 0 Warnings: 0
\end{verbatim}
We now only have to \verb|DROP| the old \verb|Contaminated| table:
\begin{verbatim}
mysql> DROP TABLE Contaminated;
Query OK, 0 rows affected (0.00 sec)
\end{verbatim}
\end{document}
|
{"hexsha": "21bb886cf3cf34959d041c996f1a45836e0cee66", "size": 6240, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "vu/igv/igv2003_10.tex", "max_stars_repo_name": "martijnvermaat/documents", "max_stars_repo_head_hexsha": "42483b7c4bf94ed708e2893c3ea961d025a10b5e", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-28T14:38:06.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-28T14:38:06.000Z", "max_issues_repo_path": "vu/igv/igv2003_10.tex", "max_issues_repo_name": "martijnvermaat/documents", "max_issues_repo_head_hexsha": "42483b7c4bf94ed708e2893c3ea961d025a10b5e", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vu/igv/igv2003_10.tex", "max_forks_repo_name": "martijnvermaat/documents", "max_forks_repo_head_hexsha": "42483b7c4bf94ed708e2893c3ea961d025a10b5e", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5714285714, "max_line_length": 858, "alphanum_fraction": 0.7626602564, "num_tokens": 1649}
|
"""Breast-cancer classification demo.

Loads the Wisconsin breast-cancer dataset from ``data1.csv``, visualizes it,
compares KNN and SVM classifiers with 10-fold cross-validation, and predicts
the class of a single hand-crafted example.
"""
# Packages
import sys
import numpy
import matplotlib
import pandas
import sklearn

# Version check -- helps reproduce results across environments.
print('Python:{}'.format(sys.version))
print('Numpy:{}'.format(numpy.__version__))
print('matplotlib:{}'.format(matplotlib.__version__))
print('pandas:{}'.format(pandas.__version__))
print('sklearn:{}'.format(sklearn.__version__))

# Importing packages (train_test_split was previously imported twice).
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.metrics import classification_report, accuracy_score

# Loading the dataset (column-name typos fixed: uniform_cell_size,
# normal_nucleoli; the names are only used within this script).
names = ['id', 'clump_thickness', 'uniform_cell_size', 'uniform_cell_shape',
         'marginal_adhesion', 'single_epithelial_size', 'bare_nuclei',
         'bland_chromatin', 'normal_nucleoli', 'mitoses', 'class']
dataset = pd.read_csv("data1.csv", names=names)

# Taking care of missing data: '?' markers become a large sentinel value.
dataset.replace('?', -99999, inplace=True)
print(dataset.axes)
# The id column carries no predictive information. Use the axis keyword:
# the positional form drop(['id'], 1) was removed in pandas 2.
dataset.drop(['id'], axis=1, inplace=True)

# Shape of dataset
print(dataset.shape)
# Dataset visualization
print(dataset.loc[6])
print(dataset.describe())

# Plot histogram for each variable
dataset.hist(figsize=(10, 10))
plt.show()

# Create scatter plot matrix
scatter_matrix(dataset, figsize=(18, 18))
plt.show()

# Splitting into features X and target Y (the 'class' column, index 9
# after dropping 'id').
X = dataset.iloc[:, :-1]
Y = dataset.iloc[:, 9]

# Splitting into training and test set
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)

# Testing options
seed = 8
scoring = 'accuracy'

# Define models to train
models = [
    ('KNN', KNeighborsClassifier(n_neighbors=5)),
    ('SVM', SVC()),
]

# Evaluate each model in turn with 10-fold cross-validation.
results = []
names = []
for name, model in models:
    # shuffle=True is required when random_state is set (scikit-learn >= 0.24
    # raises ValueError otherwise) and makes the folds reproducible.
    kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
    cv_results = cross_val_score(model, X_train, y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)

# Make predictions on the validation dataset.
for name, model in models:
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    print(name)
    print(accuracy_score(y_test, predictions))
    print(classification_report(y_test, predictions))

# Prediction on a single example using KNN.
clf = KNeighborsClassifier(n_neighbors=5)
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
print(accuracy)

example = np.array([[4, 2, 1, 1, 1, 2, 3, 5, 5]])
example = example.reshape(len(example), -1)
prediction = clf.predict(example)
# Dataset encoding: class 4 = malignant, class 2 = benign.
if prediction == 4:
    print("Malignant")
if prediction == 2:
    print("Benign")
{"hexsha": "13c5b083985f5e11f433fa1879aae6df7c19fd36", "size": 2901, "ext": "py", "lang": "Python", "max_stars_repo_path": "BreastCancerDetection.py", "max_stars_repo_name": "firoj998/Breast-Cancer-Detection", "max_stars_repo_head_hexsha": "bf041fba24ba0f2bbb379f64e7b7e56f11aa3245", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BreastCancerDetection.py", "max_issues_repo_name": "firoj998/Breast-Cancer-Detection", "max_issues_repo_head_hexsha": "bf041fba24ba0f2bbb379f64e7b7e56f11aa3245", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BreastCancerDetection.py", "max_forks_repo_name": "firoj998/Breast-Cancer-Detection", "max_forks_repo_head_hexsha": "bf041fba24ba0f2bbb379f64e7b7e56f11aa3245", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3679245283, "max_line_length": 185, "alphanum_fraction": 0.7421578766, "include": true, "reason": "import numpy", "num_tokens": 688}
|
"""Render a pie chart of fruit shares with the first slice pulled out."""
import matplotlib.pyplot as plt
import numpy as np

# Slice sizes for the chart
slice_values = np.array([35, 25, 25, 15])
# One label per slice
slice_labels = ['Maçãs', 'Banana', 'Laranja', 'Melancia']
# Offset of each slice from the center; only the first slice is detached
slice_offsets = [0.2, 0, 0, 0]

plt.pie(slice_values, labels=slice_labels, explode=slice_offsets, shadow=True)
plt.show()
|
{"hexsha": "45336227bfbb0d479278647aa4c9f7849e3e915b", "size": 325, "ext": "py", "lang": "Python", "max_stars_repo_path": "gafricoPizza/grafico.py", "max_stars_repo_name": "lucasDEV20/GafricoPizzaEmPython", "max_stars_repo_head_hexsha": "1cd668e87db12cc36e679cbd33f324eacb2dc0da", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gafricoPizza/grafico.py", "max_issues_repo_name": "lucasDEV20/GafricoPizzaEmPython", "max_issues_repo_head_hexsha": "1cd668e87db12cc36e679cbd33f324eacb2dc0da", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gafricoPizza/grafico.py", "max_forks_repo_name": "lucasDEV20/GafricoPizzaEmPython", "max_forks_repo_head_hexsha": "1cd668e87db12cc36e679cbd33f324eacb2dc0da", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.25, "max_line_length": 60, "alphanum_fraction": 0.64, "include": true, "reason": "import numpy", "num_tokens": 113}
|
import argparse
from design_search import RobotDesignEnv, make_graph, build_normalized_robot, presimulate, simulate
import mcts
import numpy as np
import os
import pyrobotdesign as rd
import random
import tasks
import time
class CameraTracker(object):
  """Keeps the viewer camera following a simulated robot.

  The tracking target is the center of the robot's world-space AABB.
  ``update`` eases the camera toward it with exponential smoothing,
  while ``reset`` snaps the camera directly onto it.
  """

  def __init__(self, viewer, sim, robot_idx):
    self.viewer = viewer
    self.sim = sim
    self.robot_idx = robot_idx
    self.reset()

  def _robot_center(self):
    """Return the center of the robot's axis-aligned bounding box."""
    aabb_min = np.zeros(3)
    aabb_max = np.zeros(3)
    self.sim.get_robot_world_aabb(self.robot_idx, aabb_min, aabb_max)
    return 0.5 * (aabb_min + aabb_max)

  def update(self, time_step):
    """Ease the camera toward the robot center over one time step."""
    target = self._robot_center()
    position = self.viewer.camera_params.position.copy()
    # Exponential smoothing with a gain of 5.0 per second of sim time.
    position += 5.0 * time_step * (target - position)
    self.viewer.camera_params.position = position

  def reset(self):
    """Snap the camera onto the robot center with no smoothing."""
    self.viewer.camera_params.position = self._robot_center()
def run_trajectory(sim, robot_idx, input_sequence, task, step_callback):
  """Replay an input sequence on the simulation, invoking a callback per step.

  Each column of ``input_sequence`` is held as the joint target for
  ``task.interval`` simulation steps. ``step_callback`` is called with 0
  before the first step and with the step index after each completed step.
  """
  step_callback(0)
  num_columns = input_sequence.shape[1]
  for col in range(num_columns):
    # The same joint targets are applied for the whole interval.
    targets = input_sequence[:, col].reshape(-1, 1)
    for sub in range(task.interval):
      step_idx = col * task.interval + sub
      sim.set_joint_targets(robot_idx, targets)
      task.add_noise(sim, step_idx)
      sim.step()
      step_callback(step_idx + 1)
def view_trajectory(sim, robot_idx, input_sequence, task):
  """Play the trajectory in an interactive viewer until the window closes.

  Steps the simulation in real time (catching up to wall-clock time each
  frame), looping the input sequence and restoring the saved simulation
  state at each wrap-around. Returns the final camera parameters and the
  set of step indices the user flagged for recording.
  """
  record_step_indices = set()
  # Save the state so the sim can be rewound on loop and on exit.
  sim.save_state()
  viewer = rd.GLFWViewer()
  # Get robot bounds
  lower = np.zeros(3)
  upper = np.zeros(3)
  sim.get_robot_world_aabb(robot_idx, lower, upper)
  # Set initial camera parameters; the yaw depends on the task's terrain
  # so the interesting geometry faces the camera.
  task_name = type(task).__name__
  if 'Ridged' in task_name or 'Gap' in task_name:
    viewer.camera_params.yaw = 0.0
  elif 'Wall' in task_name:
    viewer.camera_params.yaw = -np.pi / 2
  else:
    viewer.camera_params.yaw = -np.pi / 4
  viewer.camera_params.pitch = -np.pi / 6
  # Distance scaled to the robot's bounding-box diagonal.
  viewer.camera_params.distance = 1.5 * np.linalg.norm(upper - lower)
  tracker = CameraTracker(viewer, sim, robot_idx)
  # j indexes the input-sequence column, k the sub-step within an interval.
  j = 0
  k = 0
  sim_time = time.time()
  while not viewer.should_close():
    current_time = time.time()
    # Step the simulation until it has caught up with wall-clock time.
    while sim_time < current_time:
      step_idx = j * task.interval + k
      if input_sequence is not None:
        sim.set_joint_targets(robot_idx, input_sequence[:,j].reshape(-1, 1))
      task.add_noise(sim, step_idx)
      sim.step()
      tracker.update(task.time_step)
      viewer.update(task.time_step)
      # Remember steps the user marked for recording in the viewer.
      if viewer.camera_controller.should_record():
        record_step_indices.add(step_idx)
      sim_time += task.time_step
      k += 1
      if k >= task.interval:
        j += 1
        k = 0
        # End of the input sequence: rewind the sim and replay from the start.
        if input_sequence is not None and j >= input_sequence.shape[1]:
          j = 0
          k = 0
          sim.restore_state()
          tracker.reset()
    viewer.render(sim)
  # Leave the simulation in its original saved state for the caller.
  sim.restore_state()
  return viewer.camera_params, record_step_indices
def finalize_robot(robot):
  """Fill in placeholder attributes so the robot can be simulated.

  Clears every link's design-search labels, turns any still-unspecified
  link shape into a small magenta capsule, and any still-unspecified
  joint into a magenta fixed joint.
  """
  magenta = [1.0, 0.0, 1.0]
  for link in robot.links:
    # Labels are only meaningful during design search; drop them here.
    link.label = ""
    link.joint_label = ""
    if link.shape == rd.LinkShape.NONE:
      # Default geometry: a small capsule, colored to stand out visually.
      link.shape = rd.LinkShape.CAPSULE
      link.length = 0.1
      link.radius = 0.025
      link.color = list(magenta)
    if link.joint_type == rd.JointType.NONE:
      link.joint_type = rd.JointType.FIXED
      link.joint_color = list(magenta)
def main():
parser = argparse.ArgumentParser(description="Robot design viewer.")
parser.add_argument("task", type=str, help="Task (Python class name)")
parser.add_argument("grammar_file", type=str, help="Grammar file (.dot)")
parser.add_argument("rule_sequence", nargs="+", help="Rule sequence to apply")
parser.add_argument("-o", "--optim", default=False, action="store_true",
help="Optimize a trajectory")
parser.add_argument("-s", "--opt_seed", type=int, default=None,
help="Trajectory optimization seed")
parser.add_argument("-e", "--episodes", type=int, default=1,
help="Number of optimization episodes")
parser.add_argument("-j", "--jobs", type=int, required=True,
help="Number of jobs/threads")
parser.add_argument("--input_sequence_file", type=str,
help="File to save input sequence to (.csv)")
parser.add_argument("--save_obj_dir", type=str,
help="Directory to save .obj files to")
parser.add_argument("--save_video_file", type=str,
help="File to save video to (.mp4)")
parser.add_argument("-l", "--episode_len", type=int, default=128,
help="Length of episode")
args = parser.parse_args()
task_class = getattr(tasks, args.task)
task = task_class(episode_len=args.episode_len)
graphs = rd.load_graphs(args.grammar_file)
rules = [rd.create_rule_from_graph(g) for g in graphs]
rule_sequence = [int(s.strip(",")) for s in args.rule_sequence]
if args.opt_seed is not None:
opt_seed = args.opt_seed
else:
opt_seed = random.getrandbits(32)
print("Using optimization seed:", opt_seed)
graph = make_graph(rules, rule_sequence)
robot = build_normalized_robot(graph)
finalize_robot(robot)
if args.optim:
input_sequence, result = simulate(robot, task, opt_seed, args.jobs,
args.episodes)
print("Result:", result)
else:
input_sequence = None
if args.input_sequence_file and input_sequence is not None:
import csv
with open(args.input_sequence_file, 'w', newline='') as input_seq_file:
writer = csv.writer(input_seq_file)
for col in input_sequence.T:
writer.writerow(col)
print("Saved input sequence to file:", args.input_sequence_file)
robot_init_pos, has_self_collision = presimulate(robot)
if has_self_collision:
print("Warning: robot self-collides in initial configuration")
main_sim = rd.BulletSimulation(task.time_step)
task.add_terrain(main_sim)
# Rotate 180 degrees around the y axis, so the base points to the right
main_sim.add_robot(robot, robot_init_pos, rd.Quaterniond(0.0, 0.0, 1.0, 0.0))
robot_idx = main_sim.find_robot_index(robot)
camera_params, record_step_indices = view_trajectory(
main_sim, robot_idx, input_sequence, task)
if args.save_obj_dir and input_sequence is not None:
import export_mesh
if record_step_indices:
print("Saving .obj files for {} steps".format(len(record_step_indices)))
os.makedirs(args.save_obj_dir, exist_ok=True)
# Save the props/terrain once
obj_file_name = os.path.join(args.save_obj_dir, 'terrain.obj')
mtl_file_name = os.path.join(args.save_obj_dir, 'terrain.mtl')
with open(obj_file_name, 'w') as obj_file, \
open(mtl_file_name, 'w') as mtl_file:
dumper = export_mesh.ObjDumper(obj_file, mtl_file)
obj_file.write("mtllib {}\n".format(os.path.split(mtl_file_name)[-1]))
for prop_idx in range(main_sim.get_prop_count()):
export_mesh.dump_prop(prop_idx, main_sim, dumper)
dumper.finish()
# Save the robot once per step
def save_obj_callback(step_idx):
if record_step_indices:
if step_idx not in record_step_indices:
return
else:
if step_idx % 128 != 0:
return
obj_file_name = os.path.join(args.save_obj_dir,
'robot_{:04}.obj'.format(step_idx))
# Use one .mtl file for all steps
mtl_file_name = os.path.join(args.save_obj_dir, 'robot.mtl')
with open(obj_file_name, 'w') as obj_file, \
open(mtl_file_name, 'w') as mtl_file:
dumper = export_mesh.ObjDumper(obj_file, mtl_file)
obj_file.write("mtllib {}\n".format(os.path.split(mtl_file_name)[-1]))
export_mesh.dump_robot(robot_idx, main_sim, dumper)
dumper.finish()
run_trajectory(main_sim, robot_idx, input_sequence, task, save_obj_callback)
if args.save_video_file and input_sequence is not None:
import cv2
if record_step_indices:
print("Saving video for {} steps".format(len(record_step_indices)))
viewer = rd.GLFWViewer()
# Copy camera parameters from the interactive viewer
viewer.camera_params = camera_params
tracker = CameraTracker(viewer, main_sim, robot_idx)
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
writer = cv2.VideoWriter(args.save_video_file, fourcc, 60.0,
viewer.get_framebuffer_size())
writer.set(cv2.VIDEOWRITER_PROP_QUALITY, 100)
def write_frame_callback(step_idx):
tracker.update(task.time_step)
# 240 steps/second / 4 = 60 fps
if step_idx % 4 == 0:
# Flip vertically, convert RGBA to BGR
frame = viewer.render_array(main_sim)[::-1,:,2::-1]
writer.write(frame)
run_trajectory(main_sim, robot_idx, input_sequence, task,
write_frame_callback)
writer.release()
# Script entry point: run the viewer only when executed directly, not on import.
if __name__ == '__main__':
    main()
|
{"hexsha": "7751683e8665719d88fd0fa7121e8e021261d214", "size": 8912, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/design_search/viewer.py", "max_stars_repo_name": "ONLYA/RoboGrammar", "max_stars_repo_head_hexsha": "4b9725739b24dc9df4049866c177db788b1e458f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 156, "max_stars_repo_stars_event_min_datetime": "2020-10-02T14:33:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T22:30:30.000Z", "max_issues_repo_path": "examples/design_search/viewer.py", "max_issues_repo_name": "ONLYA/RoboGrammar", "max_issues_repo_head_hexsha": "4b9725739b24dc9df4049866c177db788b1e458f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-12-14T01:24:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-16T10:01:16.000Z", "max_forks_repo_path": "examples/design_search/viewer.py", "max_forks_repo_name": "ONLYA/RoboGrammar", "max_forks_repo_head_hexsha": "4b9725739b24dc9df4049866c177db788b1e458f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 43, "max_forks_repo_forks_event_min_datetime": "2020-10-02T00:01:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-06T17:02:38.000Z", "avg_line_length": 34.4092664093, "max_line_length": 99, "alphanum_fraction": 0.671005386, "include": true, "reason": "import numpy", "num_tokens": 2208}
|
! { dg-do run }
! { dg-options "-fdump-tree-original" }
!
! PR fortran/56845
!
! Regression test for PR fortran/56845: SAVEd allocatable CLASS variables
! must receive a static default initializer referencing the correct vtable.
module m
! Simple derived type with a single integer component.
type t
integer ::a
end type t
contains
! Scalar case: a SAVEd allocatable CLASS(t) must report the same dynamic
! type as a SAVEd allocatable TYPE(t).
subroutine sub
type(t), save, allocatable :: x
class(t), save,allocatable :: y
if (.not. same_type_as(x,y)) STOP 1
end subroutine sub
! Array case: the same check for rank-1 allocatables.
subroutine sub2
type(t), save, allocatable :: a(:)
class(t), save,allocatable :: b(:)
if (.not. same_type_as(a,b)) STOP 2
end subroutine sub2
end module m
! Driver: exercise both the scalar and the array check.
use m
call sub()
call sub2()
end
! { dg-final { scan-tree-dump-times "static struct __class_m_T_1_0a b = {._data={.data=0B}, ._vptr=&__vtab_m_T};" 1 "original" } }
! { dg-final { scan-tree-dump-times "static struct __class_m_T_a y = {._data=0B, ._vptr=&__vtab_m_T};" 1 "original" } }
|
{"hexsha": "d2514772a0386da798a040a098d24979db7349dc", "size": 715, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/class_allocate_14.f90", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z", "max_issues_repo_path": "validation_tests/llvm/f18/gfortran.dg/class_allocate_14.f90", "max_issues_repo_name": "brugger1/testsuite", "max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z", "max_forks_repo_path": "validation_tests/llvm/f18/gfortran.dg/class_allocate_14.f90", "max_forks_repo_name": "brugger1/testsuite", "max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z", "avg_line_length": 23.064516129, "max_line_length": 130, "alphanum_fraction": 0.6685314685, "num_tokens": 250}
|
/*
Copyright 2010 Kenneth Riddile
Use, modification and distribution are subject to the Boost Software License,
Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt).
*/
/*************************************************************************************************/
#ifndef BOOST_GIL_EXTENSION_IO_TARGA_IO_WRITE_HPP
#define BOOST_GIL_EXTENSION_IO_TARGA_IO_WRITE_HPP
////////////////////////////////////////////////////////////////////////////////////////
/// \file
/// \brief
/// \author Kenneth Riddile \n
///
/// \date 2010 \n
///
////////////////////////////////////////////////////////////////////////////////////////
#include <vector>
#include <boost/gil/extension/io_new/detail/base.hpp>
#include <boost/gil/extension/io_new/detail/io_device.hpp>
#include <boost/gil/extension/io_new/targa_tags.hpp>
namespace boost { namespace gil {
// Map the number of channels of the view being written to the 8-bit
// BGR(A) GIL view type used for TGA scanline buffers.  Only 3- and
// 4-channel images are supported; other channel counts fail to compile.
template < int N > struct get_targa_view_type {};
template <> struct get_targa_view_type< 3 > { typedef bgr8_view_t type; };   // 24-bit BGR
template <> struct get_targa_view_type< 4 > { typedef bgra8_view_t type; };  // 32-bit BGRA
// TGA writer specialisation: emits an uncompressed true-colour TGA file
// (24-bit BGR or 32-bit BGRA) to the given device.  Scanlines are written
// bottom-up, matching TGA's default (bottom-left) origin.
template< typename Device >
class writer< Device
            , targa_tag
            >
{
public:

    writer( Device& file )
    : _out( file )
    {
    }

    ~writer()
    {
    }

    // Write the view using default settings.
    template<typename View>
    void apply( const View& view )
    {
        write( view );
    }

    // Overload taking write settings; the settings are currently ignored
    // because image_write_info< targa_tag > carries no fields yet.
    template<typename View>
    void apply( const View& view
              , const image_write_info< targa_tag >& /* info */
              )
    {
        // Add code here, once image_write_info< targa_tag > isn't empty anymore.
        write( view );
    }

private:

    // Emit the 18-byte TGA header followed by the pixel data.
    template< typename View >
    void write( const View& view )
    {
        // 8 bits per channel -> 24 or 32 bits per pixel.
        uint8_t bit_depth = static_cast<uint8_t>( num_channels<View>::value * 8 );

        // write the TGA header
        _out.write_uint8( 0 ); // offset
        _out.write_uint8( targa_color_map_type::_rgb );
        _out.write_uint8( targa_image_type::_rgb );
        _out.write_uint16( 0 ); // color map start
        _out.write_uint16( 0 ); // color map length
        _out.write_uint8( 0 ); // color map depth
        _out.write_uint16( 0 ); // x origin
        _out.write_uint16( 0 ); // y origin
        _out.write_uint16( static_cast<uint16_t>( view.width() ) ); // width in pixels
        _out.write_uint16( static_cast<uint16_t>( view.height() ) ); // height in pixels
        _out.write_uint8( bit_depth );

        if( 32 == bit_depth )
        {
            _out.write_uint8( 8 ); // 8-bit alpha channel descriptor
        }
        else
        {
            _out.write_uint8( 0 );
        }

        // Dispatch on channel count to pick the matching BGR(A) buffer view.
        write_image< View
                   , typename get_targa_view_type< num_channels< View >::value >::type
                   >( view );
    }

    // Copy the image one row at a time into a BGR(A)-ordered scratch buffer
    // and write the rows bottom-up.
    template< typename View
            , typename TGA_View
            >
    void write_image( const View& view )
    {
        size_t row_size = view.width() * num_channels<View>::value;
        byte_vector_t buffer( row_size );
        std::fill( buffer.begin(), buffer.end(), 0 );

        // One-row interleaved view over the scratch buffer; copy_pixels
        // performs the channel reordering into BGR(A).
        TGA_View row = interleaved_view( view.width()
                                       , 1
                                       , reinterpret_cast<typename TGA_View::value_type*>( &buffer.front() )
                                       , row_size
                                       );

        for( typename View::y_coord_t y = view.height() - 1; y > -1; --y )
        {
            copy_pixels( subimage_view( view
                                      , 0
                                      , static_cast<int>( y )
                                      , static_cast<int>( view.width() )
                                      , 1
                                      )
                       , row
                       );

            _out.write( &buffer.front(), row_size );
        }
    }

private:

    Device& _out;
};
// Metafunction predicate used by the dynamic writer: evaluates to true
// when the pixel type of View can be written in TGA format.
struct targa_write_is_supported
{
    template< typename View >
    struct apply
        : public is_write_supported< typename get_pixel_type< View >::type
                                   , targa_tag
                                   >
    {};
};
// Writer for any_image_view: dispatches at runtime to the statically typed
// TGA writer for whichever view type the variant currently holds.
template< typename Device >
class dynamic_image_writer< Device
                          , targa_tag
                          >
    : public writer< Device
                   , targa_tag
                   >
{
    typedef writer< Device
                  , targa_tag
                  > parent_t;

public:

    dynamic_image_writer( Device& file )
    : parent_t( file )
    {}

    // Apply the statically typed writer to the stored view; pixel types not
    // accepted by targa_write_is_supported are rejected by the dispatcher.
    template< typename Views >
    void apply( const any_image_view< Views >& views )
    {
        detail::dynamic_io_fnobj< targa_write_is_supported
                                , parent_t
                                > op( this );

        apply_operation( views, op );
    }
};
} // namespace gil
} // namespace boost
#endif // BOOST_GIL_EXTENSION_IO_TARGA_IO_WRITE_HPP
|
{"hexsha": "82fb791adbce4e43b549433f215cd748f4007a4d", "size": 4977, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "3rdparty/boost/boost/gil/extension/io_new/formats/targa/write.hpp", "max_stars_repo_name": "Greentwip/windy", "max_stars_repo_head_hexsha": "4eb8174f952c5b600ff004827a5c85dbfb013091", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2017-07-13T21:11:55.000Z", "max_stars_repo_stars_event_max_datetime": "2017-07-13T21:11:55.000Z", "max_issues_repo_path": "3rdparty/boost/boost/gil/extension/io_new/formats/targa/write.hpp", "max_issues_repo_name": "Greentwip/Windy", "max_issues_repo_head_hexsha": "4eb8174f952c5b600ff004827a5c85dbfb013091", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "3rdparty/boost/boost/gil/extension/io_new/formats/targa/write.hpp", "max_forks_repo_name": "Greentwip/Windy", "max_forks_repo_head_hexsha": "4eb8174f952c5b600ff004827a5c85dbfb013091", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1967213115, "max_line_length": 108, "alphanum_fraction": 0.4964838256, "num_tokens": 1066}
|
from __future__ import print_function
import mxnet as mx
import numpy as np
import argparse
import re
import sys
from convert_symbol import proto2symbol
caffe_flag = True
try:
import caffe
except ImportError:
import caffe_parse.parse_from_protobuf as parse
caffe_flag = False
def get_caffe_iter(layer_names, layers):
    """Yield ``(name, type, blobs)`` triples for layers loaded via pycaffe.

    Layer names have '-' and '/' replaced by '_' so they form valid
    MXNet parameter-name components.
    """
    for idx, layer in enumerate(layers):
        sanitized_name = re.sub('[-/]', '_', layer_names[idx])
        yield (sanitized_name, layer.type, layer.blobs)
def get_iter(layers):
    """Yield ``(name, type, blobs)`` triples for layers parsed from protobuf.

    Same name sanitization as :func:`get_caffe_iter`, but the layer name is
    read from the layer object itself.
    """
    for layer in layers:
        cleaned = re.sub('[-/]', '_', layer.name)
        yield (cleaned, layer.type, layer.blobs)
def main():
    """Command-line entry point.

    Parses the prototxt/caffemodel paths, converts the parameters with
    :func:`process_caffe_model` and saves an MXNet checkpoint under the
    given output prefix.
    """
    parser = argparse.ArgumentParser(description='Caffe prototxt to mxnet model parameter converter.\
                                     Note that only basic functions are implemented. You are welcomed to contribute to this file.')
    for arg_name, arg_help in (
            ('caffe_prototxt', 'The prototxt file in Caffe format'),
            ('caffe_model', 'The binary model parameter file in Caffe format'),
            ('save_model_name', 'The name of the output model prefix')):
        parser.add_argument(arg_name, help=arg_help)
    args = parser.parse_args()

    sym, arg_params, aux_params, input_dim = process_caffe_model(args.caffe_prototxt, args.caffe_model)
    module = mx.mod.Module(symbol=sym, label_names=['prob_label', ])
    module.bind(data_shapes=[('data', tuple(input_dim))])
    module.init_params(arg_params=arg_params, aux_params=aux_params)
    module.save_checkpoint(args.save_model_name, 1)
    print('Saved model successfully to {}'.format(args.save_model_name))
def process_caffe_model(caffe_prototxt, caffe_model, output_file=None, data=None, data_shapes=None):
    """Convert Caffe layer blobs into MXNet parameter dictionaries.

    Parameters
    ----------
    caffe_prototxt : str
        Path to the Caffe network definition (.prototxt).
    caffe_model : str
        Path to the Caffe binary parameter file (.caffemodel).
    output_file, data, data_shapes : optional
        Unused; retained for backward compatibility with existing callers.

    Returns
    -------
    tuple
        ``(sym, arg_params, aux_params, input_dim)`` -- the converted MXNet
        symbol, argument parameters, auxiliary parameters and input shape.
    """
    prob, input_dim = proto2symbol(caffe_prototxt)

    if caffe_flag:
        caffe.set_mode_cpu()
        net_caffe = caffe.Net(caffe_prototxt, caffe_model, caffe.TEST)
        layer_names = net_caffe._layer_names
        layers = net_caffe.layers
    else:
        layers = parse.parse_caffemodel(caffe_model)

    arg_shapes, output_shapes, aux_shapes = prob.infer_shape(data=tuple(input_dim))
    arg_names = prob.list_arguments()
    aux_names = prob.list_auxiliary_states()
    arg_shape_dic = dict(zip(arg_names, arg_shapes))
    aux_shape_dic = dict(zip(aux_names, aux_shapes))
    arg_params = {}
    aux_params = {}

    # Renamed from ``iter`` to avoid shadowing the builtin; the former
    # empty-string placeholders were dropped since both branches assign.
    if caffe_flag:
        layer_iter = get_caffe_iter(layer_names, layers)
    else:
        layer_iter = get_iter(layers)

    first_conv = True
    for layer_name, layer_type, layer_blobs in layer_iter:
        # Types 4 and 14 are the legacy (V1 LayerParameter) enum values for
        # Convolution and InnerProduct respectively.
        if layer_type == 'Convolution' or layer_type == 'InnerProduct' or layer_type == 4 or layer_type == 14 \
                or layer_type == 'PReLU':
            if layer_type == 'PReLU':
                # PReLU carries a single blob: the per-channel slope (gamma).
                assert (len(layer_blobs) == 1)
                wmat = layer_blobs[0].data
                weight_name = layer_name + '_gamma'
                arg_params[weight_name] = mx.nd.zeros(wmat.shape)
                arg_params[weight_name][:] = wmat
                continue
            # Blob shape is stored either in the new ``shape.dim`` field or
            # in the legacy num/channels/height/width fields.
            wmat_dim = []
            if getattr(layer_blobs[0].shape, 'dim', None) is not None:
                if len(layer_blobs[0].shape.dim) > 0:
                    wmat_dim = layer_blobs[0].shape.dim
                else:
                    wmat_dim = [layer_blobs[0].num, layer_blobs[0].channels, layer_blobs[0].height, layer_blobs[0].width]
            else:
                wmat_dim = list(layer_blobs[0].shape)
            wmat = np.array(layer_blobs[0].data).reshape(wmat_dim)
            channels = wmat_dim[1]
            if channels == 3 or channels == 4:  # RGB or RGBA
                if first_conv:
                    print('Swapping BGR of caffe into RGB in mxnet')
                    # Caffe stores images in BGR order; swap the first and
                    # third input channels so MXNet sees RGB ordering.
                    wmat[:, [0, 2], :, :] = wmat[:, [2, 0], :, :]
            assert(wmat.flags['C_CONTIGUOUS'] is True)
            sys.stdout.write('converting layer {0}, wmat shape = {1}'.format(layer_name, wmat.shape))
            if len(layer_blobs) == 2:
                bias = np.array(layer_blobs[1].data)
                bias = bias.reshape((bias.shape[0], 1))
                assert(bias.flags['C_CONTIGUOUS'] is True)
                bias_name = layer_name + "_bias"
                bias = bias.reshape(arg_shape_dic[bias_name])
                arg_params[bias_name] = mx.nd.zeros(bias.shape)
                arg_params[bias_name][:] = bias
                sys.stdout.write(', bias shape = {}'.format(bias.shape))
            sys.stdout.write('\n')
            sys.stdout.flush()
            # Flatten, then reshape to the exact shape MXNet inferred.
            wmat = wmat.reshape((wmat.shape[0], -1))
            weight_name = layer_name + "_weight"
            if weight_name not in arg_shape_dic:
                print(weight_name + ' not found in arg_shape_dic.')
                continue
            wmat = wmat.reshape(arg_shape_dic[weight_name])
            arg_params[weight_name] = mx.nd.zeros(wmat.shape)
            arg_params[weight_name][:] = wmat
            if first_conv and (layer_type == 'Convolution' or layer_type == 4):
                first_conv = False
        elif layer_type == 'Scale':
            # Caffe pairs BatchNorm with a separate Scale layer; in MXNet
            # gamma/beta belong to the BatchNorm operator itself.
            bn_name = layer_name.replace('scale', 'bn')
            gamma = layer_blobs[0].data
            beta = layer_blobs[1].data
            # beta = np.expand_dims(beta, 1)
            beta_name = '{}_beta'.format(bn_name)
            gamma_name = '{}_gamma'.format(bn_name)
            beta = beta.reshape(arg_shape_dic[beta_name])
            gamma = gamma.reshape(arg_shape_dic[gamma_name])
            arg_params[beta_name] = mx.nd.zeros(beta.shape)
            arg_params[gamma_name] = mx.nd.zeros(gamma.shape)
            arg_params[beta_name][:] = beta
            arg_params[gamma_name][:] = gamma
            assert gamma.flags['C_CONTIGUOUS'] is True
            assert beta.flags['C_CONTIGUOUS'] is True
            print('converting scale layer, beta shape = {}, gamma shape = {}'.format(beta.shape, gamma.shape))
        elif layer_type == 'BatchNorm':
            bn_name = layer_name
            mean = layer_blobs[0].data
            var = layer_blobs[1].data
            moving_average_factor = layer_blobs[2].data
            mean_name = '{}_moving_mean'.format(bn_name)
            var_name = '{}_moving_var'.format(bn_name)
            maf_name = '{}_momentum'.format(bn_name)
            mean = mean.reshape(aux_shape_dic[mean_name])
            var = var.reshape(aux_shape_dic[var_name])
            aux_params[mean_name] = mx.nd.zeros(mean.shape)
            aux_params[var_name] = mx.nd.zeros(var.shape)
            arg_params[maf_name] = mx.nd.zeros(moving_average_factor.shape)
            aux_params[mean_name][:] = mean
            aux_params[var_name][:] = var
            arg_params[maf_name][:] = moving_average_factor
            assert var.flags['C_CONTIGUOUS'] is True
            assert mean.flags['C_CONTIGUOUS'] is True
            print('converting batchnorm layer, mean shape = {}, var shape = {}'.format(mean.shape, var.shape))
        else:
            # Layers without learnable parameters (ReLU, Pooling, ...).
            assert len(layer_blobs) == 0
            print('\tskipping layer {} of type {}'.format(layer_name, layer_type))
    return prob, arg_params, aux_params, input_dim
# Run the conversion only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
{"hexsha": "a648530087518cb2c76141641c1c0636f310a1c0", "size": 7341, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/caffe_converter/convert_model.py", "max_stars_repo_name": "dmmiller612/mxnet", "max_stars_repo_head_hexsha": "3f410c23cb02df64625d7c8f9f299b580236f6a5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-07T17:00:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-07T17:00:20.000Z", "max_issues_repo_path": "tools/caffe_converter/convert_model.py", "max_issues_repo_name": "dmmiller612/mxnet", "max_issues_repo_head_hexsha": "3f410c23cb02df64625d7c8f9f299b580236f6a5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/caffe_converter/convert_model.py", "max_forks_repo_name": "dmmiller612/mxnet", "max_forks_repo_head_hexsha": "3f410c23cb02df64625d7c8f9f299b580236f6a5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5580110497, "max_line_length": 121, "alphanum_fraction": 0.6124506198, "include": true, "reason": "import numpy", "num_tokens": 1749}
|
# -*- coding: utf-8 -*-
"""
Tonemapping Operators Plotting
==============================
Defines the tonemapping operators plotting objects:
- :func:`colour_hdri.plotting.plot_tonemapping_operator_image`
"""
import matplotlib
import matplotlib.ticker
import numpy as np
from colour.plotting import (CONSTANTS_COLOUR_STYLE, artist, override_style,
render)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2015-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'plot_tonemapping_operator_image',
]
@override_style()
def plot_tonemapping_operator_image(
        image,
        luminance_function,
        log_scale=False,
        cctf_encoding=CONSTANTS_COLOUR_STYLE.colour.colourspace.cctf_encoding,
        **kwargs):
    """
    Plots given tonemapped image with superimposed luminance mapping function.

    Parameters
    ----------
    image : array_like
        Tonemapped image to plot.
    luminance_function : array_like
        Sampled luminance mapping curve, plotted against an evenly spaced
        [0, 1] input range.  NOTE(review): despite the name suggesting a
        callable, the code calls ``len()`` on it and plots it directly, so
        a sequence of samples is expected -- confirm against callers.
    log_scale : bool, optional
        Use a log scale for plotting the luminance mapping function.
    cctf_encoding : callable, optional
        Encoding colour component transfer function / opto-electronic
        transfer function used for plotting.

    Other Parameters
    ----------------
    \\**kwargs : dict, optional
        {:func:`colour.plotting.render`},
        Please refer to the documentation of the previously listed definition.

    Returns
    -------
    tuple
        Current figure and axes.
    """

    settings = {'uniform': True}
    settings.update(kwargs)

    figure, axes = artist(**settings)

    shape = image.shape
    bounding_box = [0, 1, 0, 1]

    # Encode and clamp into the displayable [0, 1] range before display.
    image = np.clip(cctf_encoding(image), 0, 1)
    axes.imshow(
        image,
        aspect=shape[0] / shape[1],
        extent=bounding_box,
        interpolation='nearest')
    # Superimpose the luminance mapping curve over the image.
    axes.plot(
        np.linspace(0, 1, len(luminance_function)),
        luminance_function,
        color='red')

    settings = {
        'axes': axes,
        'bounding_box': bounding_box,
        'x_ticker': True,
        'y_ticker': True,
        'x_label': 'Input Luminance',
        'y_label': 'Output Luminance',
    }
    settings.update(kwargs)

    if log_scale:
        settings.update({
            'x_label': '$log_2$ Input Luminance',
            # NOTE(review): ``AutoMinorLocator`` expects an integer
            # subdivision count; the 0.5 here looks suspicious -- confirm.
            'x_ticker_locator': matplotlib.ticker.AutoMinorLocator(0.5)
        })

        # NOTE(review): ``basex`` was removed in Matplotlib 3.5 in favour
        # of ``base``; this call targets older Matplotlib versions.
        matplotlib.pyplot.gca().set_xscale('log', basex=2)
        matplotlib.pyplot.gca().xaxis.set_major_formatter(
            matplotlib.ticker.ScalarFormatter())

    return render(**settings)
|
{"hexsha": "c668bc0b3cb84e358e297010a86271c5f3464560", "size": 2800, "ext": "py", "lang": "Python", "max_stars_repo_path": "colour_hdri/plotting/tonemapping.py", "max_stars_repo_name": "colour-science/colour-hdri", "max_stars_repo_head_hexsha": "3a97c4ad8bc328e2fffabf84ac8b56d795dbeb82", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 92, "max_stars_repo_stars_event_min_datetime": "2015-09-19T22:11:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T06:37:53.000Z", "max_issues_repo_path": "colour_hdri/plotting/tonemapping.py", "max_issues_repo_name": "colour-science/colour-hdri", "max_issues_repo_head_hexsha": "3a97c4ad8bc328e2fffabf84ac8b56d795dbeb82", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2017-05-25T08:55:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T18:26:43.000Z", "max_forks_repo_path": "colour_hdri/plotting/tonemapping.py", "max_forks_repo_name": "colour-science/colour-hdri", "max_forks_repo_head_hexsha": "3a97c4ad8bc328e2fffabf84ac8b56d795dbeb82", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2016-01-18T17:29:51.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-12T12:54:18.000Z", "avg_line_length": 26.9230769231, "max_line_length": 78, "alphanum_fraction": 0.6417857143, "include": true, "reason": "import numpy", "num_tokens": 648}
|
import numpy as np
import cv2
# Identify pixels above the threshold
# Threshold of RGB > 160 does a nice job of identifying ground pixels only
def color_thresh(img, rgb_thresh=(160, 160, 160)):
    """Binary-threshold an RGB image.

    Returns a single-channel array (same dtype as ``img``) that is 1
    wherever every channel of a pixel strictly exceeds the corresponding
    entry of ``rgb_thresh`` and 0 elsewhere.  The default of 160 per
    channel isolates navigable ground pixels nicely.
    """
    red, green, blue = img[:, :, 0], img[:, :, 1], img[:, :, 2]
    above = (red > rgb_thresh[0]) & (green > rgb_thresh[1]) & (blue > rgb_thresh[2])
    # Cast the boolean mask to the image dtype so the result is a 0/1
    # integer mask rather than a boolean array.
    return above.astype(img.dtype)
# Define a function to convert from image coords to rover coords
def rover_coords(binary_img):
    """Convert nonzero pixels of a binary image to rover-centric coords.

    The rover sits at the center-bottom of the image: +x points toward the
    top of the image, +y toward the left.

    Parameters
    ----------
    binary_img : ndarray
        2-D array whose nonzero entries mark pixels of interest.

    Returns
    -------
    tuple of ndarray
        ``(x_pixel, y_pixel)`` float arrays in rover coordinates.
    """
    ypos, xpos = binary_img.nonzero()
    # ``np.float`` was removed in NumPy 1.24; the builtin ``float`` is the
    # documented replacement and yields the same float64 arrays.
    x_pixel = -(ypos - binary_img.shape[0]).astype(float)
    y_pixel = -(xpos - binary_img.shape[1] / 2).astype(float)
    return x_pixel, y_pixel
# Define a function to convert to radial coords in rover space
def to_polar_coords(x_pixel, y_pixel):
    """Convert rover-space cartesian pixel coords to polar form.

    Returns ``(dist, angles)`` where ``dist`` is the radial distance of
    each pixel from the rover and ``angles`` its bearing (radians, via
    ``arctan2``) away from straight ahead.
    """
    radii = np.sqrt(x_pixel ** 2 + y_pixel ** 2)
    bearings = np.arctan2(y_pixel, x_pixel)
    return radii, bearings
# Define a function to map rover space pixels to world space
def rotate_pix(xpix, ypix, yaw):
    """Rotate rover-frame pixel coordinates by the yaw angle.

    ``yaw`` is given in degrees; the standard 2-D rotation matrix is
    applied to the (xpix, ypix) coordinates.
    """
    theta = yaw * np.pi / 180
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    xpix_rotated = xpix * cos_t - ypix * sin_t
    ypix_rotated = xpix * sin_t + ypix * cos_t
    return xpix_rotated, ypix_rotated
def translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale):
    """Scale rotated rover-frame coords to map resolution and shift them.

    Divides by ``scale`` (rover pixels per world-map cell) and offsets by
    the rover's world position ``(xpos, ypos)``.
    """
    return (xpix_rot / scale) + xpos, (ypix_rot / scale) + ypos
# Define a function to apply rotation and translation (and clipping)
# Once you define the two functions above this function should work
def pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):
    """Map rover-frame pixels into clipped world-map cell coordinates.

    Applies rotation by ``yaw`` and scaled translation to ``(xpos, ypos)``,
    then clips the integer results into ``[0, world_size - 1]`` so they can
    be used directly as world-map indices.
    """
    rot_x, rot_y = rotate_pix(xpix, ypix, yaw)
    tran_x, tran_y = translate_pix(rot_x, rot_y, xpos, ypos, scale)
    limit = world_size - 1
    x_pix_world = np.clip(np.int_(tran_x), 0, limit)
    y_pix_world = np.clip(np.int_(tran_y), 0, limit)
    return x_pix_world, y_pix_world
# Define a function to perform a perspective transform
def perspect_transform(img, src, dst):
    """Warp ``img`` with the perspective transform mapping ``src`` to ``dst``."""
    matrix = cv2.getPerspectiveTransform(src, dst)
    # Output size matches the input image: (width, height).
    return cv2.warpPerspective(img, matrix, (img.shape[1], img.shape[0]))
# Segment image into three components, ground (B), rock (G) and non-navigable (R) using
# color thresholding.
# Ground color should be above certain level, while rocks are golden so a combination of
# high red and green with low blue channel values would identify rocks.
def segment_image(img):
    """Segment an RGB camera frame into a three-channel 0/255 label image.

    Channel layout of the result:
      R -- non-navigable terrain (everything that is not ground),
      G -- rock samples (golden: high red/green, low blue),
      B -- navigable ground (bright pixels above the ground threshold).
    """
    # Ground: all channels above a lower bound.
    ground_mask = color_thresh(img, (175, 175, 175))

    # Rocks: lower bounds on red/green combined with an upper bound on blue.
    rock_lo_red, rock_lo_green, rock_hi_blue = 120, 120, 60
    rock_mask = np.zeros_like(ground_mask)
    rock_mask[(img[:, :, 0] > rock_lo_red)
              & (img[:, :, 1] > rock_lo_green)
              & (img[:, :, 2] < rock_hi_blue)] = 1

    segmented = np.zeros_like(img)
    segmented[:, :, 0] = 255 * np.logical_not(ground_mask)
    segmented[:, :, 1] = 255 * (rock_mask & np.logical_not(ground_mask))
    segmented[:, :, 2] = 255 * ground_mask
    return segmented
# Gets a set of predefined sources and desinations with its corresponding scale for
# perspective transform
def get_predefined_src_dst(img):
    """Return calibrated (source, destination, scale) for perspect_transform.

    The source quadrilateral is a fixed calibration measured from the rover
    camera; the destination is a small square centered horizontally near
    the bottom of the image.  ``scale`` is rover pixels per world-map cell.
    """
    dst_size = 5
    bottom_offset = 6
    half_width = img.shape[1] / 2
    near_y = img.shape[0] - bottom_offset
    far_y = img.shape[0] - 2 * dst_size - bottom_offset
    source = np.float32([[14, 140], [301, 140], [200, 96], [118, 96]])
    destination = np.float32([[half_width - dst_size, near_y],
                              [half_width + dst_size, near_y],
                              [half_width + dst_size, far_y],
                              [half_width - dst_size, far_y]])
    return source, destination, 10.0
# Apply the above functions in succession and update the Rover state accordingly
def perception_step(Rover):
    """Update the Rover state from the current camera frame.

    Segments ``Rover.img`` into obstacle/rock/ground channels, warps it to
    a top-down view, accumulates the result into ``Rover.worldmap``,
    mirrors it into ``Rover.vision_image`` and recomputes the navigation
    distances/angles from the navigable-ground pixels.

    Returns the mutated ``Rover`` object.
    """
    # Calibration (source/destination quads and scale) for the warp.
    src, dst, scale = get_predefined_src_dst(Rover.img)
    # Segment first, then warp the label image to a top-down view.
    segmented_image = perspect_transform(segment_image(Rover.img), src, dst)

    xpos = Rover.pos[0]
    ypos = Rover.pos[1]
    yaw = Rover.yaw
    world_size = Rover.worldmap.shape[0]

    # Channel order matches segment_image: 0=obstacle, 1=rock, 2=ground.
    for ch in range(3):
        segment = segmented_image[:, :, ch]
        # Show the segmentation on the rover display.
        Rover.vision_image[:, :, ch] = segment
        # Project this channel's pixels onto the world map.
        rover_x, rover_y = rover_coords(segment)
        world_x, world_y = pix_to_world(rover_x, rover_y, xpos, ypos, yaw,
                                        world_size, scale)
        # Accumulate confidence by 10.0 per observation.
        Rover.worldmap[world_y, world_x, ch] = Rover.worldmap[world_y, world_x, ch] + 10.0

    # Navigation targets come from the ground (blue) channel.
    # (The previously computed-but-unused mean angle was removed.)
    xpix, ypix = rover_coords(segmented_image[:, :, 2])
    dist, angles = to_polar_coords(xpix, ypix)
    Rover.nav_dists = dist
    Rover.nav_angles = angles

    return Rover
|
{"hexsha": "6030ff153a1b9b172d9345b5f8989d6d2d63945d", "size": 6428, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/perception.py", "max_stars_repo_name": "northerncat/RoboND-Rover-Project", "max_stars_repo_head_hexsha": "52030b8eaae3cde912d79bee42dced50ec81a6af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/perception.py", "max_issues_repo_name": "northerncat/RoboND-Rover-Project", "max_issues_repo_head_hexsha": "52030b8eaae3cde912d79bee42dced50ec81a6af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/perception.py", "max_forks_repo_name": "northerncat/RoboND-Rover-Project", "max_forks_repo_head_hexsha": "52030b8eaae3cde912d79bee42dced50ec81a6af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1409395973, "max_line_length": 121, "alphanum_fraction": 0.6845052894, "include": true, "reason": "import numpy", "num_tokens": 1663}
|
#!/usr/bin/python
# md.py - An Event Driven Molecular Dynamics (EDMD) Simulator
#
# This script performs a simple, event-drive molecular dynamics
# simulation on a pygame canvas
#
# Dependencies:
# - pygame
# - numpy
# - particle.py (EDMD project)
# - event.py (EDMD project)
#
# Andrew D. McGuire 2017
# a.mcguire227@gmail.com
#----------------------------------------------------------
import pygame
import numpy as np
from pygame.locals import *
import pygame.display
import argparse
from particle import Particle
from event import Event
# Handle command line args
parser = argparse.ArgumentParser(description='An Event Driven Molecular Dynamics (EDMD) Simulator',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', action="store", dest="npart", default=30, type=int,
                    help='number of particles to simulate')
parser.add_argument('--dt', action="store", dest="dt", type=float, default=10.0,
                    help='max time-step size between graphical updates - higher is faster')
# Box dimensions are declared ``type=int``; argparse does not run ``type``
# on defaults, so the defaults must be integers themselves (300.0 would
# leak a float into ``options.xmax``/``options.ymax`` when unset).
parser.add_argument('-x', action="store", dest="xmax", type=int, default=300,
                    help='simulation box width [px]')
parser.add_argument('-y', action="store", dest="ymax", type=int, default=300,
                    help='simulation box height [px]')
options = parser.parse_args()
def setup(options, time, r):
    """Build the initial particle ensemble for the simulation.

    args:
       options - parsed command-line options (npart, xmax, ymax)
       time    - simulation start time (accepted for interface
                 compatibility; not used here)
       r       - particle radius

    returns the list of freshly initialised Particle objects.
    """
    return initParticles(options.npart, r, options.xmax, options.ymax)
def simulate(options, particles, time):
    """Advance the ensemble by one step of at most ``options.dt``.

    If the earliest predicted collision falls inside the step, the
    particles are advanced exactly to the collision time, the participants
    are highlighted and the collision is resolved; otherwise the full
    ``dt`` elapses without incident.

    args:
       options   - a valid EDMD options object
       particles - list of Particle objects
       time      - simulation time at the start of the jump

    returns the updated (particles, time) pair.
    """
    coll_list = getCollisionList(particles, options.xmax, options.ymax)
    dt = options.dt
    # Earliest collision time; a sentinel beyond dt stands in when the
    # collision list is empty.
    dt_col = coll_list[0].t if coll_list else dt + 1

    if dt < dt_col:
        # Collision-free step: simply stream the particles forward.
        return advanceParticles(particles, dt), time + dt

    # A collision occurs within the step: advance to it, mark the
    # particles involved, then carry out the collision.
    particles = advanceParticles(particles, dt_col)
    first_event = coll_list[0]
    particles = highlightEventParticles(first_event, particles)
    particles = performCollision(first_event, particles)
    return particles, time + dt_col
def highlightEventParticles(CurrentEvent, particles):
    """Highlight the particle(s) taking part in an event.

    Wall events carry only ``p1_index``; pair collisions also carry a
    ``p2_index``.  Returns the (mutated) particle list.
    """
    particles[CurrentEvent.p1_index].highlight()
    second = CurrentEvent.p2_index
    if second is not None:
        particles[second].highlight()
    return particles
def getWallCollisionTime(Part, xmax, ymax):
    """Compute the first collision time between particle Part and any wall.

    returns object attributes:
        t    - first collision time (None if the particle is stationary)
        wall - wall associated with the first collision ('r','l','u','d',
               or None)
    local vars:
        t_side - side wall collision time (None if not moving in x)
        t_ud   - top or bottom wall collision time (None if not moving in y)
        w_side - which side wall ('r' or 'l')
        w_ud   - top or bottom wall first? ('u' or 'd')
    """
    # side walls
    if Part.vel[0] > 0:
        t_side = (xmax - Part.pos[0] - Part.radius) / Part.vel[0]
        w_side = 'r'
    elif Part.vel[0] < 0:
        t_side = (0 - Part.pos[0] + Part.radius) / Part.vel[0]
        w_side = 'l'
    else:
        # particle not moving in x direction
        t_side = None
        w_side = None
    # top and bottom (screen coords: positive y moves towards 'd')
    if Part.vel[1] > 0:
        t_ud = (ymax - Part.pos[1] - Part.radius) / Part.vel[1]
        w_ud = 'd'
    elif Part.vel[1] < 0:
        t_ud = (0 - Part.pos[1] + Part.radius) / Part.vel[1]
        w_ud = 'u'
    else:
        # particle not moving in y direction
        t_ud = None
        w_ud = None
    # Pick the earlier of the two candidate times.
    # BUG FIX: the original wrote `t_side <= t_ud` even when one side was
    # None; under Python 2 (None < number) a particle moving along only
    # one axis was reported as having no wall collision at all, and under
    # Python 3 the comparison raises TypeError.
    if t_side is None and t_ud is None:
        # particle is stationary
        t = None
        wall = None
    elif t_ud is None or (t_side is not None and t_side <= t_ud):
        t = t_side
        wall = w_side
    else:
        t = t_ud
        wall = w_ud
    return type('obj', (object,), {'t': t, 'wall': wall})
def getCollisionTime(Part1, Part2):
    """Compute the time until particles Part1 and Part2 touch.

    Solves |rel_pos + t*rel_vel| = minDist for the smaller root of the
    quadratic.  Returns None when the particles are not approaching or
    their trajectories never bring them into contact.
    """
    rel_vel = Part1.vel - Part2.vel
    rel_pos = Part1.pos - Part2.pos
    contact_dist = Part1.radius + Part2.radius
    # quadratic coefficients: a*t^2 + b*t + c = 0
    a = np.dot(rel_vel, rel_vel)
    b = 2.0 * np.dot(rel_pos, rel_vel)
    c = np.dot(rel_pos, rel_pos) - contact_dist * contact_dist
    discrim = b * b - 4 * a * c
    # b >= 0 means the pair is separating; discrim <= 0 means no contact
    if discrim <= 0 or b >= 0:
        return None
    return (-b - discrim ** 0.5) / (2 * a)
def getCollisionList(particles, xmax, ymax):
    """Return an array of collision Event objects, ordered by their
    time attribute (smallest to largest); events with no valid time
    are dropped.

    args:
        particles - an array of Particle objects
        xmax      - box width  [px]
        ymax      - box height [px]
    """
    coll_list = []
    # for each particle, find its earliest event (wall or pair collision)
    for i in range(len(particles)):
        wall_collision = getWallCollisionTime(particles[i], xmax, ymax)
        firstEvent = Event('w', wall_collision.t, i, None, wall_collision.wall)
        # j starts at i+1, so each unordered pair is examined once
        for j in range(i + 1, len(particles)):
            col_time = getCollisionTime(particles[i], particles[j])
            if col_time is not None:
                # BUG FIX: a stationary particle has no wall event
                # (firstEvent.t is None); the original `col_time <
                # firstEvent.t` comparison silently discarded its pair
                # collisions in Python 2 and raises TypeError in Python 3.
                if firstEvent.t is None or col_time < firstEvent.t:
                    firstEvent = Event('p', col_time, i, j, None)
        # only keep events that actually have a collision time
        if firstEvent.t is not None:
            coll_list.append(firstEvent)
    # sort the Event array by time and return it
    return sorted(coll_list, key=lambda event: event.t)
def performCollision(event, particles):
    """Apply the collision operator described by event to the ensemble.

    args:
        event     - a valid Event object (wc_log True for wall events)
        particles - list of Particle objects
    returns:
        particles - the updated list
    """
    if not event.wc_log:
        # binary particle collision: equal and opposite impulses
        J = impulse(particles[event.p1_index],
                    particles[event.p2_index])
        particles[event.p1_index].apply_impulse(J[0], J[1])
        particles[event.p2_index].apply_impulse(-J[0], -J[1])
        return particles
    # wall collision: reflect the velocity component normal to the wall
    if event.wall in ('r', 'l'):
        particles[event.p1_index].reflect_side()
    elif event.wall in ('u', 'd'):
        particles[event.p1_index].reflect_top()
    else:
        raise RuntimeError("invalid collison event detected.")
    return particles
def impulse(Part1, Part2):
    """Compute the impulse associated with a particle-particle collision.

    https:#introcs.cs.princeton.edu/java/assignments/collisions.html
        J = 2*m1*m2*(dv*dr)/(sigma*(m1+m2))

    args:
        Part1 - valid Particle object
        Part2 - valid Particle object
    returns:
        [Jx, Jy] - impulse components along the line of centres
    """
    centre_sep = Part2.pos - Part1.pos
    rel_vel = Part2.vel - Part1.vel
    sigma = Part1.radius + Part2.radius
    # reduced-mass prefactor 2*m1*m2/(m1+m2)
    prefactor = 2 * Part1.mass * Part2.mass / (Part1.mass + Part2.mass)
    magnitude = np.dot(rel_vel, centre_sep) * prefactor / sigma
    # project the scalar impulse onto the line of centres
    return [magnitude * centre_sep[0] / sigma,
            magnitude * centre_sep[1] / sigma]
def advanceParticles(particles, dt):
    """Advance the ensemble forward in time by dt along straight-line
    trajectories (no collisions are detected here)."""
    for particle in particles:
        particle.updateProperties(dt)
    return particles
def initParticles(n, r, xmax, ymax):
    """Initialise up to n particles with radius r in a box of dimensions
    (xmax, ymax) on a uniform grid, so no two particles overlap.

    return:
        parts - an array of Particle objects (cell centres of the grid)
    """
    spacing = initialSpacing(n, xmax, ymax)
    parts = []
    for ix in range(int(round(xmax / spacing))):
        for iy in range(int(round(ymax / spacing))):
            if len(parts) >= n:
                return parts
            # place each particle at the centre of its grid cell
            parts.append(Particle(spacing * (ix + 0.5),
                                  spacing * (iy + 0.5), r))
    return parts
def initialSpacing(n, x, y):
    """Return the initial spacing dx between particles that puts n
    particles on a uniform grid filling the box with limits (x, y).

    dx is the positive root of (n-1)*dx**2 + (x+y)*dx - x*y = 0.

    args:
        n - number of particles (>= 1)
        x - box width
        y - box height
    """
    # BUG FIX: the quadratic formula degenerates to 0/0 for n == 1
    # (den = 2*(n-1) = 0); use the analytic n -> 1 limit instead.
    if n <= 1:
        return float(x) * y / (x + y)
    num1 = -(x + y)
    num2sqred = (x + y) ** 2.0 + 4.0 * x * y * (n - 1)
    num2 = num2sqred ** 0.5
    den = 2.0 * (n - 1)
    dx = (num1 + num2) / den
    return dx
def main(options):
    """Run the interactive EDMD simulation.

    Opens a pygame window of size (options.xmax, options.ymax), creates
    the particle ensemble, then advances the simulation at up to 60 FPS
    until the user quits (ESC key or window close).  Any mouse click
    toggles pause.

    args:
        options - parsed command-line options (npart, dt, xmax, ymax)

    NOTE: this function uses Python 2 print statements; the module
    targets Python 2.  QUIT/KEYDOWN/K_ESCAPE/... are presumably from a
    `from pygame.locals import *` earlier in the file — verify.
    """
    # define the simualtion parameters
    r = 7  # particle radius to use
    time = 0.0  # global simulation time
    paused_log = True  # paused indicator bool
    # set-up the screen
    pygame.init()
    screen = pygame.display.set_mode((int(options.xmax), int(options.ymax)))
    pygame.display.set_caption('EDMD')
    pygame.mouse.set_visible(1)
    # plain white background surface, re-blitted every frame
    background = pygame.Surface(screen.get_size())
    background = background.convert()
    background.fill((255, 255, 255))
    screen.blit(background, (0, 0))
    pygame.display.flip()
    clock = pygame.time.Clock()
    # set-up the particles
    particles = setup(options, time, r)
    allsprites = pygame.sprite.RenderPlain(particles)
    print "EDMD simulation initialised with", len(particles), "particles."
    print "Press ESC or click the 'x' to end the simulation."
    print "Click anywhere on the screen to pause."
    #The main run loop
    quit_log = False
    paused_log = False
    while not quit_log:
        clock.tick(60)  # cap the frame rate at 60 FPS
        if not paused_log:
            particles, time = simulate(options, particles, time)
        #Handle Input Events
        for event in pygame.event.get():
            if event.type == QUIT:
                quit_log = True
            elif event.type == KEYDOWN and event.key == K_ESCAPE:
                quit_log = True
            elif event.type == MOUSEBUTTONDOWN:
                # any click anywhere toggles the paused state
                paused_log = not paused_log
            elif event.type == MOUSEBUTTONUP:
                # fist.unpunch()
                pass
        if not paused_log:
            allsprites.update()
        #Draw Everything
        screen.blit(background, (0, 0))
        allsprites.draw(screen)
        pygame.display.flip()
    pygame.quit()

if __name__ == '__main__':
    # `options` comes from the module-level argparse block above
    main(options)
|
{"hexsha": "ad2cbb5190d16c6d9bdf91cf7fe6e40531bf3cba", "size": 10626, "ext": "py", "lang": "Python", "max_stars_repo_path": "md.py", "max_stars_repo_name": "adm78/EDMD", "max_stars_repo_head_hexsha": "1d6ba28841ac6917a31fe513505032c13f6b092a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "md.py", "max_issues_repo_name": "adm78/EDMD", "max_issues_repo_head_hexsha": "1d6ba28841ac6917a31fe513505032c13f6b092a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-12-02T23:57:02.000Z", "max_issues_repo_issues_event_max_datetime": "2017-12-02T23:57:02.000Z", "max_forks_repo_path": "md.py", "max_forks_repo_name": "adm78/EDMD", "max_forks_repo_head_hexsha": "1d6ba28841ac6917a31fe513505032c13f6b092a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9536784741, "max_line_length": 99, "alphanum_fraction": 0.630246565, "include": true, "reason": "import numpy", "num_tokens": 2687}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.