hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a050a4b872072ed8a56d1d982d0727510675e73
| 7,233
|
py
|
Python
|
tests/particles/setplot.py
|
jkapilian/geoclaw
|
89edf01902848529949b85c50a85b987ec869c9e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/particles/setplot.py
|
jkapilian/geoclaw
|
89edf01902848529949b85c50a85b987ec869c9e
|
[
"BSD-3-Clause"
] | 1
|
2020-06-28T21:28:49.000Z
|
2020-06-28T21:28:49.000Z
|
tests/particles/setplot.py
|
jkapilian/geoclaw
|
89edf01902848529949b85c50a85b987ec869c9e
|
[
"BSD-3-Clause"
] | 2
|
2015-12-09T18:49:46.000Z
|
2016-05-18T20:44:18.000Z
|
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
from __future__ import absolute_import
from __future__ import print_function
from clawpack.visclaw import gaugetools
from clawpack.visclaw import particle_tools
from clawpack.visclaw import legend_tools
from importlib import reload
reload(particle_tools)
#--------------------------
def setplot(plotdata=None):
#--------------------------
    """
    Specify what is to be plotted at each frame.

    Input:  plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
    Output: a modified version of plotdata.
    """

    if plotdata is None:
        from clawpack.visclaw.data import ClawPlotData
        plotdata = ClawPlotData()

    from clawpack.visclaw import colormaps, geoplot

    plotdata.clearfigures()  # clear any old figures,axes,items data
    plotdata.format = 'ascii'    # Format of output

    # Read every gauge once up front; the callbacks defined below reuse this
    # dict (gauge number -> gauge solution object) on every frame.
    print('Reading all gauges...')
    gauge_solutions = particle_tools.read_gauges(gaugenos='all',
                                                 outdir=plotdata.outdir)

    # Split gauges by type: 'lagrangian' gauges drift with the flow
    # (particles), 'stationary' gauges stay fixed in space.
    gaugenos_lagrangian = [k for k in gauge_solutions.keys() \
                if gauge_solutions[k].gtype=='lagrangian']
    gaugenos_stationary = [k for k in gauge_solutions.keys() \
                if gauge_solutions[k].gtype=='stationary']

    print('+++ gaugenos_lagrangian: ',gaugenos_lagrangian)

    def add_particles(current_data):
        """Afteraxes callback: draw particle trails, current particle
        positions, stationary gauge markers, and a legend on the frame."""
        t = current_data.t

        # plot recent path:
        t_path_length = 10.   # duration of path trailing each particle
        kwargs_plot_path = {'linewidth':1, 'color':'k'}
        particle_tools.plot_paths(gauge_solutions,
                                  t1=t-t_path_length, t2=t,
                                  gaugenos=gaugenos_lagrangian,
                                  kwargs_plot=kwargs_plot_path)

        # plot current location:
        kwargs_plot_point = {'marker':'o','markersize':3,'color':'k'}
        particle_tools.plot_particles(gauge_solutions, t,
                                      gaugenos=gaugenos_lagrangian,
                                      kwargs_plot=kwargs_plot_point)

        # plot any stationary gauges:
        gaugetools.plot_gauge_locations(current_data.plotdata, \
             gaugenos=gaugenos_stationary, format_string='kx', add_labels=False)
        # NOTE(review): this dict is never used; the legend location is the
        # loc= keyword passed directly to add_legend below.
        kwargs={'loc':'upper left'}
        legend_tools.add_legend(['Lagrangian particle','Stationary gauge'],
                                linestyles=['',''], markers=['o','x'],
                                loc='lower right', framealpha=0.5, fontsize=10)

    def speed(current_data):
        """Return water speed in knots, masked where the cell is dry
        (depth h below 1e-3)."""
        from pylab import sqrt, where, zeros
        from numpy.ma import masked_where, allequal
        q = current_data.q
        h = q[0,:,:]                           # water depth
        hs = sqrt(q[1,:,:]**2 + q[2,:,:]**2)   # magnitude of momentum (hu, hv)
        s = where(h>1e-3, hs/h, 0.)            # speed = |momentum|/depth on wet cells
        s = masked_where(h<1e-3, s)            # hide dry cells from the plot
        s = s * 1.94384   # convert to knots
        return s

    # cyan -> yellow -> red colormap used for the speed pcolor plot
    speed_cmap = colormaps.make_colormap({0:[0,1,1], 0.5:[1,1,0], 1:[1,0,0]})

    #-----------------------------------------
    # Figure for pcolor plot
    #-----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='pcolor', figno=0)
    plotfigure.kwargs = {'figsize': (9,4)}

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes('pcolor')
    plotaxes.title = 'Speed'
    plotaxes.scaled = True
    plotaxes.xlimits = [0,80]
    plotaxes.ylimits = [0,50]
    plotaxes.afteraxes = add_particles

    # Water
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    #plotitem.plot_var = geoplot.surface
    #plotitem.plot_var = geoplot.surface_or_depth
    plotitem.plot_var = speed
    #plotitem.pcolor_cmap = geoplot.tsunami_colormap
    plotitem.pcolor_cmap = speed_cmap
    plotitem.pcolor_cmin = 0.
    plotitem.pcolor_cmax = 20
    plotitem.add_colorbar = True
    plotitem.colorbar_label = 'knots'
    plotitem.amr_celledges_show = [0,0,0]
    plotitem.amr_patchedges_show = [1]
    plotitem.amr_patchedges_color = ['m','g','w']

    # Land
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    #plotitem.show = False
    plotitem.plot_var = geoplot.land
    plotitem.pcolor_cmap = geoplot.land_colors
    plotitem.pcolor_cmin = 0.0
    plotitem.pcolor_cmax = 100.0
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0,0,0]

    # Add contour lines of topography:
    plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
    plotitem.show = False
    plotitem.plot_var = geoplot.topo
    from numpy import arange, linspace
    plotitem.contour_levels = arange(-75,75,10)
    #plotitem.contour_nlevels = 10
    plotitem.amr_contour_colors = ['g']  # color on each level
    plotitem.kwargs = {'linestyles':'solid'}
    plotitem.amr_contour_show = [1,1,1]  # show contours only on finest level
    plotitem.celledges_show = 0
    plotitem.patchedges_show = 0

    #-----------------------------------------
    # Figures for gauges
    #-----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='Surface & topo', figno=300, \
                    type='each_gauge')
    plotfigure.clf_each_gauge = True

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.xlimits = 'auto'
    plotaxes.ylimits = [-100,100]
    plotaxes.title = 'Surface'

    # Plot surface as blue curve:
    plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
    plotitem.plot_var = 3   # q[3] is the surface elevation eta
    plotitem.plotstyle = 'b-'

    # Plot topo as green curve:
    plotitem = plotaxes.new_plotitem(plot_type='1d_plot')

    def gaugetopo(current_data):
        """Return the topography at the gauge location: eta - h."""
        q = current_data.q
        h = q[0,:]      # depth
        eta = q[3,:]    # surface elevation
        topo = eta - h
        return topo

    plotitem.plot_var = gaugetopo
    plotitem.plotstyle = 'g-'

    def add_zeroline(current_data):
        """Afteraxes callback: add a legend and a zero reference line."""
        from pylab import plot, legend
        t = current_data.t
        legend(('surface','topography'),loc='lower left')
        plot(t, 0*t, 'k')

    plotaxes.afteraxes = add_zeroline

    #-----------------------------------------
    # Parameters used only when creating html and/or latex hardcopy
    # e.g., via pyclaw.plotters.frametools.printframes:

    plotdata.printfigs = True                # print figures
    plotdata.print_format = 'png'            # file format
    plotdata.print_framenos = range(40)      # list of frames to print
    plotdata.print_gaugenos = []             # list of gauges to print
    plotdata.print_fignos = 'all'            # list of figures to print
    plotdata.html = True                     # create html files of plots?
    plotdata.html_homelink = '../README.html'   # pointer for top of index
    plotdata.latex = True                    # create latex file of plots?
    plotdata.latex_figsperline = 2           # layout of plots
    plotdata.latex_framesperline = 1         # layout of plots
    plotdata.latex_makepdf = False           # also run pdflatex?
    plotdata.parallel = True                 # make multiple frame png's at once
    plotdata.html_movie_width = 700          # width used in JSAnimation

    return plotdata
| 34.279621
| 80
| 0.609982
|
4a050a55b8290f3b9c0515a5e786efbf837b71c7
| 22,884
|
py
|
Python
|
stable_baselines/deepq_poher/dqn.py
|
kevslinger/stable-baselines
|
4bf9f3c1db49f462f5fb35df967d836d92a3dbcd
|
[
"MIT"
] | null | null | null |
stable_baselines/deepq_poher/dqn.py
|
kevslinger/stable-baselines
|
4bf9f3c1db49f462f5fb35df967d836d92a3dbcd
|
[
"MIT"
] | null | null | null |
stable_baselines/deepq_poher/dqn.py
|
kevslinger/stable-baselines
|
4bf9f3c1db49f462f5fb35df967d836d92a3dbcd
|
[
"MIT"
] | null | null | null |
from functools import partial
import tensorflow as tf
import numpy as np
import gym
# KEVIN ADD
import csv
import os
from stable_baselines import logger
from stable_baselines.common import tf_util, SetVerbosity, TensorboardWriter
from stable_baselines.common.vec_env import VecEnv
from stable_baselines.common.schedules import LinearSchedule
from stable_baselines.poher.buffers import ReplayBuffer
from stable_baselines.deepq_poher.build_graph import build_train
from stable_baselines.deepq_poher.policies import DQNPolicy
from stable_baselines.deepq_poher.base_class import OffPolicyRLModel
class DQNPOHER(OffPolicyRLModel):
    """
    The DQN model class.
    DQN paper: https://arxiv.org/abs/1312.5602
    Dueling DQN: https://arxiv.org/abs/1511.06581
    Double-Q Learning: https://arxiv.org/abs/1509.06461
    Prioritized Experience Replay: https://arxiv.org/abs/1511.05952

    :param policy: (DQNPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...)
    :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
    :param gamma: (float) discount factor
    :param learning_rate: (float) learning rate for adam optimizer
    :param buffer_size: (int) size of the replay buffer
    :param exploration_fraction: (float) fraction of entire training period over which the exploration rate is
            annealed
    :param exploration_final_eps: (float) final value of random action probability
    :param exploration_initial_eps: (float) initial value of random action probability
    :param train_freq: (int) update the model every `train_freq` steps
    :param batch_size: (int) size of a batched sampled from replay buffer for training
    :param double_q: (bool) Whether to enable Double-Q learning or not.
    :param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
    :param target_network_update_freq: (int) update the target network every `target_network_update_freq` steps.
    :param prioritized_replay: (bool) if True prioritized replay buffer will be used.
    :param prioritized_replay_alpha: (float) alpha parameter for prioritized replay buffer.
        It determines how much prioritization is used, with alpha=0 corresponding to the uniform case.
    :param prioritized_replay_beta0: (float) initial value of beta for prioritized replay buffer
    :param prioritized_replay_beta_iters: (int) number of iterations over which beta will be annealed from initial
        value to 1.0. If set to None equals to max_timesteps.
    :param prioritized_replay_eps: (float) epsilon to add to the TD errors when updating priorities.
    :param param_noise: (bool) Whether or not to apply noise to the parameters of the policy.
    :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
    :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
    :param logdir: (str) directory for CSV training logs (if None, no CSV logging)
    :param layers: (list) network layer sizes forwarded to build_train (if None, policy defaults)
    :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
    :param full_tensorboard_log: (bool) enable additional logging when using tensorboard
        WARNING: this logging can take a lot of space quickly
    :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
        If None (default), use random seed. Note that if you want completely deterministic
        results, you must set `n_cpu_tf_sess` to 1.
    :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
        If None, the number of cpu of the current machine will be used.
    :param _run: sacred Run object used for metric reporting (optional)
    """

    def __init__(self, policy, env, gamma=0.99, learning_rate=5e-4, buffer_size=50000, exploration_fraction=0.1,
                 exploration_final_eps=0.02, exploration_initial_eps=1.0, train_freq=1, batch_size=32, double_q=True,
                 learning_starts=1000, target_network_update_freq=500, prioritized_replay=False,
                 prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None,
                 prioritized_replay_eps=1e-6, param_noise=False,
                 n_cpu_tf_sess=None, verbose=0, tensorboard_log=None, logdir=None, layers=None,
                 _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False, seed=None, _run=None):

        # TODO: replay_buffer refactoring
        super(DQNPOHER, self).__init__(policy=policy, env=env, replay_buffer=None, verbose=verbose, policy_base=DQNPolicy,
                                       requires_vec_env=False, policy_kwargs=policy_kwargs, seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)

        # Hyperparameters (stored verbatim; consumed in setup_model/learn).
        self.param_noise = param_noise
        self.learning_starts = learning_starts
        self.train_freq = train_freq
        self.prioritized_replay = prioritized_replay
        self.prioritized_replay_eps = prioritized_replay_eps
        self.batch_size = batch_size
        self.target_network_update_freq = target_network_update_freq
        self.prioritized_replay_alpha = prioritized_replay_alpha
        self.prioritized_replay_beta0 = prioritized_replay_beta0
        self.prioritized_replay_beta_iters = prioritized_replay_beta_iters
        self.exploration_final_eps = exploration_final_eps
        self.exploration_initial_eps = exploration_initial_eps
        self.exploration_fraction = exploration_fraction
        self.buffer_size = buffer_size
        self.learning_rate = learning_rate
        self.gamma = gamma
        self.logdir = logdir
        self.tensorboard_log = tensorboard_log
        self.full_tensorboard_log = full_tensorboard_log
        self.double_q = double_q

        # TF graph/session handles and schedules, populated by setup_model()
        # and learn().
        self.graph = None
        self.sess = None
        self._train_step = None
        self.step_model = None
        self.update_target = None
        self.act = None
        self.proba_step = None
        self.replay_buffer = None
        self.beta_schedule = None
        self.exploration = None
        self.params = None
        self.summary = None
        self.layers = layers

        if _init_setup_model:
            self.setup_model()
        self._run = _run  # SACRED / OMNIBOARD REPORTING

    def _get_pretrain_placeholders(self):
        # Placeholders used by the generic pretraining code: observation
        # input, discrete action placeholder, and the Q-value output.
        policy = self.step_model
        return policy.obs_ph, tf.placeholder(tf.int32, [None]), policy.q_values

    def setup_model(self):
        """Build the TF graph: act function, train op, target-update op and
        the step model, using the reduced DQN observation space."""
        with SetVerbosity(self.verbose):
            assert not isinstance(self.action_space, gym.spaces.Box), \
                "Error: DQN cannot output a gym.spaces.Box action space."

            # If the policy is wrapped in functools.partial (e.g. to disable
            # dueling), unwrap it to check the class type.
            if isinstance(self.policy, partial):
                test_policy = self.policy.func
            else:
                test_policy = self.policy
            assert issubclass(test_policy, DQNPolicy), "Error: the input policy for the DQN model must be " \
                                                       "an instance of DQNPolicy."

            self.graph = tf.Graph()
            with self.graph.as_default():
                self.set_random_seed(self.seed)
                self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)

                optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)

                # Debug output left in by the author: show which observation
                # spaces are in play before the graph is built.
                print("*******************************")
                print("********************************")
                print(self.dqn_observation_space)
                print("That was the observation space right before build_train")
                print(self.her_observation_space)
                print("That was her obs space")
                print("********************************")
                print("********************************")
                #exit(0)
                # We need to give these functions the tiny observation space, not the full one
                self.act, self._train_step, self.update_target, self.step_model = build_train(
                    q_func=partial(self.policy, **self.policy_kwargs),
                    ob_space=self.dqn_observation_space,
                    ac_space=self.action_space,
                    optimizer=optimizer,
                    gamma=self.gamma,
                    grad_norm_clipping=10,
                    param_noise=self.param_noise,
                    sess=self.sess,
                    full_tensorboard_log=self.full_tensorboard_log,
                    double_q=self.double_q,
                    layers=self.layers
                )
                self.proba_step = self.step_model.proba_step
                self.params = tf_util.get_trainable_vars("deepq")
                # Break after build_train)
                #exit(0)
                # Initialize the parameters and copy them to the target network.
                tf_util.initialize(self.sess)
                self.update_target(sess=self.sess)

                self.summary = tf.summary.merge_all()

    def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="DQN",
              reset_num_timesteps=True, replay_wrapper=None):
        """Run the training loop: collect transitions with eps-greedy
        exploration, store them in the (possibly HER-wrapped) replay buffer,
        and periodically train and sync the target network."""

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        callback = self._init_callback(callback)

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()

            # Create the replay buffer
            self.replay_buffer = ReplayBuffer(self.buffer_size)
            self.beta_schedule = None

            # If a HER-style replay wrapper was supplied, wrap the plain
            # buffer with it; prioritized replay is unsupported there.
            if replay_wrapper is not None:
                assert not self.prioritized_replay, "Prioritized replay buffer is not supported by HER"
                self.replay_buffer = replay_wrapper(self.replay_buffer)

            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(schedule_timesteps=int(self.exploration_fraction * total_timesteps),
                                              initial_p=self.exploration_initial_eps,
                                              final_p=self.exploration_final_eps)

            episode_rewards = [0.0]
            episode_successes = []

            callback.on_training_start(locals(), globals())
            callback.on_rollout_start()

            reset = True
            obs = self.env.reset()
            print("The obs space is ")
            print(obs)
            #exit(0)
            # Retrieve unnormalized observation for saving into the buffer
            if self._vec_normalize_env is not None:
                # NOTE(review): dead branch in this project's configuration
                # (_vec_normalize_env is None); the exit(0) below makes it
                # fatal if it ever runs.
                print("Hello hi yes do we run this code")
                exit(0)
                obs_ = self._vec_normalize_env.get_original_obs().squeeze()

            for _ in range(total_timesteps):
                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    # eps-greedy path (the one exercised in this project)
                    update_eps = self.exploration.value(self.num_timesteps)
                    update_param_noise_threshold = 0.
                else:
                    # parameter-space-noise path (not used here)
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(self.num_timesteps) +
                                self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True

                with self.sess.as_default():
                    # The policy only sees the DQN observation (observation +
                    # desired_goal); the achieved_goal tail is sliced off
                    # before acting.
                    action = self.act(np.array(obs)[:self.env.dqn_observation_space.n][None], update_eps=update_eps, **kwargs)[0]
                env_action = action
                reset = False
                # these observations would come to us with all 3 parts (observation, desired_goal, achieved_goal)
                new_obs, rew, done, info = self.env.step(env_action)

                self.num_timesteps += 1

                # Stop training if return value is False
                callback.update_locals(locals())
                if callback.on_step() is False:
                    break

                # Store only the unnormalized version
                if self._vec_normalize_env is not None:
                    # not reached in this configuration (see above)
                    new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()
                    reward_ = self._vec_normalize_env.get_original_reward().squeeze()
                else:
                    # Avoid changing the original ones
                    obs_, new_obs_, reward_ = obs, new_obs, rew

                # Store transition in the replay buffer.
                # The full observation (all 3 parts) goes into the buffer so
                # HER can later relabel goals.
                self.replay_buffer_add(obs_, action, reward_, new_obs_, done, info)
                obs = new_obs
                # Save the unnormalized observation
                if self._vec_normalize_env is not None:
                    obs_ = new_obs_

                if writer is not None:
                    ep_rew = np.array([reward_]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    tf_util.total_episode_reward_logger(self.episode_reward, ep_rew, ep_done, writer,
                                                        self.num_timesteps)

                episode_rewards[-1] += reward_
                if done:
                    maybe_is_success = info.get('is_success')
                    if maybe_is_success is not None:
                        episode_successes.append(float(maybe_is_success))
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    reset = True

                # Do not train if the warmup phase is not over
                # or if there are not enough samples in the replay buffer
                can_sample = self.replay_buffer.can_sample(self.batch_size)
                if can_sample and self.num_timesteps > self.learning_starts \
                        and self.num_timesteps % self.train_freq == 0:
                    callback.on_rollout_end()
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    # The sample() call reduces observations back down to the
                    # two parts the network consumes (observation + goal).
                    obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(self.batch_size,
                                                                                            self.env.dqn_observation_space.n,
                                                                                            env=self._vec_normalize_env)
                    weights, batch_idxes = np.ones_like(rewards), None
                    # pytype:enable=bad-unpacking

                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        if (1 + self.num_timesteps) % 100 == 0:
                            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            # NOTE(review): obses_tp1 is passed twice —
                            # presumably build_train takes two next-obs
                            # placeholders; confirm against its signature.
                            summary, td_errors = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1,
                                                                  dones, weights, sess=self.sess, options=run_options,
                                                                  run_metadata=run_metadata)
                            writer.add_run_metadata(run_metadata, 'step%d' % self.num_timesteps)
                        else:
                            summary, td_errors = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1,
                                                                  dones, weights, sess=self.sess)
                        writer.add_summary(summary, self.num_timesteps)
                    else:
                        _, td_errors = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1, dones, weights,
                                                        sess=self.sess)

                    callback.on_rollout_start()

                if can_sample and self.num_timesteps > self.learning_starts and \
                        self.num_timesteps % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                # Rolling mean of the last 100 completed episodes.
                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)

                num_episodes = len(episode_rewards)
                if self.verbose >= 1 and done and log_interval is not None and len(episode_rewards) % log_interval == 0:
                    logger.record_tabular("steps", self.num_timesteps)
                    logger.record_tabular("episodes", num_episodes)
                    if len(episode_successes) > 0:
                        logger.logkv("success rate", np.mean(episode_successes[-100:]))
                    logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                    logger.record_tabular("% time spent exploring",
                                          int(100 * self.exploration.value(self.num_timesteps)))
                    logger.dump_tabular()
                    # Save updates to CSV
                    if self.logdir is not None:
                        if not os.path.isdir(self.logdir):
                            os.mkdir(self.logdir)
                        with open(f'{self.logdir}/output.csv', 'a', newline='') as csvfile:
                            csvwriter = csv.writer(csvfile, delimiter=',')
                            csvwriter.writerow([num_episodes, self.num_timesteps, mean_100ep_reward,
                                                np.mean(episode_successes[-100:])])
                    # Save updates to Sacred
                    if self._run is not None:
                        self._run.log_scalar('100-Episode Mean Reward', mean_100ep_reward, self.num_timesteps)
                        self._run.log_scalar('Success Rate', np.mean(episode_successes[-100:]), self.num_timesteps)

            callback.on_training_end()
        return self

    def predict(self, observation, state=None, mask=None, deterministic=True):
        # Greedy (deterministic by default) action selection for deployment;
        # reshapes to a batch, steps the model, and unwraps single inputs.
        observation = np.array(observation)
        vectorized_env = self._is_vectorized_observation(observation, self.observation_space)

        observation = observation.reshape((-1,) + self.observation_space.shape)
        with self.sess.as_default():
            actions, _, _ = self.step_model.step(observation, deterministic=deterministic)

        if not vectorized_env:
            actions = actions[0]

        return actions, None

    def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):
        # Return the policy's action distribution for the observation, or,
        # if `actions` is given, the probability of those specific actions.
        observation = np.array(observation)
        vectorized_env = self._is_vectorized_observation(observation, self.observation_space)

        observation = observation.reshape((-1,) + self.observation_space.shape)
        actions_proba = self.proba_step(observation, state, mask)

        if actions is not None:  # comparing the action distribution, to given actions
            actions = np.array([actions])
            assert isinstance(self.action_space, gym.spaces.Discrete)
            actions = actions.reshape((-1,))
            assert observation.shape[0] == actions.shape[0], "Error: batch sizes differ for actions and observations."
            actions_proba = actions_proba[np.arange(actions.shape[0]), actions]
            # normalize action proba shape
            actions_proba = actions_proba.reshape((-1, 1))
            if logp:
                actions_proba = np.log(actions_proba)

        if not vectorized_env:
            if state is not None:
                raise ValueError("Error: The environment must be vectorized when using recurrent policies.")
            actions_proba = actions_proba[0]

        return actions_proba

    def get_parameter_list(self):
        # Trainable variables under the "deepq" scope (set in setup_model).
        return self.params

    def save(self, save_path, cloudpickle=False):
        """Serialize hyperparameters and trainable weights to save_path."""
        # params
        data = {
            "double_q": self.double_q,
            "param_noise": self.param_noise,
            "learning_starts": self.learning_starts,
            "train_freq": self.train_freq,
            "prioritized_replay": self.prioritized_replay,
            "prioritized_replay_eps": self.prioritized_replay_eps,
            "batch_size": self.batch_size,
            "target_network_update_freq": self.target_network_update_freq,
            "prioritized_replay_alpha": self.prioritized_replay_alpha,
            "prioritized_replay_beta0": self.prioritized_replay_beta0,
            "prioritized_replay_beta_iters": self.prioritized_replay_beta_iters,
            "exploration_final_eps": self.exploration_final_eps,
            "exploration_fraction": self.exploration_fraction,
            "learning_rate": self.learning_rate,
            "gamma": self.gamma,
            "verbose": self.verbose,
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "policy": self.policy,
            "n_envs": self.n_envs,
            "n_cpu_tf_sess": self.n_cpu_tf_sess,
            "seed": self.seed,
            "_vectorize_action": self._vectorize_action,
            "policy_kwargs": self.policy_kwargs
        }

        params_to_save = self.get_parameters()

        self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
| 52.728111
| 129
| 0.602517
|
4a050a5c1682d36af4fc5ea55eedb6f0188cff80
| 10,124
|
py
|
Python
|
isi_sdk/models/quota_notification_extended.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
isi_sdk/models/quota_notification_extended.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
isi_sdk/models/quota_notification_extended.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
class QuotaNotificationExtended(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
QuotaNotificationExtended - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'action_alert': 'bool',
'action_email_address': 'str',
'action_email_owner': 'bool',
'email_template': 'str',
'holdoff': 'int',
'schedule': 'str',
'condition': 'str',
'id': 'str',
'threshold': 'str'
}
self.attribute_map = {
'action_alert': 'action_alert',
'action_email_address': 'action_email_address',
'action_email_owner': 'action_email_owner',
'email_template': 'email_template',
'holdoff': 'holdoff',
'schedule': 'schedule',
'condition': 'condition',
'id': 'id',
'threshold': 'threshold'
}
self._action_alert = None
self._action_email_address = None
self._action_email_owner = None
self._email_template = None
self._holdoff = None
self._schedule = None
self._condition = None
self._id = None
self._threshold = None
@property
def action_alert(self):
"""
Gets the action_alert of this QuotaNotificationExtended.
Send alert when rule matches.
:return: The action_alert of this QuotaNotificationExtended.
:rtype: bool
"""
return self._action_alert
@action_alert.setter
def action_alert(self, action_alert):
"""
Sets the action_alert of this QuotaNotificationExtended.
Send alert when rule matches.
:param action_alert: The action_alert of this QuotaNotificationExtended.
:type: bool
"""
self._action_alert = action_alert
@property
def action_email_address(self):
"""
Gets the action_email_address of this QuotaNotificationExtended.
Email a specific email address when rule matches.
:return: The action_email_address of this QuotaNotificationExtended.
:rtype: str
"""
return self._action_email_address
@action_email_address.setter
def action_email_address(self, action_email_address):
"""
Sets the action_email_address of this QuotaNotificationExtended.
Email a specific email address when rule matches.
:param action_email_address: The action_email_address of this QuotaNotificationExtended.
:type: str
"""
self._action_email_address = action_email_address
@property
def action_email_owner(self):
"""
Gets the action_email_owner of this QuotaNotificationExtended.
Email quota domain owner when rule matches.
:return: The action_email_owner of this QuotaNotificationExtended.
:rtype: bool
"""
return self._action_email_owner
@action_email_owner.setter
def action_email_owner(self, action_email_owner):
"""
Sets the action_email_owner of this QuotaNotificationExtended.
Email quota domain owner when rule matches.
:param action_email_owner: The action_email_owner of this QuotaNotificationExtended.
:type: bool
"""
self._action_email_owner = action_email_owner
@property
def email_template(self):
"""
Gets the email_template of this QuotaNotificationExtended.
Path of optional /ifs template file used for email actions.
:return: The email_template of this QuotaNotificationExtended.
:rtype: str
"""
return self._email_template
@email_template.setter
def email_template(self, email_template):
"""
Sets the email_template of this QuotaNotificationExtended.
Path of optional /ifs template file used for email actions.
:param email_template: The email_template of this QuotaNotificationExtended.
:type: str
"""
self._email_template = email_template
@property
def holdoff(self):
"""
Gets the holdoff of this QuotaNotificationExtended.
Time to wait between detections for rules triggered by user actions.
:return: The holdoff of this QuotaNotificationExtended.
:rtype: int
"""
return self._holdoff
@holdoff.setter
def holdoff(self, holdoff):
"""
Sets the holdoff of this QuotaNotificationExtended.
Time to wait between detections for rules triggered by user actions.
:param holdoff: The holdoff of this QuotaNotificationExtended.
:type: int
"""
self._holdoff = holdoff
@property
def schedule(self):
"""
Gets the schedule of this QuotaNotificationExtended.
Schedule for rules that repeatedly notify.
:return: The schedule of this QuotaNotificationExtended.
:rtype: str
"""
return self._schedule
@schedule.setter
def schedule(self, schedule):
"""
Sets the schedule of this QuotaNotificationExtended.
Schedule for rules that repeatedly notify.
:param schedule: The schedule of this QuotaNotificationExtended.
:type: str
"""
self._schedule = schedule
@property
def condition(self):
"""
Gets the condition of this QuotaNotificationExtended.
The condition detected.
:return: The condition of this QuotaNotificationExtended.
:rtype: str
"""
return self._condition
@condition.setter
def condition(self, condition):
"""
Sets the condition of this QuotaNotificationExtended.
The condition detected.
:param condition: The condition of this QuotaNotificationExtended.
:type: str
"""
allowed_values = ["exceeded", "denied", "violated", "expired"]
if condition not in allowed_values:
raise ValueError(
"Invalid value for `condition`, must be one of {0}"
.format(allowed_values)
)
self._condition = condition
@property
def id(self):
    """The system ID given to the rule.

    :return: The id of this QuotaNotificationExtended.
    :rtype: str
    """
    return self._id


@id.setter
def id(self, id):
    """Set the system ID given to the rule.

    :param id: The id of this QuotaNotificationExtended.
    :type: str
    """
    self._id = id
@property
def threshold(self):
    """The quota threshold detected.

    :return: The threshold of this QuotaNotificationExtended.
    :rtype: str
    """
    return self._threshold


@threshold.setter
def threshold(self, threshold):
    """Set the quota threshold detected.

    Only the enum values listed below are accepted; anything else raises
    ValueError.

    :param threshold: The threshold of this QuotaNotificationExtended.
    :type: str
    """
    allowed_values = ["hard", "soft", "advisory"]
    if threshold not in allowed_values:
        raise ValueError(
            "Invalid value for `threshold`, must be one of {0}"
            .format(allowed_values)
        )
    self._threshold = threshold
def to_dict(self):
    """Return the model properties as a plain dict.

    Attribute values that are themselves models (anything exposing a
    ``to_dict`` method), or lists/dicts containing such models, are
    serialized recursively; everything else is passed through unchanged.
    """
    result = {}
    for attr in self.swagger_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in value
            ]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {
                key: val.to_dict() if hasattr(val, "to_dict") else val
                for key, val in value.items()
            }
        else:
            result[attr] = value
    return result
def to_str(self):
    """Return a pretty-printed string form of the model's dict representation."""
    return pformat(self.to_dict())
def __repr__(self):
    """
    For `print` and `pprint`: delegates to to_str().
    """
    return self.to_str()
def __eq__(self, other):
    """
    Returns true if both objects are equal.
    """
    # Compares the full attribute dicts.
    # NOTE(review): `other` is assumed to expose __dict__; comparing
    # against a slotted/builtin object raises AttributeError — confirm
    # callers never do that.
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """
    Returns true if both objects are not equal.
    """
    # Defined in terms of == so the two stay consistent.
    return not self == other
| 29.516035
| 96
| 0.600454
|
4a050b874875af324414776040125aee96270dbd
| 294
|
py
|
Python
|
neovim_pytc/__init__.py
|
mvilim/neovim-pytc-example
|
5d0927c7eb57a14813900a4b142b8640a7963cfe
|
[
"MIT"
] | 21
|
2019-04-03T09:59:13.000Z
|
2022-03-09T06:38:10.000Z
|
neovim_pytc/__init__.py
|
mvilim/neovim-pytc-example
|
5d0927c7eb57a14813900a4b142b8640a7963cfe
|
[
"MIT"
] | null | null | null |
neovim_pytc/__init__.py
|
mvilim/neovim-pytc-example
|
5d0927c7eb57a14813900a4b142b8640a7963cfe
|
[
"MIT"
] | 3
|
2020-02-01T03:42:02.000Z
|
2021-04-22T13:24:18.000Z
|
# This file is part of the neovim-pytc-example. It is currently hosted at
# https://github.com/mvilim/neovim-pytc-example
#
# neovim-pytc-example is licensed under the MIT license. A copy of the license can be
# found in the root folder of the project.
from neovim_pytc.neovim_pytc import run
| 36.75
| 85
| 0.77551
|
4a050da756379e11c19ca6efff7eb72539c598ed
| 3,450
|
py
|
Python
|
GPF/NE/graph.py
|
seeker1943/GPF
|
478e3c121f8ca774b9c6fefcfe1180ab4b7aa918
|
[
"MIT"
] | 104
|
2018-11-04T04:47:13.000Z
|
2022-02-26T11:52:47.000Z
|
GPF/NE/graph.py
|
seeker1943/GPF
|
478e3c121f8ca774b9c6fefcfe1180ab4b7aa918
|
[
"MIT"
] | 4
|
2019-03-03T01:35:57.000Z
|
2021-05-08T13:59:44.000Z
|
GPF/NE/graph.py
|
seeker1943/GPF
|
478e3c121f8ca774b9c6fefcfe1180ab4b7aa918
|
[
"MIT"
] | 25
|
2019-02-01T07:19:18.000Z
|
2022-01-25T06:11:29.000Z
|
"""Graph utilities."""
# from time import time
import networkx as nx
import pickle as pkl
import numpy as np
import scipy.sparse as sp
class Graph(object):
    """In-memory wrapper around a networkx DiGraph with dense node encoding.

    Maintains a bidirectional mapping between original node labels and
    dense integer ids: ``look_up_dict`` (label -> id) and
    ``look_back_list`` (id -> label). Fix over the original: all file
    readers now use context managers so handles are closed even when a
    malformed line raises, and line iteration replaces manual
    ``readline`` loops (same lines processed, same errors on blanks).
    """

    def __init__(self):
        self.G = None               # underlying networkx DiGraph
        self.look_up_dict = {}      # node label -> dense integer id
        self.look_back_list = []    # dense integer id -> node label
        self.node_size = 0          # number of encoded nodes

    def encode_node(self):
        """Assign a dense integer id to every node and reset its 'status'."""
        for node in self.G.nodes():
            self.look_up_dict[node] = self.node_size
            self.look_back_list.append(node)
            self.node_size += 1
            self.G.nodes[node]['status'] = ''

    def read_g(self, g):
        """Adopt an existing networkx graph and encode its nodes."""
        self.G = g
        self.encode_node()

    def read_adjlist(self, filename):
        """Read graph from adjacency file in which the edge must be unweighted
        the format of each line: v1 n1 n2 n3 ... nk

        Every edge gets weight 1.0.

        :param filename: the filename of input file
        """
        self.G = nx.read_adjlist(filename, create_using=nx.DiGraph())
        for i, j in self.G.edges():
            self.G[i][j]['weight'] = 1.0
        self.encode_node()

    def read_edgelist(self, filename, weighted=False, directed=False):
        """Read a graph from an edge-list file.

        Lines are ``src dst`` (unweighted, weight forced to 1.0) or
        ``src dst w`` (weighted). When ``directed`` is false both edge
        directions are added with the same weight.
        """
        self.G = nx.DiGraph()

        def add_edge(src, dst, w):
            # One-line helper so directed/undirected share the same path.
            self.G.add_edge(src, dst)
            self.G[src][dst]['weight'] = w
            if not directed:
                self.G.add_edge(dst, src)
                self.G[dst][src]['weight'] = w

        with open(filename, 'r') as fin:
            for line in fin:
                if weighted:
                    src, dst, w = line.split()
                    add_edge(src, dst, float(w))
                else:
                    src, dst = line.split()
                    add_edge(src, dst, 1.0)
        self.encode_node()

    def read_node_label(self, filename):
        """Read per-node labels; each line is ``node label1 [label2 ...]``."""
        with open(filename, 'r') as fin:
            for line in fin:
                vec = line.split()
                self.G.nodes[vec[0]]['label'] = vec[1:]

    def read_node_features(self, filename):
        """Read per-node feature vectors; each line is ``node f1 f2 ...``."""
        with open(filename, 'r') as fin:
            for line in fin:
                vec = line.split()
                self.G.nodes[vec[0]]['feature'] = np.array(
                    [float(x) for x in vec[1:]])

    def read_node_status(self, filename):
        """Read per-node split status; each line is ``node status``."""
        with open(filename, 'r') as fin:
            for line in fin:
                vec = line.split()
                self.G.nodes[vec[0]]['status'] = vec[1]  # train test valid

    def read_edge_label(self, filename):
        """Read per-edge labels; each line is ``src dst label1 [label2 ...]``."""
        with open(filename, 'r') as fin:
            for line in fin:
                vec = line.split()
                self.G[vec[0]][vec[1]]['label'] = vec[2:]
| 30
| 79
| 0.484348
|
4a050e5a99a6c3753a582f688f1d4ff35ed20a00
| 8,973
|
py
|
Python
|
samples/openapi3/client/petstore/python/petstore_api/model/additional_properties_class.py
|
gasugesu/openapi-generator
|
e1c43f135639b9f300350f788fec98bbc375c932
|
[
"Apache-2.0"
] | 3
|
2021-05-19T03:12:48.000Z
|
2022-01-28T19:15:42.000Z
|
samples/openapi3/client/petstore/python/petstore_api/model/additional_properties_class.py
|
gasugesu/openapi-generator
|
e1c43f135639b9f300350f788fec98bbc375c932
|
[
"Apache-2.0"
] | 3
|
2021-05-11T23:55:26.000Z
|
2022-02-27T11:17:21.000Z
|
samples/openapi3/client/petstore/python/petstore_api/model/additional_properties_class.py
|
gasugesu/openapi-generator
|
e1c43f135639b9f300350f788fec98bbc375c932
|
[
"Apache-2.0"
] | 1
|
2020-10-05T11:13:04.000Z
|
2020-10-05T11:13:04.000Z
|
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class AdditionalPropertiesClass(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained attributes in this model.
    allowed_values = {
    }

    # No length/range/regex validations in this model.
    validations = {
    }

    # None: undeclared properties are rejected (subject to configuration).
    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'map_property': ({str: (str,)},),  # noqa: E501
            'map_of_map_property': ({str: ({str: (str,)},)},),  # noqa: E501
            'anytype_1': (bool, date, datetime, dict, float, int, list, str, none_type,),  # noqa: E501
            'map_with_undeclared_properties_anytype_1': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},),  # noqa: E501
            'map_with_undeclared_properties_anytype_2': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},),  # noqa: E501
            'map_with_undeclared_properties_anytype_3': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},),  # noqa: E501
            'empty_map': (bool, date, datetime, dict, float, int, list, str,),  # noqa: E501
            'map_with_undeclared_properties_string': ({str: (str,)},),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This model has no discriminator field.
        return None

    # Python attribute name -> JSON key (identical here).
    attribute_map = {
        'map_property': 'map_property',  # noqa: E501
        'map_of_map_property': 'map_of_map_property',  # noqa: E501
        'anytype_1': 'anytype_1',  # noqa: E501
        'map_with_undeclared_properties_anytype_1': 'map_with_undeclared_properties_anytype_1',  # noqa: E501
        'map_with_undeclared_properties_anytype_2': 'map_with_undeclared_properties_anytype_2',  # noqa: E501
        'map_with_undeclared_properties_anytype_3': 'map_with_undeclared_properties_anytype_3',  # noqa: E501
        'empty_map': 'empty_map',  # noqa: E501
        'map_with_undeclared_properties_string': 'map_with_undeclared_properties_string',  # noqa: E501
    }

    _composed_schemas = {}

    # Internal bookkeeping attributes that are never treated as model data.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """AdditionalPropertiesClass - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            map_property ({str: (str,)}): [optional]  # noqa: E501
            map_of_map_property ({str: ({str: (str,)},)}): [optional]  # noqa: E501
            anytype_1 (bool, date, datetime, dict, float, int, list, str, none_type): [optional]  # noqa: E501
            map_with_undeclared_properties_anytype_1 ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): [optional]  # noqa: E501
            map_with_undeclared_properties_anytype_2 ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): [optional]  # noqa: E501
            map_with_undeclared_properties_anytype_3 ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): [optional]  # noqa: E501
            empty_map (bool, date, datetime, dict, float, int, list, str): an object with no declared properties and no undeclared properties, hence it's an empty map.. [optional]  # noqa: E501
            map_with_undeclared_properties_string ({str: (str,)}): [optional]  # noqa: E501
        """

        # Pop framework-control kwargs before treating the rest as data.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| 47.226316
| 193
| 0.607378
|
4a050f468ccf0764081362bfd073f88f0cbe7701
| 2,750
|
py
|
Python
|
common/jsonn.py
|
wood-j/clipboard-py
|
4001e92c26279b28b55bd75d8627036ac436c1e1
|
[
"MIT"
] | 1
|
2019-11-28T15:48:07.000Z
|
2019-11-28T15:48:07.000Z
|
common/jsonn.py
|
wood-j/clipboard-py
|
4001e92c26279b28b55bd75d8627036ac436c1e1
|
[
"MIT"
] | null | null | null |
common/jsonn.py
|
wood-j/clipboard-py
|
4001e92c26279b28b55bd75d8627036ac436c1e1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
from datetime import datetime, date, time
from common.log import logger
class JsonSerializable(object):
    """Mixin giving attribute-bag objects dict/JSON serialization.

    ``dump_dict``/``dump_json`` recursively convert nested JsonSerializable
    values (including those held in lists, tuples and dicts); ``load_dict``
    restores already-declared attributes from a plain dict.
    """

    def dump_json(self):
        """Return the object as an indented, non-ASCII-escaped JSON string."""
        dic = self.dump_dict()
        return json.dumps(dic, indent=4, ensure_ascii=False, cls=DefaultEncoder)

    def dump_dict(self):
        """Return a plain-dict view of this object's attributes.

        Bug fix: the original implementation recursed over ``self.__dict__``
        *in place*, permanently replacing nested JsonSerializable attributes
        (and nested dict entries) with plain dicts on the live object. This
        version builds new dicts instead, leaving the object untouched.
        """
        def convert(value):
            if isinstance(value, JsonSerializable):
                return value.dump_dict()
            if isinstance(value, (list, tuple)) and value:
                # Only homogeneous containers of serializable objects are
                # converted (matching the original first-element check).
                if isinstance(value[0], JsonSerializable):
                    return [x.dump_dict() for x in value]
                return value
            if isinstance(value, dict) and value:
                return {key: convert(val) for key, val in value.items()}
            return value

        return {key: convert(value) for key, value in self.__dict__.items()}

    def load_dict(self, dic: dict):
        """Copy known keys of ``dic`` onto matching attributes.

        Keys the object does not already have are skipped with a warning.
        """
        for key, value in dic.items():
            if not hasattr(self, key):
                logger.warning(f'"JsonSerializable" load from dict skip unexpected property name: {key}')
                continue
            setattr(self, key, value)
class DefaultEncoder(json.JSONEncoder):
    """JSON encoder that understands datetime/date/time values,
    JsonSerializable objects, and arbitrary objects with a __dict__."""

    def default(self, field):
        # Order matters: datetime is a subclass of date, so test it first.
        if isinstance(field, datetime):
            return field.strftime('%Y-%m-%d %H:%M:%S')
        if isinstance(field, date):
            return field.strftime('%Y-%m-%d')
        if isinstance(field, time):
            return field.strftime('%H:%M:%S')
        if isinstance(field, JsonSerializable):
            return field.dump_dict()
        if hasattr(field, '__dict__'):
            return field.__dict__
        return json.JSONEncoder.default(self, field)
class SampleChild(JsonSerializable):
    """Minimal serializable demo payload: a fixed name plus the current
    date and time."""

    def __init__(self):
        self.child_name = 'child'
        self.date = datetime.now().date()
        self.time = datetime.now().time()
class Sample(JsonSerializable):
    """Demo object exercising nested serialization: a SampleChild in every
    supported container shape (direct attribute, tuple, list, dict)."""

    def __init__(self):
        self.text = 'sample'
        self.date = datetime.now().date()
        self.time = datetime.now().time()
        self.default_child = SampleChild()
        self.child_tuple = (
            SampleChild(),
            SampleChild(),
            SampleChild(),
        )
        self.child_list = [
            SampleChild(),
            SampleChild(),
            SampleChild(),
        ]
        self.child_dic = {
            '1': SampleChild(),
            '2': SampleChild(),
            '3': SampleChild(),
        }
if __name__ == '__main__':
    # Demo: serialize a Sample (with nested children) and log the JSON.
    sample = Sample()
    js = sample.dump_json()
    logger.debug(js)
    pass
| 30.898876
| 105
| 0.557455
|
4a050fd181300f5e725afac84cd0d1615d6d4dd5
| 14,187
|
py
|
Python
|
main.py
|
tileb1/LearnTrajDep
|
65849984fde5064974e06e6571688d8fed7a4794
|
[
"MIT"
] | null | null | null |
main.py
|
tileb1/LearnTrajDep
|
65849984fde5064974e06e6571688d8fed7a4794
|
[
"MIT"
] | null | null | null |
main.py
|
tileb1/LearnTrajDep
|
65849984fde5064974e06e6571688d8fed7a4794
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""overall code framework is adapped from https://github.com/weigq/3d_pose_baseline_pytorch"""
from __future__ import print_function, absolute_import, division
import os
import time
import torch
import torch.nn as nn
import torch.optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
import numpy as np
from progress.bar import Bar
import pandas as pd
from utils import loss_funcs, utils as utils
from utils.opt import Options
from utils.h36motion import H36motion
import utils.model as nnmodel
import utils.data_utils as data_utils
from utils.constants import *
def main(opt):
    """Train/evaluate the GCN motion-prediction model in angle space.

    Builds H3.6M data loaders, optionally resumes from a checkpoint, then
    runs the per-epoch train/val/test loop, appending per-action errors to
    a CSV log and saving best/last checkpoints.
    """
    start_epoch = 0
    err_best = 10000
    lr_now = opt.lr

    # define log csv file
    script_name = os.path.basename(__file__).split('.')[0]
    script_name = script_name + "_in{:d}_out{:d}_dctn{:d}".format(opt.input_n, opt.output_n, opt.dct_n)

    # create model
    print(">>> creating model")
    input_n = opt.input_n
    output_n = opt.output_n
    dct_n = opt.dct_n
    sample_rate = opt.sample_rate

    # 48 nodes for angle prediction
    model = nnmodel.GCN(input_feature=dct_n, hidden_feature=opt.linear_size, p_dropout=opt.dropout,
                        num_stage=opt.num_stage, node_n=48)
    # NOTE(review): `is_cuda` is not defined in this function — presumably
    # provided by `from utils.constants import *` above; confirm.
    if is_cuda:
        model.cuda()

    print(">>> total params: {:.2f}M".format(sum(p.numel() for p in model.parameters()) / 1000000.0))
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # continue from checkpoint
    if opt.is_load:
        model_path_len = 'checkpoint/test/ckpt_main_gcn_muti_att_best.pth.tar'
        print(">>> loading ckpt len from '{}'".format(model_path_len))
        if is_cuda:
            ckpt = torch.load(model_path_len)
        else:
            ckpt = torch.load(model_path_len, map_location='cpu')
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt len loaded (epoch: {} | err: {})".format(start_epoch, err_best))

    # data loading: split 0 = train, 2 = validation, 1 = test (per action)
    print(">>> loading data")
    train_dataset = H36motion(path_to_data=opt.data_dir, actions='all', input_n=input_n, output_n=output_n,
                              split=0, sample_rate=sample_rate, dct_n=dct_n)
    # Normalization stats come from the training split and are reused below.
    data_std = train_dataset.data_std
    data_mean = train_dataset.data_mean
    val_dataset = H36motion(path_to_data=opt.data_dir, actions='all', input_n=input_n, output_n=output_n,
                            split=2, sample_rate=sample_rate, data_mean=data_mean, data_std=data_std, dct_n=dct_n)

    # load dadasets for training
    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=opt.train_batch,
        shuffle=True,
        num_workers=opt.job,
        pin_memory=True)
    val_loader = DataLoader(
        dataset=val_dataset,
        batch_size=opt.test_batch,
        shuffle=False,
        num_workers=opt.job,
        pin_memory=True)
    # One test loader per action so errors can be reported per action.
    acts = data_utils.define_actions('all')
    test_data = dict()
    for act in acts:
        test_dataset = H36motion(path_to_data=opt.data_dir, actions=act, input_n=input_n, output_n=output_n, split=1,
                                 sample_rate=sample_rate, data_mean=data_mean, data_std=data_std, dct_n=dct_n)
        test_data[act] = DataLoader(
            dataset=test_dataset,
            batch_size=opt.test_batch,
            shuffle=False,
            num_workers=opt.job,
            pin_memory=True)
    print(">>> data loaded !")
    print(">>> train data {}".format(train_dataset.__len__()))
    print(">>> validation data {}".format(val_dataset.__len__()))

    for epoch in range(start_epoch, opt.epochs):
        if (epoch + 1) % opt.lr_decay == 0:
            lr_now = utils.lr_decay(optimizer, lr_now, opt.lr_gamma)
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        # ret_log/head accumulate one CSV row (values/column names) per epoch
        ret_log = np.array([epoch + 1])
        head = np.array(['epoch'])

        # per epoch
        lr_now, t_l, t_e, t_3d = train(train_loader, model, optimizer, input_n=input_n,
                                       lr_now=lr_now, max_norm=opt.max_norm, is_cuda=is_cuda,
                                       dim_used=train_dataset.dim_used, dct_n=dct_n)
        ret_log = np.append(ret_log, [lr_now, t_l, t_e, t_3d])
        head = np.append(head, ['lr', 't_l', 't_e', 't_3d'])
        v_e, v_3d = val(val_loader, model, input_n=input_n, is_cuda=is_cuda, dim_used=train_dataset.dim_used,
                        dct_n=dct_n)
        ret_log = np.append(ret_log, [v_e, v_3d])
        head = np.append(head, ['v_e', 'v_3d'])

        # Per-action test errors: angle columns first, 3d columns appended after.
        test_3d_temp = np.array([])
        test_3d_head = np.array([])
        for act in acts:
            test_e, test_3d = test(test_data[act], model, input_n=input_n, output_n=output_n, is_cuda=is_cuda,
                                   dim_used=train_dataset.dim_used, dct_n=dct_n)
            ret_log = np.append(ret_log, test_e)
            test_3d_temp = np.append(test_3d_temp, test_3d)
            test_3d_head = np.append(test_3d_head,
                                     [act + '3d80', act + '3d160', act + '3d320', act + '3d400'])
            head = np.append(head, [act + '80', act + '160', act + '320', act + '400'])
            if output_n > 10:
                head = np.append(head, [act + '560', act + '1000'])
                test_3d_head = np.append(test_3d_head,
                                         [act + '3d560', act + '3d1000'])
        ret_log = np.append(ret_log, test_3d_temp)
        head = np.append(head, test_3d_head)

        # update log file and save checkpoint
        df = pd.DataFrame(np.expand_dims(ret_log, axis=0))
        if epoch == start_epoch:
            df.to_csv(opt.ckpt + '/' + script_name + '.csv', header=head, index=False)
        else:
            with open(opt.ckpt + '/' + script_name + '.csv', 'a') as f:
                df.to_csv(f, header=False, index=False)
        # "best" is decided on validation euler error.
        if not np.isnan(v_e):
            is_best = v_e < err_best
            err_best = min(v_e, err_best)
        else:
            is_best = False
        file_name = ['ckpt_' + script_name + '_best.pth.tar', 'ckpt_' + script_name + '_last.pth.tar']
        # NOTE(review): 'err' stores test_e[0] of the *last* action in the
        # loop above — looks accidental; confirm intended.
        utils.save_ckpt({'epoch': epoch + 1,
                         'lr': lr_now,
                         'err': test_e[0],
                         'state_dict': model.state_dict(),
                         'optimizer': optimizer.state_dict()},
                        ckpt_path=opt.ckpt,
                        is_best=is_best,
                        file_name=file_name)
def train(train_loader, model, optimizer, input_n=20, dct_n=20, lr_now=None, max_norm=True, is_cuda=False, dim_used=[]):
    """Run one training epoch.

    Returns (lr_now, avg_loss, avg_euler_error, avg_3d_error), each error
    averaged over samples via AccumLoss.
    """
    t_l = utils.AccumLoss()
    t_e = utils.AccumLoss()
    t_3d = utils.AccumLoss()

    model.train()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):
        inputs = inputs.float()
        targets = targets.float()
        all_seq = all_seq.float()

        # skip the last batch if only have one sample for batch_norm layers
        batch_size = inputs.shape[0]
        if batch_size == 1:
            continue

        bt = time.time()
        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            all_seq = all_seq.to('cuda').float()

        outputs = model(inputs)
        n = outputs.shape[0]
        outputs = outputs.view(n, -1)

        # Training loss is computed in DCT/sequence space.
        loss = loss_funcs.sen_loss(outputs, all_seq, dim_used, dct_n)

        # calculate loss and backward
        optimizer.zero_grad()
        loss.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()
        n, _, _ = all_seq.data.shape

        # 3d error
        m_err = loss_funcs.mpjpe_error(outputs, all_seq, input_n, dim_used, dct_n)
        # angle space error
        e_err = loss_funcs.euler_error(outputs, all_seq, input_n, dim_used, dct_n)

        # update the training loss (weighted by batch size n)
        t_l.update(loss.cpu().data.numpy() * n, n)
        t_e.update(e_err.cpu().data.numpy() * n, n)
        t_3d.update(m_err.cpu().data.numpy() * n, n)

        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(i + 1, len(train_loader), time.time() - bt,
                                                                        time.time() - st)
        bar.next()
    bar.finish()
    return lr_now, t_l.avg, t_e.avg, t_3d.avg
def test(train_loader, model, input_n=20, output_n=50, dct_n=20, is_cuda=False, dim_used=[]):
    """Evaluate the model on a test loader.

    Returns (euler_errors, position_errors): per-evaluation-frame averages
    over all samples, each a numpy array of shape (len(eval_frame),).

    Fixes vs. original: removed leftover per-batch debug prints of tensor
    shapes, and unsupported ``output_n`` values now raise a clear
    ValueError instead of an UnboundLocalError on ``eval_frame``.
    """
    N = 0
    # Frame indices (into the predicted horizon) at which errors are
    # reported; the longer list covers long-term prediction.
    if output_n >= 25:
        eval_frame = [1, 3, 7, 9, 13, 24]
    elif output_n == 10:
        eval_frame = [1, 3, 7, 9]
    else:
        raise ValueError("unsupported output_n: {}".format(output_n))
    t_e = np.zeros(len(eval_frame))
    t_3d = np.zeros(len(eval_frame))

    model.eval()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):
        bt = time.time()
        inputs = inputs.float()
        targets = targets.float()
        all_seq = all_seq.float()
        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            all_seq = all_seq.to('cuda').float()

        outputs = model(inputs)
        n = outputs.shape[0]

        n, seq_len, dim_full_len = all_seq.data.shape
        dim_used_len = len(dim_used)

        # inverse dct transformation: recover the time sequence from the
        # predicted DCT coefficients.
        _, idct_m = data_utils.get_dct_matrix(seq_len)
        idct_m = torch.from_numpy(idct_m).float()
        if is_cuda:
            idct_m = idct_m.to('cuda')
        outputs_t = outputs.view(-1, dct_n).transpose(0, 1)
        outputs_exp = torch.matmul(idct_m[:, :dct_n], outputs_t).transpose(0, 1).contiguous().view(-1, dim_used_len,
                                                                                                   seq_len).transpose(1,
                                                                                                                      2)
        # Fill predictions back into the full-dimensional sequence, keeping
        # ground truth on unused dimensions.
        pred_expmap = all_seq.clone()
        dim_used = np.array(dim_used)
        pred_expmap[:, :, dim_used] = outputs_exp
        pred_expmap = pred_expmap[:, input_n:, :].contiguous().view(-1, dim_full_len)
        targ_expmap = all_seq[:, input_n:, :].clone().contiguous().view(-1, dim_full_len)

        # Zero the first 6 dims (ignored in error computation) on both.
        pred_expmap[:, 0:6] = 0
        targ_expmap[:, 0:6] = 0
        pred_expmap = pred_expmap.view(-1, 3)
        targ_expmap = targ_expmap.view(-1, 3)

        # get euler angles from expmap
        pred_eul = data_utils.rotmat2euler_torch(data_utils.expmap2rotmat_torch(pred_expmap))
        pred_eul = pred_eul.view(-1, dim_full_len).view(-1, output_n, dim_full_len)
        targ_eul = data_utils.rotmat2euler_torch(data_utils.expmap2rotmat_torch(targ_expmap))
        targ_eul = targ_eul.view(-1, dim_full_len).view(-1, output_n, dim_full_len)

        # get 3d coordinates
        targ_p3d = data_utils.expmap2xyz_torch(targ_expmap.view(-1, dim_full_len)).view(n, output_n, -1, 3)
        pred_p3d = data_utils.expmap2xyz_torch(pred_expmap.view(-1, dim_full_len)).view(n, output_n, -1, 3)

        # update loss and testing errors (sums weighted by batch size n,
        # normalized by N on return)
        for k in np.arange(0, len(eval_frame)):
            j = eval_frame[k]
            t_e[k] += torch.mean(torch.norm(pred_eul[:, j, :] - targ_eul[:, j, :], 2, 1)).cpu().data.numpy() * n
            t_3d[k] += torch.mean(torch.norm(
                targ_p3d[:, j, :, :].contiguous().view(-1, 3) - pred_p3d[:, j, :, :].contiguous().view(-1, 3), 2,
                1)).cpu().data.numpy() * n

        N += n
        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(i + 1, len(train_loader), time.time() - bt,
                                                                        time.time() - st)
        bar.next()
    bar.finish()
    return t_e / N, t_3d / N
def val(train_loader, model, input_n=20, dct_n=20, is_cuda=False, dim_used=[]):
    """Evaluate on the validation loader.

    Returns (avg_euler_error, avg_3d_error) averaged over samples.
    """
    t_e = utils.AccumLoss()
    t_3d = utils.AccumLoss()

    model.eval()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):
        bt = time.time()
        inputs = inputs.float()
        targets = targets.float()
        all_seq = all_seq.float()
        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            all_seq = all_seq.to('cuda').float()

        outputs = model(inputs)
        n = outputs.shape[0]
        outputs = outputs.view(n, -1)
        n, _, _ = all_seq.data.shape

        # Same error metrics as training, but no backward pass.
        m_err = loss_funcs.mpjpe_error(outputs, all_seq, input_n, dim_used, dct_n)
        e_err = loss_funcs.euler_error(outputs, all_seq, input_n, dim_used, dct_n)

        t_e.update(e_err.cpu().data.numpy() * n, n)
        t_3d.update(m_err.cpu().data.numpy() * n, n)

        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(i + 1, len(train_loader), time.time() - bt,
                                                                        time.time() - st)
        bar.next()
    bar.finish()
    return t_e.avg, t_3d.avg
if __name__ == "__main__":
    # Parse command-line options and launch training.
    option = Options().parse()
    main(option)
| 38.868493
| 120
| 0.57306
|
4a051042028528ddf47b6de5902273a353d537be
| 3,563
|
py
|
Python
|
Exercise-2/sensor_stick/scripts/segmentation.py
|
congthanh184/RoboND-Perception-Exercises
|
2716a2681e9c5718064bff296eb865bfba821f4f
|
[
"MIT"
] | null | null | null |
Exercise-2/sensor_stick/scripts/segmentation.py
|
congthanh184/RoboND-Perception-Exercises
|
2716a2681e9c5718064bff296eb865bfba821f4f
|
[
"MIT"
] | null | null | null |
Exercise-2/sensor_stick/scripts/segmentation.py
|
congthanh184/RoboND-Perception-Exercises
|
2716a2681e9c5718064bff296eb865bfba821f4f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Import modules
from pcl_helper import *
# TODO: Define functions as required
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
    """Segment an incoming point cloud into table plane, tabletop objects,
    and per-object colored clusters, and republish each on its own topic."""
    # Convert ROS msg to PCL data
    cloud = ros_to_pcl(pcl_msg)

    # Voxel Grid Downsampling with a 1 cm leaf to reduce point count
    vox = cloud.make_voxel_grid_filter()
    LEAF_SIZE = 0.01
    vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
    cloud_filtered = vox.filter()

    # PassThrough Filter: keep only the 0.6–1.1 m band along z
    # (the table surface and what sits on it)
    passthrough = cloud_filtered.make_passthrough_filter()
    filter_axis = 'z'
    passthrough.set_filter_field_name(filter_axis)
    axis_min = 0.6
    axis_max = 1.1
    passthrough.set_filter_limits(axis_min, axis_max)
    cloud_filtered = passthrough.filter()

    # RANSAC Plane Segmentation to find the table plane
    seg = cloud_filtered.make_segmenter()
    # Set the model you wish to fit
    seg.set_model_type(pcl.SACMODEL_PLANE)
    seg.set_method_type(pcl.SAC_RANSAC)
    # Max distance for a point to be considered fitting the model
    # Experiment with different values for max_distance
    # for segmenting the table
    max_distance = .037  # 0.04 to get rid of table edge
    seg.set_distance_threshold(max_distance)
    inliers, coefficients = seg.segment()

    # Extract inliers (table plane) and outliers (objects on the table)
    cloud_table = cloud_filtered.extract(inliers, negative=False)
    cloud_objects = cloud_filtered.extract(inliers, negative=True)

    # Euclidean Clustering on spatial coordinates only (color dropped)
    white_cloud = XYZRGB_to_XYZ(cloud_objects)
    tree = white_cloud.make_kdtree()

    # Create Cluster-Mask Point Cloud to visualize each cluster separately
    ec = white_cloud.make_EuclideanClusterExtraction()
    ec.set_ClusterTolerance(0.03)
    ec.set_MinClusterSize(10)
    ec.set_MaxClusterSize(2000)
    ec.set_SearchMethod(tree)
    cluster_indices = ec.Extract()

    # Rebuild an XYZRGB cloud with one distinct color per cluster
    cluster_color = get_color_list(len(cluster_indices))
    color_cluster_point_list = []
    for j, indices in enumerate(cluster_indices):
        for i, indice in enumerate(indices):
            color_cluster_point_list.append([white_cloud[indice][0],
                                             white_cloud[indice][1],
                                             white_cloud[indice][2],
                                             rgb_to_float(cluster_color[j])])

    # Create new cloud containing all clusters, each with unique color
    cluster_cloud = pcl.PointCloud_PointXYZRGB()
    cluster_cloud.from_list(color_cluster_point_list)

    # Convert PCL data to ROS messages
    ros_cloud_table = pcl_to_ros(cloud_table)
    ros_cloud_objects = pcl_to_ros(cloud_objects)
    ros_cluster_cloud = pcl_to_ros(cluster_cloud)

    # Publish ROS messages
    pcl_table_pub.publish(ros_cloud_table)
    pcl_objects_pub.publish(ros_cloud_objects)
    pcl_cluster_pub.publish(ros_cluster_cloud)
if __name__ == '__main__':
    # ROS node initialization
    rospy.init_node('clustering', anonymous=True)

    # Subscribe to the raw camera cloud; pcl_callback republishes the pieces.
    pcl_sub = rospy.Subscriber('/sensor_stick/point_cloud', pc2.PointCloud2, pcl_callback, queue_size=1)

    # Publishers for tabletop objects, the table plane, and colored clusters.
    pcl_objects_pub = rospy.Publisher('/pcl_objects', PointCloud2, queue_size=1)
    pcl_table_pub = rospy.Publisher('/pcl_table', PointCloud2, queue_size=1)
    pcl_cluster_pub = rospy.Publisher('/pcl_cluster', PointCloud2, queue_size=1)

    # Initialize the color cache used by get_color_list().
    get_color_list.color_list = []
    # Fix: `print get_color_list(4)` used Python-2-only statement syntax,
    # making the whole file a SyntaxError under Python 3; the call form
    # behaves identically on both.
    print(get_color_list(4))

    # Spin while node is not shutdown
    while not rospy.is_shutdown():
        rospy.spin()
| 33.933333
| 104
| 0.708672
|
4a0510c605adfe7d00fa90eee8e7d300882a753f
| 1,946
|
py
|
Python
|
physbryk/da_bin/sensor_test.py
|
Geoffysicist/PhysBryk
|
a3e5684bf9f12df49ad34a536ef6cc23f7473982
|
[
"CC0-1.0"
] | null | null | null |
physbryk/da_bin/sensor_test.py
|
Geoffysicist/PhysBryk
|
a3e5684bf9f12df49ad34a536ef6cc23f7473982
|
[
"CC0-1.0"
] | 10
|
2020-10-19T10:02:59.000Z
|
2020-11-24T08:18:30.000Z
|
physbryk/da_bin/sensor_test.py
|
Geoffysicist/PhysBryk
|
a3e5684bf9f12df49ad34a536ef6cc23f7473982
|
[
"CC0-1.0"
] | null | null | null |
"""Sensor demo for Adafruit Feather Sense. Prints data from each of the sensors."""
import time
import array
import math
import board
import audiobusio
import adafruit_apds9960.apds9960
import adafruit_bmp280
import adafruit_lis3mdl
import adafruit_lsm6ds.lsm6ds33
import adafruit_sht31d
# Shared I2C bus for all of the board's onboard sensors.
i2c = board.I2C()
apds9960 = adafruit_apds9960.apds9960.APDS9960(i2c)  # proximity + color (read below)
bmp280 = adafruit_bmp280.Adafruit_BMP280_I2C(i2c)    # temperature / pressure / altitude
lis3mdl = adafruit_lis3mdl.LIS3MDL(i2c)              # magnetometer
lsm6ds33 = adafruit_lsm6ds.lsm6ds33.LSM6DS33(i2c)    # accelerometer + gyro
sht31d = adafruit_sht31d.SHT31D(i2c)                 # relative humidity
# PDM microphone on its dedicated clock/data pins; 16 kHz, 16-bit samples.
microphone = audiobusio.PDMIn(board.MICROPHONE_CLOCK, board.MICROPHONE_DATA,
                              sample_rate=16000, bit_depth=16)
def normalized_rms(values):
    """Return the integer RMS deviation of *values* about their mean.

    The mean is truncated to an int before the deviations are computed,
    matching how the sound level is reported in the demo loop.
    """
    mean = int(sum(values) / len(values))
    squared_deviations = [float(v - mean) ** 2 for v in values]
    return int(math.sqrt(sum(squared_deviations) / len(values)))
# Turn on the light sensor's proximity and color engines before reading them.
apds9960.enable_proximity = True
apds9960.enable_color = True
# Set this to sea level pressure in hectoPascals at your location for accurate altitude reading.
bmp280.sea_level_pressure = 1013.25
# Main loop: grab a short microphone burst, then print one reading per
# sensor every two seconds.
while True:
    # 160 unsigned 16-bit samples (~10 ms of audio at 16 kHz).
    samples = array.array('H', [0] * 160)
    microphone.record(samples, len(samples))
    print("\nFeather Sense Sensor Demo")
    print("---------------------------------------------")
    print("Proximity:", apds9960.proximity)
    print("Red: {}, Green: {}, Blue: {}, Clear: {}".format(*apds9960.color_data))
    print("Temperature: {:.1f} C".format(bmp280.temperature))
    print("Barometric pressure:", bmp280.pressure)
    print("Altitude: {:.1f} m".format(bmp280.altitude))
    print("Magnetic: {:.3f} {:.3f} {:.3f} uTesla".format(*lis3mdl.magnetic))
    print("Acceleration: {:.2f} {:.2f} {:.2f} m/s^2".format(*lsm6ds33.acceleration))
    print("Gyro: {:.2f} {:.2f} {:.2f} dps".format(*lsm6ds33.gyro))
    print("Humidity: {:.1f} %".format(sht31d.relative_humidity))
    # RMS of the raw microphone burst (see normalized_rms above).
    print("Sound level:", normalized_rms(samples))
    time.sleep(2)
| 37.423077
| 96
| 0.688592
|
4a0510ee9ae92b1506305063f85688d07ea35197
| 5,146
|
py
|
Python
|
2algo/results and data complilation/cpu_comparison_latest.py
|
allengrr/deadlock_project
|
933878077c45a7df04daa087407bb2620c064617
|
[
"MIT"
] | null | null | null |
2algo/results and data complilation/cpu_comparison_latest.py
|
allengrr/deadlock_project
|
933878077c45a7df04daa087407bb2620c064617
|
[
"MIT"
] | null | null | null |
2algo/results and data complilation/cpu_comparison_latest.py
|
allengrr/deadlock_project
|
933878077c45a7df04daa087407bb2620c064617
|
[
"MIT"
] | 1
|
2021-03-21T17:54:26.000Z
|
2021-03-21T17:54:26.000Z
|
import matplotlib.pyplot as plt
import data as rd
import redo_for_cpu_data as redo
# One row of four subplots -- call_plot assigns one panel per MEC
# set-up size (4, 5, 6, 7).
fig = plt.figure()
ax1 = fig.add_subplot(141)
ax2 = fig.add_subplot(142)
ax3 = fig.add_subplot(143)
ax4 = fig.add_subplot(144)
# Line styles and legend labels, index-aligned per algorithm pairing.
style = ['g--^', 'r:o', 'b-.s', 'm--*', 'k-.>', 'c-.s']
names = ('RMS + Bankers',
         'EDF + Bankers',
         'RMS + wound wait',
         'RMS + wait die',
         'EDF + wound wait',
         'EDF + wait die')
# CPU utilization series, keyed (apparently) as <set-up><variant>, e.g.
# 24 -> 2-MEC set-up, variant 4.  NOTE(review): several entries look
# transposed relative to their keys (35 -> cpu2_3_6 but 36 -> cpu2_3_5;
# the 104..107 run maps to ..._7 down to ..._4; similar swaps in the 12x
# and 16x runs) -- confirm against the data module before trusting
# per-variant labels.
_cpu = {
    24: rd.cpu2_2_4,
    25: rd.cpu2_2_5,
    26: rd.cpu2_2_6,
    27: rd.cpu2_2_7,
    34: rd.cpu2_3_4,
    35: rd.cpu2_3_6,
    36: rd.cpu2_3_5,
    37: redo.cpu2_3_7,
    74: rd.cpu2_7_4,
    75: rd.cpu2_7_5,
    76: rd.cpu2_7_6,
    77: rd.cpu2_7_7,
    104: rd.cpu2_10_7,
    105: rd.cpu2_10_6,
    106: rd.cpu2_10_5,
    107: rd.cpu2_10_4,
    124: rd.cpu2_12_4,
    125: rd.cpu2_12_6,
    126: rd.cpu2_12_5,
    127: rd.cpu2_12_7,
    164: rd.cpu2_16_6,
    165: rd.cpu2_16_4,
    166: rd.cpu2_16_5,
    167: rd.cpu2_16_7,
}
# Memory utilization series, keyed like _cpu (<set-up><variant>); here
# the key-to-series mapping is consistent throughout.
_memory = {
    24: rd.memory2_2_4,
    25: rd.memory2_2_5,
    26: rd.memory2_2_6,
    27: rd.memory2_2_7,
    34: rd.memory2_3_4,
    35: rd.memory2_3_5,
    36: rd.memory2_3_6,
    37: rd.memory2_3_7,
    74: rd.memory2_7_4,
    75: rd.memory2_7_5,
    76: rd.memory2_7_6,
    77: rd.memory2_7_7,
    104: rd.memory2_10_4,
    105: rd.memory2_10_5,
    106: rd.memory2_10_6,
    107: rd.memory2_10_7,
    124: rd.memory2_12_4,
    125: rd.memory2_12_5,
    126: rd.memory2_12_6,
    127: rd.memory2_12_7,
    164: rd.memory2_16_4,
    165: rd.memory2_16_5,
    166: rd.memory2_16_6,
    167: rd.memory2_16_7,
}
def format_data(d_dict):
    """Regroup a flat {code: series} dict by algorithm variant.

    The input dict's keys are ordered so that every run of four
    consecutive entries corresponds to variants 4, 5, 6 and 7 in turn
    (e.g. 24, 25, 26, 27, 34, 35, ...).  The original implementation
    tracked four separate stride-4 counters (s4..s7); that is equivalent
    to grouping each entry by its position modulo 4.

    Returns:
        dict: {4: [...], 5: [...], 6: [...], 7: [...]} where each list
        holds that variant's series in input order.  Empty input yields
        an empty dict.
    """
    t_data = {}
    for i, key in enumerate(d_dict):
        variant = 4 + i % 4  # position within each run of four picks the variant
        t_data.setdefault(variant, []).append(d_dict[key])
    return t_data
def _mov_avg(a1):
ma1 = [] # moving average list
avg1 = 0 # moving average pointwise
count = 0
for i in range(len(a1)):
count += 1
avg1 = ((count - 1) * avg1 + a1[i]) / count
ma1.append(round(avg1, 4)) # cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return ma1
#sample = []
def plot_cpu(plot_data, axis, no):
    """Plot the moving-average CPU utilization for one MEC set-up.

    plot_data: list of per-algorithm series (index-aligned with the
    module-level ``names`` tuple); axis: the subplot the caller chose for
    this set-up; no: the variant id, used to look up the display title.
    Each series is reduced to its running mean via _mov_avg and then
    subsampled to roughly 10 marker points for readability.
    """
    global ax1, ax2, ax3, ax4
    # NOTE(review): both maps below deliberately shuffle which panel and
    # which title a variant ends up with (the axis is remapped through
    # ax_map, the title through _map) -- confirm the intended ordering.
    _map = {5:4, 6:5, 4:6, 7:7}
    ax_map = {ax1: ax3, ax2: ax1, ax3: ax2, ax4:ax4}
    #no = _map[no]
    ax = ax_map[axis]
    for i in plot_data:
        style_id = plot_data.index(i)
        if (style_id == 2) and (no == 6):
            # Special case: this one series is drawn over a fixed 222-step
            # x range without truncating the data first.
            mv = _mov_avg(i)
            # sample.append(len(mv))
            pt = mv[0:len(mv):int((len(mv) / 10)) + 1]
            if pt[-1] != mv[-1]:
                pt.append(mv[-1])
            a = list(range(0, 222))
            ptx = a[0:len(a):int((len(a) / 10)) + 1]
            if ptx[-1] != a[-1]:
                ptx.append(a[-1])
        else:
            # All other series are truncated to the first 222 samples so
            # the curves share the same time window.
            mv = _mov_avg(i[:222])
            #sample.append(len(mv))
            pt = mv[0:len(mv):int((len(mv) / 10)) + 1]
            if pt[-1] != mv[-1]:
                pt.append(mv[-1])
            a = list(range(0, len(mv)))
            ptx = a[0:len(a):int((len(a) / 10)) + 1]
            if ptx[-1] != a[-1]:
                ptx.append(a[-1])
        ax.grid(True)
        #ptx = [mv.index(i) for i in pt]
        ax.plot(ptx,
                pt,
                style[style_id],
                linewidth=2,
                label=f'{names[style_id]} (Avg) : {mv[-1]}')
    ax.set_title(f'Moving Utilization for {_map[no]} MEC Set-up')
    ax.set_xlabel('Time Period')
    ax.set_ylabel('CPU Utilization in Percentage')
    ax.set_ylim(top=8.1)
    ax.set_ylim(bottom=1.5)
    ax.legend()
    plt.subplot(ax)
def _plot_cpu(plot_data, ax, no):
    """Plot the raw (unsmoothed) utilization series for one MEC set-up.

    plot_data: list of per-algorithm series, index-aligned with the
    module-level ``names`` tuple; ax: the matplotlib axis to draw on;
    no: the set-up size, used only in the subplot title.
    """
    ax.grid(True)
    for label_id, series in enumerate(plot_data):
        ax.plot(series, linewidth=2, label=names[label_id])
    ax.set_title(f'Moving Utilization for {no} MEC Set-up')
    ax.set_xlabel('Time Period')
    ax.legend()
    plt.subplot(ax)
def call_plot():
    """Group the _cpu series by variant and draw one subplot per group."""
    # Nominal variant -> subplot assignment (plot_cpu remaps this again
    # internally through its own ax_map).
    axis = {4:ax1, 5:ax2, 6:ax3, 7:ax4}
    k = format_data(_cpu)
    for i in k:
        #print(i, len(k[i]), k[i])
        #plot_cpu(k[i], axis[i], i)
        plot_cpu(k[i], axis[i], i)
    fig.suptitle('MEC CPU Utilization During Deadlock Experiment')
    # plt.subplots_adjust(wspace=0.3, hspace=0.2)
    plt.show()
call_plot()  # build and show the full figure when the script runs
#print(min(sample))
| 22.669604
| 66
| 0.495919
|
4a05111c1d4f8d967aec49b2a423b1b3b6ab37e5
| 293
|
py
|
Python
|
xendit/models/_base_query.py
|
adyaksaw/xendit-python
|
47b05f2a6582104a274dc12a172c6421de86febc
|
[
"MIT"
] | 10
|
2020-10-31T23:34:34.000Z
|
2022-03-08T19:08:55.000Z
|
xendit/models/_base_query.py
|
adyaksaw/xendit-python
|
47b05f2a6582104a274dc12a172c6421de86febc
|
[
"MIT"
] | 22
|
2020-07-30T14:25:07.000Z
|
2022-03-31T03:55:46.000Z
|
xendit/models/_base_query.py
|
adyaksaw/xendit-python
|
47b05f2a6582104a274dc12a172c6421de86febc
|
[
"MIT"
] | 11
|
2020-07-28T08:09:40.000Z
|
2022-03-18T00:14:02.000Z
|
class BaseQuery(dict):
    """Abstract base for query objects.

    Behaves as a plain dict built from the given keyword arguments,
    except that entries whose value is None are silently dropped, so
    unset query parameters never appear in the serialized query.
    """

    def __init__(self, **kwargs):
        filtered = {name: value for name, value in kwargs.items() if value is not None}
        dict.__init__(self, filtered)
| 29.3
| 51
| 0.583618
|
4a051141e5a213bbb2370294e5af91b1ead0e957
| 2,103
|
py
|
Python
|
ranking_policies/config.py
|
acwatt/karp_ranking_policies
|
6672e510398dd8253d9bc3d7f845ac4aac0822ea
|
[
"MIT"
] | null | null | null |
ranking_policies/config.py
|
acwatt/karp_ranking_policies
|
6672e510398dd8253d9bc3d7f845ac4aac0822ea
|
[
"MIT"
] | null | null | null |
ranking_policies/config.py
|
acwatt/karp_ranking_policies
|
6672e510398dd8253d9bc3d7f845ac4aac0822ea
|
[
"MIT"
] | null | null | null |
# Python 3.7
# File name:
# Authors: Aaron Watt
# Date: 2021-07-05
"""Module to be imported for project settings."""
# Standard library imports
from pathlib import Path
import sys
# CLASSES --------------------------
class Paths:
    """Stores commonly used project paths.

    The repo root is found by searching the current working directory's
    components for the repository directory name (karp_ranking_policies),
    so the code must be run from somewhere inside the repo and the repo
    directory cannot be renamed.

    NOTE(review): the original docstring referred to an outer
    ProjectSettings class and a 'beecensus' repo; neither appears in this
    file -- the text was likely copied from another project.
    """
    def __init__(self):
        # add root path of the project / git repo: truncate the cwd at the
        # component named 'karp_ranking_policies'.  Raises ValueError if
        # run from outside the repository tree.
        self.root = Path(*Path.cwd().parts[:Path.cwd().parts.index('karp_ranking_policies') + 1])
        # Top-level paths
        self.code = self.root / 'ranking_policies'
        self.docs = self.root / 'docs'
        self.output = self.root / 'output'
        # Data directories (resolved by the Data class below)
        self.data = Data(self.root / 'data')
class Data:
    """Resolves every data file location relative to *data_dir*."""

    def __init__(self, data_dir):
        self.root = data_dir
        # First-level data directories, one attribute per subfolder.
        for sub in ('sunny', 'andy', 'tables', 'temp'):
            setattr(self, sub, self.root / sub)
        # Lookup tables
        self.lookup_jpg = self.tables / 'tbl_jpg_lookup.csv'
        self.lookup_fips = self.tables / 'tbl_fips_lookup.csv'
        # Cleaned input data files
        self.sunnyinput = self.sunny / 'clean' / 'grouped_nation.1751_2014.csv'
        self.andyinput = self.andy / 'clean' / 'ts_allYears_nation.1751_2014.csv'
# FUNCTIONS --------------------------
# MAIN -------------------------------
# Module-level singleton so other modules can simply do
# `from config import PATHS`.
PATHS = Paths()
# OTHER GLOBALS -------------------------------
# REFERENCES -------------------------
"""
"""
| 31.863636
| 97
| 0.611507
|
4a0511c6d86b73d1de1ce8632b0dddf9bb28c9fa
| 19,345
|
py
|
Python
|
tensorflow/example3_resnet/tf_resnet.py
|
BrianKmdy/mantaflow
|
273d6c148374316e4d04cae4f46fed56a630e183
|
[
"Apache-2.0"
] | 95
|
2019-12-04T21:39:51.000Z
|
2022-03-12T01:03:36.000Z
|
tensorflow/example3_resnet/tf_resnet.py
|
BrianKmdy/mantaflow
|
273d6c148374316e4d04cae4f46fed56a630e183
|
[
"Apache-2.0"
] | 4
|
2019-12-21T15:08:54.000Z
|
2021-02-28T19:40:08.000Z
|
tensorflow/example3_resnet/tf_resnet.py
|
BrianKmdy/mantaflow
|
273d6c148374316e4d04cae4f46fed56a630e183
|
[
"Apache-2.0"
] | 26
|
2020-01-21T00:48:47.000Z
|
2022-01-14T06:04:20.000Z
|
#******************************************************************************
#
# simplified L2 conv net training examples
# Copyright 2018 Nils Thuerey, You Xie, Erik Franz, Mengyu Chu
#
# This program is free software, distributed under the terms of the
# Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
#******************************************************************************
import time, os, sys, math
import numpy as np
import tensorflow as tf
from tensorflow.python.client import timeline
# load manta tools
sys.path.append("../tools")
import tilecreator as tc
import paramhelpers as ph
import fluiddataloader as FDL
# there's no GAN here, but this script simplified the setup of conv nets
from GAN import GAN, lrelu
# ---------------------------------------------
# initialize parameters / command line params
outputOnly = int(ph.getParam( "out", False ))>0 # output/generation mode, main mode switch
basePath = ph.getParam( "basePath", '../data/' )
randSeed = int(ph.getParam( "randSeed", 1 )) # seed for np and tf initialization
load_model_test = int(ph.getParam( "load_model_test", -1 )) # the number of the test to load a model from. can be used in training and output mode. -1 to not load a model
load_model_no = int(ph.getParam( "load_model_no", -1 )) # nubmber of the model to load
simSizeLow = int(ph.getParam( "simSize", 64 )) # tiles of low res sim
tileSizeLow = int(ph.getParam( "tileSize", 16 )) # size of low res tiles
upRes = int(ph.getParam( "upRes", 4 )) # scaling factor
#Data and Output
loadPath = ph.getParam( "loadPath", '../data/' ) # path to training data
fromSim = int(ph.getParam( "fromSim", 1000 )) # range of sim data to use, start index
toSim = int(ph.getParam( "toSim", -1 )) # end index
dataDimension = int(ph.getParam( "dataDim", 2 )) # dimension of dataset, can be 2 or 3. in case of 3D any scaling will only be applied to H and W (DHW)
numOut = int(ph.getParam( "numOut", 200 )) # number ouf images to output (from start of sim)
saveOut = int(ph.getParam( "saveOut", False ))>0 # save output of output mode as .npz in addition to images
loadOut = int(ph.getParam( "loadOut", -1 )) # load output from npz to use in output mode instead of tiles. number or output dir, -1 for not use output data
outputImages = int(ph.getParam( "img", True ))>0 # output images
#Training
genModel = ph.getParam( "genModel", 'gen_test' ) # choose generator model
learning_rate = float(ph.getParam( "learningRate", 0.0002 ))
decayLR = int(ph.getParam( "decayLR", False ))>0 # decay learning rate?
dropout = float(ph.getParam( "dropout", 1.0 )) # keep prop for all dropout layers during training
dropoutOutput = float(ph.getParam( "dropoutOutput", dropout )) # affects testing, full sim output and progressive output during training
batch_size = int(ph.getParam( "batchSize", 32 )) # batch size for pretrainig and output, default for batchSizeDisc and batchSizeGen
trainingEpochs = int(ph.getParam( "trainingEpochs", 10000 )) # for GAN training
batch_norm = int(ph.getParam( "batchNorm", True ))>0 # apply batch normalization to conv and deconv layers
useVelocities = int(ph.getParam( "useVelocities", 0 )) # use velocities or not
useDataAugmentation = int(ph.getParam( "dataAugmentation", 0 )) # use dataAugmentation or not
minScale = float(ph.getParam( "minScale", 0.85 )) # augmentation params...
maxScale = float(ph.getParam( "maxScale", 1.15 ))
rot = int(ph.getParam( "rot", 2 )) #rot: 1: 90 degree rotations; 2: full rotation; else: nop rotation
flip = int(ph.getParam( "flip", 1 ))
#Test and Save
testPathStartNo = int(ph.getParam( "testPathStartNo", 0 ))
testInterval = int(ph.getParam( "testInterval", 20 )) # interval in epochs to run tests should be lower or equal outputInterval
numTests = int(ph.getParam( "numTests", 10 )) # number of tests to run from test data each test interval, run as batch
outputInterval = int(ph.getParam( "outputInterval", 100 )) # interval in epochs to output statistics
saveInterval = int(ph.getParam( "saveInterval", 200 )) # interval in epochs to save model
alwaysSave = int(ph.getParam( "alwaysSave", True )) #
maxToKeep = int(ph.getParam( "keepMax", 3 )) # maximum number of model saves to keep in each test-run
genTestImg = int(ph.getParam( "genTestImg", -1 )) # if > -1 generate test image every output interval
note = ph.getParam( "note", "" ) # optional info about the current test run, printed in log and overview
data_fraction = float(ph.getParam( "data_fraction", 0.3 ))
frame_max = int(ph.getParam( "frame_max", 120 ))
frame_min = int(ph.getParam( "frame_min", 0 ))
ph.checkUnusedParams()
# initialize
simSizeHigh = simSizeLow * upRes
tileSizeHigh = tileSizeLow * upRes
if not (dataDimension == 2 or dataDimension == 3):
print('Unsupported data dimension {}. Only 2 and 3 are supported'.format(dataDimension))
exit(1)
if toSim==-1:
toSim = fromSim
channelLayout_low = 'd'
lowfilename = "density_low_%04d.uni"
highfilename = "density_high_%04d.uni"
lowfilename = "density_low_%04d.npz"
highfilename = "density_high_%04d.npz"
mfl = ["density"]
mfh = ["density"]
if useVelocities:
channelLayout_low += ',vx,vy,vz'
mfl= np.append(mfl, "velocity")
if (outputOnly):
highfilename = None
mfh = None
data_fraction = 1.0
useTempoD = False
useTempoL2 = False
useDataAugmentation = 0
dirIDs = np.linspace(fromSim, toSim, (toSim-fromSim+1),dtype='int16')
tiCr = tc.TileCreator(tileSizeLow=tileSizeLow, simSizeLow=simSizeLow , dim =dataDimension, dim_t = 1, channelLayout_low = channelLayout_low, upres=upRes)
floader = FDL.FluidDataLoader( print_info=1, base_path=loadPath, filename=lowfilename, oldNamingScheme=False, filename_y=highfilename, filename_index_min=frame_min, filename_index_max=frame_max, indices=dirIDs, data_fraction=data_fraction, multi_file_list=mfl, multi_file_list_y=mfh)
if useDataAugmentation:
tiCr.initDataAugmentation(rot=rot, minScale=minScale, maxScale=maxScale ,flip=flip)
inputx, y, xFilenames = floader.get()
tiCr.addData(inputx,y)
print("Random seed: {}".format(randSeed))
np.random.seed(randSeed)
tf.set_random_seed(randSeed)
# ---------------------------------------------
# 2D: tileSize x tileSize tiles; 3D: tileSize x tileSize x tileSize chunks
n_input = tileSizeLow ** 2
n_output = tileSizeHigh ** 2
if dataDimension == 3:
n_input *= tileSizeLow
n_output *= (tileSizeLow*upRes)
n_inputChannels = 1
if useVelocities:
n_inputChannels += 3
n_input *= n_inputChannels
# init paths
if not load_model_test == -1:
if not os.path.exists(basePath + 'test_%04d/' % load_model_test):
print('ERROR: Test to load does not exist.')
load_path = basePath + 'test_%04d/model_%04d.ckpt' % (load_model_test, load_model_no)
if outputOnly:
out_path_prefix = 'out_%04d-%04d' % (load_model_test,load_model_no)
test_path,_ = ph.getNextGenericPath(out_path_prefix, 0, basePath + 'test_%04d/' % load_model_test)
else:
test_path,load_model_test_new = ph.getNextTestPath(testPathStartNo, basePath)
else:
test_path,load_model_test_new = ph.getNextTestPath(testPathStartNo, basePath)
# logging & info
sys.stdout = ph.Logger(test_path)
print('Note: {}'.format(note))
print("\nCalled on machine '"+ os.uname()[1] +"' with: " + str(" ".join(sys.argv) ) )
print("\nUsing parameters:\n"+ph.paramsToString())
ph.writeParams(test_path+"params.json") # export parameters in human readable format
if outputOnly:
print('*****OUTPUT ONLY*****')
# ---------------------------------------------
# TENSORFLOW SETUP
import scipy.misc
def save_img(out_path, img):
    """Clamp *img* to [0, 1], scale to 8-bit grayscale and write it to
    *out_path*.

    NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2; this
    only works with the older SciPy version this project was written for.
    """
    img = np.clip(img * 255.0, 0, 255).astype(np.uint8)
    scipy.misc.imsave(out_path, img)
def save_img_3d(out_path, img):
    """Save a 3-D volume as one image: its three axis projections
    (sum over axes 0, 1 and 2) stacked along the first image axis."""
    projections = [np.sum(img, axis=a) for a in (0, 1, 2)]
    save_img(out_path, np.concatenate(projections, axis=0))
# in/out data is treated by long vector by default, reshaped into spatial
# grid in generator model
# low res input for generator
x = tf.placeholder(tf.float32, shape=[None, n_input])
# reference output
y = tf.placeholder(tf.float32, shape=[None, n_output])
kk = tf.placeholder(tf.float32)
#keep probablity for dropout
keep_prob = tf.placeholder(tf.float32)
print("x: {}".format(x.get_shape()))
# --- main graph setup ---
rbId = 0
def resBlock(gan, inp, s1,s2, reuse, use_batch_norm, filter_size=3):
    """Append one residual block to *gan* and return its output tensor.

    Two stacked convolutions (s1 then s2 feature maps) plus a 1x1
    convolution shortcut taken from *inp*; the two branches are summed
    and passed through ReLU.  Uses the module-global ``rbId`` counter to
    give each block's layers unique variable names, and the global
    ``dataDimension`` to choose 2D or 3D filter shapes.
    """
    global rbId
    # convolutions of resnet block
    if dataDimension == 2:
        filter = [filter_size,filter_size]
        filter1 = [1,1]
    elif dataDimension == 3:
        filter = [filter_size,filter_size,filter_size]
        filter1 = [1,1,1]
    gc1,_ = gan.convolutional_layer( s1, filter, tf.nn.relu, stride=[1], name="g_cA%d"%rbId, in_layer=inp, reuse=reuse, batch_norm=use_batch_norm, train=train) #->16,64
    # NOTE(review): no in_layer here -- presumably the GAN helper chains
    # from the previous layer (gc1); confirm against the GAN class.
    gc2,_ = gan.convolutional_layer( s2, filter, None , stride=[1], name="g_cB%d"%rbId, reuse=reuse, batch_norm=use_batch_norm, train=train) #->8,128
    # shortcut connection
    gs1,_ = gan.convolutional_layer(s2, filter1 , None , stride=[1], name="g_s%d"%rbId, in_layer=inp, reuse=reuse, batch_norm=use_batch_norm, train=train) #->16,64
    resUnit1 = tf.nn.relu( tf.add( gc2, gs1 ) )
    rbId += 1
    return resUnit1
############################################ resnet ###############################################################
def gen_resnet(_in, reuse=False, use_batch_norm=False, train=None):
    """Standard resnet generator: upsamples the flat low-res input by the
    global ``upRes`` factor (one max-depool plus four residual blocks per
    factor of two, 5x5 filters) and returns a flat [-1, n_output] tensor."""
    global rbId
    print("\n\tGenerator (resnet-std)")
    with tf.variable_scope("generator", reuse=reuse) as scope:
        # Reshape the flat input vector back into a spatial grid.
        if dataDimension == 2:
            _in = tf.reshape(_in, shape=[-1, tileSizeLow, tileSizeLow, n_inputChannels]) #NHWC
            patchShape = [2,2]  # NOTE(review): unused in this generator
        elif dataDimension == 3:
            _in = tf.reshape(_in, shape=[-1, tileSizeLow, tileSizeLow, tileSizeLow, n_inputChannels]) #NDHWC
            patchShape = [2,2,2]
        rbId = 0
        gan = GAN(_in)
        # One upsampling stage per factor of two in upRes.
        for i in range( int(math.log(upRes, 2)) ):
            inp = gan.max_depool()
            ru1 = resBlock(gan, inp, n_inputChannels*2,n_inputChannels*8, reuse, use_batch_norm,5)
            ru2 = resBlock(gan, ru1, 64, 64, reuse, use_batch_norm,5)
            inRu3 = ru2
            ru3 = resBlock(gan, inRu3, 32, 8, reuse, use_batch_norm,5)
            ru4 = resBlock(gan, ru3, 2, 1, reuse, False,5)
        resF = tf.reshape( ru4, shape=[-1, n_output] )
        print("\tDOFs: %d , %f m " % ( gan.getDOFs() , gan.getDOFs()/1000000.) )
        return resF
def gen_resnetSm(_in, reuse=False, use_batch_norm=False, train=None):
    """Small resnet generator variant: same structure as gen_resnet but
    with 3x3 filters and fewer feature maps (the final block keeps a 5x5
    filter); returns a flat [-1, n_output] tensor."""
    global rbId
    print("\n\tGenerator (resnet-sm)")
    with tf.variable_scope("generatorSm", reuse=reuse) as scope:
        if dataDimension == 2:
            _in = tf.reshape(_in, shape=[-1, tileSizeLow, tileSizeLow, n_inputChannels]) #NHWC
            patchShape = [2,2]  # NOTE(review): unused in this generator
        elif dataDimension == 3:
            _in = tf.reshape(_in, shape=[-1, tileSizeLow, tileSizeLow, tileSizeLow, n_inputChannels]) #NDHWC
            patchShape = [2,2,2]
        rbId = 0
        gan = GAN(_in)
        # One upsampling stage per factor of two in upRes.
        for i in range( int(math.log(upRes, 2)) ):
            inp = gan.max_depool()
            ru1 = resBlock(gan, inp, n_inputChannels*2,n_inputChannels*8, reuse, use_batch_norm,3)
            ru2 = resBlock(gan, ru1, 16, 16, reuse, use_batch_norm,3)
            inRu3 = ru2
            ru3 = resBlock(gan, inRu3, 8, 4, reuse, use_batch_norm,3)
            ru4 = resBlock(gan, ru3, 2, 1, reuse, False,5)
        resF = tf.reshape( ru4, shape=[-1, n_output] )
        print("\tDOFs: %d , %f m " % ( gan.getDOFs() , gan.getDOFs()/1000000.) )
        return resF
############################################gen_test###############################################################
def gen_test(_in, reuse=False, use_batch_norm=False, train=None):
    """Minimal test generator: per factor-of-two upsampling stage, one
    max-depool followed by a 32-feature deconvolution, then a final
    single-feature deconvolution; returns a flat [-1, n_output] tensor."""
    global rbId
    print("\n\tGenerator-test")
    with tf.variable_scope("generator-test", reuse=reuse) as scope:
        if dataDimension == 2:
            _in = tf.reshape(_in, shape=[-1, tileSizeLow, tileSizeLow, n_inputChannels]) #NHWC
            patchShape = [2,2]
        elif dataDimension == 3:
            _in = tf.reshape(_in, shape=[-1, tileSizeLow, tileSizeLow, tileSizeLow, n_inputChannels]) #NDHWC
            patchShape = [2,2,2]
        rbId = 0
        gan = GAN(_in)
        for i in range( int(math.log(upRes, 2)) ):
            inp = gan.max_depool()
            gan.deconvolutional_layer(32, patchShape, None, stride=[1,1], name="g_D1", reuse=reuse, batch_norm=False, train=train)
        outp,_ = gan.deconvolutional_layer(1 , patchShape, None, stride=[1,1], name="g_D2", reuse=reuse, batch_norm=False, train=train)
        return tf.reshape( outp, shape=[-1, n_output] )
# init generator models from command line
gen_model = locals()[genModel]
if not outputOnly:
train = tf.placeholder(tf.bool)
else:
train = False
G = gen_model(x, use_batch_norm=batch_norm, train=train)
if not outputOnly:
gen_l2_loss = tf.nn.l2_loss(y - G)
# set up decaying learning rate, if enabled
lr_global_step = tf.Variable(0, trainable=False)
learning_rate_scalar = learning_rate
if decayLR:
learning_rate = tf.train.polynomial_decay(learning_rate, lr_global_step, trainingEpochs//2, learning_rate_scalar*0.05, power=1.1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
gen_update_ops = update_ops[:]
#variables to be used in the different otimization steps
vars = tf.trainable_variables()
g_var = [var for var in vars if "g_" in var.name]
with tf.control_dependencies(gen_update_ops):
pretrain_optimizer = tf.train.AdamOptimizer(learning_rate).minimize(gen_l2_loss, var_list=g_var)
sys.stdout.flush()
# create session and saver
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.InteractiveSession(config = config)
saver = tf.train.Saver(max_to_keep=maxToKeep)
# init vars or load model
if load_model_test == -1:
sess.run(tf.global_variables_initializer())
else:
saver.restore(sess, load_path)
print("Model restored from %s." % load_path)
if not outputOnly:
lossPretrain_gen = tf.summary.scalar("generator_L2_loss", gen_l2_loss)
merged_summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(test_path, sess.graph)
save_no = 0
tileSizeHi = upRes * tileSizeLow
if dataDimension == 2:
tilesPerImg = (simSizeHigh // tileSizeHi) ** 2
else:
tilesPerImg = (simSizeHigh // tileSizeHi) ** 3
image_no = 0
if not outputOnly:
os.makedirs(test_path+'test_img/')
def modifyVel(dens,vel):
    """Hook for velocity post-processing; currently a pass-through."""
    return vel # not active right now...
def getInput(index = 1, randomtile = True, isTraining = True, batch_size = 1, useDataAugmentation = False):
    """Fetch a batch of (low-res, high-res) tiles from the tile creator.

    With randomtile=False, returns all tiles of frame *index*; otherwise
    *batch_size* random tiles (optionally augmented).  Both arrays come
    back flattened to [-1, n_input] and [-1, n_output] respectively.
    NOTE(review): isTraining is currently unused.
    """
    if randomtile == False:
        batch_xs, batch_ys = tiCr.getFrameTiles(index)
    else:
        batch_xs, batch_ys = tiCr.selectRandomTiles(selectionSize = batch_size, augment=useDataAugmentation)
    batch_xs = np.reshape(batch_xs, (-1, n_input))
    batch_ys = np.reshape(batch_ys, (-1, n_output))
    return batch_xs, batch_ys
#evaluate the generator (sampler) on the first step of the first simulation and output result
def generateTestImage(sim_no = fromSim, frame_no = 1, outPath = test_path,imageindex = 0):
    """Run the generator tile-by-tile on one frame and write the stitched
    result as grayscale PNG(s) into *outPath*.

    In training mode the tiles come from the tile creator (frames stored
    consecutively per simulation); in output-only mode they are taken
    directly from the preloaded ``inputx`` array.
    """
    if (not outputOnly):
        batch_xs, _ = getInput(randomtile = False, index = (sim_no-fromSim)*frame_max + frame_no)
    else:
        batch_xs = inputx[frame_no]
    resultTiles = []
    for tileno in range(batch_xs.shape[0]):
        # One generator evaluation per tile, at the output dropout rate.
        batch_xs_in = np.reshape(batch_xs[tileno],[-1, n_input])
        results = sess.run(G, feed_dict={x: batch_xs_in, keep_prob: dropoutOutput, train: False})
        resultTiles.extend(results)
    resultTiles = np.array(resultTiles)
    # Recover the square (2D) or cubic (3D) tile shape from the flat size.
    if dataDimension == 2: # resultTiles may have a different size
        imgSz = int(resultTiles.shape[1]**(1.0/2) + 0.5)
        resultTiles = np.reshape(resultTiles,[resultTiles.shape[0],imgSz,imgSz, 1])
    else:
        imgSz = int(resultTiles.shape[1]**(1.0/3) + 0.5)
        resultTiles = np.reshape(resultTiles,[resultTiles.shape[0],imgSz,imgSz,imgSz])
    tiles_in_image=[int(simSizeHigh/tileSizeHigh),int(simSizeHigh/tileSizeHigh)]
    tc.savePngsGrayscale(resultTiles,outPath, imageCounter=imageindex, tiles_in_image=tiles_in_image)
def saveModel(cost, exampleOut=-1, imgPath = test_path):
    """Checkpoint the current session and optionally render a test image.

    Increments the global ``save_no`` counter and returns a
    human-readable status message.  The checkpoint always goes to
    ``test_path``; *imgPath* only affects the optional test image.
    """
    global save_no
    saver.save(sess, test_path + 'model_%04d.ckpt' % save_no)
    msg = 'Saved Model %04d with cost %f.' % (save_no, cost)
    if exampleOut > -1:
        # exampleOut doubles as a flag: any value > -1 triggers an image.
        generateTestImage(imageindex = save_no, outPath = imgPath)
    save_no += 1
    return msg
# write summary to test overview
loaded_model = ''
if not load_model_test == -1:
loaded_model = ', Loaded %04d, %04d' % (load_model_test , load_model_no)
with open(basePath + 'test_overview.log', "a") as text_file:
if not outputOnly:
text_file.write(test_path[-10:-1] + ': {}D, \"{}\"\n'.format(dataDimension, note))
text_file.write('\t{} Epochs, gen: {} '.format(trainingEpochs, gen_model.__name__) + loaded_model + '\n')
else:
text_file.write('Output:' + loaded_model + ' (' + test_path[-28:-1] + ')\n')
text_file.write('\ttile size: {}, seed: {}, dropout-out: {:.4f}'.format(tileSizeLow, randSeed, dropoutOutput) + '\n')
# ---------------------------------------------
# ---------------------------------------------
training_duration = 0.0
#train generator using L2 loss
if (not outputOnly): # and pretrain>0:
try:
print('Using generator with L2 loss, ' + '{} epochs\n'.format(trainingEpochs))
print('\n*****TRAINING STARTED***** (stop with ctrl-c)\n')
startTime = time.time()
epochTime = startTime
avgCost = 0
for epoch in range(trainingEpochs):
batch_xs, batch_ys = getInput(batch_size = batch_size, useDataAugmentation = useDataAugmentation)
saved=False
_, gen_cost, summary = sess.run([pretrain_optimizer, gen_l2_loss, lossPretrain_gen], feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout, train: True})
summary_writer.add_summary(summary, epoch)
avgCost += gen_cost
if (epoch + 1) % outputInterval == 0:
print('%05d / %d: last interval: %.02f seconds, %.02f min remaining. avg cost: %.02f' % (epoch+1, trainingEpochs, (time.time() - epochTime), ((trainingEpochs - epoch) * (time.time() - startTime) / epoch / 60.0), (avgCost / outputInterval)))
epochTime = time.time()
avgCost = 0
if (epoch + 1) % saveInterval == 0:
saved = True
print(saveModel(gen_cost, genTestImg, test_path+"test_img/"))
except KeyboardInterrupt:
print("training interrupted")
sys.stdout.flush()
with open(basePath + 'test_overview.log', "a") as text_file:
text_file.write('\ttraining interrupted after %d epochs' % (epoch + 1) + '\n')
if not saved:
print(saveModel(gen_cost, genTestImg, test_path+"test_img/"))
print('\n*****TRAINING FINISHED*****')
training_duration = (time.time() - startTime) / 60.0
print('Training needed %.02f minutes.' % (training_duration))
print('To apply the trained model, call the script with command line parameters "out 1 load_model_test %d load_model_no %d " ' % (load_model_test_new, (save_no-1)) )
sys.stdout.flush()
with open(basePath + 'test_overview.log', "a") as text_file:
text_file.write('\ttraining duration: %.02f minutes' % training_duration + '\n')
### OUTPUT MODE ###
elif outputOnly: #may not work if using tiles smaller than full sim size
print('*****OUTPUT ONLY*****')
for layerno in range(0,frame_max-frame_min):
print('Generating %d' % (layerno))
if dataDimension == 2:
generateTestImage(fromSim,layerno, outPath = test_path, imageindex = layerno)
else:
print('Not supported at the moment...')
#generate3DUni(fromSim,layerno,outPath = test_path, imageindex = layerno)
print('Test finished, %d outputs written to %s.' % (frame_max-frame_min, test_path) )
| 41.247335
| 283
| 0.684776
|
4a0512c06d9fba44fe83b7b445e99bade4f56291
| 17,112
|
py
|
Python
|
phagraphnn/utilities.py
|
spudlig/PhaGraphNN
|
af95d5570aa14df4c2b05a400d46e01f245428f8
|
[
"MIT"
] | 1
|
2020-09-21T01:19:50.000Z
|
2020-09-21T01:19:50.000Z
|
phagraphnn/utilities.py
|
spudlig/PhaGraphNN
|
af95d5570aa14df4c2b05a400d46e01f245428f8
|
[
"MIT"
] | null | null | null |
phagraphnn/utilities.py
|
spudlig/PhaGraphNN
|
af95d5570aa14df4c2b05a400d46e01f245428f8
|
[
"MIT"
] | null | null | null |
import logging
log = logging.getLogger(__name__)
import numpy as np
import xlrd
import os
import pickle
import CDPL.Chem as Chem
import CDPL.Biomol as Biomol
import CDPL.Util as Util
import CDPL.Pharm as Pharm
import CDPL.Base as Base
import CDPL.ConfGen as ConfGen
import CDPL.Math as Math
import tensorflow as tf
import rdkit.Chem.AllChem as RDAllChem
from rdkit import Chem as RDChem
from urllib.request import urlretrieve
def readChemblXls(path_to_xls, col_entries=(0, 7, 10), sheet_index=0, n_entries=10000):
    '''
    Reads an .xls file and retrieves the selected columns (by default the
    ChemblID, smiles and activity metric columns). \n
    Input: \n
    path_to_xls (string): path to the file.xls \n
    col_entries (sequence of int): the column indices to retrieve. Default: (0,7,10) \n
    sheet_index (int): which sheet should be addressed. Default: 0 \n
    n_entries (int): maximum number of rows to retrieve. Default: 10000 \n
    Returns: \n
    list: one list per row with the requested cell values
    '''
    wb = xlrd.open_workbook(path_to_xls)
    # BUG FIX: the sheet_index parameter was previously ignored
    # (sheet_by_index was hard-coded to 0).
    sheet = wb.sheet_by_index(sheet_index)
    data = []
    row_nr = 0
    try:
        for row in range(n_entries):
            # Build the whole row first so a partially-read row is never
            # appended when we run off the end of the sheet.
            single_entry = [sheet.cell_value(row, col) for col in col_entries]
            data.append(single_entry)
            row_nr += 1
    except IndexError:
        # Running past the last row is the expected way to stop early.
        # BUG FIX: lazy %-formatting; the old call passed extra positional
        # args with no placeholders, which broke logging's formatting.
        log.info("End of xls file with %d entries.", row_nr, exc_info=True)
    return data
def molFromSmiles(smiles, conformation):
    '''
    Generates an RDKit molecule from a smiles string. If conformation is
    true, one 3d conformation is generated (explicit hydrogens, embedded
    and optimized with MMFF94). \n
    Input: \n
    smiles (string): smiles string \n
    conformation (boolean): generates one 3d conformation according to MMFF94 \n
    Return: \n
    (RDKitMolecule): the corresponding RDKit molecule, or None if the
    smiles could not be parsed
    '''
    try:
        m = RDChem.MolFromSmiles(smiles)
    except Exception as e:
        log.error("Could not parse RDKitSmiles smiles", exc_info=True)
        return None
    # BUG FIX: MolFromSmiles signals failure by returning None instead of
    # raising, so the except branch above almost never fires; check the
    # result explicitly so invalid input is logged, not silently passed on.
    if m is None:
        log.error("Could not parse smiles: %s", smiles)
        return None
    if conformation:
        try:
            m_conf = RDChem.AddHs(m)
            RDAllChem.EmbedMolecule(m_conf)
            RDAllChem.MMFFOptimizeMolecule(m_conf)
            return m_conf
        except Exception as e:
            # Fall through and return the 2D molecule, as before.
            log.error("Could not parse generate a valid conformation", exc_info=True)
    return m
def molFromSdf(sdf_path,conformation):
    '''
    Generates a single RDKit molecule from an sdf file. If conformation is
    true, one 3d conformation will be generated. \n
    Input: \n
    sdf_path (string): path to the sdf file (must contain exactly one molecule) \n
    conformation (boolean): generates one 3d conformation according to MMFF94 \n
    Return: \n
    (RDKitMolecule): the corresponding RDKit molecule, or None if the
    file holds more than one entry
    '''
    suppl = RDChem.SDMolSupplier(sdf_path)
    if(len(suppl)>1):
        log.error('! More than 1 sdf in file - please use only one sdf per path !')
        return
    for m in suppl:
        if conformation:
            # NOTE(review): relies on a helper _generateConformation that
            # is not visible in this part of the module -- verify it
            # exists for the RDKit code path.
            return _generateConformation(m)
        else:
            return m
def pickleGraphs(path_to_folder, data, num_splits):
    '''
    Pickles the input data list into the set folder and splits it according
    to the num_splits defined. Chunks are written as 'graphs-<i>.pkl'. \n
    Input: \n
    path_to_folder (string): path to the output folder \n
    data (list): list of graph instances \n
    num_splits (int): into how many chunks the data should be split \n
    Return: \n
    (boolean): True, if the pickle worked, False otherwise
    '''
    try:
        if not os.path.isdir(path_to_folder):
            log.error("Not a valid path:" + path_to_folder, exc_info=True)
            return False
        # Integer ceiling division; the old float division only worked because
        # the result was truncated again with int() at every use site.
        chunk_size = (len(data) + num_splits - 1) // num_splits
        for split_id in range(num_splits):
            start = split_id * chunk_size
            sub_data = data[start:start + chunk_size]
            # os.path.join is robust to a missing trailing separator; the old
            # string concatenation silently wrote prefixed files into the
            # parent directory when the caller omitted the trailing '/'.
            out_path = os.path.join(path_to_folder, 'graphs-%d.pkl' % split_id)
            with open(out_path, 'wb') as f:
                pickle.dump(sub_data, f, pickle.HIGHEST_PROTOCOL)
    except Exception:
        log.error("saving issue", exc_info=True)
        return False
    return True
def CDPLmolFromSmiles(smiles_path, conformation):
    '''
    generates a CDPL Molecule from smiles (a raw SMILES string or a .smi
    file path). If conformation is true, then one random conformation will
    be generated with explicit hydrogens. \n
    Input: \n
    smiles_path (string): smiles string or path to a .smi file \n
    conformation (boolean): generates one 3d conformation according to MMFF94 \n
    Return: \n
    (CDPL BasicMolecule): the corresponding CDPL BasicMolecule
    '''
    mol = Chem.BasicMolecule()
    # ".smi" in the argument selects file input; otherwise the argument is
    # treated as a raw SMILES string.
    if ".smi" in smiles_path:
        smi_reader = Chem.FileSMILESMoleculeReader(smiles_path)
        if not smi_reader.read(mol):
            # Bug fix: the old call passed the path as a %-format argument
            # with no placeholder, which made logging raise a format error.
            log.error("COULD NOT READ Smiles %s", smiles_path)
            return False
    else:
        mol = Chem.parseSMILES(smiles_path)
    if conformation:
        return _CDPLgenerateConformation(mol)
    else:
        return mol
def CDPLmolFromSdf(sdf_path, conformation):
    '''
    generates a single CDPL Molecule from an sdf-file. If conformation is true, then
    one random conformation will be generated. \n
    Input: \n
    sdf_path (string): path to the sdf file \n
    conformation (boolean): generates one 3d conformation according to MMFF94 \n
    Return: \n
    (CDPL BasicMolecule): the corresponding CDPL BasicMolecule
    '''
    mol = Chem.BasicMolecule()
    ifs = Base.FileIOStream(sdf_path, 'r')
    sdf_reader = Chem.SDFMoleculeReader(ifs)
    if not sdf_reader.read(mol):
        # Bug fix: lazy %-formatting; the old call passed the path as a format
        # argument with no placeholder, triggering a logging format error.
        log.error("COULD NOT READ SDF %s", sdf_path)
        return False
    if conformation:
        return _CDPLgenerateConformation(mol)
    return mol
def CDPLphaFromPML(pml_path):
    '''
    reads a single CDPL BasicPharmacophore from an pml-file.
    Input: \n
    pml_path (string): path to the pml file \n
    Return: \n
    (CDPL BasicPharmacophore): the corresponding CDPL BasicPharmacophore,
    or False if the file could not be read
    '''
    pha = Pharm.BasicPharmacophore()
    ifs = Base.FileIOStream(pml_path, 'r')
    pml_reader = Pharm.PMLPharmacophoreReader(ifs)
    if not pml_reader.read(pha):
        # Bug fix: lazy %-formatting instead of a stray extra argument.
        log.error("COULD NOT READ PML %s", pml_path)
        return False
    return pha
def CDPLphaGenerator(protein, ligand, pha_type):
    '''
    generates the pharmacophore for either the ligand, the environment or
    the interaction between them.
    Input: \n
    protein (CDPL Fragment): the CDPL protein fragment (=env) \n
    ligand (CDPL BasicMolecule): a molecule or a ligand in the corresponding
    protein pocket \n
    pha_type (string): either "lig_only", "env_only" or None - then its the
    interaction pharmacophore \n
    Return: \n
    (CDPL BasicPharmacophore): the corresponding pharmacophore
    '''
    # Bug fix: compare string values with '==' instead of 'is'. Identity
    # comparison against a string literal only worked by accident of CPython
    # small-string interning.
    if pha_type == 'lig_only':
        Chem.perceiveSSSR(ligand, True)
        return _CDPLgeneratePha(ligand, pha_type)
    Chem.perceiveSSSR(protein, True)
    if pha_type == 'env_only':
        return _CDPLgeneratePha(protein, pha_type)
    # Interaction pharmacophore: build both pharmacophores, map ligand
    # features onto environment features, then condense the mapping.
    Chem.perceiveSSSR(ligand, True)
    lig_pharm = _CDPLgeneratePha(ligand, pha_type)
    env_pharm = _CDPLgeneratePha(protein, pha_type)
    mapping = Pharm.FeatureMapping()
    Pharm.DefaultInteractionAnalyzer().analyze(lig_pharm, env_pharm, mapping)
    int_pharm = Pharm.BasicPharmacophore()
    Pharm.buildInteractionPharmacophore(int_pharm, mapping)
    return int_pharm
def CDPLdownloadProteinFile(pdb_four_letter_code, lig_three_letter_code, radius, remove_water=True):
    '''
    downloads the PDB with the corresponding four letter code, then extracts
    the ligand and its environment residues.
    Input:\n
    pdb_four_letter_code (String): the pdb_four_letter_code of the protein structure -
    it then tries to download the corresponding protein structure. \n
    lig_three_letter_code (String): three letter residue code of the ligand \n
    radius (float): radius around the ligand within which environment residues are extracted \n
    remove_water (boolean): remove all HOH residues before extraction. Default: True \n
    Return: \n
    (CDPL BasicMolecule): the protein structure \n
    (CDPL Fragment): the environment residues within the defined radius of the ligand \n
    (CDPL Fragment): the defined ligand \n
    '''
    # Scratch folder under the current working directory for the download.
    directory = os.getcwd()
    path = directory + "/temp_pdb/"
    if not os.path.exists(path):
        os.makedirs(path)
    # Fetch the structure from the RCSB file server, parse it, then delete
    # the downloaded file again.
    urlretrieve('http://files.rcsb.org/download/' + pdb_four_letter_code + '.pdb', (path + pdb_four_letter_code + ".pdb"))
    pdb_mol = _CDPLreadFromPDBFile(path + pdb_four_letter_code + ".pdb")
    if remove_water:
        _removeWater(pdb_mol)
    environment, ligand = _CDPLextractProteinFragments(pdb_mol, lig_three_letter_code=lig_three_letter_code, radius=radius)
    # NOTE(review): if an exception is raised above, the temp file is never
    # removed - consider a try/finally.
    os.remove(path + pdb_four_letter_code + ".pdb")
    return pdb_mol, environment, ligand
def CDPLreadProteinFile(path_to_pdb, lig_three_letter_code, radius, remove_water=True):
    '''
    Reads a pdb file from a path and extracts ligand plus environment.
    Input: \n
    path_to_pdb (String): the path to the protein structure \n
    lig_three_letter_code (string): the three letter code for the ligand \n
    radius (float): the radius within every residue is being extracted for the environment fragment.
    The origin of the radius is the defined ligand. \n
    remove_water (boolean): remove all HOH residues first. Default: True \n
    Return: \n
    (CDPL BasicMolecule): the protein structure \n
    (CDPL Fragment): the environment residues within the defined radius of the ligand \n
    (CDPL Fragment): the defined ligand \n
    '''
    protein = _CDPLreadFromPDBFile(path_to_pdb)
    if remove_water:
        _removeWater(protein)
    env_frag, lig_frag = _CDPLextractProteinFragments(protein, lig_three_letter_code, radius=radius)
    return protein, env_frag, lig_frag
def savePharmacophore(pha, path):
    '''
    Saves a particular pha at the target path.\n
    Input:\n
    pha (CDPL BasicPharmacophore): the pharmacophore to be saved as a pml file \n
    path (String): path where to save the pml file (includes the filename.pml)
    '''
    out_stream = Base.FileIOStream(path, 'w')
    writer = Pharm.PMLFeatureContainerWriter(out_stream)
    writer.write(pha)
    return True
def _removeWater(mol):
    # Remove every water (HOH) residue atom from *mol* in place.
    # Collect first, then delete: removing atoms while iterating over
    # mol.atoms would invalidate the iteration.
    waters = [atom for atom in mol.atoms if Biomol.getResidueCode(atom) == 'HOH']
    for atom in waters:
        mol.removeAtom(mol.getAtomIndex(atom))
def _CDPLextractProteinFragments(pdb_mol, lig_three_letter_code, radius=6.0):
    '''
    PRIVAT METHOD
    Extracts the ligand (matched by residue code) and its environment
    residues (within *radius*) from a protein structure. \n
    Input: \n
    pdb_mol (CDPL BasicMolecule): the protein structure \n
    lig_three_letter_code (string): the three letter code of the ligand \n
    radius (float): extraction radius around the ligand. Default: 6.0 \n
    Return: \n
    (CDPL Fragment, CDPL Fragment): environment fragment and ligand fragment
    '''
    lig = Chem.Fragment()
    _CDPLcalcProteinProperties(pdb_mol)
    # extract ligand
    for atom in pdb_mol.atoms:
        if Biomol.getResidueCode(atom) == lig_three_letter_code:
            Biomol.extractResidueSubstructure(atom, pdb_mol, lig, False)
    if lig.numAtoms == 0:
        # Bug fix: lazy %-formatting; the old call handed the code to logging
        # as a format argument with no placeholder (format error at runtime).
        log.error("The defined three letter code is not existing: %s", lig_three_letter_code)
    # extract environment
    env = Chem.Fragment()
    Biomol.extractEnvironmentResidues(lig, pdb_mol, env, float(radius))
    return env, lig
def _CDPLgeneratePha(mol, pha_type):
    '''
    PRIVAT METHOD
    generates the pharmacophore for the molecule and is used by the CDPLphaGenerator.
    Input: \n
    mol (CDPL BasicMolecule): the molecule the pharmacophore needs to be generated for \n
    pha_type (string): if anything other than 'lig_only', explicit hydrogen
    3D coordinates are generated first \n
    Return: \n
    (CDPL BasicPharmacophore): the corresponding pharmacophore
    '''
    # Bug fix: '!=' instead of 'is not' - identity comparison against a string
    # literal only worked by accident of CPython interning.
    if pha_type != 'lig_only':  # TODO What exactly should be in the config for the pha generation?
        Chem.generateHydrogen3DCoordinates(mol, True)
    pharm = Pharm.BasicPharmacophore()
    pharm_generator = Pharm.DefaultPharmacophoreGenerator(True)
    pharm_generator.generate(mol, pharm)
    return pharm
def _CDPLreadFromPDBFile(pdb_file):
    '''
    PRIVAT METHOD
    reads a pdb file and is used by the CDPLreadProteinFile method.
    Input: \n
    pdb_file (string): the path to the pdb file \n
    Return: \n
    (CDPL BasicMolecule): the corresponding pdb molecule, or False on failure
    '''
    ifs = Base.FileIOStream(pdb_file, 'r')
    pdb_reader = Biomol.PDBMoleculeReader(ifs)
    pdb_mol = Chem.BasicMolecule()
    Biomol.setPDBApplyDictAtomBondingToNonStdResiduesParameter(pdb_reader, False)  # TODO Should this be there for the pdb readin? or also in the config?
    if not pdb_reader.read(pdb_mol):
        # Bug fix: lazy %-formatting; the old call passed the path as a format
        # argument without a placeholder, making logging raise a format error.
        log.error("COULD NOT READ PDB %s", pdb_file)
        return False
    return pdb_mol
def createVar(tensor, requires_grad=None):
    '''
    Initializes a tensorflow variable.
    The variable is trainable exactly when requires_grad is not None.
    NOTE(review): passing requires_grad=False therefore yields a *trainable*
    variable (only None disables training) - confirm this is intended.
    '''
    return tf.Variable(tensor, trainable=requires_grad is not None)
def indexSelect(source, dim, index):
    '''
    Selects the corresponding indices for MPN.
    Gathers rows of *source* along *dim* for an arbitrarily shaped *index*
    tensor; the output keeps index's shape followed by the per-entry
    feature shape.
    '''
    index_size = tf.shape(index)            # shape of the index tensor
    suffix_dim = tf.shape(source)[1:]       # per-entry feature shape of source
    # output shape = index shape + feature shape
    final_size = tf.concat((index_size, suffix_dim), axis=0)
    inter = tf.reshape(index, shape=[-1])   # flatten so gather receives a 1-D index list
    target = tf.gather(source, indices=inter, axis=dim)
    return tf.reshape(target, shape=final_size)
def updateConnected(atom_features, scope):
    '''Return the rows of atom_features selected by the indices in scope.'''
    return [atom_features[idx] for idx in scope]
def updateConnectedDict(features, feature_dict, scope):
    '''Return features looked up indirectly through feature_dict for each key in scope.'''
    return [features[feature_dict[key]] for key in scope]
def getConnectedFeatures(features, scope):
    '''
    this functions selects according to the scope, the selected connected
    features and returns them. 0 based indices. \n
    INPUT: \n
    features (tensor): a matrix of feature rows \n
    scope (list): a list of connections (index lists; presumably 0-padded to
    equal length - TODO confirm against caller) \n
    RETURNS: \n
    (tensor): the gathered feature rows with all-zero rows filtered out
    '''
    gat = tf.gather(features, indices=(scope))  # get for each feature entry the connected ones
    gat = tf.reshape(gat, (tf.shape(gat)[0] * tf.shape(gat)[1], tf.shape(gat)[2]))  # remove the 3 shape introduced
    intermediate_tensor = tf.reduce_sum(tf.abs(gat), 1)  # search for all the 0 entries introduced by the gather an concat above
    zero_vector = tf.zeros(shape=(tf.shape(gat)[0]), dtype=tf.float32)  # generate dummy matrix for comparison
    bool_mask = tf.not_equal(intermediate_tensor, zero_vector)  # compare dummy with other matrix
    # NOTE(review): a genuine all-zero feature row is dropped by this mask as
    # well, not only padding rows - confirm padding is the only zero source.
    return tf.boolean_mask(gat, bool_mask)
def _generateConformation(mol):
    '''
    PRIVATE METHOD
    generates a random conformation for a RDKit Molecule. \n
    Input: \n
    mol (RDKitMolecule): the molecule to embed \n
    Return: \n
    (RDKitMolecule): the molecule with explicit hydrogens and one MMFF94
    optimized conformation, or None if hydrogen addition already failed
    '''
    conformer = None
    try:
        conformer = RDChem.AddHs(mol)
        RDAllChem.EmbedMolecule(conformer)
        RDAllChem.MMFFOptimizeMolecule(conformer)
    except Exception:
        log.error("Could not generate valid conformation", exc_info=True)
    return conformer
def _CDPLgenerateConformation(cdpl_mol):
    '''
    PRIVAT METHOD
    configures a CDPL Molecule for conformation generation. \n
    Input: \n
    cdpl_mol (CDPL BasicMolecule): a CDPL BasicMolecule \n
    Return: \n
    (CDPL BasicMolecule): the corresponding random conf. for the input
    BasicMolecule, or None if conformer generation failed
    '''
    _CDPLconfigForConformation(cdpl_mol)  # TODO What exactly should be in the config for the cmp generation?
    cg = ConfGen.RandomConformerGenerator()
    coords = Math.Vector3DArray()
    i = 0  # NOTE(review): unused - candidate for removal
    cg.strictMMFF94AtomTyping = False
    # Order matters: prepare the molecule first, size the coordinate array to
    # the (possibly changed) atom count, then set up the generator on it.
    ConfGen.prepareForConformerGeneration(cdpl_mol)
    coords.resize(cdpl_mol.numAtoms, Math.Vector3D())
    cg.setup(cdpl_mol)
    if cg.generate(coords) != ConfGen.RandomConformerGenerator.SUCCESS:
        log.error('! Conformer generation failed !')
        return
    Chem.set3DCoordinates(cdpl_mol, coords)
    return cdpl_mol
def _CDPLconfigForConformation(mol):  # TODO is this the right way to handle ligands for conf. generation?
    '''
    PRIVAT METHOD
    configures a CDPL BasicMolecule for conformation generation. \n
    Input: \n
    mol (CDPL BasicMolecule): a CDPL BasicMolecule \n
    Return: \n
    (CDPL BasicMolecule): the configured input BasicMolecule
    '''
    # All calls use False as the second argument except the last, i.e. cached
    # properties are reused when already present rather than recalculated.
    # Basic perception: components, rings, hydrogens, hybridization, aromaticity.
    Chem.perceiveComponents(mol, False)
    Chem.perceiveSSSR(mol, False)
    Chem.setRingFlags(mol, False)
    Chem.calcImplicitHydrogenCounts(mol, False)
    Chem.perceiveHybridizationStates(mol, False)
    Chem.setAromaticityFlags(mol, False)
    # Stereochemistry: CIP priorities/configurations and stereo descriptors.
    Chem.calcCIPPriorities(mol, False)
    Chem.calcAtomCIPConfigurations(mol, False)
    Chem.calcBondCIPConfigurations(mol, False)
    Chem.calcAtomStereoDescriptors(mol, False)
    Chem.calcBondStereoDescriptors(mol, False)
    # 2D layout (used for stereo flag generation below).
    Chem.calcTopologicalDistanceMatrix(mol, False)
    Chem.generate2DCoordinates(mol, False)
    Chem.generateBond2DStereoFlags(mol, True)
def _CDPLcalcProteinProperties(pdb_mol):  # TODO is this the right way to handle protein structures?
    '''
    PRIVAT METHOD
    configures a CDPL BasicMolecule for a protein structure. Is used in the _CDPLextractProteinFragments method \n
    Input: \n
    pdb_mol (CDPL BasicMolecule): a CDPL BasicMolecule representing the protein structure \n
    '''
    # Add explicit hydrogens (counts are recalculated after
    # makeHydrogenComplete since the atom set changed).
    Chem.calcImplicitHydrogenCounts(pdb_mol, True)
    Chem.perceiveHybridizationStates(pdb_mol, True)
    Chem.makeHydrogenComplete(pdb_mol)
    Chem.setAtomSymbolsFromTypes(pdb_mol, False)
    Chem.calcImplicitHydrogenCounts(pdb_mol, True)
    # Ring/aromaticity perception and 3D placement of the new hydrogens.
    Chem.setRingFlags(pdb_mol, True)
    Chem.setAromaticityFlags(pdb_mol, True)
    Chem.generateHydrogen3DCoordinates(pdb_mol, True)
    Biomol.setHydrogenResidueSequenceInfo(pdb_mol, False)
| 34.155689
| 151
| 0.694951
|
4a051412ae400cb0e882d714bd536db6736e9584
| 1,186
|
py
|
Python
|
tests/test_linechart.py
|
MrHemlock/pglet-python
|
0d8dd2bb07effdd1a09836641ffa9c7e4271b907
|
[
"MIT"
] | null | null | null |
tests/test_linechart.py
|
MrHemlock/pglet-python
|
0d8dd2bb07effdd1a09836641ffa9c7e4271b907
|
[
"MIT"
] | 4
|
2022-01-14T19:30:49.000Z
|
2022-01-19T15:59:03.000Z
|
tests/test_linechart.py
|
MrHemlock/pglet-python
|
0d8dd2bb07effdd1a09836641ffa9c7e4271b907
|
[
"MIT"
] | null | null | null |
import pglet
from pglet import LineChart
from pglet.linechart import Data, Point
def test_linechart_add():
    """Build a LineChart with two data series and verify both the type
    hierarchy and the serialized command string.

    Renamed from test_verticalbarchart_add: this test exercises LineChart,
    not VerticalBarChart (copy-paste leftover from the barchart test).
    """
    lc = LineChart(
        legend=True,
        tooltips=True,
        stroke_width=4,
        y_min=0,
        y_max=100,
        y_ticks=2,
        y_format="{y}%",
        x_type="number",
        lines=[
            Data(
                color="yellow",
                legend="yellow color",
                points=[Point(x=1, y=100), Point(x=5, y=50)],
            ),
            Data(
                color="green",
                legend="green color",
                points=[Point(x=10, y=20), Point(x=20, y=10)],
            ),
        ],
    )
    assert isinstance(lc, pglet.Control)
    assert isinstance(lc, pglet.LineChart)
    assert lc.get_cmd_str() == (
        'linechart legend="true" strokewidth="4" tooltips="true" xtype="number" '
        'yformat="{y}%" ymax="100" ymin="0" yticks="2"\n'
        ' data color="yellow" legend="yellow color"\n'
        ' p x="1" y="100"\n'
        ' p x="5" y="50"\n'
        ' data color="green" legend="green color"\n'
        ' p x="10" y="20"\n'
        ' p x="20" y="10"'
    )
| 28.926829
| 81
| 0.48398
|
4a051422645db56d606c929467405bc572046cf6
| 9,308
|
py
|
Python
|
ml3d/datasets/semantic3d.py
|
MSaeedMp/Open3D-ML
|
5e6a007bbe8423291b8c4c760688975c95d2d5c1
|
[
"MIT"
] | 346
|
2021-07-27T20:14:47.000Z
|
2022-03-31T12:19:39.000Z
|
ml3d/datasets/semantic3d.py
|
MSaeedMp/Open3D-ML
|
5e6a007bbe8423291b8c4c760688975c95d2d5c1
|
[
"MIT"
] | 179
|
2021-07-27T15:32:33.000Z
|
2022-03-30T14:32:53.000Z
|
ml3d/datasets/semantic3d.py
|
MSaeedMp/Open3D-ML
|
5e6a007bbe8423291b8c4c760688975c95d2d5c1
|
[
"MIT"
] | 92
|
2021-07-28T13:50:52.000Z
|
2022-03-30T09:24:33.000Z
|
import numpy as np
import pandas as pd
import os, sys, glob, pickle
from pathlib import Path
from os.path import join, exists, dirname, abspath
from sklearn.neighbors import KDTree
import logging
from .utils import DataProcessing as DP
from .base_dataset import BaseDataset, BaseDatasetSplit
from ..utils import make_dir, DATASET
log = logging.getLogger(__name__)
class Semantic3D(BaseDataset):
    """This class is used to create a dataset based on the Semantic3D dataset,
    and used in visualizer, training, or testing.

    The dataset includes 8 semantic classes and covers a variety of urban
    outdoor scenes.
    """

    def __init__(self,
                 dataset_path,
                 name='Semantic3D',
                 cache_dir='./logs/cache',
                 use_cache=False,
                 num_points=65536,
                 class_weights=[
                     5181602, 5012952, 6830086, 1311528, 10476365, 946982,
                     334860, 269353
                 ],
                 ignored_label_inds=[0],
                 val_files=[
                     'bildstein_station3_xyz_intensity_rgb',
                     'sg27_station2_intensity_rgb'
                 ],
                 test_result_folder='./test',
                 **kwargs):
        """Initialize the function by passing the dataset and other details.

        Args:
            dataset_path: The path to the dataset to use.
            name: The name of the dataset (Semantic3D in this case).
            cache_dir: The directory where the cache is stored.
            use_cache: Indicates if the dataset should be cached.
            num_points: The maximum number of points to use when splitting the dataset.
            class_weights: The class weights to use in the dataset.
            ignored_label_inds: A list of labels that should be ignored in the dataset.
            val_files: The files with the data.
            test_result_folder: The folder where the test results should be stored.

        Returns:
            class: The corresponding class.
        """
        # NOTE: the mutable list defaults above are only read, never mutated,
        # so they are harmless; kept as-is for signature compatibility.
        super().__init__(dataset_path=dataset_path,
                         name=name,
                         cache_dir=cache_dir,
                         use_cache=use_cache,
                         class_weights=class_weights,
                         num_points=num_points,
                         ignored_label_inds=ignored_label_inds,
                         val_files=val_files,
                         test_result_folder=test_result_folder,
                         **kwargs)

        cfg = self.cfg

        self.label_to_names = self.get_label_to_names()
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort([k for k, v in self.label_to_names.items()])
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
        # NOTE(review): hard-coded to label 0; presumably should mirror
        # ignored_label_inds - confirm before passing other values.
        self.ignored_labels = np.array([0])

        self.all_files = glob.glob(str(Path(self.cfg.dataset_path) / '*.txt'))
        # A point cloud belongs to the train pool iff a matching '.labels'
        # file exists next to it; everything else is a test scan.
        self.train_files = [
            f for f in self.all_files if exists(
                str(Path(f).parent / Path(f).name.replace('.txt', '.labels')))
        ]
        self.test_files = [
            f for f in self.all_files if f not in self.train_files
        ]

        self.train_files = np.sort(self.train_files)
        self.test_files = np.sort(self.test_files)
        self.val_files = []

        # Move the configured validation scans out of the training pool.
        for i, file_path in enumerate(self.train_files):
            for val_file in cfg.val_files:
                if val_file in file_path:
                    self.val_files.append(file_path)
                    break

        self.train_files = np.sort(
            [f for f in self.train_files if f not in self.val_files])

    @staticmethod
    def get_label_to_names():
        """Returns a label to names dictionary object.

        Returns:
            A dict where keys are label numbers and
            values are the corresponding names.
        """
        label_to_names = {
            0: 'unlabeled',
            1: 'man-made terrain',
            2: 'natural terrain',
            3: 'high vegetation',
            4: 'low vegetation',
            5: 'buildings',
            6: 'hard scape',
            7: 'scanning artefacts',
            8: 'cars'
        }
        return label_to_names

    def get_split(self, split):
        """Returns a dataset split.

        Args:
            split: A string identifying the dataset split that is usually one of
            'training', 'test', 'validation', or 'all'.

        Returns:
            A dataset split object providing the requested subset of the data.
        """
        # Bug fix: this docstring previously sat *after* the return statement,
        # making it unreachable dead code and leaving the method undocumented.
        return Semantic3DSplit(self, split=split)

    def get_split_list(self, split):
        """Returns the list of data splits available.

        Args:
            split: A string identifying the dataset split that is usually one of
            'training', 'test', 'validation', or 'all'.

        Returns:
            A dataset split object providing the requested subset of the data.

        Raises:
            ValueError: Indicates that the split name passed is incorrect. The split name should be one of
            'training', 'test', 'validation', or 'all'.
        """
        if split in ['test', 'testing']:
            files = self.test_files
        elif split in ['train', 'training']:
            files = self.train_files
        elif split in ['val', 'validation']:
            files = self.val_files
        elif split in ['all']:
            files = self.val_files + self.train_files + self.test_files
        else:
            raise ValueError("Invalid split {}".format(split))
        return files

    def is_tested(self, attr):
        """Checks if a datum in the dataset has been tested.

        Args:
            attr: The attribute that needs to be checked.

        Returns:
            If the datum attribute is tested, then return the path where the
            attribute is stored; else, returns false.
        """
        cfg = self.cfg
        name = attr['name']
        path = cfg.test_result_folder
        store_path = join(path, self.name, name + '.labels')
        if exists(store_path):
            print("{} already exists.".format(store_path))
            return True
        else:
            return False

    def save_test_result(self, results, attr):
        """Saves the output of a model.

        Args:
            results: The output of a model for the datum associated with the attribute passed.
            attr: The attributes that correspond to the outputs passed in results.
        """
        cfg = self.cfg
        name = attr['name'].split('.')[0]
        path = cfg.test_result_folder
        make_dir(path)

        # Shift predictions by +1 to undo the ignored-label-0 offset.
        pred = results['predict_labels'] + 1

        store_path = join(path, self.name, name + '.labels')
        make_dir(Path(store_path).parent)
        np.savetxt(store_path, pred.astype(np.int32), fmt='%d')
        log.info("Saved {} in {}.".format(name, store_path))
class Semantic3DSplit(BaseDatasetSplit):
    """This class is used to create a split for Semantic3D dataset.

    Initialize the class.

    Args:
        dataset: The dataset to split.
        split: A string identifying the dataset split that is usually one of
        'training', 'test', 'validation', or 'all'.
        **kwargs: The configuration of the model as keyword arguments.

    Returns:
        A dataset split object providing the requested subset of the data.
    """

    def __init__(self, dataset, split='training'):
        super().__init__(dataset, split=split)
        self.cfg = dataset.cfg
        path_list = dataset.get_split_list(split)
        log.info("Found {} pointclouds for {}".format(len(path_list), split))

        self.path_list = path_list
        self.split = split
        self.dataset = dataset

    def __len__(self):
        return len(self.path_list)

    def get_data(self, idx):
        """Read one point cloud (xyz, rgb, intensity and, for non-test
        splits, per-point labels) from disk."""
        pc_path = self.path_list[idx]
        log.debug("get_data called {}".format(pc_path))

        pc = pd.read_csv(pc_path,
                         header=None,
                         delim_whitespace=True,
                         dtype=np.float32).values

        # Column layout: x y z intensity r g b
        points = pc[:, 0:3]
        feat = pc[:, [4, 5, 6]]
        intensity = pc[:, 3]

        points = np.array(points, dtype=np.float32)
        feat = np.array(feat, dtype=np.float32)
        intensity = np.array(intensity, dtype=np.float32)

        # Bug fix: also accept the 'testing' alias (get_split_list treats
        # 'test' and 'testing' identically); previously split='testing'
        # attempted to read non-existent '.labels' files.
        if self.split not in ['test', 'testing']:
            labels = pd.read_csv(pc_path.replace(".txt", ".labels"),
                                 header=None,
                                 delim_whitespace=True,
                                 dtype=np.int32).values
            labels = np.array(labels, dtype=np.int32).reshape((-1,))
        else:
            labels = np.zeros((points.shape[0],), dtype=np.int32)

        data = {
            'point': points,
            'feat': feat,
            'intensity': intensity,
            'label': labels
        }

        return data

    def get_attr(self, idx):
        """Return the metadata (index, scan name, path, split) for one datum."""
        pc_path = Path(self.path_list[idx])
        name = pc_path.name.replace('.txt', '')

        pc_path = str(pc_path)
        split = self.split
        attr = {'idx': idx, 'name': name, 'path': pc_path, 'split': split}
        return attr
# Make the dataset discoverable by name through the global DATASET registry.
DATASET._register_module(Semantic3D)
| 34.095238
| 106
| 0.568006
|
4a051540b874277b0509cfd739f942608dab4d92
| 385
|
py
|
Python
|
catalogue/migrations/0003_contact_message_read.py
|
bodealamu/fullstackninja
|
bcb61a49fc6921c8b5a3f50c408ba6d366a287d9
|
[
"MIT"
] | null | null | null |
catalogue/migrations/0003_contact_message_read.py
|
bodealamu/fullstackninja
|
bcb61a49fc6921c8b5a3f50c408ba6d366a287d9
|
[
"MIT"
] | 8
|
2021-03-19T07:54:07.000Z
|
2022-03-12T00:35:32.000Z
|
catalogue/migrations/0003_contact_message_read.py
|
bodealamu/fullstackninja
|
bcb61a49fc6921c8b5a3f50c408ba6d366a287d9
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-06-27 16:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean ``message_read`` flag (default ``False``) to the
    ``Contact`` model so handled messages can be tracked."""

    dependencies = [
        ('catalogue', '0002_contact'),
    ]

    operations = [
        migrations.AddField(
            model_name='contact',
            name='message_read',
            # Existing rows are backfilled with False (unread).
            field=models.BooleanField(default=False),
        ),
    ]
| 20.263158
| 53
| 0.597403
|
4a0515c4bc21d5b75f3979995e986c9e98d86175
| 3,132
|
py
|
Python
|
moto/core/responses.py
|
tinyclues/moto
|
8612e9d78223d56949eba53ef32bedc4bde0e7de
|
[
"Apache-2.0"
] | null | null | null |
moto/core/responses.py
|
tinyclues/moto
|
8612e9d78223d56949eba53ef32bedc4bde0e7de
|
[
"Apache-2.0"
] | null | null | null |
moto/core/responses.py
|
tinyclues/moto
|
8612e9d78223d56949eba53ef32bedc4bde0e7de
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import json
from urlparse import parse_qs, urlparse
from moto.core.utils import camelcase_to_underscores, method_names_from_class
class BaseResponse(object):
    """Base class for moto service mocks (Python 2 era code: ``iteritems``,
    ``basestring``): normalizes an incoming HTTP request from either boto or
    the Flask server and dispatches it to a handler method named after the
    AWS ``Action`` query parameter."""

    def dispatch(self, request, full_url, headers):
        """Extract body/querystring/headers from *request*, stash them on
        ``self`` and invoke the matching handler via ``call_action()``."""
        querystring = {}

        if hasattr(request, 'body'):
            # Boto
            self.body = request.body
        else:
            # Flask server

            # FIXME: At least in Flask==0.10.1, request.data is an empty string
            # and the information we want is in request.form. Keeping self.body
            # definition for back-compatibility
            self.body = request.data

            querystring = {}
            for key, value in request.form.iteritems():
                # parse_qs-style: every value is wrapped in a list.
                querystring[key] = [value, ]

        # Fallback chain (order matters): form fields first, then the URL
        # query string, then a form-encoded body, and finally the raw headers.
        if not querystring:
            querystring.update(parse_qs(urlparse(full_url).query))
        if not querystring:
            querystring.update(parse_qs(self.body))
        if not querystring:
            querystring.update(headers)

        self.uri = full_url
        self.path = urlparse(full_url).path
        self.querystring = querystring
        self.method = request.method
        self.headers = dict(request.headers)
        self.response_headers = headers
        return self.call_action()

    def call_action(self):
        """Route to the snake_cased method matching the CamelCase ``Action``
        parameter; raise ``NotImplementedError`` for unknown actions."""
        headers = self.response_headers
        action = self.querystring.get('Action', [""])[0]
        action = camelcase_to_underscores(action)

        method_names = method_names_from_class(self.__class__)
        if action in method_names:
            method = getattr(self, action)
            response = method()
            if isinstance(response, basestring):
                # A bare string response implies HTTP 200.
                return 200, headers, response
            else:
                # Otherwise a (body, headers) pair; an optional 'status' key
                # in the returned headers overrides the HTTP status code.
                body, new_headers = response
                status = new_headers.pop('status', 200)
                headers.update(new_headers)
                return status, headers, body
        raise NotImplementedError("The {0} action has not been implemented".format(action))
def metadata_response(request, full_url, headers):
    """
    Mock response for localhost metadata

    http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html

    Returns a ``(status, headers, body)`` triple. Unknown paths now yield a
    404 instead of raising NameError (the old code fell through all branches
    with ``result`` undefined).
    """
    parsed_url = urlparse(full_url)
    # NOTE(review): naive local time is used for the expiry timestamp even
    # though the format string ends in 'Z' (UTC) - confirm intent.
    tomorrow = datetime.datetime.now() + datetime.timedelta(days=1)
    credentials = dict(
        AccessKeyId="test-key",
        SecretAccessKey="test-secret-key",
        Token="test-session-token",
        Expiration=tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ")
    )

    path = parsed_url.path

    meta_data_prefix = "/latest/meta-data/"
    # Strip prefix if it is there
    if path.startswith(meta_data_prefix):
        path = path[len(meta_data_prefix):]

    if path == '':
        result = 'iam'
    elif path == 'iam':
        result = json.dumps({
            'security-credentials': {
                'default-role': credentials
            }
        })
    elif path == 'iam/security-credentials/':
        result = 'default-role'
    elif path == 'iam/security-credentials/default-role':
        result = json.dumps(credentials)
    else:
        # Bug fix: previously no branch matched and the final return raised
        # NameError on the undefined 'result'.
        return 404, headers, ''
    return 200, headers, result
| 31.959184
| 91
| 0.61143
|
4a0515e3ebdc81679a3f7927abb851dac853a9f0
| 20,237
|
py
|
Python
|
tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/scipy/optimize/slsqp.py
|
iLuSIAnn/test
|
10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e
|
[
"Apache-2.0"
] | 353
|
2020-12-10T10:47:17.000Z
|
2022-03-31T23:08:29.000Z
|
tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/scipy/optimize/slsqp.py
|
iLuSIAnn/test
|
10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e
|
[
"Apache-2.0"
] | 80
|
2020-12-10T09:54:22.000Z
|
2022-03-30T22:08:45.000Z
|
tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/scipy/optimize/slsqp.py
|
iLuSIAnn/test
|
10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e
|
[
"Apache-2.0"
] | 63
|
2020-12-10T17:10:34.000Z
|
2022-03-28T16:27:07.000Z
|
"""
This module implements the Sequential Least Squares Programming optimization
algorithm (SLSQP), originally developed by Dieter Kraft.
See http://www.netlib.org/toms/733
Functions
---------
.. autosummary::
:toctree: generated/
approx_jacobian
fmin_slsqp
"""
__all__ = ['approx_jacobian', 'fmin_slsqp']
import numpy as np
from scipy.optimize._slsqp import slsqp
from numpy import (zeros, array, linalg, append, asfarray, concatenate, finfo,
sqrt, vstack, exp, inf, isfinite, atleast_1d)
from .optimize import (OptimizeResult, _check_unknown_options,
_prepare_scalar_function)
from ._numdiff import approx_derivative
from ._constraints import old_bound_to_new, _arr_to_scalar
__docformat__ = "restructuredtext en"
_epsilon = sqrt(finfo(float).eps)
def approx_jacobian(x, func, epsilon, *args):
    """
    Approximate the Jacobian matrix of a callable function.

    Parameters
    ----------
    x : array_like
        The state vector at which to compute the Jacobian matrix.
    func : callable f(x,*args)
        The vector-valued function.
    epsilon : float
        The perturbation used to determine the partial derivatives.
    args : sequence
        Additional arguments passed to func.

    Returns
    -------
    An array of dimensions ``(lenf, lenx)`` where ``lenf`` is the length
    of the outputs of `func`, and ``lenx`` is the number of elements in
    `x`.

    Notes
    -----
    The approximation is done using forward differences.
    """
    # Delegate to the shared finite-difference helper; np.atleast_2d lifts the
    # (lenx,) result produced when func returns a scalar to a 2-D Jacobian.
    jacobian = approx_derivative(func, x, method='2-point',
                                 abs_step=epsilon, args=args)
    return np.atleast_2d(jacobian)
def fmin_slsqp(func, x0, eqcons=(), f_eqcons=None, ieqcons=(), f_ieqcons=None,
               bounds=(), fprime=None, fprime_eqcons=None,
               fprime_ieqcons=None, args=(), iter=100, acc=1.0E-6,
               iprint=1, disp=None, full_output=0, epsilon=_epsilon,
               callback=None):
    """
    Minimize a function using Sequential Least Squares Programming

    Python interface function for the SLSQP Optimization subroutine
    originally implemented by Dieter Kraft.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function.  Must return a scalar.
    x0 : 1-D ndarray of float
        Initial guess for the independent variable(s).
    eqcons : list, optional
        A list of functions of length n such that
        eqcons[j](x,*args) == 0.0 in a successfully optimized
        problem.
    f_eqcons : callable f(x,*args), optional
        Returns a 1-D array in which each element must equal 0.0 in a
        successfully optimized problem. If f_eqcons is specified,
        eqcons is ignored.
    ieqcons : list, optional
        A list of functions of length n such that
        ieqcons[j](x,*args) >= 0.0 in a successfully optimized
        problem.
    f_ieqcons : callable f(x,*args), optional
        Returns a 1-D ndarray in which each element must be greater or
        equal to 0.0 in a successfully optimized problem. If
        f_ieqcons is specified, ieqcons is ignored.
    bounds : list, optional
        A list of tuples specifying the lower and upper bound
        for each independent variable [(xl0, xu0),(xl1, xu1),...]
        Infinite values will be interpreted as large floating values.
    fprime : callable `f(x,*args)`, optional
        A function that evaluates the partial derivatives of func.
    fprime_eqcons : callable `f(x,*args)`, optional
        A function of the form `f(x, *args)` that returns the m by n
        array of equality constraint normals. If not provided,
        the normals will be approximated. The array returned by
        fprime_eqcons should be sized as ( len(eqcons), len(x0) ).
    fprime_ieqcons : callable `f(x,*args)`, optional
        A function of the form `f(x, *args)` that returns the m by n
        array of inequality constraint normals. If not provided,
        the normals will be approximated. The array returned by
        fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ).
    args : sequence, optional
        Additional arguments passed to func and fprime.
    iter : int, optional
        The maximum number of iterations.
    acc : float, optional
        Requested accuracy.
    iprint : int, optional
        The verbosity of fmin_slsqp :

        * iprint <= 0 : Silent operation
        * iprint == 1 : Print summary upon completion (default)
        * iprint >= 2 : Print status of each iterate and summary
    disp : int, optional
        Overrides the iprint interface (preferred).
    full_output : bool, optional
        If False, return only the minimizer of func (default).
        Otherwise, output final objective function and summary
        information.
    epsilon : float, optional
        The step size for finite-difference derivative estimates.
    callback : callable, optional
        Called after each iteration, as ``callback(x)``, where ``x`` is the
        current parameter vector.

    Returns
    -------
    out : ndarray of float
        The final minimizer of func.
    fx : ndarray of float, if full_output is true
        The final value of the objective function.
    its : int, if full_output is true
        The number of iterations.
    imode : int, if full_output is true
        The exit mode from the optimizer (see below).
    smode : string, if full_output is true
        Message describing the exit mode from the optimizer.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'SLSQP' `method` in particular.

    Notes
    -----
    Exit modes are defined as follows ::

        -1 : Gradient evaluation required (g & a)
         0 : Optimization terminated successfully
         1 : Function evaluation required (f & c)
         2 : More equality constraints than independent variables
         3 : More than 3*n iterations in LSQ subproblem
         4 : Inequality constraints incompatible
         5 : Singular matrix E in LSQ subproblem
         6 : Singular matrix C in LSQ subproblem
         7 : Rank-deficient equality constraint subproblem HFTI
         8 : Positive directional derivative for linesearch
         9 : Iteration limit reached

    Examples
    --------
    Examples are given :ref:`in the tutorial <tutorial-sqlsp>`.

    """
    # `disp`, when given, takes precedence over the legacy `iprint` knob.
    if disp is not None:
        iprint = disp

    # Translate the legacy keyword names into the options dict understood by
    # the `minimize`-style backend `_minimize_slsqp`.
    opts = {'maxiter': iter,
            'ftol': acc,
            'iprint': iprint,
            'disp': iprint != 0,
            'eps': epsilon,
            'callback': callback}

    # Build the constraints as a tuple of dictionaries
    cons = ()
    # 1. constraints of the 1st kind (eqcons, ieqcons); no Jacobian; take
    # the same extra arguments as the objective function.
    cons += tuple({'type': 'eq', 'fun': c, 'args': args} for c in eqcons)
    cons += tuple({'type': 'ineq', 'fun': c, 'args': args} for c in ieqcons)
    # 2. constraints of the 2nd kind (f_eqcons, f_ieqcons) and their Jacobian
    # (fprime_eqcons, fprime_ieqcons); also take the same extra arguments
    # as the objective function.
    if f_eqcons:
        cons += ({'type': 'eq', 'fun': f_eqcons, 'jac': fprime_eqcons,
                  'args': args}, )
    if f_ieqcons:
        cons += ({'type': 'ineq', 'fun': f_ieqcons, 'jac': fprime_ieqcons,
                  'args': args}, )

    res = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds,
                          constraints=cons, **opts)
    if full_output:
        return res['x'], res['fun'], res['nit'], res['status'], res['message']
    else:
        return res['x']
def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None,
                    constraints=(),
                    maxiter=100, ftol=1.0E-6, iprint=1, disp=False,
                    eps=_epsilon, callback=None, finite_diff_rel_step=None,
                    **unknown_options):
    """
    Minimize a scalar function of one or more variables using Sequential
    Least Squares Programming (SLSQP).
    Options
    -------
    ftol : float
        Precision goal for the value of f in the stopping criterion.
    eps : float
        Step size used for numerical approximation of the Jacobian.
    disp : bool
        Set to True to print convergence messages. If False,
        `verbosity` is ignored and set to 0.
    maxiter : int
        Maximum number of iterations.
    finite_diff_rel_step : None or array_like, optional
        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
        use for numerical approximation of `jac`. The absolute step
        size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
        possibly adjusted to fit into the bounds. For ``method='3-point'``
        the sign of `h` is ignored. If None (default) then step is selected
        automatically.
    """
    _check_unknown_options(unknown_options)
    # NOTE(review): the Fortran routine is handed maxiter - 1 — looks like a
    # deliberate off-by-one for its internal counter; confirm against slsqp.f.
    iter = maxiter - 1
    acc = ftol
    epsilon = eps
    if not disp:
        iprint = 0
    # Constraints are triaged per type into a dictionary of tuples
    if isinstance(constraints, dict):
        constraints = (constraints, )
    cons = {'eq': (), 'ineq': ()}
    for ic, con in enumerate(constraints):
        # check type
        try:
            ctype = con['type'].lower()
        except KeyError:
            raise KeyError('Constraint %d has no type defined.' % ic)
        except TypeError:
            raise TypeError('Constraints must be defined using a '
                            'dictionary.')
        except AttributeError:
            raise TypeError("Constraint's type must be a string.")
        else:
            if ctype not in ['eq', 'ineq']:
                raise ValueError("Unknown constraint type '%s'." % con['type'])
        # check function
        if 'fun' not in con:
            raise ValueError('Constraint %d has no function defined.' % ic)
        # check Jacobian
        cjac = con.get('jac')
        if cjac is None:
            # approximate Jacobian function. The factory function is needed
            # to keep a reference to `fun`, see gh-4240.
            def cjac_factory(fun):
                def cjac(x, *args):
                    if jac in ['2-point', '3-point', 'cs']:
                        return approx_derivative(fun, x, method=jac, args=args,
                                                 rel_step=finite_diff_rel_step)
                    else:
                        return approx_derivative(fun, x, method='2-point',
                                                 abs_step=epsilon, args=args)
                return cjac
            cjac = cjac_factory(con['fun'])
        # update constraints' dictionary
        cons[ctype] += ({'fun': con['fun'],
                         'jac': cjac,
                         'args': con.get('args', ())}, )
    # Human-readable descriptions for the Fortran routine's exit codes.
    exit_modes = {-1: "Gradient evaluation required (g & a)",
                  0: "Optimization terminated successfully",
                  1: "Function evaluation required (f & c)",
                  2: "More equality constraints than independent variables",
                  3: "More than 3*n iterations in LSQ subproblem",
                  4: "Inequality constraints incompatible",
                  5: "Singular matrix E in LSQ subproblem",
                  6: "Singular matrix C in LSQ subproblem",
                  7: "Rank-deficient equality constraint subproblem HFTI",
                  8: "Positive directional derivative for linesearch",
                  9: "Iteration limit reached"}
    # Transform x0 into an array.
    x = asfarray(x0).flatten()
    # SLSQP is sent 'old-style' bounds, 'new-style' bounds are required by
    # ScalarFunction
    if bounds is None or len(bounds) == 0:
        new_bounds = (-np.inf, np.inf)
    else:
        new_bounds = old_bound_to_new(bounds)
    # clip the initial guess to bounds, otherwise ScalarFunction doesn't work
    x = np.clip(x, new_bounds[0], new_bounds[1])
    # Set the parameters that SLSQP will need
    # meq, mieq: number of equality and inequality constraints
    meq = sum(map(len, [atleast_1d(c['fun'](x, *c['args']))
              for c in cons['eq']]))
    mieq = sum(map(len, [atleast_1d(c['fun'](x, *c['args']))
               for c in cons['ineq']]))
    # m = The total number of constraints
    m = meq + mieq
    # la = The number of constraints, or 1 if there are no constraints
    la = array([1, m]).max()
    # n = The number of independent variables
    n = len(x)
    # Define the workspaces for SLSQP
    # (sizes come from the Fortran routine's documentation)
    n1 = n + 1
    mineq = m - meq + n1 + n1
    len_w = (3*n1+m)*(n1+1)+(n1-meq+1)*(mineq+2) + 2*mineq+(n1+mineq)*(n1-meq) \
            + 2*meq + n1 + ((n+1)*n)//2 + 2*m + 3*n + 3*n1 + 1
    len_jw = mineq
    w = zeros(len_w)
    jw = zeros(len_jw)
    # Decompose bounds into xl and xu
    if bounds is None or len(bounds) == 0:
        xl = np.empty(n, dtype=float)
        xu = np.empty(n, dtype=float)
        xl.fill(np.nan)
        xu.fill(np.nan)
    else:
        bnds = array([(_arr_to_scalar(l), _arr_to_scalar(u))
                      for (l, u) in bounds], float)
        if bnds.shape[0] != n:
            raise IndexError('SLSQP Error: the length of bounds is not '
                             'compatible with that of x0.')
        with np.errstate(invalid='ignore'):
            bnderr = bnds[:, 0] > bnds[:, 1]
        if bnderr.any():
            raise ValueError('SLSQP Error: lb > ub in bounds %s.' %
                             ', '.join(str(b) for b in bnderr))
        xl, xu = bnds[:, 0], bnds[:, 1]
        # Mark infinite bounds with nans; the Fortran code understands this
        infbnd = ~isfinite(bnds)
        xl[infbnd[:, 0]] = np.nan
        xu[infbnd[:, 1]] = np.nan
    # ScalarFunction provides function and gradient evaluation
    sf = _prepare_scalar_function(func, x, jac=jac, args=args, epsilon=eps,
                                  finite_diff_rel_step=finite_diff_rel_step,
                                  bounds=new_bounds)
    # Initialize the iteration counter and the mode value
    mode = array(0, int)
    acc = array(acc, float)
    majiter = array(iter, int)
    majiter_prev = 0
    # Initialize internal SLSQP state variables
    # (the Fortran routine keeps its state in these scalars between calls)
    alpha = array(0, float)
    f0 = array(0, float)
    gs = array(0, float)
    h1 = array(0, float)
    h2 = array(0, float)
    h3 = array(0, float)
    h4 = array(0, float)
    t = array(0, float)
    t0 = array(0, float)
    tol = array(0, float)
    iexact = array(0, int)
    incons = array(0, int)
    ireset = array(0, int)
    itermx = array(0, int)
    line = array(0, int)
    n1 = array(0, int)
    n2 = array(0, int)
    n3 = array(0, int)
    # Print the header if iprint >= 2
    if iprint >= 2:
        print("%5s %5s %16s %16s" % ("NIT", "FC", "OBJFUN", "GNORM"))
    # mode is zero on entry, so call objective, constraints and gradients
    # there should be no func evaluations here because it's cached from
    # ScalarFunction
    fx = sf.fun(x)
    try:
        fx = float(np.asarray(fx))
    except (TypeError, ValueError):
        raise ValueError("Objective function must return a scalar")
    g = append(sf.grad(x), 0.0)
    c = _eval_constraint(x, cons)
    a = _eval_con_normals(x, cons, la, n, m, meq, mieq)
    # Reverse-communication loop: slsqp() asks for new function values
    # (mode == 1) or gradients (mode == -1) until it converges or fails.
    while 1:
        # Call SLSQP
        slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw,
              alpha, f0, gs, h1, h2, h3, h4, t, t0, tol,
              iexact, incons, ireset, itermx, line,
              n1, n2, n3)
        if mode == 1:  # objective and constraint evaluation required
            fx = sf.fun(x)
            c = _eval_constraint(x, cons)
        if mode == -1:  # gradient evaluation required
            g = append(sf.grad(x), 0.0)
            a = _eval_con_normals(x, cons, la, n, m, meq, mieq)
        if majiter > majiter_prev:
            # call callback if major iteration has incremented
            if callback is not None:
                callback(np.copy(x))
            # Print the status of the current iterate if iprint > 2
            if iprint >= 2:
                print("%5i %5i % 16.6E % 16.6E" % (majiter, sf.nfev,
                                                   fx, linalg.norm(g)))
        # If exit mode is not -1 or 1, slsqp has completed
        if abs(mode) != 1:
            break
        majiter_prev = int(majiter)
    # Optimization loop complete. Print status if requested
    if iprint >= 1:
        print(exit_modes[int(mode)] + " (Exit mode " + str(mode) + ')')
        print(" Current function value:", fx)
        print(" Iterations:", majiter)
        print(" Function evaluations:", sf.nfev)
        print(" Gradient evaluations:", sf.ngev)
    return OptimizeResult(x=x, fun=fx, jac=g[:-1], nit=int(majiter),
                          nfev=sf.nfev, njev=sf.ngev, status=int(mode),
                          message=exit_modes[int(mode)], success=(mode == 0))
def _eval_constraint(x, cons):
# Compute constraints
if cons['eq']:
c_eq = concatenate([atleast_1d(con['fun'](x, *con['args']))
for con in cons['eq']])
else:
c_eq = zeros(0)
if cons['ineq']:
c_ieq = concatenate([atleast_1d(con['fun'](x, *con['args']))
for con in cons['ineq']])
else:
c_ieq = zeros(0)
# Now combine c_eq and c_ieq into a single matrix
c = concatenate((c_eq, c_ieq))
return c
def _eval_con_normals(x, cons, la, n, m, meq, mieq):
# Compute the normals of the constraints
if cons['eq']:
a_eq = vstack([con['jac'](x, *con['args'])
for con in cons['eq']])
else: # no equality constraint
a_eq = zeros((meq, n))
if cons['ineq']:
a_ieq = vstack([con['jac'](x, *con['args'])
for con in cons['ineq']])
else: # no inequality constraint
a_ieq = zeros((mieq, n))
# Now combine a_eq and a_ieq into a single a matrix
if m == 0: # no constraints
a = zeros((la, n))
else:
a = vstack((a_eq, a_ieq))
a = concatenate((a, zeros([la, 1])), 1)
return a
# Self-test / demo: run two small constrained problems through both the
# legacy fmin_slsqp interface and the new _minimize_slsqp driver.
if __name__ == '__main__':
    # objective function
    # (the mutable default `r` is only read, never mutated, so it is safe)
    def fun(x, r=[4, 2, 4, 2, 1]):
        """ Objective function """
        return exp(x[0]) * (r[0] * x[0]**2 + r[1] * x[1]**2 +
                            r[2] * x[0] * x[1] + r[3] * x[1] +
                            r[4])
    # bounds: x0 >= 0.1, x1 >= 0.2, no upper bounds
    bnds = array([[-inf]*2, [inf]*2]).T
    bnds[:, 0] = [0.1, 0.2]
    # constraints
    def feqcon(x, b=1):
        """ Equality constraint """
        return array([x[0]**2 + x[1] - b])
    def jeqcon(x, b=1):
        """ Jacobian of equality constraint """
        return array([[2*x[0], 1]])
    def fieqcon(x, c=10):
        """ Inequality constraint """
        return array([x[0] * x[1] + c])
    def jieqcon(x, c=10):
        """ Jacobian of inequality constraint """
        return array([[1, 1]])
    # constraints dictionaries (new-style interface)
    cons = ({'type': 'eq', 'fun': feqcon, 'jac': jeqcon, 'args': (1, )},
            {'type': 'ineq', 'fun': fieqcon, 'jac': jieqcon, 'args': (10,)})
    # Bounds constraint problem
    print(' Bounds constraints '.center(72, '-'))
    print(' * fmin_slsqp')
    x, f = fmin_slsqp(fun, array([-1, 1]), bounds=bnds, disp=1,
                      full_output=True)[:2]
    print(' * _minimize_slsqp')
    res = _minimize_slsqp(fun, array([-1, 1]), bounds=bnds,
                          **{'disp': True})
    # Equality and inequality constraints problem
    print(' Equality and inequality constraints '.center(72, '-'))
    print(' * fmin_slsqp')
    x, f = fmin_slsqp(fun, array([-1, 1]),
                      f_eqcons=feqcon, fprime_eqcons=jeqcon,
                      f_ieqcons=fieqcon, fprime_ieqcons=jieqcon,
                      disp=1, full_output=True)[:2]
    print(' * _minimize_slsqp')
    res = _minimize_slsqp(fun, array([-1, 1]), constraints=cons,
                          **{'disp': True})
| 36.463063
| 80
| 0.574986
|
4a05177431255b72e9dc5e52c0ac20db04577ad7
| 1,966
|
py
|
Python
|
deeplearning/fashion-mnist/casplt.py
|
c964309085/sas-viya-programming
|
0332ddc3670bfe9043d1fc9d41e346f5eae59994
|
[
"Apache-2.0"
] | 128
|
2016-07-09T16:44:31.000Z
|
2022-03-31T22:01:35.000Z
|
deeplearning/fashion-mnist/casplt.py
|
c964309085/sas-viya-programming
|
0332ddc3670bfe9043d1fc9d41e346f5eae59994
|
[
"Apache-2.0"
] | 14
|
2017-01-06T14:02:17.000Z
|
2022-02-10T02:28:41.000Z
|
deeplearning/fashion-mnist/casplt.py
|
c964309085/sas-viya-programming
|
0332ddc3670bfe9043d1fc9d41e346f5eae59994
|
[
"Apache-2.0"
] | 141
|
2016-07-08T19:52:16.000Z
|
2022-03-01T03:30:33.000Z
|
import pandas as pd
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
def plot_imgs(cas_table, class_list=range(10), images_per_class=2, figsize=(20,20), query_condition=None, font_size=12):
    """Plot sample Fashion-MNIST images drawn from a CASTable object.

    For each class in ``class_list``, samples ``images_per_class`` rows
    (optionally filtered by ``query_condition``) and renders the 28x28
    grayscale image with its true label. When ``query_condition`` is given,
    the predicted label (``_DL_PredName_``) is shown as the x-axis label.

    Parameters
    ----------
    cas_table : CASTable
        Table holding ``_image_`` (raw 28x28 uint8 blob) and ``_label_``
        columns; scored tables also hold ``_DL_PredName_``.
    class_list : iterable of int
        Class indices (0-9) to display.
    images_per_class : int
        Number of random samples to show per class.
    figsize : tuple
        Matplotlib figure size.
    query_condition : str or None
        Optional filter (e.g. "_label_ ne _DL_PredName_"); also switches on
        display of predicted labels.
    font_size : int
        Font size for titles and axis labels.
    """
    class_description = {'class0':'T-shirt/top', 'class1':'Trouser', 'class2':'Pullover', 'class3':'Dress', 'class4':'Coat',
                         'class5':'Sandal', 'class6':'Shirt', 'class7':'Sneaker', 'class8':'Bag', 'class9':'Ankle boot'}
    img_list = []
    lbl_list = []
    prd_list = []
    arr_list = []
    # Orient the subplot grid so the longer dimension runs along columns.
    if len(class_list) < images_per_class:
        fig, axes = plt.subplots(nrows=len(class_list), ncols=images_per_class, figsize=figsize)
    else:
        fig, axes = plt.subplots(nrows=images_per_class, ncols=len(class_list), figsize=figsize)
    for i in class_list:
        a = cas_table.groupby(['_label_']).get_group(['class'+str(i)]).query(query_condition)
        b = a.sample(images_per_class).fetch(to=images_per_class)
        lbl_list.append((b['Fetch']['_label_']))
        img_list.append((b['Fetch']['_image_']))
        if query_condition is not None:
            prd_list.append((b['Fetch']['_DL_PredName_']))
    img_df = pd.concat(img_list)
    lbl_df = pd.concat(lbl_list)
    if query_condition is not None:
        prd_df = pd.concat(prd_list)
    for j in img_df:
        # frombuffer replaces the deprecated np.fromstring; assumes the
        # _image_ blob is a bytes-like 784-byte buffer — TODO confirm.
        c = np.frombuffer(j, dtype=np.uint8)
        c = c.reshape((28, 28))
        arr_list.append(c)
    for x, ax in enumerate(axes.flat):
        ax.imshow(arr_list[x], cmap='gray')
        ax.set_title('True label: {}'.format(class_description[lbl_df.iloc[x]]))
        ax.title.set_fontsize(font_size)
        ax.xaxis.label.set_fontsize(font_size)
        if query_condition is not None:
            ax.set_xlabel('Pred label: {}'.format(class_description[prd_df.iloc[x]]))
        ax.set_xticks([])
        ax.set_yticks([])
    plt.tight_layout()
| 35.107143
| 124
| 0.62411
|
4a0518cbc3961ba337b888b4d96cdb8615611a4f
| 2,846
|
py
|
Python
|
user/user.py
|
rhysflook/progress-sockets
|
b155b7bd60ad8ed82da5f7afda3a86ee0b460729
|
[
"MIT"
] | null | null | null |
user/user.py
|
rhysflook/progress-sockets
|
b155b7bd60ad8ed82da5f7afda3a86ee0b460729
|
[
"MIT"
] | null | null | null |
user/user.py
|
rhysflook/progress-sockets
|
b155b7bd60ad8ed82da5f7afda3a86ee0b460729
|
[
"MIT"
] | null | null | null |
import websockets
import asyncio
import requests
import os
import json
class User:
    """A connected player tracked by the socket server.

    Holds the user's websocket, online/in-game status, and a reference to
    ``app`` — the server-wide mapping of user id -> ``User``.
    """

    def __init__(self, id, websocket, app):
        self.id = id
        self.websocket = websocket
        self.app = app
        self.in_game = False
        self.opponent = None   # the User currently being played against
        self.active = True     # False while the socket is disconnected

    def _fetch_friend_data(self):
        # One backend round-trip; returns a list of [id, name] pairs.
        return requests.get(f'{os.environ.get("API_BASE_URL")}backend/friends/getFriends.php?id={self.id}').json()

    def get_friends(self):
        """Return a dict of this user's friends with live online/in-game flags."""
        friends = {}
        for friend in self._fetch_friend_data():
            id, name = friend[0], friend[1]
            friends[id] = {
                'id': id,
                'name': name,
                'online': False,
                'inGame': False
            }
            # A friend present in the app registry is currently connected.
            if id in self.app.keys():
                friends[id]['online'] = True
                friends[id]['inGame'] = self.app[id].in_game
        return friends

    def get_friends_sockets(self):
        """Return the websockets of all currently-connected friends."""
        friend_data = self._fetch_friend_data()
        return [self.app[friend[0]].websocket for friend in friend_data if friend[0] in self.app.keys()]

    def handle_login(self):
        """Announce to online friends that this user has logged in."""
        # The previous version also fetched the friend sockets here and
        # discarded them — an extra backend round-trip; notify_friends
        # already fetches them itself.
        self.notify_friends('login')

    def notify_opponent(self, is_returning=False):
        """Tell the current opponent this user reconnected or disconnected."""
        message_type = 'reconnect' if is_returning else 'disconnect'
        websockets.broadcast({self.opponent.websocket}, json.dumps({
            'type': message_type,
            'id': self.id
        }))

    async def handle_disconnect(self):
        """Grace-period disconnect: log out only if not back within 30 s."""
        print(self.app)
        self.active = False
        if self.in_game:
            self.notify_opponent()
        await asyncio.sleep(30)
        if not self.active:
            self.notify_friends('logout')
            try:
                print(self.app)
                del self.app[self.id]
            except KeyError:
                # Another code path already removed this user.
                print('Connection already closed')

    def handle_reconnect(self, socket, location):
        """Mark the user active again on a fresh socket."""
        self.active = True
        self.websocket = socket
        # Only resume the game if the user came back to the game page.
        if self.in_game and location == 'game':
            self.notify_opponent(True)
        else:
            self.in_game = False

    def join_game(self, opponent):
        """Enter a game against ``opponent`` and tell friends."""
        self.in_game = True
        self.opponent = opponent
        self.notify_friends('joinGame')

    def leave_game(self):
        """Leave the current game and tell friends."""
        self.in_game = False
        self.opponent = None
        self.notify_friends('leaveGame')

    def notify_friends(self, messageType, **kwargs):
        """Broadcast a JSON event carrying this user's id to online friends."""
        sockets = self.get_friends_sockets()
        websockets.broadcast(sockets, json.dumps({
            'type': messageType,
            'id': self.id,
            **kwargs
        }))
| 30.276596
| 121
| 0.549192
|
4a051964290c5f53c61d5a05bb7cba77c18981e8
| 9,802
|
py
|
Python
|
Lib/test/test_site.py
|
ystk/debian-python2.6
|
17d77164dc5d5748e54aeaa5adc89ac511fc71ae
|
[
"PSF-2.0"
] | 3
|
2015-09-22T14:04:54.000Z
|
2021-07-15T07:07:11.000Z
|
Lib/test/test_site.py
|
ystk/debian-python2.6
|
17d77164dc5d5748e54aeaa5adc89ac511fc71ae
|
[
"PSF-2.0"
] | 1
|
2020-09-07T15:33:56.000Z
|
2020-09-07T15:33:56.000Z
|
Lib/test/test_site.py
|
ystk/debian-python2.6
|
17d77164dc5d5748e54aeaa5adc89ac511fc71ae
|
[
"PSF-2.0"
] | 2
|
2015-09-22T14:05:27.000Z
|
2015-11-08T20:15:42.000Z
|
"""Tests for 'site'.
Tests assume the initial paths in sys.path once the interpreter has begun
executing have not been removed.
"""
import unittest
from test.test_support import TestSkipped, run_unittest, TESTFN, EnvironmentVarGuard
import __builtin__
import os
import sys
import encodings
import subprocess
# Need to make sure to not import 'site' if someone specified ``-S`` at the
# command-line. Detect this by just making sure 'site' has not been imported
# already.
# Only import 'site' if the interpreter was started without -S; otherwise
# skip the whole module so we don't force a site.py import the user disabled.
if "site" in sys.modules:
    import site
else:
    raise TestSkipped("importation of site.py suppressed")
# Ensure the per-user site-packages directory exists and is on sys.path.
if not os.path.isdir(site.USER_SITE):
    # need to add user site directory for tests
    os.makedirs(site.USER_SITE)
    site.addsitedir(site.USER_SITE)
class HelperFunctionsTests(unittest.TestCase):
    """Tests for helper functions.
    The setting of the encoding (set using sys.setdefaultencoding) used by
    the Unicode implementation is not tested.
    """
    def setUp(self):
        """Save a copy of sys.path"""
        self.sys_path = sys.path[:]
    def tearDown(self):
        """Restore sys.path"""
        sys.path = self.sys_path
    def test_makepath(self):
        # Test makepath() have an absolute path for its first return value
        # and a case-normalized version of the absolute path for its
        # second value.
        path_parts = ("Beginning", "End")
        original_dir = os.path.join(*path_parts)
        abs_dir, norm_dir = site.makepath(*path_parts)
        self.failUnlessEqual(os.path.abspath(original_dir), abs_dir)
        if original_dir == os.path.normcase(original_dir):
            self.failUnlessEqual(abs_dir, norm_dir)
        else:
            self.failUnlessEqual(os.path.normcase(abs_dir), norm_dir)
    def test_init_pathinfo(self):
        # Every existing directory on sys.path must appear in the set
        # returned by site._init_pathinfo().
        dir_set = site._init_pathinfo()
        for entry in [site.makepath(path)[1] for path in sys.path
                      if path and os.path.isdir(path)]:
            self.failUnless(entry in dir_set,
                            "%s from sys.path not found in set returned "
                            "by _init_pathinfo(): %s" % (entry, dir_set))
    def pth_file_tests(self, pth_file):
        """Contain common code for testing results of reading a .pth file"""
        self.failUnless(pth_file.imported in sys.modules,
                        "%s not in sys.path" % pth_file.imported)
        self.failUnless(site.makepath(pth_file.good_dir_path)[0] in sys.path)
        self.failUnless(not os.path.exists(pth_file.bad_dir_path))
    def test_addpackage(self):
        # Make sure addpackage() imports if the line starts with 'import',
        # adds directories to sys.path for any line in the file that is not a
        # comment or import that is a valid directory name for where the .pth
        # file resides; invalid directories are not added
        pth_file = PthFile()
        pth_file.cleanup(prep=True)  # to make sure that nothing is
                                     # pre-existing that shouldn't be
        try:
            pth_file.create()
            site.addpackage(pth_file.base_dir, pth_file.filename, set())
            self.pth_file_tests(pth_file)
        finally:
            pth_file.cleanup()
    def test_addsitedir(self):
        # Same tests for test_addpackage since addsitedir() essentially just
        # calls addpackage() for every .pth file in the directory
        pth_file = PthFile()
        pth_file.cleanup(prep=True)  # Make sure that nothing is pre-existing
                                     # that is tested for
        try:
            pth_file.create()
            site.addsitedir(pth_file.base_dir, set())
            self.pth_file_tests(pth_file)
        finally:
            pth_file.cleanup()
    def test_s_option(self):
        # The user site directory must be on sys.path by default, dropped
        # with -s or PYTHONNOUSERSITE, and relocatable via PYTHONUSERBASE.
        # NOTE: sys.exit(True) gives exit code 1, so rc encodes the boolean
        # result of the subprocess's membership test.
        usersite = site.USER_SITE
        self.assert_(usersite in sys.path)
        rc = subprocess.call([sys.executable, '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite])
        self.assertEqual(rc, 1)
        rc = subprocess.call([sys.executable, '-s', '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite])
        self.assertEqual(rc, 0)
        env = os.environ.copy()
        env["PYTHONNOUSERSITE"] = "1"
        rc = subprocess.call([sys.executable, '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite],
            env=env)
        self.assertEqual(rc, 0)
        env = os.environ.copy()
        env["PYTHONUSERBASE"] = "/tmp"
        rc = subprocess.call([sys.executable, '-c',
            'import sys, site; sys.exit(site.USER_BASE.startswith("/tmp"))'],
            env=env)
        self.assertEqual(rc, 1)
class PthFile(object):
    """Helper class for handling testing of .pth files"""
    def __init__(self, filename_base=TESTFN, imported="time",
                 good_dirname="__testdir__", bad_dirname="__bad"):
        """Initialize instance variables"""
        self.filename = filename_base + ".pth"
        self.base_dir = os.path.abspath('')
        self.file_path = os.path.join(self.base_dir, self.filename)
        self.imported = imported
        self.good_dirname = good_dirname
        self.bad_dirname = bad_dirname
        self.good_dir_path = os.path.join(self.base_dir, self.good_dirname)
        self.bad_dir_path = os.path.join(self.base_dir, self.bad_dirname)
    def create(self):
        """Create a .pth file with a comment, blank lines, an ``import
        <self.imported>``, a line with self.good_dirname, and a line with
        self.bad_dirname.
        Creation of the directory for self.good_dir_path (based off of
        self.good_dirname) is also performed.
        Make sure to call self.cleanup() to undo anything done by this method.
        """
        # Python 2 print-chevron syntax: each line goes into the .pth file.
        FILE = open(self.file_path, 'w')
        try:
            print>>FILE, "#import @bad module name"
            print>>FILE, "\n"
            print>>FILE, "import %s" % self.imported
            print>>FILE, self.good_dirname
            print>>FILE, self.bad_dirname
        finally:
            FILE.close()
        # Only the "good" directory is actually created on disk.
        os.mkdir(self.good_dir_path)
    def cleanup(self, prep=False):
        """Make sure that the .pth file is deleted, self.imported is not in
        sys.modules, and that both self.good_dirname and self.bad_dirname are
        not existing directories."""
        if os.path.exists(self.file_path):
            os.remove(self.file_path)
        if prep:
            # Remember (and drop) any pre-imported module so it can be
            # restored by the non-prep cleanup call later.
            self.imported_module = sys.modules.get(self.imported)
            if self.imported_module:
                del sys.modules[self.imported]
        else:
            if self.imported_module:
                sys.modules[self.imported] = self.imported_module
        if os.path.exists(self.good_dir_path):
            os.rmdir(self.good_dir_path)
        if os.path.exists(self.bad_dir_path):
            os.rmdir(self.bad_dir_path)
class ImportSideEffectTests(unittest.TestCase):
    """Test side-effects from importing 'site'."""
    def setUp(self):
        """Make a copy of sys.path"""
        self.sys_path = sys.path[:]
    def tearDown(self):
        """Restore sys.path"""
        sys.path = self.sys_path
    def test_abs__file__(self):
        # Make sure all imported modules have their __file__ attribute
        # as an absolute path.
        # Handled by abs__file__()
        site.abs__file__()
        for module in (sys, os, __builtin__):
            try:
                self.assertTrue(os.path.isabs(module.__file__), repr(module))
            except AttributeError:
                # Built-in modules may have no __file__ at all.
                continue
        # We could try everything in sys.modules; however, when regrtest.py
        # runs something like test_frozen before test_site, then we will
        # be testing things loaded *after* test_site did path normalization
    def test_no_duplicate_paths(self):
        # No duplicate paths should exist in sys.path
        # Handled by removeduppaths()
        site.removeduppaths()
        seen_paths = set()
        for path in sys.path:
            self.failUnless(path not in seen_paths)
            seen_paths.add(path)
    def test_add_build_dir(self):
        # Test that the build directory's Modules directory is used when it
        # should be.
        # XXX: implement
        pass
    def test_setting_quit(self):
        # 'quit' and 'exit' should be injected into __builtin__
        self.failUnless(hasattr(__builtin__, "quit"))
        self.failUnless(hasattr(__builtin__, "exit"))
    def test_setting_copyright(self):
        # 'copyright' and 'credits' should be in __builtin__
        self.failUnless(hasattr(__builtin__, "copyright"))
        self.failUnless(hasattr(__builtin__, "credits"))
    def test_setting_help(self):
        # 'help' should be set in __builtin__
        self.failUnless(hasattr(__builtin__, "help"))
    def test_aliasing_mbcs(self):
        # On Windows code-page locales, some encoding alias must map to mbcs.
        if sys.platform == "win32":
            import locale
            if locale.getdefaultlocale()[1].startswith('cp'):
                for value in encodings.aliases.aliases.itervalues():
                    if value == "mbcs":
                        break
                else:
                    self.fail("did not alias mbcs")
    def test_setdefaultencoding_removed(self):
        # Make sure sys.setdefaultencoding is gone
        self.failUnless(not hasattr(sys, "setdefaultencoding"))
    def test_sitecustomize_executed(self):
        # If sitecustomize is available, it should have been imported.
        if "sitecustomize" not in sys.modules:
            try:
                import sitecustomize
            except ImportError:
                pass
            else:
                self.fail("sitecustomize not imported automatically")
def test_main():
    """Run both test suites through the regrtest helper."""
    suites = (HelperFunctionsTests, ImportSideEffectTests)
    run_unittest(*suites)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_main()
| 37.128788
| 84
| 0.620486
|
4a05197ad8d4ba33745cac275574adeb772b8e41
| 14,818
|
py
|
Python
|
A3C/agent/manager/basic_manager.py
|
sarikayamehmet/Framework-for-Actor-Critic-deep-reinforcement-learning-algorithms
|
a2902f903956427074769b71b41ddc81e10276c3
|
[
"MIT"
] | 8
|
2018-10-12T09:05:52.000Z
|
2021-07-28T11:08:35.000Z
|
A3C/agent/manager/basic_manager.py
|
sarikayamehmet/Framework-for-Actor-Critic-deep-reinforcement-learning-algorithms
|
a2902f903956427074769b71b41ddc81e10276c3
|
[
"MIT"
] | null | null | null |
A3C/agent/manager/basic_manager.py
|
sarikayamehmet/Framework-for-Actor-Critic-deep-reinforcement-learning-algorithms
|
a2902f903956427074769b71b41ddc81e10276c3
|
[
"MIT"
] | 1
|
2019-12-31T10:55:49.000Z
|
2019-12-31T10:55:49.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
import tensorflow as tf
import numpy as np
from agent.network import *
from utils.buffer import Buffer, PrioritizedBuffer
# from utils.schedules import LinearSchedule
from agent.batch import ExperienceBatch
from sklearn.random_projection import SparseRandomProjection
import options
flags = options.get()
class BasicManager(object):
	def __init__(self, session, device, id, action_shape, state_shape, concat_size=0, global_network=None, training=True):
		"""Build a (local or global) manager of one or more agent networks.

		When training, a global manager (global_network is None) owns the
		optimizers; local managers bind their losses to the global one and
		sync from it. When not training, models are shared from the global
		manager and only usage statistics are tracked.
		"""
		self.training = training
		self.session = session
		self.id = id
		self.device = device
		self.state_shape = state_shape
		self.set_model_size()
		if self.training:
			self.global_network = global_network
			# Gradient optimizer and clip range
			if not self.is_global_network():
				self.clip = self.global_network.clip
			else:
				self.initialize_gradient_optimizer()
			# Build agents
			self.model_list = []
			self.build_agents(state_shape=state_shape, action_shape=action_shape, concat_size=concat_size)
			# Build experience buffer
			if flags.replay_ratio > 0:
				if flags.prioritized_replay:
					self.experience_buffer = PrioritizedBuffer(size=flags.replay_buffer_size)
					# self.beta_schedule = LinearSchedule(flags.max_time_step, initial_p=0.4, final_p=1.0)
				else:
					self.experience_buffer = Buffer(size=flags.replay_buffer_size)
			if flags.predict_reward:
				self.reward_prediction_buffer = Buffer(size=flags.reward_prediction_buffer_size)
			# Bind optimizer to global
			if not self.is_global_network():
				self.bind_to_global(self.global_network)
			# Count based exploration
			if flags.use_count_based_exploration_reward:
				self.projection = None
				self.projection_dataset = []
			if flags.print_loss:
				self._loss_list = [{} for _ in range(self.model_size)]
		else:
			self.global_network = None
			self.model_list = global_network.model_list
		# Statistics
		self._model_usage_list = deque()
	def is_global_network(self):
		"""True when this manager owns the master (global) network."""
		return self.global_network is None
	def set_model_size(self):
		"""Configure the number of sub-models; subclasses may override."""
		self.model_size = 1
		self.agents_set = set([0])
	def build_agents(self, state_shape, action_shape, concat_size):
		"""Instantiate the agent network(s) selected by flags.network.

		NOTE(review): eval() on a config string selects the network class;
		this trusts the flag value — consider a registry lookup instead.
		"""
		agent=eval('{}_Network'.format(flags.network))(
			session=self.session,
			id='{0}_{1}'.format(self.id, 0),
			device=self.device,
			state_shape=state_shape,
			action_shape=action_shape,
			concat_size=concat_size,
			clip=self.clip[0],
			predict_reward=flags.predict_reward,
			training = self.training
		)
		self.model_list.append(agent)
def sync(self):
# assert not self.is_global_network(), 'you are trying to sync the global network with itself'
for i in range(self.model_size):
agent = self.model_list[i]
sync = self.sync_list[i]
agent.sync(sync)
	def initialize_gradient_optimizer(self):
		"""Create per-model global step, learning-rate/clip schedules and optimizers.

		NOTE(review): the annealing function and optimizer class are chosen by
		eval() over flag strings — trusted configuration only.
		"""
		self.global_step = []
		self.learning_rate = []
		self.clip = []
		self.gradient_optimizer = []
		for i in range(self.model_size):
			# global step
			self.global_step.append( tf.Variable(0, trainable=False) )
			# learning rate (annealed only when alpha_decay is set)
			self.learning_rate.append( eval('tf.train.'+flags.alpha_annealing_function)(learning_rate=flags.alpha, global_step=self.global_step[i], decay_steps=flags.alpha_decay_steps, decay_rate=flags.alpha_decay_rate) if flags.alpha_decay else flags.alpha )
			# clip (annealed only when clip_decay is set)
			self.clip.append( eval('tf.train.'+flags.clip_annealing_function)(learning_rate=flags.clip, global_step=self.global_step[i], decay_steps=flags.clip_decay_steps, decay_rate=flags.clip_decay_rate) if flags.clip_decay else flags.clip )
			# gradient optimizer
			self.gradient_optimizer.append( eval('tf.train.'+flags.optimizer+'Optimizer')(learning_rate=self.learning_rate[i], use_locking=True) )
	def bind_to_global(self, global_network):
		"""Wire each local model's loss to the global optimizer and build sync ops."""
		self.sync_list = []
		for i in range(self.model_size):
			local_agent = self.get_model(i)
			global_agent = global_network.get_model(i)
			# Gradients computed locally are applied to the global variables.
			local_agent.minimize_local_loss(optimizer=global_network.gradient_optimizer[i], global_step=global_network.global_step[i], global_var_list=global_agent.get_shared_keys())
			self.sync_list.append(local_agent.bind_sync(global_agent)) # for syncing local network with global one
	def get_model(self, id):
		"""Return the sub-model with the given index."""
		return self.model_list[id]
def get_statistics(self):
stats = {}
if self.training:
# build loss statistics
if flags.print_loss:
for i in range(self.model_size):
for key, value in self._loss_list[i].items():
stats['loss_{}{}_avg'.format(key,i)] = np.average(value)
# build models usage statistics
if self.model_size > 1:
total_usage = 0
usage_matrix = {}
for u in self._model_usage_list:
if not (u in usage_matrix):
usage_matrix[u] = 0
usage_matrix[u] += 1
total_usage += 1
for i in range(self.model_size):
stats['model_{}'.format(i)] = 0
for key, value in usage_matrix.items():
stats['model_{}'.format(key)] = value/total_usage if total_usage != 0 else 0
return stats
def add_to_statistics(self, id):
self._model_usage_list.append(id)
if len(self._model_usage_list) > flags.match_count_for_evaluation:
self._model_usage_list.popleft() # remove old statistics
def get_shared_keys(self):
vars = []
for agent in self.model_list:
vars += agent.get_shared_keys()
return vars
	def reset(self):
		"""Reset per-episode state: step counter, agent id, RNN state, hash table."""
		self.step = 0
		self.agent_id = 0
		# Internal states (RNN state): one shared state or one per sub-model
		self.internal_states = None if flags.share_internal_state else [None]*self.model_size
		if self.training:
			# Count based exploration
			if flags.use_count_based_exploration_reward:
				self.hash_state_table = {}
	def initialize_new_batch(self):
		"""Start collecting a fresh experience batch."""
		self.batch = ExperienceBatch(self.model_size)
	def estimate_value(self, agent_id, states, concats=None, internal_state=None):
		"""Predict state values with the given sub-model."""
		return self.get_model(agent_id).predict_value(states=states, concats=concats, internal_state=internal_state)
	def act(self, act_function, state, concat=None):
		"""Take one environment step with the current sub-model.

		Predicts an action from ``state``, applies it through ``act_function``
		and, when training, clips the extrinsic reward, adds the optional
		count-based intrinsic reward and stores the transition in the batch.
		Returns (new_state, value, action, total_reward, terminal, policy).
		"""
		agent_id = self.agent_id
		agent = self.get_model(agent_id)
		internal_state = self.internal_states if flags.share_internal_state else self.internal_states[agent_id]
		action_batch, value_batch, policy_batch, new_internal_state = agent.predict_action(states=[state], concats=[concat], internal_state=internal_state)
		if flags.share_internal_state:
			self.internal_states = new_internal_state
		else:
			self.internal_states[agent_id] = new_internal_state
		action, value, policy = action_batch[0], value_batch[0], policy_batch[0]
		new_state, extrinsic_reward, terminal = act_function(action)
		if self.training:
			if flags.clip_reward:
				extrinsic_reward = np.clip(extrinsic_reward, flags.min_reward, flags.max_reward)
		intrinsic_reward = 0
		if self.training:
			if flags.use_count_based_exploration_reward: # intrinsic reward
				intrinsic_reward += self.get_count_based_exploration_reward(new_state)
		# Rewards are kept as a 2-vector [extrinsic, intrinsic].
		total_reward = np.array([extrinsic_reward, intrinsic_reward], dtype=np.float32)
		if self.training:
			self.batch.add_action(agent_id=agent_id, state=state, concat=concat, action=action, policy=policy, reward=total_reward, value=value, internal_state=internal_state)
		# update step at the end of the action
		self.step += 1
		# return result
		return new_state, value, action, total_reward, terminal, policy
	def get_count_based_exploration_reward(self, new_state):
		"""Intrinsic reward from a locality-sensitive-hash visit counter.

		States are projected to a low-dimensional space with a sparse random
		projection, binarized into a hash, and rewarded inversely to the
		visit count of that hash (bonus in [-1, 1], scaled by sign-specific
		coefficients). Returns 0 until the projection has been fitted.
		"""
		if len(self.projection_dataset) < flags.projection_dataset_size:
			self.projection_dataset.append(new_state.flatten())
		if len(self.projection_dataset) == flags.projection_dataset_size:
			if self.projection is None:
				self.projection = SparseRandomProjection(n_components=flags.exploration_hash_size if flags.exploration_hash_size > 0 else 'auto') # http://scikit-learn.org/stable/modules/random_projection.html
			self.projection.fit(self.projection_dataset)
			self.projection_dataset = [] # reset
		if self.projection is not None:
			state_projection = self.projection.transform([new_state.flatten()])[0] # project to smaller dimension
			state_hash = ''.join('1' if x > 0 else '0' for x in state_projection) # build binary locality-sensitive hash
			if state_hash not in self.hash_state_table:
				self.hash_state_table[state_hash] = 1
			else:
				self.hash_state_table[state_hash] += 1
			exploration_bonus = 2/np.sqrt(self.hash_state_table[state_hash]) - 1 # in [-1,1]
			return flags.positive_exploration_coefficient*exploration_bonus if exploration_bonus > 0 else flags.negative_exploration_coefficient*exploration_bonus
		return 0
	def compute_discounted_cumulative_reward(self, batch):
		"""Fill the batch's returns/GAE, bootstrapping from its stored value if any."""
		last_value = batch.bootstrap['value'] if 'value' in batch.bootstrap else 0.
		batch.compute_discounted_cumulative_reward(agents=self.agents_set, last_value=last_value, gamma=flags.gamma, lambd=flags.lambd)
		return batch
def train(self, batch):
    """Run one gradient update per partition model on the given batch.

    For each partition i with at least one transition, optionally samples
    an auxiliary reward-prediction tuple, calls the model's train step,
    and (when flags.print_loss is set) accumulates bounded per-key loss
    statistics.  Returns the list of per-model training errors.
    """
    # assert self.global_network is not None, 'Cannot train the global network.'
    states = batch.states
    internal_states = batch.internal_states
    concats = batch.concats
    actions = batch.actions
    policies = batch.policies
    values = batch.values
    rewards = batch.rewards
    dcr = batch.discounted_cumulative_rewards
    gae = batch.generalized_advantage_estimators
    batch_error = []
    for i in range(self.model_size):
        batch_size = len(states[i])
        if batch_size > 0:
            model = self.get_model(i)
            # reward prediction (auxiliary task) — sample from the
            # dedicated buffer only when this model supports it
            if model.predict_reward:
                sampled_batch = self.reward_prediction_buffer.sample()
                reward_prediction_states, reward_prediction_target = self.get_reward_prediction_tuple(sampled_batch)
            else:
                reward_prediction_states = None
                reward_prediction_target = None
            # train
            # NOTE(review): only the first internal state of the partition
            # is passed — presumably the model unrolls from it; confirm.
            error, train_info = model.train(
                states=states[i], concats=concats[i],
                actions=actions[i], values=values[i],
                policies=policies[i],
                rewards=rewards[i],
                discounted_cumulative_rewards=dcr[i],
                generalized_advantage_estimators=gae[i],
                reward_prediction_states=reward_prediction_states,
                reward_prediction_target=reward_prediction_target,
                internal_state=internal_states[i][0]
            )
            batch_error.append(error)
            # loss statistics (bounded sliding window per loss key)
            if flags.print_loss:
                for key, value in train_info.items():
                    if key not in self._loss_list[i]:
                        self._loss_list[i][key] = deque()
                    self._loss_list[i][key].append(value)
                    if len(self._loss_list[i][key]) > flags.match_count_for_evaluation:  # remove old statistics
                        self._loss_list[i][key].popleft()
    return batch_error
def bootstrap(self, state, concat=None):
    """Estimate the value of the terminal state and record it, together
    with the context needed to re-estimate it later, in the batch's
    bootstrap dict."""
    agent_id = self.agent_id
    if flags.share_internal_state:
        internal_state = self.internal_states
    else:
        internal_state = self.internal_states[agent_id]
    value_batch, _ = self.estimate_value(agent_id=agent_id, states=[state], concats=[concat], internal_state=internal_state)
    self.batch.bootstrap.update({
        'internal_state': internal_state,
        'agent_id': agent_id,
        'state': state,
        'concat': concat,
        'value': value_batch[0],
    })
def replay_value(self, batch):  # replay values
    """Re-estimate state values for every step of a replayed batch (and
    its bootstrap entry, if any) with the current network, then rebuild
    its discounted returns/GAE.  Returns the updated batch."""
    # replay values
    for (agent_id, pos) in batch.step_generator():
        concat, state, internal_state = batch.get_action(['concats', 'states', 'internal_states'], agent_id, pos)
        value_batch, _ = self.estimate_value(agent_id=agent_id, states=[state], concats=[concat], internal_state=internal_state)
        batch.set_action({'values': value_batch[0]}, agent_id, pos)
    # Refresh the bootstrap value with the same context it was taken in.
    if 'value' in batch.bootstrap:
        bootstrap = batch.bootstrap
        agent_id = bootstrap['agent_id']
        value_batch, _ = self.estimate_value(agent_id=agent_id, states=[bootstrap['state']], concats=[bootstrap['concat']], internal_state=bootstrap['internal_state'])
        bootstrap['value'] = value_batch[0]
    return self.compute_discounted_cumulative_reward(batch)
def add_to_reward_prediction_buffer(self, batch):
    """Queue a batch for the reward-prediction auxiliary task; batches
    carrying a non-zero extrinsic reward are stored under type 1, the
    rest under type 0.  Batches with fewer than 2 steps are ignored."""
    if batch.get_size(self.agents_set) < 2:
        return
    extrinsic_reward = batch.get_cumulative_reward(self.agents_set)[0]
    self.reward_prediction_buffer.put(batch=batch, type_id=0 if extrinsic_reward == 0 else 1)  # process batch only after sampling, for better performance
def get_reward_prediction_tuple(self, batch):
    """Sample up to 3 consecutive states from *batch* plus a one-hot
    target classifying the extrinsic reward that follows them as
    zero / positive / negative."""
    steps = list(batch.step_generator(self.agents_set))
    flat_states = [batch.get_action('states', aid, pos) for (aid, pos) in steps]
    flat_rewards = [batch.get_action('rewards', aid, pos) for (aid, pos) in steps]
    states_count = len(flat_states)
    window = min(3, states_count - 1)
    start_idx = np.random.randint(states_count - window) if states_count > window else 0
    reward_prediction_states = flat_states[start_idx:start_idx + window]
    target_reward = flat_rewards[start_idx + window][0]  # use only extrinsic rewards
    reward_prediction_target = np.zeros((1, 3))
    if target_reward > 0:
        reward_prediction_target[0][1] = 1.0  # positive
    elif target_reward < 0:
        reward_prediction_target[0][2] = 1.0  # negative
    else:
        reward_prediction_target[0][0] = 1.0  # zero
    return reward_prediction_states, reward_prediction_target
def add_to_replay_buffer(self, batch, batch_error):
    """Store *batch* in the experience-replay buffer, typed by reward
    kind (1: intrinsic, 2: extrinsic, 0: none).  *batch_error* is kept
    for interface compatibility and is not used here."""
    if batch.get_size(self.agents_set) < 1:
        return
    extrinsic_reward, intrinsic_reward = batch.get_cumulative_reward(self.agents_set)
    total_reward = extrinsic_reward + intrinsic_reward
    if flags.save_only_batches_with_reward and total_reward == 0:
        return
    if flags.replay_using_default_internal_state:
        batch.reset_internal_states()
    if intrinsic_reward > 0:
        type_id = 1
    elif extrinsic_reward > 0:
        type_id = 2
    else:
        type_id = 0
    if flags.prioritized_replay:
        self.experience_buffer.put(batch=batch, priority=total_reward, type_id=type_id)
    else:
        self.experience_buffer.put(batch=batch, type_id=type_id)
def replay_experience(self):
    """Train on Poisson(flags.replay_ratio) batches sampled from the
    experience buffer, once it holds at least flags.replay_start items.
    Values are re-estimated first when flags.replay_value is set."""
    if not self.experience_buffer.has_atleast(flags.replay_start):
        return
    for _ in range(np.random.poisson(flags.replay_ratio)):
        sampled_batch = self.experience_buffer.sample()
        if flags.replay_value:
            sampled_batch = self.replay_value(sampled_batch)
        self.train(sampled_batch)
def process_batch(self, global_step):
    """Finalize the current batch: compute returns, optionally feed the
    reward-prediction buffer, train on it, and then run/feed experience
    replay when enabled and past flags.replay_step."""
    batch = self.compute_discounted_cumulative_reward(self.batch)
    # reward prediction
    if flags.predict_reward:
        self.add_to_reward_prediction_buffer(batch)  # do it before training, this way there will be at least one batch in the reward_prediction_buffer
        if self.reward_prediction_buffer.is_empty():
            return  # cannot train without reward prediction, wait until reward_prediction_buffer is not empty
    # train
    batch_error = self.train(batch)
    # experience replay (after training!)
    if flags.replay_ratio > 0 and global_step > flags.replay_step:
        self.replay_experience()
        self.add_to_replay_buffer(batch, batch_error)
| 42.096591
| 250
| 0.759414
|
4a051a8af348e91fcfdc8179ae1de62f09647f97
| 1,746
|
py
|
Python
|
test/_cythonize.py
|
jtwires/pystreaming
|
71c80557d32302895b9e0d18aaf5fe54d5b0b612
|
[
"MIT"
] | null | null | null |
test/_cythonize.py
|
jtwires/pystreaming
|
71c80557d32302895b9e0d18aaf5fe54d5b0b612
|
[
"MIT"
] | null | null | null |
test/_cythonize.py
|
jtwires/pystreaming
|
71c80557d32302895b9e0d18aaf5fe54d5b0b612
|
[
"MIT"
] | null | null | null |
import unittest
from streaming._cythonize import cythonize
def load_tests(_, tests, __):
    """unittest load_tests hook: add the doctests of streaming._cythonize
    to the collected suite."""
    import doctest
    import streaming._cythonize  # pylint: disable=W0212
    suite = doctest.DocTestSuite(streaming._cythonize)
    tests.addTests(suite)
    return tests
def _cyfunction():
    """cydocs"""
    # Stand-in "cython" implementation picked up by @cythonize via its name.
    result = 'cython'
    return result
class _CyClass(object):
    """cydocs"""
    # Stand-in "cython" class picked up by @cythonize via its name; the
    # docstring alone is a sufficient class body.
class CythonizeTestCases(unittest.TestCase):
    """Tests for the @cythonize decorator.

    NOTE(review): cythonize appears to dispatch to a same-named,
    underscore-prefixed sibling (``_cyfunction`` / ``_CyClass``) when one
    exists, falling back to the Python implementation otherwise or when
    ``python=True`` is passed — confirm against streaming._cythonize.
    """

    def test_function(self):
        # No ``_function`` counterpart exists, so the decorated function
        # keeps the pure-Python behavior and metadata.
        @cythonize
        def function(positional, keyword=None):  # pylint: disable=W0613
            """documentation"""
            return 'python'
        self.assertEqual(function.__name__, 'function')
        self.assertEqual(function.__doc__, 'documentation')
        self.assertEqual(function(None), 'python')
        # ``_cyfunction`` exists, so calls are routed to it while the
        # Python metadata (name/docstring) is preserved.
        @cythonize
        def cyfunction():
            """documentation"""
            return 'python'
        self.assertEqual(cyfunction.__name__, 'cyfunction')
        self.assertEqual(cyfunction.__doc__, 'documentation')
        self.assertEqual(cyfunction(), 'cython')
        # pylint: disable=E1123
        # The python=True escape hatch forces the Python implementation.
        self.assertEqual(cyfunction(python=True), 'python')

    def test_class(self):
        # No ``_Class`` counterpart: instances are plain Python objects.
        @cythonize
        class Class(object):
            """documentation"""
            pass
        self.assertEqual(Class.__name__, 'Class')
        self.assertEqual(Class.__doc__, 'documentation')
        self.assertTrue(isinstance(Class(), Class))
        # ``_CyClass`` exists: instantiation yields the cython stand-in,
        # unless python=True is requested.
        @cythonize
        class CyClass(object):
            """documentation"""
            pass
        self.assertEqual(CyClass.__name__, 'CyClass')
        self.assertEqual(CyClass.__doc__, 'documentation')
        self.assertTrue(isinstance(CyClass(), _CyClass))
        self.assertTrue(isinstance(CyClass(python=True), CyClass))
| 28.16129
| 72
| 0.636312
|
4a051ab54fcb7ef8182523e2647428679339979d
| 6,143
|
py
|
Python
|
mne/commands/mne_report.py
|
abramhindle/mne-python
|
989390a484cba219aae74c778b71568586f9edb2
|
[
"BSD-3-Clause"
] | null | null | null |
mne/commands/mne_report.py
|
abramhindle/mne-python
|
989390a484cba219aae74c778b71568586f9edb2
|
[
"BSD-3-Clause"
] | 1
|
2019-09-17T23:54:38.000Z
|
2019-09-17T23:54:38.000Z
|
mne/commands/mne_report.py
|
abramhindle/mne-python
|
989390a484cba219aae74c778b71568586f9edb2
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
r"""Create mne report for a folder.
Examples
--------
Before getting started with ``mne report``, make sure the files you want to
render follow the filename conventions defined by MNE:
.. highlight:: console
.. cssclass:: table-bordered
.. rst-class:: midvalign
============ ==============================================================
Data object Filename convention (ends with)
============ ==============================================================
raw -raw.fif(.gz), -raw_sss.fif(.gz), -raw_tsss.fif(.gz), _meg.fif
events -eve.fif(.gz)
epochs -epo.fif(.gz)
evoked -ave.fif(.gz)
covariance -cov.fif(.gz)
trans -trans.fif(.gz)
forward -fwd.fif(.gz)
inverse -inv.fif(.gz)
============ ==============================================================
To generate a barebones report from all the \*.fif files in the sample
dataset, invoke the following command in a system (e.g., Bash) shell::
$ mne report --path MNE-sample-data/ --verbose
On successful creation of the report, it will open the HTML in a new tab in
the browser. To disable this, use the ``--no-browser`` option.
To generate a report for a single subject, give the ``SUBJECT`` name and
the ``SUBJECTS_DIR`` and this will generate the MRI slices (with BEM
contours overlaid on top if available)::
$ mne report --path MNE-sample-data/ --subject sample --subjects-dir \
MNE-sample-data/subjects --verbose
To properly render ``trans`` and ``covariance`` files, add the measurement
information::
$ mne report --path MNE-sample-data/ \
--info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \
--subject sample --subjects-dir MNE-sample-data/subjects --verbose
To render whitened ``evoked`` files with baseline correction, add the noise
covariance file::
$ mne report --path MNE-sample-data/ \
--info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \
--cov MNE-sample-data/MEG/sample/sample_audvis-cov.fif --bmax 0 \
--subject sample --subjects-dir MNE-sample-data/subjects --verbose
To generate the report in parallel::
$ mne report --path MNE-sample-data/ \
--info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \
--subject sample --subjects-dir MNE-sample-data/subjects \
--verbose --jobs 6
For help on all the available options, do::
$ mne report --help
"""
import sys
import time
import mne
from mne.report import Report
from mne.utils import verbose, logger
@verbose
def log_elapsed(t, verbose=None):
    """Log elapsed time.

    Parameters
    ----------
    t : float
        Elapsed time in seconds; rounded to one decimal for display.
    verbose : bool | str | int | None
        Consumed by the mne ``@verbose`` decorator to set the log level.
    """
    logger.info('Report complete in %s seconds' % round(t, 1))
def run():
    """Run command.

    Parses the command-line options, renders every recognized file under
    ``--path`` into an mne.Report, saves it, and opens it in a browser
    unless ``--no-browser`` was given.  Exits with status 1 when no path
    is supplied.
    """
    from mne.commands.utils import get_optparser

    parser = get_optparser(__file__)

    # --- option declarations ---------------------------------------------
    # NOTE(review): the --path help text reads "folder who MNE-Report" —
    # presumably "folder for which the MNE Report"; confirm before editing
    # the user-visible string.
    parser.add_option("-p", "--path", dest="path",
                      help="Path to folder who MNE-Report must be created")
    parser.add_option("-i", "--info", dest="info_fname",
                      help="File from which info dictionary is to be read",
                      metavar="FILE")
    parser.add_option("-c", "--cov", dest="cov_fname",
                      help="File from which noise covariance is to be read",
                      metavar="FILE")
    parser.add_option("--bmin", dest="bmin",
                      help="Time at which baseline correction starts for "
                      "evokeds", default=None)
    parser.add_option("--bmax", dest="bmax",
                      help="Time at which baseline correction stops for "
                      "evokeds", default=None)
    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
                      help="The subjects directory")
    parser.add_option("-s", "--subject", dest="subject",
                      help="The subject name")
    parser.add_option("--no-browser", dest="no_browser", action='store_false',
                      help="Do not open MNE-Report in browser")
    parser.add_option("--overwrite", dest="overwrite", action='store_false',
                      help="Overwrite html report if it already exists")
    parser.add_option("-j", "--jobs", dest="n_jobs", help="Number of jobs to"
                      " run in parallel")
    parser.add_option("-m", "--mri-decim", type="int", dest="mri_decim",
                      default=2, help="Integer factor used to decimate "
                      "BEM plots")
    parser.add_option("--image-format", type="str", dest="image_format",
                      default='png', help="Image format to use "
                      "(can be 'png' or 'svg')")
    parser.add_option("-v", "--verbose", dest="verbose",
                      action='store_true', help="run in verbose mode")

    options, args = parser.parse_args()
    path = options.path
    if path is None:
        parser.print_help()
        sys.exit(1)
    info_fname = options.info_fname
    cov_fname = options.cov_fname
    subjects_dir = options.subjects_dir
    subject = options.subject
    image_format = options.image_format
    mri_decim = int(options.mri_decim)
    # The flag options above default to None, so "is not None" here really
    # means "the flag was given on the command line" (store_true/false
    # values are never inspected directly).
    verbose = True if options.verbose is not None else False
    open_browser = False if options.no_browser is not None else True
    overwrite = True if options.overwrite is not None else False
    n_jobs = int(options.n_jobs) if options.n_jobs is not None else 1
    bmin = float(options.bmin) if options.bmin is not None else None
    bmax = float(options.bmax) if options.bmax is not None else None
    # XXX: this means (None, None) cannot be specified through command line
    if bmin is None and bmax is None:
        baseline = None
    else:
        baseline = (bmin, bmax)

    t0 = time.time()
    report = Report(info_fname, subjects_dir=subjects_dir,
                    subject=subject, baseline=baseline,
                    cov_fname=cov_fname, verbose=verbose,
                    image_format=image_format)
    report.parse_folder(path, verbose=verbose, n_jobs=n_jobs,
                        mri_decim=mri_decim)
    log_elapsed(time.time() - t0, verbose=verbose)
    report.save(open_browser=open_browser, overwrite=overwrite)
# Presumably invokes run() when this module is executed as a script —
# mne.utils handles the __main__ check; confirm against mne.utils docs.
mne.utils.run_command_if_main()
| 39.127389
| 78
| 0.61159
|
4a051b8bfd8ef91d0dbe6001b4c2e790ad58adf7
| 12,275
|
py
|
Python
|
lib/googlecloudsdk/command_lib/domains/contacts_util.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/googlecloudsdk/command_lib/domains/contacts_util.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/command_lib/domains/contacts_util.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contacts utilties for Cloud Domains commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys
from apitools.base.protorpclite import messages as _messages
from googlecloudsdk.api_lib.domains import registrations
from googlecloudsdk.command_lib.domains import flags
from googlecloudsdk.command_lib.domains import util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.resource import resource_printer
def ParseContactData(api_version, path):
    """Parses contact data from a yaml file.

    Args:
      api_version: Cloud Domains API version to call.
      path: Path to a YAML file containing either a single allContacts
        entry or any subset of registrantContact / adminContact /
        technicalContact.

    Returns:
      A ContactSettings message, or None if the file parses to nothing.

    Raises:
      exceptions.Error: If the file mixes allContacts with a per-role
        field, or does not contain valid contact messages.
    """
    domains_messages = registrations.GetMessagesModule(api_version)

    class ContactData(_messages.Message):
        """Message that should be present in YAML file with contacts data."""
        # pylint: disable=invalid-name
        allContacts = _messages.MessageField(domains_messages.Contact, 1)
        registrantContact = _messages.MessageField(domains_messages.Contact, 2)
        adminContact = _messages.MessageField(domains_messages.Contact, 3)
        technicalContact = _messages.MessageField(domains_messages.Contact, 4)

    contacts = util.ParseMessageFromYamlFile(
        path, ContactData,
        'Contact data file \'{}\' does not contain valid contact messages'.format(
            path))
    if not contacts:
        return None
    parsed_contact = None
    if contacts.allContacts:
        # allContacts is exclusive with the per-role fields.
        for field in ['registrantContact', 'adminContact', 'technicalContact']:
            if contacts.get_assigned_value(field):
                raise exceptions.Error(
                    ('Contact data file \'{}\' cannot contain both '
                     'allContacts and {} fields.').format(path, field))
        # Fan the single contact out to all three roles.
        parsed_contact = domains_messages.ContactSettings(
            registrantContact=contacts.allContacts,
            adminContact=contacts.allContacts,
            technicalContact=contacts.allContacts)
    else:
        parsed_contact = domains_messages.ContactSettings(
            registrantContact=contacts.registrantContact,
            adminContact=contacts.adminContact,
            technicalContact=contacts.technicalContact)
    return parsed_contact
def PromptForContacts(api_version, current_contacts=None):
    """Interactively prompts for Whois Contact information.

    Args:
      api_version: Cloud Domains API version to call.
      current_contacts: Existing ContactSettings for update calls, or None
        for registration (create) calls.

    Returns:
      A ContactSettings message with the chosen fields set, or None if the
      user declined to enter data interactively or cancelled.
    """
    domains_messages = registrations.GetMessagesModule(api_version)
    create_call = (current_contacts is None)
    # Default the confirmation to "yes" on create (data is required there)
    # and "no" on update.
    if not console_io.PromptContinue(
        'Contact data not provided using the --contact-data-from-file flag.',
        prompt_string='Do you want to enter it interactively',
        default=create_call):
        return None

    if create_call:
        # Registration: one contact is entered and reused for all roles.
        contact = _PromptForSingleContact(domains_messages)
        return domains_messages.ContactSettings(
            registrantContact=contact,
            adminContact=contact,
            technicalContact=contact)

    choices = [
        'all the contacts to the same value', 'registrant contact',
        'admin contact', 'technical contact'
    ]
    # TODO(b/166210862): Make it a loop.
    index = console_io.PromptChoice(
        options=choices,
        cancel_option=True,
        default=0,
        message='Which contact do you want to change?')
    # NOTE(review): the "all" and "registrant" branches both seed the
    # prompt from registrantContact (the second parameter is currently
    # unused by _PromptForSingleContact, so this has no visible effect).
    if index == 0:
        contact = _PromptForSingleContact(domains_messages,
                                          current_contacts.registrantContact)
        return domains_messages.ContactSettings(
            registrantContact=contact,
            adminContact=contact,
            technicalContact=contact)
    if index == 1:
        contact = _PromptForSingleContact(domains_messages,
                                          current_contacts.registrantContact)
        return domains_messages.ContactSettings(registrantContact=contact)
    if index == 2:
        contact = _PromptForSingleContact(domains_messages,
                                          current_contacts.adminContact)
        return domains_messages.ContactSettings(adminContact=contact)
    if index == 3:
        contact = _PromptForSingleContact(domains_messages,
                                          current_contacts.technicalContact)
        return domains_messages.ContactSettings(technicalContact=contact)
    return None
def _PromptForSingleContact(domains_messages, unused_current_contact=None):
    """Asks a user for a single contact data.

    Prompts, in order, for name, organization, email, phone, fax, region
    code, postal code, administrative area, locality, and up to five
    address lines; validates the fields that have validators.  Returns a
    populated Contact message.
    """
    contact = domains_messages.Contact()
    contact.postalAddress = domains_messages.PostalAddress()
    # TODO(b/166210862): Use defaults from current_contact.
    # But then: How to clear a value?
    # TODO(b/166210862): Better validation: Call validate_only after each prompt.
    contact.postalAddress.recipients.append(
        util.PromptWithValidator(
            validator=util.ValidateNonEmpty,
            error_message=' Name must not be empty.',
            prompt_string='Full name: '))
    contact.postalAddress.organization = console_io.PromptResponse(
        'Organization (if applicable): ')
    # Email defaults to the active gcloud account.
    contact.email = util.PromptWithValidator(
        validator=util.ValidateEmail,
        error_message=' Invalid email address.',
        prompt_string='Email',
        default=properties.VALUES.core.account.Get())
    contact.phoneNumber = util.PromptWithValidator(
        validator=util.ValidateNonEmpty,
        error_message=' Phone number must not be empty.',
        prompt_string='Phone number: ',
        message='Enter phone number with country code, e.g. "+1.8005550123".')
    contact.faxNumber = util.Prompt(
        prompt_string='Fax number (if applicable): ',
        message='Enter fax number with country code, e.g. "+1.8005550123".')
    contact.postalAddress.regionCode = util.PromptWithValidator(
        validator=util.ValidateRegionCode,
        error_message=(
            ' Country / Region code must be in ISO 3166-1 format, e.g. "US" or '
            '"PL".\n See https://support.google.com/business/answer/6270107 for a'
            ' list of valid choices.'),
        prompt_string='Country / Region code: ',
        message='Enter two-letter Country / Region code, e.g. "US" or "PL".')
    # Non-US addresses get a pointer to the address-format guidelines.
    if contact.postalAddress.regionCode != 'US':
        log.status.Print('Refer to the guidelines for entering address field '
                         'information at '
                         'https://support.google.com/business/answer/6397478.')
    contact.postalAddress.postalCode = console_io.PromptResponse(
        'Postal / ZIP code: ')
    contact.postalAddress.administrativeArea = console_io.PromptResponse(
        'State / Administrative area (if applicable): ')
    contact.postalAddress.locality = console_io.PromptResponse(
        'City / Locality: ')
    contact.postalAddress.addressLines.append(
        util.PromptWithValidator(
            validator=util.ValidateNonEmpty,
            error_message=' Address Line 1 must not be empty.',
            prompt_string='Address Line 1: '))
    # Collect optional address lines 2..5; stop at the first empty answer.
    optional_address_lines = []
    address_line_num = 2
    while len(optional_address_lines) < 4:
        address_line_num = 2 + len(optional_address_lines)
        address_line = console_io.PromptResponse(
            'Address Line {} (if applicable): '.format(address_line_num))
        if not address_line:
            break
        optional_address_lines += [address_line]
    if optional_address_lines:
        contact.postalAddress.addressLines.extend(optional_address_lines)
    return contact
def ParseContactPrivacy(api_version, contact_privacy):
    """Convert a contact-privacy CLI choice to its API enum (None passes through)."""
    domains_messages = registrations.GetMessagesModule(api_version)
    if contact_privacy is None:
        return None
    mapper = flags.ContactPrivacyEnumMapper(domains_messages)
    return mapper.GetEnumForChoice(contact_privacy)
def PromptForContactPrivacy(api_version, choices, current_privacy=None):
    """Asks a user for Contacts Privacy.

    Args:
      api_version: Cloud Domains API version to call.
      choices: List of privacy choices.
      current_privacy: Current privacy. Should be nonempty in update calls.

    Returns:
      Privacy enum or None if the user cancelled.

    Raises:
      exceptions.Error: If choices is empty.
    """
    if not choices:
        raise exceptions.Error('Could not find supported contact privacy.')
    domains_messages = registrations.GetMessagesModule(api_version)
    # Sort the choices according to the privacy strength.
    choices.sort(key=flags.PrivacyChoiceStrength, reverse=True)
    if current_privacy:
        # Update call: confirm the user actually wants a change (or bail
        # out early if there is nothing to change to).
        if len(choices) == 1:
            log.status.Print(
                'Your current contact privacy is {}. It cannot be changed.'.format(
                    current_privacy))
            return None
        else:
            update = console_io.PromptContinue(
                'Your current contact privacy is {}.'.format(current_privacy),
                'Do you want to change it',
                default=False)
            if not update:
                return None
        # Preselect the choice matching the current privacy.
        current_choice = 0
        for ix, privacy in enumerate(choices):
            if privacy == flags.ContactPrivacyEnumMapper(
                    domains_messages).GetChoiceForEnum(current_privacy):
                current_choice = ix
    else:
        current_choice = 0  # The strongest available privacy
    if len(choices) == 1:
        # Only one option: ask for an acknowledgement instead of a choice.
        ack = console_io.PromptContinue(
            'The only supported contact privacy is {}.'.format(choices[0]),
            default=True)
        if not ack:
            return None
        return ParseContactPrivacy(api_version, choices[0])
    else:
        index = console_io.PromptChoice(
            options=choices,
            default=current_choice,
            message='Specify contact privacy')
        return ParseContactPrivacy(api_version, choices[index])
def ParsePublicContactsAck(api_version, notices):
    """Parses Contact Notices.

    Returns the PUBLIC_CONTACT_DATA_ACKNOWLEDGEMENT enum if present in
    notices, None if no such notice was given, and False when notices is
    None.  NOTE(review): the False/None asymmetry looks unintentional —
    callers relying only on truthiness are unaffected; confirm before
    changing.
    """
    domains_messages = registrations.GetMessagesModule(api_version)
    if notices is None:
        return False
    for notice in notices:
        enum = flags.ContactNoticeEnumMapper(domains_messages).GetEnumForChoice(
            notice)
        # pylint: disable=line-too-long
        if enum == domains_messages.ConfigureContactSettingsRequest.ContactNoticesValueListEntryValuesEnum.PUBLIC_CONTACT_DATA_ACKNOWLEDGEMENT:
            return enum
    return None
def MergeContacts(api_version, prev_contacts, new_contacts):
    """Overlay new_contacts onto prev_contacts, keeping the previous value
    for every role the new settings leave unset."""
    domains_messages = registrations.GetMessagesModule(api_version)
    if new_contacts is None:
        new_contacts = domains_messages.ContactSettings()
    merged = {}
    for role in ('registrantContact', 'adminContact', 'technicalContact'):
        merged[role] = getattr(new_contacts, role) or getattr(prev_contacts, role)
    return domains_messages.ContactSettings(**merged)
def _SimplifyContacts(contacts):
    """Returns one contact if all 3 contacts are equal, and all 3 contacts otherwise."""
    registrant = contacts.registrantContact
    if registrant == contacts.adminContact and registrant == contacts.technicalContact:
        return registrant
    return contacts
def PromptForPublicContactsAck(domain, contacts, print_format='default'):
    """Asks a user for Public Contacts Ack.

    Args:
      domain: Domain name.
      contacts: Current Contacts. All 3 contacts should be present.
      print_format: Print format, e.g. 'default' or 'yaml'.

    Returns:
      Boolean: whether the user accepted the notice or not.

    Raises:
      console_io.OperationCancelledError: On "no" (cancel_on_no=True).
    """
    log.status.Print(
        'You choose to make contact data of domain {} public.\n'
        'Anyone who looks it up in the WHOIS directory will be able to see info\n'
        'for the domain owner and administrative and technical contacts.\n'
        'Make sure it\'s ok with them that their contact data is public.\n'
        'This info will be publicly available:'.format(domain))
    # Collapse identical role contacts to a single entry before display.
    contacts = _SimplifyContacts(contacts)
    # Print to stderr so stdout stays clean for machine-readable output.
    resource_printer.Print(contacts, print_format, out=sys.stderr)
    return console_io.PromptContinue(
        message=None, default=False, throw_if_unattended=True, cancel_on_no=True)
# TODO(b/110398579): Integrate with ARI.
| 39.092357
| 139
| 0.72391
|
4a051bbf7a6b6ec2964f0017c63a128df1cf6886
| 12,234
|
py
|
Python
|
test/forward_backward_compatibility/check_forward_backward_compatibility.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | 1
|
2022-03-16T06:35:10.000Z
|
2022-03-16T06:35:10.000Z
|
test/forward_backward_compatibility/check_forward_backward_compatibility.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | null | null | null |
test/forward_backward_compatibility/check_forward_backward_compatibility.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | null | null | null |
import argparse
import datetime
import re
import sys
import warnings
from collections import defaultdict
import torch
from torch._C import parse_schema
# The date specifies how long the allowlist exclusion should apply to.
#
# - If we NEVER give BC guarantee for an operator, you can put the
# date arbitrarily far in the future.
# - Otherwise, pick a date that is far enough in the future that you
# believe you can land your diff before then.
#
# Allowlist entries can be removed after the date listed on them passes.
#
# Allowlist item format:
# [
# 0: function name regex
# 1: date until which the allowlist entry is valid
# 2: (optional) function argument regex
# ]
#
# NB: function name DOES NOT include overload name!
ALLOW_LIST = [
("c10_experimental", datetime.date(2222, 1, 1)),
# Internal
("static", datetime.date(9999, 1, 1)),
("prim::ModuleDictIndex", datetime.date(9999, 1, 1)),
("prim::MKLDNNRelu6", datetime.date(9999, 1, 1)),
("prim::MKLDNNRelu6_", datetime.date(9999, 1, 1)),
("prim::Concat", datetime.date(9999, 1, 1)),
# Internal, profiler-specific ops
("profiler::_call_end_callbacks_on_jit_fut*", datetime.date(9999, 1, 1)),
("profiler::_record_function_enter", datetime.date(9999, 1, 1)),
("aten::linalg_matrix_rank", datetime.date(2021, 10, 30)),
("aten::linalg_pinv", datetime.date(2021, 10, 30)),
("aten::_cholesky_helper", datetime.date(9999, 1, 1)),
("aten::_lstsq_helper", datetime.date(9999, 1, 1)),
("aten::_syevd_helper", datetime.date(9999, 1, 1)),
("aten::_lu_solve_helper", datetime.date(9999, 1, 1)),
("aten::_lu_with_info", datetime.date(9999, 1, 1)),
("aten::_linalg_solve_out_helper_", datetime.date(9999, 1, 1)),
("aten::select_backward", datetime.date(9999, 1, 1)),
("aten::slice_backward", datetime.date(9999, 1, 1)),
("aten::diagonal_backward", datetime.date(9999, 1, 1)),
("aten::rowwise_prune", datetime.date(9999, 1, 1)),
("aten::adaptive_avg_pool3d_backward", datetime.date(9999, 1, 1)),
("aten::_embedding_bag_dense_backward", datetime.date(9999, 1, 1)),
("aten::randperm", datetime.date(9999, 1, 1)),
("aten::_conv_depthwise2d_backward", datetime.date(2022, 1, 31)),
("aten::conv_depthwise3d_backward", datetime.date(2022, 1, 31)),
("aten::cudnn_convolution_backward", datetime.date(2022, 1, 31)),
("aten::cudnn_convolution_backward_input", datetime.date(2022, 1, 31)),
("aten::cudnn_convolution_backward_weight", datetime.date(2022, 1, 31)),
("aten::cudnn_convolution_transpose_backward", datetime.date(2022, 1, 31)),
("aten::cudnn_convolution_transpose_backward_input", datetime.date(2022, 1, 31)),
("aten::cudnn_convolution_transpose_backward_weight", datetime.date(2022, 1, 31)),
("aten::mkldnn_convolution_backward", datetime.date(2022, 1, 31)),
("aten::mkldnn_convolution_backward_input", datetime.date(2022, 1, 31)),
("aten::mkldnn_convolution_backward_weights", datetime.date(2022, 1, 31)),
("aten::_nnpack_spatial_convolution_backward", datetime.date(2022, 1, 31)),
("aten::_nnpack_spatial_convolution_backward_input", datetime.date(2022, 1, 31)),
("aten::_nnpack_spatial_convolution_backward_weight", datetime.date(2022, 1, 31)),
("aten::_slow_conv2d_forward", datetime.date(2022, 1, 31)),
("aten::_slow_conv2d_backward", datetime.date(2022, 1, 31)),
("aten::slow_conv3d_forward", datetime.date(2022, 1, 31)),
("aten::slow_conv3d_backward", datetime.date(2022, 1, 31)),
("aten::slow_conv_dilated2d_backward", datetime.date(2022, 1, 31)),
("aten::slow_conv_dilated3d_backward", datetime.date(2022, 1, 31)),
("aten::slow_conv_transpose2d", datetime.date(2022, 1, 31)),
("aten::slow_conv_transpose2d_backward", datetime.date(2022, 1, 31)),
("aten::slow_conv_transpose3d", datetime.date(2022, 1, 31)),
("aten::slow_conv_transpose3d_backward", datetime.date(2022, 1, 31)),
("aten::_log_softmax_backward_data", datetime.date(2021, 10, 21)),
("aten::_softmax_backward_data", datetime.date(2021, 10, 21)),
("aten::fused_moving_avg_obs_fake_quant", datetime.date(2021, 10, 21)),
("aten::_fused_moving_avg_obs_fq_helper", datetime.date(2021, 10, 21)),
("aten::_baddbmm_mkl_", datetime.date(2021, 10, 31)),
("aten::grid_sampler_2d_backward", datetime.date(2021, 10, 21)),
("aten::index_add.alpha", datetime.date(2021, 12, 31)),
("aten::index_add_.alpha", datetime.date(2021, 12, 31)),
("prim::TensorExprDynamicGuard", datetime.date(2021, 11, 20)),
("aten::split_with_sizes", datetime.date(2021, 11, 20)),
("aten::split", datetime.date(2021, 12, 20)),
("aten::vsplit", datetime.date(2021, 11, 20)),
("aten::tensor_split", datetime.date(2021, 11, 20)),
("aten::chunk", datetime.date(2021, 11, 20)),
("aten::unbind", datetime.date(2021, 11, 20)),
("aten::hsplit", datetime.date(2021, 11, 20)),
("aten::dsplit", datetime.date(2021, 11, 20)),
("aten::_convolution_nogroup", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_backward", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_backward_bias", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_backward_input", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_backward_weight", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_transpose_backward", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_transpose_backward_input", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_transpose_backward_weight", datetime.date(9999, 1, 1)),
("aten::miopen_depthwise_convolution_backward", datetime.date(9999, 1, 1)),
("aten::miopen_depthwise_convolution_backward_input", datetime.date(9999, 1, 1)),
("aten::miopen_depthwise_convolution_backward_weight", datetime.date(9999, 1, 1)),
("caffe2::", datetime.date(2021, 10, 23)),
("prepacked::unpack_prepacked_sizes_conv2d", datetime.date(9999, 1, 1)),
("prepacked::unpack_prepacked_sizes_linear", datetime.date(9999, 1, 1)),
("q::_FloatToBfloat16Quantized", datetime.date(2021, 12, 21)),
("q::_Bfloat16QuantizedToFloat", datetime.date(2021, 12, 21)),
("aten::_inverse_helper", datetime.date(2021, 12, 31)),
("aten::softplus_backward", datetime.date(2022, 1, 31)),
("aten::softplus_backward.grad_input", datetime.date(2022, 1, 31)),
("aten::quantile", datetime.date(2022, 9, 30)),
("aten::nanquantile", datetime.date(2022, 9, 30)),
("aten::_convolution_double_backward", datetime.date(2022, 3, 31)),
]
# Precompile the still-active allowlist entries into uniform 3-tuples
# (name_regex, expiry_date, args_regex_or_None); entries whose expiry
# date has already passed are dropped here.
ALLOW_LIST_COMPILED = [
    (
        re.compile(item[0]),
        item[1],
        re.compile(item[2]) if len(item) > 2 else None,
    ) for item in ALLOW_LIST if item[1] >= datetime.date.today()
]
def allow_listed(schema):
    """Return True if *schema* matches an entry of the compiled allowlist.

    The first entry whose name regex matches decides the outcome; when that
    entry carries an argument regex, the schema must also match it.
    """
    text = str(schema)
    for entry in ALLOW_LIST_COMPILED:
        name_re = entry[0]
        args_re = entry[2] if len(entry) > 2 else None
        if not name_re.search(text):
            continue
        # if arguments regex is present, use it
        if args_re is not None:
            return bool(args_re.search(text))
        return True
    return False
# The nightly will fail to parse newly added syntax to schema declarations
# Add new schemas that will fail the nightly here
# Each entry is (name_regex, expiry_date); the far-future dates keep these
# skip rules active indefinitely.
dont_parse_list = [
    ("_TorchScriptTesting.*", datetime.date(2099, 9, 17)),
    ("test_backend", datetime.date(2099, 9, 17)),
    ("dist_c10d", datetime.date(2099, 9, 17)),
]
def dont_parse(schema_line):
    """Return True if *schema_line* matches an unexpired skip pattern."""
    today = datetime.date.today()
    for pattern, expiry in dont_parse_list:
        # Expired entries no longer suppress parsing.
        if expiry < today:
            continue
        if re.compile(pattern).search(schema_line):
            return True
    return False
def load_schemas_to_dict():
    """Collect all current operator and custom-class schemas, keyed by name."""
    all_schemas = list(torch._C._jit_get_all_schemas())
    all_schemas.extend(torch._C._jit_get_custom_class_schemas())
    by_name = defaultdict(list)
    for schema in all_schemas:
        by_name[schema.name].append(schema)
    return by_name
def check_bc(existing_schemas):
    """Check that every existing schema has a backward-compatible successor.

    Allowlisted schemas are skipped.  Returns True when all remaining
    schemas are still backward compatible with the freshly loaded schema
    set; otherwise prints the broken operators and returns False.
    """
    current = load_schemas_to_dict()
    broken_ops = []
    for old in existing_schemas:
        if allow_listed(old):
            print("schema: ", str(old), " found on allowlist, skipping")
            continue
        print("processing existing schema: ", str(old))
        candidates = current.get(old.name, [])
        # A single backward-compatible candidate is enough.
        compatible = any(
            new.is_backward_compatible_with(old) for new in candidates
        )
        if not compatible:
            print(
                "Can NOT find backward compatible schemas after changes "
                "for schema {} from the following candidates:\n[\n{}\n]".format(
                    str(old),
                    "\n\t".join(str(s) for s in candidates),
                )
            )
            # TODO Print out more details about why candidates don't match.
            broken_ops.append(str(old))
    if not broken_ops:
        print("Found backward compatible schemas for all existing schemas")
        return True
    print(
        "The PR is introducing backward incompatible changes to the "
        "operator library. Please contact PyTorch team to confirm "
        "whether this change is wanted or not. \n\nBroken ops: "
        "[\n\t{}\n]".format("\n\t".join(broken_ops))
    )
    return False
def check_fc(existing_schemas):
    """Report (but do not fail on) forward-incompatible schema changes.

    Unlike check_bc this only warns and returns None, since there is no
    FC enforcement policy yet.
    """
    current = load_schemas_to_dict()
    broken_ops = []
    for old in existing_schemas:
        if allow_listed(old):
            print("schema: ", str(old), " found on allowlist, skipping")
            continue
        print("processing existing schema: ", str(old))
        candidates = current.get(old.name, [])
        found = False
        failure_reasons = []
        for candidate in candidates:
            ok, reason = candidate.check_forward_compatible_with(old)
            if ok:
                found = True
                break
            # Only non-empty reasons are worth reporting.
            if reason != "":
                failure_reasons.append(reason)
        if found:
            continue
        print(
            "Can NOT find forward compatible schemas after changes "
            "for schema {} from the following candidates:\n[\n{}\n]".format(
                str(old),
                "\n\t".join(str(s) for s in candidates),
            )
        )
        print(
            "Refer to following reasons for failure "
            "to find FC schema:\n[\n{}\n]".format(
                "\n\t".join(str(r) for r in failure_reasons)
            )
        )
        broken_ops.append(str(old))
    if not broken_ops:
        print("Found forward compatible schemas for all existing schemas")
    else:
        warnings.warn(
            "The PR is introducing a potentially forward incompatible changes to the "
            "operator library. Please contact PyTorch team to confirm "
            "whether this change is wanted or not. \n\nBroken ops: "
            "[\n\t{}\n]".format("\n\t".join(broken_ops))
        )
if __name__ == "__main__":
    # Entry point: read existing schemas from a file, then run the FC (warn
    # only) and BC (hard failure) checks against the current build.
    parser = argparse.ArgumentParser(description="Process some integers.")
    parser.add_argument(
        "--existing-schemas",
        help="filename to load existing schemas",
        type=str,
        default="schemas.txt",
    )
    args = parser.parse_args()
    slist = []
    with open(args.existing_schemas, "r") as f:
        # Iterate lines directly instead of a manual readline() loop, strip
        # each line once, and drop the unused `existing_schema_dict` local.
        for line in f:
            line = line.strip()
            if dont_parse(line):
                print("Not parsing schema line: ", line)
                continue
            slist.append(parse_schema(line))
    # TODO in case there is FC breaking changes,
    # we just warn for now until there is a policy.
    check_fc(slist)
    if not check_bc(slist):
        sys.exit(1)
| 44.326087
| 102
| 0.646641
|
4a051bc2695c336640fb0949003881783bbf059e
| 2,398
|
py
|
Python
|
src/si/util/util.py
|
minimum16/SIB_MachineLearning
|
e032ce15d96a096a3ca57ba8318d3787755534e2
|
[
"Apache-2.0"
] | null | null | null |
src/si/util/util.py
|
minimum16/SIB_MachineLearning
|
e032ce15d96a096a3ca57ba8318d3787755534e2
|
[
"Apache-2.0"
] | null | null | null |
src/si/util/util.py
|
minimum16/SIB_MachineLearning
|
e032ce15d96a096a3ca57ba8318d3787755534e2
|
[
"Apache-2.0"
] | null | null | null |
import itertools
import numpy as np
import pandas as pd
# Y is reserved to idenfify dependent variables
ALPHA = 'ABCDEFGHIJKLMNOPQRSTUVWXZ'
__all__ = ['label_gen', 'euclidean', 'manhattan', 'sigmoid', 'train_test_split', 'add_intersect', 'to_categorical']
def label_gen(n):
""" Generates a list of n distinct labels similar to Excel"""
def _iter_all_strings():
size = 1
while True:
for s in itertools.product(ALPHA, repeat=size):
yield "".join(s)
size += 1
generator = _iter_all_strings()
def gen():
for s in generator:
return s
return [gen() for _ in range(n)]
def euclidean(x, y):
    """Euclidean distance from a single point x to each row of y."""
    diff = x - y
    return np.sqrt((diff ** 2).sum(axis=1))
def manhattan(x, y):
    """Manhattan (L1) distance from a single point x to point(s) y.

    Fixed to reduce over the coordinate axis only (axis=-1), matching
    `euclidean` above: for a 2-D y this returns one distance per row,
    whereas the original summed every row's distance into one scalar.
    Behaviour for 1-D inputs is unchanged.
    """
    return np.sum(np.abs(x - y), axis=-1)
def sigmoid(z):
    """Logistic sigmoid 1 / (1 + e^-z), applied element-wise."""
    denom = 1.0 + np.exp(-z)
    return np.reciprocal(denom)
def train_test_split(dataset, split=0.8):
    """Randomly split a Dataset into train/test parts; split = train fraction."""
    # Imported lazily to avoid a circular import with the dataset module.
    from src.si.data.dataset import Dataset
    n_samples = dataset.X.shape[0]
    n_train = int(split * n_samples)
    perm = np.arange(n_samples)
    np.random.shuffle(perm)
    train_idx, test_idx = perm[:n_train], perm[n_train:]
    train = Dataset(dataset.X[train_idx], dataset.Y[train_idx], dataset.xnames, dataset.yname)
    test = Dataset(dataset.X[test_idx], dataset.Y[test_idx], dataset.xnames, dataset.yname)
    return train, test
def add_intersect(x):
    """Prepend a column of ones (intercept/bias term) to matrix x."""
    ones = np.ones((x.shape[0], 1))
    return np.concatenate((ones, x), axis=1)
def to_categorical(y, num_classes=None, dtype='float32'):
    """One-hot encode integer class labels (Keras-style).

    When num_classes is falsy it is inferred as max(y) + 1.  A trailing
    singleton dimension (column vectors) is squeezed before encoding.
    """
    labels = np.array(y, dtype='int')
    shape = labels.shape
    if shape and len(shape) > 1 and shape[-1] == 1:
        shape = tuple(shape[:-1])
    flat = labels.ravel()
    if not num_classes:
        num_classes = np.max(flat) + 1
    n = flat.shape[0]
    one_hot = np.zeros((n, num_classes), dtype=dtype)
    one_hot[np.arange(n), flat] = 1
    return np.reshape(one_hot, shape + (num_classes,))
def minibatch(X, batchsize=256, shuffle=True):
    """Return a generator of index arrays covering X in batches.

    Each yielded array holds at most `batchsize` row indices of X; together
    the batches cover every row exactly once.  Bug fix: the original
    `return mb_generator(),` had a stray trailing comma, so callers received
    a 1-tuple wrapping the generator instead of the generator itself.
    """
    N = X.shape[0]
    ix = np.arange(N)
    n_batches = int(np.ceil(N / batchsize))
    if shuffle:
        np.random.shuffle(ix)

    def mb_generator():
        for i in range(n_batches):
            yield ix[i * batchsize: (i + 1) * batchsize]

    return mb_generator()
| 27.563218
| 116
| 0.609258
|
4a051c9d4abb08e05f46d16b8a7a346014f5f680
| 4,963
|
py
|
Python
|
game/data/scripts/quests/48_ToTheImmortalPlateau/__init__.py
|
TheDemonLife/Lineage2Server-Interlude
|
d23d145db533fd899d4064026e4bc7ee45c6624a
|
[
"Apache-2.0"
] | 10
|
2019-07-27T13:12:11.000Z
|
2022-01-15T19:13:26.000Z
|
game/data/scripts/quests/48_ToTheImmortalPlateau/__init__.py
|
TheDemonLife/Lineage2Server-Interlude
|
d23d145db533fd899d4064026e4bc7ee45c6624a
|
[
"Apache-2.0"
] | 1
|
2021-08-06T12:15:01.000Z
|
2021-08-09T10:18:47.000Z
|
game/data/scripts/quests/48_ToTheImmortalPlateau/__init__.py
|
TheDemonLife/Lineage2Server-Interlude
|
d23d145db533fd899d4064026e4bc7ee45c6624a
|
[
"Apache-2.0"
] | 2
|
2020-02-20T23:02:26.000Z
|
2020-11-22T09:27:51.000Z
|
# Created by CubicVirtuoso
# Any problems feel free to drop by #l2j-datapack on irc.freenode.net
import sys
from ru.catssoftware.gameserver.model.quest import State
from ru.catssoftware.gameserver.model.quest import QuestState
from ru.catssoftware.gameserver.model.quest.jython import QuestJython as JQuest
# Quest name (used to look up the player's quest state).
qn = "48_ToTheImmortalPlateau"
# NPC identifiers involved in the quest chain.
TRADER_GALLADUCCI_ID = 30097
# Quest item identifiers exchanged between NPCs during the chain.
GALLADUCCIS_ORDER_DOCUMENT_ID_1 = 7563
GALLADUCCIS_ORDER_DOCUMENT_ID_2 = 7564
GALLADUCCIS_ORDER_DOCUMENT_ID_3 = 7565
MAGIC_TRADER_GENTLER_ID = 30094
MAGIC_SWORD_HILT_ID = 7568
JEWELER_SANDRA_ID = 30090
GEMSTONE_POWDER_ID = 7567
PRIEST_DUSTIN_ID = 30116
PURIFIED_MAGIC_NECKLACE_ID = 7566
# Proof item required to start; consumed at the end together with the reward.
MARK_OF_TRAVELER_ID = 7570
SCROLL_OF_ESCAPE_SPECIAL = 7557
ADENA_ID = 57
# Required race ordinal to take the quest (3 — TODO confirm which race this maps to).
RACE = 3
class Quest (JQuest) :
 """Quest 48 'To The Immortal Plateau': a fetch chain between four NPCs.

 The player carries order documents between Galladucci (30097), Gentler
 (30094), Sandra (30090) and Dustin (30116); conditions 1..6 track the
 stage, and the reward is a special Scroll of Escape.
 """
 def __init__(self,id,name,descr):
     JQuest.__init__(self,id,name,descr)
     # Items removed from the player automatically when the quest ends.
     self.questItemIds = [GALLADUCCIS_ORDER_DOCUMENT_ID_1, GALLADUCCIS_ORDER_DOCUMENT_ID_2, GALLADUCCIS_ORDER_DOCUMENT_ID_3,
     MAGIC_SWORD_HILT_ID, GEMSTONE_POWDER_ID, PURIFIED_MAGIC_NECKLACE_ID]
 # Dialog-event handler: each numbered event advances one quest stage,
 # swapping the stage item and returning the next HTML dialog page.
 def onEvent (self,event,st) :
    htmltext = event
    if event == "1" :
      # Stage 1: accept the quest, receive the first order document.
      st.set("cond","1")
      st.setState(State.STARTED)
      st.playSound("ItemSound.quest_accept")
      st.giveItems(GALLADUCCIS_ORDER_DOCUMENT_ID_1,1)
      htmltext = "30097-03.htm"
    elif event == "2" :
      st.set("cond","2")
      st.takeItems(GALLADUCCIS_ORDER_DOCUMENT_ID_1,1)
      st.giveItems(MAGIC_SWORD_HILT_ID,1)
      htmltext = "30094-02.htm"
    elif event == "3" :
      st.set("cond","3")
      st.takeItems(MAGIC_SWORD_HILT_ID,1)
      st.giveItems(GALLADUCCIS_ORDER_DOCUMENT_ID_2,1)
      htmltext = "30097-06.htm"
    elif event == "4" :
      st.set("cond","4")
      st.takeItems(GALLADUCCIS_ORDER_DOCUMENT_ID_2,1)
      st.giveItems(GEMSTONE_POWDER_ID,1)
      htmltext = "30090-02.htm"
    elif event == "5" :
      st.set("cond","5")
      st.takeItems(GEMSTONE_POWDER_ID,1)
      st.giveItems(GALLADUCCIS_ORDER_DOCUMENT_ID_3,1)
      htmltext = "30097-09.htm"
    elif event == "6" :
      st.set("cond","6")
      st.takeItems(GALLADUCCIS_ORDER_DOCUMENT_ID_3,1)
      st.giveItems(PURIFIED_MAGIC_NECKLACE_ID,1)
      htmltext = "30116-02.htm"
    elif event == "7" :
      # Final stage: hand in the necklace and all traveler marks,
      # receive the scroll and close the quest permanently.
      st.giveItems(SCROLL_OF_ESCAPE_SPECIAL,1)
      st.takeItems(PURIFIED_MAGIC_NECKLACE_ID,1)
      st.takeItems(MARK_OF_TRAVELER_ID,-1)
      htmltext = "30097-12.htm"
      st.unset("cond")
      st.exitQuest(False)
      st.playSound("ItemSound.quest_finish")
    return htmltext
 # Talk handler: picks the dialog page based on NPC id, quest state and
 # the current "cond" stage value.
 def onTalk (self,npc,player):
    htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
    st = player.getQuestState(qn)
    if not st : return htmltext
    npcId = npc.getNpcId()
    id = st.getState()
    if id == State.CREATED :
      st.set("cond","0")
      # Starting requirements: matching race and at least one Mark of Traveler.
      if player.getRace().ordinal() == RACE and st.getQuestItemsCount(MARK_OF_TRAVELER_ID) > 0:
         htmltext = "30097-02.htm"
      else :
         htmltext = "30097-01.htm"
         st.exitQuest(1)
    elif npcId == 30097 and id == State.COMPLETED :
      htmltext = "<html><body>I can't supply you with another Scroll of Escape. Sorry traveller.</body></html>"
    elif npcId == 30097 and st.getInt("cond")==1 :
      htmltext = "30097-04.htm"
    elif npcId == 30097 and st.getInt("cond")==2 :
      htmltext = "30097-05.htm"
    elif npcId == 30097 and st.getInt("cond")==3 :
      htmltext = "30097-07.htm"
    elif npcId == 30097 and st.getInt("cond")==4 :
      htmltext = "30097-08.htm"
    elif npcId == 30097 and st.getInt("cond")==5 :
      htmltext = "30097-10.htm"
    elif npcId == 30097 and st.getInt("cond")==6 :
      htmltext = "30097-11.htm"
    elif id == State.STARTED :
      if npcId == 30094 and st.getInt("cond")==1 :
         htmltext = "30094-01.htm"
      elif npcId == 30094 and st.getInt("cond")==2 :
         htmltext = "30094-03.htm"
      elif npcId == 30090 and st.getInt("cond")==3 :
         htmltext = "30090-01.htm"
      elif npcId == 30090 and st.getInt("cond")==4 :
         htmltext = "30090-03.htm"
      elif npcId == 30116 and st.getInt("cond")==5 :
         htmltext = "30116-01.htm"
      elif npcId == 30116 and st.getInt("cond")==6 :
         htmltext = "30116-03.htm"
    return htmltext
# Instantiate the quest and register the start NPC plus every NPC the
# player talks to during the chain.
QUEST = Quest(48,qn,"To The Immortal Plateau")
QUEST.addStartNpc(30097)
QUEST.addTalkId(30097)
QUEST.addTalkId(30094)
QUEST.addTalkId(30090)
QUEST.addTalkId(30116)
| 40.024194
| 157
| 0.605279
|
4a051cc52d09b077fff1fdd3368177cf10e51d93
| 8,032
|
py
|
Python
|
test/neo4j_test.py
|
dhimmel/hetnetpy
|
aa16e6a7092c039a6b175a73a35c006e53acee20
|
[
"BSD-2-Clause-Patent",
"CC0-1.0"
] | 4
|
2018-09-29T03:29:59.000Z
|
2019-04-21T06:48:36.000Z
|
test/neo4j_test.py
|
dhimmel/hetnetpy
|
aa16e6a7092c039a6b175a73a35c006e53acee20
|
[
"BSD-2-Clause-Patent",
"CC0-1.0"
] | null | null | null |
test/neo4j_test.py
|
dhimmel/hetnetpy
|
aa16e6a7092c039a6b175a73a35c006e53acee20
|
[
"BSD-2-Clause-Patent",
"CC0-1.0"
] | 1
|
2019-05-24T08:56:18.000Z
|
2019-05-24T08:56:18.000Z
|
import pathlib
import textwrap
from neo4j import GraphDatabase
import pytest
import hetnetpy.neo4j
import hetnetpy.readwrite
def test_construct_pdp_query():
    """
    Test the pdp computation on the metapath from https://doi.org/10.1371/journal.pcbi.1004259.g002
    """
    # NOTE(review): this test runs queries against the live neo4j.het.io
    # server, so it requires network access and a reachable service.
    # Since we're working on a known nicotine dependency - Bupropion metapath,
    # we already know the dwpc
    dwpc = 0.03287590886921623
    # Set up the graph for querying
    directory = pathlib.Path(__file__).parent.absolute()
    path = directory.joinpath("data/hetionet-v1.0-metagraph.json")
    metagraph = hetnetpy.readwrite.read_metagraph(path)
    compound = "DB01156" # Bupropion
    disease = "DOID:0050742" # nicotine dependency
    damping_exponent = 0.4
    metapath = metagraph.metapath_from_abbrev("CbGpPWpGaD")
    # Calculate the pdp without being provided with the dwpc
    pdp_query = hetnetpy.neo4j.construct_pdp_query(
        metapath, path_style="string", property="identifier", unique_nodes=True
    )
    assert len(pdp_query) > 0
    driver = GraphDatabase.driver("bolt://neo4j.het.io")
    params = {"source": compound, "target": disease, "w": damping_exponent}
    with driver.session() as session:
        results = session.run(pdp_query, params)
        results = results.data()
    # Note that these are en dashes not hyphens
    assert results[0]["path"].split("–")[0] == "Bupropion"
    assert results[0]["path"].split("–")[-1] == "nicotine dependence"
    percent_dwpc_1 = results[0]["percent_of_DWPC"]
    old_pdp_query = pdp_query
    # Calculate the pdp with the provided dwpc
    pdp_query = hetnetpy.neo4j.construct_pdp_query(
        metapath, dwpc, path_style="list", property="identifier", unique_nodes=True
    )
    assert len(pdp_query) > 0
    assert old_pdp_query != pdp_query
    with driver.session() as session:
        results = session.run(pdp_query, params)
        results = results.data()
    assert results[0]["path"][0] == "Bupropion"
    assert results[0]["path"][-1] == "nicotine dependence"
    # We'll check this because it verifies both that the DWPC and the PDP for the path
    # are the same for both queries
    assert percent_dwpc_1 == pytest.approx(results[0]["percent_of_DWPC"])
    sum_percent = 0
    for result in results:
        sum_percent += result["percent_of_DWPC"]
    # The fractions should all add up to around 100 percent
    assert sum_percent == pytest.approx(100)
def test_construct_pdp_query_return_values():
    """
    Test that the construct_pdp_query function returns the expected query for a
    known graph. These tests will not actually execute the query
    """
    # Expected query when no DWPC is supplied: the DWPC is computed inside
    # the query via collect/UNWIND before the percentages are derived.
    q1 = textwrap.dedent(
        """\
        MATCH path = (n0:Compound)-[:BINDS_CbG]-(n1)-[:PARTICIPATES_GpPW]-(n2)-[:PARTICIPATES_GpPW]-(n3)-[:ASSOCIATES_DaG]-(n4:Disease)
        USING JOIN ON n2
        WHERE n0.identifier = { source }
        AND n4.identifier = { target }
        AND n1 <> n3
        WITH
        [
            size((n0)-[:BINDS_CbG]-()),
            size(()-[:BINDS_CbG]-(n1)),
            size((n1)-[:PARTICIPATES_GpPW]-()),
            size(()-[:PARTICIPATES_GpPW]-(n2)),
            size((n2)-[:PARTICIPATES_GpPW]-()),
            size(()-[:PARTICIPATES_GpPW]-(n3)),
            size((n3)-[:ASSOCIATES_DaG]-()),
            size(()-[:ASSOCIATES_DaG]-(n4))
        ] AS degrees, path
        WITH path, reduce(pdp = 1.0, d in degrees| pdp * d ^ -{ w }) AS PDP
        WITH collect({paths: path, PDPs: PDP}) AS data_maps, count(path) AS PC, sum(PDP) AS DWPC
        UNWIND data_maps AS data_map
        WITH data_map.paths AS path, data_map.PDPs AS PDP, PC, DWPC
        RETURN
        substring(reduce(s = '', node IN nodes(path)| s + '–' + node.name), 1) AS path,
        PDP,
        100 * (PDP / DWPC) AS percent_of_DWPC
        ORDER BY percent_of_DWPC DESC
        """
    ).rstrip()
    dwpc = 0.03287590886921623
    # Expected query when the DWPC is supplied up front: the precomputed
    # constant is inlined into the percentage expression.
    q2 = textwrap.dedent(
        """\
        MATCH path = (n0:Compound)-[:BINDS_CbG]-(n1)-[:PARTICIPATES_GpPW]-(n2)-[:PARTICIPATES_GpPW]-(n3)-[:ASSOCIATES_DaG]-(n4:Disease)
        USING JOIN ON n2
        WHERE n0.identifier = { source }
        AND n4.identifier = { target }
        AND n1 <> n3
        WITH
        [
            size((n0)-[:BINDS_CbG]-()),
            size(()-[:BINDS_CbG]-(n1)),
            size((n1)-[:PARTICIPATES_GpPW]-()),
            size(()-[:PARTICIPATES_GpPW]-(n2)),
            size((n2)-[:PARTICIPATES_GpPW]-()),
            size(()-[:PARTICIPATES_GpPW]-(n3)),
            size((n3)-[:ASSOCIATES_DaG]-()),
            size(()-[:ASSOCIATES_DaG]-(n4))
        ] AS degrees, path
        WITH path, reduce(pdp = 1.0, d in degrees| pdp * d ^ -{ w }) AS PDP
        RETURN
        substring(reduce(s = '', node IN nodes(path)| s + '–' + node.name), 1) AS path,
        PDP,
        100 * (PDP / 0.03287590886921623) AS percent_of_DWPC
        ORDER BY percent_of_DWPC DESC
        """
    ).rstrip()
    # Set up the graph for querying
    directory = pathlib.Path(__file__).parent.absolute()
    path = directory.joinpath("data/hetionet-v1.0-metagraph.json")
    metagraph = hetnetpy.readwrite.read_metagraph(path)
    metapath = metagraph.metapath_from_abbrev("CbGpPWpGaD")
    DWPCless_query = hetnetpy.neo4j.construct_pdp_query(
        metapath, path_style="string", property="identifier", unique_nodes=True
    )
    assert DWPCless_query == q1
    DWPC_query = hetnetpy.neo4j.construct_pdp_query(
        metapath, dwpc, path_style="string", property="identifier", unique_nodes=True
    )
    assert DWPC_query == q2
def test_construct_dwpc_query():
    """
    Test dwpc query construction and computation on the metapath from
    https://doi.org/10.1371/journal.pcbi.1004259.g002
    """
    # NOTE(review): executes the query against the live neo4j.het.io server;
    # requires network access.
    directory = pathlib.Path(__file__).parent.absolute()
    path = directory.joinpath("data/hetionet-v1.0-metagraph.json")
    metagraph = hetnetpy.readwrite.read_metagraph(path)
    compound = "DB01156" # Bupropion
    disease = "DOID:0050742" # nicotine dependency
    damping_exponent = 0.4
    metapath = metagraph.metapath_from_abbrev("CbGpPWpGaD")
    query = hetnetpy.neo4j.construct_dwpc_query(
        metapath, property="identifier", unique_nodes=True
    )
    assert len(query) > 0
    driver = GraphDatabase.driver("bolt://neo4j.het.io")
    params = {"source": compound, "target": disease, "w": damping_exponent}
    with driver.session() as session:
        results = session.run(query, params)
        results = results.single()
        assert results
    dwpc = results["DWPC"]
    # Known reference value for this metapath and damping exponent.
    assert dwpc == pytest.approx(0.03287590886921623)
# Each case pairs a (style, identifier) input with the exact Cypher RETURN
# fragment that create_path_return_clause should emit for it.
@pytest.mark.parametrize(
    "style, identifier, expected_output",
    [
        ("list", "name", "[node in nodes(path) | node.name] AS path,"),
        ("list", "identifier", "[node in nodes(path) | node.identifier] AS path,"),
        (
            "string",
            "name",
            "substring(reduce(s = '', node IN nodes(path)| s + '–' + node.name), 1) AS path,",
        ),
        (
            "string",
            "identifier",
            "substring(reduce(s = '', node IN nodes(path)| s + '–' + node.identifier), 1) AS path,",
        ),
        (
            "id_lists",
            None,
            "[node IN nodes(path) | id(node)] AS node_ids,\n[rel IN relationships(path) | id(rel)] AS rel_ids,",
        ),
    ],
)
def test_construct_path_return_clause_returns(style, identifier, expected_output):
    """
    Test the results of construct_path_return_clause with different parameters
    """
    assert (
        hetnetpy.neo4j.create_path_return_clause(style, identifier) == expected_output
    )
def test_construct_path_return_clause_error():
    """
    Ensure that construct_path_return_clause throwns a ValueError when given an invalid style
    """
    # Any style outside the supported set must raise, not silently default.
    with pytest.raises(ValueError):
        hetnetpy.neo4j.create_path_return_clause("invalid_style")
| 34.324786
| 139
| 0.616534
|
4a051d2e7bb655e174c7daa4c75ba6064ba5156c
| 382
|
py
|
Python
|
setup.py
|
rmyers/stashboard
|
1fc025e895fdd96949364b0c323677680024f5de
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
rmyers/stashboard
|
1fc025e895fdd96949364b0c323677680024f5de
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
rmyers/stashboard
|
1fc025e895fdd96949364b0c323677680024f5de
|
[
"Apache-2.0"
] | null | null | null |
import setuptools
# Package version, referenced once in the setup() call below.
VERSION = '0.0.1'
# Standard setuptools packaging metadata for the stashboard dashboard.
setuptools.setup(
    name='stashboard',
    version=VERSION,
    description='Stash Horizon Dashboard',
    author='Rackspace',
    author_email='stash-devs@rackspace.com',
    # Ship every package except the test suite.
    packages=setuptools.find_packages(exclude=['test']),
    include_package_data=True,
    install_requires=[
        'python-troveclient',
    ],
    classifiers=[],
)
| 21.222222
| 56
| 0.67801
|
4a051d7684c9a13adf1c0b4298237714f6b8c4fa
| 2,207
|
py
|
Python
|
Examples/Segmentation/NeighborhoodConnectedImageFilter.py
|
SimpleITK/Staging
|
905519b916f985db6573afa347143722b0aa9710
|
[
"Apache-2.0"
] | null | null | null |
Examples/Segmentation/NeighborhoodConnectedImageFilter.py
|
SimpleITK/Staging
|
905519b916f985db6573afa347143722b0aa9710
|
[
"Apache-2.0"
] | null | null | null |
Examples/Segmentation/NeighborhoodConnectedImageFilter.py
|
SimpleITK/Staging
|
905519b916f985db6573afa347143722b0aa9710
|
[
"Apache-2.0"
] | null | null | null |
'''=========================================================================
'
' Copyright Insight Software Consortium
'
' Licensed under the Apache License, Version 2.0 (the "License");
' you may not use this file except in compliance with the License.
' You may obtain a copy of the License at
'
' http://www.apache.org/licenses/LICENSE-2.0.txt
'
' Unless required by applicable law or agreed to in writing, software
' distributed under the License is distributed on an "AS IS" BASIS,
' WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
' See the License for the specific language governing permissions and
' limitations under the License.
'
'========================================================================='''
import SimpleITK
import sys
# NOTE(review): this script uses Python 2 print statements and will not run
# under Python 3 without conversion.
if __name__ == '__main__':
    #
    # Check Command Line
    #
    if len( sys.argv ) < 7:
        print "Usage: NeighborhoodConnectedImageFilter inputImage outputImage lowerThreshold upperThreshold seedX seedY [seed2X seed2Y ... ]";
        sys.exit( 1 )
    #
    # Read the image
    #
    reader = SimpleITK.ImageFileReader()
    reader.SetFileName( sys.argv[1] )
    image = reader.Execute();
    #
    # Set up the writer
    #
    writer = SimpleITK.ImageFileWriter()
    writer.SetFileName( sys.argv[2] )
    #
    # Blur using CurvatureFlowImageFilter
    #
    blurFilter = SimpleITK.CurvatureFlowImageFilter()
    blurFilter.SetNumberOfIterations( 5 )
    blurFilter.SetTimeStep( 0.125 )
    image = blurFilter.Execute( image )
    #
    # Set up NeighborhoodConnectedImageFilter for segmentation
    #
    segmentationFilter = SimpleITK.NeighborhoodConnectedImageFilter()
    segmentationFilter.SetLower( float(sys.argv[3]) )
    segmentationFilter.SetUpper( float(sys.argv[4]) )
    segmentationFilter.SetReplaceValue( 255 )
    radius = [2,2]
    segmentationFilter.SetRadius( radius )
    # Remaining argv entries are (x, y) seed pairs for the region grower.
    for i in range( 5, len(sys.argv)-1, 2 ):
        seed = [int(sys.argv[i]), int(sys.argv[i+1])]
        segmentationFilter.AddSeed( seed )
        print "Adding seed at " + str(seed)
    # Run the segmentation filter
    image = segmentationFilter.Execute( image )
    #
    # Write out the result
    #
    writer.Execute( image )
    sys.exit(0)
| 26.914634
| 138
| 0.648391
|
4a051d97babb00953109011e49f892614b9750ec
| 8,670
|
py
|
Python
|
plot_mean_prob_dist_678_for_flux_YSO.py
|
jacob975/deep_learning
|
52a5073589cf78aeadfde8ea51f687bc497a059b
|
[
"MIT"
] | null | null | null |
plot_mean_prob_dist_678_for_flux_YSO.py
|
jacob975/deep_learning
|
52a5073589cf78aeadfde8ea51f687bc497a059b
|
[
"MIT"
] | 10
|
2018-03-14T08:44:12.000Z
|
2018-11-13T13:45:53.000Z
|
plot_mean_prob_dist_678_for_flux_YSO.py
|
jacob975/deep_learning
|
52a5073589cf78aeadfde8ea51f687bc497a059b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
'''
Abstract:
This is a program for ploting probability distribution of labels.
Usage:
plot_prob_dist.py [AI dir list]
Editor and Practicer:
Jacob975
##################################
# Python3 #
# This code is made in python3 #
##################################
20180730
####################################
update log
20180730 version alpha 1:
1. The code works
20191016 version alpha 2:
1. Assign star as blue, YSO as red.
'''
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import tensorflow as tf
import numpy as np
import time
from sys import argv
import os
import itertools
from colour import Color
from sed_test_cnn import bias_variable, weight_variable
from convert_lib import ensemble_mjy_to_mag
import convert_lib
from scipy.interpolate import RegularGridInterpolator
# Assign RGB color to represent MP 1 magnitude.
def rebin3d(arr, new_shape):
    """Downsample a 3-D array to new_shape by averaging equal-sized blocks.

    Each dimension of arr must be an integer multiple of the matching
    entry of new_shape.
    """
    factors = [old // new for old, new in zip(arr.shape, new_shape)]
    paired = (new_shape[0], factors[0],
              new_shape[1], factors[1],
              new_shape[2], factors[2])
    # Average out the per-block axes (last, then 3, then 1).
    return arr.reshape(paired).mean(-1).mean(3).mean(1)
def plot_prob(arti_mag, sort_order, yso_678):
    """Render a 3-D surface of YSO-classified grid points plus observed YSOs.

    Saves the figure to 'probability_distribution_for_YSO.png'.
    NOTE(review): the axis limits below read the module-level globals
    IR3_arti_mag / IR4_arti_mag / MP1_arti_mag set in __main__ rather than
    the `arti_mag` argument — this only works when called from this script.
    """
    # Print the color for each MP1 slice
    fig = plt.figure(
        figsize = (8,8)
    )
    ax = fig.add_subplot(111, projection='3d')
    # Surface over the grid points classified as YSO (log-flux coordinates).
    ax.plot_trisurf(
        arti_mag[:, 0],
        arti_mag[:, 1],
        arti_mag[:, 2],
        cmap='jet',
        edgecolor='none',
        zorder = 2,
    )
    # Observed YSO fluxes, log-scaled to match the grid coordinates.
    ax.scatter(
        np.log10(yso_678[:, 0]),
        np.log10(yso_678[:, 1]),
        np.log10(yso_678[:, 2]),
        c = 'r', s = 2,
        zorder = 1,
    )
    ax.set_xlim(np.amin(IR3_arti_mag[:,0]), np.amax(IR3_arti_mag[:,0]))
    ax.set_ylim(np.amin(IR4_arti_mag[:,0]), np.amax(IR4_arti_mag[:,0]))
    ax.set_zlim(np.amin(MP1_arti_mag[:,0]), np.amax(MP1_arti_mag[:,0]))
    ax.set_xlabel(
        "{0} (log(mJy))".format(sort_order[0]),
        fontsize=16)
    ax.set_ylabel(
        "{0} (log(mJy))".format(sort_order[1]),
        fontsize=16)
    ax.set_zlabel(
        "{0} (log(mJy))".format(sort_order[2]),
        fontsize=16)
    #plt.show()
    plt.savefig(
        'probability_distribution_for_YSO.png',
        dpi = 300,
    )
    return
# This is a function for classifying sources using Model IV.
def scao_model_iv(AI_saved_dir, arti_flux_678, arti_label_678):
    """Rebuild the Model IV CNN graph, restore its weights, and predict.

    Returns softmax label predictions (shape: n_samples x 3) for the given
    flux grid.  TensorFlow 1.x graph-mode API; the checkpoint is read from
    AI_saved_dir/best_validation.
    """
    #-----------------------------------
    # Load AI
    print ('Loading AI...')
    # Network hyper-parameters (must match the ones used at training time).
    width_of_data = 1
    img_maj = 3
    image_shape = (width_of_data, img_maj)
    kernal_shape = (width_of_data, 2)
    num_kernal_1 = 32
    num_kernal_2 = 64
    num_conn_neural = 100
    num_label = 3
    #-----------------------------------
    # Construct an AI
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, [None, width_of_data * img_maj], name = 'x')
    y_true = tf.placeholder(tf.float32, [None, 3], name = 'y_true')
    y_true_cls = tf.argmax(y_true, axis=1)
    x_image = tf.reshape(x, [-1, image_shape[0], image_shape[1], 1])
    # First layer( First kernal)
    W_conv1 = weight_variable([kernal_shape[0], kernal_shape[1], 1, num_kernal_1])
    b_conv1 = bias_variable([num_kernal_1])
    h_conv1 = tf.nn.selu(tf.nn.conv2d(x_image, W_conv1, [1,1,1,1], 'SAME') + b_conv1)
    # Second layer( Second kernal)
    W_conv2 = weight_variable([kernal_shape[0], kernal_shape[1], num_kernal_1, num_kernal_2])
    b_conv2 = bias_variable([num_kernal_2])
    h_conv2 = tf.nn.selu(tf.nn.conv2d(h_conv1, W_conv2, [1,1,1,1], 'SAME') + b_conv2)
    # Third layer ( Fully connected)
    W_fc1 = weight_variable([image_shape[0] * image_shape[1] * num_kernal_2, num_conn_neural])
    b_fc1 = bias_variable([num_conn_neural])
    h_conv2_flat = tf.reshape(h_conv2, [ -1, image_shape[0] * image_shape[1] * num_kernal_2])
    h_fc1 = tf.nn.selu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)
    # Output layer
    W_fc2 = weight_variable([num_conn_neural, num_label])
    b_fc2 = bias_variable([num_label])
    layer_last = tf.matmul(h_fc1, W_fc2) + b_fc2
    y_pred = tf.nn.softmax(layer_last)
    y_pred_cls = tf.argmax(y_pred, axis=1)
    correct_prediction = tf.equal(y_pred_cls, y_true_cls)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Saver
    saver = tf.train.Saver()
    print ("AI:{0}".format(AI_saved_dir))
    if not os.path.exists(AI_saved_dir):
        print ("No AI can be restore, please check folder ./checkpoints")
        exit(1)
    save_path = os.path.join(AI_saved_dir, 'best_validation')
    session = tf.Session()
    # Restore previous weight
    saver.restore(sess=session, save_path=save_path)
    #-----------------------------------
    # Make a prediction
    def predict_label(images, labels):
        # Run the softmax output for the given images; labels are only fed
        # to satisfy the y_true placeholder and do not affect y_pred.
        # Number of images.
        num_images = len(images)
        # initialize
        label_pred = np.zeros(num_images*3).reshape((num_images, 3))
        feed_dict = {x: images[:], y_true: labels[:]}
        # process
        label_pred = session.run(y_pred, feed_dict=feed_dict)
        return label_pred
    label_pred_678 = predict_label(arti_flux_678, arti_label_678)
    #-----------------------------------
    # Close session
    session.close()
    return label_pred_678
#--------------------------------------------
# Main code
if __name__ == "__main__":
    VERBOSE = 0
    # Measure times
    start_time = time.time()
    #-----------------------------------
    # Load argv
    if len(argv) != 3:
        print ("Error! Usage: plot_prob_distribution.py [AI dir list] [yso sed list]")
        exit(1)
    AI_saved_dir_list_name = argv[1]
    yso_list_name = argv[2]
    # Load data
    AI_saved_dir_list = np.loadtxt(
        AI_saved_dir_list_name,
        dtype = str,
        delimiter = '\n')
    yso_sed_list = np.loadtxt(yso_list_name)
    # Columns 5..7 hold the IRAC3/IRAC4/MIPS1 fluxes used below.
    yso_678 = yso_sed_list[:,5:]
    #-----------------------------------
    # Initialize
    #reduced_num_ticks = 50
    num_ticks = 200
    # Calculate the probability distribution of labels
    band_system = convert_lib.set_SCAO()
    fake_error = np.ones(num_ticks)
    # Build logarithmic flux grids per band; the lower bounds are the
    # per-band detection limits, the upper bound is 10^4 mJy.
    IR3_arti_flux = np.transpose(
        [   np.logspace(
                np.log10(0.000107),
                np.log10(10000.0),
                num=num_ticks),
            fake_error])
    IR4_arti_flux = np.transpose(
        [   np.logspace(
                np.log10(0.000216),
                np.log10(10000.0),
                num=num_ticks),
            fake_error])
    MP1_arti_flux = np.transpose(
        [   np.logspace(
                np.log10(0.000898),
                np.log10(10000.0),
                num=num_ticks),
            fake_error])
    IR3_arti_mag = np.log10(IR3_arti_flux)
    IR4_arti_mag = np.log10(IR4_arti_flux)
    MP1_arti_mag = np.log10(MP1_arti_flux)
    # Cartesian product of the three band grids -> all artificial sources.
    arti_mag_678 = np.asarray(list(itertools.product( IR3_arti_mag[:,0],
                                                    IR4_arti_mag[:,0],
                                                    MP1_arti_mag[:,0]
                                                    )))
    arti_flux_678 = np.asarray(list(itertools.product( IR3_arti_flux[:,0],
                                                    IR4_arti_flux[:,0],
                                                    MP1_arti_flux[:,0]
                                                    )))
    arti_label_678 = np.zeros(arti_flux_678.shape)
    #-----------------------------------
    # Make predictions using each run
    sum_label_pred_678 = np.zeros(arti_flux_678.shape)
    for AI_saved_dir in AI_saved_dir_list:
        label_pred_678 = scao_model_iv(AI_saved_dir, arti_flux_678, arti_label_678)
        sum_label_pred_678 += label_pred_678
    # Ensemble average over all trained model runs.
    mean_label_pred_678 = np.divide(sum_label_pred_678, len(AI_saved_dir_list))
    #-----------------------------------
    # Quantize the probability
    mean_label_pred_678[mean_label_pred_678 >= 0.5] = 1.0
    mean_label_pred_678[mean_label_pred_678 < 0.5] = 0.0
    mean_cls_pred_678 = np.argmax(mean_label_pred_678, axis = 1)
    #-----------------------------------
    # Shows the degenerate data and pred_labels to band IRAC3, IRAC4, and MIPS1
    sort_order_678 = ['IRAC3', 'IRAC4', 'MIPS1']
    # Plot YSO only
    # Class index 2 corresponds to YSO in the 3-class output.
    index_YSO = np.where(mean_cls_pred_678 == 2)
    arti_mag_678_YSO = arti_mag_678[index_YSO]
    print ('Plot the 3D map')
    plot_prob(arti_mag_678_YSO, sort_order_678, yso_678)
    #-----------------------------------
    # measuring time
    elapsed_time = time.time() - start_time
    print ("Exiting Main Program, spending ", elapsed_time, "seconds.")
| 94
| 0.580854
|
4a051e08a7d604d564584c34f4c3600acd20dc9e
| 20,433
|
py
|
Python
|
src/robot/parsing/model.py
|
gdw2/robot-framework
|
f25068edf1502e76ba8664d4b5ed1aebe0ee2434
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robot/parsing/model.py
|
gdw2/robot-framework
|
f25068edf1502e76ba8664d4b5ed1aebe0ee2434
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robot/parsing/model.py
|
gdw2/robot-framework
|
f25068edf1502e76ba8664d4b5ed1aebe0ee2434
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2008-2010 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from robot.errors import DataError
from robot.variables import is_var
from robot.output import LOGGER
from robot import utils
from settings import (Documentation, Fixture, Timeout, Tags, Metadata,
Library, Resource, Variables, Arguments, Return, Template)
from populators import FromFilePopulator, FromDirectoryPopulator
def TestData(parent=None, source=None, include_suites=None):
    """Parse test data from `source` into a directory or file model.

    Returns a TestDataDirectory when `source` is a directory, otherwise a
    TestCaseFile.  The mutable default argument (`include_suites=[]`) was
    replaced with a None sentinel; behaviour is unchanged because the list
    was only passed through, never mutated, but the shared-default
    anti-pattern is gone.
    """
    if include_suites is None:
        include_suites = []
    if os.path.isdir(source):
        return TestDataDirectory(parent, source, include_suites)
    return TestCaseFile(parent, source)
class _TestData(object):
    """Common base for parsed test data files and directories.

    Subclasses are expected to provide setting_table, variable_table,
    keyword_table and testcase_table attributes.
    """
    def __init__(self, parent=None, source=None):
        self.parent = parent
        # Normalise the source path once; None when no source was given.
        self.source = os.path.abspath(source) if source else None
        self.children = []
        self._tables = None
    def _get_tables(self):
        """Lazily build the header-name -> table lookup (case-normalised)."""
        if not self._tables:
            self._tables = utils.NormalizedDict({'Setting': self.setting_table,
                                                 'Settings': self.setting_table,
                                                 'Metadata': self.setting_table,
                                                 'Variable': self.variable_table,
                                                 'Variables': self.variable_table,
                                                 'Keyword': self.keyword_table,
                                                 'Keywords': self.keyword_table,
                                                 'User Keyword': self.keyword_table,
                                                 'User Keywords': self.keyword_table,
                                                 'Test Case': self.testcase_table,
                                                 'Test Cases': self.testcase_table})
        return self._tables
    def start_table(self, header_row):
        """Return the table matching `header_row`, or None if unrecognised."""
        table_name = header_row[0]
        try:
            table = self._valid_table(self._get_tables()[table_name])
        except KeyError:
            return None
        else:
            if table is not None:
                table.set_header(header_row)
            return table
    @property
    def name(self):
        """Suite name derived from the source file name."""
        if not self.source:
            return None
        name = os.path.splitext(os.path.basename(self.source))[0]
        name = name.split('__', 1)[-1] # Strip possible prefix
        name = name.replace('_', ' ').strip()
        # Title-case all-lowercase names; mixed-case names are kept as-is.
        if name.islower():
            name = ' '.join(w[0].upper() + w[1:] for w in name.split())
        return name
    @property
    def keywords(self):
        return self.keyword_table.keywords
    @property
    def imports(self):
        return self.setting_table.imports
    def report_invalid_syntax(self, table, message, level='ERROR'):
        """Log a syntax error, pointing at the init file when one exists."""
        initfile = getattr(self, 'initfile', None)
        path = os.path.join(self.source, initfile) if initfile else self.source
        LOGGER.write("Invalid syntax in file '%s' in table '%s': %s"
                     % (path, table, message), level)
class TestCaseFile(_TestData):
def __init__(self, parent=None, source=None):
_TestData.__init__(self, parent, source)
self.directory = os.path.dirname(self.source) if self.source else None
self.setting_table = TestCaseFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
if source: # FIXME: model should be decoupled from populating
FromFilePopulator(self).populate(source)
self._validate()
def _validate(self):
if not self.testcase_table.is_started():
raise DataError('File has no test case table.')
def _valid_table(self, table):
return table
def has_tests(self):
return True
def __iter__(self):
for table in [self.setting_table, self.variable_table,
self.testcase_table, self.keyword_table]:
yield table
class ResourceFile(_TestData):
def __init__(self, source=None):
_TestData.__init__(self, source=source)
self.directory = os.path.dirname(self.source) if self.source else None
self.setting_table = ResourceFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
if self.source:
FromFilePopulator(self).populate(source)
self._report_status()
def _report_status(self):
if self.setting_table or self.variable_table or self.keyword_table:
LOGGER.info("Imported resource file '%s' (%d keywords)."
% (self.source, len(self.keyword_table.keywords)))
else:
LOGGER.warn("Imported resource file '%s' is empty." % self.source)
def _valid_table(self, table):
if table is self.testcase_table:
raise DataError('Test case table not allowed in resource file.')
return table
def __iter__(self):
for table in [self.setting_table, self.variable_table,
self.keyword_table]:
yield table
class TestDataDirectory(_TestData):
def __init__(self, parent=None, source=None, include_suites=[]):
_TestData.__init__(self, parent, source)
self.directory = self.source
self.initfile = None
self.setting_table = InitFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
if self.source:
FromDirectoryPopulator().populate(self.source, self, include_suites)
self.children = [ ch for ch in self.children if ch.has_tests() ]
def _valid_table(self, table):
if table is self.testcase_table:
LOGGER.error('Test case table not allowed in test suite init file.')
return None
return table
def add_child(self, path, include_suites):
self.children.append(TestData(parent=self,source=path,
include_suites=include_suites))
def has_tests(self):
return any(ch.has_tests() for ch in self.children)
def __iter__(self):
for table in [self.setting_table, self.variable_table,
self.keyword_table]:
yield table
class _Table(object):
def __init__(self, parent):
self.parent = parent
self.header = None
def set_header(self, header):
self.header = header
@property
def name(self):
return self.header[0]
@property
def source(self):
return self.parent.source
@property
def directory(self):
return self.parent.directory
def report_invalid_syntax(self, message, level='ERROR'):
self.parent.report_invalid_syntax(self.name, message, level)
class _WithSettings(object):
def get_setter(self, setting_name):
if setting_name in self._setters:
return self._setters[setting_name]
self.report_invalid_syntax("Non-existing setting '%s'." % setting_name)
def is_setting(self, setting_name):
return setting_name in self._setters
class _SettingTable(_Table, _WithSettings):
type = 'setting'
def __init__(self, parent):
_Table.__init__(self, parent)
self.doc = Documentation('Documentation', self)
self.suite_setup = Fixture('Suite Setup', self)
self.suite_teardown = Fixture('Suite Teardown', self)
self.test_setup = Fixture('Test Setup', self)
self.test_teardown = Fixture('Test Teardown', self)
self.force_tags = Tags('Force Tags', self)
self.default_tags = Tags('Default Tags', self)
self.test_template = Template('Test Template', self)
self.test_timeout = Timeout('Test Timeout', self)
self.metadata = []
self.imports = []
self._setters = self._get_setters()
def _get_adder(self, adder_method):
def adder(value, comment):
name = value[0] if value else ''
adder_method(name, value[1:], comment)
return adder
def add_metadata(self, name, value='', comment=None):
self.metadata.append(Metadata('Metadata', self, name, value, comment))
return self.metadata[-1]
def add_library(self, name, args=None, comment=None):
self.imports.append(Library(self, name, args, comment=comment))
return self.imports[-1]
def add_resource(self, name, invalid_args=None, comment=None):
self.imports.append(Resource(self, name, invalid_args, comment=comment))
return self.imports[-1]
def add_variables(self, name, args=None, comment=None):
self.imports.append(Variables(self, name, args, comment=comment))
return self.imports[-1]
def __nonzero__(self):
return any(setting.is_set() for setting in self)
class TestCaseFileSettingTable(_SettingTable):
def _get_setters(self):
return utils.NormalizedDict({'Documentation': self.doc.populate,
'Document': self.doc.populate,
'Suite Setup': self.suite_setup.populate,
'Suite Precondition': self.suite_setup.populate,
'Suite Teardown': self.suite_teardown.populate,
'Suite Postcondition': self.suite_teardown.populate,
'Test Setup': self.test_setup.populate,
'Test Precondition': self.test_setup.populate,
'Test Teardown': self.test_teardown.populate,
'Test Postcondition': self.test_teardown.populate,
'Force Tags': self.force_tags.populate,
'Default Tags': self.default_tags.populate,
'Test Template': self.test_template.populate,
'Test Timeout': self.test_timeout.populate,
'Library': self._get_adder(self.add_library),
'Resource': self._get_adder(self.add_resource),
'Variables': self._get_adder(self.add_variables),
'Metadata': self._get_adder(self.add_metadata)})
def __iter__(self):
for setting in [self.doc, self.suite_setup, self.suite_teardown,
self.test_setup, self.test_teardown, self.force_tags,
self.default_tags, self.test_template, self.test_timeout] \
+ self.metadata + self.imports:
yield setting
class ResourceFileSettingTable(_SettingTable):
def _get_setters(self):
return utils.NormalizedDict({'Documentation': self.doc.populate,
'Document': self.doc.populate,
'Library': self._get_adder(self.add_library),
'Resource': self._get_adder(self.add_resource),
'Variables': self._get_adder(self.add_variables)})
def __iter__(self):
for setting in [self.doc] + self.imports:
yield setting
class InitFileSettingTable(_SettingTable):
def _get_setters(self):
return utils.NormalizedDict({'Documentation': self.doc.populate,
'Document': self.doc.populate,
'Suite Setup': self.suite_setup.populate,
'Suite Precondition': self.suite_setup.populate,
'Suite Teardown': self.suite_teardown.populate,
'Suite Postcondition': self.suite_teardown.populate,
'Test Setup': self.test_setup.populate,
'Test Precondition': self.test_setup.populate,
'Test Teardown': self.test_teardown.populate,
'Test Postcondition': self.test_teardown.populate,
'Force Tags': self.force_tags.populate,
'Library': self._get_adder(self.add_library),
'Resource': self._get_adder(self.add_resource),
'Variables': self._get_adder(self.add_variables),
'Metadata': self._get_adder(self.add_metadata)})
def __iter__(self):
for setting in [self.doc, self.suite_setup, self.suite_teardown,
self.test_setup, self.test_teardown, self.force_tags] \
+ self.metadata + self.imports:
yield setting
class VariableTable(_Table):
type = 'variable'
def __init__(self, parent):
_Table.__init__(self, parent)
self.variables = []
def add(self, name, value, comment=None):
self.variables.append(Variable(name, value, comment))
def __iter__(self):
return iter(self.variables)
def __nonzero__(self):
return bool(self.variables)
class TestCaseTable(_Table):
type = 'testcase'
def __init__(self, parent):
_Table.__init__(self, parent)
self.tests = []
def add(self, name):
self.tests.append(TestCase(self, name))
return self.tests[-1]
def __iter__(self):
return iter(self.tests)
def __nonzero__(self):
return bool(self.tests)
def is_started(self):
return bool(self.header)
class KeywordTable(_Table):
type = 'keyword'
def __init__(self, parent):
_Table.__init__(self, parent)
self.keywords = []
def add(self, name):
self.keywords.append(UserKeyword(self, name))
return self.keywords[-1]
def __iter__(self):
return iter(self.keywords)
def __nonzero__(self):
return bool(self.keywords)
class Variable(object):
def __init__(self, name, value, comment=None):
self.name = name.rstrip('= ')
if name.startswith('$') and value == []:
value = ''
if isinstance(value, basestring):
value = [value] # Need to support scalar lists until RF 2.6
self.value = value
self.comment = comment
def as_list(self):
ret = [self.name] + self.value
if self.comment:
ret.append('# %s' % self.comment)
return ret
def is_set(self):
return True
def is_for_loop(self):
return False
class _WithSteps(object):
def add_step(self, content, comment=None):
self.steps.append(Step(content, comment))
return self.steps[-1]
class TestCase(_WithSteps, _WithSettings):
def __init__(self, parent, name):
self.parent = parent
self.name = name
self.doc = Documentation('[Documentation]', self)
self.template = Template('[Template]', self)
self.tags = Tags('[Tags]', self)
self.setup = Fixture('[Setup]', self)
self.teardown = Fixture('[Teardown]', self)
self.timeout = Timeout('[Timeout]', self)
self.steps = []
self._setters = self._get_setters()
def _get_setters(self):
return utils.NormalizedDict({'Documentation': self.doc.populate,
'Document': self.doc.populate,
'Template': self.template.populate,
'Setup': self.setup.populate,
'Precondition': self.setup.populate,
'Teardown': self.teardown.populate,
'Postcondition': self.teardown.populate,
'Tags': self.tags.populate,
'Timeout': self.timeout.populate})
@property
def source(self):
return self.parent.source
@property
def directory(self):
return self.parent.directory
def add_for_loop(self, data):
self.steps.append(ForLoop(data))
return self.steps[-1]
def report_invalid_syntax(self, message, level='ERROR'):
type_ = 'test case' if type(self) is TestCase else 'keyword'
message = "Invalid syntax in %s '%s': %s" % (type_, self.name, message)
self.parent.report_invalid_syntax(message, level)
def __iter__(self):
for element in [self.doc, self.tags, self.setup,
self.template, self.timeout] \
+ self.steps + [self.teardown]:
yield element
class UserKeyword(TestCase):
def __init__(self, parent, name):
self.parent = parent
self.name = name
self.doc = Documentation('[Documentation]', self)
self.args = Arguments('[Arguments]', self)
self.return_ = Return('[Return]', self)
self.timeout = Timeout('[Timeout]', self)
self.steps = []
self._setters = self._get_setters()
def _get_setters(self):
return utils.NormalizedDict({'Documentation': self.doc.populate,
'Document': self.doc.populate,
'Arguments': self.args.populate,
'Return': self.return_.populate,
'Timeout': self.timeout.populate})
def __iter__(self):
for element in [self.args, self.doc, self.timeout] \
+ self.steps + [self.return_]:
yield element
class ForLoop(_WithSteps):
def __init__(self, content):
self.range, index = self._get_range_and_index(content)
self.vars = content[:index]
self.items = content[index+1:]
self.steps = []
def _get_range_and_index(self, content):
for index, item in enumerate(content):
item = item.upper().replace(' ', '')
if item in ['IN', 'INRANGE']:
return item == 'INRANGE', index
return False, len(content)
def is_comment(self):
return False
def is_for_loop(self):
return True
def apply_template(self, template):
return self
def as_list(self):
return [': FOR'] + self.vars + ['IN RANGE' if self.range else 'IN'] + self.items
def __iter__(self):
return iter(self.steps)
class Step(object):
def __init__(self, content, comment=None):
self.assign = self._get_assigned_vars(content)
try:
self.keyword = content[len(self.assign)]
except IndexError:
self.keyword = None
self.args = content[len(self.assign)+1:]
self.comment = comment
def _get_assigned_vars(self, content):
vars = []
for item in content:
if not is_var(item.rstrip('= ')):
break
vars.append(item)
return vars
def is_comment(self):
return not (self.assign or self.keyword or self.args)
def is_for_loop(self):
return False
def apply_template(self, template):
if self.is_comment():
return self
return Step([template] + self.as_list(include_comment=False))
def is_set(self):
return True
def as_list(self, indent=False, include_comment=True):
kw = [self.keyword] if self.keyword is not None else []
ret = self.assign + kw + self.args
if indent:
ret.insert(0, '')
if include_comment and self.comment:
ret.append('# %s' % self.comment)
return ret
| 35.910369
| 89
| 0.577497
|
4a051e8a11b875f3c19bb1fb82d12a764c6df0a3
| 520
|
py
|
Python
|
data.py
|
1Dennis-Hosea/opinion
|
2e38d71c98803c294fea62b9cdc062ff02dee504
|
[
"MIT"
] | null | null | null |
data.py
|
1Dennis-Hosea/opinion
|
2e38d71c98803c294fea62b9cdc062ff02dee504
|
[
"MIT"
] | null | null | null |
data.py
|
1Dennis-Hosea/opinion
|
2e38d71c98803c294fea62b9cdc062ff02dee504
|
[
"MIT"
] | null | null | null |
def Articles():
articles = [
{
'id': 1,
'title':'Article One',
'body':'The crying bood, sadness and anger, the weeping shore'
},
{
'id': 2,
'title':'Article Two',
'body':'The crying bood, sadness and anger, the weeping shore'
},
{
'id': 3,
'title':'Article Three',
'body':'The crying bood, sadness and anger, the weeping shore'
},
]
return articles
| 24.761905
| 76
| 0.442308
|
4a051e9f9f3c7f9b936849b4c32b6bc17ab97795
| 1,541
|
py
|
Python
|
Chapter13/educa/courses/urls.py
|
kableson/panda
|
dbcc112ce02fb7a5bb0acb84041d28c7caed9a2b
|
[
"Apache-2.0"
] | 639
|
2018-05-31T22:28:21.000Z
|
2022-03-21T16:49:47.000Z
|
Chapter13/educa/courses/urls.py
|
kableson/panda
|
dbcc112ce02fb7a5bb0acb84041d28c7caed9a2b
|
[
"Apache-2.0"
] | 43
|
2018-06-26T06:56:28.000Z
|
2022-03-31T18:32:57.000Z
|
Chapter13/educa/courses/urls.py
|
kableson/panda
|
dbcc112ce02fb7a5bb0acb84041d28c7caed9a2b
|
[
"Apache-2.0"
] | 644
|
2018-06-10T22:37:24.000Z
|
2022-03-04T18:34:01.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('mine/',
views.ManageCourseListView.as_view(),
name='manage_course_list'),
path('create/',
views.CourseCreateView.as_view(),
name='course_create'),
path('<pk>/edit/',
views.CourseUpdateView.as_view(),
name='course_edit'),
path('<pk>/delete/',
views.CourseDeleteView.as_view(),
name='course_delete'),
path('<pk>/module/',
views.CourseModuleUpdateView.as_view(),
name='course_module_update'),
path('module/<int:module_id>/content/<model_name>/create/',
views.ContentCreateUpdateView.as_view(),
name='module_content_create'),
path('module/<int:module_id>/content/<model_name>/<id>/',
views.ContentCreateUpdateView.as_view(),
name='module_content_update'),
path('content/<int:id>/delete/',
views.ContentDeleteView.as_view(),
name='module_content_delete'),
path('module/<int:module_id>/',
views.ModuleContentListView.as_view(),
name='module_content_list'),
path('module/order/',
views.ModuleOrderView.as_view(),
name='module_order'),
path('content/order/',
views.ContentOrderView.as_view(),
name='content_order'),
path('subject/<slug:subject>)/',
views.CourseListView.as_view(),
name='course_list_subject'),
path('<slug:slug>/',
views.CourseDetailView.as_view(),
name='course_detail'),
]
| 33.5
| 63
| 0.619079
|
4a05208f5c3bcbbc24a65c9a2c0c9384faac5d1d
| 1,757
|
py
|
Python
|
winregrc/application_identifiers.py
|
fengjixuchui/winreg-kb
|
3e793bee8806f980fe0cce6ad91cb474911a1fb1
|
[
"Apache-2.0"
] | 1
|
2021-02-04T05:32:26.000Z
|
2021-02-04T05:32:26.000Z
|
winregrc/application_identifiers.py
|
scudette/winreg-kb
|
f81b8bcaef8365d0c52bf3c87af2bccb4274bece
|
[
"Apache-2.0"
] | null | null | null |
winregrc/application_identifiers.py
|
scudette/winreg-kb
|
f81b8bcaef8365d0c52bf3c87af2bccb4274bece
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Windows application identifiers (AppID) collector."""
from __future__ import unicode_literals
from winregrc import interface
class ApplicationIdentifier(object):
"""Application identifier
Attributes:
description (str): description.
guid (str): identifier.
"""
def __init__(self, guid, description):
"""Initializes an application identifier.
Args:
guid (str): identifier.
description (str): description.
"""
super(ApplicationIdentifier, self).__init__()
self.description = description
self.guid = guid
class ApplicationIdentifiersCollector(interface.WindowsRegistryKeyCollector):
"""Windows application identifiers collector."""
_APPLICATION_IDENTIFIERS_KEY_PATH = (
'HKEY_LOCAL_MACHINE\\Software\\Classes\\AppID')
def Collect(self, registry, output_writer):
"""Collects the application identifiers.
Args:
registry (dfwinreg.WinRegistry): Windows Registry.
output_writer (OutputWriter): output writer.
Returns:
bool: True if the application identifiers key was found, False if not.
"""
application_identifiers_key = registry.GetKeyByPath(
self._APPLICATION_IDENTIFIERS_KEY_PATH)
if not application_identifiers_key:
return False
for subkey in application_identifiers_key.GetSubkeys():
guid = subkey.name.lower()
# Ignore subkeys that are not formatted as {%GUID%}
if len(guid) != 38 and guid[0] == '{' and guid[37] == '}':
continue
description = self._GetValueAsStringFromKey(subkey, '')
application_identifier = ApplicationIdentifier(
guid, description)
output_writer.WriteApplicationIdentifier(application_identifier)
return True
| 27.453125
| 77
| 0.708025
|
4a05214af516ce0800aa05118e4f1ca8281129c3
| 1,670
|
py
|
Python
|
azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_face_list_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | null | null | null |
azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_face_list_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-11-29T14:46:42.000Z
|
2018-11-29T14:46:42.000Z
|
azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_face_list_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .name_and_user_data_contract_py3 import NameAndUserDataContract
class LargeFaceList(NameAndUserDataContract):
"""Large face list object.
All required parameters must be populated in order to send to Azure.
:param name: User defined name, maximum length is 128.
:type name: str
:param user_data: User specified data. Length should not exceed 16KB.
:type user_data: str
:param large_face_list_id: Required. LargeFaceListId of the target large
face list.
:type large_face_list_id: str
"""
_validation = {
'name': {'max_length': 128},
'user_data': {'max_length': 16384},
'large_face_list_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'user_data': {'key': 'userData', 'type': 'str'},
'large_face_list_id': {'key': 'largeFaceListId', 'type': 'str'},
}
def __init__(self, *, large_face_list_id: str, name: str=None, user_data: str=None, **kwargs) -> None:
super(LargeFaceList, self).__init__(name=name, user_data=user_data, **kwargs)
self.large_face_list_id = large_face_list_id
| 37.954545
| 106
| 0.616766
|
4a0522ad7fb794b12b930c934903f842e5b0329e
| 653
|
py
|
Python
|
other/dingding/dingtalk/api/rest/OapiImChatScenegroupCreateRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
other/dingding/dingtalk/api/rest/OapiImChatScenegroupCreateRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
other/dingding/dingtalk/api/rest/OapiImChatScenegroupCreateRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
'''
Created by auto_sdk on 2021.03.01
'''
from dingtalk.api.base import RestApi
class OapiImChatScenegroupCreateRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.chat_banned_type = None
self.icon = None
self.management_type = None
self.mention_all_authority = None
self.owner_user_id = None
self.searchable = None
self.show_history_type = None
self.subadmin_ids = None
self.template_id = None
self.title = None
self.user_ids = None
self.uuid = None
self.validation_type = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.im.chat.scenegroup.create'
| 24.185185
| 50
| 0.750383
|
4a0522b7975cb85c28718d329951027700919a9a
| 4,990
|
py
|
Python
|
staff/tests.py
|
Gouldian0120/ascii_creator
|
9bcb9ca642dbdacc10cdc942ecb58ede65aa2539
|
[
"MIT"
] | null | null | null |
staff/tests.py
|
Gouldian0120/ascii_creator
|
9bcb9ca642dbdacc10cdc942ecb58ede65aa2539
|
[
"MIT"
] | null | null | null |
staff/tests.py
|
Gouldian0120/ascii_creator
|
9bcb9ca642dbdacc10cdc942ecb58ede65aa2539
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import User
from unittest import mock
def _create_staff_user(username='admin', password='admin'):
user = User.objects.create_user(username=username, password=password)
user.is_staff = True
user.save()
return {'username': username, 'password': password}
def _create_normal_user(username='user', password='user'):
user = User.objects.create_user(username=username, password=password)
return {'username': username, 'password': password}
class TestStaffAuthenticationView(TestCase):
def test_authenticated_requests(self):
"""
GET and POST authenticated requests should return 302 redirect to main page
"""
user = _create_normal_user()
self.client.login(**user)
response = self.client.get(reverse('staff_authentication_url'), follow=True)
self.assertRedirects(response, '/', status_code=302)
response = self.client.post(reverse('staff_authentication_url'), follow=True)
self.assertRedirects(response, '/', status_code=302)
def test_not_authenticated_requests(self):
"""
GET and POST not-authenticated requests should return 200
"""
response = self.client.get(reverse('staff_authentication_url'), follow=True)
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('staff_authentication_url'), follow=True)
self.assertEqual(response.status_code, 200)
def test_post_wrong_data(self):
"""
Wrong post_data should return errors and status 200
"""
data = {
'username': 'user123',
'password': 'pass123'
}
response = self.client.post(reverse('staff_authentication_url'), data=data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertIn('class="error"', response.content.decode('utf-8'))
@mock.patch("captcha.fields.ReCaptchaField.validate")
def test_post_normal_user_login(self, mock):
"""
Can't login into non-staff account, should return errors and status 200
"""
user = _create_normal_user()
response = self.client.post(reverse('staff_authentication_url'), data=user, follow=True)
self.assertEqual(response.status_code, 200)
self.assertIn('class="error"', response.content.decode('utf-8'))
@mock.patch("captcha.fields.ReCaptchaField.validate")
def test_post_wrong_username(self, mock):
"""
Wrong username register should return errors and status 200
"""
user = _create_staff_user('admin', 'admin')
user['username'] = 'adMin'
response = self.client.post(reverse('staff_authentication_url'), data=user, follow=True)
self.assertEqual(response.status_code, 200)
self.assertIn('class="error"', response.content.decode('utf-8'))
@mock.patch("captcha.fields.ReCaptchaField.validate")
def test_post_wrong_password(self, mock):
"""
Wrong password register should return errors and status 200
"""
user = _create_staff_user('admin', 'admin')
user['password'] = 'adMin'
response = self.client.post(reverse('staff_authentication_url'), data=user, follow=True)
self.assertEqual(response.status_code, 200)
self.assertIn('class="error"', response.content.decode('utf-8'))
def test_no_capthca_right_data(self):
"""
Right username and password but without captcha should return errors and status 200
"""
user = _create_staff_user('admin', 'admin')
response = self.client.post(reverse('staff_authentication_url'), data=user, follow=True)
self.assertEqual(response.status_code, 200)
self.assertIn('class="error"', response.content.decode('utf-8'))
@mock.patch("captcha.fields.ReCaptchaField.validate")
def test_success(self, mock):
"""
Right data and validated captcha should redirect to main page after authentication
"""
user = _create_staff_user('admin', 'admin')
response = self.client.post(reverse('staff_authentication_url'), data=user, follow=True)
self.assertRedirects(response, '/', status_code=302)
self.assertNotIn('class="error"', response.content.decode('utf-8'))
class TestStaffLogout(TestCase):
def test_staff_logged_out(self):
"""
If staff was logged, he should not more see special links, also redirect 302 to main page
"""
user = _create_staff_user('admin', 'admin')
self.client.login(**user)
response = self.client.get(reverse('index_page_url'))
self.assertIn('$Logout', response.content.decode('utf-8'))
response = self.client.get(reverse('staff_logout_url'))
self.assertRedirects(response, '/', status_code=302)
self.assertNotIn('$Logout', response.content.decode('utf-8'))
| 41.932773
| 97
| 0.673146
|
4a05230cf6b931b4583207c98efc10acb5ea5be7
| 226
|
py
|
Python
|
video_production/annotations/venue.py
|
OddballSports-tv/obies-eyes
|
2dd4fc9686f852b9adf89edd3246ad642063ac8b
|
[
"Apache-2.0"
] | null | null | null |
video_production/annotations/venue.py
|
OddballSports-tv/obies-eyes
|
2dd4fc9686f852b9adf89edd3246ad642063ac8b
|
[
"Apache-2.0"
] | 1
|
2022-02-19T20:40:44.000Z
|
2022-02-19T20:40:44.000Z
|
video_production/annotations/venue.py
|
OddballSports-tv/obies-eyes
|
2dd4fc9686f852b9adf89edd3246ad642063ac8b
|
[
"Apache-2.0"
] | null | null | null |
# imports
from .annotation import Annotation
import cv2
class Venue(Annotation):
def __init__(self):
super(Venue, self).__init__()
def _annotate(self, frame, venue=None, *args, **kwargs):
return frame
| 22.6
| 60
| 0.681416
|
4a05231eba1a0b716e4bd48641f8a390834d6ddf
| 23,793
|
py
|
Python
|
pybind/slxos/v17r_1_01a/openflow_state/interface/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17r_1_01a/openflow_state/interface/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17r_1_01a/openflow_state/interface/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class interface(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-openflow-operational - based on the path /openflow-state/interface. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Openflow enabled interface details
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__port','__link','__port_state','__speed','__mac','__port_id','__mode',)
_yang_name = 'interface'
_rest_name = 'interface'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__port_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="port-id", rest_name="port-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
self.__port_state = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-state-live': {'value': 3}, u'dcm-port-state-forward': {'value': 4}, u'dcm-port-state-invalid': {'value': 0}, u'dcm-port-state-blocked': {'value': 2}, u'dcm-port-state-link-down': {'value': 1}},), is_leaf=True, yang_name="port-state", rest_name="port-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-state', is_config=False)
self.__mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__link = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link", rest_name="link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='boolean', is_config=False)
self.__mode = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-mode-unknown': {'value': 0}, u'dcm-port-mode-hybrid-l3': {'value': 5}, u'dcm-port-mode-hybrid-l2': {'value': 4}, u'dcm-port-mode-l23': {'value': 3}, u'dcm-port-mode-hybrid-l23': {'value': 6}, u'dcm-port-mode-l3': {'value': 2}, u'dcm-port-mode-l2': {'value': 1}, u'egress-mode': {'value': 7}},), is_leaf=True, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-mode', is_config=False)
self.__speed = YANGDynClass(base=unicode, is_leaf=True, yang_name="speed", rest_name="speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__port = YANGDynClass(base=unicode, is_leaf=True, yang_name="port", rest_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'openflow-state', u'interface']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'openflow-state', u'interface']
def _get_port(self):
"""
Getter method for port, mapped from YANG variable /openflow_state/interface/port (string)
YANG Description: Port
"""
return self.__port
def _set_port(self, v, load=False):
"""
Setter method for port, mapped from YANG variable /openflow_state/interface/port (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_port is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port() directly.
YANG Description: Port
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="port", rest_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="port", rest_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__port = t
if hasattr(self, '_set'):
self._set()
def _unset_port(self):
self.__port = YANGDynClass(base=unicode, is_leaf=True, yang_name="port", rest_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_link(self):
"""
Getter method for link, mapped from YANG variable /openflow_state/interface/link (boolean)
YANG Description: Link
"""
return self.__link
def _set_link(self, v, load=False):
"""
Setter method for link, mapped from YANG variable /openflow_state/interface/link (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_link is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link() directly.
YANG Description: Link
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="link", rest_name="link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link", rest_name="link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='boolean', is_config=False)""",
})
self.__link = t
if hasattr(self, '_set'):
self._set()
def _unset_link(self):
self.__link = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link", rest_name="link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='boolean', is_config=False)
def _get_port_state(self):
"""
Getter method for port_state, mapped from YANG variable /openflow_state/interface/port_state (port-state)
YANG Description: Port State
"""
return self.__port_state
def _set_port_state(self, v, load=False):
"""
Setter method for port_state, mapped from YANG variable /openflow_state/interface/port_state (port-state)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_state() directly.
YANG Description: Port State
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-state-live': {'value': 3}, u'dcm-port-state-forward': {'value': 4}, u'dcm-port-state-invalid': {'value': 0}, u'dcm-port-state-blocked': {'value': 2}, u'dcm-port-state-link-down': {'value': 1}},), is_leaf=True, yang_name="port-state", rest_name="port-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-state', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_state must be of a type compatible with port-state""",
'defined-type': "brocade-openflow-operational:port-state",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-state-live': {'value': 3}, u'dcm-port-state-forward': {'value': 4}, u'dcm-port-state-invalid': {'value': 0}, u'dcm-port-state-blocked': {'value': 2}, u'dcm-port-state-link-down': {'value': 1}},), is_leaf=True, yang_name="port-state", rest_name="port-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-state', is_config=False)""",
})
self.__port_state = t
if hasattr(self, '_set'):
self._set()
def _unset_port_state(self):
self.__port_state = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-state-live': {'value': 3}, u'dcm-port-state-forward': {'value': 4}, u'dcm-port-state-invalid': {'value': 0}, u'dcm-port-state-blocked': {'value': 2}, u'dcm-port-state-link-down': {'value': 1}},), is_leaf=True, yang_name="port-state", rest_name="port-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-state', is_config=False)
def _get_speed(self):
"""
Getter method for speed, mapped from YANG variable /openflow_state/interface/speed (string)
YANG Description: Speed
"""
return self.__speed
def _set_speed(self, v, load=False):
"""
Setter method for speed, mapped from YANG variable /openflow_state/interface/speed (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_speed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_speed() directly.
YANG Description: Speed
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="speed", rest_name="speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """speed must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="speed", rest_name="speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__speed = t
if hasattr(self, '_set'):
self._set()
def _unset_speed(self):
self.__speed = YANGDynClass(base=unicode, is_leaf=True, yang_name="speed", rest_name="speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_mac(self):
"""
Getter method for mac, mapped from YANG variable /openflow_state/interface/mac (string)
YANG Description: MAC
"""
return self.__mac
def _set_mac(self, v, load=False):
"""
Setter method for mac, mapped from YANG variable /openflow_state/interface/mac (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_mac is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mac() directly.
YANG Description: MAC
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mac must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__mac = t
if hasattr(self, '_set'):
self._set()
def _unset_mac(self):
self.__mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_port_id(self):
"""
Getter method for port_id, mapped from YANG variable /openflow_state/interface/port_id (uint32)
YANG Description: OF-Port-ID
"""
return self.__port_id
def _set_port_id(self, v, load=False):
"""
Setter method for port_id, mapped from YANG variable /openflow_state/interface/port_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_id() directly.
YANG Description: OF-Port-ID
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="port-id", rest_name="port-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="port-id", rest_name="port-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""",
})
self.__port_id = t
if hasattr(self, '_set'):
self._set()
def _unset_port_id(self):
self.__port_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="port-id", rest_name="port-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
def _get_mode(self):
"""
Getter method for mode, mapped from YANG variable /openflow_state/interface/mode (port-mode)
YANG Description: Mode
"""
return self.__mode
def _set_mode(self, v, load=False):
"""
Setter method for mode, mapped from YANG variable /openflow_state/interface/mode (port-mode)
If this variable is read-only (config: false) in the
source YANG file, then _set_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mode() directly.
YANG Description: Mode
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-mode-unknown': {'value': 0}, u'dcm-port-mode-hybrid-l3': {'value': 5}, u'dcm-port-mode-hybrid-l2': {'value': 4}, u'dcm-port-mode-l23': {'value': 3}, u'dcm-port-mode-hybrid-l23': {'value': 6}, u'dcm-port-mode-l3': {'value': 2}, u'dcm-port-mode-l2': {'value': 1}, u'egress-mode': {'value': 7}},), is_leaf=True, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-mode', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mode must be of a type compatible with port-mode""",
'defined-type': "brocade-openflow-operational:port-mode",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-mode-unknown': {'value': 0}, u'dcm-port-mode-hybrid-l3': {'value': 5}, u'dcm-port-mode-hybrid-l2': {'value': 4}, u'dcm-port-mode-l23': {'value': 3}, u'dcm-port-mode-hybrid-l23': {'value': 6}, u'dcm-port-mode-l3': {'value': 2}, u'dcm-port-mode-l2': {'value': 1}, u'egress-mode': {'value': 7}},), is_leaf=True, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-mode', is_config=False)""",
})
self.__mode = t
if hasattr(self, '_set'):
self._set()
def _unset_mode(self):
self.__mode = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-mode-unknown': {'value': 0}, u'dcm-port-mode-hybrid-l3': {'value': 5}, u'dcm-port-mode-hybrid-l2': {'value': 4}, u'dcm-port-mode-l23': {'value': 3}, u'dcm-port-mode-hybrid-l23': {'value': 6}, u'dcm-port-mode-l3': {'value': 2}, u'dcm-port-mode-l2': {'value': 1}, u'egress-mode': {'value': 7}},), is_leaf=True, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-mode', is_config=False)
port = __builtin__.property(_get_port)
link = __builtin__.property(_get_link)
port_state = __builtin__.property(_get_port_state)
speed = __builtin__.property(_get_speed)
mac = __builtin__.property(_get_mac)
port_id = __builtin__.property(_get_port_id)
mode = __builtin__.property(_get_mode)
_pyangbind_elements = {'port': port, 'link': link, 'port_state': port_state, 'speed': speed, 'mac': mac, 'port_id': port_id, 'mode': mode, }
| 64.654891
| 810
| 0.704619
|
4a05232714d01b2f3f53df38c445a06fb6d6b6b7
| 11,589
|
py
|
Python
|
src/h3sed/lib/util.py
|
suurjaak/h3sed
|
e86e83d91bea0332493bf3b123c8e6071a27bd4b
|
[
"MIT"
] | null | null | null |
src/h3sed/lib/util.py
|
suurjaak/h3sed
|
e86e83d91bea0332493bf3b123c8e6071a27bd4b
|
[
"MIT"
] | null | null | null |
src/h3sed/lib/util.py
|
suurjaak/h3sed
|
e86e83d91bea0332493bf3b123c8e6071a27bd4b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Miscellaneous utility functions.
------------------------------------------------------------------------------
This file is part of h3sed - Heroes3 Savegame Editor.
Released under the MIT License.
@created 19.11.2011
@modified 15.01.2022
------------------------------------------------------------------------------
"""
import collections
import ctypes
import datetime
import locale
import math
import os
import re
import subprocess
import sys
import struct
import time
import urllib
import warnings
try: int_types = (int, long) # Py2
except Exception: int_types = (int, ) # Py3
try: text_types = (str, unicode) # Py2
except Exception: text_types = (str, ) # Py3
try: string_type = unicode # Py2
except Exception: string_type = str # Py3
def m(o, name, case_insensitive=True):
"""Returns the members of the object or dict, filtered by name."""
members = o.keys() if isinstance(o, dict) else dir(o)
if case_insensitive:
return [i for i in members if name.lower() in i.lower()]
else:
return [i for i in members if name in i]
def bytoi(blob):
"""Converts a string of bytes or a bytearray to unsigned integer."""
fmt = {1: "<B", 2: "<H", 4: "<L", 8: "<Q"}[len(blob)]
return struct.unpack(fmt, blob)[0]
def itoby(v, length):
"""
Converts an unsigned integer to a bytearray of specified length.
"""
fmt = {1: "<B", 2: "<H", 4: "<L", 8: "<Q"}[length]
return bytearray(struct.pack(fmt, v))
def safe_filename(filename):
"""Returns the filename with characters like \:*?"<>| removed."""
return re.sub(r"[\/\\\:\*\?\"\<\>\|\x00-\x1f]", "", filename)
def format_bytes(size, precision=2, max_units=True, with_units=True):
"""
Returns a formatted byte size (e.g. "421.45 MB" or "421,451,273 bytes").
@param precision number of decimals to leave after converting to
maximum units
@param max_units whether to convert value to corresponding maximum
unit, or leave as bytes and add thousand separators
@param with_units whether to include units in result
"""
size, formatted, unit = int(size), "0", "bytes"
if size:
byteunit = "byte" if 1 == size else "bytes"
if max_units:
UNITS = [byteunit, "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
log = min(len(UNITS) - 1, math.floor(math.log(size, 1024)))
formatted = "%.*f" % (precision, size / math.pow(1024, log))
formatted = formatted.rstrip("0").rstrip(".")
unit = UNITS[int(log)]
else:
formatted = "".join([x + ("," if i and not i % 3 else "")
for i, x in enumerate(str(size)[::-1])][::-1])
unit = byteunit
return formatted + ((" " + unit) if with_units else "")
def format_exc(e):
"""Formats an exception as Class: message, or Class: (arg1, arg2, ..)."""
with warnings.catch_warnings():
warnings.simplefilter("ignore") # DeprecationWarning on e.message
msg = to_unicode(e.message) if getattr(e, "message", None) \
else "(%s)" % ", ".join(map(to_unicode, e.args)) if e.args else ""
result = u"%s%s" % (type(e).__name__, ": " + msg if msg else "")
return result
def plural(word, items=None, numbers=True, single="1", sep="", pref="", suf=""):
"""
Returns the word as 'count words', or '1 word' if count is 1,
or 'words' if count omitted.
@param items item collection or count,
or None to get just the plural of the word
numbers if False, count is omitted from final result
single prefix to use for word if count is 1, e.g. "a"
sep thousand-separator to use for count
pref prefix to prepend to count, e.g. "~150"
suf suffix to append to count, e.g. "150+"
"""
count = len(items) if hasattr(items, "__len__") else items or 0
isupper = word[-1:].isupper()
suffix = "es" if word and word[-1:].lower() in "xyz" else "s" if word else ""
if isupper: suffix = suffix.upper()
if count != 1 and "y" == word[-1:].lower():
word = word[:-1] + ("I" if isupper else "i")
result = word + ("" if 1 == count else suffix)
if numbers and items is not None:
fmtcount = single if 1 == count else "".join([
x + ("," if i and not i % 3 else "")
for i, x in enumerate(str(count)[::-1])][::-1
]) if sep else str(count)
fmtcount = pref + fmtcount + suf
result = "%s %s" % (single if 1 == count else fmtcount, result)
return result.strip()
def unique_path(pathname, suffix="%(ext)s_%(counter)s"):
"""
Returns a unique version of the path. If a file or directory with the
same name already exists, returns a unique version
(e.g. "C:\config.sys_2" if ""C:\config.sys" already exists).
@param suffix string to append, formatted with variables counter, ext
"""
result = pathname
if "linux" in sys.platform and isinstance(result, string_type) \
and "utf-8" != sys.getfilesystemencoding():
result = result.encode("utf-8") # Linux has trouble if locale not UTF-8
path, name = os.path.split(result)
base, ext = os.path.splitext(name)
if len(name) > 255: # Filesystem limitation
name = base[:255 - len(ext) - 2] + ".." + ext
result = os.path.join(path, name)
counter = 2
while os.path.exists(result):
mysuffix = suffix % {"ext": ext, "counter": counter}
name = base + mysuffix
if len(name) > 255:
name = base[:255 - len(mysuffix) - 2] + ".." + mysuffix
result = os.path.join(path, name)
counter += 1
return result
def select_file(filepath):
"""
Tries to open the file directory and select file.
Falls back to opening directory only (select is Windows-only).
"""
if not os.path.exists(filepath):
return start_file(os.path.split(filepath)[0])
try: subprocess.Popen('explorer /select, "%s"' % shortpath(filepath))
except Exception: start_file(os.path.split(filepath)[0])
def add_unique(lst, item, direction=1, maxlen=sys.maxsize):
"""
Adds the item to the list from start or end. If item is already in list,
removes it first. If list is longer than maxlen, shortens it.
@param direction side from which item is added, -1/1 for start/end
@param maxlen maximum length list is allowed to grow to before
shortened from the other direction
"""
if item in lst:
lst.remove(item)
lst.insert(0, item) if direction < 0 else lst.append(item)
if len(lst) > maxlen:
lst[:] = lst[:maxlen] if direction < 0 else lst[-maxlen:]
return lst
def make_unique(value, existing, suffix="_%s", counter=2, case=False):
"""
Returns a unique string, appending suffix % counter as necessary.
@param existing collection of existing strings to check
@oaram case whether uniqueness should be case-sensitive
"""
result, is_present = value, (lambda: result in existing)
if not case:
existing = [x.lower() for x in existing]
is_present = lambda: result.lower() in existing
while is_present(): result, counter = value + suffix % counter, counter + 1
return result
def get(collection, *path, **kwargs):
"""
Returns the value at specified collection path. If path not available,
returns the first keyword argument if any given, or None.
Collection can be a nested structure of dicts, lists, tuples or strings.
E.g. util.get({"root": {"first": [{"k": "v"}]}}, "root", "first", 0, "k").
Also supports named object attributes.
"""
default = (list(kwargs.values()) + [None])[0]
result = collection if path else default
if len(path) == 1 and isinstance(path[0], list): path = path[0]
for p in path:
if isinstance(result, collections.Sequence): # Iterable with index
if isinstance(p, int_types) and p < len(result):
result = result[p]
else:
result = default
elif isinstance(result, collections.Mapping): # Container with lookup
result = result.get(p, default)
else:
result = getattr(result, p, default)
if result == default: break # for p
return result
def to_unicode(value, encoding=None):
"""
Returns the value as a Unicode string. Tries decoding as UTF-8 if
locale encoding fails.
"""
result = value
if not isinstance(value, string_type):
encoding = encoding or locale.getpreferredencoding()
if isinstance(value, bytes):
try:
result = string_type(value, encoding)
except Exception:
result = string_type(value, "utf-8", errors="replace")
else:
result = str(value)
if not isinstance(result, string_type):
result = string_type(result, errors="replace")
return result
def longpath(path):
"""Returns the path in long Windows form ("Program Files" not PROGRA~1)."""
result = path
try:
buf = ctypes.create_unicode_buffer(65536)
GetLongPathNameW = ctypes.windll.kernel32.GetLongPathNameW
if GetLongPathNameW(string_type(path), buf, 65536):
result = buf.value
else:
head, tail = os.path.split(path)
if GetLongPathNameW(string_type(head), buf, 65536):
result = os.path.join(buf.value, tail)
except Exception: pass
return result
def shortpath(path):
"""Returns the path in short Windows form (PROGRA~1 not "Program Files")."""
if isinstance(path, str): return path
from ctypes import wintypes
ctypes.windll.kernel32.GetShortPathNameW.argtypes = [
# lpszLongPath, lpszShortPath, cchBuffer
wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.DWORD
]
ctypes.windll.kernel32.GetShortPathNameW.restype = wintypes.DWORD
buf = ctypes.create_unicode_buffer(4 * len(path))
ctypes.windll.kernel32.GetShortPathNameW(path, buf, len(buf))
return buf.value
def win32_unicode_argv():
"""
Returns Windows command-line arguments converted to Unicode.
@from http://stackoverflow.com/a/846931/145400
"""
result = sys.argv[:]
try:
from ctypes import POINTER, byref, cdll, c_int, windll
from ctypes.wintypes import LPCWSTR, LPWSTR
except Exception: return result
GetCommandLineW = cdll.kernel32.GetCommandLineW
GetCommandLineW.argtypes = []
GetCommandLineW.restype = LPCWSTR
CommandLineToArgvW = windll.shell32.CommandLineToArgvW
CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
CommandLineToArgvW.restype = POINTER(LPWSTR)
argc = c_int(0)
argv = CommandLineToArgvW(GetCommandLineW(), byref(argc))
if argc.value:
# Remove Python executable and commands if present
start = argc.value - len(sys.argv)
result = [argv[i] for i in range(start, argc.value)]
#result = [argv[i].encode("utf-8") for i in range(start, argc.value)]
return result
| 37.872549
| 82
| 0.59056
|
4a05235186cffd316eced119a3e5b41907a65701
| 7,223
|
py
|
Python
|
lib/kubernetes/client/models/v1beta1_mutating_webhook_configuration_list.py
|
splunkenizer/splunk_as_a_service_app
|
97c4aaf927d2171bf131126cf9b70489ac75bc5a
|
[
"Apache-2.0"
] | 7
|
2019-12-21T00:14:14.000Z
|
2021-03-11T14:51:37.000Z
|
lib/kubernetes/client/models/v1beta1_mutating_webhook_configuration_list.py
|
splunkenizer/splunk_as_a_service_app
|
97c4aaf927d2171bf131126cf9b70489ac75bc5a
|
[
"Apache-2.0"
] | 29
|
2019-10-09T11:16:21.000Z
|
2020-06-23T09:32:09.000Z
|
lib/kubernetes/client/models/v1beta1_mutating_webhook_configuration_list.py
|
splunkenizer/splunk_as_a_service_app
|
97c4aaf927d2171bf131126cf9b70489ac75bc5a
|
[
"Apache-2.0"
] | 1
|
2021-05-07T10:13:31.000Z
|
2021-05-07T10:13:31.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1MutatingWebhookConfigurationList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1beta1MutatingWebhookConfiguration]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1beta1MutatingWebhookConfigurationList - a model defined in Swagger
"""
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1beta1MutatingWebhookConfigurationList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta1MutatingWebhookConfigurationList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1MutatingWebhookConfigurationList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1MutatingWebhookConfigurationList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1beta1MutatingWebhookConfigurationList.
List of MutatingWebhookConfiguration.
:return: The items of this V1beta1MutatingWebhookConfigurationList.
:rtype: list[V1beta1MutatingWebhookConfiguration]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1beta1MutatingWebhookConfigurationList.
List of MutatingWebhookConfiguration.
:param items: The items of this V1beta1MutatingWebhookConfigurationList.
:type: list[V1beta1MutatingWebhookConfiguration]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1beta1MutatingWebhookConfigurationList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1MutatingWebhookConfigurationList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1MutatingWebhookConfigurationList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1MutatingWebhookConfigurationList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1MutatingWebhookConfigurationList.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The metadata of this V1beta1MutatingWebhookConfigurationList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1MutatingWebhookConfigurationList.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param metadata: The metadata of this V1beta1MutatingWebhookConfigurationList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1MutatingWebhookConfigurationList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 34.070755
| 282
| 0.615534
|
4a05254bb96d1f61dbc3830207fc9ba77f1f8ed3
| 13,541
|
py
|
Python
|
cyclopeps/algs/simple_update.py
|
philliphelms/cyclopeps
|
f024d827a7412f4d9df10d6b9453c2692b1a74c3
|
[
"MIT"
] | null | null | null |
cyclopeps/algs/simple_update.py
|
philliphelms/cyclopeps
|
f024d827a7412f4d9df10d6b9453c2692b1a74c3
|
[
"MIT"
] | null | null | null |
cyclopeps/algs/simple_update.py
|
philliphelms/cyclopeps
|
f024d827a7412f4d9df10d6b9453c2692b1a74c3
|
[
"MIT"
] | null | null | null |
from cyclopeps.tools.utils import *
from cyclopeps.tools.peps_tools import *
from cyclopeps.tools.ops_tools import *
from numpy import float_
def absorb_lambdas(row,peps_col,vert_lambdas,left_lambdas,right_lambdas):
    """Multiply the surrounding lambda (bond-weight) matrices into copies of
    the two site tensors at ``row`` and ``row+1`` of a single PEPS column.

    ``peps_col`` itself is not modified; weighted copies are returned.
    Site legs are ordered (left, down, physical, right, up) = ``ldpru``.
    """
    lower = peps_col[row].copy()
    upper = peps_col[row + 1].copy()
    # Bond below the lower site (absent at the bottom edge).
    if not (row == 0):
        lower = einsum('ldpru,dD->lDpru', lower, vert_lambdas[row - 1])
    # Horizontal bonds to the left of both sites (None at the left edge).
    if left_lambdas is not None:
        lower = einsum('ldpru,lL->Ldpru', lower, left_lambdas[row])
        upper = einsum('ldpru,lL->Ldpru', upper, left_lambdas[row + 1])
    # Horizontal bonds to the right of both sites (None at the right edge).
    if right_lambdas is not None:
        lower = einsum('ldpru,rR->ldpRu', lower, right_lambdas[row])
        upper = einsum('ldpru,rR->ldpRu', upper, right_lambdas[row + 1])
    # Bond above the upper site (absent at the top edge).
    if not (row == len(peps_col) - 2):
        upper = einsum('ldpru,uU->ldprU', upper, vert_lambdas[row + 1])
    # The shared bond between the two sites is absorbed into the lower one.
    lower = einsum('ldpru,uU->ldprU', lower, vert_lambdas[row])
    return lower, upper
def separate_sites(combined_sites,D):
    """Split a combined two-site tensor back into (site1, Lambda, site2) via
    an SVD across the shared bond, truncating that bond to dimension ``D``.
    """
    # SVD between legs 0-3 and the rest; entanglement/weight info not needed.
    site1, bond, site2 = combined_sites.svd(4,
                                            truncate_mbd=D,
                                            return_ent=False,
                                            return_wgt=False)
    # Renormalize the singular values (just to keep numbers reasonable).
    bond /= einsum('ij,jk->ik', bond, bond).sqrt().to_val()
    # Move the new bond leg of the upper site into the left-bond slot.
    site2 = site2.transpose([1, 0, 2, 3, 4])
    return site1, bond, site2
def remove_lambdas(row,peps_col,vert_lambdas,left_lambdas,right_lambdas):
    """Divide the surrounding lambda matrices back out of the two sites at
    ``row`` and ``row+1`` (inverse of the absorption step), writing the
    stripped tensors back into ``peps_col``.

    The shared bond between the two sites is intentionally left absorbed.
    """
    lower = peps_col[row].copy()
    upper = peps_col[row + 1].copy()
    # Undo the bond below the lower site (absent at the bottom edge).
    if not (row == 0):
        lower = einsum('ldpru,dD->lDpru', lower, vert_lambdas[row - 1].invert_diag())
    # Undo the left horizontal bonds.
    if left_lambdas is not None:
        lower = einsum('ldpru,lL->Ldpru', lower, left_lambdas[row].invert_diag())
        upper = einsum('ldpru,lL->Ldpru', upper, left_lambdas[row + 1].invert_diag())
    # Undo the right horizontal bonds.
    if right_lambdas is not None:
        lower = einsum('ldpru,rR->ldpRu', lower, right_lambdas[row].invert_diag())
        upper = einsum('ldpru,rR->ldpRu', upper, right_lambdas[row + 1].invert_diag())
    # Undo the bond above the upper site (absent at the top edge).
    if not (row == len(peps_col) - 2):
        upper = einsum('ldpru,uU->ldprU', upper, vert_lambdas[row + 1].invert_diag())
    # Store the stripped tensors back in place and return the column.
    peps_col[row] = lower
    peps_col[row + 1] = upper
    return peps_col
def tebd_step_single_col(ham,peps_col,vert_lambdas,left_lambdas,right_lambdas,mbd,step_size):
    """Apply one imaginary-time-evolution sweep to every vertical bond of a
    single PEPS column, truncating each updated bond back to dimension ``mbd``.

    Args:
        ham: list of two-site gates, one per vertical bond of the column.
        peps_col: list of site (Gamma) tensors for this column; updated in
            place and also returned.
        vert_lambdas: diagonal bond matrices on this column's vertical bonds;
            updated in place and also returned.
        left_lambdas: horizontal bond matrices shared with the column to the
            left, or ``None`` at the left lattice edge.
        right_lambdas: same for the column to the right.
        mbd: maximum bond dimension kept after each SVD truncation.
        step_size: imaginary-time step used to exponentiate each gate.

    Returns:
        Tuple ``(peps_col, vert_lambdas)`` with the evolved tensors.
    """
    # Loop through rows in the column
    E = zeros(len(ham),dtype=peps_col[0].dtype)  # NOTE(review): E is never used or returned — confirm it can be dropped
    for row in range(len(ham)):
        # Get symmetries for reference (restored after the update below)
        sym1,sym2 = peps_col[row].get_signs(), peps_col[row+1].get_signs()
        # Absorb Lambdas into Gamma tensors
        peps1,peps2 = absorb_lambdas(row,peps_col,vert_lambdas,left_lambdas,right_lambdas)
        # Take the exponential of the hamiltonian: exp(-step_size * ham[row])
        eH = exp_gate(ham[row],-step_size)
        # Apply Time Evolution: contract the two sites over their shared bond
        tmp = einsum('ldpru,LuPRU->ldprLPRU',peps1,peps2)
        if len(peps1.legs[2]) == 2:
            # Thermal State time evolution: the physical leg is a merged
            # (ket, ancilla) pair, so split it, act on the ket index only,
            # then merge it back.
            tmp.unmerge_ind(5)
            tmp.unmerge_ind(2)
            result = einsum('ldpyrLPzRU,pPqQ->ldqyrLQzRU',tmp,eH)
            result.merge_inds([6,7])
            result.merge_inds([2,3])
        else:
            # Regular state time evolution
            result = einsum('ldprLPRU,pPqQ->ldqrLQRU',tmp,eH)
        # Perform SVD to split the evolved pair, truncating the bond to mbd
        peps1,Lambda,peps2 = separate_sites(result,mbd)
        # Put result back into vectors
        vert_lambdas[row] = Lambda
        peps_col[row] = peps1
        peps_col[row+1] = peps2
        # Remove Lambdas (the surrounding ones absorbed above; the updated
        # middle bond stays with the new singular values)
        peps_col = remove_lambdas(row,peps_col,vert_lambdas,left_lambdas,right_lambdas)
        # Update symmetries back to the reference signs captured earlier
        peps_col[row].update_signs(sym1)
        peps_col[row+1].update_signs(sym2)
    # Return the result
    return peps_col,vert_lambdas
def tebd_step_col(peps,ham,mbd,step_size):
    """Sweep a TEBD update over every column of the PEPS.

    Previously this triplicated the same call for the first, last, and
    interior columns; the only difference was which horizontal lambdas
    exist, so that choice is now factored out. An unused energy
    accumulator was also removed.

    Args:
        peps: PEPS object; ``peps.ltensors[0][col]`` holds the vertical bond
            matrices of column ``col`` and ``peps.ltensors[1][col]`` the
            horizontal bond matrices between columns ``col`` and ``col+1``.
        ham: list (one entry per column) of that column's two-site gates.
        mbd: maximum bond dimension kept after truncation.
        step_size: imaginary-time step.

    Returns:
        The updated PEPS object (also modified in place).
    """
    # Figure out peps size
    (Nx, Ny) = peps.shape
    # Loop through all columns
    for col in range(Nx):
        # Edge columns have no neighbor on one side, hence no horizontal
        # lambdas to absorb there.
        left_lambdas = peps.ltensors[1][col - 1] if col > 0 else None
        right_lambdas = peps.ltensors[1][col] if col < Nx - 1 else None
        peps[col], peps.ltensors[0][col] = tebd_step_single_col(ham[col],
                                                                peps[col],
                                                                peps.ltensors[0][col],
                                                                left_lambdas,
                                                                right_lambdas,
                                                                mbd,
                                                                step_size)
    # Return result
    return peps
def tebd_step(peps,ham,mbd,step_size):
    """Apply one full TEBD step: evolve all vertical bonds column by column,
    then rotate the lattice so rows become columns, evolve those, and
    rotate back.
    """
    # Vertical bonds.
    peps = tebd_step_col(peps, ham[0], mbd, step_size)
    # Horizontal bonds, handled via a rotation of the whole lattice.
    peps.rotate(clockwise=True)
    peps = tebd_step_col(peps, ham[1], mbd, step_size)
    peps.rotate(clockwise=False)
    return peps
def tebd_steps(peps,ham,mbd,step_size,n_step,conv_tol,chi=None,chi_norm=None,chi_op=None):
    """Run up to ``n_step`` TEBD iterations at a fixed step size, stopping
    early once the relative energy change per step drops below ``conv_tol``.

    Returns the final energy and the evolved PEPS.
    """
    nSite = len(peps) * len(peps[0])
    # Energy of the starting state, used as the first convergence reference.
    E_last = peps.calc_op(ham, chi=chi_op)
    mpiprint(0, 'Initial Energy/site = {}'.format(E_last / nSite))
    for _ in range(n_step):
        # One full TEBD sweep over columns and rows.
        peps = tebd_step(peps, ham, mbd, step_size)
        # Renormalize to counter drift from the repeated truncations.
        peps.normalize(chi=chi_norm)
        # Measure the energy and test convergence against the previous sweep.
        E = peps.calc_op(ham, chi=chi_op)
        mpiprint(0, 'Energy/site = {} '.format(E / nSite))
        if abs((E - E_last) / E) < conv_tol:
            mpiprint(3, 'Converged E = {} to an accuracy of ~{}'.format(E, abs(E - E_last)))
            break
        E_last = E
    return E, peps
def run_tebd(Nx,Ny,d,ham,
             Zn=None,
             peps=None,
             backend='numpy',
             D=3,
             chi=10,
             chi_norm=10,
             chi_op=10,
             thermal=False,
             exact_norm_tol=20,
             norm_tol=0.1,
             singleLayer=True,
             max_norm_iter=20,
             dtype=float_,
             step_size=0.2,
             n_step=5,
             conv_tol=1e-8,
             peps_fname=None,
             peps_fdir='./'):
    """
    Run the TEBD algorithm for a PEPS
    Args:
        Nx : int
            Lattice size in x-direction
        Ny : int
            Lattice size in y-direction
        d : int
            Local physical bond dimension
        ham : 3D array
            The suzuki-trotter decomposition of the
            Hamiltonian. An example of how this is constructed
            for the ising transverse field model
            is found in /mpo/itf.py
    Kwargs:
        Zn : int
            The Zn symmetry of the PEPS.
            If None, then a dense, non-symmetric PEPS will be used.
        backend : str
            The tensor backend to be used.
            Current options are 'numpy' or 'ctf'
        peps : PEPS object
            The initial guess for the PEPS, in the "Gamma-Lambda"
            formalism. If this is not
            provided, then a random peps will be used.
            Note that the bond dimension D should be the same
            as the initial calculation bond dimension, since
            no bond reduction or initial increase of bond dimension
            is currently implemented.
        D : int
            The maximum bond dimension (may be a list of
            maximum bond dimensions, and a loop will be
            performed where it is slowly incremented)
        chi : int
            The boundary mpo maximum bond dimension
        chi_norm : int
            The boundary mpo maximum bond dimension used
            when the norm is computed
        chi_op : int
            The boundary mpo maximum bond dimension used
            when the operator expectation values are computed
        thermal : bool
            Whether to do the fu algorithm with a thermal state, i.e.
            two physical indices
        norm_tol : float
            How close to 1. the norm should be before
            exact arithmetic is used in the normalization
            procedure. See documentation of
            peps_tool.normalize_peps() function for more details.
        singleLayer : bool
            Whether to use a single layer environment
            (currently only option implemented)
        max_norm_iter : int
            The maximum number of normalization iterations
        dtype : dtype
            The data type for the PEPS
        step_size : float
            The trotter step size, may be a list of
            step sizes
        n_step : int
            The number of steps to be taken for each
            trotter step size. If it is a list, then
            len(step_size) == len(n_step) and
            len(D) == len(n_step) must both be True.
        conv_tol : float
            The convergence tolerance
        peps_fname : str
            The name of the saved peps file
        peps_fdir : str
            The location where the peps will be saved

    Returns:
        Tuple ``(E, peps)``: the energy after the final stage and the
        evolved PEPS object.
    """
    t0 = time.time()
    mpiprint(0,'\n\nStarting SU TEBD Calculation')
    mpiprint(0,'#'*50)
    # Ensure the optimization parameters, namely the
    # bond dimension, trotter step size, and number
    # of trotter steps are compatable.
    # The first list-valued parameter found decides how many stages to run;
    # every scalar parameter is then broadcast into a list of that length.
    if hasattr(D,'__len__'):
        n_calcs = len(D)
    elif hasattr(step_size,'__len__'):
        n_calcs = len(step_size)
    elif hasattr(n_step,'__len__'):
        n_calcs = len(n_step)
    elif hasattr(conv_tol,'__len__'):
        n_calcs = len(conv_tol)
    elif hasattr(chi,'__len__'):
        n_calcs = len(chi)
    else:
        # All parameters are scalars: a single stage.
        D = [D]
        step_size = [step_size]
        n_step = [n_step]
        conv_tol = [conv_tol]
        chi = [chi]
    if not hasattr(D,'__len__'):
        D = [D]*n_calcs
    if not hasattr(step_size,'__len__'):
        step_size = [step_size]*n_calcs
    if not hasattr(n_step,'__len__'):
        n_step = [n_step]*n_calcs
    if not hasattr(conv_tol,'__len__'):
        conv_tol = [conv_tol]*n_calcs
    if not hasattr(chi,'__len__'):
        chi = [chi]*n_calcs
    # Create a random peps (if one is not provided)
    if peps is None:
        peps = PEPS(Nx=Nx,
                    Ny=Ny,
                    d=d,
                    D=D[0],
                    chi=chi[0],
                    chi_norm=chi_norm,
                    chi_op=chi_op,
                    Zn=Zn,
                    thermal=thermal,
                    backend=backend,
                    exact_norm_tol=exact_norm_tol,
                    norm_tol=norm_tol,
                    canonical=True,
                    singleLayer=singleLayer,
                    max_norm_iter=max_norm_iter,
                    dtype=dtype,
                    fname=peps_fname,
                    fdir=peps_fdir)
    # Loop over all (bond dims/step sizes/number of steps)
    # Each stage continues from the PEPS evolved in the previous stage.
    for Dind in range(len(D)):
        mpiprint(0,'\nSU Calculation for (D,chi,dt) = ({},{},{})'.format(D[Dind],chi[Dind],step_size[Dind]))
        # Do a tebd evolution for given step size
        E,peps = tebd_steps(peps,
                            ham,
                            D[Dind],
                            step_size[Dind],
                            n_step[Dind],
                            conv_tol[Dind],
                            chi = chi[Dind],
                            chi_norm=chi_norm,
                            chi_op=chi_op)
    # Print out results
    mpiprint(0,'\n\n'+'#'*50)
    mpiprint(0,'SU TEBD Complete')
    mpiprint(0,'-------------')
    mpiprint(0,'Total time = {} s'.format(time.time()-t0))
    mpiprint(0,'Per Site Energy = {}'.format(E/(Nx*Ny)))
    return E,peps
| 36.013298
| 108
| 0.518352
|
4a0525b4782ada53f72af5dcbe490ddab47f1fc2
| 5,615
|
py
|
Python
|
pyglet/gl/glu_info.py
|
pvcraven/pyglet
|
c967c4ab62b7e128107a64622fbfe0f029b10c52
|
[
"BSD-3-Clause"
] | null | null | null |
pyglet/gl/glu_info.py
|
pvcraven/pyglet
|
c967c4ab62b7e128107a64622fbfe0f029b10c52
|
[
"BSD-3-Clause"
] | null | null | null |
pyglet/gl/glu_info.py
|
pvcraven/pyglet
|
c967c4ab62b7e128107a64622fbfe0f029b10c52
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2020 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Information about version and extensions of current GLU implementation.
Usage::
from pyglet.gl import glu_info
if glu_info.have_extension('GLU_EXT_nurbs_tessellator'):
# ...
If multiple contexts are in use you can use a separate GLUInfo object for each
context. Call `set_active_context` after switching to the desired context for
each GLUInfo::
from pyglet.gl.glu_info import GLUInfo
info = GLUInfo()
info.set_active_context()
if info.have_version(1, 3):
# ...
Note that GLUInfo only returns meaningful information if a context has been
created.
"""
import warnings
from ctypes import c_char_p, cast
from pyglet.gl.glu import GLU_EXTENSIONS, GLU_VERSION, gluGetString
from pyglet.compat import asstr
class GLUInfo:
    """Information interface for the GLU library.

    A default instance is created automatically when the first OpenGL context
    is created; the module-level functions are a convenience for that
    instance's methods.

    When more than one context is in use, call `set_active_context` while the
    relevant context is current for this `GLUInfo` instance.
    """
    have_context = False
    version = '0.0'
    extensions = []
    _have_info = False

    def set_active_context(self):
        """Store information for the currently active context.

        This method is called automatically for the default context.
        """
        self.have_context = True
        if self._have_info:
            return
        # Query the driver once and cache the answers.
        raw_extensions = cast(gluGetString(GLU_EXTENSIONS), c_char_p).value
        raw_version = cast(gluGetString(GLU_VERSION), c_char_p).value
        self.extensions = asstr(raw_extensions).split()
        self.version = asstr(raw_version)
        self._have_info = True

    def have_version(self, major, minor=0):
        """Determine if a version of GLU is supported.

        :Parameters:
            `major` : int
                The major revision number (typically 1).
            `minor` : int
                The minor revision number.

        :rtype: bool
        :return: True if the requested or a later version is supported.
        """
        if not self.have_context:
            warnings.warn('No GL context created yet.')
        # Keep only the leading "major.minor[.release]" token of the version
        # string and pad it so three numeric components always exist.
        padded = '%s.0.0' % self.version.strip().split(' ', 1)[0]
        imajor, iminor, irelease = [int(part) for part in padded.split('.', 3)[:3]]
        return imajor > major or (imajor == major and iminor >= minor)

    def get_version(self):
        """Get the current GLU version.

        :return: the GLU version
        :rtype: str
        """
        if not self.have_context:
            warnings.warn('No GL context created yet.')
        return self.version

    def have_extension(self, extension):
        """Determine if a GLU extension is available.

        :Parameters:
            `extension` : str
                The name of the extension to test for, including its
                ``GLU_`` prefix.

        :return: True if the extension is provided by the implementation.
        :rtype: bool
        """
        if not self.have_context:
            warnings.warn('No GL context created yet.')
        return extension in self.extensions

    def get_extensions(self):
        """Get a list of available GLU extensions.

        :return: a list of the available extensions.
        :rtype: list of str
        """
        if not self.have_context:
            warnings.warn('No GL context created yet.')
        return self.extensions
# Single instance useful for apps with only a single context
# (or all contexts have the same GLU driver, a common case).
_glu_info = GLUInfo()

# Module-level conveniences that proxy the default instance; see the
# corresponding GLUInfo methods for documentation.
set_active_context = _glu_info.set_active_context
have_version = _glu_info.have_version
get_version = _glu_info.get_version
have_extension = _glu_info.have_extension
get_extensions = _glu_info.get_extensions
| 35.314465
| 95
| 0.666963
|
4a05266c07b0731d0ddc820a8f2cffa4684a8e4c
| 38
|
py
|
Python
|
proton/__init__.py
|
sergedroz/proton-python-client
|
4557a1c50706b8c66593524df0997fcc3f742f6f
|
[
"MIT"
] | 3
|
2020-10-22T03:06:59.000Z
|
2020-10-22T10:19:07.000Z
|
proton/__init__.py
|
umair-akbar/proton-python-client
|
5746825d9b134c430678c36d986dae624fe8a407
|
[
"MIT"
] | null | null | null |
proton/__init__.py
|
umair-akbar/proton-python-client
|
5746825d9b134c430678c36d986dae624fe8a407
|
[
"MIT"
] | null | null | null |
from .api import Session, ProtonError
| 19
| 37
| 0.815789
|
4a05275d3a3c4fc5297d04e0dcd37813e1de9a1f
| 19,925
|
py
|
Python
|
postgresqleu/invoices/views.py
|
bradfordboyle/pgeu-system
|
bbe70e7a94092c10f11a0f74fda23079532bb018
|
[
"MIT"
] | 11
|
2020-08-20T11:16:02.000Z
|
2022-03-12T23:25:04.000Z
|
postgresqleu/invoices/views.py
|
bradfordboyle/pgeu-system
|
bbe70e7a94092c10f11a0f74fda23079532bb018
|
[
"MIT"
] | 71
|
2019-11-18T10:11:22.000Z
|
2022-03-27T16:12:57.000Z
|
postgresqleu/invoices/views.py
|
bradfordboyle/pgeu-system
|
bbe70e7a94092c10f11a0f74fda23079532bb018
|
[
"MIT"
] | 18
|
2019-11-18T09:56:31.000Z
|
2022-01-08T03:16:43.000Z
|
from django.shortcuts import render, get_object_or_404
from django.forms.models import inlineformset_factory
from django.forms import ModelMultipleChoiceField
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.db.models import Q, Count, Max
from django.contrib import messages
from django.conf import settings
import base64
import io
from datetime import timedelta
from decimal import Decimal
from postgresqleu.util.auth import authenticate_backend_group
from postgresqleu.util.pagination import simple_pagination
from postgresqleu.util.request import get_int_or_error
from postgresqleu.util.time import today_global
from .models import Invoice, InvoiceRow, InvoiceHistory, InvoicePaymentMethod, VatRate
from .models import InvoiceRefund
from .forms import InvoiceForm, InvoiceRowForm, RefundForm
from .util import InvoiceWrapper, InvoiceManager, InvoicePresentationWrapper
from .payment import PaymentMethodWrapper
def paid(request):
    """List all finalized invoices that have been paid."""
    qs = Invoice.objects.filter(deleted=False, finalized=True, paidat__isnull=False)
    return _homeview(request, qs, paid=True)
def unpaid(request):
    """List all finalized invoices that are still awaiting payment."""
    qs = Invoice.objects.filter(deleted=False, finalized=True, paidat=None)
    return _homeview(request, qs, unpaid=True)
def pending(request):
    """List all invoices that have not yet been finalized."""
    qs = Invoice.objects.filter(deleted=False, finalized=False)
    return _homeview(request, qs, pending=True)
def deleted(request):
    """List all deleted invoices."""
    qs = Invoice.objects.filter(deleted=True)
    return _homeview(request, qs, deleted=True)
def _homeview(request, invoice_objects, unpaid=False, pending=False, deleted=False, paid=False, searchterm=None):
    """Shared renderer for the main invoice list views.

    All public list views funnel through here, so the permissions check for
    the whole invoice section lives in this one place.
    """
    authenticate_backend_group(request, 'Invoice managers')
    # Annotate each invoice with whether any refund exists for it.
    invoice_objects = invoice_objects.extra(select={
        'has_refund': 'EXISTS (SELECT 1 FROM invoices_invoicerefund r WHERE r.invoice_id=invoices_invoice.id)',
    })
    (invoices, paginator, page_range) = simple_pagination(request, invoice_objects, 50)
    context = {
        'invoices': invoices,
        'paid': paid,
        'unpaid': unpaid,
        'pending': pending,
        'deleted': deleted,
        'has_pending': Invoice.objects.filter(finalized=False).exists(),
        # NOTE(review): despite the name, this matches invoices with a paid
        # date set — confirm intent.
        'has_unpaid': Invoice.objects.filter(finalized=True, paidat__isnull=False).exists(),
        'searchterm': searchterm,
        'page_range': page_range,
        'breadcrumbs': [('/invoiceadmin/', 'Invoices'), ],
        'helplink': 'payment',
    }
    return render(request, 'invoices/home.html', context)
def search(request):
    """Search invoices by numeric id or by free text over recipient/title."""
    # Authenticate early, so we don't end up leaking information in case
    # the user shouldn't have it.
    authenticate_backend_group(request, 'Invoice managers')
    term = request.POST.get('term', request.GET.get('term', ''))
    if term.strip() == '':
        messages.error(request, "No search term specified")
        return HttpResponseRedirect('/invoiceadmin/')
    # A purely numeric term is treated as an invoice id lookup.
    try:
        invoiceid = int(term)
    except ValueError:
        invoiceid = None
    if invoiceid is not None:
        try:
            invoice = Invoice.objects.get(pk=invoiceid)
        except Invoice.DoesNotExist:
            messages.warning(request, "No invoice with id %s found." % invoiceid)
            return HttpResponseRedirect("/invoiceadmin/")
        return HttpResponseRedirect("/invoiceadmin/%s/" % invoice.id)
    # Free-text search over the recipient fields and the title.
    invoices = Invoice.objects.filter(Q(recipient_name__icontains=term) | Q(recipient_address__icontains=term) | Q(title__icontains=term))
    if len(invoices) == 0:
        messages.warning(request, "No invoice matching '%s' found." % term)
        return HttpResponseRedirect("/invoiceadmin/")
    if len(invoices) == 1:
        # Exactly one hit: jump straight to it.
        return HttpResponseRedirect("/invoiceadmin/%s/" % invoices[0].id)
    messages.info(request, "Showing %s search hits for %s" % (len(invoices), term))
    return _homeview(request, invoices, searchterm=term)
@transaction.atomic
def oneinvoice(request, invoicenum):
    """View, edit, create, delete, preview, or finalize a single invoice.

    ``invoicenum`` is either the literal string ``'new'`` (create) or a
    primary key. The same form handles all cases; which actions are
    available depends on ``request.POST['submit']`` and on whether the
    invoice is already finalized (finalized invoices only allow a limited
    set of field edits and no row edits).
    """
    authenticate_backend_group(request, 'Invoice managers')
    # Called to view an invoice, to edit one, and to create a new one,
    # since they're all based on the same model and form.
    if invoicenum == 'new':
        invoice = Invoice(
            invoicedate=today_global(),
            duedate=today_global() + timedelta(days=30),
        )
    else:
        invoice = get_object_or_404(Invoice, pk=invoicenum)

    def rowfield_callback(field, **kwargs):
        # Render row fields read-only/disabled once the invoice is finalized.
        f = field.formfield()
        if invoice.finalized and f:
            if type(f.widget).__name__ == 'TextInput':
                f.widget.attrs['readonly'] = "readonly"
            else:
                f.widget.attrs['disabled'] = True
        return f

    can_delete = not invoice.finalized
    InvoiceRowInlineFormset = inlineformset_factory(Invoice, InvoiceRow, InvoiceRowForm, can_delete=can_delete, formfield_callback=rowfield_callback)
    if request.method == 'POST':
        if request.POST['submit'] == 'Delete':
            # No need to validate before deleting. But we do a double check
            # that the invoice is really not finalized.
            if invoice.finalized:
                raise Exception("Cannot delete a finalized invoice!")
            invoiceid = invoice.id # Need to save this away since we delete it
            invoice.delete()
            messages.info(request, "Invoice %s deleted." % invoiceid)
            return HttpResponseRedirect('/invoiceadmin/')
        # Disabled SELECTs are not included in the POST. Therefore, we must copy the
        # data over for those fields.
        postcopy = request.POST.copy()
        if not invoicenum == 'new':
            for fld in ('accounting_account', 'accounting_object', ):
                if fld not in postcopy:
                    postcopy[fld] = getattr(invoice, fld)
        form = InvoiceForm(data=postcopy, instance=invoice)
        if form.instance.finalized:
            # Rows cannot change on a finalized invoice; bind no POST data.
            formset = InvoiceRowInlineFormset(instance=invoice)
        else:
            formset = InvoiceRowInlineFormset(data=postcopy, instance=invoice)
            # At least one invoice row is required.
            formset.forms[0].empty_permitted = False
        if form.is_valid():
            if formset.is_valid() or form.instance.finalized:
                if form.instance.finalized:
                    # When finalized, only a very limited set of fields can be
                    # edited. This doesn't include the invoice rows, so don't
                    # even bother to save the fieldset.
                    form.instance.save(update_fields=[fn for fn in form.available_in_finalized if not isinstance(form[fn].field, ModelMultipleChoiceField)])
                    # Sync the m2m allowedmethods relation by hand, since it
                    # was excluded from update_fields above.
                    for m in form.instance.allowedmethods.all():
                        if m not in form.cleaned_data['allowedmethods']:
                            form.instance.allowedmethods.remove(m)
                    for i in form.cleaned_data['allowedmethods']:
                        form.instance.allowedmethods.add(i)
                else:
                    # Need to set totalamount to something here, so it doesn't
                    # cause an exception. It'll get fixed when we finalize!
                    if not form.instance.finalized:
                        form.instance.total_amount = -1
                    form.save()
                    formset.save()
                if request.POST['submit'] == 'Finalize':
                    # Finalize this invoice. It's already been saved..
                    wrapper = InvoiceWrapper(form.instance)
                    wrapper.finalizeInvoice()
                elif request.POST['submit'] == 'Preview':
                    return HttpResponseRedirect("/invoiceadmin/%s/preview/" % form.instance.pk)
                return HttpResponseRedirect("/invoiceadmin/%s/" % form.instance.pk)
        # Else fall through
    else:
        form = InvoiceForm(instance=invoice)
        formset = InvoiceRowInlineFormset(instance=invoice)
    # Link back to the subsystem that generated this invoice, if any.
    if invoice.processor:
        manager = InvoiceManager()
        processor = manager.get_invoice_processor(invoice)
        adminurl = processor.get_admin_url(invoice)
    else:
        adminurl = None
    return render(request, 'invoices/invoiceform.html', {
        'form': form,
        'formset': formset,
        'invoice': invoice,
        'adminurl': adminurl,
        'currency_symbol': settings.CURRENCY_SYMBOL,
        'vatrates': VatRate.objects.all(),
        'breadcrumbs': [('/invoiceadmin/', 'Invoices'), ],
        'helplink': 'payment',
    })
def flaginvoice(request, invoicenum):
    """Manually flag an invoice as paid, with a given reason.

    Feeds a synthetic bank payment through the InvoiceManager so that
    whatever subsystem generated the invoice receives its normal payment
    callback. Uses manual transaction control: commit only on success.

    Fix: the log buffer was previously named ``str``, shadowing the builtin.
    """
    authenticate_backend_group(request, 'Invoice managers')
    transaction.set_autocommit(False)
    invoice = get_object_or_404(Invoice, pk=invoicenum)
    reason = request.POST.get('reason', '')
    if not reason:
        return HttpResponseForbidden("Can't flag an invoice without a reason!")
    # Manually flag an invoice. What we do is call the invoice manager
    # with a fake transaction info. The invoice manager will know to call
    # whatever submodule generated the invoice.
    mgr = InvoiceManager()
    log_buffer = io.StringIO()

    def payment_logger(msg):
        log_buffer.write(msg)

    (r, i, p) = mgr.process_incoming_payment(invoice.invoicestr,
                                             invoice.total_amount,
                                             reason,
                                             0,  # We assume this was a bank payment without cost
                                             settings.ACCOUNTING_MANUAL_INCOME_ACCOUNT,
                                             0,  # costaccount
                                             logger=payment_logger)
    if r != InvoiceManager.RESULT_OK:
        # It will always be a match (since we use invoicestr), but something
        # else can go wrong, so surface the captured log to the admin.
        transaction.rollback()
        return HttpResponse("Failed to process payment flagging:\n%s" % log_buffer.getvalue(),
                            content_type="text/plain")
    # The invoice manager will have flagged the invoice properly as well,
    # so we can just return the user right back
    transaction.commit()
    return HttpResponseRedirect("/invoiceadmin/%s/" % invoice.id)
@transaction.atomic
def cancelinvoice(request, invoicenum):
    """Cancel an invoice, notifying the originating subsystem via the manager."""
    authenticate_backend_group(request, 'Invoice managers')
    invoice = get_object_or_404(Invoice, pk=invoicenum)
    reason = request.POST.get('reason', '')
    if not reason:
        return HttpResponseForbidden("Can't cancel an invoice without a reason!")
    try:
        InvoiceManager().cancel_invoice(invoice, reason, request.user.username)
    except Exception as ex:
        # Surface the failure to the admin instead of a hard error page.
        messages.warning(request, "Failed to cancel: %s" % ex)
    return HttpResponseRedirect("/invoiceadmin/%s/" % invoice.id)
@transaction.atomic
def extend_cancel(request, invoicenum):
    """Push an invoice's auto-cancel time forward by ``?days=`` (default 5).

    Fix: narrowed the previous over-broad ``except Exception as e`` (with an
    unused binding) to the exceptions ``int()`` can actually raise.
    """
    authenticate_backend_group(request, 'Invoice managers')
    invoice = get_object_or_404(Invoice, pk=invoicenum)
    try:
        days = int(request.GET.get('days', 5))
    except (TypeError, ValueError):
        # Malformed input falls back to the default extension.
        days = 5
    invoice.canceltime += timedelta(days=days)
    invoice.save()
    # Record the change in the invoice's audit trail.
    InvoiceHistory(invoice=invoice, txt='Extended autocancel by {0} days to {1}'.format(days, invoice.canceltime)).save()
    return HttpResponseRedirect("/invoiceadmin/%s/" % invoice.id)
@transaction.atomic
def refundinvoice(request, invoicenum):
    """Issue a (possibly partial) refund for an invoice.

    GET renders the refund form; POST validates the amount/VAT and hands the
    refund to the InvoiceManager, which either auto-refunds (if the payment
    method supports it) or just flags it for manual processing.
    """
    authenticate_backend_group(request, 'Invoice managers')
    invoice = get_object_or_404(Invoice, pk=invoicenum)
    if request.method == 'POST':
        form = RefundForm(data=request.POST, invoice=invoice)
        if form.is_valid():
            # Do some sanity checking
            if form.cleaned_data['vatrate']:
                # VAT on the refunded amount, rounded to cents; must not
                # exceed what is still refundable on this invoice.
                vatamount = (Decimal(form.cleaned_data['amount']) * form.cleaned_data['vatrate'].vatpercent / Decimal(100)).quantize(Decimal('0.01'))
                if vatamount > invoice.total_refunds['remaining']['vatamount']:
                    messages.error(request, "Unable to refund, VAT amount mismatch!")
                    return HttpResponseRedirect('.')
            else:
                vatamount = 0
            mgr = InvoiceManager()
            # NOTE(review): the return value r is never inspected — confirm
            # refund_invoice raises on failure.
            r = mgr.refund_invoice(invoice,
                                   form.cleaned_data['reason'],
                                   Decimal(form.cleaned_data['amount']),
                                   vatamount,
                                   form.cleaned_data['vatrate'],
            )
            if invoice.can_autorefund:
                messages.info(request, "Refund initiated.")
            else:
                messages.info(request, "Refund flagged.")
            return HttpResponseRedirect(".")
    else:
        form = RefundForm(invoice=invoice)
    # Check if all invoicerows have the same VAT rate (NULL or specified)
    # NOTE(review): vinfo is computed but not passed to the template — confirm
    # whether it is still needed.
    vinfo = invoice.invoicerow_set.all().aggregate(n=Count('vatrate', distinct=True), v=Max('vatrate'))
    return render(request, 'invoices/refundform.html', {
        'form': form,
        'invoice': invoice,
        'breadcrumbs': [('/invoiceadmin/', 'Invoices'), ('/invoiceadmin/{0}/'.format(invoice.pk), 'Invoice #{0}'.format(invoice.pk)), ],
        'helplink': 'payment',
    })
def previewinvoice(request, invoicenum):
    """Render the invoice PDF on the fly (no stored PDF is assumed to exist)."""
    authenticate_backend_group(request, 'Invoice managers')
    invoice = get_object_or_404(Invoice, pk=invoicenum)
    response = HttpResponse(content_type='application/pdf')
    response.write(InvoiceWrapper(invoice).render_pdf_invoice(True))
    return response
@transaction.atomic
def emailinvoice(request, invoicenum):
    """Send (or re-send) an invoice email, either as the initial mail or as
    a reminder, depending on ``request.POST['reason']``.

    Bugfix: the method check previously did ``raise HttpResponse(...)`` —
    HttpResponse is not an exception, so that line crashed with a TypeError
    instead of returning the error response. It must be ``return``.
    """
    authenticate_backend_group(request, 'Invoice managers')
    if request.method != 'POST':
        return HttpResponse('Must be POST', status=401)
    if 'reason' not in request.POST:
        return HttpResponse('Reason is missing!', status=401)
    reason = request.POST['reason']
    if reason not in ('initial', 'reminder'):
        return HttpResponse('Invalid reason given!', status=401)
    invoice = get_object_or_404(Invoice, pk=invoicenum)
    if not invoice.finalized:
        return HttpResponse("Not finalized!", status=401)
    # Ok, it seems we're good to go...
    wrapper = InvoiceWrapper(invoice)
    if reason == 'initial':
        wrapper.email_invoice()
    else:
        # Validated above, so this must be 'reminder'.
        wrapper.email_reminder()
    return HttpResponse("OK")
# --------------------------------------------------------------------------
#
# Views that are viewable both by admins and end users
# (if they have permissions)
#
# --------------------------------------------------------------------------
@login_required
def viewinvoice(request, invoiceid):
    """Show a finalized invoice to its recipient (or to an invoice manager)."""
    invoice = get_object_or_404(Invoice, pk=invoiceid, deleted=False, finalized=True)
    if invoice.recipient_user != request.user:
        # End users can only view their own invoices, but invoice managers can view all
        authenticate_backend_group(request, 'Invoice managers')
    url = "%s/invoices/%s/" % (settings.SITEBASE, invoice.pk)
    return render(request, 'invoices/userinvoice.html', {
        'invoice': InvoicePresentationWrapper(invoice, url),
    })
def viewinvoice_secret(request, invoiceid, invoicesecret):
    """Show an invoice reached via its secret link (no login required)."""
    invoice = get_object_or_404(Invoice, pk=invoiceid, deleted=False, finalized=True, recipient_secret=invoicesecret)
    url = "%s/invoices/%s/%s/" % (settings.SITEBASE, invoice.pk, invoice.recipient_secret)
    return render(request, 'invoices/userinvoice.html', {
        'invoice': InvoicePresentationWrapper(invoice, url),
        'fromsecret': True,
    })
@login_required
def viewinvoicepdf(request, invoiceid):
    """Stream the stored invoice PDF to its recipient or an invoice manager."""
    invoice = get_object_or_404(Invoice, pk=invoiceid)
    if invoice.recipient_user != request.user:
        # End users can only view their own invoices, but invoice managers can view all
        authenticate_backend_group(request, 'Invoice managers')
    response = HttpResponse(content_type='application/pdf')
    response.write(base64.b64decode(invoice.pdf_invoice))
    return response
def viewinvoicepdf_secret(request, invoiceid, invoicesecret):
    """Stream the stored invoice PDF, authorized by the invoice secret."""
    invoice = get_object_or_404(Invoice, pk=invoiceid, recipient_secret=invoicesecret)
    response = HttpResponse(content_type='application/pdf')
    response.write(base64.b64decode(invoice.pdf_invoice))
    return response
@login_required
def viewreceipt(request, invoiceid):
    """Stream the stored receipt PDF to its recipient or an invoice manager."""
    invoice = get_object_or_404(Invoice, pk=invoiceid)
    if invoice.recipient_user != request.user:
        # End users can only view their own invoices, but invoice managers can view all
        authenticate_backend_group(request, 'Invoice managers')
    response = HttpResponse(content_type='application/pdf')
    response.write(base64.b64decode(invoice.pdf_receipt))
    return response
def viewreceipt_secret(request, invoiceid, invoicesecret):
invoice = get_object_or_404(Invoice, pk=invoiceid, recipient_secret=invoicesecret)
r = HttpResponse(content_type='application/pdf')
r.write(base64.b64decode(invoice.pdf_receipt))
return r
@login_required
def viewrefundnote(request, invoiceid, refundid):
invoice = get_object_or_404(Invoice, pk=invoiceid)
if invoice.recipient_user != request.user:
# End users can only view their own invoices, but invoice managers can view all
authenticate_backend_group(request, 'Invoice managers')
refund = get_object_or_404(InvoiceRefund, invoice=invoiceid, pk=refundid)
r = HttpResponse(content_type='application/pdf')
r.write(base64.b64decode(refund.refund_pdf))
return r
def viewrefundnote_secret(request, invoiceid, invoicesecret, refundid):
invoice = get_object_or_404(Invoice, pk=invoiceid, recipient_secret=invoicesecret)
refund = get_object_or_404(InvoiceRefund, invoice=invoice, pk=refundid)
r = HttpResponse(content_type='application/pdf')
r.write(base64.b64decode(refund.refund_pdf))
return r
@login_required
def userhome(request):
invoices = Invoice.objects.filter(recipient_user=request.user, deleted=False, finalized=True)
return render(request, 'invoices/userhome.html', {
'invoices': invoices,
})
def banktransfer(request):
if any(k not in request.GET for k in ('invoice', 'key', 'prv')):
return HttpResponse("Required parameter missing")
invoice = get_object_or_404(Invoice, pk=get_int_or_error(request.GET, 'invoice'), recipient_secret=request.GET['key'])
method = get_object_or_404(InvoicePaymentMethod, pk=get_int_or_error(request.GET, 'prv'))
wrapper = PaymentMethodWrapper(method, invoice)
return HttpResponse(wrapper.implementation.render_page(request, invoice))
@login_required
@transaction.atomic
def dummy_payment(request, invoiceid, invoicesecret):
if not settings.DEBUG:
return HttpResponse("Dummy payments not enabled")
invoice = get_object_or_404(Invoice, pk=invoiceid, recipient_secret=invoicesecret)
manager = InvoiceManager()
if invoice.processor:
processor = manager.get_invoice_processor(invoice)
returnurl = processor.get_return_url(invoice)
else:
returnurl = "%s/invoices/%s/" % (settings.SITEBASE, invoice.pk)
# We'll just cheat and use the Adyen account
manager.process_incoming_payment_for_invoice(invoice, invoice.total_amount, 'Dummy payment', 0, settings.ACCOUNTING_ADYEN_AUTHORIZED_ACCOUNT, 0, None, None, InvoicePaymentMethod.objects.get(classname='postgresqleu.util.payment.dummy.DummyPayment'))
return HttpResponseRedirect(returnurl)
| 39.691235
| 252
| 0.665245
|
4a05276a817543a45d6b97cd32d9497504ffae1c
| 1,916
|
py
|
Python
|
limix_qep/moments/poisson/poisson_moments.py
|
Horta/limix-qep
|
b7f537396efb5cf0911f1870469eb02f2657a3b8
|
[
"MIT"
] | null | null | null |
limix_qep/moments/poisson/poisson_moments.py
|
Horta/limix-qep
|
b7f537396efb5cf0911f1870469eb02f2657a3b8
|
[
"MIT"
] | null | null | null |
limix_qep/moments/poisson/poisson_moments.py
|
Horta/limix-qep
|
b7f537396efb5cf0911f1870469eb02f2657a3b8
|
[
"MIT"
] | null | null | null |
from __future__ import division
from numpy import arange
from numpy import sqrt
from numpy import log
from numpy import exp
from limix_math.special import normal_pdf
from limix_math.special import normal_logpdf
class PoissonMoments(object):
def __init__(self, nintervals):
super(PoissonMoments, self).__init__()
self._nintervals = nintervals
def compute(self, y, eta, tau, lmom0, mu, var):
from scipy.integrate import quad
normal_mu = eta / tau
normal_var = 1 / tau
for i in range(len(y)):
def int0(x):
mui = normal_mu[i]
sai = sqrt(normal_var[i])
n0 = normal_logpdf((x - mui) / sai) - log(sai)
n1 = y[i] * x - exp(x) - log(arange(1, y[i] + 1)).sum()
# print((n0, n1))
return exp(n0 + n1)
r = quad(int0, -30, 30)
assert r[1] < 1e-6
lmom0[i] = log(r[0])
for i in range(len(y)):
def int0(x):
mui = normal_mu[i]
sai = sqrt(normal_var[i])
n0 = normal_logpdf((x - mui) / sai) - log(sai)
n1 = y[i] * x - exp(x) - log(arange(1, y[i] + 1)).sum()
# print((n0, n1))
return x * exp(n0 + n1)
r = quad(int0, -30, 30)
assert r[1] < 1e-6
mu[i] = r[0]
for i in range(len(y)):
def int0(x):
mui = normal_mu[i]
sai = sqrt(normal_var[i])
n0 = normal_logpdf((x - mui) / sai) - log(sai)
n1 = y[i] * x - exp(x) - log(arange(1, y[i] + 1)).sum()
# print((n0, n1))
return x * x * exp(n0 + n1)
r = quad(int0, -30, 30)
assert r[1] < 1e-6
var[i] = r[0]
mu[:] = mu / exp(lmom0)
var[:] = var / exp(lmom0) - mu * mu
| 30.412698
| 71
| 0.463987
|
4a0528120f8d26557e2caccfda192303ef9897f5
| 2,581
|
py
|
Python
|
src/model.py
|
marekgalovic/ray-qmix-sc2
|
e999939c3a827d4c64b29b3f99eb0342a0708233
|
[
"Apache-2.0"
] | 4
|
2021-11-15T06:46:26.000Z
|
2022-02-22T06:31:45.000Z
|
src/model.py
|
marekgalovic/ray-qmix-sc2
|
e999939c3a827d4c64b29b3f99eb0342a0708233
|
[
"Apache-2.0"
] | null | null | null |
src/model.py
|
marekgalovic/ray-qmix-sc2
|
e999939c3a827d4c64b29b3f99eb0342a0708233
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn.functional as F
class AgentNet(torch.nn.Module):
def __init__(self, obs_dim, n_actions, hidden_dim, **kwargs):
super().__init__(**kwargs)
self._obs_dim = obs_dim
self._hidden_dim = hidden_dim
self._n_actions = n_actions
self.l1 = torch.nn.Linear(obs_dim, hidden_dim)
self.l2 = torch.nn.Linear(hidden_dim, n_actions)
self.rnn = torch.nn.GRUCell(hidden_dim, hidden_dim)
def forward(self, obs, rnn_state):
bs, n_agents, obs_dim = obs.shape
obs = obs.view((bs * n_agents, obs_dim))
rnn_state = rnn_state.view(bs * n_agents, self._hidden_dim)
y = self.l1(obs)
rnn_state = self.rnn(y, rnn_state)
q = self.l2(rnn_state)
return (
q.view((bs, n_agents, self._n_actions)),
rnn_state.view((bs, n_agents, self._hidden_dim))
)
class VDN(torch.nn.Module):
def forward(self, states, agent_qs):
return torch.sum(agent_qs, -1)
class MonotonicHyperNetLayer(torch.nn.Module):
def __init__(self, in_dim, state_dim, out_dim, hyper_hidden_dim, final_bias=False, **kwargs):
super().__init__(**kwargs)
self._in_dim = in_dim
self._out_dim = out_dim
self.W = torch.nn.Sequential(
torch.nn.Linear(state_dim, hyper_hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hyper_hidden_dim, in_dim * out_dim)
)
if final_bias:
self.b = torch.nn.Sequential(
torch.nn.Linear(state_dim, in_dim),
torch.nn.ReLU(),
torch.nn.Linear(in_dim, out_dim)
)
else:
self.b = torch.nn.Linear(state_dim, out_dim)
def forward(self, input, hyper_input):
w = torch.abs(self.W(hyper_input).reshape((hyper_input.size(0), self._out_dim, self._in_dim)))
b = self.b(hyper_input)
return torch.einsum('ikj,ij->ik', w, input) + b
class QMIX(torch.nn.Module):
def __init__(self, state_dim=3, n_agents=2, hidden_dim=8, hypernet_hidden_dim=8, **kwargs):
super().__init__(**kwargs)
self._n_agents = n_agents
self._hidden_dim = hidden_dim
self.l1 = MonotonicHyperNetLayer(n_agents, state_dim, hidden_dim, hypernet_hidden_dim)
self.l2 = MonotonicHyperNetLayer(hidden_dim, state_dim, 1, hypernet_hidden_dim, final_bias=True)
def forward(self, states, agent_qs):
y = F.elu(self.l1(agent_qs, states))
y = self.l2(y, states)
return y.reshape((states.size(0),))
| 30.364706
| 104
| 0.622239
|
4a05286380eb2b421e337bff8ec53a97de4a4d25
| 40
|
py
|
Python
|
voxblox_tango_interface/python/voxblox_tango_interface/__init__.py
|
ethz-asl/voxblox_deprecated
|
9833ea48584e5189e3edfa33f18bf536f64b1488
|
[
"BSD-3-Clause"
] | 2
|
2021-03-24T03:40:17.000Z
|
2021-05-17T10:23:20.000Z
|
voxblox_tango_interface/python/voxblox_tango_interface/__init__.py
|
WANG-KX/voxblox
|
915410e8d6ce77300a8118419cb919176c9100b9
|
[
"BSD-3-Clause"
] | 1
|
2018-09-18T13:56:48.000Z
|
2018-09-18T13:56:48.000Z
|
voxblox_tango_interface/python/voxblox_tango_interface/__init__.py
|
WANG-KX/voxblox
|
915410e8d6ce77300a8118419cb919176c9100b9
|
[
"BSD-3-Clause"
] | 1
|
2018-04-18T19:34:48.000Z
|
2018-04-18T19:34:48.000Z
|
from voxblox_tango_interfacepy import *
| 20
| 39
| 0.875
|
4a0528902e979386cf34ad7d8213f445233f1418
| 7,752
|
py
|
Python
|
fluent.syntax/fluent/syntax/serializer.py
|
olleolleolle/python-fluent
|
9730d3f90a4bff7f43614d85b5c9e20205c10d3b
|
[
"Apache-2.0"
] | null | null | null |
fluent.syntax/fluent/syntax/serializer.py
|
olleolleolle/python-fluent
|
9730d3f90a4bff7f43614d85b5c9e20205c10d3b
|
[
"Apache-2.0"
] | null | null | null |
fluent.syntax/fluent/syntax/serializer.py
|
olleolleolle/python-fluent
|
9730d3f90a4bff7f43614d85b5c9e20205c10d3b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from . import ast
def indent(content):
return " ".join(
content.splitlines(True)
)
def includes_new_line(elem):
return isinstance(elem, ast.TextElement) and "\n" in elem.value
def is_select_expr(elem):
return (
isinstance(elem, ast.Placeable) and
isinstance(elem.expression, ast.SelectExpression))
class FluentSerializer(object):
HAS_ENTRIES = 1
def __init__(self, with_junk=False):
self.with_junk = with_junk
def serialize(self, resource):
if not isinstance(resource, ast.Resource):
raise Exception('Unknown resource type: {}'.format(type(resource)))
state = 0
parts = []
for entry in resource.body:
if not isinstance(entry, ast.Junk) or self.with_junk:
parts.append(self.serialize_entry(entry, state))
if not state & self.HAS_ENTRIES:
state |= self.HAS_ENTRIES
return "".join(parts)
def serialize_entry(self, entry, state=0):
if isinstance(entry, ast.Message):
return serialize_message(entry)
if isinstance(entry, ast.Term):
return serialize_term(entry)
if isinstance(entry, ast.Comment):
if state & self.HAS_ENTRIES:
return "\n{}\n".format(serialize_comment(entry, "#"))
return "{}\n".format(serialize_comment(entry, "#"))
if isinstance(entry, ast.GroupComment):
if state & self.HAS_ENTRIES:
return "\n{}\n".format(serialize_comment(entry, "##"))
return "{}\n".format(serialize_comment(entry, "##"))
if isinstance(entry, ast.ResourceComment):
if state & self.HAS_ENTRIES:
return "\n{}\n".format(serialize_comment(entry, "###"))
return "{}\n".format(serialize_comment(entry, "###"))
if isinstance(entry, ast.Junk):
return serialize_junk(entry)
raise Exception('Unknown entry type: {}'.format(type(entry)))
def serialize_expression(self, expr):
return serialize_expression(expr)
def serialize_comment(comment, prefix="#"):
prefixed = "\n".join([
prefix if len(line) == 0 else "{} {}".format(prefix, line)
for line in comment.content.splitlines(False)
])
# Add the trailing line break.
return '{}\n'.format(prefixed)
def serialize_junk(junk):
return junk.content
def serialize_message(message):
parts = []
if message.comment:
parts.append(serialize_comment(message.comment))
parts.append("{} =".format(message.id.name))
if message.value:
parts.append(serialize_value(message.value))
if message.attributes:
for attribute in message.attributes:
parts.append(serialize_attribute(attribute))
parts.append("\n")
return ''.join(parts)
def serialize_term(term):
parts = []
if term.comment:
parts.append(serialize_comment(term.comment))
parts.append("-{} =".format(term.id.name))
parts.append(serialize_value(term.value))
if term.attributes:
for attribute in term.attributes:
parts.append(serialize_attribute(attribute))
parts.append("\n")
return ''.join(parts)
def serialize_attribute(attribute):
return "\n .{} ={}".format(
attribute.id.name,
indent(serialize_value(attribute.value))
)
def serialize_value(value):
if isinstance(value, ast.Pattern):
return serialize_pattern(value)
if isinstance(value, ast.VariantList):
return serialize_variant_list(value)
raise Exception('Unknown value type: {}'.format(type(value)))
def serialize_pattern(pattern):
content = "".join([
serialize_element(elem)
for elem in pattern.elements])
start_on_new_line = any(
includes_new_line(elem) or is_select_expr(elem)
for elem in pattern.elements)
if start_on_new_line:
return '\n {}'.format(indent(content))
return ' {}'.format(content)
def serialize_variant_list(varlist):
content = "".join([
serialize_variant(variant)
for variant in varlist.variants])
return '\n {{{}\n }}'.format(indent(content))
def serialize_variant(variant):
return "\n{}[{}]{}".format(
" *" if variant.default else " ",
serialize_variant_key(variant.key),
indent(serialize_value(variant.value))
)
def serialize_element(element):
if isinstance(element, ast.TextElement):
return element.value
if isinstance(element, ast.Placeable):
return serialize_placeable(element)
raise Exception('Unknown element type: {}'.format(type(element)))
def serialize_placeable(placeable):
expr = placeable.expression
if isinstance(expr, ast.Placeable):
return "{{{}}}".format(serialize_placeable(expr))
if isinstance(expr, ast.SelectExpression):
# Special-case select expressions to control the withespace around the
# opening and the closing brace.
return "{{ {}}}".format(serialize_select_expression(expr))
if isinstance(expr, ast.Expression):
return "{{ {} }}".format(serialize_expression(expr))
def serialize_expression(expression):
if isinstance(expression, ast.StringLiteral):
return '"{}"'.format(expression.raw)
if isinstance(expression, ast.NumberLiteral):
return expression.value
if isinstance(expression, ast.MessageReference):
return expression.id.name
if isinstance(expression, ast.FunctionReference):
return expression.id.name
if isinstance(expression, ast.TermReference):
return '-{}'.format(expression.id.name)
if isinstance(expression, ast.VariableReference):
return '${}'.format(expression.id.name)
if isinstance(expression, ast.AttributeExpression):
return serialize_attribute_expression(expression)
if isinstance(expression, ast.VariantExpression):
return serialize_variant_expression(expression)
if isinstance(expression, ast.CallExpression):
return serialize_call_expression(expression)
if isinstance(expression, ast.SelectExpression):
return serialize_select_expression(expression)
if isinstance(expression, ast.Placeable):
return serialize_placeable(expression)
raise Exception('Unknown expression type: {}'.format(type(expression)))
def serialize_select_expression(expr):
parts = []
selector = "{} ->".format(
serialize_expression(expr.selector))
parts.append(selector)
for variant in expr.variants:
parts.append(serialize_variant(variant))
parts.append("\n")
return "".join(parts)
def serialize_attribute_expression(expr):
return "{}.{}".format(
serialize_expression(expr.ref),
expr.name.name,
)
def serialize_variant_expression(expr):
return "{}[{}]".format(
serialize_expression(expr.ref),
serialize_variant_key(expr.key),
)
def serialize_call_expression(expr):
callee = serialize_expression(expr.callee)
positional = ", ".join(
serialize_expression(arg) for arg in expr.positional)
named = ", ".join(
serialize_named_argument(arg) for arg in expr.named)
if len(expr.positional) > 0 and len(expr.named) > 0:
return '{}({}, {})'.format(callee, positional, named)
return '{}({})'.format(callee, positional or named)
def serialize_named_argument(arg):
return "{}: {}".format(
arg.name.name,
serialize_expression(arg.value)
)
def serialize_variant_key(key):
if isinstance(key, ast.Identifier):
return key.name
else:
return serialize_expression(key)
| 30.163424
| 79
| 0.653638
|
4a05289a5a8419799ba562eea00de25afd9b6fb2
| 1,048
|
py
|
Python
|
pe-solution/src/main/python/pep_719.py
|
filippovitale/pe
|
b036acb164bc0efce18299341b04a7acf226c7db
|
[
"MIT"
] | null | null | null |
pe-solution/src/main/python/pep_719.py
|
filippovitale/pe
|
b036acb164bc0efce18299341b04a7acf226c7db
|
[
"MIT"
] | 6
|
2021-08-16T05:50:40.000Z
|
2021-08-16T05:50:41.000Z
|
pe-solution/src/main/python/pep_719.py
|
filippovitale/pe
|
b036acb164bc0efce18299341b04a7acf226c7db
|
[
"MIT"
] | null | null | null |
from functools import cache
from math import sqrt
tot, miss = 0, 0
@cache
def is_split_and_addable(remaining: int, expected: int):
global miss
global tot
miss += 1
remaining_digits = str(remaining)
for i in range(1, len(remaining_digits)):
left_split = int(remaining_digits[:i])
right_split = int(remaining_digits[i:])
r = right_split
e = expected - left_split
if r == e:
return True
if e < 0:
return False
if r < e:
continue
tot += 1
if is_split_and_addable(r, e):
return True
return False
def calculate_t(n: int):
global tot
result = 0
for sq_n in range(2, int(sqrt(n)) + 1):
n = sq_n * sq_n
tot += 1
if is_split_and_addable(n, sq_n):
result += n
return result
if __name__ == "__main__":
assert calculate_t(10 ** 4) == 41333
print(calculate_t(10 ** 12))
# hit ratio: 26%
# print(f"hit ratio: {(1 - (miss / tot)) * 100:.0f}%")
| 22.297872
| 58
| 0.557252
|
4a0529c741ad857293e064fb5b35a7721c81c5c9
| 106,592
|
py
|
Python
|
circles/__init__.py
|
Mrjoulin/exam-project
|
9796b4de0344a66264093579cd6643735b8d6e68
|
[
"MIT"
] | null | null | null |
circles/__init__.py
|
Mrjoulin/exam-project
|
9796b4de0344a66264093579cd6643735b8d6e68
|
[
"MIT"
] | null | null | null |
circles/__init__.py
|
Mrjoulin/exam-project
|
9796b4de0344a66264093579cd6643735b8d6e68
|
[
"MIT"
] | null | null | null |
import sys
import sip
import random
import smtplib as smtp
from PIL import Image, ImageDraw, ImageFont
import logging
from functools import partial
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import Qt, pyqtSlot, QEvent
from circles.utils.utils import *
from circles.db.db import *
logging.basicConfig(
format='[%(filename)s:%(lineno)s - %(funcName)20s()]%(levelname)s:%(name)s:%(message)s',
level=logging.INFO
)
user_information = {'success': False, 'payload': {}}
NUMBER_OF_TASKS_9_CLASS = 15
NUMBER_OF_TASKS_10_CLASS = 9
EULERO_EMAIL = 'eulerocircles@gmail.com'
EULERO_PASSWORD = 'xsw2zaq1'
class Main(QWidget):
def __init__(self):
super().__init__()
# Background RGB
self.backgroundRad = 255
self.backgroundGreen = 255 # 181
self.backgroundBlue = 255 # 100
# link clicked
self.link_clicked = False
# Start
logging.info('Start Welcome window')
self.initUI()
def initUI(self):
self.setAutoFillBackground(True)
p = self.palette()
p.setColor(self.backgroundRole(), QColor(self.backgroundRad, self.backgroundGreen, self.backgroundBlue))
self.setPalette(p)
logging.info(f'Set background rgb{self.backgroundRad, self.backgroundGreen, self.backgroundBlue}')
self.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))
self.welcome_window()
# TODO Redesign the app
# TODO Create a design for other windows
self.adjustSize()
self.setGeometry(self.frameGeometry())
self.move(150, 150)
self.setWindowTitle('Euler circles')
self.show()
def welcome_window(self):
grid = QGridLayout()
grid.setSpacing(20)
photo = QLabel(self)
namePhoto = 'photo/MainWindowCircles.png'
pixmap = QPixmap(namePhoto)
pixmap2 = pixmap.scaled(550, 550, Qt.KeepAspectRatio)
photo.setPixmap(pixmap2)
logging.info(f"Add photo '{namePhoto}' in welcome window")
buttons = QHBoxLayout()
buttons.setSpacing(20)
nameBattons = ['Зарегистрироваться', 'Войти', 'Пропустить']
for name in nameBattons:
btn = QPushButton(name, self)
btn.setStyleSheet("background-color: rgb(223, 209, 21)")
btn.clicked.connect(self.welcome_button_click)
buttons.addWidget(btn, 0, Qt.AlignCenter)
#buttons.setAlignment(Qt.AlignTop)
info = QGridLayout()
info.setSpacing(10)
infoTxt = ['Здравствуй!',
'Это приложение содержит материал\nдля проверки ваших умений решать\nзадачи с применением кругов Эйлера',
'Войдите в свой аккаунт, или зарегистрируйте новый']
positions = [(i, j) for i in range(3) for j in range(1)]
fonts = [
[QFontDatabase.addApplicationFont('fonts/Montserrat-Medium.ttf'), "Montserrat Medium"],
[QFontDatabase.addApplicationFont('fonts/Montserrat-Bold.ttf'), "Montserrat Bold"]
]
logging.info(f'Set fonts in app: {fonts}')
#font, ok = QFontDialog.getFont()
#if ok: print(font.toString())
buttons_block = QVBoxLayout()
buttons_block.setSpacing(10)
for position, name in zip(positions, infoTxt):
label = QLabel(name, self)
label.setAlignment(Qt.AlignCenter)
font = QFont()
if position[0] == 0:
font.setFamily("Montserrat Bold")
font.setPointSize(24)
font.setBold(True)
label.setFont(font)
info.addWidget(label, *position)
elif position[0] == 1:
font.setFamily("Montserrat Medium")
font.setPointSize(18)
label.setFont(font)
info.addWidget(label, *position)
else:
label.setAlignment(Qt.AlignCenter)
font.setFamily("Montserrat Medium")
font.setPointSize(12)
label.setFont(font)
buttons_block.addWidget(label)
buttons_block.addLayout(buttons)
info.addLayout(buttons_block, 3, 0)
grid.addWidget(photo, 0, 0)
grid.addItem(info, 0, 1)
if self.layout() is not None:
self.delete_items_of_layout(self.layout())
sip.delete(self.layout())
logging.info('Set layout in welcome window')
self.setLayout(grid)
@pyqtSlot()
def welcome_button_click(self):
sender = self.sender()
logging.info(f"The '{sender.text()}' button was pressed")
if sender.text() == 'Войти':
self.login()
elif sender.text() == 'Зарегистрироваться':
self.sign_up()
else:
self.menu()
def sign_up(self):
self.delete_items_of_layout(self.layout())
if self.layout() is not None:
sip.delete(self.layout())
logging.info('Sign up window started')
self.init_sign()
self.adjustSize()
self.setGeometry(450, 300, 500, 300)
self.setWindowTitle('Sign in')
self.show()
def init_sign(self):
title = QLabel('Пожалуйста, введите свои данные:')
name = QLabel('Имя')
surname = QLabel('Фамилия')
patronymic = QLabel('Отчество')
email = QLabel('Email')
password = QLabel('Пароль( > 6 символов)')
repeatPassword = QLabel('Повторите пароль')
title.setAlignment(Qt.AlignCenter)
title.setFont(QFont("Montserrat Bold", 18))
name.setFont(QFont('Montserrat Medium'))
surname.setFont(QFont('Montserrat Medium'))
patronymic.setFont(QFont("Montserrat Medium"))
email.setFont(QFont('Montserrat Medium'))
password.setFont(QFont('Montserrat Medium'))
repeatPassword.setFont(QFont('Montserrat Medium'))
self.nameRegistrationEdit = QLineEdit(self)
self.surnameRegistrationEdit = QLineEdit(self)
self.patronymicRegistrationEdit = QLineEdit(self)
self.patronymicRegistrationEdit.setPlaceholderText('Не обязательное поле')
self.emailRegistrationEdit = QLineEdit(self)
self.passwordRegistrationEdit = QLineEdit(self)
self.passwordRegistrationEdit.setEchoMode(QLineEdit.Password)
self.repeatPasswordRegistrationEdit = QLineEdit(self)
self.repeatPasswordRegistrationEdit.setEchoMode(QLineEdit.Password)
self.checkBox = QCheckBox('Даю согласие на обработку персональных данных', self)
cancelButton = QPushButton('Отмена', self)
continueButton = QPushButton('Продолжить', self)
cancelButton.clicked.connect(self.sign_up_button_click)
continueButton.clicked.connect(self.sign_up_button_click)
info = [
[title],
[name, self.nameRegistrationEdit],
[surname, self.surnameRegistrationEdit],
[patronymic, self.patronymicRegistrationEdit],
[email, self.emailRegistrationEdit],
[password, self.passwordRegistrationEdit],
[repeatPassword, self.repeatPasswordRegistrationEdit],
[self.checkBox],
[cancelButton, continueButton]
]
layout = QVBoxLayout(self)
layout.setSpacing(20)
for item in info:
hbox = QHBoxLayout()
hbox.setSpacing(20)
for value in item:
hbox.addWidget(value)
layout.addLayout(hbox)
logging.info('Set layout in sign up')
self.setLayout(layout)
@pyqtSlot()
def sign_up_button_click(self):
sender = self.sender()
logging.info(f"The '{sender.text()}' button was pressed")
if sender.text() == 'Отмена':
self.initUI()
else:
logging.info('Data checking')
alph = 'абвгдеёжзийклмнопрстуфхцчшщыьъэюя'
error_status = False
if self.nameRegistrationEdit.text() == '' or self.surnameRegistrationEdit.text() == '' \
or self.emailRegistrationEdit.text() == '':
self.on_error('Пожалуйста, заполните все поля')
return
for letter in self.nameRegistrationEdit.text().lower():
if letter not in alph or letter.isdigit():
self.on_error('Имя должно состоять только из букв русского алфавита')
self.nameRegistrationEdit.setStyleSheet("background-color: rgb(255, 50, 50)")
error_status = True
for letter in self.surnameRegistrationEdit.text().lower():
if letter not in alph or letter.isdigit():
self.on_error('Фамилия должна состоять только из букв русского алфавита')
self.surnameRegistrationEdit.setStyleSheet("background-color: rgb(255, 50, 50)")
error_status = True
for letter in self.patronymicRegistrationEdit.text().lower():
if letter not in alph or letter.isdigit():
self.on_error('Отчество должно состоять только из букв русского алфавита')
self.patronymicRegistrationEdit.setStyleSheet("background-color: rgb(255, 50, 50)")
error_status = True
try:
if is_registered_email(self.emailRegistrationEdit.text()):
self.on_error('Ваш адрес электронной почты уже зарегистрирован')
self.emailRegistrationEdit.setStyleSheet("background-color: rgb(255, 50, 50)")
error_status = True
except Exception as e:
self.on_exception(e)
if '@' not in self.emailRegistrationEdit.text() or '.' not in self.emailRegistrationEdit.text():
self.on_error('Пожалуйста, указывайте ваш действительный почтовый адрес')
self.emailRegistrationEdit.setStyleSheet("background-color: rgb(255, 50, 50)")
error_status = True
if len(self.passwordRegistrationEdit.text()) < 6:
self.on_error('Ваш пароль слишком короткий!')
self.passwordRegistrationEdit.setStyleSheet("background-color: rgb(255, 50, 50)")
error_status = True
if self.passwordRegistrationEdit.text() != self.repeatPasswordRegistrationEdit.text():
self.on_error('Пароли не совпадают!')
self.repeatPasswordRegistrationEdit.setStyleSheet("background-color: rgb(255, 50, 50)")
error_status = True
if not self.checkBox.isChecked():
self.on_error('Пожалуйста, примите соглашение на обработку персональных данных')
return
# If there was a error
if error_status:
self.passwordRegistrationEdit.clear()
self.repeatPasswordRegistrationEdit.clear()
return
self.delete_items_of_layout(self.layout())
try:
waitMessage = QLabel('Пожалуйста, подождите немного, идёт загрузка данных', self)
waitMessage.setAlignment(Qt.AlignCenter)
waitMessage.setFont(QFont("Montserrat Bold", 20))
self.layout().addWidget(waitMessage)
logging.info('Set data in database')
user_information['payload'] = add_user(name=self.nameRegistrationEdit.text(),
surname=self.surnameRegistrationEdit.text(),
patronymic=self.patronymicRegistrationEdit.text(),
email=self.emailRegistrationEdit.text(),
password=self.passwordRegistrationEdit.text())
user_information['success'] = True
self.menu()
except Exception as e:
self.on_exception(e)
@pyqtSlot()
def error_button(self):
self.__init__()
def login(self):
self.delete_items_of_layout(self.layout())
if self.layout() is not None:
sip.delete(self.layout())
logging.info('Login window started')
self.init_login()
self.adjustSize()
self.setGeometry(450, 300, 500, 300)
self.setWindowTitle('Login')
self.show()
def init_login(self):
title = QLabel('Введите свой email и пароль,\n чтобы войти в аккаунт')
email = QLabel('Email')
password = QLabel('Пароль')
title.setAlignment(Qt.AlignCenter)
fontTitle = QFont("Montserrat Medium", 20)
fontTitle.setBold(True)
title.setFont(fontTitle)
email.setFont(QFont('Montserrat Medium'))
password.setFont(QFont('Montserrat Medium'))
self.emailLoginEdit = QLineEdit(self)
self.passwordLoginEdit = QLineEdit(self)
self.passwordLoginEdit.setEchoMode(QLineEdit.Password)
cancelButton = QPushButton('Отмена', self)
continueButton = QPushButton('Продолжить', self)
cancelButton.clicked.connect(self.login_button_click)
continueButton.clicked.connect(self.login_button_click)
info = [
[title],
[email, self.emailLoginEdit],
[password, self.passwordLoginEdit],
[cancelButton, continueButton]
]
layout = QVBoxLayout(self)
layout.setSpacing(20)
for item in info:
hbox = QHBoxLayout()
hbox.setSpacing(20)
for value in item:
hbox.addWidget(value)
layout.addLayout(hbox)
logging.info('Set layout in login')
self.setLayout(layout)
@pyqtSlot()
def login_button_click(self):
sender = self.sender()
logging.info(f"The '{sender.text()}' button was pressed")
if sender.text() == 'Отмена':
self.initUI()
else:
if self.emailLoginEdit.text() == '' or self.passwordLoginEdit.text() == '':
self.on_error('Все поля, должны быть заполнены!')
return
logging.info('Check user info')
try:
status = check_user(self.emailLoginEdit.text(), self.passwordLoginEdit.text())
logging.info('Check status - ' + str(bool(status)))
if status is not None:
user_information['success'] = True
user_information['payload'] = status
self.menu()
else:
self.on_error('Неверный логин и/или пароль!')
self.passwordLoginEdit.clear()
return
except Exception as e:
self.on_exception(e)
def menu(self):
self.delete_items_of_layout(self.layout())
if self.layout() is not None:
sip.delete(self.layout())
logging.info('Menu window started')
self.init_menu()
self.setAutoFillBackground(True)
p = self.palette()
p.setColor(self.backgroundRole(), QColor(self.backgroundRad, self.backgroundGreen, self.backgroundBlue))
self.setPalette(p)
logging.info(f'Set background rgb{self.backgroundRad, self.backgroundGreen, self.backgroundBlue}')
self.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))
self.adjustSize()
self.setGeometry(300, 200, 600, 300)
self.setWindowTitle('Menu')
self.show()
def init_menu(self):
user_status_seperator = QLabel(' | ')
if user_information['success']:
info = user_information['payload']
user_status_sign = QLabel("Вход выполнен:")
user_status_name = QLabel(info['surname'] + ' ' + info['name'] + ' ' + info['patronymic'])
user_status_email = QLabel(info['email'])
user_status_name.setStyleSheet("text-decoration: underline; color: blue;")
user_status_email.setStyleSheet("text-decoration: underline; color: blue;")
user_status_name.mousePressEvent = self.user_exit
user_status_email.mousePressEvent = self.user_exit
user_status = [user_status_sign, user_status_name, user_status_seperator, user_status_email]
else:
user_status_sign = QLabel('Зарегистрироваться')
user_status_login = QLabel('Войти')
user_status_sign.setStyleSheet("text-decoration: underline; color: blue;")
user_status_login.setStyleSheet("text-decoration: underline; color: blue;")
user_status_sign.mousePressEvent = self.mouse_press_event_sign_up
user_status_login.mousePressEvent = self.mouse_press_event_login
user_status = [user_status_sign, user_status_seperator, user_status_login]
user_profile = QHBoxLayout()
user_profile.setSpacing(10)
user_profile.addStretch(1)
for item in user_status:
item.setFont(QFont('Montserrat Medium', 12))
item.adjustSize()
user_profile.addWidget(item)
user_profile.sizeHint()
teacher_option = QLabel('Для решения варианта учителя,\nвведите номер варианта')
create_option = QLabel('Вы можете создать свой вариант')
auth_tests = QLabel('Или выберете нужный класс для\nтренировки решения задач')
for item in [teacher_option, create_option, auth_tests]:
item.setFont(QFont('Montserrat Medium', 16))
item.setAlignment(Qt.AlignTop)
create_option.setAlignment(Qt.AlignCenter)
self.teacher_option_edit = QLineEdit(self)
teacher_option_ok_button = QPushButton('ОК', self)
teacher_option_ok_button.setStyleSheet("background-color: rgb(223, 209, 21)")
teacher_option_ok_button.clicked.connect(self.get_teacher_option_button_click)
teacher_option_widgets = QHBoxLayout()
teacher_option_widgets.addWidget(self.teacher_option_edit)
teacher_option_widgets.addWidget(teacher_option_ok_button)
create_option_button = QPushButton('Создать', self)
create_option_button.setStyleSheet("background-color: rgb(223, 209, 21)")
create_option_button.clicked.connect(self.create_teacher_option)
topics_for_6_7_classes = QPushButton('6-7 класс', self)
topics_for_8_9_classes = QPushButton('8-9 класс', self)
topics_for_10_11_classes = QPushButton('10-11 класс', self)
for item in [topics_for_6_7_classes, topics_for_8_9_classes, topics_for_10_11_classes]:
item.setStyleSheet("background-color: rgb(223, 209, 21)")
item.clicked.connect(self.task_button_click)
info = [
[
[user_profile]
],
[
[teacher_option, teacher_option_widgets],
[auth_tests]
],
[
[create_option, create_option_button],
[topics_for_6_7_classes, topics_for_8_9_classes, topics_for_10_11_classes]
]
]
layout = QVBoxLayout(self)
for items in info:
hbox = QHBoxLayout()
hbox.setSpacing(40)
for item in items:
vbox = QVBoxLayout()
vbox.setSpacing(20)
for val in item:
if val.isWidgetType():
vbox.addWidget(val)
else:
vbox.addLayout(val)
hbox.addLayout(vbox)
layout.addLayout(hbox)
logging.info('Sey layout in menu')
self.setLayout(layout)
def user_exit(self, event):
    """Ask for confirmation, then log the current user out.

    On confirmation the in-memory session (``user_information``) is reset
    and the main menu is shown again; otherwise nothing changes.
    """
    logging.info('User exit question')
    answer = QMessageBox.question(
        self, 'Message',
        "Вы уверены, что хотите выйти из аккаунта?",
        QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
    if answer != QMessageBox.Yes:
        logging.info('User answer - NO')
        return
    logging.info('User answer - YES')
    logging.info('User exit')
    # Drop the session payload so the UI falls back to the anonymous state.
    user_information['success'] = False
    user_information['payload'] = {}
    self.menu()
def mouse_press_event_sign_up(self, event):
    # Click handler for the "sign up" label: open the registration form.
    self.sign_up()
def mouse_press_event_login(self, event):
    # Click handler for the "login" label: open the login form.
    self.login()
def get_teacher_option_button_click(self):
    """Look up teacher-created options by the query in the line edit.

    Requires a logged-in user.  On success the matches are stored in
    ``self.options`` and the results screen is shown; lookup failures and
    empty results are reported via the error dialog.
    """
    if not user_information['success']:
        self.on_error('Для испольования данной функции\nВы должны быть зарегестрированны')
        return
    try:
        found = get_teacher_option(self.teacher_option_edit.text())
    except Exception as e:
        self.on_exception(e)
        return
    if not found:
        self.on_error('Введённого вами варианта не существует!')
        return
    self.options = found
    self.get_stack_teacher_tasks()
def get_stack_teacher_tasks(self):
    """Render the list of teacher options matched by the search query.

    One row per option in ``self.options``; whichever row attribute
    matched the query text (exact topic, option number, teacher e-mail,
    or individual words of the teacher's name) is highlighted in yellow.
    Each row carries a "Просмотр" button opening that option's summary.
    """
    layout = QVBoxLayout()
    layout.setSpacing(10)
    title = QLabel('По вашему запросу найдены \nследующие варианты:')
    fontTitle = QFont("Montserrat Medium", 20)
    fontTitle.setBold(True)
    title.setFont(fontTitle)
    title.setAlignment(Qt.AlignCenter)
    options = QVBoxLayout()
    options.setSpacing(5)
    number_line_cnt = 1
    for option in self.options:
        line = QHBoxLayout()
        line.setSpacing(10)
        number_line = QLabel(str(number_line_cnt) + '.')
        number_line.setFont(QFont("Montserrat Medium", 14))
        line_option = QHBoxLayout()
        line_option.setSpacing(10)
        # Topic text is truncated to 30 characters for the list view.
        line_text = QLabel(option['topic'][:30])
        line_text.setFont(QFont("Montserrat Medium", 14))
        line_text.setStyleSheet('color: grey')
        if self.teacher_option_edit.text().lower() == option['topic'].lower():
            # Exact topic match: highlight the whole topic label.
            line_number = QLabel(f"Вариант №{str(option['number_option'])}")
            line_number.setFont(QFont("Montserrat Medium", 14))
            line_text.setStyleSheet('background-color: yellow')
            line_option.addWidget(line_number)
        elif self.teacher_option_edit.text().isdigit():
            # Numeric query: the match was on the option number.
            line_number = QHBoxLayout()
            line_number.addStretch(1)
            line_number_text = QLabel(f'Вариант №')
            line_number_text.setFont(QFont("Montserrat Medium", 14))
            line_number_int = QLabel(str(option['number_option']))
            line_number_int.setFont(QFont("Montserrat Medium", 14))
            line_number_int.setStyleSheet('background-color: yellow')
            line_number.addWidget(line_number_text)
            line_number.addWidget(line_number_int)
            line_option.addLayout(line_number)
        elif self.teacher_option_edit.text().lower() == option['email_teacher'].lower():
            # Query matched the teacher's e-mail address.
            line_number = QLabel(option['email_teacher'])
            line_number.setFont(QFont("Montserrat Medium", 14))
            line_number.setStyleSheet('background-color: yellow')
            line_option.addWidget(line_number)
        else:
            # Otherwise highlight the individual name words that matched.
            line_number = QHBoxLayout()
            line_number.addStretch(1)
            for item in option['name_teacher'].split():
                line_number_text = QLabel(item)
                line_number_text.setFont(QFont("Montserrat Medium", 14))
                if item.lower() in self.teacher_option_edit.text().lower().split():
                    line_number_text.setStyleSheet('background-color: yellow')
                line_number.addWidget(line_number_text)
            line_option.addLayout(line_number)
        line_option.addWidget(line_text)
        view_button = QPushButton('Просмотр')
        view_button.setStyleSheet("background-color: rgb(223, 209, 21)")
        # Bind the 0-based row index to the handler via functools.partial.
        view_button.clicked.connect(partial(self.get_teacher_option_view_button_click, number_line_cnt - 1))
        line.addWidget(number_line)
        line.addLayout(line_option)
        line.addWidget(view_button)
        options.addLayout(line)
        number_line_cnt += 1
    last_line = QHBoxLayout()
    last_line.setSpacing(10)
    button_left_box = QHBoxLayout()
    button_left = QPushButton('Назад')
    button_left.setStyleSheet("background-color: rgb(244, 29, 29)")
    button_left.clicked.connect(self.menu)
    button_left_box.addWidget(button_left)
    button_left_box.setAlignment(Qt.AlignLeft)
    text_right_box = QHBoxLayout()
    text_right = QLabel('Нажмите "Просмотр" для решения варианта')
    text_right.setFont(QFont("Montserrat Medium", 14))
    text_right.setStyleSheet('color: grey')
    text_right_box.addWidget(text_right)
    text_right_box.setAlignment(Qt.AlignRight)
    last_line.addLayout(button_left_box)
    last_line.addLayout(text_right_box)
    layout.addWidget(title)
    layout.addLayout(options)
    layout.addLayout(last_line)
    # Tear down the previous screen's layout before installing the new one.
    self.delete_items_of_layout(self.layout())
    if self.layout() is not None:
        sip.delete(self.layout())
    logging.info('Add layout in stack teachers options window')
    self.setLayout(layout)
def send_email_teacher_option(self):
    """E-mail a notification about a teacher option via Gmail SMTP.

    Two cases, distinguished by the text of the button that triggered this
    slot: 'Сохранить' mails the option's metadata to its author right after
    creation; any other sender mails the option's teacher a report about
    the current user's attempt (including the score when completed).
    Fixes the original message text, which labelled the mark-3 threshold
    line as "отценки 5" in both messages.
    """
    sender = self.sender().text()
    if sender == 'Сохранить':
        # Confirmation letter to the author of a freshly created option.
        msg = 'Спасибо за ваш новый вариант на Eulero!\n\nИнформация о варианте:\n' \
              'Тема варианта: {topic}\n' \
              'Количество кругов Эйлера: {number_of_circles}\n' \
              ' Процент правильных ответов для отценки 5: {to_5}\n' \
              ' Процент правильных ответов для отценки 4: {to_4}\n' \
              ' Процент правильных ответов для отценки 3: {to_3}\n' \
              'Показывать оценку по завершению: {mark}\n' \
              'Номер варианта: {number}\n'.format(
                  topic=self.topic_edit.text(), number_of_circles=self.number_of_circles_combo.currentText(),
                  to_5=self.procent_of_right_for_5.text(), to_4=self.procent_of_right_for_4.text(),
                  to_3=self.procent_of_right_for_3.text(), mark='Да' if self.check_box_mark.isChecked() else 'Нет',
                  number=str(self.random_number)
              )
        from_email = user_information['payload']['email']
    else:
        # Report to the option's teacher about this user's attempt.
        msg = 'Информация о прохождении вашего варианта на Eulero!\n\nИнформация о варианте:\n' \
              'Тема варианта: {topic}\n' \
              ' Процент правильных ответов для отценки 5: {to_5}%\n' \
              ' Процент правильных ответов для отценки 4: {to_4}%\n' \
              ' Процент правильных ответов для отценки 3: {to_3}%\n' \
              'Показывать оценку по завершению: {mark}\n' \
              'Номер варианта: {number}'.format(
                  topic=self.option['topic'],
                  to_5=self.option['procent_to_5'], to_4=self.option['procent_to_4'],
                  to_3=self.option['procent_to_3'], mark='Да' if self.option['show_mark'] else 'Нет',
                  number=str(self.option['number_option'])
              )
        # sender == 'Продолжить' means the variant was completed; anything
        # else means the user abandoned it mid-way.
        msg += '\n\nИнформация о проходившем пользователе:\n' \
               '{availabilitypatronymic}: {name}\n' \
               'email: {user_email}\n' \
               '{option_passed}\n' \
               'Вариант был найден {what_find}по номеру варианта'.format(
                   availabilitypatronymic='ФИО' if user_information['payload']['patronymic'] != '' else 'Фамилия, Имя',
                   name=f"{user_information['payload']['surname']} {user_information['payload']['name']} "
                        f"{user_information['payload']['patronymic']}",
                   user_email=user_information['payload']['email'],
                   option_passed='Процент верно решённых заданий: {percent}'.format(
                       percent=str(self.right_tasks * 100 // len(self.option['tasks']))
                   ) if sender == 'Продолжить' else ' Внимание!\nВариант не был пройден пользователем до конца!',
                   what_find='' if str(self.option['number_option']) == self.teacher_option_edit.text() else 'не '
               )
        from_email = self.option['email_teacher']
    logging.info(f'Send message from: {from_email}, Message:\n{msg}\n')
    # Send from the application's account; the recipient is the teacher/author.
    server = smtp.SMTP("smtp.gmail.com", 587)
    server.ehlo()
    server.starttls()
    server.login(EULERO_EMAIL, EULERO_PASSWORD)
    server.sendmail(EULERO_EMAIL, from_email, f'From: {EULERO_EMAIL}\nTo: {from_email}\nSubject: '
                                              f'Уведомление от Eulero!\n\n{msg}'.encode('utf-8'))
    server.quit()
    self.menu()
def get_teacher_option_view_button_click(self, number_line):
    # "Просмотр" handler in the search-results list: remember the chosen
    # option (0-based row index) and open its summary screen.
    self.option = self.options[number_line]
    self.get_teacher_option()
def get_teacher_option(self):
    """Show the summary screen of the selected teacher option.

    Displays topic, teacher name and — when the option reveals marks —
    the percentage thresholds for marks 5..3, with buttons to start
    solving or go back.  Also resets the per-attempt progress counters.
    """
    layout = QVBoxLayout()
    layout.setSpacing(10)
    title = QLabel(f'Решить вариант №{str(self.option["number_option"])}')
    fontTitle = QFont("Montserrat Medium", 20)
    fontTitle.setBold(True)
    title.setFont(fontTitle)
    title.setAlignment(Qt.AlignCenter)
    topic = QLabel(f'Тема: {self.option["topic"]}')
    topic.setFont(QFont("Montserrat Bold", 18))
    topic.setAlignment(Qt.AlignCenter)
    name_teacher = QLabel(f'Учитель: {self.option["name_teacher"]}')
    name_teacher.setFont(QFont("Montserrat Medium", 16))
    name_teacher.setAlignment(Qt.AlignCenter)
    if self.option['show_mark']:
        # The option exposes its grading scale: list thresholds 5 -> 3.
        procents = QVBoxLayout()
        procents.addStretch(1)
        procents.addSpacing(5)
        procent_titel = QLabel("Процент верных ответов:")
        procent_titel.setFont(QFont("Montserrat Medium", 14))
        procent_titel.setAlignment(Qt.AlignCenter)
        procents.addWidget(procent_titel)
        for i in range(5, 2, -1):
            procent_text = f"procent_to_{str(i)}"
            procent = QLabel(f'На оценку {str(i)} - {self.option[procent_text]}%')
            procent.setFont(QFont("Montserrat Medium", 14))
            procent.setAlignment(Qt.AlignCenter)
            procents.addWidget(procent)
    buttons = QHBoxLayout()
    buttons.setSpacing(10)
    buttons.addStretch(1)
    cancelButton = QPushButton('Отмена', self)
    continueButton = QPushButton('Приступить', self)
    cancelButton.setStyleSheet("background-color: rgb(223, 209, 21)")
    continueButton.setStyleSheet("background-color: rgb(223, 209, 21)")
    cancelButton.clicked.connect(self.get_stack_teacher_tasks)
    continueButton.clicked.connect(self.get_teacher_tasks)
    buttons.addWidget(cancelButton)
    buttons.addWidget(continueButton)
    layout.addWidget(title)
    layout.addWidget(topic)
    layout.addWidget(name_teacher)
    if self.option['show_mark']:
        layout.addLayout(procents)
    layout.addLayout(buttons)
    # Tear down the previous screen's layout before installing the new one.
    self.delete_items_of_layout(self.layout())
    if self.layout() is not None:
        sip.delete(self.layout())
    logging.info('Add layout in get teacher option window')
    self.setLayout(layout)
    # Reset attempt state for a fresh run through the option's tasks.
    self.is_teacher_option_passed = False
    self.number_of_task = 0
    self.right_tasks = 0
    self.right_tasks_numbers = []
    self.already_been = []
def get_teacher_tasks(self):
    """Show the next task of the teacher option, or the summary once done."""
    self.class_of_tasks = 'teacher task'
    self.answer_photo = 'all'
    tasks = self.option['tasks']
    if self.number_of_task < len(tasks):
        self.task = tasks[self.number_of_task]
        self.new_task()
    else:
        # All tasks consumed: mark the attempt finished and show the results.
        self.is_teacher_option_passed = True
        self.get_teacher_option_final_window()
def get_teacher_option_final_window(self):
    """Show the results screen after a teacher option has been solved.

    Lists every task colored green/red by correctness (each row re-openable
    in preview mode), then either the resulting mark (when the option's
    grading scale is public) or the raw percentage of correct answers, and
    a "Продолжить" button that e-mails the report to the teacher.
    """
    self.delete_items_of_layout(self.layout())
    if self.layout() is not None:
        sip.delete(self.layout())
    layout = QVBoxLayout()
    layout.setSpacing(10)
    title = QLabel(f'Итоги решения варианта №{self.option["number_option"]}')
    fontTitle = QFont("Montserrat Medium", 20)
    fontTitle.setBold(True)
    title.setFont(fontTitle)
    title.setAlignment(Qt.AlignCenter)
    topic = QLabel(f'Тема: {self.option["topic"]}')
    topic.setFont(QFont("Montserrat Bold", 18))
    topic.setAlignment(Qt.AlignCenter)
    name_teacher = QLabel(f'Учитель: {self.option["name_teacher"]}')
    name_teacher.setFont(QFont("Montserrat Medium", 16))
    name_teacher.setAlignment(Qt.AlignCenter)
    stack_tasks = QVBoxLayout()
    stack_tasks.setSpacing(10)
    for task in self.option['tasks']:
        line = QHBoxLayout()
        line.setSpacing(10)
        line_content = QHBoxLayout()
        line_content.setSpacing(0)
        # A task stores three variants; exactly one of the table flags is
        # set, which selects the payload that actually holds the data.
        if task['8-9 class']['table 8-9 class']:
            class_of_table = '8-9 class'
        elif task['10-11 class']['table 10-11 class']:
            class_of_table = '10-11 class'
        else:
            class_of_table = 'other'
        number_of_task = task[class_of_table]['payload']['number of task']
        line_content_number = QLabel(f"Задание №{number_of_task}")
        line_content_number.setFont(QFont("Montserrat Medium", 14))
        # Truncate long task texts to 27 chars + ellipsis for the list.
        if task[class_of_table]['payload']['text task'][:30] == task[class_of_table]['payload']['text task']:
            line_content_text = QLabel(' ' + task[class_of_table]['payload']['text task'])
        else:
            line_content_text = QLabel(' ' + task[class_of_table]['payload']['text task'][:27] + '...')
        line_content_text.setFont(QFont("Montserrat Medium", 14))
        # Green background for correctly solved tasks, red otherwise.
        if number_of_task in self.right_tasks_numbers:
            line_content_number.setStyleSheet('background-color: rgb(118, 206, 103)')
            line_content_text.setStyleSheet('color: rgb(61, 61, 61); background-color: rgb(118, 206, 103)')
        else:
            line_content_number.setStyleSheet('background-color: rgb(237, 73, 73)')
            line_content_text.setStyleSheet('color: rgb(61, 61, 61); background-color: rgb(237, 73, 73)')
        line_content.addWidget(line_content_number)
        line_content.addWidget(line_content_text)
        line_view_button = QPushButton('Просмотр')
        line_view_button.setStyleSheet("background-color: rgb(223, 209, 21)")
        line_view_button.clicked.connect(partial(self.get_teacher_task_final_view, number_of_task - 1))
        line.addLayout(line_content)
        line.addWidget(line_view_button)
        stack_tasks.addLayout(line)
    procent_mark = QHBoxLayout()
    procent_mark.setSpacing(10)
    procent_of_right = self.right_tasks * 100 // len(self.option['tasks'])
    if self.option['show_mark']:
        procent = QLabel("Ваша оценка: ")
        procent.setFont(QFont("Montserrat Medium", 16))
        # Start from mark 2 and promote through each threshold that is met.
        mark = QLabel('2')
        for mark_number in range(3, 6):
            if procent_of_right >= int(self.option[f'procent_to_{mark_number}']):
                mark = QLabel(str(mark_number))
        mark.setFont(QFont("Montserrat Medium", 24))
    else:
        procent = QLabel("Процент ваших правильных ответов:")
        procent.setFont(QFont("Montserrat Medium", 14))
        mark = QLabel(str(procent_of_right))
        mark.setFont(QFont("Montserrat Medium", 24))
    mark.setStyleSheet('color: rgb(244, 29, 29)')
    procent_mark.addWidget(procent)
    procent_mark.addWidget(mark)
    last_line = QHBoxLayout()
    last_line.setSpacing(10)
    self.class_of_tasks = 'teacher window'
    question_of_task = QLabel('У Вас возник вопрос? Задайте его нам')
    question_of_task.setFont(QFont("Montserrat Medium", 12))
    question_of_task.setStyleSheet("text-decoration: underline; color: blue;")
    question_of_task.mousePressEvent = self.question_button_click
    button_box = QHBoxLayout()
    continue_button = QPushButton('Продолжить')
    continue_button.setStyleSheet("background-color: rgb(223, 209, 21)")
    continue_button.clicked.connect(self.send_email_teacher_option)
    button_box.addWidget(continue_button)
    button_box.setAlignment(Qt.AlignRight)
    last_line.addWidget(question_of_task)
    last_line.addLayout(button_box)
    layout.addWidget(title)
    layout.addWidget(topic)
    layout.addWidget(name_teacher)
    layout.addLayout(stack_tasks)
    layout.addLayout(procent_mark)
    layout.addLayout(last_line)
    logging.info('Set layout in get teacher option final window')
    self.setLayout(layout)
    self.adjustSize()
def get_teacher_task_final_view(self, number_task):
    # Re-open a solved task from the results screen in read-only preview mode.
    self.task = self.option['tasks'][number_task]
    self.class_of_tasks = 'final preview task'
    self.new_task()
def create_teacher_option(self):
    """Open the teacher-option builder window (registered users only)."""
    if not user_information['success']:
        self.on_error('Для испольования данной функции\nВы должны быть зарегестрированны')
        return
    # Replace the current screen's layout with the builder form.
    self.delete_items_of_layout(self.layout())
    if self.layout() is not None:
        sip.delete(self.layout())
    logging.info('Teacher option window started')
    self.init_teacher_option()
    self.setAutoFillBackground(True)
    palette = self.palette()
    background = QColor(self.backgroundRad, self.backgroundGreen, self.backgroundBlue)
    palette.setColor(self.backgroundRole(), background)
    self.setPalette(palette)
    logging.info(f'Set background rgb{self.backgroundRad, self.backgroundGreen, self.backgroundBlue}')
    self.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))
    self.adjustSize()
    self.setGeometry(300, 200, 600, 300)
    self.setWindowTitle('Teacher Option')
    self.show()
def init_teacher_option(self):
    """Build the teacher-option creation form.

    Collects: topic, target class, number of Euler circles (3/4), the
    percentage thresholds for marks 5/4/3 (defaults 90/75/50), whether to
    show the mark on completion, and the number of tasks (defaults to 20
    when left blank).  Widgets the later steps read are stored on ``self``.
    """
    layout = QVBoxLayout(self)
    layout.setSpacing(10)
    title = QLabel('Создать вариант')
    fontTitle = QFont("Montserrat Medium", 20)
    fontTitle.setBold(True)
    title.setFont(fontTitle)
    title.setAlignment(Qt.AlignCenter)
    topic_box = QHBoxLayout()
    topic_box.setSpacing(10)
    topic = QLabel('Тема: ')
    topic.setFont(QFont("Montserrat Medium", 16))
    self.topic_edit = QLineEdit()
    self.topic_edit.setPlaceholderText('Введите сюда тему варианта')
    topic_box.addWidget(topic)
    topic_box.addWidget(self.topic_edit)
    class_of_task = QHBoxLayout()
    class_of_task_text = QLabel('Задания, ориентируемые на')
    class_of_task_text.setFont(QFont("Montserrat Medium", 14))
    self.class_of_task_combo = QComboBox()
    self.class_of_task_combo.addItems(['5 класс', '6 класс', '7 класс', '8 класс',
                                       '9 класс', '10 класс', '11 класс', 'любой класс'])
    class_of_task.addWidget(class_of_task_text)
    class_of_task.addWidget(self.class_of_task_combo)
    number_of_circles = QHBoxLayout()
    number_of_circles_text = QLabel('Количество кругов Эйлера:')
    number_of_circles_text.setFont(QFont("Montserrat Medium", 14))
    self.number_of_circles_combo = QComboBox()
    self.number_of_circles_combo.addItems(['3 окружности', '4 окружности'])
    number_of_circles.addWidget(number_of_circles_text)
    number_of_circles.addWidget(self.number_of_circles_combo)
    procent = QVBoxLayout()
    procent.setSpacing(5)
    procent.addStretch(1)
    procent_of_right_in_tasks = QLabel('Процент правильных ответов для оценки:')
    procent_of_right_in_tasks.setFont(QFont("Montserrat Medium", 14))
    procent.addWidget(procent_of_right_in_tasks)
    # mark -> default threshold shown as a hint next to each edit.
    procents_for_mark = {'5': '90', '4': '75', '3': '50'}
    self.procent_of_right_for_5 = QLineEdit()
    self.procent_of_right_for_4 = QLineEdit()
    self.procent_of_right_for_3 = QLineEdit()
    for mark, procents in procents_for_mark.items():
        line = QHBoxLayout()
        line.addStretch(1)
        text_line = QLabel(f'% - "{mark}"')
        text_line.setFont(QFont("Montserrat Medium", 14))
        default = QLabel(f'(по умолчанию {procents}%)')
        default.setFont(QFont("Montserrat Medium", 14))
        default.setStyleSheet("color: grey")
        if mark == '5':
            line.addWidget(self.procent_of_right_for_5)
        elif mark == '4':
            line.addWidget(self.procent_of_right_for_4)
        else:
            line.addWidget(self.procent_of_right_for_3)
        line.addWidget(text_line)
        line.addWidget(default)
        line.setAlignment(Qt.AlignLeft)
        procent.addLayout(line)
    self.check_box_mark = QCheckBox('Показывать оценку по окончанию прохождения теста', self)
    self.check_box_mark.setChecked(True)
    procent.addWidget(self.check_box_mark)
    number_of_tasks = QHBoxLayout()
    number_of_tasks.addStretch(1)
    number_of_tasks_text = QLabel("Количество задач: ")
    number_of_tasks_text.setFont(QFont("Montserrat Medium", 14))
    number_of_tasks_text_default = QLabel('(можно оставить незаполненным)')
    number_of_tasks_text_default.setFont(QFont("Montserrat Medium", 12))
    number_of_tasks_text_default.setStyleSheet('color: grey;')
    self.number_of_tasks_edit = QLineEdit()
    number_of_tasks.addWidget(number_of_tasks_text)
    number_of_tasks.addWidget(self.number_of_tasks_edit)
    number_of_tasks.addWidget(number_of_tasks_text_default)
    buttons = QHBoxLayout()
    buttons.setSpacing(10)
    buttons.addStretch(1)
    cancelButton = QPushButton('Отмена', self)
    continueButton = QPushButton('Приступить', self)
    cancelButton.setStyleSheet("background-color: rgb(223, 209, 21)")
    continueButton.setStyleSheet("background-color: rgb(223, 209, 21)")
    cancelButton.clicked.connect(self.menu)
    continueButton.clicked.connect(self.teacher_option_button_click)
    buttons.addWidget(cancelButton)
    buttons.addWidget(continueButton)
    layout.addWidget(title)
    layout.addLayout(topic_box)
    layout.addLayout(class_of_task)
    layout.addLayout(number_of_circles)
    layout.addLayout(procent)
    layout.addLayout(number_of_tasks)
    layout.addLayout(buttons)
    layout.setAlignment(Qt.AlignLeft)
    logging.info('Set layout in teacher option')
    self.setLayout(layout)
    # Authoring state consumed by teacher_option_task_usual().
    self.teacher_tasks = []
    self.teacher_tasks_appended = False
def teacher_option_button_click(self):
    """Validate the option-builder form and start authoring tasks.

    Empty or out-of-range percentage/count fields are silently replaced
    with defaults (90/75/50 % and at most 20 tasks); each lower mark's
    threshold is kept below the threshold of the mark above it.
    """
    if self.topic_edit.text() == '':
        self.on_error('Введите тему варианта!')
        return
    # Mark 5: plain 1..100 range check, default 90 %.
    if self.procent_of_right_for_5.text().isdigit():
        if float(self.procent_of_right_for_5.text()) > 100.0 or float(self.procent_of_right_for_5.text()) < 1.0:
            self.procent_of_right_for_5.setText('90')
    else:
        self.procent_of_right_for_5.setText('90')
    # Mark 4: must stay below the mark-5 threshold; default 75 %, or half
    # of the mark-5 threshold when 75 would not fit under it.
    if self.procent_of_right_for_4.text().isdigit():
        if float(self.procent_of_right_for_4.text()) > float(self.procent_of_right_for_5.text()) or\
                float(self.procent_of_right_for_4.text()) < 1.0:
            if 75.0 < float(self.procent_of_right_for_5.text()):
                self.procent_of_right_for_4.setText('75')
            else:
                self.procent_of_right_for_4.setText(str(float(self.procent_of_right_for_5.text()) / 2))
    else:
        self.procent_of_right_for_4.setText('75')
    # Mark 3: same scheme against the mark-4 threshold; default 50 %.
    if self.procent_of_right_for_3.text().isdigit():
        if float(self.procent_of_right_for_3.text()) > float(self.procent_of_right_for_4.text()) or\
                float(self.procent_of_right_for_3.text()) < 1.0:
            # BUG FIX: the original compared against 75.0 here (copied from
            # the mark-4 branch); the fallback default for mark 3 is 50 %.
            if 50.0 < float(self.procent_of_right_for_4.text()):
                self.procent_of_right_for_3.setText('50')
            else:
                self.procent_of_right_for_3.setText(str(float(self.procent_of_right_for_4.text()) / 2))
    else:
        self.procent_of_right_for_3.setText('50')
    # Task count: 1..20, default 20.
    if self.number_of_tasks_edit.text().isdigit():
        if int(self.number_of_tasks_edit.text()) > 20 or int(self.number_of_tasks_edit.text()) < 1:
            self.number_of_tasks_edit.setText('20')
    else:
        self.number_of_tasks_edit.setText('20')
    self.number_of_task = 0
    self.before_teacher_option_task_usual()
def before_teacher_option_task_usual(self):
    """Record which input mode the pressed button selects, then (re)build the task form.

    Exactly one of the three flags ends up True: the 8-9 table, the
    10-11 table, or the free-text mode (the default for any other sender).
    """
    sender = self.sender().text()
    self.table_9 = sender == 'Таблица 8-9 класс'
    self.table_10 = sender == 'Таблица 10-11 класс'
    self.no_table = not (self.table_9 or self.table_10)
    self.teacher_option_task_usual()
def teacher_option_task_usual(self):
    """Build (or rebuild) the task-authoring form for the current task.

    The form has three input modes selected by before_teacher_option_task_usual():
    the 8-9 class table (4 lettered requests + sort direction), the 10-11
    class table (6 request/found-pages rows + a question), or a free-text
    task.  When the slot is re-entered via the mode-switch / "Назад"
    buttons, previously typed values are restored; when entered after a
    successful preview, the current ``self.task`` is committed to
    ``self.teacher_tasks`` and the task counter advances.
    """
    sender = self.sender().text()
    self.class_of_tasks = 'preview task'
    # All requested tasks authored: commit the last one and show the summary.
    if int(self.number_of_tasks_edit.text()) <= self.number_of_task:
        self.teacher_tasks.append(self.task)
        self.teacher_option_final_window()
        return
    # Any sender other than the mode-switch / back buttons means we moved
    # forward from a preview; commit the task once (the toggle flag guards
    # against double-appending) and advance the counter.
    if sender != 'Назад' and sender != 'Таблица 8-9 класс' and sender != 'Таблица 10-11 класс' and \
            sender != 'Текстовое поле':
        if sender != 'Добавить задачу':
            self.teacher_tasks_appended = not self.teacher_tasks_appended
        if not self.teacher_tasks_appended:
            self.teacher_tasks.append(self.task)
            self.teacher_tasks_appended = True
        self.number_of_task += 1
    layout = QVBoxLayout()
    layout.setSpacing(10)
    title = QLabel(f'Задача №{str(self.number_of_task)}')
    fontTitle = QFont("Montserrat Medium", 20)
    fontTitle.setBold(True)
    title.setFont(fontTitle)
    title.setAlignment(Qt.AlignCenter)
    info_block = QHBoxLayout()
    info_block.addStretch(1)
    info_text = QLabel('обязательное поле')
    info_star = QLabel('*')
    info_star.setFont(QFont("Montserrat Medium", 14))
    info_star.setStyleSheet("color: red")
    info_text.setFont(QFont("Montserrat Medium", 14))
    info_block.addWidget(info_star)
    info_block.addWidget(info_text)
    task = QVBoxLayout()
    task.addStretch(1)
    text_task = QHBoxLayout()
    text_task.addStretch(1)
    text_task_without_star = QLabel('{text} задачи:'.format(text='Текст' if self.no_table else 'Таблица'))
    text_task_without_star.setFont(QFont("Montserrat Medium", 14))
    info_star = QLabel('*')
    info_star.setFont(QFont("Montserrat Medium", 14))
    info_star.setStyleSheet("color: red")
    text_task.addWidget(info_star)
    text_task.addWidget(text_task_without_star)
    text_task.setAlignment(Qt.AlignLeft)
    if self.table_9:
        # 8-9 class mode: four lettered request rows plus a sort-direction combo.
        table = QVBoxLayout()
        table.setSpacing(5)
        letters = ['А', 'Б', 'В', 'Г']
        if sender == 'Назад':
            # Coming back from a preview: restore the previously typed values.
            text = [i.text() for i in self.table_tasks]
            number = [i.text() for i in self.table_circles_numbers]
        else:
            text = ['', '', '', '']
            number = ['', '', '', '']
        self.table_tasks = []
        self.table_circles_numbers = []
        for letter in range(len(letters)):
            line = QHBoxLayout()
            line.setSpacing(5)
            line_text = QLabel(letters[letter])
            line_text.setFont(QFont('Montserrat Medium', 14))
            line_edit = QLineEdit()
            line_edit.setPlaceholderText('Запрос')
            line_edit.setText(text[letter])
            self.table_tasks.append(line_edit)
            line_sectors_edit = QLineEdit()
            line_sectors_edit.setPlaceholderText('Сектора кругов Эйлера')
            line_sectors_edit.setText(number[letter])
            self.table_circles_numbers.append(line_sectors_edit)
            line.addWidget(line_text)
            line.addWidget(line_edit)
            line.addWidget(line_sectors_edit)
            table.addLayout(line)
        text_question = QHBoxLayout()
        text_question.setSpacing(5)
        text_question.addStretch(1)
        question = QLabel(f"Расположить буквы в порядке ")
        question.setFont(QFont('Montserrat Medium', 14))
        # Preserve the previous combo selection when returning via "Назад".
        index = self.question_combo.findText(self.question_combo.currentText(), Qt.MatchFixedString) \
            if sender == 'Назад' else 0
        self.question_combo = QComboBox()
        self.question_combo.addItems(['возрастания', 'убывания'])
        self.question_combo.setCurrentIndex(index)
        text_question.addWidget(question)
        text_question.addWidget(self.question_combo)
        table.addLayout(text_question)
        # NOTE(review): a layout is being added to itself here — Qt rejects
        # this at runtime; the line looks like a copy-paste slip and is
        # presumably a no-op apart from a console warning. TODO confirm.
        table.addLayout(table)
    elif self.table_10:
        # 10-11 class mode: six request / found-pages rows in a grid plus a
        # free-text "find" question.
        table = QGridLayout()
        request = QLabel("Запрос")
        request.setFont(QFont('Montserrat Medium', 14))
        find = QLabel('Найдено страниц')
        find.setFont(QFont('Montserrat Medium', 14))
        table.addWidget(request, 0, 0)
        table.addWidget(find, 0, 1)
        if sender == 'Назад':
            text = [i.text() for i in self.table_tasks]
            number = [i.text() for i in self.table_circles_numbers]
        else:
            text = ['', '', '', '', '', '']
            number = ['', '', '', '', '', '']
        self.table_tasks = []
        self.table_circles_numbers = []
        for i in range(6):
            line_edit = QLineEdit()
            line_edit.setPlaceholderText('Запрос')
            line_edit.setText(text[i])
            self.table_tasks.append(line_edit)
            line_sectors_edit = QLineEdit()
            line_sectors_edit.setPlaceholderText('Найдено страниц')
            line_sectors_edit.setText(number[i])
            self.table_circles_numbers.append(line_sectors_edit)
            table.addWidget(line_edit, i + 1, 0)
            table.addWidget(line_sectors_edit, i + 1, 1)
        question = QLabel('Найти: ')
        question.setFont(QFont('Montserrat Medium', 14))
        self.question_text_edit = QLineEdit()
        self.question_text_edit.setPlaceholderText('Вопрос')
        table.addWidget(question, 7, 0)
        table.addWidget(self.question_text_edit, 7, 1)
    else:
        # Free-text mode.
        if sender == 'Назад':
            text = self.text_task.toPlainText()
        else:
            text = ''
        self.text_task = QTextEdit()
        self.text_task.setPlaceholderText('Введите текст задачи сюда')
        self.text_task.setText(text)
    # Mode-switch buttons: each toggles between its table and the text field.
    buttons_table = QHBoxLayout()
    buttons_table.setSpacing(50)
    button_9_class = QPushButton('Текстовое поле' if self.table_9 else 'Таблица 8-9 класс')
    button_9_class.setStyleSheet("background-color: rgb(223, 209, 21)")
    button_9_class.clicked.connect(self.before_teacher_option_task_usual)
    button_10_class = QPushButton('Текстовое поле' if self.table_10 else 'Таблица 10-11 класс')
    button_10_class.setStyleSheet("background-color: rgb(223, 209, 21)")
    button_10_class.clicked.connect(self.before_teacher_option_task_usual)
    buttons_table.addWidget(button_9_class)
    buttons_table.addWidget(button_10_class)
    task.addLayout(text_task)
    if self.no_table:
        task.addWidget(self.text_task)
    else:
        task.addLayout(table)
    task.addLayout(buttons_table)
    # Reference picture of the Euler circles matching the chosen count.
    photo = QLabel(self)
    if self.number_of_circles_combo.currentText() == '3 окружности':
        namePhoto = 'photo/threeCircles.png'
    else:
        namePhoto = 'photo/fourCircles.png'
    pixmap = QPixmap(namePhoto)
    pixmap2 = pixmap.scaled(300, 300, Qt.KeepAspectRatio)
    photo.setPixmap(pixmap2)
    logging.info(f"Add photo '{namePhoto}' in teacher options window")
    # Editable names for the 3 or 4 sets shown on the picture.
    names_box = QVBoxLayout()
    names_box.addStretch(1)
    names_box.setSpacing(5)
    names_title = QLabel('Введите названия множеств:')
    names_title.setFont(QFont("Montserrat Medium", 14))
    names_perms = QHBoxLayout()
    names_perms.setSpacing(10)
    names_perms.addStretch(1)
    names_text = ['1: ', '2: ', '3: ']
    if self.number_of_circles_combo.currentText() == '4 окружности':
        names_text.append('4: ')
    if sender == 'Назад':
        text = [i.text() for i in self.names_edits]
    else:
        text = ['', '', '', '']
    self.names_edits = []
    for label in range(len(names_text)):
        self.name_edit = QLineEdit()
        self.name_edit.setPlaceholderText(f'"{str(label + 1)}" по умолчанию')
        self.name_edit.setText(text[label])
        text_perm = QLabel(names_text[label])
        text_perm.setFont(QFont("Montserrat Medium", 14))
        names_perms.addWidget(text_perm)
        names_perms.addWidget(self.name_edit)
        self.names_edits.append(self.name_edit)
    names_box.addWidget(names_title)
    names_box.addLayout(names_perms)
    answer = QHBoxLayout()
    answer.setSpacing(5)
    answer_text = QLabel('Ответ:')
    answer_text.setFont(QFont("Montserrat Medium", 14))
    if sender == 'Назад':
        text = self.answer_edit.text()
    else:
        text = ''
    self.answer_edit = QLineEdit()
    self.answer_edit.setPlaceholderText('Введите сюда ответ')
    self.answer_edit.setText(text)
    info_star = QLabel('*')
    info_star.setFont(QFont("Montserrat Medium", 14))
    info_star.setStyleSheet("color: red")
    answer.addWidget(info_star)
    answer.addWidget(answer_text)
    answer.addWidget(self.answer_edit)
    if not self.table_9:
        # The 8-9 table carries sector numbers per row; the other two modes
        # take them here as part of the answer.
        answer_picture = QLabel('Сектора кругов Эйлера:')
        answer_picture.setFont(QFont("Montserrat Medium", 14))
        if sender == 'Назад':
            text = self.answer_picture_edit.text()
        else:
            text = ''
        self.answer_picture_edit = QLineEdit()
        self.answer_picture_edit.setPlaceholderText('ТОЛЬКО номера секторов')
        self.answer_picture_edit.setText(text)
        answer.addWidget(answer_picture)
        answer.addWidget(self.answer_picture_edit)
    explanation = QVBoxLayout()
    explanation.setSpacing(5)
    explanation.addStretch(1)
    explanation_text = QHBoxLayout()
    explanation_text.addStretch(1)
    explanation_text_without_star = QLabel('Объяснение задачи:')
    explanation_text_without_star.setFont(QFont("Montserrat Medium", 14))
    info_star = QLabel('*')
    info_star.setFont(QFont("Montserrat Medium", 14))
    info_star.setStyleSheet("color: red")
    explanation_text.addWidget(info_star)
    explanation_text.addWidget(explanation_text_without_star)
    if self.table_9:
        # The 8-9 table can auto-generate its explanation.
        if sender == 'Назад':
            check = self.auth_explanation_check_box.isChecked()
        else:
            check = False
        self.auth_explanation_check_box = QCheckBox('Автоматическое объяснение')
        self.auth_explanation_check_box.setChecked(check)
        explanation_text.addWidget(self.auth_explanation_check_box)
    if sender == 'Назад':
        text = self.explanation_edit.toPlainText()
    else:
        text = ''
    self.explanation_edit = QTextEdit()
    self.explanation_edit.setPlaceholderText('Введите сюда объяснение задачи')
    self.explanation_edit.setText(text)
    explanation.addLayout(explanation_text)
    explanation.addWidget(self.explanation_edit)
    buttons = QHBoxLayout()
    buttons.setSpacing(10)
    buttons.addStretch(1)
    continue_button = QPushButton('Предпросмотр')
    continue_button.setStyleSheet("background-color: rgb(63, 137, 255)")
    continue_button.clicked.connect(self.teacher_option_task_button_click)
    exit_button = QPushButton('Завершить')
    exit_button.setStyleSheet("background-color: rgb(244, 29, 29)")
    exit_button.clicked.connect(self.exit_button_click)
    buttons.addWidget(exit_button)
    buttons.addWidget(continue_button)
    titel_box = QHBoxLayout()
    titel_box.addWidget(title)
    titel_box.addLayout(info_block)
    task_box = QHBoxLayout()
    task_box.addLayout(task)
    task_box.addWidget(photo)
    layout.addLayout(titel_box)
    layout.addLayout(task_box)
    layout.addLayout(names_box)
    layout.addLayout(answer)
    layout.addLayout(explanation)
    layout.addLayout(buttons)
    if self.layout() is not None:
        self.delete_items_of_layout(self.layout())
        sip.delete(self.layout())
    logging.info('Set layout in task')
    self.setLayout(layout)
    self.adjustSize()
    self.setGeometry(300, 150, 750, 300)
    self.setWindowTitle('Teacher Task')
    self.show()
def teacher_option_task_button_click(self):
    """Validate the authored task form and open a preview of the task.

    Depending on the selected input mode (8-9 table / 10-11 table / free
    text) the form fields are validated and packed into ``self.task`` in
    the three-variant format used throughout the teacher-option flow, then
    the preview window is shown.
    """
    logging.info(str(self.table_9) + str(self.table_10) + str(self.no_table))
    if self.no_table:
        if self.text_task.toPlainText() == '':
            self.on_error('Введите текст задачи!')
            return
    if self.answer_edit.text() == '':
        self.on_error('Введите ответ на задачу!')
        return
    # An explanation is mandatory unless the 8-9 table auto-generates one.
    if self.no_table or self.table_10 or (self.table_9 and not self.auth_explanation_check_box.isChecked()):
        if self.explanation_edit.toPlainText() == '':
            self.on_error('Введите объяснение задачи!')
            return
    # Fall back to "1", "2", ... for any set name left blank.
    names_perms = []
    for name in range(len(self.names_edits)):
        if self.names_edits[name].text() != '':
            names_perms.append(self.names_edits[name].text())
        else:
            names_perms.append(str(name + 1))
    payload_9_class = {}
    payload_10_class = {}
    payload_other = {}
    if self.table_9:
        for i in range(4):
            if self.table_tasks[i].text() == '':
                self.on_error('Заполните все поля запроса!')
                return
            # Sector numbers must all be digits in 1..7; otherwise the
            # whole field is discarded.
            for number in self.table_circles_numbers[i].text():
                if not number.isdigit() or int(number) < 1 \
                        or int(number) > 7:
                    self.table_circles_numbers[i].setText('')
        payload_9_class = {
            'number of task': self.number_of_task,
            'text task': f'А: {self.table_tasks[0].text()} Б: {self.table_tasks[1].text()} '
                         f'В: {self.table_tasks[2].text()} Г: {self.table_tasks[3].text()}',
            'request': {
                'А': f'{self.table_tasks[0].text()};{self.table_circles_numbers[0].text()}',
                'Б': f'{self.table_tasks[1].text()};{self.table_circles_numbers[1].text()}',
                'В': f'{self.table_tasks[2].text()};{self.table_circles_numbers[2].text()}',
                'Г': f'{self.table_tasks[3].text()};{self.table_circles_numbers[3].text()}'
            },
            'options': names_perms,
            'question': self.question_combo.currentText(),
            'answer': self.answer_edit.text(),
            'sectors circles': None,
            'auth explanation': self.auth_explanation_check_box.isChecked(),
            'explanation': self.explanation_edit.toPlainText()
        }
    elif self.table_10:
        requests = []
        finds = []
        if self.question_text_edit.text() == '':
            self.on_error('Введите то, что нужно найти')
            return
        for i in range(6):
            if self.table_tasks[i].text() != '':
                requests.append(self.table_tasks[i].text())
                # BUG FIX: the original checked index [1] instead of [i],
                # so only the second "found pages" field was ever validated.
                if self.table_circles_numbers[i].text() != '':
                    finds.append(self.table_circles_numbers[i].text())
                else:
                    self.on_error('Введите количество найденных страниц для запроса')
                    return
        if len(requests) < 3:
            self.on_error('Введите больше запросов')
            return
        text_task = ''
        for request, find in zip(requests, finds):
            text_task += request + ' ' + find + '; '
        payload_10_class = {
            'number of task': self.number_of_task,
            # BUG FIX: use the assembled request/pages summary built above;
            # the original hard-coded the first four А/Б/В/Г fields, which
            # belong to the 8-9 table layout and may not all be filled here.
            'text task': text_task,
            'request': requests,
            'find': finds,
            'options': names_perms,
            'question': f'{self.question_text_edit.text()};{self.answer_picture_edit.text()}',
            'answer': self.answer_edit.text(),
            'sectors circles': None,
            'explanation': self.explanation_edit.toPlainText()
        }
    else:
        # Free-text mode: the sector answer is accepted only when every
        # character is a digit in 1..7; any invalid character clears it.
        number_answer = ''
        for numeral in self.answer_picture_edit.text():
            try:
                if int(numeral) < 1 or int(numeral) > 7:
                    number_answer = ''
                    break
                else:
                    number_answer += str(numeral)
            except ValueError:
                # BUG FIX: stop scanning on a non-digit character; the
                # original kept going and could re-collect later digits.
                number_answer = ''
                break
        payload_other = {
            'number of task': self.number_of_task,
            'text task': self.text_task.toPlainText(),
            'options': names_perms,
            'answer': self.answer_edit.text(),
            'sectors circles': number_answer,
            'explanation': self.explanation_edit.toPlainText()
        }
    self.task = {
        '8-9 class': {'table 8-9 class': self.table_9, 'payload': payload_9_class},
        '10-11 class': {'table 10-11 class': self.table_10, 'payload': payload_10_class},
        'other': {'no table': self.no_table, 'payload': payload_other}
    }
    self.setWindowTitle('Preview')
    self.new_task()
    def teacher_option_final_window(self):
        """Build the teacher's summary screen: the list of added tasks (each with a
        delete button), an "add task" button while fewer than 20 tasks exist, the
        generated option number, the open-access checkbox and save/discard buttons.

        Replaces the window's current layout in place.
        """
        layout = QVBoxLayout()
        layout.setSpacing(15)
        title = QLabel(self.topic_edit.text())
        fontTitle = QFont("Montserrat Medium", 20)
        fontTitle.setBold(True)
        title.setFont(fontTitle)
        title.setAlignment(Qt.AlignCenter)
        # Vertical stack listing every task added so far.
        stack_task = QVBoxLayout()
        stack_task.setSpacing(5)
        stack_task.addStretch(1)
        stack_task_text = QLabel('Добавленные задачи:')
        stack_task_text.setFont(QFont("Montserrat Medium", 18))
        stack_task_text.setAlignment(Qt.AlignCenter)
        stack_task.addWidget(stack_task_text)
        logging.info(self.teacher_tasks)
        for item in self.teacher_tasks:
            line = QHBoxLayout()
            line.setSpacing(10)
            # NOTE(review): this frame is created but never added to any layout —
            # it appears to be dead code; confirm before removing.
            frame = QFrame()
            frame.setFrameShape(QFrame.Box)
            task = QHBoxLayout()
            task.addSpacing(5)
            # Each stored task keeps its payload under one of three variant keys;
            # pick the key that is actually populated for this item.
            if item['8-9 class']['table 8-9 class']:
                table = '8-9 class'
            elif item['10-11 class']['table 10-11 class']:
                table = '10-11 class'
            else:
                table = 'other'
            task_number = QLabel(f'Задание №{item[table]["payload"]["number of task"]}')
            task_number.setFont(QFont("Montserrat Medium", 14))
            # Truncate long task texts to 40 characters for the list view.
            if len(item[table]["payload"]['text task']) > 40:
                task_text = QLabel(item[table]["payload"]['text task'][:40] + '...')
            else:
                task_text = QLabel(item[table]["payload"]['text task'])
            task_text.setFont(QFont("Montserrat Medium", 14))
            task_text.setStyleSheet('color: grey')
            task.addWidget(task_number)
            task.addWidget(task_text)
            remove_button = QPushButton('Удалить')
            remove_button.setStyleSheet("background-color: rgb(244, 29, 29)")
            # partial() binds this task's number so one slot serves every row.
            remove_button.clicked.connect(partial(self.delete_teacher_task, item[table]["payload"]["number of task"]))
            line.addLayout(task)
            line.addWidget(remove_button)
            line.setAlignment(Qt.AlignCenter)
            stack_task.addLayout(line)
        # The "add task" button is only offered up to the 20-task limit.
        if len(self.teacher_tasks) < 20:
            button_box = QHBoxLayout()
            # Side effect: bumps the displayed task counter for the next task form.
            self.number_of_tasks_edit.setText(str(int(self.number_of_tasks_edit.text()) + 1))
            add_button = QPushButton('Добавить задачу')
            add_button.setStyleSheet("background-color: rgb(223, 209, 21)")
            add_button.clicked.connect(self.teacher_option_task_usual)
            button_box.addWidget(add_button)
            button_box.setAlignment(Qt.AlignCenter)
        number_of_option = QHBoxLayout()
        number_of_option.setSpacing(10)
        # Fresh 6-digit option id generated on every visit to this screen.
        self.random_number = random.randint(100000, 999999)
        number_of_option_text = QLabel(f'Номер варианта - {str(self.random_number)}')
        number_of_option_text.setFont(QFont("Montserrat Medium", 14))
        number_of_option_info = QLabel('(сохраните его для дальнейшего доступа)')
        number_of_option_info.setFont(QFont("Montserrat Medium", 14))
        number_of_option_info.setStyleSheet('color: grey')
        number_of_option.addWidget(number_of_option_text)
        number_of_option.addWidget(number_of_option_info)
        number_of_option.setAlignment(Qt.AlignCenter)
        # Local layout `open_access` is immediately shadowed in spirit by the
        # attribute self.open_access (the checkbox read in save_teacher_option).
        open_access = QHBoxLayout()
        open_access.addSpacing(5)
        self.open_access = QCheckBox('Открытый оступ')
        self.open_access.setChecked(True)
        open_access_text = QLabel('(кроме № варианта, ещё по теме, вашему email и ФИО)')
        open_access_text.setFont(QFont("Montserrat Medium", 13))
        open_access_text.setStyleSheet('color: grey')
        open_access.addWidget(self.open_access)
        open_access.addWidget(open_access_text)
        open_access.setAlignment(Qt.AlignCenter)
        buttons = QHBoxLayout()
        buttons.setSpacing(10)
        button_left_box = QHBoxLayout()
        button_left = QPushButton('Не сохранять')
        button_left.setStyleSheet("background-color: rgb(244, 29, 29)")
        button_left.clicked.connect(self.not_save_teacher_option)
        button_left_box.addWidget(button_left)
        button_left_box.setAlignment(Qt.AlignLeft)
        button_right_box = QHBoxLayout()
        button_right = QPushButton('Сохранить')
        button_right.setStyleSheet('background-color: rgb(140, 255, 0)')
        button_right.clicked.connect(self.save_teacher_option)
        button_right_box.addWidget(button_right)
        button_right_box.setAlignment(Qt.AlignRight)
        buttons.addLayout(button_left_box)
        buttons.addLayout(button_right_box)
        layout.addWidget(title)
        layout.addLayout(stack_task)
        if len(self.teacher_tasks) < 20:
            layout.addLayout(button_box)
        layout.addLayout(number_of_option)
        layout.addLayout(open_access)
        layout.addLayout(buttons)
        # Tear down and replace the previous layout before installing the new one.
        self.delete_items_of_layout(self.layout())
        if self.layout() is not None:
            sip.delete(self.layout())
        logging.info('Add layout in teacher option final window')
        self.setLayout(layout)
        self.adjustSize()
    def save_teacher_option(self):
        """Persist the assembled teacher option via add_teacher_option().

        On success, informs the teacher and triggers the confirmation e-mail;
        any failure is routed to the generic error screen.
        """
        try:
            status = add_teacher_option(
                number_option=self.random_number,
                open_access=self.open_access.isChecked(),
                name_teacher=user_information['payload']['name'] + ' ' + user_information['payload']['surname'] + ' ' +
                             user_information['payload']['patronymic'],
                email_teacher=user_information['payload']['email'],
                topic=self.topic_edit.text(),
                number_of_circles=self.number_of_circles_combo.currentText(),
                procent_to_5=self.procent_of_right_for_5.text(),
                procent_to_4=self.procent_of_right_for_4.text(),
                procent_to_3=self.procent_of_right_for_3.text(),
                show_mark=self.check_box_mark.isChecked(),
                tasks=self.teacher_tasks
            )
            # NOTE(review): a falsy status is silently ignored — presumably
            # add_teacher_option reports its own errors; confirm.
            if status:
                QMessageBox.information(self, 'Спасибо', 'Ваш вариант успешно добавлен!\nПроверьте свою почту для\n'
                                                         'получения информации о варианте')
                self.send_email_teacher_option()
        except Exception as e:
            self.on_exception(e)
def not_save_teacher_option(self):
logging.info('Close event')
reply = QMessageBox.question(self, 'Message',
"Вы уверены, что не будете созранять ваш вариант?", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
logging.info('User answer - YES')
logging.info('Close app')
self.menu()
else:
logging.info('User answer - NO')
def delete_teacher_task(self, number_of_task):
for item in self.teacher_tasks:
if item['8-9 class']['table 8-9 class']:
table = '8-9 class'
elif item['10-11 class']['table 10-11 class']:
table = '10-11 class'
else:
table = 'other'
if int(item[table]['payload']['number of task']) == int(number_of_task):
self.teacher_tasks.remove(item)
break
for number in range(number_of_task - 1, len(self.teacher_tasks)):
if self.teacher_tasks[number]['8-9 class']['table 8-9 class']:
table = '8-9 class'
elif self.teacher_tasks[number]['10-11 class']['table 10-11 class']:
table = '10-11 class'
else:
table = 'other'
self.teacher_tasks[number][table]['payload']['number of task'] -= 1
self.number_of_task -= 1
self.teacher_option_final_window()
def task_button_click(self):
sender = self.sender()
logging.info(f"The '{sender.text()}' button was pressed")
self.class_of_tasks = sender.text()
self.task_window()
    def task_window(self):
        """Switch the main window into testing mode: drop the old layout,
        reset the session counters via init_task() and restyle the window."""
        # Remove and destroy the previous layout before init_task installs a new one.
        self.delete_items_of_layout(self.layout())
        if self.layout() is not None:
            sip.delete(self.layout())
        logging.info('Topics window started')
        self.init_task()
        self.setAutoFillBackground(True)
        p = self.palette()
        p.setColor(self.backgroundRole(), QColor(self.backgroundRad, self.backgroundGreen, self.backgroundBlue))
        self.setPalette(p)
        logging.info(f'Set background rgb{self.backgroundRad, self.backgroundGreen, self.backgroundBlue}')
        self.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))
        self.adjustSize()
        self.setGeometry(300, 200, 600, 300)
        self.setWindowTitle('Tests')
        self.show()
def init_task(self):
self.number_of_task = 0
self.right_tasks = 0
self.right_tasks_numbers = []
self.already_been = []
self.new_task()
    def new_task(self):
        """Build and show the next task window.

        The layout depends on self.class_of_tasks: teacher preview modes render
        the task stored in self.task, '8-9 класс' / '10-11 класс' fetch a fresh
        task from Tests9Class / Tests10Class, and '6-7 класс' is not implemented
        yet. Replaces the window's current layout in place.
        """
        sender = self.sender().text()
        self.user_can_ask_a_question = True
        # Only advance the task counter when this is really a new task, not a
        # round-trip from the preview's "Окно задачи"/"Предпросмотр" buttons.
        if sender != 'Окно задачи' and sender != 'Предпросмотр':
            self.number_of_task += 1
            self.is_new_task = True
        grid = QGridLayout()
        if self.class_of_tasks == 'preview task':
            title = QLabel('Окно задачи')
        else:
            title = QLabel(f'Задача №{str(self.number_of_task)}')
        fontTitle = QFont("Montserrat Medium", 20)
        fontTitle.setBold(True)
        title.setFont(fontTitle)
        title.setAlignment(Qt.AlignCenter)
        # From the second task onward show the running success percentage.
        if self.class_of_tasks != 'preview task' and self.number_of_task != 1:
            procent_of_rigth = QLabel(f'Верно решёных - {str(self.right_tasks * 100 // (self.number_of_task - 1))} %')
            procent_of_rigth.setFont(QFont("Montserrat Medium", 14))
            grid.addWidget(procent_of_rigth, 0, 1)
        if self.class_of_tasks == 'preview task' or self.class_of_tasks == 'final preview task' or \
                self.class_of_tasks == 'teacher task':
            # Teacher-authored task: render whichever payload variant is set.
            if self.task['8-9 class']['table 8-9 class']:
                self.info = self.task['8-9 class']['payload']
                logging.info('Task Info: ' + str(self.info))
                self.requests = {}
                self.answer_photo = 'all'
                text_task = self.get_text_task_8_9_class()
            elif self.task['10-11 class']['table 10-11 class']:
                logging.info('10-11 class')
                self.info = self.task['10-11 class']['payload']
                logging.info('Task Info: ' + str(self.info))
                text_task = self.get_text_task_10_11_class()
            else:
                logging.info('other')
                self.info = self.task['other']['payload']
                logging.info('Task Info: ' + str(self.info))
                # Manual word wrap: break the task text into ~40-character lines
                # at the nearest preceding space.
                text_task_body_split = ''
                number_of_letter = 0
                spliter = self.info['text task']
                while number_of_letter + 40 <= len(spliter) - 1:
                    row_long = 40
                    while spliter[number_of_letter + row_long] != ' ':
                        row_long -= 1
                        # Guard against a single word longer than 40 characters.
                        if row_long <= 0:
                            break
                    if number_of_letter < 0:
                        text_task_body_split += spliter[0:number_of_letter + row_long] + '\n'
                    else:
                        text_task_body_split += spliter[number_of_letter:number_of_letter + row_long] + '\n'
                    number_of_letter += row_long + 1
                try:
                    text_task_body_split += self.info['text task'][number_of_letter:]
                except IndexError:
                    pass
                text_task = QLabel(text_task_body_split)
                text_task.setFont(QFont("Montserrat Medium", 14))
            if self.class_of_tasks == 'teacher task':
                self.answer_edit = QLineEdit()
                self.answer_edit.setPlaceholderText('Введите сюда ваш ответ')
        elif self.class_of_tasks == '6-7 класс':
            QMessageBox.information(self, 'Внимание', 'Данная функция находится на стадии разработки.\n'
                                                      'Приносим извинения за неудобства.')
            self.menu()
            return
        elif self.class_of_tasks == '8-9 класс':
            task = Tests9Class(number_of_task=self.number_of_task % NUMBER_OF_TASKS_9_CLASS,
                               already_been=self.already_been)
            # NOTE(review): if task.return_task['success'] is falsy, text_task and
            # self.answer_edit stay undefined and the grid code below raises
            # NameError — confirm whether the generator can actually fail here.
            if task.return_task['success']:
                self.info = task.return_task['payload']
                logging.info('Task Info: ' +
                             str(self.info['options']) + ' ' +
                             str(self.info['question']) + ' ' +
                             str(self.info['answer']))
                self.requests = {}
                self.answer_photo = 'all'
                text_task = self.get_text_task_8_9_class()
                self.answer_edit = QLineEdit()
                self.answer_edit.setPlaceholderText('Введите сюда ваш ответ (только число)')
        else:
            task = Tests10Class(number_of_task=self.number_of_task % NUMBER_OF_TASKS_10_CLASS,
                                already_been=self.already_been)
            if task.return_task['success']:
                self.info = task.return_task['payload']
                logging.info('Task Info: ' +
                             str(self.info['request']) + ' ' +
                             str(self.info['find']) + ' ' +
                             str(self.info['question']) + ' ' +
                             str(self.info['answer']))
                text_task = self.get_text_task_10_11_class()
                self.answer_edit = QLineEdit()
                self.answer_edit.setPlaceholderText('Введите сюда ваш ответ (только число)')
        # Right-hand button bar; wiring depends on the current mode.
        buttons_right = QHBoxLayout()
        buttons_right.setSpacing(10)
        buttons_right.addStretch(1)
        continue_button = QPushButton('Далее')
        continue_button.setStyleSheet("background-color: rgb(63, 137, 255)")
        exit_button = QPushButton('Завершить')
        exit_button.setStyleSheet("background-color: rgb(244, 29, 29)")
        if self.class_of_tasks == 'preview task':
            continue_button.clicked.connect(self.before_teacher_option_task_usual)
            exit_button.clicked.connect(self.before_exit_button_click)
        elif self.class_of_tasks == 'final preview task':
            continue_button.clicked.connect(self.answer_task)
            exit_button.clicked.connect(self.get_teacher_option_final_window)
        else:
            continue_button.clicked.connect(self.answer_task)
            exit_button.clicked.connect(self.exit_button_click)
        buttons_right.addWidget(exit_button)
        buttons_right.addWidget(continue_button)
        # Preview mode gets extra navigation buttons on the left.
        if self.class_of_tasks == 'preview task':
            buttons_left = QHBoxLayout()
            buttons_left.setSpacing(60)
            forward_button = QPushButton('Назад')
            forward_button.setStyleSheet("background-color: rgb(223, 209, 21)")
            forward_button.clicked.connect(self.teacher_option_task_usual)
            answer_button = QPushButton('Окно ответа')
            answer_button.setStyleSheet("background-color: rgb(223, 209, 21)")
            answer_button.clicked.connect(self.answer_task)
            buttons_left.addWidget(forward_button)
            buttons_left.addWidget(answer_button)
        grid.addWidget(title, 0, 0)
        if self.class_of_tasks == 'preview task' or self.class_of_tasks == 'final preview task' \
                or self.class_of_tasks == 'teacher task':
            # 'other' payloads produce a QLabel (widget); table payloads a layout.
            if self.task['other']['no table']:
                grid.addWidget(text_task, 1, 0)
            else:
                grid.addLayout(text_task, 1, 0)
            if self.class_of_tasks == 'preview task':
                grid.addLayout(buttons_left, 2, 0)
            elif self.class_of_tasks != 'final preview task':
                grid.addWidget(self.answer_edit, 2, 0)
        else:
            grid.addLayout(text_task, 1, 0)
            grid.addWidget(self.answer_edit, 2, 0)
        grid.addWidget(self.overlay_photo('new', None), 1, 1)
        grid.addLayout(buttons_right, 2, 1)
        # Replace the previous layout with the freshly built one.
        if self.layout() is not None:
            self.delete_items_of_layout(self.layout())
            sip.delete(self.layout())
        logging.info('Set layout in task')
        self.setLayout(grid)
def get_text_task_8_9_class(self):
text_task = QVBoxLayout()
#text_task.addStretch(1)
title_request = QLabel('Запросы к поисковому серверу')
title_request.setFont(QFont('Montserrat Medium', 16))
title_request.setAlignment(Qt.AlignCenter)
text_task.addWidget(title_request)
for letter, request in self.info['request'].items():
request_text, request_number = request.split(';')
self.requests[request_text] = request_number
line = QLabel(letter + ': ' + request_text)
line.setFont(QFont('Montserrat Medium', 14))
line.setFrameStyle(QFrame.Box)
text_task.addWidget(line)
text_question = self.info['question']
question = QLabel(f"Напишите буквы в порядке {text_question}\nколичества страниц, найденых сервером.")
question.setFont(QFont('Montserrat Medium', 14))
question.setAlignment(Qt.AlignCenter)
text_task.addWidget(question)
return text_task
def get_text_task_10_11_class(self):
text_task = QGridLayout()
title_find = QLabel('Найдено страниц')
title_request = QLabel('Запрос')
for item in [title_request, title_find]:
item.setFont(QFont('Montserrat Medium', 14))
item.setFrameStyle(QFrame.Box)
text_task.addWidget(title_request, 0, 0)
text_task.addWidget(title_find, 0, 1)
row = 1
for request, find in zip(self.info['request'], self.info['find']):
position = 0
for item in [QLabel(request), QLabel(str(find))]:
item.setFont(QFont('Montserrat Medium', 14))
item.setFrameStyle(QFrame.Box)
text_task.addWidget(item, row, position)
position += 1
row += 1
text_question, self.number_question = (self.info['question'].split(';'))
question = QLabel(f"Найти: {text_question}")
question.setFont(QFont('Montserrat Medium', 14))
question.setAlignment(Qt.AlignCenter)
text_task.addWidget(question, row, 0)
return text_task
def answer_task(self):
if self.class_of_tasks == '8-9 класс' or \
((self.class_of_tasks == 'preview task' or self.class_of_tasks == 'final preview task'
or self.class_of_tasks == 'teacher task') and
self.task['8-9 class']['table 8-9 class']):
self.answer_task_9()
else:
self.answer_task_10()
def answer_task_9(self):
if self.class_of_tasks == '8-9 класс' or self.class_of_tasks == 'teacher task':
if len(self.answer_edit.text()) < 4:
self.on_error('В ответе должна содержаться соответствующая\nвопросу последовательность букв!')
return
if self.answer_edit.text().upper() == self.info['answer'].upper():
logging.info('Right answer')
title = QLabel('Верный ответ!')
title.setStyleSheet("color: green")
if self.is_new_task:
self.right_tasks += 1
else:
logging.info('Wrong answer')
title = QLabel('Неправильный ответ!')
title.setStyleSheet("color: red")
self.is_new_task = False
fontTitle = QFont("Montserrat Medium", 20)
fontTitle.setBold(True)
title.setFont(fontTitle)
title.setAlignment(Qt.AlignCenter)
decision_status = QVBoxLayout()
decision_status.setSpacing(1)
decision_status.addStretch(1)
procent_of_right = self.right_tasks * 100 // self.number_of_task
procent = QLabel(f'Верно решёных - {str(procent_of_right)} %')
procent.setFont(QFont("Montserrat Medium", 14))
result = QLabel('Оптимальный результат - более 90 %')
result.setFont(QFont("Montserrat Medium", 14))
if procent_of_right >= 90:
result.setStyleSheet("color: green")
else:
result.setStyleSheet("color: red")
decision_status.addWidget(procent)
decision_status.addWidget(result)
else:
title = QLabel('Окно ответа')
fontTitle = QFont("Montserrat Medium", 20)
fontTitle.setBold(True)
title.setFont(fontTitle)
title.setAlignment(Qt.AlignCenter)
grid = QGridLayout()
explanation_block = QVBoxLayout()
explanation_block.setSpacing(10)
if (self.class_of_tasks == 'preview task' or self.class_of_tasks == 'final preview task') \
and not self.info['auth explanation']:
explanation = ''
number_of_letter = 0
text_explanation = self.info['explanation']
while number_of_letter + 40 <= len(text_explanation) - 1:
row_long = 40
while text_explanation[number_of_letter + row_long] != ' ':
row_long -= 1
if number_of_letter < 0:
explanation += text_explanation[0:number_of_letter + row_long] + '\n'
else:
explanation += text_explanation[number_of_letter:number_of_letter + row_long] + '\n'
number_of_letter += row_long + 1
try:
explanation += self.info['explanation'][number_of_letter:]
except IndexError:
pass
exp = QLabel(explanation)
exp.setFont(QFont("Montserrat Medium", 14))
explanation_block.addWidget(exp)
else:
explanation = QLabel(f'Давайте рассмотрим области кругов Эйлера,\n' +
f'которые покрывает каждый поисковый запрос\n' +
f'и расположим их в порядке {self.info["question"]}:')
explanation.setFont(QFont("Montserrat Medium", 14))
explanation.setAlignment(Qt.AlignCenter)
explanation_block.addWidget(explanation)
explanation_grid = QGridLayout()
row = 0
for letter in self.info['answer']:
let = QLabel(letter + ': ')
let.setFont(QFont("Montserrat Medium", 14))
let.setAlignment(Qt.AlignCenter)
text_bnt = self.info['request'][letter].split(';')[0].split('&')
text_bnt_with_union = text_bnt[0]
for union_number in range(1, len(text_bnt)):
text_bnt_with_union += '&&' + text_bnt[union_number]
btn = QPushButton(text_bnt_with_union)
btn.setStyleSheet("color: blue; font: 14pt Montserrat-Medium")
btn.clicked.connect(self.explanation_button_click)
explanation_grid.addWidget(let, row, 0)
explanation_grid.addWidget(btn, row, 1, 1, 5)
row += 1
explanation_block.addLayout(explanation_grid)
end = QLabel(f'Получив в ответе - {self.info["answer"]}')
end.setFont(QFont("Montserrat Medium", 14))
end.setAlignment(Qt.AlignCenter)
explanation_block.addWidget(end)
if self.class_of_tasks != 'preview task' and self.class_of_tasks != 'final preview task':
question_of_task = QLabel('Возник вопрос по заданию? Задайте его нам')
question_of_task.setFont(QFont("Montserrat Medium", 12))
question_of_task.setStyleSheet("text-decoration: underline; color: blue;")
question_of_task.mousePressEvent = self.question_button_click
buttons_right = QHBoxLayout()
buttons_right.setSpacing(10)
buttons_right.addStretch(1)
continue_button = QPushButton('Продолжить')
continue_button.setStyleSheet("background-color: rgb(63, 137, 255)")
exit_button = QPushButton('Завершить')
exit_button.setStyleSheet("background-color: rgb(244, 29, 29)")
if self.class_of_tasks == 'preview task':
continue_button.clicked.connect(self.before_teacher_option_task_usual)
exit_button.clicked.connect(self.exit_button_click)
elif self.class_of_tasks == 'final preview task':
continue_button.clicked.connect(self.get_teacher_option_final_window)
exit_button.clicked.connect(self.get_teacher_option_final_window)
else:
if self.class_of_tasks == 'teacher task':
continue_button.clicked.connect(self.get_teacher_tasks)
else:
continue_button.clicked.connect(self.new_task)
exit_button.clicked.connect(self.exit_button_click)
buttons_right.addWidget(exit_button)
buttons_right.addWidget(continue_button)
if self.class_of_tasks == 'preview task':
buttons_left = QHBoxLayout()
buttons_left.setSpacing(60)
forward_button = QPushButton('Назад')
forward_button.setStyleSheet("background-color: rgb(223, 209, 21)")
forward_button.clicked.connect(self.teacher_option_task_usual)
answer_button = QPushButton('Окно задачи')
answer_button.setStyleSheet("background-color: rgb(223, 209, 21)")
answer_button.clicked.connect(self.new_task)
buttons_left.addWidget(forward_button)
buttons_left.addWidget(answer_button)
grid.addWidget(title, 0, 0)
grid.addLayout(explanation_block, 1, 0)
grid.addLayout(buttons_right, 2, 1)
grid.addWidget(self.overlay_photo('answer', self.answer_photo), 1, 1)
if self.class_of_tasks != 'preview task' and self.class_of_tasks != 'final preview task':
grid.addLayout(decision_status, 0, 1)
grid.addWidget(question_of_task, 2, 0)
elif self.class_of_tasks != 'final preview window':
grid.addLayout(buttons_left, 2, 0)
if self.layout() is not None:
self.delete_items_of_layout(self.layout())
sip.delete(self.layout())
logging.info('Set layout in answer')
self.setLayout(grid)
    def answer_task_10(self):
        """Show the answer/explanation window for a 10-11 class (numeric page
        count) task.

        Non-preview modes grade the entered number first; preview mode skips
        grading. The explanation text is word-wrapped and the matching Euler
        circle image is rendered via overlay_photo(). Replaces the window's
        current layout in place.
        """
        if self.answer_edit.text() == '':
            self.on_error('Введите ответ на задачу!')
            return
        grid = QGridLayout()
        if self.class_of_tasks != 'preview task':
            # NOTE(review): nothing in this try obviously raises ValueError —
            # the comparison works on strings; confirm the except is reachable.
            try:
                if self.answer_edit.text() == str(self.info['answer']):
                    logging.info('Right answer')
                    title = QLabel('Верный ответ!')
                    title.setStyleSheet("color: green")
                    # Count a task as solved only on its first attempt.
                    if self.is_new_task:
                        self.right_tasks += 1
                        self.right_tasks_numbers.append(self.number_of_task)
                else:
                    logging.info('Wrong answer')
                    title = QLabel('Неправильный ответ!')
                    title.setStyleSheet("color: red")
                self.is_new_task = False
                fontTitle = QFont("Montserrat Medium", 20)
                fontTitle.setBold(True)
                title.setFont(fontTitle)
                title.setAlignment(Qt.AlignCenter)
            except ValueError:
                self.on_error('В ответе должно содержаться одно число -\nколичество страниц найденых по запросу')
                return
            # Running statistics shown in the top-right corner.
            decision_status = QVBoxLayout()
            decision_status.setSpacing(1)
            decision_status.addStretch(1)
            procent_of_right = self.right_tasks*100 // self.number_of_task
            procent = QLabel(f'Верно решёных - {str(procent_of_right)} %')
            procent.setFont(QFont("Montserrat Medium", 14))
            result = QLabel('Оптимальный результат - более 90 %')
            result.setFont(QFont("Montserrat Medium", 14))
            if procent_of_right >= 90:
                result.setStyleSheet("color: green")
            else:
                result.setStyleSheet("color: red")
            decision_status.addWidget(procent)
            decision_status.addWidget(result)
        else:
            title = QLabel('Окно ответа')
            fontTitle = QFont("Montserrat Medium", 20)
            fontTitle.setBold(True)
            title.setFont(fontTitle)
            title.setAlignment(Qt.AlignCenter)
        # Manual ~40-character word wrap of the explanation text.
        # NOTE(review): unlike new_task, the inner while has no row_long <= 0
        # guard, so a single word longer than 40 characters would loop/underflow
        # — confirm inputs can never contain one.
        explanation = ''
        number_of_letter = 0
        text_explanation = self.info['explanation']
        while number_of_letter + 40 <= len(text_explanation) - 1:
            row_long = 40
            while text_explanation[number_of_letter + row_long] != ' ':
                row_long -= 1
            if number_of_letter < 0:
                explanation += text_explanation[0:number_of_letter + row_long] + '\n'
            else:
                explanation += text_explanation[number_of_letter:number_of_letter + row_long] + '\n'
            number_of_letter += row_long + 1
        try:
            explanation += self.info['explanation'][number_of_letter:]
        except IndexError:
            pass
        exp = QLabel(explanation)
        exp.setFont(QFont("Montserrat Medium", 14))
        if self.class_of_tasks != 'preview task':
            question_of_task = QLabel('Возник вопрос по заданию? Задайте его нам')
            question_of_task.setFont(QFont("Montserrat Medium", 12))
            question_of_task.setStyleSheet("text-decoration: underline; color: blue;")
            question_of_task.mousePressEvent = self.question_button_click
        buttons_right = QHBoxLayout()
        buttons_right.setSpacing(10)
        buttons_right.addStretch(1)
        continue_button = QPushButton('Продолжить')
        continue_button.setStyleSheet("background-color: rgb(63, 137, 255)")
        exit_button = QPushButton('Завершить')
        exit_button.setStyleSheet("background-color: rgb(244, 29, 29)")
        if self.class_of_tasks == 'preview task':
            continue_button.clicked.connect(self.before_teacher_option_task_usual)
            exit_button.clicked.connect(self.exit_button_click)
        elif self.class_of_tasks == 'final preview task':
            continue_button.clicked.connect(self.get_teacher_option_final_window)
            exit_button.clicked.connect(self.get_teacher_option_final_window)
        else:
            if self.class_of_tasks == 'teacher task':
                continue_button.clicked.connect(self.get_teacher_tasks)
            else:
                continue_button.clicked.connect(self.new_task)
            exit_button.clicked.connect(self.exit_button_click)
        buttons_right.addWidget(exit_button)
        buttons_right.addWidget(continue_button)
        if self.class_of_tasks == 'preview task':
            buttons_left = QHBoxLayout()
            buttons_left.setSpacing(60)
            forward_button = QPushButton('Назад')
            forward_button.setStyleSheet("background-color: rgb(223, 209, 21)")
            forward_button.clicked.connect(self.teacher_option_task_usual)
            answer_button = QPushButton('Окно задачи')
            answer_button.setStyleSheet("background-color: rgb(223, 209, 21)")
            answer_button.clicked.connect(self.new_task)
            buttons_left.addWidget(forward_button)
            buttons_left.addWidget(answer_button)
        grid.addWidget(title, 0, 0)
        grid.addWidget(exp, 1, 0)
        grid.addLayout(buttons_right, 2, 1)
        if self.class_of_tasks != 'preview task':
            # Teacher-authored tasks may carry explicit circle sectors; generated
            # tasks use the sector code parsed from the question string.
            if self.class_of_tasks == 'teacher task':
                if self.task['10-11 class']['table 10-11 class']:
                    grid.addWidget(self.overlay_photo('answer', self.number_question), 1, 1)
                else:
                    grid.addWidget(self.overlay_photo('answer', self.info['sectors circles']), 1, 1)
            else:
                grid.addWidget(self.overlay_photo('answer', self.number_question), 1, 1)
            if self.class_of_tasks != 'final preview task':
                grid.addLayout(decision_status, 0, 1)
                grid.addWidget(question_of_task, 2, 0)
        else:
            if self.task['10-11 class']['table 10-11 class']:
                grid.addWidget(self.overlay_photo('answer', self.number_question), 1, 1)
            else:
                grid.addWidget(self.overlay_photo('answer', self.info['sectors circles']), 1, 1)
            grid.addLayout(buttons_left, 2, 0)
        if self.layout() is not None:
            self.delete_items_of_layout(self.layout())
            sip.delete(self.layout())
        logging.info('Set layout in answer')
        self.setLayout(grid)
def explanation_button_click(self):
sender = self.sender().text().split('&')
request_text = sender[0]
for i in range(1, len(sender)):
if sender[i] != '':
request_text += '&' + sender[i]
self.answer_photo = self.requests[str(request_text)]
self.answer_task_9()
    def before_exit_button_click(self):
        # Mark the current preview task as not yet stored, so exit_button_click()
        # knows it still has to append it to self.teacher_tasks.
        self.teacher_tasks_appended = False
        self.exit_button_click()
    def exit_button_click(self):
        """Confirm finishing the current activity and route to the next screen.

        preview task  -> store the previewed task (once) and open the teacher
                         summary window;
        teacher task  -> send the results e-mail;
        anything else -> return to the main menu.
        """
        logging.info('Exit button click')
        if self.class_of_tasks == 'preview task':
            text = 'создание варианта'
        else:
            text = 'тестирование'
        reply = QMessageBox.question(self, 'Message',
                                     f"Вы уверены, что хотите завершить {text}?", QMessageBox.Yes |
                                     QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            logging.info('User answer - YES')
            if self.class_of_tasks == 'preview task':
                # Append the previewed task exactly once; the flag is reset in
                # before_exit_button_click().
                if not self.teacher_tasks_appended:
                    try:
                        self.teacher_tasks.append(self.task)
                        self.teacher_tasks_appended = True
                    except Exception as e:
                        logging.error(e)
                else:
                    # Already stored earlier: undo the counter bump for this visit.
                    self.number_of_task -= 1
                logging.info('Run to teacher option final window')
                self.teacher_option_final_window()
            elif self.class_of_tasks == 'teacher task':
                logging.info('Run to sending email teacher option')
                self.send_email_teacher_option()
            else:
                logging.info('Return to menu')
                self.menu()
        else:
            logging.info('User answer - NO')
    def question_button_click(self, event):
        """Mouse-press handler for the "ask a question" link: rebuild the window
        as a question form (one question allowed per task).

        NOTE(review): this closes the window and calls super().__init__() on the
        live instance to re-initialise the widget in place — a fragile pattern;
        link_clicked makes closeEvent() skip the exit confirmation for this close.
        """
        if self.user_can_ask_a_question:
            self.link_clicked = True
            self.close()
            # Re-initialise the underlying QWidget so a fresh layout can be set.
            super().__init__()
            self.setAutoFillBackground(True)
            p = self.palette()
            p.setColor(self.backgroundRole(), QColor(self.backgroundRad, self.backgroundGreen, self.backgroundBlue))
            self.setPalette(p)
            logging.info(f'Set background rgb{self.backgroundRad, self.backgroundGreen, self.backgroundBlue}')
            title = QLabel('Введите ваш вопрос в это поле')
            fontTitle = QFont("Montserrat Medium", 20)
            fontTitle.setBold(True)
            title.setFont(fontTitle)
            title.setAlignment(Qt.AlignCenter)
            self.question_edit = QTextEdit()
            self.question_edit.setPlaceholderText('Введите сюда ваш вопрос')
            buttons = QHBoxLayout()
            buttons.addStretch(5)
            buttons.addSpacing(10)
            cancel_button = QPushButton('Отмена')
            continue_button = QPushButton('Продолжить')
            cancel_button.setStyleSheet("background-color: rgb(223, 209, 21)")
            continue_button.setStyleSheet("background-color: rgb(223, 209, 21)")
            # Both buttons share one slot; it distinguishes them by sender text.
            cancel_button.clicked.connect(self.continue_question_button_click)
            continue_button.clicked.connect(self.continue_question_button_click)
            buttons.addWidget(cancel_button)
            buttons.addWidget(continue_button)
            content = QVBoxLayout(self)
            content.addSpacing(10)
            content.addWidget(title)
            content.addWidget(self.question_edit)
            content.addLayout(buttons)
            self.setLayout(content)
            self.adjustSize()
            self.setGeometry(self.frameGeometry())
            self.move(150, 150)
            self.setWindowTitle('Ask a question')
            self.show()
        else:
            self.on_error('Вы не можете задавать более\n1 вопроса по задаче')
def continue_question_button_click(self):
sender = self.sender()
if sender.text() == 'Отмена':
if self.class_of_tasks == 'teacher window':
self.get_teacher_option_final_window()
else:
self.answer_task()
return
if self.question_edit.toPlainText() != '':
if user_information['success']:
add_user_question_for_task(user_information['payload']['email'],
self.question_edit.toPlainText(),
self.class_of_tasks,
self.number_of_task)
else:
self.on_error('Вопрос могут задавать только\n зарегестрированные пользователи')
return
else:
self.on_error('Пожалуйста, введите вопрос')
return
self.user_can_ask_a_question = False
title = QLabel("Спсибо за ваш вопрос!\nВ скором времени мы вам ответим.", self)
fontTitle = QFont("Montserrat Medium", 20)
fontTitle.setBold(True)
title.setFont(fontTitle)
title.setAlignment(Qt.AlignCenter)
btn = QPushButton('Вернуться')
btn.setStyleSheet("background-color: rgb(223, 209, 21)")
if self.class_of_tasks == 'teacher window':
btn.clicked.connect(self.get_teacher_option_final_window)
else:
btn.clicked.connect(self.answer_task)
self.delete_items_of_layout(self.layout())
if self.layout() is not None:
sip.delete(self.layout())
box = QVBoxLayout()
box.addWidget(title)
box.addWidget(btn)
self.setLayout(box)
    def overlay_photo(self, status_task, overlay):
        """Compose the Euler-circle image for the current task and return it as
        a QLabel.

        status_task: 'new' shows the base circles; 'answer' highlights sectors.
        overlay: which sector image(s) to paste — 'all', '', 'four_all',
        'four_center', or an iterable of per-sector codes (file name stems
        under photo/taskCircles/). The circle captions come from the current
        task's options/requests.
        """
        # Pick the caption set for the circles.
        # NOTE(review): the `or` here means the special '|'-split branch only
        # runs for task 1 of the 10-11 cycle — confirm `or` (not `and`) is the
        # intended operator.
        if self.number_of_task % NUMBER_OF_TASKS_9_CLASS != 1 or self.class_of_tasks != '10-11 класс':
            if self.class_of_tasks == '10-11 класс':
                names = [self.info['request'][i] for i in range(3)]
            else:
                names = self.info['options']
        else:
            names = self.info['request'][0].split('|')
        if status_task == 'new':
            img = Image.open('photo/taskCircles/all.png')
            # Remember shown caption sets to avoid repeats; reset after 12.
            if self.class_of_tasks != 'preview task' and len(self.already_been) < 12:
                self.already_been += [names]
            else:
                self.already_been = []
        else:
            if overlay == 'all' or overlay == '':
                img = Image.open('photo/taskCircles/all.png')
            elif overlay == 'four_all':
                img = Image.open('photo/taskCircles/four_all.png')
            elif overlay == 'four_center':
                img = Image.open('photo/taskCircles/four_all_grey.png')
                sector = Image.open(f'photo/taskCircles/four_center.png').convert("RGBA")
                # Paste with the sector itself as mask so transparency is kept.
                img.paste(sector, None, sector)
            else:
                # Grey base with one highlighted sector image per code in overlay.
                img = Image.open('photo/taskCircles/all_grey.png').convert("RGBA")
                for number_photo in overlay:
                    sector = Image.open(f'photo/taskCircles/{number_photo}.png').convert("RGBA")
                    img.paste(sector, None, sector)
        # Draw the captions at fixed positions; the four-circle variants get a
        # fourth caption and shifted bottom row.
        draw = ImageDraw.Draw(img)
        draw.text((65, 120), names[0], fill=(0, 0, 0),
                  font=ImageFont.truetype("fonts/Montserrat-Medium.ttf", 16))
        draw.text((250, 120), names[1], fill=(0, 0, 0),
                  font=ImageFont.truetype("fonts/Montserrat-Medium.ttf", 16))
        if overlay == 'four_all' or overlay == 'four_center':
            draw.text((65, 270), names[2], fill=(0, 0, 0),
                      font=ImageFont.truetype("fonts/Montserrat-Medium.ttf", 16))
            draw.text((250, 270), names[3], fill=(0, 0, 0),
                      font=ImageFont.truetype("fonts/Montserrat-Medium.ttf", 16))
        else:
            draw.text((150, 270), names[2], fill=(0, 0, 0),
                      font=ImageFont.truetype("fonts/Montserrat-Medium.ttf", 16))
        # The composed image is written to a scratch file and loaded as a pixmap.
        namePhoto = 'photo/taskCircles/newTask.png'
        img.save(namePhoto)
        photo = QLabel()
        pixmap = QPixmap(namePhoto)
        pixmap2 = pixmap.scaled(390, 390, Qt.KeepAspectRatio)
        photo.setPixmap(pixmap2)
        logging.info(f"Add photo '{namePhoto}' in answer window")
        return photo
    def closeEvent(self, event):
        """Intercept window close: ask for confirmation only when the close was
        user-initiated (title bar), not programmatic."""
        sender = self.sender()
        # A non-None sender means the close came from a widget signal, and
        # link_clicked marks the self.close() issued in question_button_click;
        # in both cases accept silently without asking.
        if sender is not None or self.link_clicked:
            self.link_clicked = False
            event.accept()
            return
        logging.info('Close event')
        reply = QMessageBox.question(self, 'Message',
                                     "Вы уверены, что хотите выйти?", QMessageBox.Yes |
                                     QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            logging.info('User answer - YES')
            logging.info('Close app')
            event.accept()
        else:
            logging.info('User answer - NO')
            event.ignore()
def delete_items_of_layout(self, layout):
if layout is not None:
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.setParent(None)
else:
self.delete_items_of_layout(item.layout())
def on_exception(self, e):
self.delete_items_of_layout(self.layout())
if check_network_connection():
error_message = QLabel('Извините, возникла какая-то ошибка\n'
'Нажмите назад, чтобы вернуться назад', self)
else:
error_message = QLabel('Пожалуйста, проверьте ваше Интернет соединение\n'
'Нажмите назад, чтобы вернуться назад', self)
error_message.setAlignment(Qt.AlignCenter)
error_message.setFont(QFont("Montserrat Bold", 20))
hbox = QHBoxLayout()
btn = QPushButton('Назад', self)
btn.clicked.connect(self.error_button)
hbox.addWidget(btn)
self.layout().addWidget(error_message)
self.layout().addChildLayout(hbox)
logging.error('An error has occurred : ' + str(e))
def on_error(self, e):
logging.error('An error has occurred ' + str(e))
QMessageBox().critical(self, 'Внимание!', e)
if __name__ == '__main__':
app = QApplication(sys.argv)
logging.info('Start app')
ex = Main()
sys.exit(app.exec_())
| 43.25974
| 124
| 0.597418
|
4a052cb3d4b9137cec8e595a7a7641f18f91e274
| 400
|
py
|
Python
|
device/actuator/ACDCI_UC8/act_server/actions/delete.py
|
seanbrhn3/openc2-oif-device
|
805c55e44f59f7c86c5bf347a6fc3787e6903df4
|
[
"Apache-2.0"
] | null | null | null |
device/actuator/ACDCI_UC8/act_server/actions/delete.py
|
seanbrhn3/openc2-oif-device
|
805c55e44f59f7c86c5bf347a6fc3787e6903df4
|
[
"Apache-2.0"
] | null | null | null |
device/actuator/ACDCI_UC8/act_server/actions/delete.py
|
seanbrhn3/openc2-oif-device
|
805c55e44f59f7c86c5bf347a6fc3787e6903df4
|
[
"Apache-2.0"
] | null | null | null |
"""
Delete Target functions
"""
from ..utils import Dispatch, exceptions
Delete = Dispatch("delete")
@Delete.register
def default(*extra_args, **extra_kwargs):
return exceptions.target_not_implemented()
@Delete.register(key="acdci_lw:drone")
def drone(act, target=[], args={}, *extra_args, **extra_kwargs):
# return exceptions.not_implemented()
return dict(
status=200
)
| 20
| 64
| 0.7025
|
4a052cbda8b72bff502871302d3c6770e741140d
| 4,719
|
py
|
Python
|
pontoon/teams/tests/test_utils.py
|
udacity/pontoon
|
e15a03a0c987615385b2a8c537bb18c99567f77e
|
[
"BSD-3-Clause"
] | 1
|
2018-12-24T11:15:35.000Z
|
2018-12-24T11:15:35.000Z
|
pontoon/teams/tests/test_utils.py
|
udacity/pontoon
|
e15a03a0c987615385b2a8c537bb18c99567f77e
|
[
"BSD-3-Clause"
] | 9
|
2020-09-06T05:18:03.000Z
|
2022-02-26T14:28:38.000Z
|
pontoon/teams/tests/test_utils.py
|
udacity/pontoon
|
e15a03a0c987615385b2a8c537bb18c99567f77e
|
[
"BSD-3-Clause"
] | 1
|
2019-05-25T23:24:42.000Z
|
2019-05-25T23:24:42.000Z
|
"""
Tests related to the utils provided in pontoon.teams.libraries
"""
import pytest
from pontoon.base.models import (
PermissionChangelog,
)
from pontoon.teams.utils import (
log_user_groups,
log_group_members,
)
from pontoon.test.factories import (
GroupFactory,
UserFactory,
)
@pytest.fixture
def translators_group():
return GroupFactory.create(
name='some translators',
)
@pytest.mark.django_db
def test_log_group_members_empty(translators_group, user_a):
log_group_members(translators_group, user_a, ([], []))
assert list(PermissionChangelog.objects.all()) == []
@pytest.mark.django_db
def test_log_group_members_added(
translators_group,
user_a,
assert_permissionchangelog
):
member0, member1, member2 = UserFactory.create_batch(size=3)
added_members = [member0, member2]
log_group_members(user_a, translators_group, (added_members, []))
changelog_entry0, changelog_entry1 = PermissionChangelog.objects.all()
assert_permissionchangelog(
changelog_entry0,
'added',
user_a,
member0,
translators_group
)
assert_permissionchangelog(
changelog_entry1,
'added',
user_a,
member2,
translators_group
)
@pytest.mark.django_db
def test_log_group_members_removed(
translators_group,
user_a,
assert_permissionchangelog
):
member0, member1, member2 = UserFactory.create_batch(size=3)
removed_members = [member0, member2]
log_group_members(user_a, translators_group, ([], removed_members))
changelog_entry0, changelog_entry1 = PermissionChangelog.objects.all()
assert_permissionchangelog(
changelog_entry0,
'removed',
user_a,
member0,
translators_group
)
assert_permissionchangelog(
changelog_entry1,
'removed',
user_a,
member2,
translators_group
)
@pytest.mark.django_db
def test_log_group_members_mixed(
translators_group,
user_a,
assert_permissionchangelog
):
member0, member1, member2 = UserFactory.create_batch(size=3)
added_members = [member2]
removed_members = [member0]
log_group_members(
user_a,
translators_group,
(added_members, removed_members)
)
changelog_entry0, changelog_entry1 = PermissionChangelog.objects.all()
assert_permissionchangelog(
changelog_entry0,
'added',
user_a,
member2,
translators_group
)
assert_permissionchangelog(
changelog_entry1,
'removed',
user_a,
member0,
translators_group
)
@pytest.mark.django_db
def test_log_user_groups_empty(user_a, user_b):
log_user_groups(user_a, user_b, ([], []))
assert list(PermissionChangelog.objects.all()) == []
@pytest.mark.django_db
def test_log_user_groups_added(
user_a,
user_b,
assert_permissionchangelog,
):
group0, group1, group2 = GroupFactory.create_batch(size=3)
added_groups = [group0, group2]
log_user_groups(user_a, user_b, (added_groups, []))
changelog_entry0, changelog_entry1 = PermissionChangelog.objects.all()
assert_permissionchangelog(
changelog_entry0,
'added',
user_a,
user_b,
group0
)
assert_permissionchangelog(
changelog_entry1,
'added',
user_a,
user_b,
group2
)
@pytest.mark.django_db
def test_log_user_groups_removed(
user_a,
user_b,
assert_permissionchangelog,
):
group0, group1, group2 = GroupFactory.create_batch(size=3)
removed_members = [group0, group2]
log_user_groups(user_a, user_b, ([], removed_members))
changelog_entry0, changelog_entry1 = PermissionChangelog.objects.all()
assert_permissionchangelog(
changelog_entry0,
'removed',
user_a,
user_b,
group0
)
assert_permissionchangelog(
changelog_entry1,
'removed',
user_a,
user_b,
group2
)
@pytest.mark.django_db
def test_log_user_groups_mixed(
user_a,
user_b,
assert_permissionchangelog,
):
group0, group1, group2 = GroupFactory.create_batch(size=3)
added_groups = [group2]
removed_groups = [group0]
log_user_groups(user_a, user_b, (added_groups, removed_groups))
changelog_entry0, changelog_entry1 = PermissionChangelog.objects.all()
assert_permissionchangelog(
changelog_entry0,
'added',
user_a,
user_b,
group2
)
assert_permissionchangelog(
changelog_entry1,
'removed',
user_a,
user_b,
group0
)
| 21.352941
| 74
| 0.667726
|
4a052d4b4af247f8ac768384357c005bc5d64405
| 4,320
|
py
|
Python
|
airflow/contrib/operators/mongo_to_s3.py
|
ltxhxpdd123/gateway-airflow
|
b839509d21a985c7a46fb5c6d54a4b77f1bbc4ae
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2019-08-21T07:30:14.000Z
|
2019-08-21T07:30:14.000Z
|
airflow/contrib/operators/mongo_to_s3.py
|
ltxhxpdd123/gateway-airflow
|
b839509d21a985c7a46fb5c6d54a4b77f1bbc4ae
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 6
|
2020-07-07T20:12:49.000Z
|
2021-09-29T17:26:42.000Z
|
airflow/contrib/operators/mongo_to_s3.py
|
ltxhxpdd123/gateway-airflow
|
b839509d21a985c7a46fb5c6d54a4b77f1bbc4ae
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2019-10-25T04:22:14.000Z
|
2019-10-25T04:22:14.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from airflow.contrib.hooks.mongo_hook import MongoHook
from airflow.hooks.S3_hook import S3Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from bson import json_util
class MongoToS3Operator(BaseOperator):
"""
Mongo -> S3
A more specific baseOperator meant to move data
from mongo via pymongo to s3 via boto
things to note
.execute() is written to depend on .transform()
.transform() is meant to be extended by child classes
to perform transformations unique to those operators needs
"""
template_fields = ['s3_key', 'mongo_query']
# pylint: disable=too-many-instance-attributes
@apply_defaults
def __init__(self,
mongo_conn_id,
s3_conn_id,
mongo_collection,
mongo_query,
s3_bucket,
s3_key,
mongo_db=None,
*args, **kwargs):
super(MongoToS3Operator, self).__init__(*args, **kwargs)
# Conn Ids
self.mongo_conn_id = mongo_conn_id
self.s3_conn_id = s3_conn_id
# Mongo Query Settings
self.mongo_db = mongo_db
self.mongo_collection = mongo_collection
# Grab query and determine if we need to run an aggregate pipeline
self.mongo_query = mongo_query
self.is_pipeline = True if isinstance(
self.mongo_query, list) else False
# S3 Settings
self.s3_bucket = s3_bucket
self.s3_key = s3_key
# KWARGS
self.replace = kwargs.pop('replace', False)
def execute(self, context):
"""
Executed by task_instance at runtime
"""
s3_conn = S3Hook(self.s3_conn_id)
# Grab collection and execute query according to whether or not it is a pipeline
if self.is_pipeline:
results = MongoHook(self.mongo_conn_id).aggregate(
mongo_collection=self.mongo_collection,
aggregate_query=self.mongo_query,
mongo_db=self.mongo_db
)
else:
results = MongoHook(self.mongo_conn_id).find(
mongo_collection=self.mongo_collection,
query=self.mongo_query,
mongo_db=self.mongo_db
)
# Performs transform then stringifies the docs results into json format
docs_str = self._stringify(self.transform(results))
# Load Into S3
s3_conn.load_string(
string_data=docs_str,
key=self.s3_key,
bucket_name=self.s3_bucket,
replace=self.replace
)
return True
@staticmethod
def _stringify(iterable, joinable='\n'):
"""
Takes an iterable (pymongo Cursor or Array) containing dictionaries and
returns a stringified version using python join
"""
return joinable.join(
[json.dumps(doc, default=json_util.default) for doc in iterable]
)
@staticmethod
def transform(docs):
"""
Processes pyMongo cursor and returns an iterable with each element being
a JSON serializable dictionary
Base transform() assumes no processing is needed
ie. docs is a pyMongo cursor of documents and cursor just
needs to be passed through
Override this method for custom transformations
"""
return docs
| 33.488372
| 88
| 0.639815
|
4a052e47a7d9949161fd4edbc12c0d5faa27ff82
| 295
|
py
|
Python
|
tests/eth2/beacon/types/test_voluntary_exits.py
|
mhchia/trinity
|
e40e475064ca4605887706e9b0e4f8e2349b10cd
|
[
"MIT"
] | 2
|
2020-01-30T21:51:00.000Z
|
2020-07-22T14:51:05.000Z
|
tests/eth2/beacon/types/test_voluntary_exits.py
|
mhchia/trinity
|
e40e475064ca4605887706e9b0e4f8e2349b10cd
|
[
"MIT"
] | null | null | null |
tests/eth2/beacon/types/test_voluntary_exits.py
|
mhchia/trinity
|
e40e475064ca4605887706e9b0e4f8e2349b10cd
|
[
"MIT"
] | null | null | null |
import ssz
from eth2.beacon.types.voluntary_exits import (
VoluntaryExit,
)
def test_defaults(sample_voluntary_exit_params):
exit = VoluntaryExit(**sample_voluntary_exit_params)
assert exit.signature[0] == sample_voluntary_exit_params['signature'][0]
assert ssz.encode(exit)
| 22.692308
| 76
| 0.772881
|
4a052ec8886c45f8ccdcc1be948c69b89e08fcf7
| 2,478
|
py
|
Python
|
taxamo/models/refunds.py
|
javor/taxamo-python
|
47696115fa76c0f6950717132eee2b39c9fdbce7
|
[
"Apache-2.0"
] | null | null | null |
taxamo/models/refunds.py
|
javor/taxamo-python
|
47696115fa76c0f6950717132eee2b39c9fdbce7
|
[
"Apache-2.0"
] | null | null | null |
taxamo/models/refunds.py
|
javor/taxamo-python
|
47696115fa76c0f6950717132eee2b39c9fdbce7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
Copyright 2014-2020 by Taxamo
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Refunds:
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'refund_unique_id': 'str',
'refund_note_url': 'str',
'refund_note_number': 'str',
'refund_key': 'str',
'line_key': 'str',
'refund_timestamp': 'str',
'refund_note_number_expanded': 'str',
'amount': 'number',
'informative': 'bool',
'tax_amount': 'number',
'tax_rate': 'number',
'total_amount': 'number',
'refund_reason': 'str'
}
#Refund custom identifier.
self.refund_unique_id = None # str
#Refund note image url.
self.refund_note_url = None # str
#Sequential refund note number.
self.refund_note_number = None # str
#Refund identifier.
self.refund_key = None # str
#Line identifier.
self.line_key = None # str
#Refund timestamp in UTC timezone.
self.refund_timestamp = None # str
#Refund note number with CN-{{invoice-number}}-{{refund-number}} pattern.
self.refund_note_number_expanded = None # str
#Amount, excluding tax, that was refunded.
self.amount = None # number
#Was this refund applied to an informative line?
self.informative = None # bool
#Calculated tax amount, that was refunded.
self.tax_amount = None # number
#Tax rate for the line that was used for the refund calculation.
self.tax_rate = None # number
#Total amount, including tax, that was refunded.
self.total_amount = None # number
#Refund reason, displayed on the credit note.
self.refund_reason = None # str
| 36.441176
| 81
| 0.621872
|
4a052f2034db86c68cc9923083e58aea1fb770c5
| 1,705
|
py
|
Python
|
config/wsgi.py
|
b3h3rkz/deven
|
27991dfae7e6e4851622edeac5efae8a2ff11a1c
|
[
"MIT"
] | 1
|
2016-08-29T12:18:02.000Z
|
2016-08-29T12:18:02.000Z
|
config/wsgi.py
|
b3h3rkz/deven
|
27991dfae7e6e4851622edeac5efae8a2ff11a1c
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
b3h3rkz/deven
|
27991dfae7e6e4851622edeac5efae8a2ff11a1c
|
[
"MIT"
] | null | null | null |
"""
WSGI config for deven project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 46.081081
| 79
| 0.801173
|
4a052f6ec6f3227f609a1aff67e2878c9dff1a77
| 9,932
|
py
|
Python
|
conda/cli/main_clean.py
|
zdog234/conda
|
0fb7eedfa6a55668be50b388f9a7e05facda756b
|
[
"BSD-3-Clause"
] | null | null | null |
conda/cli/main_clean.py
|
zdog234/conda
|
0fb7eedfa6a55668be50b388f9a7e05facda756b
|
[
"BSD-3-Clause"
] | null | null | null |
conda/cli/main_clean.py
|
zdog234/conda
|
0fb7eedfa6a55668be50b388f9a7e05facda756b
|
[
"BSD-3-Clause"
] | 1
|
2019-11-13T04:22:14.000Z
|
2019-11-13T04:22:14.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import defaultdict
import fnmatch
from logging import getLogger
from os import listdir, lstat, walk, unlink
from os.path import getsize, isdir, join, exists
import sys
from ..base.constants import CONDA_TARBALL_EXTENSION
from ..common.constants import CONDA_TEMP_EXTENSION
from ..base.context import context
log = getLogger(__name__)
def find_tarballs():
from ..core.package_cache_data import PackageCacheData
pkgs_dirs = defaultdict(list)
totalsize = 0
part_ext = CONDA_TARBALL_EXTENSION + '.part'
for package_cache in PackageCacheData.writable_caches(context.pkgs_dirs):
pkgs_dir = package_cache.pkgs_dir
if not isdir(pkgs_dir):
continue
root, _, filenames = next(walk(pkgs_dir))
for fn in filenames:
if fn.endswith(CONDA_TARBALL_EXTENSION) or fn.endswith(part_ext):
pkgs_dirs[pkgs_dir].append(fn)
totalsize += getsize(join(root, fn))
return pkgs_dirs, totalsize
def rm_tarballs(args, pkgs_dirs, totalsize, verbose=True):
from .common import confirm_yn
from ..gateways.disk.delete import rm_rf
from ..utils import human_bytes
if verbose:
for pkgs_dir in pkgs_dirs:
print('Cache location: %s' % pkgs_dir)
if not any(pkgs_dirs[i] for i in pkgs_dirs):
if verbose:
print("There are no tarballs to remove")
return
if verbose:
print("Will remove the following tarballs:")
print()
for pkgs_dir in pkgs_dirs:
print(pkgs_dir)
print('-'*len(pkgs_dir))
fmt = "%-40s %10s"
for fn in pkgs_dirs[pkgs_dir]:
size = getsize(join(pkgs_dir, fn))
print(fmt % (fn, human_bytes(size)))
print()
print('-' * 51) # From 40 + 1 + 10 in fmt
print(fmt % ('Total:', human_bytes(totalsize)))
print()
if not context.json or not context.always_yes:
confirm_yn()
if context.json and args.dry_run:
return
for pkgs_dir in pkgs_dirs:
for fn in pkgs_dirs[pkgs_dir]:
try:
if rm_rf(join(pkgs_dir, fn)):
if verbose:
print("Removed %s" % fn)
else:
if verbose:
print("WARNING: cannot remove, file permissions: %s" % fn)
except (IOError, OSError) as e:
if verbose:
print("WARNING: cannot remove, file permissions: %s\n%r" % (fn, e))
else:
log.info("%r", e)
def find_pkgs():
# TODO: This doesn't handle packages that have hard links to files within
# themselves, like bin/python3.3 and bin/python3.3m in the Python package
warnings = []
from ..gateways.disk.link import CrossPlatformStLink
cross_platform_st_nlink = CrossPlatformStLink()
pkgs_dirs = defaultdict(list)
for pkgs_dir in context.pkgs_dirs:
if not exists(pkgs_dir):
if not context.json:
print("WARNING: {0} does not exist".format(pkgs_dir))
continue
pkgs = [i for i in listdir(pkgs_dir) if isdir(join(pkgs_dir, i, 'info'))]
for pkg in pkgs:
breakit = False
for root, dir, files in walk(join(pkgs_dir, pkg)):
for fn in files:
try:
st_nlink = cross_platform_st_nlink(join(root, fn))
except OSError as e:
warnings.append((fn, e))
continue
if st_nlink > 1:
# print('%s is installed: %s' % (pkg, join(root, fn)))
breakit = True
break
if breakit:
break
else:
pkgs_dirs[pkgs_dir].append(pkg)
totalsize = 0
pkgsizes = defaultdict(list)
for pkgs_dir in pkgs_dirs:
for pkg in pkgs_dirs[pkgs_dir]:
pkgsize = 0
for root, dir, files in walk(join(pkgs_dir, pkg)):
for fn in files:
# We don't have to worry about counting things twice: by
# definition these files all have a link count of 1!
size = lstat(join(root, fn)).st_size
totalsize += size
pkgsize += size
pkgsizes[pkgs_dir].append(pkgsize)
return pkgs_dirs, warnings, totalsize, pkgsizes
def rm_pkgs(args, pkgs_dirs, warnings, totalsize, pkgsizes, verbose=True):
from .common import confirm_yn
from ..gateways.disk.delete import rm_rf
from ..utils import human_bytes
if verbose:
for pkgs_dir in pkgs_dirs:
print('Cache location: %s' % pkgs_dir)
for fn, exception in warnings:
print(exception)
if not any(pkgs_dirs[i] for i in pkgs_dirs):
if verbose:
print("There are no unused packages to remove")
return
if verbose:
print("Will remove the following packages:")
for pkgs_dir in pkgs_dirs:
print(pkgs_dir)
print('-' * len(pkgs_dir))
print()
fmt = "%-40s %10s"
for pkg, pkgsize in zip(pkgs_dirs[pkgs_dir], pkgsizes[pkgs_dir]):
print(fmt % (pkg, human_bytes(pkgsize)))
print()
print('-' * 51) # 40 + 1 + 10 in fmt
print(fmt % ('Total:', human_bytes(totalsize)))
print()
if not context.json or not context.always_yes:
confirm_yn()
if context.json and args.dry_run:
return
for pkgs_dir in pkgs_dirs:
for pkg in pkgs_dirs[pkgs_dir]:
if verbose:
print("removing %s" % pkg)
rm_rf(join(pkgs_dir, pkg))
def rm_index_cache():
from ..gateways.disk.delete import rm_rf
from ..core.package_cache_data import PackageCacheData
for package_cache in PackageCacheData.writable_caches():
rm_rf(join(package_cache.pkgs_dir, 'cache'))
def rm_rf_pkgs_dirs():
from .common import confirm_yn
from ..common.io import dashlist
from ..gateways.disk.delete import rm_rf
from ..core.package_cache_data import PackageCacheData
writable_pkgs_dirs = tuple(
pc.pkgs_dir for pc in PackageCacheData.writable_caches() if isdir(pc.pkgs_dir)
)
if not context.json or not context.always_yes:
print("Remove all contents from the following package caches?%s"
% dashlist(writable_pkgs_dirs))
confirm_yn()
for pkgs_dir in writable_pkgs_dirs:
rm_rf(pkgs_dir)
return writable_pkgs_dirs
def clean_tmp_files(path=None):
if not path:
path = sys.prefix
for root, dirs, fns in walk(path):
for fn in fns:
if (fnmatch.fnmatch(fn, "*.trash") or
fnmatch.fnmatch(fn, "*" + CONDA_TEMP_EXTENSION)):
file_path = join(root, fn)
try:
unlink(file_path)
except EnvironmentError:
log.warn("File at {} could not be cleaned up. "
"It's probably still in-use.".format(file_path))
def _execute(args, parser):
json_result = {
'success': True
}
one_target_ran = False
if args.source_cache:
print("WARNING: 'conda clean --source-cache' is deprecated.\n"
" Use 'conda build purge-all' to remove source cache files.",
file=sys.stderr)
if args.force_pkgs_dirs:
writable_pkgs_dirs = rm_rf_pkgs_dirs()
json_result['pkgs_dirs'] = writable_pkgs_dirs
# we return here because all other clean operations target individual parts of
# package caches
return json_result
if args.tarballs or args.all:
pkgs_dirs, totalsize = find_tarballs()
first = sorted(pkgs_dirs)[0] if pkgs_dirs else ''
json_result['tarballs'] = {
'pkgs_dir': first, # Backwards compatibility
'pkgs_dirs': dict(pkgs_dirs),
'files': pkgs_dirs[first], # Backwards compatibility
'total_size': totalsize
}
rm_tarballs(args, pkgs_dirs, totalsize, verbose=not (context.json or context.quiet))
one_target_ran = True
if args.index_cache or args.all:
json_result['index_cache'] = {
'files': [join(context.pkgs_dirs[0], 'cache')]
}
rm_index_cache()
one_target_ran = True
if args.packages or args.all:
pkgs_dirs, warnings, totalsize, pkgsizes = find_pkgs()
first = sorted(pkgs_dirs)[0] if pkgs_dirs else ''
json_result['packages'] = {
'pkgs_dir': first, # Backwards compatibility
'pkgs_dirs': dict(pkgs_dirs),
'files': pkgs_dirs[first], # Backwards compatibility
'total_size': totalsize,
'warnings': warnings,
'pkg_sizes': {i: dict(zip(pkgs_dirs[i], pkgsizes[i])) for i in pkgs_dirs},
}
rm_pkgs(args, pkgs_dirs, warnings, totalsize, pkgsizes,
verbose=not (context.json or context.quiet))
one_target_ran = True
if args.all:
clean_tmp_files(sys.prefix)
elif args.tempfiles:
for path in args.tempfiles:
clean_tmp_files(path)
if not one_target_ran:
from ..exceptions import ArgumentError
raise ArgumentError("At least one removal target must be given. See 'conda clean --help'.")
return json_result
def execute(args, parser):
from .common import stdout_json
json_result = _execute(args, parser)
if context.json:
stdout_json(json_result)
| 34.013699
| 99
| 0.591019
|
4a0530a16aa769aa8fa0e19bf307c4d0c5f89216
| 19,767
|
py
|
Python
|
manila_ui/tests/dashboards/project/test_data.py
|
dinaSap/manila-ui
|
3b8d685dd7830da3c8c703bab3355902dcb1e610
|
[
"Apache-2.0"
] | null | null | null |
manila_ui/tests/dashboards/project/test_data.py
|
dinaSap/manila-ui
|
3b8d685dd7830da3c8c703bab3355902dcb1e610
|
[
"Apache-2.0"
] | null | null | null |
manila_ui/tests/dashboards/project/test_data.py
|
dinaSap/manila-ui
|
3b8d685dd7830da3c8c703bab3355902dcb1e610
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 NetApp, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from manilaclient.v2 import messages
from manilaclient.v2 import security_services
from manilaclient.v2 import share_export_locations
from manilaclient.v2 import share_group_snapshots
from manilaclient.v2 import share_group_types
from manilaclient.v2 import share_groups
from manilaclient.v2 import share_instances
from manilaclient.v2 import share_networks
from manilaclient.v2 import share_replicas
from manilaclient.v2 import share_servers
from manilaclient.v2 import share_snapshot_export_locations
from manilaclient.v2 import share_snapshots
from manilaclient.v2 import share_types
from manilaclient.v2 import shares
class FakeAPIClient(object):
    """Minimal stand-in for a manila API client, used to construct fixtures.

    The manilaclient manager classes below only require *some* client object;
    they never call into it in these tests, so a sentinel string suffices.
    """
    client = "fake_client"
share = shares.Share(
shares.ShareManager(FakeAPIClient),
{'id': "11023e92-8008-4c8b-8059-7f2293ff3887",
'status': 'available',
'size': 40,
'name': 'Share name',
'description': 'Share description',
'is_public': False,
'share_proto': 'NFS',
'metadata': {},
'created_at': '2014-01-27 10:30:00',
'share_server_id': '1',
'share_network_id': '7f3d1c33-8d00-4511-29df-a2def31f3b5d',
'availability_zone': 'Test AZ',
'replication_type': 'readable',
'share_group_id': 'fake_share_group_id',
'mount_snapshot_support': False})
nameless_share = shares.Share(
shares.ShareManager(FakeAPIClient),
{'id': "4b069dd0-6eaa-4272-8abc-5448a68f1cce",
'status': 'available',
'size': 10,
'name': '',
'description': '',
'share_proto': 'NFS',
'export_location': "/dev/hda",
'metadata': {},
'created_at': '2010-11-21 18:34:25',
'share_type': 'vol_type_1',
'share_server_id': '1',
'share_network_id': '7f3d1c33-8d00-4511-29df-a2def31f3b5d',
'availability_zone': 'Test AZ',
'replication_type': None,
'mount_snapshot_support': False})
share_with_metadata = shares.Share(
shares.ShareManager(FakeAPIClient),
{'id': "0ebb3748-c1dr-4bb6-8315-0354e7691fff",
'status': 'available',
'size': 40,
'name': 'Share with metadata',
'description': 'Share description',
'share_proto': 'NFS',
'metadata': {'aaa': 'bbb'},
'created_at': '2016-06-31 00:00:00',
'share_server_id': '1',
'share_network_id': '7f3d1c33-8d00-4511-29df-a2def31f3b5d',
'availability_zone': 'Test AZ',
'replication_type': 'readable',
'mount_snapshot_support': False})
other_share = shares.Share(
shares.ShareManager(FakeAPIClient),
{'id': "21023e92-8008-1234-8059-7f2293ff3889",
'status': 'in-use',
'size': 10,
'name': u'my_share',
'description': '',
'share_proto': 'NFS',
'metadata': {},
'created_at': '2013-04-01 10:30:00',
'share_type': None,
'share_server_id': '1',
'share_network_id': '7f3d1c33-8d00-4511-29df-a2def31f3b5d',
'availability_zone': 'Test AZ',
'replication_type': 'readable',
'mount_snapshot_support': False})
share_replica = share_replicas.ShareReplica(
share_replicas.ShareReplicaManager(FakeAPIClient),
{'id': '11023e92-8008-4c8b-8059-replica00001',
'availability_zone': share.availability_zone,
'host': 'fake_host_1',
'share_id': share.id,
'status': 'available',
'replica_state': 'active',
'created_at': '2016-07-19 19:46:13',
'updated_at': '2016-07-19 19:47:14'}
)
share_replica2 = share_replicas.ShareReplica(
share_replicas.ShareReplicaManager(FakeAPIClient),
{'id': '11023e92-8008-4c8b-8059-replica00002',
'availability_zone': share.availability_zone,
'host': 'fake_host_2',
'share_id': share.id,
'status': 'available',
'replica_state': 'in_sync',
'created_at': '2016-07-19 20:46:13',
'updated_at': '2016-07-19 20:47:14'}
)
share_replica3 = share_replicas.ShareReplica(
share_replicas.ShareReplicaManager(FakeAPIClient),
{'id': '11023e92-8008-4c8b-8059-replica00003',
'availability_zone': share.availability_zone,
'host': 'fake_host_3',
'share_id': share.id,
'status': 'available',
'replica_state': 'active',
'created_at': '2016-07-19 21:46:13',
'updated_at': '2016-07-19 21:47:14'}
)
share_mount_snapshot = shares.Share(
shares.ShareManager(FakeAPIClient),
{'id': "11023e92-8008-4c8b-8059-7f2293ff3888",
'status': 'available',
'size': 40,
'name': 'Share name',
'description': 'Share description',
'share_proto': 'NFS',
'metadata': {},
'created_at': '2014-01-27 10:30:00',
'share_server_id': '1',
'share_network_id': '7f3d1c33-8d00-4511-29df-a2def31f3b5d',
'availability_zone': 'Test AZ',
'replication_type': 'readable',
'mount_snapshot_support': True})
admin_export_location = share_export_locations.ShareExportLocation(
share_export_locations.ShareExportLocationManager(FakeAPIClient),
{'id': '6921e862-88bc-49a5-a2df-efeed9acd583',
'path': '1.1.1.1:/path/to/admin/share',
'preferred': False,
'is_admin_only': True,
'share_instance_id': 'e1c2d35e-fe67-4028-ad7a-45f668732b1d'}
)
user_export_location = share_export_locations.ShareExportLocation(
share_export_locations.ShareExportLocationManager(FakeAPIClient),
{'id': 'b6bd76ce-12a2-42a9-a30a-8a43b503867d',
'path': '2.2.2.2:/path/to/user/share',
'preferred': True,
'is_admin_only': False,
'share_instance_id': 'e1c2d35e-fe67-4028-ad7a-45f668732b1d'}
)
export_locations = [admin_export_location, user_export_location]
admin_snapshot_export_locations = [
share_snapshot_export_locations.ShareSnapshotExportLocation(
share_snapshot_export_locations.ShareSnapshotExportLocationManager(
FakeAPIClient),
{'id': '6921e862-88bc-49a5-a2df-efeed9acd584',
'path': '1.1.1.1:/path/to/admin/share',
'is_admin_only': True,
'share_snapshot_instance_id': 'e1c2d35e-fe67-4028-ad7a-45f668732b1e'}
),
share_snapshot_export_locations.ShareSnapshotExportLocation(
share_snapshot_export_locations.ShareSnapshotExportLocationManager(
FakeAPIClient),
{'id': '6921e862-88bc-49a5-a2df-efeed9acd585',
'path': '1.1.1.2:/path/to/admin/share',
'is_admin_only': False,
'share_snapshot_instance_id': 'e1c2d35e-fe67-4028-ad7a-45f668732b1f'}
)
]
user_snapshot_export_locations = [
share_snapshot_export_locations.ShareSnapshotExportLocation(
share_snapshot_export_locations.ShareSnapshotExportLocationManager(
FakeAPIClient),
{'id': 'b6bd76ce-12a2-42a9-a30a-8a43b503867e',
'path': '1.1.1.1:/path/to/user/share_snapshot'}
),
share_snapshot_export_locations.ShareSnapshotExportLocation(
share_snapshot_export_locations.ShareSnapshotExportLocationManager(
FakeAPIClient),
{'id': 'b6bd76ce-12a2-42a9-a30a-8a43b503867f',
'path': '1.1.1.2:/not/too/long/path/to/user/share_snapshot'}
)
]
rule = collections.namedtuple('Access', ['access_type', 'access_to', 'state',
'id', 'access_level', 'access_key'])
user_rule = rule('user', 'someuser', 'active',
'10837072-c49e-11e3-bd64-60a44c371189', 'rw', '')
ip_rule = rule('ip', '1.1.1.1', 'active',
'2cc8e2f8-c49e-11e3-bd64-60a44c371189', 'rw', '')
cephx_rule = rule('cephx', 'alice', 'active',
'235481bc-1a84-11e6-9666-68f728a0492e', 'rw',
'AQAdFCNYDCapMRAANuK/CiEZbog2911a+t5dcQ==')
snapshot = share_snapshots.ShareSnapshot(
share_snapshots.ShareSnapshotManager(FakeAPIClient),
{'id': '5f3d1c33-7d00-4511-99df-a2def31f3b5d',
'name': 'test snapshot',
'description': 'share snapshot',
'size': 40,
'status': 'available',
'share_id': '11023e92-8008-4c8b-8059-7f2293ff3887'})
snapshot_mount_support = share_snapshots.ShareSnapshot(
share_snapshots.ShareSnapshotManager(FakeAPIClient),
{'id': '5f3d1c33-7d00-4511-99df-a2def31f3b5e',
'name': 'test snapshot',
'description': 'share snapshot',
'size': 40,
'status': 'available',
'share_id': '11023e92-8008-4c8b-8059-7f2293ff3888'})
inactive_share_network = share_networks.ShareNetwork(
share_networks.ShareNetworkManager(FakeAPIClient),
{'id': '6f3d1c33-8d00-4511-29df-a2def31f3b5d',
'name': 'test_share_net',
'description': 'test share network',
'status': 'INACTIVE',
'neutron_net_id': 'fake_neutron_net_id',
'neutron_subnet_id': 'fake_neutron_subnet_id'})
active_share_network = share_networks.ShareNetwork(
share_networks.ShareNetworkManager(FakeAPIClient),
{'id': '7f3d1c33-8d00-4511-29df-a2def31f3b5d',
'name': 'test_share_net',
'description': 'test share network',
'status': 'ACTIVE',
'neutron_net_id': 'fake_neutron_net_id',
'neutron_subnet_id': 'fake_neutron_subnet_id'})
sec_service = security_services.SecurityService(
security_services.SecurityServiceManager(FakeAPIClient),
{'id': '7f3d1c33-8d10-4511-29df-a2def31f3b5d',
'server': '1.1.1.1',
'dns_ip': '2.2.2.2',
'user': 'someuser',
'password': 'somepass',
'type': 'active_directory',
'name': 'test-sec-service',
'description': 'test security service',
'domain': 'testdomain',
})
share_instance = share_instances.ShareInstance(
share_instances.ShareInstanceManager(FakeAPIClient),
{'id': 'fake_share_instance_no_ss_id',
'status': 'available',
'host': 'host1@backend1#pool1',
'availability_zone': 'zone1',
'share_id': 'fake_share_id_1',
'share_network_id': 'fake_share_network_id_1',
'share_server_id': 'fake_share_server_id_1',
'created_at': '2016-04-26 13:14:15'}
)
share_instance_no_ss = share_instances.ShareInstance(
share_instances.ShareInstanceManager(FakeAPIClient),
{'id': 'fake_share_instance_id',
'status': 'available',
'host': 'host2@backend2#pool2',
'availability_zone': 'zone2',
'share_id': 'fake_share_id_2',
'share_network_id': None,
'share_server_id': None,
'created_at': '2016-04-26 14:15:16'}
)
share_server = share_servers.ShareServer(
share_servers.ShareServerManager(FakeAPIClient),
{'id': 'fake_share_server_id1',
'status': 'active',
'share_network_id': 'fake_share_network_id1',
'share_network_name': 'fake_share_network_name1',
'project_id': 'fake_project_id1',
'backend_details': {
'foo_key': 'foo_value',
'bar_key_foo': 'bar_value_foo',
},
'host': 'fakehost1@fakebackend1#fakepool1'}
)
share_server_errored = share_servers.ShareServer(
share_servers.ShareServerManager(FakeAPIClient),
{'id': 'fake_share_server_id2',
'status': 'error',
'share_network_id': 'fake_share_network_id2',
'share_network_name': 'fake_share_network_name2',
'project_id': 'fake_project_id2',
'backend_details': {},
'host': 'fakehost2@fakebackend2#fakepool2'}
)
share_type = share_types.ShareType(
share_types.ShareTypeManager(FakeAPIClient),
{'id': 'share-type-id1',
'name': 'test-share-type1',
'share_type_access:is_public': True,
'extra_specs': {
'snapshot_support': True,
'driver_handles_share_servers': False}
}
)
share_type_private = share_types.ShareType(
share_types.ShareTypeManager(FakeAPIClient),
{'id': 'share-type-id2',
'name': 'test-share-type2',
'share_type_access:is_public': False,
'extra_specs': {'driver_handles_share_servers': False}}
)
share_type_dhss_true = share_types.ShareType(
share_types.ShareTypeManager(FakeAPIClient),
{'id': 'share-type-id3',
'name': 'test-share-type3',
'share_type_access:is_public': True,
'extra_specs': {'driver_handles_share_servers': True}}
)
share_type_alt = share_types.ShareType(
share_types.ShareTypeManager(FakeAPIClient),
{'id': 'share-type-id4',
'name': 'test-share-type4',
'share_type_access:is_public': True,
'extra_specs': {
'snapshot_support': True,
'driver_handles_share_servers': False}
}
)
share_group_type = share_group_types.ShareGroupType(
share_group_types.ShareGroupTypeManager(FakeAPIClient),
{'id': 'fake_share_group_type_id1',
'name': 'fake_share_group_type_name',
'share_types': [share_type.id],
'group_specs': {'k1': 'v1', 'k2': 'v2'},
'is_public': True}
)
share_group_type_private = share_group_types.ShareGroupType(
share_group_types.ShareGroupTypeManager(FakeAPIClient),
{'id': 'fake_private_share_group_type_id2',
'name': 'fake_private_share_group_type_name',
'share_types': [share_type.id, share_type_private.id],
'group_specs': {'k1': 'v1', 'k2': 'v2'},
'is_public': False}
)
share_group_type_dhss_true = share_group_types.ShareGroupType(
share_group_types.ShareGroupTypeManager(FakeAPIClient),
{'id': 'fake_share_group_type_id3',
'name': 'fake_share_group_type_name',
'share_types': [share_type_dhss_true.id],
'group_specs': {'k3': 'v3', 'k4': 'v4'},
'is_public': True}
)
share_group_type_alt = share_group_types.ShareGroupType(
share_group_types.ShareGroupTypeManager(FakeAPIClient),
{'id': 'fake_share_group_type_id4',
'name': 'fake_share_group_type_name',
'share_types': [share_type_alt.id],
'group_specs': {'k5': 'v5', 'k6': 'v6'},
'is_public': True}
)
share_group = share_groups.ShareGroup(
share_groups.ShareGroupManager(FakeAPIClient),
{'id': 'fake_share_group_id',
'name': 'fake_share_group_name',
'description': 'fake sg description',
'status': 'available',
'share_types': [share_type.id],
'share_group_type_id': share_group_type.id,
'source_share_group_snapshot_id': None,
'share_network_id': None,
'share_server_id': None,
'availability_zone': None,
'host': 'fake_host_987654321',
'consistent_snapshot_support': None,
'created_at': '2017-05-31T13:36:15.000000',
'project_id': 'fake_project_id_987654321'}
)
share_group_nameless = share_groups.ShareGroup(
share_groups.ShareGroupManager(FakeAPIClient),
{'id': 'fake_nameless_share_group_id',
'name': None,
'status': 'available',
'share_types': [share_type.id],
'share_group_type_id': share_group_type.id,
'source_share_group_snapshot_id': None,
'share_network_id': None,
'share_server_id': None,
'availability_zone': None,
'host': 'fake_host_987654321',
'consistent_snapshot_support': None,
'created_at': '2017-05-31T13:36:15.000000',
'project_id': 'fake_project_id_987654321'}
)
share_group_dhss_true = share_groups.ShareGroup(
share_groups.ShareGroupManager(FakeAPIClient),
{'id': 'fake_dhss_true_share_group_id',
'name': 'fake_dhss_true_share_group_name',
'status': 'available',
'share_types': [share_type_dhss_true.id],
'share_group_type_id': share_group_type_dhss_true.id,
'source_share_group_snapshot_id': None,
'share_network_id': 'fake_share_network_id',
'share_server_id': 'fake_share_server_id',
'availability_zone': None,
'host': 'fake_host_987654321',
'consistent_snapshot_support': 'pool',
'created_at': '2017-05-31T23:59:59.000000',
'project_id': 'fake_project_id_987654321'}
)
share_group_snapshot = share_group_snapshots.ShareGroupSnapshot(
share_group_snapshots.ShareGroupSnapshotManager(FakeAPIClient),
{'id': 'fake_share_group_snapshot_id_1',
'name': 'fake_share_group_snapshot_name',
'status': 'available',
'share_group_id': share_group.id,
'description': 'fake sgs description',
'created_at': '2017-06-01T13:13:13.000000',
'project_id': 'fake_project_id_987654321',
'members': [
{'share_id': 'fake_share_id_1', 'id': 'fake_ssi_id_1', 'size': 1},
{'share_id': 'fake_share_id_2', 'id': 'fake_ssi_id_2', 'size': 2},
]}
)
share_group_snapshot_nameless = share_group_snapshots.ShareGroupSnapshot(
share_group_snapshots.ShareGroupSnapshotManager(FakeAPIClient),
{'id': 'fake_share_group_snapshot_id_2_nameless',
'name': None,
'status': 'available',
'share_group_id': share_group_nameless.id,
'description': 'fake nameless sgs description',
'created_at': '2017-06-02T14:14:14.000000',
'project_id': 'fake_project_id_987654321',
'members': []}
)
# Manila Limits
limits = {"totalSharesUsed": 1,
"totalShareSnapshotsUsed": 1,
"totalShareGigabytesUsed": 500,
"totalSnapshotGigabytesUsed": 500,
"maxTotalShares": 10,
"maxTotalShareSnapshots": 10,
"maxTotalShareGigabytes": 1000,
"maxTotalSnapshotGigabytes": 1000,
}
limits_negative = {"totalSharesUsed": 10,
"totalShareSnapshotsUsed": 10,
"totalShareGigabytesUsed": 1000,
"totalSnapshotGigabytesUsed": 1000,
"maxTotalShares": 10,
"maxTotalShareSnapshots": 10,
"maxTotalShareGigabytes": 1000,
"maxTotalSnapshotGigabytes": 1000,
}
# Manila User Messages
fake_message_1 = messages.Message(
messages.MessageManager(FakeAPIClient),
{'resource_id': "351cc796-2d79-4a08-b878-a8ed933b6b68",
'message_level': 'ERROR',
'user_message': 'allocate host: No storage could be allocated for'
' this share request. Trying again with a different'
' size or share type may succeed.',
'expires_at': '2017-07-10T10:27:43.000000',
'id': '4b319d29-d5b7-4b6e-8e7c-8d6e53f3c3d5',
'created_at': '2017-07-10T10:26:43.000000',
'detail_id': '002',
'request_id': 'req-24e7ccb6-a7d5-4ddd-a8e4-d8f72a4509c8',
'project_id': '2e3de76b49b444fd9dc7ca9f7048ce6b',
'resource_type': 'SHARE',
'action_id': '001'})
fake_message_2 = messages.Message(
messages.MessageManager(FakeAPIClient),
{'resource_id': "25b4c0cc-e711-4c6f-b9fd-72d6b5c62bce",
'message_level': 'ERROR',
'user_message': 'Driver does not expect share-network to be provided '
'with current configuration.',
'expires_at': '2018-09-10T09:37:45.000000',
'id': 'd01d03ee-7758-4175-a6b5-853329dd2f4e',
'created_at': '2018-09-10T09:36:45.000000',
'detail_id': '003',
'request_id': 'req-fa568ab0-d6b3-4b32-899d-637ee006fed4',
'project_id': '2e3de76b49b444fd9dc7ca9f7048ce6b',
'resource_type': 'SHARE',
'action_id': '002'})
fake_message_3 = messages.Message(
messages.MessageManager(FakeAPIClient),
{'resource_id': "4befdc84-2796-44e1-8645-14b651bfb787",
'message_level': 'ERROR',
'user_message': 'Share has no replica with "replica_state" '
'set to "active"',
'expires_at': '2020-09-09T22:37:13.000000',
'id': '1e1c493f-d07d-48e9-93a8-ef5ad4b8ca8a',
'created_at': '2020-09-09T22:36:13.000000',
'detail_id': '004',
'request_id': 'req-29449bc8-d0ec-4d6b-b37c-85d0f04251b1',
'project_id': '2e3de76b49b444fd9dc7ca9f7048ce6b',
'resource_type': 'SHARE_REPLICA',
'action_id': '002'})
| 36.47048
| 78
| 0.677645
|
4a05315aa7f03de64adc96679942bfa7e3457b0a
| 2,378
|
py
|
Python
|
pytudes/_2021/educative/grokking_the_coding_interview/in_place_reversal_of_a_linked_list/_2__reverse_a_sublist__medium.py
|
TeoZosa/pytudes
|
4f01ab20f936bb4b3f42d1946180d4a20fd95fbf
|
[
"Apache-2.0"
] | 1
|
2022-02-08T09:47:35.000Z
|
2022-02-08T09:47:35.000Z
|
pytudes/_2021/educative/grokking_the_coding_interview/in_place_reversal_of_a_linked_list/_2__reverse_a_sublist__medium.py
|
TeoZosa/pytudes
|
4f01ab20f936bb4b3f42d1946180d4a20fd95fbf
|
[
"Apache-2.0"
] | 62
|
2021-04-02T23:41:16.000Z
|
2022-03-25T13:16:10.000Z
|
pytudes/_2021/educative/grokking_the_coding_interview/in_place_reversal_of_a_linked_list/_2__reverse_a_sublist__medium.py
|
TeoZosa/pytudes
|
4f01ab20f936bb4b3f42d1946180d4a20fd95fbf
|
[
"Apache-2.0"
] | null | null | null |
"""https://www.educative.io/courses/grokking-the-coding-interview/qVANqMonoB2
Categories:
- Linked List
- Blind 75
Examples:
>>> assert (reverse_sub_list(None,0,0) is None)
"""
from pytudes._2021.utils.linked_list import NodeType, convert_list_to_linked_list
def reverse_sub_list(head: NodeType, p: int, q: int) -> NodeType:
    """Driver method for `_reverse_sub_list()`.

    Args:
        head: head node of the LinkedList (may be None)
        p: 1-indexed position where the reversal starts
        q: 1-indexed position where the reversal ends (inclusive)

    Returns:
        the head of the sublist-reversed LinkedList
    """
    return _reverse_sub_list(head, p, q)
def _reverse_sub_list(head: NodeType, start_pos: int, end_pos: int) -> NodeType:
"""
Given the head of a LinkedList and 1-indexed positions `start_pos` and
`end_pos`, reverse the LinkedList from position `start_pos` to `end_pos`.
Returns:
the head of the in-place sublist-reversed LinkedList
Examples:
>>> head = convert_list_to_linked_list([1,2,3,4,5])
>>> head.as_list()
[1, 2, 3, 4, 5]
>>> head = _reverse_sub_list(head,2,4)
>>> head.as_list()
[1, 4, 3, 2, 5]
>>> head = convert_list_to_linked_list([1,2,3,4,5])
>>> head = _reverse_sub_list(head,10,15)
>>> head.as_list()
[1, 2, 3, 4, 5]
>>> assert (_reverse_sub_list(None,0,0) is None)
>>> assert (head == _reverse_sub_list(head,0,1))
>>> assert (head == _reverse_sub_list(head,1,1))
"""
## EDGE CASES ##
if not head: # Vacuous linked list
return head
if start_pos < 1: # Not 1-indexed
return head
if end_pos <= start_pos: # Invalid range
return head
"""Algorithm"""
## INITIALIZE VARS ##
curr_pos = 1
## FIND sublist 1 & 2 ##
curr, prev = head, None
while curr is not None and curr_pos < start_pos:
prev, curr = curr, curr.next
curr_pos += 1
s2_head, s1_tail = curr, prev
if s2_head is None: # No sublist to reverse <=> list size < start_pos
return head
## REVERSE sublist2 ##
curr, prev = s2_head, None
while curr is not None and start_pos <= curr_pos <= end_pos:
curr.next, prev, curr = prev, curr, curr.next # reverse node
curr_pos += 1
# POST-CONDITION: list size < end_pos XOR end_pos < curr_pos
s3_head, s2_reversed_head = curr, prev
s2_reversed_tail = s2_head # Readable alias
## RE-LINK sublists ##
s1_tail.next = s2_reversed_head
s2_reversed_tail.next = s3_head
return head
| 29.358025
| 81
| 0.620269
|
4a0531ddec4e16f50fabe366772c119799bb10b2
| 12,361
|
py
|
Python
|
kenshi.py
|
Nattfarinn/kenshi-data-extractor
|
6cd7e726b3d51872f2742c90557b0a288552120d
|
[
"MIT"
] | null | null | null |
kenshi.py
|
Nattfarinn/kenshi-data-extractor
|
6cd7e726b3d51872f2742c90557b0a288552120d
|
[
"MIT"
] | null | null | null |
kenshi.py
|
Nattfarinn/kenshi-data-extractor
|
6cd7e726b3d51872f2742c90557b0a288552120d
|
[
"MIT"
] | 1
|
2020-12-01T01:49:37.000Z
|
2020-12-01T01:49:37.000Z
|
import struct
def flip_dict(input_dict):
    """Return a new dict mapping each value of *input_dict* back to its key."""
    flipped = {}
    for key, value in input_dict.items():
        flipped[value] = key
    return flipped
EXTRA_ITEM_REMOVED = (2147483647, 2147483647, 2147483647)
FILE_TYPE = {
15: "SAVE",
16: "MOD"
}
FILE_TYPE_MAPPING = flip_dict(FILE_TYPE)
RECORD_DATATYPE = {
-2147483646: "NEW",
-2147483647: "CHANGED",
-2147483645: "CHANGED_RENAMED"
}
RECORD_DATATYPE_MAPPING = flip_dict(RECORD_DATATYPE)
RECORD_TYPE = {
0: "BUILDING",
1: "CHARACTER",
2: "WEAPON",
3: "ARMOUR",
4: "ITEM",
5: "ANIMAL_ANIMATION",
6: "ATTACHMENT",
7: "RACE",
9: "NATURE",
10: "FACTION",
13: "TOWN",
16: "LOCATIONAL_DAMAGE",
17: "COMBAT_TECHNIQUE",
18: "DIALOGUE",
19: "DIALOGUE_LINE",
21: "RESEARCH",
22: "AI_TASK",
24: "ANIMATION",
25: "STATS",
26: "PERSONALITY",
27: "CONSTANTS",
28: "BIOMES",
29: "BUILDING_PART",
30: "INSTANCE_COLLECTION",
31: "DIALOG_ACTION",
34: "PLATOON",
36: "GAMESTATE_CHARACTER",
37: "GAMESTATE_FACTION",
38: "GAMESTATE_TOWN_INSTANCE_LIST",
41: "INVENTORY_STATE",
42: "INVENTORY_ITEM_STATE",
43: "REPEATABLE_BUILDING_PART_SLOT",
44: "MATERIAL_SPEC",
45: "MATERIAL_SPECS_COLLECTION",
46: "CONTAINER",
47: "MATERIAL_SPECS_CLOTHING",
49: "VENDOR_LIST",
50: "MATERIAL_SPECS_WEAPON",
51: "WEAPON_MANUFACTURER",
52: "SQUAD_TEMPLATE",
53: "ROAD",
55: "COLOR_DATA",
56: "CAMERA",
57: "MEDICAL_STATE",
59: "FOLIAGE_LAYER",
60: "FOLIAGE_MESH",
61: "GRASS",
62: "BUILDING_FUNCTIONALITY",
63: "DAY_SCHEDULE",
64: "NEW_GAME_STARTOFF",
66: "CHARACTER_APPEARANCE",
67: "GAMESTATE_AI",
68: "WILDLIFE_BIRDS",
69: "MAP_FEATURES",
70: "DIPLOMATIC_ASSAULTS",
71: "SINGLE_DIPLOMATIC_ASSAULT",
72: "AI_PACKAGE",
73: "DIALOGUE_PACKAGE",
74: "GUN_DATA",
76: "ANIMAL_CHARACTER",
77: "UNIQUE_SQUAD_TEMPLATE",
78: "FACTION_TEMPLATE",
80: "WEATHER",
81: "SEASON",
82: "EFFECT",
83: "ITEM_PLACEMENT_GROUP",
84: "WORD_SWAPS",
86: "NEST_ITEM",
87: "CHARACTER_PHYSICS_ATTACHMENT",
88: "LIGHT",
89: "HEAD",
92: "FOLIAGE_BUILDING",
93: "FACTION_CAMPAIGN",
94: "GAMESTATE_TOWN",
95: "BIOME_GROUP",
96: "EFFECT_FOG_VOLUME",
97: "FARM_DATA",
98: "FARM_PART",
99: "ENVIRONMENT_RESOURCES",
100: "RACE_GROUP",
101: "ARTIFACTS",
102: "MAP_ITEM",
103: "BUILDINGS_SWAP",
104: "ITEMS_CULTURE",
105: "ANIMATION_EVENT",
107: "CROSSBOW",
109: "AMBIENT_SOUND",
110: "WORLD_EVENT_STATE",
111: "LIMB_REPLACEMENT",
112: "BASE_ANIMATIONS"
}
RECORD_TYPE_MAPPING = flip_dict(RECORD_TYPE)
def get_file_type(type_id):
    """Return the symbolic name for a file-type id, or an UNKNOWN placeholder."""
    name = FILE_TYPE.get(type_id)
    return "UNKNOWN_FILE_TYPE[%d]" % type_id if name is None else name
def get_record_type(type_id):
    """Return the symbolic name for a record-type id, or an UNKNOWN placeholder."""
    name = RECORD_TYPE.get(type_id)
    return "UNKNOWN_RECORD_TYPE[%d]" % type_id if name is None else name
def get_record_datatype(type_id):
    """Return the symbolic name for a record datatype id, or an UNKNOWN placeholder."""
    name = RECORD_DATATYPE.get(type_id)
    return "UNKNOWN_RECORD_DATATYPE[%d]" % type_id if name is None else name
class BinaryFileReader:
    """Sequentially reads native-endian binary primitives from a file.

    Each method consumes the next value from the stream. FIX: the original
    class never released its file handle; ``close()`` and the context-manager
    protocol were added (backward compatible) so callers can free it.
    """

    def __init__(self, file_path):
        self.file_path = file_path
        self.handle = open(file_path, "rb")

    def close(self):
        """Release the underlying file handle (idempotent)."""
        if not self.handle.closed:
            self.handle.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def int(self):
        """Read a 4-byte signed integer."""
        binary = self.handle.read(4)
        return struct.unpack("i", binary)[0]

    def string(self):
        """Read a 4-byte length prefix followed by that many UTF-8 bytes."""
        binary = self.handle.read(self.int())
        return binary.decode("utf-8")

    def char(self):
        """Read a single byte and decode it as UTF-8."""
        binary = self.handle.read(1)
        return struct.unpack("c", binary)[0].decode("utf-8")

    def bool(self):
        """Read a 1-byte boolean."""
        binary = self.handle.read(1)
        return struct.unpack("?", binary)[0]

    def float(self):
        """Read a 4-byte single-precision float."""
        binary = self.handle.read(4)
        return struct.unpack("f", binary)[0]

    def vec3i(self):
        """Read three consecutive ints as a tuple."""
        return self.int(), self.int(), self.int()

    def vec3f(self):
        """Read three consecutive floats as a tuple."""
        return self.float(), self.float(), self.float()

    def vec4f(self):
        """Read four consecutive floats as a tuple."""
        return self.float(), self.float(), self.float(), self.float()
class BinaryFileWriter:
    """Sequentially writes native-endian binary primitives to a file.

    Mirror image of BinaryFileReader. FIX: the original class never released
    its file handle (buffered data could be lost); ``close()`` and the
    context-manager protocol were added (backward compatible).
    """

    def __init__(self, file_path):
        self.file_path = file_path
        self.handle = open(file_path, "wb")

    def close(self):
        """Flush and release the underlying file handle (idempotent)."""
        if not self.handle.closed:
            self.handle.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def int(self, value):
        """Write a 4-byte signed integer."""
        self.handle.write(struct.pack("i", value))

    def string(self, value):
        """Write a 4-byte length prefix followed by the UTF-8 bytes."""
        self.int(len(value))
        self.handle.write(str.encode(value))

    def char(self, value):
        """Write a single character as UTF-8 bytes."""
        self.handle.write(str.encode(value))

    def bool(self, value):
        """Write a 1-byte boolean."""
        self.handle.write(struct.pack("?", value))

    def float(self, value):
        """Write a 4-byte single-precision float."""
        self.handle.write(struct.pack("f", value))

    def vec3i(self, value):
        """Write three ints from a 3-sequence."""
        self.int(value[0])
        self.int(value[1])
        self.int(value[2])

    def vec3f(self, value):
        """Write three floats from a 3-sequence."""
        self.float(value[0])
        self.float(value[1])
        self.float(value[2])

    def vec4f(self, value):
        """Write four floats from a 4-sequence."""
        self.float(value[0])
        self.float(value[1])
        self.float(value[2])
        self.float(value[3])

    def filename(self, value):
        """Write a filename; encoded identically to string()."""
        self.string(value)
class ModFileWriter(BinaryFileWriter):
    """Serializes a Kenshi mod file.

    The header (type tag, version, author, description, dependencies,
    references, flags) is written in __init__; records() then writes the
    record table. Field order must mirror ModFileReader exactly.
    """
    def __init__(self, file_path, version, author, description,
                 dependencies, references):
        super().__init__(file_path)
        self.int(FILE_TYPE_MAPPING["MOD"])  # file-type tag: always MOD
        self.int(version)
        self.string(author)
        self.string(description)
        self.string(dependencies)
        self.string(references)
        self.int(0)  # flags field — always written as zero here
    def records(self, records):
        """Write the record count followed by each record's header, typed
        field groups (in the fixed order the reader expects), extra data and
        instances."""
        self.int(len(records))
        for string_id, record in records.items():
            self.int(record["instance_count"])
            self.int(record["type_id"])
            self.int(record["id"])
            self.string(record["name"])
            self.string(string_id)
            self.int(record["datatype_id"])
            # Order is significant: the reader consumes these groups in
            # exactly this sequence.
            for field_type in ("bool", "float", "int", "vec3f", "vec4f",
                               "string", "filename"):
                self.fields(record["fields"][field_type], field_type)
            self.extra(record["extra"])
            self.instances(record["instances"])
    def extra(self, extra):
        """Write the extra-data mapping: category name, then its named
        3-int vectors."""
        self.int(len(extra))
        for extra_category, extra_fields in extra.items():
            self.string(extra_category)
            self.int(len(extra_fields))
            for field_name, field_value in extra_fields.items():
                self.string(field_name)
                self.vec3i(field_value)
    def fields(self, fields, field_type):
        """Write one typed field group: count, then name/value pairs.

        ``field_type`` names a writer method (e.g. "int", "vec3f") that is
        dispatched via getattr to encode each value.
        """
        self.int(len(fields))
        for field_name, field_value in fields.items():
            self.string(field_name)
            getattr(self, field_type)(field_value)
    def instances(self, instances):
        """Write placed instances: id, target, position (vec3f), rotation
        (vec4f), and the list of state strings."""
        self.int(len(instances))
        for instance_id, instance_item in instances.items():
            self.string(instance_id)
            self.string(instance_item["target"])
            self.vec3f(instance_item["position"])
            self.vec4f(instance_item["rotation"])
            instance_states = instance_item["states"]
            self.int(len(instance_states))
            for instance_state in instance_states:
                self.string(instance_state)
class ModFileReader(BinaryFileReader):
    """Parses a Kenshi mod file into header attributes and a records dict.

    Parsing happens eagerly in __init__; the read order must mirror
    ModFileWriter exactly. Records are keyed by their string id.
    """
    def __init__(self, file_path):
        super().__init__(file_path)
        self.file_type = get_file_type(self.int())
        if self.file_type != "MOD":
            raise Exception("%s is not a mod file, %s found." % (
                file_path, self.file_type))
        self.version = self.int()
        self.author = self.string()
        self.description = self.string()
        # Dependencies/references are stored as one comma-separated string.
        self.dependencies = self.string().split(",")
        self.references = self.string().split(",")
        self.flags = self.int()
        self.record_count = self.int()
        self.records = {}
        for _ in range(0, self.record_count):
            instance_count = self.int()
            record_type = self.int()
            record_id = self.int()
            name = self.string()
            string_id = self.string()
            datatype = self.int()
            # Typed field groups arrive in this fixed order (see the writer).
            fields_bool = self.fields(self.bool)
            fields_float = self.fields(self.float)
            fields_int = self.fields(self.int)
            fields_vec3f = self.fields(self.vec3f)
            fields_vec4f = self.fields(self.vec4f)
            fields_string = self.fields(self.string)
            fields_filename = self.fields(self.string)
            extra = self.extras()
            instances = self.instances()
            self.records[string_id] = {
                "instance_count": instance_count,
                "type_id": record_type,
                "type": get_record_type(record_type),
                "name": name,
                "id": record_id,
                "datatype_id": datatype,
                "datatype": get_record_datatype(datatype),
                "fields": {
                    "bool": fields_bool,
                    "float": fields_float,
                    "int": fields_int,
                    "vec3f": fields_vec3f,
                    "vec4f": fields_vec4f,
                    "string": fields_string,
                    "filename": fields_filename,
                },
                "extra": extra,
                "instances": instances,
            }
    def fields(self, value_type):
        """Read one typed field group: count, then name/value pairs decoded
        by the given bound reader method."""
        fields = {}
        for _ in range(0, self.int()):
            name = self.string()
            value = value_type()
            fields[name] = value
        return fields
    def extras(self):
        """Read the extra-data mapping: category name -> items dict."""
        extras = {}
        length = self.int()
        for _ in range(0, length):
            name = self.string()
            extras[name] = self.extra_items()
        return extras
    def extra_items(self):
        """Read one extra-data category: item name -> 3-int vector."""
        items = {}
        length = self.int()
        for _ in range(0, length):
            name = self.string()
            value = self.vec3i()
            items[name] = value
        return items
    def instances(self):
        """Read placed instances keyed by instance id."""
        instances = {}
        length = self.int()
        for _ in range(0, length):
            instance_id = self.string()
            instances[instance_id] = {
                "target": self.string(),
                "position": self.vec3f(),
                "rotation": self.vec4f(),
                "states": self.instance_states()
            }
        return instances
    def instance_states(self):
        """Read a length-prefixed list of state strings."""
        return [self.string() for _ in range(0, self.int())]
def is_renamed(record):
    """True when the record's datatype marks it as changed-and-renamed."""
    datatype = record["datatype"]
    return datatype == "CHANGED_RENAMED"
def is_new(record):
    """True when the record's datatype marks it as newly created."""
    datatype = record["datatype"]
    return datatype == "NEW"
def merge_fields(record, old_record):
    """Copy every field value from *record* into *old_record*, overwriting
    existing entries of the same type and name (in place)."""
    for field_type, fields in record["fields"].items():
        target = old_record["fields"][field_type]
        for name, value in fields.items():
            target[name] = value
def merge_extra(record, old_record):
    """Merge *record*'s extra data into *old_record* in place.

    A value equal to the EXTRA_ITEM_REMOVED sentinel deletes the item from
    the target category; any other value overwrites (or adds) it.
    """
    old_extra = old_record["extra"]
    for category, items in record["extra"].items():
        merged = old_extra.setdefault(category, {})
        for item_name, item_value in items.items():
            if item_value == EXTRA_ITEM_REMOVED:
                merged.pop(item_name, None)
            else:
                merged[item_name] = item_value
def merge_records(mod_file, records):
    """Fold *mod_file*'s records into the accumulated *records* dict.

    Unknown ids are added as-is. Known ids are merged in place (rename,
    fields, extra); a record marked NEW that collides with an existing id
    raises, because two mods cannot both create the same element.
    """
    for string_id, record in mod_file.records.items():
        existing = records.get(string_id)
        if existing is None:
            records[string_id] = record
            continue
        if is_new(record):
            raise Exception("Element %s from %s already exists."
                            % (string_id, mod_file.file_path))
        if is_renamed(record):
            existing["name"] = record["name"]
        merge_fields(record, existing)
        merge_extra(record, existing)
| 29.713942
| 73
| 0.551654
|
4a053247fb1ed6f42e7772f470b8f13a010b2c33
| 19,345
|
py
|
Python
|
src/prefect/agent/agent.py
|
louisditzel/prefect
|
b1a02fee623b965e756a38aa09059db780ab67eb
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/prefect/agent/agent.py
|
louisditzel/prefect
|
b1a02fee623b965e756a38aa09059db780ab67eb
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/prefect/agent/agent.py
|
louisditzel/prefect
|
b1a02fee623b965e756a38aa09059db780ab67eb
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import ast
import functools
import logging
import math
import os
import signal
import sys
import threading
import time
from concurrent.futures import Future, ThreadPoolExecutor
from contextlib import contextmanager
from typing import Any, Generator, Iterable, Set
import pendulum
from prefect import config
from prefect.client import Client
from prefect.engine.state import Failed, Submitted
from prefect.serialization import state
from prefect.utilities.context import context
from prefect.utilities.exceptions import AuthorizationError
from prefect.utilities.graphql import GraphQLResult, with_args
ascii_name = r"""
____ __ _ _ _
| _ \ _ __ ___ / _| ___ ___| |_ / \ __ _ ___ _ __ | |_
| |_) | '__/ _ \ |_ / _ \/ __| __| / _ \ / _` |/ _ \ '_ \| __|
| __/| | | __/ _| __/ (__| |_ / ___ \ (_| | __/ | | | |_
|_| |_| \___|_| \___|\___|\__| /_/ \_\__, |\___|_| |_|\__|
|___/
"""
@contextmanager
def exit_handler(agent: "Agent") -> Generator:
    """Context manager that converts SIGINT into a threading.Event.

    While active, Ctrl-C sets the yielded event (so a polling loop can shut
    down gracefully) instead of raising KeyboardInterrupt; the previously
    installed SIGINT handler is restored on exit.

    Args:
        - agent (Agent): the agent whose logger announces the shutdown

    Yields:
        - threading.Event: set once SIGINT has been received
    """
    exit_event = threading.Event()
    def _exit_handler(*args: Any, **kwargs: Any) -> None:
        # Invoked on SIGINT; just record the request and let the loop notice.
        agent.logger.info("Keyboard Interrupt received: Agent is shutting down.")
        exit_event.set()
    # Remember the current handler so it can be reinstated afterwards.
    original = signal.getsignal(signal.SIGINT)
    try:
        signal.signal(signal.SIGINT, _exit_handler)
        yield exit_event
    except SystemExit:
        # Let sys.exit() fall through to the handler-restoring cleanup below.
        pass
    finally:
        signal.signal(signal.SIGINT, original)
class Agent:
    """
    Base class for Agents. Information on using the Prefect agents can be found at
    https://docs.prefect.io/orchestration/agents/overview.html

    This Agent class is a standard point for executing Flows in Prefect Cloud. It is meant
    to have subclasses which inherit functionality from this class. The only piece that
    the subclasses should implement is the `deploy_flow` function, which specifies how to
    run a Flow on the given platform. It is built in this
    way to keep Prefect Cloud logic standard but allows for platform specific
    customizability.

    In order for this to operate `PREFECT__CLOUD__AGENT__AUTH_TOKEN` must be set as an
    environment variable or in your user configuration file.

    Args:
        - name (str, optional): An optional name to give this agent. Can also be set through
            the environment variable `PREFECT__CLOUD__AGENT__NAME`. Defaults to "agent"
        - labels (List[str], optional): a list of labels, which are arbitrary string identifiers used by Prefect
            Agents when polling for work
        - env_vars (dict, optional): a dictionary of environment variables and values that will be set
            on each flow run that this agent submits for execution
        - max_polls (int, optional): maximum number of times the agent will poll Prefect Cloud for flow runs;
            defaults to infinite
    """

    def __init__(
        self,
        name: str = None,
        labels: Iterable[str] = None,
        env_vars: dict = None,
        max_polls: int = None,
    ) -> None:
        # Constructor arguments fall back to values from the Prefect config.
        self.name = name or config.cloud.agent.get("name", "agent")
        self.labels = list(
            labels or ast.literal_eval(config.cloud.agent.get("labels", "[]"))
        )
        self.env_vars = env_vars or dict()
        self.max_polls = max_polls
        self.log_to_cloud = config.logging.log_to_cloud

        token = config.cloud.agent.get("auth_token")
        self.client = Client(api_token=token)
        if config.backend == "cloud":
            # Token verification and agent registration only apply to Cloud.
            self._verify_token(token)
            self.client.attach_headers({"X-PREFECT-AGENT-ID": self._register_agent()})

        logger = logging.getLogger(self.name)
        logger.setLevel(config.cloud.agent.get("level"))
        # Attach a stdout handler only once, even if several agents share a name.
        if not any([isinstance(h, logging.StreamHandler) for h in logger.handlers]):
            ch = logging.StreamHandler(sys.stdout)
            formatter = logging.Formatter(context.config.logging.format)
            formatter.converter = time.gmtime  # type: ignore
            ch.setFormatter(formatter)
            logger.addHandler(ch)

        self.logger = logger
        # Ids of flow runs currently being submitted by background threads.
        self.submitting_flow_runs = set()  # type: Set[str]

    def _verify_token(self, token: str) -> None:
        """
        Checks whether a token with a `RUNNER` scope was provided

        Args:
            - token (str): The provided agent token to verify

        Raises:
            - AuthorizationError: if token is empty or does not have a RUNNER role
        """
        if not token:
            raise AuthorizationError("No agent API token provided.")

        # Check if RUNNER role
        result = self.client.graphql(query="query { auth_info { api_token_scope } }")
        if (
            not result.data  # type: ignore
            or result.data.auth_info.api_token_scope != "RUNNER"  # type: ignore
        ):
            raise AuthorizationError("Provided token does not have a RUNNER scope.")

    def _register_agent(self) -> str:
        """
        Register this agent with Prefect Cloud and retrieve agent ID

        Returns:
            - The agent ID as a string
        """
        agent_id = self.client.register_agent(
            agent_type=type(self).__name__, name=self.name, labels=self.labels
        )
        return agent_id

    def start(self) -> None:
        """
        The main entrypoint to the agent. This function loops and constantly polls for
        new flow runs to deploy
        """
        try:
            with exit_handler(self) as exit_event:
                self.agent_connect()

                # Loop intervals for query sleep backoff: index -> seconds to
                # wait before the next poll when no work is found.
                loop_intervals = {
                    0: 0.25,
                    1: 0.5,
                    2: 1.0,
                    3: 2.0,
                    4: 4.0,
                    5: 8.0,
                    6: 10.0,
                }

                index = 0
                remaining_polls = math.inf if self.max_polls is None else self.max_polls

                # the max workers default has changed in 3.8. For stable results the
                # default 3.8 behavior is elected here.
                max_workers = min(32, (os.cpu_count() or 1) + 4)
                with ThreadPoolExecutor(max_workers=max_workers) as executor:
                    self.logger.debug("Max Workers: {}".format(max_workers))
                    while (
                        not exit_event.wait(timeout=loop_intervals[index])
                        and remaining_polls
                    ):
                        self.heartbeat()

                        # Reset the backoff when work was found; otherwise back
                        # off one step (capped at the largest interval).
                        if self.agent_process(executor):
                            index = 0
                        elif index < max(loop_intervals.keys()):
                            index += 1

                        remaining_polls -= 1

                        self.logger.debug(
                            "Next query for flow runs in {} seconds".format(
                                loop_intervals[index]
                            )
                        )
        finally:
            self.on_shutdown()

    def on_shutdown(self) -> None:
        """
        Invoked when the event loop is exiting and the agent is shutting down. Intended
        as a hook for child classes to optionally implement.
        """

    def agent_connect(self) -> None:
        """
        Verify agent connection to Prefect API by querying
        """
        print(ascii_name)
        self.logger.info(
            "Starting {} with labels {}".format(type(self).__name__, self.labels)
        )
        self.logger.info(
            "Agent documentation can be found at https://docs.prefect.io/orchestration/"
        )

        self.logger.info(
            "Agent connecting to the Prefect API at {}".format(config.cloud.api)
        )
        try:
            # A trivial query; any response proves connectivity.
            self.client.graphql(query="query { hello }")
        except Exception as exc:
            self.logger.error(
                "There was an error connecting to {}".format(config.cloud.api)
            )
            self.logger.error(exc)

        self.logger.info("Waiting for flow runs...")

    def deploy_and_update_flow_run(self, flow_run: "GraphQLResult") -> None:
        """
        Deploy a flow run and update Cloud with the resulting deployment info.
        If any errors occur when submitting the flow run, capture the error and log to Cloud.

        Args:
            - flow_run (GraphQLResult): The specific flow run to deploy
        """
        # Deploy flow run and mark failed if any deployment error
        try:
            self.update_state(flow_run)
            deployment_info = self.deploy_flow(flow_run)
            if getattr(flow_run, "id", None):
                self.client.write_run_logs(
                    [
                        dict(
                            flow_run_id=getattr(flow_run, "id"),  # type: ignore
                            name=self.name,
                            message="Submitted for execution: {}".format(
                                deployment_info
                            ),
                            level="INFO",
                        )
                    ]
                )
        except Exception as exc:
            # if the state update failed, we don't want to follow up with another state update
            if "State update failed" in str(exc):
                self.logger.debug("Updating Flow Run state failed: {}".format(str(exc)))
                return
            self.logger.error(
                "Logging platform error for flow run {}".format(
                    getattr(flow_run, "id", "UNKNOWN")  # type: ignore
                )
            )
            if getattr(flow_run, "id", None):
                self.client.write_run_logs(
                    [
                        dict(
                            flow_run_id=getattr(flow_run, "id"),  # type: ignore
                            name=self.name,
                            message=str(exc),
                            level="ERROR",
                        )
                    ]
                )
            self.mark_failed(flow_run=flow_run, exc=exc)

    def on_flow_run_deploy_attempt(self, fut: "Future", flow_run_id: str) -> None:
        """
        Indicates that a flow run deployment has been deployed (successfully or otherwise).
        This is intended to be a future callback hook, called in the agent's main thread
        when the background thread has completed the deploy_and_update_flow_run() call, either
        successfully, in error, or cancelled. In all cases the agent should be open to
        attempting to deploy the flow run if the flow run id is still in the Cloud run queue.

        Args:
            - fut (Future): a callback requirement, the future which has completed or been cancelled.
            - flow_run_id (str): the id of the flow run that the future represents.
        """
        # NOTE(review): set.remove raises KeyError if the id is absent —
        # presumably the id is always present when this callback fires; confirm.
        self.submitting_flow_runs.remove(flow_run_id)
        self.logger.debug("Completed flow run submission (id: {})".format(flow_run_id))

    def agent_process(self, executor: "ThreadPoolExecutor") -> bool:
        """
        Full process for finding flow runs, updating states, and deploying.

        Args:
            - executor (ThreadPoolExecutor): the interface to submit flow deployments in background threads

        Returns:
            - bool: whether or not flow runs were found
        """
        flow_runs = None
        try:
            flow_runs = self.query_flow_runs()

            if flow_runs:
                self.logger.info(
                    "Found {} flow run(s) to submit for execution.".format(
                        len(flow_runs)
                    )
                )

            for flow_run in flow_runs:
                fut = executor.submit(self.deploy_and_update_flow_run, flow_run)
                self.submitting_flow_runs.add(flow_run.id)
                fut.add_done_callback(
                    functools.partial(
                        self.on_flow_run_deploy_attempt, flow_run_id=flow_run.id
                    )
                )

        except Exception as exc:
            self.logger.error(exc)

        return bool(flow_runs)

    def query_flow_runs(self) -> list:
        """
        Query Prefect Cloud for flow runs which need to be deployed and executed

        Returns:
            - list: A list of GraphQLResult flow run objects
        """
        self.logger.debug("Querying for flow runs")
        # keep a copy of what was currently submitting before the query (future callbacks may be updating this set)
        currently_submitting_flow_runs = self.submitting_flow_runs.copy()

        # Get scheduled flow runs from queue
        mutation = {
            "mutation($input: get_runs_in_queue_input!)": {
                "get_runs_in_queue(input: $input)": {"flow_run_ids"}
            }
        }

        now = pendulum.now("UTC")
        result = self.client.graphql(
            mutation,
            variables={
                "input": {"before": now.isoformat(), "labels": list(self.labels),}
            },
        )

        # we queried all of the available flow runs, however, some may have already been pulled
        # by this agent and are in the process of being submitted in the background. We do not
        # want to act on these "duplicate" flow runs until we've been assured that the background
        # thread has attempted to submit the work (successful or otherwise).
        flow_run_ids = set(result.data.get_runs_in_queue.flow_run_ids)  # type: ignore

        if flow_run_ids:
            msg = "Found flow runs {}".format(
                result.data.get_runs_in_queue.flow_run_ids
            )
        else:
            msg = "No flow runs found"

        already_submitting = flow_run_ids & currently_submitting_flow_runs
        target_flow_run_ids = flow_run_ids - already_submitting

        if already_submitting:
            msg += " ({} already submitting: {})".format(
                len(already_submitting), list(already_submitting)
            )

        self.logger.debug(msg)

        # Query metadata for flow runs found in queue
        query = {
            "query": {
                with_args(
                    "flow_run",
                    {
                        # match flow runs in the flow_run_ids list
                        "where": {
                            "id": {"_in": list(target_flow_run_ids)},
                            "_or": [
                                # who are EITHER scheduled...
                                {"state": {"_eq": "Scheduled"}},
                                # OR running with task runs scheduled to start more than 3 seconds ago
                                {
                                    "state": {"_eq": "Running"},
                                    "task_runs": {
                                        "state_start_time": {
                                            "_lte": str(now.subtract(seconds=3))  # type: ignore
                                        }
                                    },
                                },
                            ],
                        }
                    },
                ): {
                    "id": True,
                    "version": True,
                    "state": True,
                    "serialized_state": True,
                    "parameters": True,
                    "flow": {"id", "name", "environment", "storage", "version"},
                    with_args(
                        "task_runs",
                        {
                            "where": {
                                "state_start_time": {
                                    "_lte": str(now.subtract(seconds=3))  # type: ignore
                                }
                            }
                        },
                    ): {"id", "version", "task_id", "serialized_state"},
                }
            }
        }

        if target_flow_run_ids:
            self.logger.debug("Querying flow run metadata")
            result = self.client.graphql(query)
            return result.data.flow_run  # type: ignore
        else:
            return []

    def update_state(self, flow_run: GraphQLResult) -> None:
        """
        After a flow run is grabbed this function sets the state to Submitted so it
        won't be picked up by any other processes

        Args:
            - flow_run (GraphQLResult): A GraphQLResult flow run object
        """
        self.logger.debug(
            "Updating states for flow run {}".format(flow_run.id)  # type: ignore
        )

        # Set flow run state to `Submitted` if it is currently `Scheduled`
        if state.StateSchema().load(flow_run.serialized_state).is_scheduled():
            self.logger.debug(
                "Flow run {} is in a Scheduled state, updating to Submitted".format(
                    flow_run.id  # type: ignore
                )
            )
            self.client.set_flow_run_state(
                flow_run_id=flow_run.id,
                version=flow_run.version,
                state=Submitted(
                    message="Submitted for execution",
                    state=state.StateSchema().load(flow_run.serialized_state),
                ),
            )

        # Set task run states to `Submitted` if they are currently `Scheduled`
        for task_run in flow_run.task_runs:
            if state.StateSchema().load(task_run.serialized_state).is_scheduled():
                self.logger.debug(
                    "Task run {} is in a Scheduled state, updating to Submitted".format(
                        task_run.id  # type: ignore
                    )
                )
                self.client.set_task_run_state(
                    task_run_id=task_run.id,
                    version=task_run.version,
                    state=Submitted(
                        message="Submitted for execution.",
                        state=state.StateSchema().load(task_run.serialized_state),
                    ),
                )

    def mark_failed(self, flow_run: GraphQLResult, exc: Exception) -> None:
        """
        Mark a flow run as `Failed`

        Args:
            - flow_run (GraphQLResult): A GraphQLResult flow run object
            - exc (Exception): An exception that was raised to use as the `Failed`
                message
        """
        self.client.set_flow_run_state(
            flow_run_id=flow_run.id,
            version=flow_run.version,
            state=Failed(message=str(exc)),
        )
        self.logger.error("Error while deploying flow: {}".format(repr(exc)))

    def deploy_flow(self, flow_run: GraphQLResult) -> str:
        """
        Meant to be overridden by a platform specific deployment option

        Args:
            - flow_run (GraphQLResult): A GraphQLResult flow run object

        Returns:
            - str: Information about the deployment

        Raises:
            - ValueError: if deployment attempted on unsupported Storage type
        """
        raise NotImplementedError()

    def heartbeat(self) -> None:
        """
        Meant to be overridden by a platform specific heartbeat option
        """
if __name__ == "__main__":
Agent().start()
| 38.080709
| 144
| 0.537193
|
4a053265c70730b62a3db8cc282ec8bc01edd1ce
| 64,015
|
py
|
Python
|
tools/latseq_logs.py
|
Orange-OpenSource/LatSeq
|
14c3bd210c4f9502c353a6de59be8e54435e6869
|
[
"BSD-3-Clause"
] | null | null | null |
tools/latseq_logs.py
|
Orange-OpenSource/LatSeq
|
14c3bd210c4f9502c353a6de59be8e54435e6869
|
[
"BSD-3-Clause"
] | null | null | null |
tools/latseq_logs.py
|
Orange-OpenSource/LatSeq
|
14c3bd210c4f9502c353a6de59be8e54435e6869
|
[
"BSD-3-Clause"
] | 2
|
2021-06-22T09:32:45.000Z
|
2021-11-11T12:57:06.000Z
|
#!/usr/bin/python3
#################################################################################
# Software Name : LatSeq
# Version: 1.0
# SPDX-FileCopyrightText: Copyright (c) 2020-2021 Orange Labs
# SPDX-License-Identifier: BSD-3-Clause
#
# This software is distributed under the BSD 3-clause,
# the text of which is available at https://opensource.org/licenses/BSD-3-Clause
# or see the "license.txt" file for more details.
#
# Author: Flavien Ronteix--Jacquet
# Software description: LatSeq rebuild journeys script
#################################################################################
"""Process latseq logs module
This module is used to process LatSeq logs and provides
some useful statistics
Example:
python3 tools/latseq_logs.py -l data/latseq.simple.lseq
Attributes:
none
TODO
* Retransmissions
* find ALL in and out points (dynamically). Should I do ?
* APIify with flask to be called easily by the others modules
https://programminghistorian.org/en/lessons/creating-apis-with-python-and-flask#creating-a-basic-flask-application
* Rebuild_packet with multithreading...
* Uniformize output to julia processing
* Alex Algorithm container
"""
import sys
import os
import argparse
import re
import datetime
import operator
import statistics
import numpy
from copy import deepcopy
import pickle
import simplejson as json
import decimal
from tqdm import tqdm
import logging
# Configure root logging once at import time; log to stderr so that stdout
# stays free for the machine-readable output written by this tool.
logging.basicConfig(
    level=logging.INFO,
    format="[%(asctime)s] [%(levelname)s] %(message)s",
    stream=sys.stderr
)
# import math
import rdtsctots
#
# GLOBALS
#
# Reducing search space
# Threshold (seconds) to seek the next trace.
# NOTE(review): the original comments said "4ms" but the values are 0.1 s
# (100 ms) — confirm which window is actually intended.
DURATION_TO_SEARCH_PKT = decimal.Decimal(0.1)
# Threshold (seconds) to find segmentation
DURATION_TO_SEARCH_FORKS = decimal.Decimal(0.1)
# TODO: limit time to search concatenation: or use the properties like size ?
# DURATION_TO_SEARCH_CONCA = 0.005 # 5ms to find concatenation
DURATION_TO_SEARCH_RETX = decimal.Decimal(0.01)  # 10ms : Set according to drx-RetransmissionTimerDL RRC config (3GPP-TS38.321) for MAC and max_seq_num for RLC (3GPP-TS38.322)
# decimal.getcontext().prec = 6 # fix precision to 6 (precision of timestamp, then do not be more precise). BE CAREFUL : precision is different to fix place after point
S_TO_MS = 1000  # seconds-to-milliseconds conversion factor
KWS_BUFFER = ['tx', 'rx', 'retx']  # buffer keywords
KWS_NO_CONCATENATION = ['pdcp.in']  # TODO
KWS_IN_D = ['ip.in']  # TODO : put in conf file and verify why when add 'ip' it breaks rebuild
KWS_OUT_D = ['phy.out.ant']  # Downlink egress measurement point
KWS_IN_U = ['phy.in.ant']  # Uplink ingress measurement point
KWS_OUT_U = ['ip.out']  # Uplink egress measurement point
VERBOSITY = False  # Verbosity for rebuild phase False by default
#
# UTILS
#
def epoch_to_datetime(epoch: decimal.Decimal) -> str:
    """Format a Unix epoch (seconds) as 'YYYYMMDD_HHMMSS.ffffff' (local time)."""
    moment = datetime.datetime.fromtimestamp(float(epoch))
    return moment.strftime('%Y%m%d_%H%M%S.%f')
def dstamp_to_epoch(dstamptime: str) -> decimal.Decimal:
    """Parse a 'YYYYMMDD_HHMMSS.ffffff' local-time stamp into a Decimal epoch."""
    parsed = datetime.datetime.strptime(dstamptime, "%Y%m%d_%H%M%S.%f")
    return decimal.Decimal(parsed.timestamp())
def path_to_str(pathP: list) -> str:
    """Render a path (ordered list of point names) as 'a -> b -> c'.

    The original hand-rolled loop reimplemented ``str.join`` exactly:
    an empty list yields "", a single element yields that element, and
    longer lists yield the names separated by " -> ".
    """
    return " -> ".join(pathP)
def dict_ids_to_str(idsP: dict) -> str:
    """Flatten an identifier dict into 'k1v1.k2v2...' (insertion order)."""
    parts = [f"{key}{value}" for key, value in idsP.items()]
    return ".".join(parts)
def make_immutable_list(listP: list) -> tuple:
    """Return an immutable (tuple) copy of *listP*."""
    return (*listP,)
def write_string_to_stdout(sstream: str):
    """Write one line to stdout, tolerating a broken pipe (e.g. ``| head``).

    A broken pipe is logged as a warning and ignored; any other I/O error
    is logged and terminates the process.

    BUGFIX: the original compared ``e.errno == errno.EPIPE`` but never
    imported ``errno``, so ANY IOError raised a NameError instead of being
    handled. ``BrokenPipeError`` is the EPIPE subclass of OSError/IOError,
    so catching it first needs no errno comparison at all.
    """
    try:
        sys.stdout.write(sstream + '\n')
    except BrokenPipeError:
        logging.warning("write_string_to_stdout() : Broken pipe error")
    except IOError as e:
        logging.error(f"write_string_to_stdout() : {e}")
        exit()
#
# CLASSES
#
class latseq_log:
"""class for log processing associated to a log file
Args:
logpathP (str): path to the log file
Attributes:
logpath (str): path to the log file
initialized (bool): become true when __init__ is successfully done
raw_inputs (:obj:`list` of :obj:`str`): list of lines from logpath file
raw_infos (:obj:`list` of :obj:`str`): list of lines from logpath
inputs (:obj:`list` of :obj:`list`): list of lines after a first pass
of processing from raw_inputs
inputs[i][0] : Timestamp
inputs[i][1] : Direction
inputs[i][2] : Src point
inputs[i][3] : Dst point
inputs[i][4] : Properties
inputs[i][5] : Global identifiers
inputs[i][6] : Local identifiers
infos (:obj:`list` of :obj:`list`): list of information lines
infos[i][0] : Timestamp
infos[i][1] : Point
infos[i][2] : Properties
infos[i][3] : Context identifiers
dataids (:obj:`list` of :obj:`str`): list of dataids found in the logs
points (:obj:`dict` of :obj:`list`): list of points
points[i] (:obj:`dict`): a point
points[i]['dir'] (:obj:`list` of int): list of direction where this point can be found
points[i]['count'] (int): number of occurences of this point on `inputs`
points[i]['next'] (:obj:`list` of str): list of possible next points
points[i]['duration'] (:obj:`list` of float): list of duration for this point in the `journey`. WARNING: Computed at a rebuild journey function... not in build_points
pointsInD (:obj:`list` of str): list of input points for Downlink
pointsInU (:obj:`list` of str): list of input points for Uplink
pointsOutD (:obj:`list` of str): list of output points for Downlink
pointsOutU (:obj:`list` of str): list of output points for Uplink
paths (:obj:`list` of :obj:`list`):
list[0] is a list of all DownLink paths possibles
list[0][i] : ordered list of points' name
list[1] is a list of all UpLink paths possibles
timestamps (:obj:`list` of float): list of timestamps in the logs
journeys (:obj:`dict`): the dictionnary containing journeys
journeys[i] (:obj:`dict`): a journey
journeys[i]['dir'] (int): 0 if a Downlink journey, 1 otherwise
journeys[i]['glob'] (:obj:`dict`): the globals context ids to match necessary
journeys[i]['completed'] (bool): True if the journey is compete, e.g. journey from an in to an out point
journeys[i]['ts_in'] (float): timestamp at which the journey begins
journeys[i]['ts_out'] (float): timestamp at which the journey ends if `completed`
journeys[i]['next_points'] (:obj:`list`): the next points' identifier expected
journeys[i]['set'] (:obj:`list` of :obj:`tuple`): list of measures
journeys[i]['set'][s][0] (int): corresponding id in `input`
journeys[i]['set'][s][1] (float): timestamp
journeys[i]['set'][s][2] (string): segment
journeys[i]['set_ids'] (:obj:`list`): the last measurement point identifier added
journeys[i]['path'] (int): the path id according to self.paths
out_journeys (:obj:`list`): the list of measurements like `raw_inputs` but ordered, filtered and with unique identifier (uid) by journey
out_journeys[o] : a log line of out_journeys = a log line from input (if input is present in a journey)
out_journeys[o][0] (Decimal): timestamp
out_journeys[o][1] (char): direction, U/D
out_journeys[o][2] (str): segment
out_journeys[o][3] (str): properties
out_journeys[o][4] (str): data identifier with journey id(s) associated to this measurement
"""
    def __init__(self, logpathP: str):
        """Read, clean and index the LatSeq log file.

        Runs the full preparation pipeline: _read_log -> _clean_log ->
        _build_points -> _build_paths -> _build_timestamp, setting
        `initialized` to True only when every stage succeeded.

        Args:
            logpathP (str): path to the log file

        Raises:
            AssertionError: no path was provided
            FileNotFoundError / IOError: the file cannot be read
            ValueError: cleaning/filtering failed
            Exception: points or paths could not be built
        """
        self.logpath = logpathP
        self.initialized = False
        # Open and Read the logpath file
        if not self.logpath:
            raise AssertionError("Error, no logpath provided")
        try:
            self.raw_inputs = list()
            self.raw_infos = list()
            self._read_log()
        except FileNotFoundError:
            raise FileNotFoundError(f"Error, {logpathP} not found")
        except IOError:
            raise IOError(f"Error at Reading {logpathP}")
        else:
            # Filter raw_inputs to fill inputs
            try:
                self.inputs = list()
                self.infos = list()
                self.dataids = list()
                self._clean_log()
            except Exception:
                raise ValueError(f"Error in Cleaning or Filtering {logpathP}")
            # Build points
            try:
                self.points = dict()  # the couple (key, "next") is basically a graph
                # IN/OUT points come from the static KWS_* configuration, which
                # makes the dynamic detection branch in _build_points() a no-op.
                self.pointsInD = KWS_IN_D
                self.pointsOutD = KWS_OUT_D
                self.pointsInU = KWS_IN_U
                self.pointsOutU = KWS_OUT_U
                self._build_points()
            except Exception:
                raise Exception("Error at getting points")
            else:
                # Build paths
                try:
                    self.paths = [[], []]
                    self._build_paths()
                except Exception as e:
                    raise e
                # Build timestamps
                self.timestamps = list()
                self._build_timestamp()
                # Returns
                self.initialized = True
                return
def _read_file(self):
"""Read the content of the file pointed by `logpath`
Returns:
str: the content of the log file
Raises:
IOError: error at opening the log file
"""
try:
with open(self.logpath, 'r') as f:
logging.info(f"latseq_log._read_file() : Reading {self.logpath} ...")
return f.read()
except IOError:
raise IOError(f"error at opening ({self.logpath})")
    def _read_log(self):
        """Read log file `logpath` to fill up `raw_inputs` with cleaned string entries.

        Filters out comments, empty lines, malformed lines (fewer than four
        space-separated fields) and synchronisation ('S') lines. Information
        ('I') lines go to `raw_infos`; everything else goes to `raw_inputs`.
        """
        for l in self._read_file().splitlines():
            if not l:  # skip empty lines
                continue
            # Skip full-line comments.
            # https://www.tutorialspoint.com/python/python_reg_expressions.htm
            if re.match(r'#.*$', l, re.M):
                continue
            tmp = l.split(' ')
            if len(tmp) < 4:
                logging.warning(f"latseq_log._read_log() : {l} is a malformed line")
                continue
            if tmp[1] == 'S':  # synchronisation-type line
                continue
            if tmp[1] == 'I':  # information-type line
                # Info entries keep (timestamp, point, properties).
                self.raw_infos.append(tuple([
                    decimal.Decimal(tmp[0]),
                    tmp[2],
                    tmp[3]
                ]))
                continue
            # TODO: make this field layout dynamic, driven by the format
            # given in the log header
            # Data entries keep (timestamp, direction 0=D/1=U, points, ids).
            self.raw_inputs.append(tuple([
                decimal.Decimal(tmp[0]),
                0 if tmp[1] == 'D' else 1,
                tmp[2],
                tmp[3]]))
def _clean_log(self):
"""Clean logs from `raw_inputs` to `inputs`
Extract ids and values from pattern id123, 'id': 123
Transform the string entry in tuple entry
At the end, `input` is made immutable for the rest of the program
Filters :
rnti65535
Attributes:
inputs (:obj:`list` of :obj:`tuple`) : list of log elements
inputs[i][0] : Timestamp
inputs[i][1] : Direction
inputs[i][2] : Src point
inputs[i][3] : Dst point
inputs[i][4] : Properties
inputs[i][5] : Global identifiers
inputs[i][6] : Local identifiers
infos (:obj:`list` of :obj:`list`): list of information lines
infos[i][0] : Timestamp
infos[i][1] : Point
infos[i][2] : Properties
infos[i][3] : Context identifiers
Raises:
ValueError : Error at parsing a line
"""
# patterns dataidO to detect
match_ids = re.compile("([a-zA-Z]+)([0-9]+)")
# First process informations
self.raw_infos.sort(key=operator.itemgetter(1, 0)) # sort by point followed timestamp
for i in self.raw_infos:
# process infomation line
try:
i_points = tuple(i[1].split('.'))
tmp_infos_d = dict()
meas_ctxt = i[2].split(':') # Left part represents properties, right parts optional local identifier
for d in meas_ctxt[0].split('.'):
try:
did = match_ids.match(d).groups()
except Exception:
continue
else:
tmp_infos_d[did[0]] = did[1]
if len(meas_ctxt) == 2: # measurement context identifier
tmp_ctxt_d = dict()
for c in meas_ctxt[1].split('.'):
try:
dic = match_ids.match(d).groups()
except Exception:
continue
else:
tmp_ctxt_d[dic[0]] = dic[1]
# TODO : A problem here, tmp_infos_d == tmp_ctxt_d at yielding
self.infos.append((
i[0],
i_points,
deepcopy(tmp_infos_d),
deepcopy(tmp_ctxt_d),
))
else:
self.infos.append((
i[0],
i_points,
deepcopy(tmp_infos_d),
))
# not other processing needed for infos
except Exception:
logging.error(f"latseq_log._clean_log() : at parsing information line {i}")
# sort by timestamp. important assumption for the next methods
self.raw_inputs.sort(key=operator.itemgetter(0))
# match_emptyrnti = re.compile("rnti65535")
for e in self.raw_inputs:
# an entry is a timestamp, a direction,
# an in point an out point, a size,
# a list of glibal context data id and local data id
# skip lines which matches the following re
if re.search("rnti65535", e[3]):
continue
# process line
try:
e_points = e[2].split('--')
dataids = e[3].split(':')
if len(dataids) < 3:
continue
ptmp = {}
# properties values
if dataids[0] != '':
for p in dataids[0].split('.'):
try:
dip = match_ids.match(p).groups()
except Exception:
continue
else:
ptmp[dip[0]] = dip[1]
# global context ids
ctmp = {}
if dataids[1] != '':
for c in dataids[1].split('.'):
try:
# dic[0] is the global context identifier
# dic[1] the value associated
dic = match_ids.match(c).groups()
except Exception:
continue
else:
ctmp[dic[0]] = dic[1]
if dic[0] not in self.dataids:
self.dataids.append(dic[0])
dtmp = {}
# local context ids
if dataids[2] != '':
for d in dataids[2].split('.'):
try:
# did[0] is the local context identifier
# did[1] the value associated
did = match_ids.match(d).groups()
except Exception:
continue
else:
if did[0] not in dtmp:
dtmp[did[0]] = did[1]
else: # case we have multiple value for the same id
if isinstance(dtmp[did[0]], list):
dtmp[did[0]].append(did[1])
else:
tmpl = [dtmp[did[0]], did[1]]
del dtmp[did[0]]
dtmp[did[0]] = tmpl
if did[0] not in self.dataids:
self.dataids.append(did[0])
self.inputs.append((
e[0],
e[1],
e_points[0],
e_points[1],
deepcopy(ptmp),
deepcopy(ctmp),
deepcopy(dtmp)
))
except Exception:
raise ValueError(f"Error at parsing line {e}")
self.inputs = make_immutable_list(self.inputs)
    def _build_points(self):
        """Build graph of measurement `points` and find in and out points.

        Attributes:
            points (:obj:`dict`):
                points[name]['next'] (:obj:`list` of str): possible next points
                points[name]['count'] (int): number of occurrences of this point
                points[name]['dir'] (list): directions seen (0=D, 1=U, or both)
        """
        # Build graph
        for e in self.raw_inputs:
            e_points = e[2].split('--')  # [0] is src point and [1] is dest point
            if e_points[0] not in self.points:
                # list of pointers and direction 0 for D and 1 for U
                self.points[e_points[0]] = {}
                self.points[e_points[0]]['next'] = []
                self.points[e_points[0]]['count'] = 0
                self.points[e_points[0]]['dir'] = [e[1]]
            if e_points[1] not in self.points[e_points[0]]['next']:
                # Register every prefix combination of the dest point,
                # ex. rlc.seg.um : rlc, rlc.seg, rlc.seg.um
                destpt = e_points[1].split('.')
                for i in range(len(destpt)):
                    tmps = ""
                    j = 0
                    while j <= i:
                        tmps += f"{destpt[j]}."
                        j += 1
                    self.points[e_points[0]]['next'].append(tmps[:-1])
            if e_points[1] not in self.points:
                self.points[e_points[1]] = {}
                self.points[e_points[1]]['next'] = []
                self.points[e_points[1]]['count'] = 1
                self.points[e_points[1]]['dir'] = [e[1]]
            self.points[e_points[0]]['count'] += 1
            if e[1] not in self.points[e_points[0]]['dir']:
                self.points[e_points[0]]['dir'].append(e[1])
        # The IN and OUT points may not be fixed before calling this method.
        # NOTE(review): __init__ assigns pointsInD/OutD/InU/OutU from KWS_*
        # before calling this method, so this dynamic-detection branch looks
        # unreachable in the current flow; its dict iteration
        # (`for x,y in self.points`) also looks wrong for a dict — confirm
        # before relying on it.
        if not hasattr(self, 'pointsInD') or not hasattr(self, 'pointsInU') or not hasattr(self, 'pointsOutD') or not hasattr(self, 'pointsOutU'):
            # Find IN and OUT points dynamically
            tmpD = [x[0] for x,y in self.points if y[1]==0]
            tmpDin = tmpD
            tmpDout = []
            tmpU = [x[0] for x in self.points if x[1]==1]
            tmpUin = tmpU
            tmpUout = []
            for p in self.points:
                # case D
                if p[1] == 0:
                    # if not pointed by anyone, then, it is the input
                    for e in p[0]:
                        tmpDin.remove(e)
                        # if pointed but not in keys, it is the output
                        if e not in tmpD:
                            tmpDout.append(e)
                elif p[1] == 1:
                    # if not pointed by anyone, then, it is the input
                    for e in p[0]:
                        tmpUin.remove(e)
                        # if pointed but not in keys, it is the output
                        if e not in tmpU:
                            tmpUout.append(e)
                else:
                    logging.error(f"latseq_log._build_points() : Unknown direction for {p[0]} : {p[1]}")
            self.pointsInD = tmpDin
            self.pointsOutD = tmpDout
            self.pointsInU = tmpUin
            self.pointsOutU = tmpUout
def _build_paths(self):
"""Build all possible `paths` in the graph `points`
BFS is used as algorithm to build all paths possible between an IN and OUT point
"""
def _find_all_paths(graphP: dict, startP: str, endP: str, pathP=[]):
tmppath = pathP + [startP]
if startP == endP:
return [tmppath]
if startP not in graphP:
return []
paths = []
for p in graphP[startP]['next']:
if p not in tmppath:
newpaths = _find_all_paths(graphP, p, endP, tmppath)
for newpath in newpaths:
paths.append(newpath)
return paths
# build downlink paths
for i in self.pointsInD:
for o in self.pointsOutD:
self.paths[0].extend(_find_all_paths(self.points, i, o))
for i in self.pointsInU:
for o in self.pointsOutU:
self.paths[1].extend(_find_all_paths(self.points, i, o))
if len(self.paths[0]) == 0 and len(self.paths[1]) == 0:
raise Exception("Error no paths found in Downlink nor in Uplink")
elif len(self.paths[0]) == 0:
logging.info("latseq_log._build_paths() : no path found in Downlink")
elif len(self.paths[1]) == 0:
logging.info("latseq_log._build_paths() : no path found in Uplink")
else: # make immutable paths
for dp in range(len(self.paths)):
for p in range(len(self.paths[dp])):
self.paths[dp][p] = make_immutable_list(self.paths[dp][p])
def _build_timestamp(self):
"""Build `timestamps` a :obj:`list` of Decimal of timestamp
"""
self.timestamps = list(map(lambda x: x[0], self.raw_inputs))
def rebuild_packets_journey_recursively(self):
"""Rebuild the packets journey from a list of measure recursively
Algorithm:
for each input packet, try to rebuild the journey with the next measurements (depth limited)
Args:
inputs: ordered and cleaned inputs
Attributs:
journeys (:obj:`dict`): the dictionnary of journey
out_journeys (:obj:`list`): the list of journeys prepare for output
"""
self.journeys = dict()
# Case: the instance has not been initialized correctly
if not self.initialized:
try:
self(self.logpath)
except Exception:
raise Exception("Impossible to rebuild packet because this instance of latseq_log has not been initialized correctly")
nb_meas = len(self.inputs) # number of measure in self.inputs
info_meas = {}
list_meas = list(range(nb_meas)) # list of measures not in a journey
if VERBOSITY:
pbar = tqdm(range(nb_meas), file=sys.__stderr__)
point_added = {} # point added
pointer = 0 # base pointer on the measure in self.inputs for the current journey's input
local_pointer = 0 # pointer on the current tested measure candidate for the current journey
def _measure_ids_in_journey(p_gids: list, p_lids: list, j_gids: list, j_last_element: dict):
"""Returns the dict of common identifiers if the measure is in the journey
Otherwise returns an empty dictionnary
Algorithm:
All global identifiers should match.
All common identifiers' values should match
Arguments:
p_gids : Trace global ids
p_lids : Trace local ids
j_gids : Journey global ids
j_last_element : Last traces added to journey
Returns:
(list, :obj:`dict`): returns
A list of global ids if journeys global ids empty and trace match
A dict of matched identifiers.
Empty if the point is not in journey (false)
"""
if j_gids: # if global ids of journeys not empty
for k in p_gids: # for all global ids, first filter
if k in j_gids:
if p_gids[k] != j_gids[k]:
return () # False
else: # The global context id is not in the contet of this journey, continue
return () # False
res_matched = {}
# for all local ids in measurement point
for k_lid in p_lids:
if k_lid in j_last_element[6]: # if the local ids are present in the 2 points
# Case : multiple value for the same identifier
if isinstance(j_last_element[6][k_lid], list):
match_local_in_list = False
for v in j_last_element[6][k_lid]:
if p_lids[k_lid] == v: # We want only one matches the id
match_local_in_list = True
res_matched[k_lid] = v
# remove the multiple value for input to keep only the one used
j_last_element[6][k_lid] = v
break # for v in j_last_lids[k_lid]
if not match_local_in_list:
return ()
# Case : normal case, one value per identifier
else:
if p_lids[k_lid] != j_last_element[6][k_lid]: # the local id k_lid do not match
return ()
else:
res_matched[k_lid] = p_lids[k_lid]
if not j_gids: # If no global ids for journeys and trace match
return (p_gids, res_matched)
else:
return ([],res_matched)
def _get_next(listP: list, endP: int, pointerP: int) -> int:
pointerP += 1
while pointerP not in listP and pointerP < endP - 1:
pointerP += 1
return pointerP
def _rec_rebuild(pointerP: int, local_pointerP: int, parent_journey_id: int):
    """Rebuild a journey from a parent measure (recursive on segmentations).

    Args:
        pointerP (int): the index in inputs of the parent measure
        local_pointerP (int): the index in inputs of the current measure candidate for the journey
        parent_journey_id (int): the id of the journey being rebuilt; for a
            segmented child this differs from the outer-loop ``newid``

    Returns:
        bool: True if the journey is completed
    """
    seg_list = {}
    # max duration to search the NEXT fingerprint, not the latency of the whole journey
    max_duration_to_search = self.inputs[pointerP][0] + DURATION_TO_SEARCH_PKT
    # LOOP: the journey is not completed and we still have a local_pointer to consider
    while not self.journeys[parent_journey_id]['completed'] and local_pointerP < nb_meas and self.inputs[local_pointerP][0] < max_duration_to_search:
        tmp_p = self.inputs[local_pointerP]
        # Case: wrong direction
        if tmp_p[1] != self.journeys[parent_journey_id]['dir']:
            local_pointerP = _get_next(list_meas, nb_meas, local_pointerP)
            continue
        # Case: the measurement point is an input (a journey never re-enters)
        if tmp_p[1] == 0:  # Downlink
            if tmp_p[2] in self.pointsInD:
                local_pointerP = _get_next(list_meas, nb_meas, local_pointerP)
                continue
        else:  # Uplink
            if tmp_p[2] in self.pointsInU:
                local_pointerP = _get_next(list_meas, nb_meas, local_pointerP)
                continue
        # Case: the measurement point is not reachable from the journey's last point
        if tmp_p[2] not in self.journeys[parent_journey_id]['next_points']:
            local_pointerP = _get_next(list_meas, nb_meas, local_pointerP)
            continue
        # Case: Normal — take the first occurrence which is matching
        matched_ids = _measure_ids_in_journey(
            tmp_p[5],
            tmp_p[6],
            self.journeys[parent_journey_id]['glob'],
            self.inputs[self.journeys[parent_journey_id]['set'][-1][0]]
        )
        if not matched_ids:
            local_pointerP = _get_next(list_meas, nb_meas, local_pointerP)
            continue
        # Case: found a match
        logging.debug(f"Add {local_pointerP} to {parent_journey_id}")
        if local_pointerP not in point_added:
            point_added[local_pointerP] = [parent_journey_id]
        else:
            point_added[local_pointerP].append(parent_journey_id)
        if matched_ids[0]:
            self.journeys[parent_journey_id]['glob'].update(matched_ids[0])
        seg_local_pointer = _get_next(list_meas, nb_meas, local_pointerP)
        # Case : search for segmentations (forks) of this measure.
        # DURATION_TO_SEARCH_FORKS bounds the algorithm's complexity.
        max_seg_duration = tmp_p[0] + DURATION_TO_SEARCH_FORKS
        # LOOP: we still have a seg local pointer to consider
        while seg_local_pointer < nb_meas and self.inputs[seg_local_pointer][0] < max_seg_duration:
            seg_tmp_p = self.inputs[seg_local_pointer]
            # Case: wrong direction
            if seg_tmp_p[1] != self.journeys[parent_journey_id]['dir']:
                seg_local_pointer = _get_next(list_meas, nb_meas, seg_local_pointer)
                continue
            # Case: the src points are different, not a candidate for segmentation
            if seg_tmp_p[2] != tmp_p[2]:
                seg_local_pointer = _get_next(list_meas, nb_meas, seg_local_pointer)
                continue
            seg_matched_ids = _measure_ids_in_journey(
                seg_tmp_p[5],
                seg_tmp_p[6],
                self.journeys[parent_journey_id]['glob'],
                self.inputs[self.journeys[parent_journey_id]['set'][-1][0]])
            # Case: found a match, hence a segmentation
            if seg_matched_ids:
                if local_pointerP not in seg_list:
                    seg_list[local_pointerP] = {}
                seg_list[local_pointerP][seg_local_pointer] = seg_matched_ids[1]
                logging.debug(f"Seg {seg_local_pointer} of {local_pointerP} to {parent_journey_id}")
                seg_local_pointer = _get_next(list_meas, nb_meas, seg_local_pointer)
                continue
            seg_local_pointer = _get_next(list_meas, nb_meas, seg_local_pointer)
        # end while seg_local_pointer < nb_meas
        # At this point, all the possible forks have been collected
        self.journeys[parent_journey_id]['set'].append((
            self.inputs.index(tmp_p),  # NOTE(review): first equal entry, O(n) — presumably unique lines
            tmp_p[0],
            f"{tmp_p[2]}--{tmp_p[3]}"))
        self.journeys[parent_journey_id]['set_ids'].update(matched_ids[1])
        # Try to narrow down the path id
        if isinstance(self.journeys[parent_journey_id]['path'], dict):
            paths_to_remove = []
            for path in self.journeys[parent_journey_id]['path']:
                if self.paths[self.journeys[parent_journey_id]['dir']][path][self.journeys[parent_journey_id]['path'][path]] != tmp_p[2]:
                    paths_to_remove.append(path)
                else:
                    if len(self.paths[self.journeys[parent_journey_id]['dir']][path]) > 1:
                        # BUG FIX: advance the candidate-path index of the journey being
                        # rebuilt (was `newid`, the outer-loop journey, which is wrong
                        # when rebuilding a segmented child journey).
                        self.journeys[parent_journey_id]['path'][path] += 1
            for ptorm in paths_to_remove:
                self.journeys[parent_journey_id]['path'].pop(ptorm)
            if len(self.journeys[parent_journey_id]['path']) == 1:  # We found the path id
                # BUG FIX: read the remaining path key from this journey (was `newid`)
                tmp_path = list(self.journeys[parent_journey_id]['path'].keys())[0]
                del self.journeys[parent_journey_id]['path']
                self.journeys[parent_journey_id]['path'] = tmp_path
        if tmp_p[3] in tmpOut:  # this is the last input before the great farewell
            self.journeys[parent_journey_id]['next_points'] = None
            self.journeys[parent_journey_id]['ts_out'] = tmp_p[0]
            self.journeys[parent_journey_id]['completed'] = True
            # properties of journey inherit from properties of last segment
            self.journeys[parent_journey_id]['properties'] = tmp_p[4].copy()
        else:  # continue to rebuild journey
            self.journeys[parent_journey_id]['next_points'] = self.points[tmp_p[2]]['next']
        local_pointerP = _get_next(list_meas, nb_meas, local_pointerP)
    # end while local_pointerP < nb_meas
    # Case: the first journey is rebuilt and segmentations were found for one
    # or more points: rebuild a new journey from each brother to the end
    # (tree-like exploration).
    if seg_list and self.journeys[parent_journey_id]['completed']:
        for p in self.journeys[parent_journey_id]['set']:
            if p[0] in seg_list:  # There is a brother
                # For all brothers
                for s in seg_list[p[0]]:  # seg_local_pointer : seg_matched_ids
                    # Create a new journey forking at p
                    # TODO: what to do when the value is exactly the same ?
                    seg_p = self.inputs[s]
                    segid = len(self.journeys)
                    self.journeys[segid] = deepcopy(self.journeys[parent_journey_id])
                    self.journeys[segid]['set_ids']['uid'] = str(segid)
                    # Remove all elements after p
                    del self.journeys[segid]['set'][self.journeys[segid]['set'].index(p):]
                    self.journeys[segid]['set'].append((
                        s,
                        seg_p[0],
                        f"{seg_p[2]}--{seg_p[3]}"))
                    self.journeys[segid]['completed'] = False
                    self.journeys[segid]['set_ids'].update(seg_list[p[0]][s])
                    if s not in point_added:
                        point_added[s] = [segid]
                    else:
                        point_added[s].append(segid)
                    if seg_p[3] in tmpOut:  # this is the last input before the great farewell
                        self.journeys[segid]['next_points'] = None
                        self.journeys[segid]['ts_out'] = seg_p[0]
                        self.journeys[segid]['completed'] = True
                        continue
                    self.journeys[segid]['next_points'] = self.points[seg_p[2]]['next']
                    seg_local_pointer_next = _get_next(list_meas, nb_meas, s)
                    _rec_rebuild(pointerP, seg_local_pointer_next, segid)
    return self.journeys[parent_journey_id]['completed']
# LOOP: for all inputs, try to build the journeys
while pointer < nb_meas:
    # current_i += 1
    # if current_i % 100 == 0:
    #     print(f"{current_i} / {total_i}")
    # if pointer > 2000:
    #     break
    if VERBOSITY:
        pbar.n = pointer
        pbar.refresh()
    p = self.inputs[pointer]
    # Layout of a cleaned input measurement:
    # p[0] float : ts
    # p[1] int : direction
    # p[2] str : src point
    # p[3] str : dst point
    # p[4] dict : properties ids
    # p[5] dict : global ids
    # p[6] dict : local ids
    # Get the correct set of IN/OUT for the current direction
    if p[1] == 0:  # Downlink
        tmpIn = self.pointsInD
        tmpOut = self.pointsOutD
    else:  # Uplink
        tmpIn = self.pointsInU
        tmpOut = self.pointsOutU
    # Case: the current measure is not an input measure, continue
    if p[2] not in tmpIn:
        pointer = _get_next(list_meas, nb_meas, pointer)
        continue
    # this is a packet in arrival, create a new journey
    newid = len(self.journeys)
    self.journeys[newid] = dict()
    self.journeys[newid]['dir'] = p[1]  # direction for this journey
    self.journeys[newid]['glob'] = p[5]  # global ids as a first filter
    self.journeys[newid]['ts_in'] = p[0]  # timestamp of arrival
    self.journeys[newid]['set'] = list()  # set of measurements ids and properties (tuple())
    # self.journeys[newid]['set'][0] : index in inputs
    # self.journeys[newid]['set'][1] : ts for this input
    # self.journeys[newid]['set'][2] : corresponding segment
    self.journeys[newid]['set'].append((
        self.inputs.index(p),
        p[0],
        f"{p[2]}--{p[3]}"))
    self.journeys[newid]['set_ids'] = dict()  # dict of local ids
    self.journeys[newid]['set_ids'] = {'uid': str(newid)}
    self.journeys[newid]['set_ids'].update(p[6])
    self.journeys[newid]['next_points'] = self.points[p[2]]['next']  # list of possible next points
    if self.journeys[newid]['set'][-1][0] not in point_added:
        point_added[self.journeys[newid]['set'][-1][0]] = [newid]
    # path number of this journey according to self.paths
    if not hasattr(self, 'paths'):  # Paths not constructed; it should be, because it is done at init
        self.journeys[newid]['completed'] = False
        continue
    self.journeys[newid]['path'] = dict()  # list of index on path lists
    for path in range(len(self.paths[self.journeys[newid]['dir']])):
        self.journeys[newid]['path'][path] = 0
    # narrow the candidate paths: drop paths whose first point differs
    paths_to_remove = []
    for path in self.journeys[newid]['path']:
        if self.paths[self.journeys[newid]['dir']][path][self.journeys[newid]['path'][path]] != p[2]:
            paths_to_remove.append(path)
        else:
            if len(self.paths[self.journeys[newid]['dir']][path]) > 1:
                self.journeys[newid]['path'][path] += 1
    for ptorm in paths_to_remove:
        self.journeys[newid]['path'].pop(ptorm)
    if len(self.journeys[newid]['path']) == 1:  # We found the path id
        tmp_path = list(self.journeys[newid]['path'].keys())[0]
        del self.journeys[newid]['path']
        self.journeys[newid]['path'] = tmp_path
    # self.journeys[newid]['last_points'] = [p[2]]
    self.journeys[newid]['completed'] = False  # True if the journey is complete
    # list_meas.remove(pointer) # Remove from the list
    local_pointer = _get_next(list_meas, nb_meas, pointer)
    # Try to rebuild the journey from this packet
    # Assumption: the measures are ordered by timestamp,
    # means that the next point is necessarily after the current
    # input point in the list of inputs
    # TODO : Give a list of measurements to consider instead of local pointer only ?
    _rec_rebuild(pointer, local_pointer, newid)
    pointer = _get_next(list_meas, nb_meas, pointer)
# Remove all useless journeys dict keys for the next steps
tmp_file = self.logpath
for k in self.journeys:
    self.journeys[k]['uid'] = self.journeys[k]['set_ids']['uid']
    del self.journeys[k]['next_points']
    self.journeys[k]['file'] = tmp_file
    # a journey whose path could not be narrowed to a single id is not usable
    if isinstance(self.journeys[k]['path'], dict):
        self.journeys[k]['completed'] = False
if VERBOSITY:
    pbar.close()
# Store latseq_logs object
self.store_object()
# build out_journeys
self._build_out_journeys()
def _build_out_journeys(self):
    """Build out_journeys and compute 'duration' for each point of each journey.

    Attributes:
        out_journeys (:obj:`list`): the list of measurements like `raw_inputs`
            but ordered, filtered and tagged with the unique identifier (uid)
            of every completed journey they belong to.
        out_journeys[o][0] (Decimal): timestamp
        out_journeys[o][1] (char): direction, U/D
        out_journeys[o][2] (str): segment
        out_journeys[o][3] (dict): properties
        out_journeys[o][4] (str): data identifier with journey id(s) associated to this measurement

    Returns:
        int: size of out_journeys list

    Raises:
        AttributeError: journeys not present in `latseq_logs` object
    """
    if not hasattr(self, 'journeys'):
        logging.error("latseq_log._build_out_journeys() First rebuild journeys")
        raise AttributeError('journeys not present in object, first try rebuild journeys')
    self.out_journeys = list()
    nb_meas = len(self.inputs)
    added_out_j = {}
    points_added = {}
    # retrieve all completed journeys to build out_journeys
    for j in self.journeys:
        # Case : The journey is incomplete
        if not self.journeys[j]['completed']:
            continue
        for e in self.journeys[j]['set']:  # for all elements in set of ids
            # List all points used for the out journeys
            if e[0] not in points_added:
                points_added[e[0]] = [j]
            else:
                points_added[e[0]].append(j)
            e_tmp = self.inputs[e[0]]  # Get the point in the inputs list
            # BUG FIX: always take the CURRENT journey's uid. It was previously
            # assigned only in the "new entry" branch, so the duration of a
            # point already added by another journey was recorded under a stale
            # uid (or raised NameError on the very first iteration).
            tmp_uid = self.journeys[j]['set_ids']['uid']
            if e[0] not in added_out_j:  # create a new entry for this point in out journeys
                added_out_j[e[0]] = len(self.out_journeys)
                tmp_str = f"uid{tmp_uid}:{dict_ids_to_str(self.journeys[j]['glob'])}.{dict_ids_to_str(e_tmp[6])}"
                # have segment corresponding to journey's path
                src_point_s = e_tmp[2]
                while src_point_s not in self.paths[self.journeys[j]['dir']][self.journeys[j]['path']]:
                    src_point_s = '.'.join(src_point_s.split('.')[:-1])
                dst_point_s = e_tmp[3]
                while dst_point_s not in self.paths[self.journeys[j]['dir']][self.journeys[j]['path']]:
                    dst_point_s = '.'.join(dst_point_s.split('.')[:-1])
                tmp_seg = f"{src_point_s}--{dst_point_s}"
                # build out_journeys for lseqj
                self.out_journeys.append([
                    e_tmp[0],  # [0] : timestamp
                    'D' if e_tmp[1] == 0 else 'U',  # [1] : dir
                    tmp_seg,  # [2] : segment
                    e_tmp[4],  # [3] : properties
                    tmp_str])  # [4] : data id
            else:  # update the current entry: prepend this journey's uid
                self.out_journeys[added_out_j[e[0]]][4] = f"uid{tmp_uid}." + self.out_journeys[added_out_j[e[0]]][4]
            # points latency
            tmp_point = self.points[e_tmp[2]]
            if 'duration' not in tmp_point:
                tmp_point['duration'] = {}
            if e_tmp[2] in self.pointsInD or e_tmp[2] in self.pointsInU:  # Is an in point
                tmp_point['duration'][tmp_uid] = 0
            else:  # Is a mid point, because an out point cannot appear as e_tmp[2]
                current_index = self.journeys[j]['set'].index(e)
                prev_ts = self.inputs[self.journeys[j]['set'][current_index - 1][0]][0]
                tmp_point['duration'][tmp_uid] = e_tmp[0] - prev_ts
    self.out_journeys.sort(key=operator.itemgetter(0))
    orphans = 0
    # Check which points (clean inputs) are not in the completed journeys
    for e in range(nb_meas):
        if e not in points_added:
            if VERBOSITY:
                tmp_str = f"{float(self.inputs[e][0])} "
                tmp_str += "D " if self.inputs[e][1] == 0 else "U "
                tmp_str += f"{self.inputs[e][2]}--{self.inputs[e][3]}"
                logging.info(f"latseq_log._build_out_journeys() : inputs({e}) [{tmp_str}] is missing in completed journeys")
            orphans += 1
    # TODO : export all orphans as clean output to be compared with original cleaned output in a file
    logging.info(f"latseq_log._build_out_journeys() : {orphans} orphans / {nb_meas} measurements")
    self.store_object()
    return len(self.out_journeys)
# GETTERS
def get_filename(self) -> str:
    """Return the base name of the log file used by this latseq_logs instance."""
    return self.logpath.rsplit('/', 1)[-1]
def get_list_of_points(self) -> list:
    """Return the names of all measurement points found in the file."""
    return [*self.points]
def get_list_timestamp(self) -> list:
    """Return the timestamps of the `input` file, building them lazily."""
    if self.timestamps:
        return self.timestamps
    self._build_timestamp()
    return self.timestamps
def get_log_file_stats(self):
    """Return statistics about the logfile.

    Returns:
        file_stats (:obj:`dict`): name, nb_raw_meas, nb_meas, points
    """
    stats = {}
    stats["name"] = self.logpath
    stats["nb_raw_meas"] = len(self.raw_inputs)
    stats["nb_meas"] = len(self.inputs)
    stats["points"] = self.get_list_of_points()
    return stats
def get_paths(self):
    """Return the paths found in the file, building them lazily.

    Returns:
        paths (:obj:`dict` of :obj:`list`): 'D' for Downlink paths, 'U' for Uplink paths
    """
    if not (self.paths[0] or self.paths[1]):
        self._build_paths()
    return {'D': self.paths[0], 'U': self.paths[1]}
# YIELDERS
def yield_clean_inputs(self):
    """Yield each cleaned input as a formatted measurement log line.

    Raises:
        ValueError: when a measurement entry is malformed
    """
    try:
        for meas in self.inputs:
            line = f"{meas[0]} "
            line += "U " if meas[1] else "D "
            line += f"(len{meas[4]['len']}) "
            line += f"{meas[2]}--{meas[3]} "
            for gid, gval in meas[5].items():
                line += f"{gid}{gval}."
            line = line[:-1] + ":"
            for lid, lval in meas[6].items():
                line += f"{lid}{lval}."
            yield line[:-1]
    except Exception:
        raise ValueError(f"{meas} is malformed")
def yield_journeys(self):
    """Yield every completed journey, rebuilding them first if needed.

    Raises:
        ValueError: impossible to yield a journey from self.journeys
        Exception: impossible to rebuild journeys (wrapped into the ValueError)
    """
    try:
        if not hasattr(self, 'journeys'):
            try:
                self.rebuild_packets_journey_recursively()
            except Exception:
                raise Exception("[ERROR] to rebuild journeys")
        for jid in self.journeys:
            journey = self.journeys[jid]
            if journey["completed"]:
                yield journey
    except Exception:
        raise ValueError(f"[ERROR] to yield journeys for {self.logpath}")
def yield_out_journeys(self):
    """Yield the lseqj header line then each cleaned out_journeys line.

    Raises:
        ValueError: if an entry of out_journeys is malformed
    """
    if not hasattr(self, 'out_journeys'):
        if not self._build_out_journeys():
            logging.error("latseq_log.yield_out_journeys() : to build out_journeys")
            exit(-1)

    def _header_line() -> str:
        # flatten {dir: [paths]} into the ordered list of unique point names
        parts = ["#funcId "]
        seen = []
        all_paths = self.get_paths()
        for direction in all_paths:
            for path in all_paths[direction]:
                for point in path:
                    if point not in seen:
                        seen.append(point)
                        parts.append(f"{point} ")
        return "".join(parts)

    try:
        yield _header_line()
        for entry in self.out_journeys:
            try:
                yield f"{epoch_to_datetime(entry[0])} {entry[1]} ({entry[3]['len']})\t{entry[2]}\t{entry[4]}"
            except KeyError:
                # entry has no 'len' property
                yield f"{epoch_to_datetime(entry[0])} {entry[1]} \t{entry[2]}\t{entry[4]}"
    except Exception:
        raise ValueError(f"{entry} is malformed")
def yield_out_metadata(self):
    """Yield cleaned metadata lines, one per individual information item.

    Raises:
        ValueError: when an information entry is malformed
    """
    try:
        for info in self.infos:  # for all informations
            ctxt = ""
            # TODO : set to 4 when infos construct fixed
            if len(info) == 0:  # ctxt identifier (branch currently disabled)
                ctxt = ".".join(f"{c}{info[3][c]}" for c in info[3])
            # one line in the trace can generate multiple lines in the output
            for key in info[2]:
                yield f"{info[0]}\t{'.'.join(info[1])}{ctxt}.{key}\t{info[2][key]}"
    except Exception:
        raise ValueError(f"{info} is malformed")
def yield_points(self):
    """Yield each measurement point's dict, tagged with its name under 'point'."""
    # Warn when stats were never computed (requires a journeys rebuild first)
    first_point = next(iter(self.points.keys()))
    if "duration" not in self.points[first_point]:
        logging.warning("latseq_log.yield_points() : points without duration, first rebuild journeys for stat")
    for name, point in self.points.items():
        point['point'] = name
        yield point
def yield_global_csv(self):
    """Yield csv lines: timestamps by measurement point (column) and journey (line)."""
    points = self.get_list_of_points()
    NB_PREAMBLE = 3
    # header
    yield "journeys uid, dir, path_id, " + ", ".join(points) + "\n"
    # one line per completed journey
    for jid in self.journeys:
        journey = self.journeys[jid]
        if not journey['completed']:
            continue
        row = [''] * (len(points) + NB_PREAMBLE)
        row[0] = str(journey['uid'])
        row[1] = str(journey['dir'])
        row[2] = str(journey['path'])
        for elem in journey['set']:
            meas = self.inputs[elem[0]]
            row[points.index(meas[3]) + NB_PREAMBLE] = str(meas[0])
        yield ", ".join(row) + "\n"
def yield_matrix(self):
    """Yield csv lines for per-path latency matrices of completed journeys.

    One matrix per (direction, path id): a label line ("D0", "U1", ...), a
    header of segments, one row per journey with the inter-point latencies,
    then an empty separator line.

    Yields:
        str: one csv/label/separator line at a time
    """
    matrices = {}  # key = "<dir>.<path id>" -> [header, row, row, ...]
    for jid in self.journeys:
        journey = self.journeys[jid]
        if not journey['completed']:
            continue
        path_key = f"{journey['dir']}.{journey['path']}"
        # Build the header and the row in a single pass (the two branches of
        # the previous implementation duplicated the row-building code).
        header = "uid;"
        row = f"{journey['uid']};"
        prev_ts = journey['ts_in']
        for elem in journey['set']:
            meas = self.inputs[elem[0]]
            header += f"{meas[2]}--{meas[3]};"
            row += "{:.6f};".format(meas[0] - prev_ts)
            prev_ts = meas[0]
        if path_key not in matrices:  # first journey on this path: keep the header
            matrices[path_key] = [header]
        matrices[path_key].append(row)
    # flatten: label, header, rows, blank separator
    res = []
    for key in matrices:
        direction, path_id = key.split('.')
        res.append(f"{'D' if direction == '0' else 'U'}{path_id}")
        res.extend(matrices[key])
        res.append("")
    for line in res:
        yield line
# WRITERS TO FILE
def out_journeys_to_file(self):
    """Save out_journeys to a .lseqj file.

    The header line is produced by `yield_out_journeys()` itself, so this
    method only streams the generator into the file. (The previous local
    `_build_header` wrote a second, broken header: it appended whole path
    lists instead of point names, and duplicated the generator's header.)

    Raises:
        IOError: error while writing the lseqj file
    """
    out_journeyspath = self.logpath.replace('lseq', 'lseqj')
    try:
        with open(out_journeyspath, 'w+') as f:
            logging.info("latseq_log.out_journeys_to_file() : Writing latseq.lseqj ...")
            for e in self.yield_out_journeys():
                f.write(f"{e}\n")
    except IOError as e:
        logging.error(f"latseq_log.out_journeys_to_file() : on writing({self.logpath})")
        raise e
def store_object(self):
    """Persist this latseq_log object as a pickle file.

    The target path is the log path with "lseq" replaced by "pkl".
    An IOError while saving is logged but not re-raised.

    TODO:
        handle pickle error
    """
    target = self.logpath.replace("lseq", "pkl")
    try:
        with open(target, 'wb') as handle:
            pickle.dump(self, handle, pickle.HIGHEST_PROTOCOL)
    except IOError:
        logging.error(f"[ERROR] latseq_log.store_object() : at saving {target}")
    logging.info(f"[INFO] latseq_log.store_object() : Saving lseq instance to {target}")
def paths_to_str(self) -> str:
    """Stringify the paths found in the file.

    Returns:
        str: human readable list of downlink then uplink paths
    """
    res = f"Paths found in {self.logpath} \n"
    i, j = 0, 0
    # BUG FIX: iterate the lists of paths (the dict VALUES). The previous code
    # iterated the dict itself, yielding the keys 'D'/'U', and then iterated
    # the characters of those strings, passing single letters to path_to_str.
    for dir_paths in self.get_paths().values():
        if i == 0:
            res += "Downlink paths\n"
        if i == 1:
            res += "Uplink paths\n"
        for p in dir_paths:
            if p:
                res += f"\tpath {j} : "
                res += path_to_str(p)
                res += "\n"
                j += 1
        i += 1
    return res
#
# MAIN
#
if __name__ == "__main__":
    # Arguments
    parser = argparse.ArgumentParser(
        "./latseq_logs.py",
        description="LatSeq Analysis Module - Log processing component")
    parser.add_argument(
        "-c",
        "--config",
        type=str,
        dest="configfile",
        help="[WIP] Config file for the parser"
    )
    parser.add_argument(
        "-f",
        "--flask",
        dest="flask",
        action='store_true',
        help="[DEPRECATED] Run parser as flask service"
    )
    parser.add_argument(
        "-C",
        "--clean",
        dest="clean",
        action='store_true',
        help="Clean previous saves and rerun"
    )
    parser.add_argument(
        "-i",
        "--req_inputs",
        dest="req_inputs",
        action='store_true',
        help="Request cleaned input measurements in the case of command line script"
    )
    parser.add_argument(
        "-o",
        "--out_journeys",
        dest="req_outj",
        action='store_true',
        help="Request out journeys points from log file to stdout"
    )
    parser.add_argument(
        "-j",
        "--journeys",
        dest="req_journeys",
        action='store_true',
        help="Request journeys from log file to stdout"
    )
    parser.add_argument(
        "-p",
        "--points",
        dest="req_points",
        action='store_true',
        help="Request points from log file to stdout"
    )
    parser.add_argument(
        "-r",
        "--paths",
        "--routes",
        dest="req_paths",
        action='store_true',
        help="Request paths from log file to stdout"
    )
    parser.add_argument(
        "--notrdtsc",
        dest="notrdtsc",
        action='store_true',
        help="lseq already converted to rdtsc"
    )
    parser.add_argument(
        "-m",
        "--metadata",
        dest="req_metadata",
        action="store_true",
        help="Request metadata from log file to stdout"
    )
    parser.add_argument(
        "-M",
        "--mat",
        dest="req_matrix",
        action='store_true',
        help="Request matrix of points and journeys from log file to stdout",
    )
    parser.add_argument(
        "-v",
        "--verbosity",
        dest="verbosity",
        action="store_true",
        help="Verbosity for rebuilding phase especially"
    )
    parser.add_argument(
        "-x",
        "--csv",
        dest="req_csv",
        action='store_true',
        help="[DEPRECATED] Request csv with journeys and points"
    )
    parser.add_argument(
        "-l",
        "--log",
        type=str,
        dest="logname",
        help="Log file",
        required=True
    )
    args = parser.parse_args()
    # Phase 1 : We init latseq_logs class
    if not args.logname:  # No logfile
        logging.error("[ERROR] __main__ : No log file provided")
        exit(-1)
    if args.logname.split('.')[-1] != "lseq":
        logging.error("[ERROR] __main__ : No LatSeq log file provided (.lseq)")
        exit(-1)
    # Logger handler
    root_logger = logging.getLogger()
    # Verbosity level (module global read by the rebuild/output routines)
    if args.verbosity:
        VERBOSITY = True
        root_logger.setLevel(logging.DEBUG)
    candidate_pickle_file = args.logname.replace('lseq', 'pkl')
    if args.clean:  # clean pickles and others stuff
        if os.path.exists(candidate_pickle_file):
            os.remove(candidate_pickle_file)
    try:  # Try to load a previous session
        with open(candidate_pickle_file, 'rb') as fin:
            try:
                lseq = pickle.load(fin)
                logging.info(f"__main__ : load lseq instance from {candidate_pickle_file}")
            except EOFError:
                # empty/corrupted pickle: fall through to a fresh instance
                raise FileNotFoundError
    except FileNotFoundError:
        try:
            logging.info(f"__main__ : create a new lseq instance")
            if not args.notrdtsc:
                # convert rdtsc cycle counters to timestamps in place first
                ro = rdtsctots.rdtsctots(args.logname)
                ro.write_rdtsctots(args.logname)
            lseq = latseq_log(args.logname)  # Build latseq_log object
        except Exception as e:
            logging.error(f"__main__ : {args.logname}, {e}")
            exit(-1)
        lseq.store_object()
    # Phase 2A : case Flask
    if args.flask:
        logging.info("__main__ : Run a flask server")
        logging.error("__main__ : Flask server not implemented yet")
        exit(1)
    # Phase 2B : case run as command line script
    else:
        # -i, --inputs
        if args.req_inputs:
            for i in lseq.yield_clean_inputs():
                write_string_to_stdout(i)
        # -o, --out_journeys
        elif args.req_outj:
            for o in lseq.yield_out_journeys():
                write_string_to_stdout(o)
        # -j, --journeys
        elif args.req_journeys:
            for j in lseq.yield_journeys():
                write_string_to_stdout(json.dumps(j))
        # -p, --points
        elif args.req_points:
            for p in lseq.yield_points():
                write_string_to_stdout(json.dumps(p))
        # -r, --routes
        elif args.req_paths:
            write_string_to_stdout(json.dumps(lseq.get_paths()))
        # -m, --metadata
        elif args.req_metadata:
            for m in lseq.yield_out_metadata():
                write_string_to_stdout(m)
        # -M, --mat
        elif args.req_matrix:
            for r in lseq.yield_matrix():
                write_string_to_stdout(r)
        # -x, --csv
        elif args.req_csv:
            for l in lseq.yield_global_csv():
                write_string_to_stdout(l)
| 43.224173
| 182
| 0.523471
|
4a053301f9c15d49a1f3b6a6cadee33bdaac002a
| 7,724
|
py
|
Python
|
AirzoneCloud/Zone.py
|
max13fr/AirzoneCloud
|
ffeeb3b0bb47cc44eb8f7b45772aef0ef7a04d12
|
[
"MIT"
] | 14
|
2020-04-22T22:44:49.000Z
|
2022-02-15T06:50:25.000Z
|
AirzoneCloud/Zone.py
|
max13fr/AirzoneCloud
|
ffeeb3b0bb47cc44eb8f7b45772aef0ef7a04d12
|
[
"MIT"
] | 8
|
2020-04-29T18:09:40.000Z
|
2022-01-02T07:35:50.000Z
|
AirzoneCloud/Zone.py
|
max13fr/AirzoneCloud
|
ffeeb3b0bb47cc44eb8f7b45772aef0ef7a04d12
|
[
"MIT"
] | 9
|
2020-05-13T20:02:05.000Z
|
2021-12-28T15:01:15.000Z
|
import logging
from .contants import MODES_CONVERTER
_LOGGER = logging.getLogger(__name__)
class Zone:
"""Manage a Airzonecloud zone"""
_api = None
_system = None
_data = {}
def __init__(self, api, system, data):
self._api = api
self._system = system
self._data = data
# log
_LOGGER.info("Init {}".format(self.str_complete))
_LOGGER.debug(data)
def __str__(self):
return "Zone(name={}, is_on={}, mode={}, current_temp={}, target_temp={})".format(
self.name,
self.is_on,
self.mode,
self.current_temperature,
self.target_temperature,
)
@property
def str_complete(self):
return "Zone(name={}, is_on={}, mode={}, current_temperature={} target_temperature={}, id={}, system_number={}, zone_number={})".format(
self.name,
self.is_on,
self.mode,
self.current_temperature,
self.target_temperature,
self.id,
self.system_number,
self.zone_number,
)
#
# getters
#
@property
def name(self):
return self._data.get("name")
@property
def current_temperature(self):
if self._data.get("temp") is not None:
return float(self._data.get("temp"))
return None
@property
def current_humidity(self):
if self._data.get("humidity") is not None:
return float(self._data.get("humidity"))
return None
@property
def target_temperature(self):
if self._data.get("consign") is not None:
return float(self._data.get("consign"))
return None
@property
def max_temp(self):
if self._data.get("upper_conf_limit") is not None:
return float(self._data.get("upper_conf_limit"))
return None
@property
def min_temp(self):
if self._data.get("lower_conf_limit") is not None:
return float(self._data.get("lower_conf_limit"))
return None
@property
def is_on(self):
return bool(int(self._data.get("state", 0)))
@property
def mode(self):
return MODES_CONVERTER[self.mode_raw]["name"]
@property
def mode_description(self):
return MODES_CONVERTER[self.mode_raw]["description"]
@property
def mode_raw(self):
return str(self._data.get("mode"))
@property
def id(self):
return self._data.get("id")
@property
def device_id(self):
return self._data.get("device_id")
@property
def system_number(self):
return self._data.get("system_number")
@property
def zone_number(self):
return self._data.get("zone_number")
#
# setters
#
def turn_on(self):
""" Turn zone on """
_LOGGER.info("call turn_on() on {}".format(self.str_complete))
self._send_event("state", 1)
self._data["state"] = "1"
return True
def turn_off(self):
""" Turn zone off """
_LOGGER.info("call turn_off() on {}".format(self.str_complete))
self._send_event("state", 0)
self._data["state"] = "0"
return True
def set_temperature(self, temperature):
""" Set target_temperature for this zone """
_LOGGER.info(
"call set_temperature({}) on {}".format(temperature, self.str_complete)
)
temperature = float(temperature)
if self.min_temp is not None and temperature < self.min_temp:
temperature = self.min_temp
if self.max_temp is not None and temperature > self.max_temp:
temperature = self.max_temp
self._send_event("consign", temperature)
self._data["consign"] = str(temperature)
return True
#
# parent system
#
@property
def system(self):
""" Get parent system """
return self._system
#
# Refresh zone data
#
def refresh(self):
""" Refresh current zone data (call refresh on parent system) """
self.system.refresh()
#
# private
#
def _send_event(self, option, value):
""" Send an event for current zone """
payload = {
"event": {
"cgi": "modzona",
"device_id": self.device_id,
"system_number": self.system_number,
"zone_number": self.zone_number,
"option": option,
"value": value,
}
}
return self._api._send_event(payload)
    def _set_data_refreshed(self, data):
        """ Set data refreshed (call by parent system on refresh_zones()) """
        # Wholesale replacement: the parent hands us a fresh payload dict.
        self._data = data
        _LOGGER.info("Data refreshed for {}".format(self.str_complete))
#
# Zone raw data example
#
# {
# "id": "...",
# "system_id": "...",
# "device_id": "...",
# "modes": "1111111011",
# "warning": "0",
# "name": "Living room",
# "system_number": "1",
# "zone_number": "6",
# "state": "1",
# "consign": "21.5",
# "temp": "21.4",
# "mode": "5",
# "velocity": None,
# "show_velocity": None,
# "sleep": "0",
# "lower_conf_limit": "18.0",
# "upper_conf_limit": "30.0",
# "master": "1",
# "velMax": None,
# "eco": "2",
# "prog_enabled": "1",
# "speed_prog_mode": "0",
# "show_ventilation": "1",
# "updated_at": 1587190474,
# "setup_type": "0",
# "class": "Zone",
# "last_update": 1587190474,
# "next_schedule_number": 4,
# "led": None,
# "offset": None,
# "cold_offset": None,
# "heat_offset": None,
# "scene": None,
# "air_flow": None,
# "humidity": "42",
# "coldConsign": "",
# "heatConsign": "",
# "auto": None,
# "temperature_unit": None,
# "vla": None,
# "config": {
# "id": "...",
# "cold_values": "1",
# "heat_values": "1",
# "cold_angle": None,
# "heat_angle": None,
# "swing_horizontal": None,
# "swing_vertical": None,
# "antifreeze": "0",
# "vla": None,
# "zone_number": "6",
# "slave": None,
# "master": None,
# "basic_mode": "0",
# "ambient_temp": "24.6",
# "heat_type": None,
# "cold_type": None,
# "heat_type_config": "1",
# "cold_type_config": "1",
# "ventilation": None,
# "q_weight": None,
# "window": None,
# "presence": None,
# "spray_dew": None,
# "local_vent": None,
# "tact_fw": "3. 7",
# "firm_lm": None,
# "manufacturer": None,
# "led": None,
# "velMax": None,
# "confort_cold_consign": None,
# "confort_heat_consign": None,
# "eco_cold_consign": None,
# "eco_heat_consign": None,
# "unocupied_cold_consign": None,
# "unocupied_heat_consign": None,
# "vacation_cold_consign": None,
# "vacation_heat_consign": None,
# "firm_ws": "3.173",
# "offset": None,
# "errors": "0",
# "zone_id": "...",
# "automatic_weight": None,
# "autochange_differential": None,
# "offset_environment_cold": None,
# "offset_environment_heat": None,
# "eco_function": None,
# "heat_constant_ventilation": None,
# "cold_constant_ventilation": None,
# "v_min_module_010": None,
# "v_max_module_010": None,
# "cold_battery_temperature": None,
# "heat_battery_temperature": None,
# "VAF_coldstage": None,
# "VAF_heatstage": None,
# "VAF_radiantstage": None,
# },
# }
| 26.542955
| 144
| 0.537934
|
4a0533080af1c954bc764a3a8b5ec2bcd30f6b62
| 14,814
|
py
|
Python
|
sunpy/coordinates/tests/test_frames.py
|
ShubhamPandey28/sunpy
|
04d61f4cfce52032a342b0dcd3809260367a9939
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/coordinates/tests/test_frames.py
|
ShubhamPandey28/sunpy
|
04d61f4cfce52032a342b0dcd3809260367a9939
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/coordinates/tests/test_frames.py
|
ShubhamPandey28/sunpy
|
04d61f4cfce52032a342b0dcd3809260367a9939
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from sunpy.time import parse_time
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.coordinates import (UnitSphericalRepresentation,
SphericalRepresentation,
CartesianRepresentation,
SkyCoord)
from ... import sun
from ..frames import (Helioprojective,
HeliographicStonyhurst,
Heliocentric,
HeliographicCarrington)
# Solar radius and mean Sun-Earth distance, both converted to metres.
RSUN_METERS = sun.constants.get('radius').si.to(u.m)
DSUN_METERS = sun.constants.get('mean distance').si.to(u.m)
def init_frame(frame, args, kwargs):
    """Instantiate *frame* from optional positional/keyword argument sets.

    Returns ``None`` when both *args* and *kwargs* are empty/None, which
    mirrors the historical behaviour relied on by the parametrized tests.
    """
    if not args and not kwargs:
        return None
    return frame(*(args or ()), **(kwargs or {}))
"""
These are common 2D params, kwargs are frame specific
"""
two_D_parameters = [
([0 * u.deg, 0 * u.arcsec], None),
([0 * u.deg, 0 * u.arcsec], {'obstime': '2011/01/01T00:00:00'}),
([0 * u.deg, 0 * u.arcsec], {'representation_type': 'unitspherical'}),
([UnitSphericalRepresentation(0 * u.deg, 0 * u.arcsec)], None),
([UnitSphericalRepresentation(0 * u.deg, 0 * u.arcsec)], None), (
[UnitSphericalRepresentation(0 * u.deg, 0 * u.arcsec)],
{'obstime': '2011/01/01T00:00:00'})
]
"""
These are common 3D params, kwargs are frame specific
"""
three_D_parameters = [
([0 * u.deg, 0 * u.arcsec, 1 * u.Mm], None),
([0 * u.deg, 0 * u.arcsec, 1 * u.Mm], {'obstime': '2011/01/01T00:00:00'}),
([0 * u.deg, 0 * u.arcsec, 1 * u.Mm], {'representation_type': 'spherical'}),
([SphericalRepresentation(0 * u.deg, 0 * u.arcsec, 1 * u.Mm)],
None),
([SphericalRepresentation(0 * u.deg, 0 * u.arcsec, 1 * u.Mm)], None), (
[SphericalRepresentation(0 * u.deg, 0 * u.arcsec, 1 * u.Mm)],
{'obstime': '2011/01/01T00:00:00'})
]
# ==============================================================================
# Helioprojective Tests
# ==============================================================================
@pytest.mark.parametrize('args, kwargs',
                         two_D_parameters + [(None, {'Tx': 0 * u.deg,
                                                     'Ty': 0 * u.arcsec})])
def test_create_hpc_2d(args, kwargs):
    """A 2D Helioprojective frame can be built from args, kwargs or both."""
    hpc1 = init_frame(Helioprojective, args, kwargs)

    # Check we have the right class!
    assert isinstance(hpc1, Helioprojective)

    # Both the default and the explicit 'unitspherical' representation end
    # up as UnitSphericalRepresentation, so one assertion replaces the old
    # if/else whose branches were identical.
    assert isinstance(hpc1._data, UnitSphericalRepresentation)

    # Check the attrs are correct
    assert hpc1.Tx == 0 * u.arcsec
    assert hpc1.Ty == 0 * u.arcsec

    # Check the attrs are in the correct default units
    assert hpc1.Tx.unit is u.arcsec
    assert hpc1.Ty.unit is u.arcsec
@pytest.mark.parametrize(
    'args, kwargs',
    three_D_parameters + [(None, {'Tx': 0 * u.deg,
                                  'Ty': 0 * u.arcsec,
                                  'distance': 1 * u.Mm}),
                          ([0 * u.deg, 0 * u.arcsec], {'distance': 1 * u.Mm})])
def test_create_3d(args, kwargs):
    """A 3D Helioprojective frame can be built from args, kwargs or both."""
    hpc1 = init_frame(Helioprojective, args, kwargs)

    # Check we have the right class!
    assert isinstance(hpc1, Helioprojective)

    # Every parametrized case (default or explicit 'spherical') yields a
    # SphericalRepresentation, so one assertion replaces the old if/else
    # whose branches were identical.
    assert isinstance(hpc1._data, SphericalRepresentation)

    # Check the attrs are correct
    assert hpc1.Tx == 0 * u.arcsec
    assert hpc1.Ty == 0 * u.arcsec
    assert hpc1.distance == 1 * u.Mm

    # Check the attrs are in the correct default units
    assert hpc1.Tx.unit is u.arcsec
    assert hpc1.Ty.unit is u.arcsec
    assert hpc1.distance.unit is u.Mm
def test_cart_init():
    """Helioprojective accepts a CartesianRepresentation directly."""
    rep = CartesianRepresentation(0 * u.km, 0 * u.km, 1 * u.Mm)
    frame = Helioprojective(rep)
    assert isinstance(frame, Helioprojective)
    assert isinstance(frame._data, CartesianRepresentation)
# Test HPC Calculate Distance
def test_hpc_distance():
    """calculate_distance() fills in the surface-intersection distance."""
    hpc1 = Helioprojective(0 * u.deg, 0 * u.arcsec,
                           observer=HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU))
    assert isinstance(hpc1, Helioprojective)

    # Without a distance the frame stores a unit-sphere representation
    assert isinstance(hpc1._data, UnitSphericalRepresentation)

    # Check the attrs are correct
    assert hpc1.Tx == 0 * u.arcsec
    assert hpc1.Ty == 0 * u.arcsec

    hpc2 = hpc1.calculate_distance()

    assert isinstance(hpc2._data, SphericalRepresentation)

    # Check the attrs are correct
    assert hpc2.Tx == 0 * u.arcsec
    assert hpc2.Ty == 0 * u.arcsec

    # Disk centre: distance is the observer distance minus one solar radius
    assert_quantity_allclose(hpc2.distance, DSUN_METERS - RSUN_METERS)
def test_hpc_distance_cartesian():
    """calculate_distance() is a no-op when the distance is already known."""
    # Test detection of distance in other representations
    cartesian = CartesianRepresentation(0 * u.km, 0 * u.km, 1 * u.Mm)
    frame = Helioprojective(cartesian)
    assert isinstance(frame, Helioprojective)
    assert isinstance(frame._data, CartesianRepresentation)
    assert frame.calculate_distance() is frame
def test_hpc_distance_off_limb():
    """Off-limb pixels have no surface intersection: distance comes back NaN."""
    hpc1 = Helioprojective(1500 * u.arcsec, 0 * u.arcsec,
                           observer=HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU))
    assert isinstance(hpc1, Helioprojective)

    # Without a distance the frame stores a unit-sphere representation
    assert isinstance(hpc1._data, UnitSphericalRepresentation)

    # Check the attrs are correct
    assert hpc1.Tx == 1500 * u.arcsec
    assert hpc1.Ty == 0 * u.arcsec

    hpc2 = hpc1.calculate_distance()

    assert isinstance(hpc2._data, SphericalRepresentation)

    # Check the attrs are correct
    assert hpc2.Tx == 1500 * u.arcsec
    assert hpc2.Ty == 0 * u.arcsec

    # The line of sight misses the Sun, so the distance is undefined (NaN)
    assert_quantity_allclose(hpc2.distance, u.Quantity(np.nan, u.km))
def test_hpc_distance_3D():
    """A frame constructed with an explicit distance is returned unchanged."""
    hpc1 = Helioprojective(1500 * u.arcsec, 0 * u.arcsec, 100 * u.Mm)
    assert isinstance(hpc1, Helioprojective)

    # A third coordinate was supplied, so the representation is already 3D
    assert isinstance(hpc1._data, SphericalRepresentation)

    # Check the attrs are correct
    assert hpc1.Tx == 1500 * u.arcsec
    assert hpc1.Ty == 0 * u.arcsec

    hpc2 = hpc1.calculate_distance()

    # calculate_distance() short-circuits when the distance is known
    assert hpc2 is hpc1
def test_wrapping_on():
    """By default Tx wraps at 180 deg, so 359.9 deg reads back as -0.1 deg."""
    frame = Helioprojective(359.9*u.deg, 10*u.deg)
    assert_quantity_allclose(frame.Tx, -0.1*u.deg)
    assert_quantity_allclose(frame.Tx.wrap_angle, 180*u.deg)
def test_wrapping_off():
    """With wrap_longitude=False, Tx keeps the full 0-360 deg range."""
    frame = Helioprojective(359.9*u.deg, 10*u.deg, wrap_longitude=False)
    assert_quantity_allclose(frame.Tx, 359.9*u.deg)
    assert_quantity_allclose(frame.Tx.wrap_angle, 360*u.deg)
# ==============================================================================
# ## Heliographic Tests
# ==============================================================================
def test_HEE_creation():
    # Smoke test to make sure HEE constructors work fine
    # NOTE(review): despite the name, these construct HeliographicStonyhurst
    # (HGS) frames -- confirm whether the "HEE" name is historical.
    _ = HeliographicStonyhurst(lon=0*u.deg, lat=90*u.deg,
                               obstime=parse_time('2018-12-21'))
    _ = HeliographicStonyhurst(lon=0*u.deg, lat=90*u.deg, radius=1*u.km,
                               obstime=parse_time('2018-12-21'))
    _ = HeliographicStonyhurst(x=1*u.km, y=1*u.km, z=1*u.km,
                               obstime=parse_time('2018-12-21'),
                               representation_type='cartesian')
@pytest.mark.parametrize('frame',
                         [HeliographicStonyhurst, HeliographicCarrington])
@pytest.mark.parametrize("args, kwargs", two_D_parameters[:2] + two_D_parameters[4:] +
                         [(None, {'lat': 0*u.deg, 'lon': 0*u.arcsec})])
def test_create_hgs_2d(frame, args, kwargs):
    """2D heliographic frames gain a default radius (SphericalRepresentation)."""
    hgs1 = init_frame(frame, args, kwargs)

    # Check we have the right class!
    assert isinstance(hgs1, frame)

    # Carrington and Stonyhurst both store a SphericalRepresentation here,
    # so one assertion replaces the old isinstance chain whose branches
    # were identical.
    assert isinstance(hgs1._data, SphericalRepresentation)

    # Check the attrs are correct
    assert hgs1.lon == 0 * u.deg
    assert hgs1.lat == 0 * u.deg

    # Check the attrs are in the correct default units
    assert hgs1.lon.unit is u.deg
    assert hgs1.lat.unit is u.deg
    assert hgs1.radius.unit is u.km
@pytest.mark.parametrize('frame',
                         [HeliographicStonyhurst, HeliographicCarrington])
@pytest.mark.parametrize("args, kwargs", two_D_parameters[2:4])
def test_create_hgs_force_2d(frame, args, kwargs):
    """Forcing representation_type='unitspherical' keeps the frame 2D."""
    hgs1 = init_frame(frame, args, kwargs)

    # Check we have the right class!
    assert isinstance(hgs1, frame)

    rep_kwarg = kwargs.get('representation_type', None) if kwargs else None

    if rep_kwarg == 'unitspherical':
        assert isinstance(hgs1._data, UnitSphericalRepresentation)

    # Check the attrs are correct
    assert hgs1.lon == 0 * u.deg
    assert hgs1.lat == 0 * u.deg

    # Check the attrs are in the correct default units
    assert hgs1.lon.unit is u.deg
    assert hgs1.lat.unit is u.deg
@pytest.mark.parametrize('frame',
                         [HeliographicStonyhurst, HeliographicCarrington])
@pytest.mark.parametrize(
    "args, kwargs",
    three_D_parameters + [(None, {'lat': 0 * u.deg,
                                  'lon': 0 * u.arcsec,
                                  'radius': 1 * u.Mm}),
                          ([0 * u.deg, 0 * u.arcsec], {'radius': 1 * u.Mm})])
def test_create_hgs_3d(frame, args, kwargs):
    """3D heliographic frames always store a SphericalRepresentation."""
    hgs1 = init_frame(frame, args, kwargs)

    # Check we have the right class!
    assert isinstance(hgs1, frame)

    # With or without representation_type='spherical', and for both the
    # Carrington and Stonyhurst subclasses, the stored representation is
    # spherical -- one assertion replaces three identical branches.
    assert isinstance(hgs1._data, SphericalRepresentation)

    # Check the attrs are correct
    assert hgs1.lon == 0 * u.deg
    assert hgs1.lat == 0 * u.deg
    assert hgs1.radius == 1 * u.Mm

    # Check the attrs are in the correct default units
    assert hgs1.lon.unit is u.deg
    assert hgs1.lat.unit is u.deg
    assert hgs1.radius.unit is u.Mm
def test_hgs_cart_init():
    """HeliographicStonyhurst accepts a CartesianRepresentation directly."""
    rep = CartesianRepresentation(0 * u.km, 0 * u.km, 1 * u.Mm)
    # Local renamed from the misleading 'hpc1': this is an HGS frame.
    hgs = HeliographicStonyhurst(rep)
    assert isinstance(hgs, HeliographicStonyhurst)
    assert isinstance(hgs._data, CartesianRepresentation)
def test_hgs_wrapping_on():
    """HGS lon wraps at 180 deg by default, so 350 deg reads as -10 deg."""
    hgs = HeliographicStonyhurst(350*u.deg, 10*u.deg)
    assert_quantity_allclose(hgs.lon, -10*u.deg)
    assert_quantity_allclose(hgs.lon.wrap_angle, 180*u.deg)
def test_hgs_wrapping_off():
    """With wrap_longitude=False, HGS lon keeps the 0-360 deg range."""
    hgs = HeliographicStonyhurst(350*u.deg, 10*u.deg, wrap_longitude=False)
    assert_quantity_allclose(hgs.lon, 350*u.deg)
    assert_quantity_allclose(hgs.lon.wrap_angle, 360*u.deg)
def test_hgc_wrapping_360():
    """Carrington longitude always wraps at 360 deg, never at 180 deg."""
    hgc = HeliographicCarrington(350*u.deg, 10*u.deg)
    assert_quantity_allclose(hgc.lon, 350*u.deg)
    assert_quantity_allclose(hgc.lon.wrap_angle, 360*u.deg)
# ==============================================================================
# ## Heliocentric Tests
# ==============================================================================
@pytest.mark.parametrize(
    'args, kwargs',
    [((10 * u.km, 10 * u.km, 10 * u.km), None), (None, {'x': 10 * u.km,
                                                        'y': 10 * u.km,
                                                        'z': 10 * u.km}),
     ([CartesianRepresentation(10 * u.km, 10 * u.km, 10 * u.km)], None),
     ([CartesianRepresentation(10 * u.km, 10 * u.km, 10 * u.km)],
      {'obstime': '2011/01/01T00:00:00'})])
def test_create_hcc_3d(args, kwargs):
    """A Heliocentric frame stores Cartesian x/y/z components in km."""
    hcc = init_frame(Heliocentric, args, kwargs)
    assert isinstance(hcc, Heliocentric)
    assert isinstance(hcc._data, CartesianRepresentation)
    assert hcc.x == 10 * u.km
    assert hcc.y == 10 * u.km
    assert hcc.z == 10 * u.km
    # Check the attrs are in the correct default units
    assert hcc.x.unit is u.km
    assert hcc.y.unit is u.km
    assert hcc.z.unit is u.km
# ==============================================================================
# SkyCoord Tests
# ==============================================================================
# Redefined for the SkyCoord tests below: SkyCoord takes {} rather than
# None for "no kwargs", and the SphericalRepresentation entry exercises
# radius handling. This intentionally shadows the earlier list.
two_D_parameters = [
    ([0 * u.deg, 0 * u.arcsec], {}),
    ([UnitSphericalRepresentation(0 * u.deg, 0 * u.arcsec)], {}),
    ([UnitSphericalRepresentation(0 * u.deg, 0 * u.arcsec)], {}),
    ([SphericalRepresentation(0 * u.deg, 0 * u.arcsec, 1*u.one)], {}),
]
@pytest.mark.parametrize("args, kwargs",
two_D_parameters + [([0 * u.deg, 0 * u.arcsec],
{'representation_type': 'unitspherical'})])
def test_skycoord_hpc(args, kwargs):
"""
Test that when instantiating a HPC frame with SkyCoord calculate distance
still works.
"""
sc = SkyCoord(*args, **kwargs, frame="helioprojective", obstime="2011-01-01T00:00:00")
# Test the transform to HGS because it will force a `calculate_distance` call.
hgs = sc.transform_to("heliographic_stonyhurst")
assert isinstance(hgs.frame, HeliographicStonyhurst)
@pytest.mark.parametrize("args, kwargs", two_D_parameters)
def test_skycoord_hgs(args, kwargs):
"""
Test that when instantiating a HPC frame with SkyCoord correctly replaces
distance.
Note: We only need to test HGS here not HGC as they share the same
constructor.
"""
RSUN_METERS = sun.constants.get('radius').si
sc = SkyCoord(*args, **kwargs, frame="heliographic_stonyhurst", obstime="2011-01-01T00:00:00")
assert_quantity_allclose(sc.radius, RSUN_METERS)
| 35.782609
| 98
| 0.609964
|
4a05352d871bda0229c893c6aef06610b6a6e7ca
| 2,116
|
py
|
Python
|
utils/json_handler.py
|
AnyKeyShik/Kektor3000
|
3d1735bf82cabbeafe2593ef36fa23ae135a4940
|
[
"MIT"
] | null | null | null |
utils/json_handler.py
|
AnyKeyShik/Kektor3000
|
3d1735bf82cabbeafe2593ef36fa23ae135a4940
|
[
"MIT"
] | 4
|
2020-04-06T23:46:14.000Z
|
2020-04-07T00:57:18.000Z
|
utils/json_handler.py
|
AnyKeyShik/Kektor3000
|
3d1735bf82cabbeafe2593ef36fa23ae135a4940
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import pkg_resources
from utils import debug
class JsonHandler(object):
    """
    Handler for simple get messages and consts for auth.

    Loads three JSON bundles shipped under ``../static`` at construction
    time and exposes them through read-only properties.
    """

    _resource_package = None
    _resource_path = None
    _tags = None
    _tags_user = None
    _commands = None
    _commands_user = None
    _ratings = None
    _ignored = None
    _consts = None
    _auth_consts = None
    _messages = None

    _TAG = "JsonHandler"

    def __init__(self):
        self._resource_package = __name__

        # The three bundles share an identical load sequence, factored out
        # into _load_static_json() instead of being repeated inline.
        self._auth_consts = self._load_static_json('auth_consts.json')
        debug(self._TAG, "Get authentication data")

        self._consts = self._load_static_json('consts.json')
        debug(self._TAG, "Get consts")

        self._messages = self._load_static_json('messages.json')
        debug(self._TAG, "Get messages for answers")

    def _load_static_json(self, filename):
        """
        Load one JSON file bundled under ``../static``.

        :param filename: bare file name inside the static folder
        :return: parsed JSON content
        :rtype: dict
        """
        self._resource_path = '/'.join(('../static', filename))
        template = pkg_resources.resource_stream(self._resource_package, self._resource_path)
        try:
            # try/finally guarantees the stream is closed even when the
            # read or the parse raises (the old code leaked it on error).
            return json.loads(template.read().decode('utf-8'))
        finally:
            template.close()

    @property
    def auth_constants(self):
        """
        Get app constants for auth

        :return: app constants for auth
        :rtype: dict
        """

        return self._auth_consts

    @property
    def constants(self):
        """
        Get app constants

        :return: app constants
        :rtype: dict
        """

        return self._consts

    @property
    def messages(self):
        """
        Get messages for send as answer

        :return: messages templates
        :rtype: dict
        """

        return self._messages
json_handler = JsonHandler()
| 23.511111
| 93
| 0.610586
|
4a05353dbf07816718fbbf358e1c2f5679470058
| 6,145
|
py
|
Python
|
tests/test_dagfactory.py
|
coopergillan/dag-factory
|
0f95ef49473264d2cc9695b3833ebdf41ad759db
|
[
"MIT"
] | null | null | null |
tests/test_dagfactory.py
|
coopergillan/dag-factory
|
0f95ef49473264d2cc9695b3833ebdf41ad759db
|
[
"MIT"
] | null | null | null |
tests/test_dagfactory.py
|
coopergillan/dag-factory
|
0f95ef49473264d2cc9695b3833ebdf41ad759db
|
[
"MIT"
] | null | null | null |
import os
import datetime
import pytest
from dagfactory import dagfactory
here = os.path.dirname(__file__)

# Fixture configs exercised by the tests below.
TEST_DAG_FACTORY = os.path.join(here, "fixtures/dag_factory.yml")
INVALID_YAML = os.path.join(here, "fixtures/invalid_yaml.yml")
INVALID_DAG_FACTORY = os.path.join(here, "fixtures/invalid_dag_factory.yml")
def test_validate_config_filepath_valid():
    """An existing YAML path passes validation without raising."""
    dagfactory.DagFactory._validate_config_filepath(TEST_DAG_FACTORY)
def test_validate_config_filepath_invalid():
    """A non-existent path is rejected."""
    # NOTE(review): Exception is broad -- the library likely raises a more
    # specific type that could be asserted; confirm before narrowing.
    with pytest.raises(Exception):
        dagfactory.DagFactory._validate_config_filepath("config.yml")
def test_load_config_valid():
    """_load_config parses the fixture YAML into plain Python structures."""
    expected = {
        "default": {
            "default_args": {
                "owner": "default_owner",
                "start_date": datetime.date(2018, 3, 1),
                "end_date": datetime.date(2018, 3, 5),
                "retries": 1,
                "retry_delay_sec": 300,
            },
            "concurrency": 1,
            "max_active_runs": 1,
            "dagrun_timeout_sec": 600,
            "default_view": "tree",
            "orientation": "LR",
            "schedule_interval": "0 1 * * *",
        },
        "example_dag": {
            "default_args": {"owner": "custom_owner", "start_date": "2 days"},
            "description": "this is an example dag",
            "schedule_interval": "0 3 * * *",
            "tasks": {
                "task_1": {
                    "operator": "airflow.operators.bash_operator.BashOperator",
                    "bash_command": "echo 1",
                },
                "task_2": {
                    "operator": "airflow.operators.bash_operator.BashOperator",
                    "bash_command": "echo 2",
                    "dependencies": ["task_1"],
                },
                "task_3": {
                    "operator": "airflow.operators.bash_operator.BashOperator",
                    "bash_command": "echo 3",
                    "dependencies": ["task_1"],
                },
            },
        },
        "example_dag2": {
            "tasks": {
                "task_1": {
                    "operator": "airflow.operators.bash_operator.BashOperator",
                    "bash_command": "echo 1",
                },
                "task_2": {
                    "operator": "airflow.operators.bash_operator.BashOperator",
                    "bash_command": "echo 2",
                    "dependencies": ["task_1"],
                },
                "task_3": {
                    "operator": "airflow.operators.bash_operator.BashOperator",
                    "bash_command": "echo 3",
                    "dependencies": ["task_1"],
                },
            }
        },
    }
    actual = dagfactory.DagFactory._load_config(TEST_DAG_FACTORY)
    assert actual == expected
def test_load_config_invalid():
    """Malformed YAML makes _load_config raise."""
    with pytest.raises(Exception):
        dagfactory.DagFactory._load_config(INVALID_YAML)
def test_get_dag_configs():
    """get_dag_configs returns every per-DAG section, excluding 'default'."""
    td = dagfactory.DagFactory(TEST_DAG_FACTORY)
    expected = {
        "example_dag": {
            "default_args": {"owner": "custom_owner", "start_date": "2 days"},
            "description": "this is an example dag",
            "schedule_interval": "0 3 * * *",
            "tasks": {
                "task_1": {
                    "operator": "airflow.operators.bash_operator.BashOperator",
                    "bash_command": "echo 1",
                },
                "task_2": {
                    "operator": "airflow.operators.bash_operator.BashOperator",
                    "bash_command": "echo 2",
                    "dependencies": ["task_1"],
                },
                "task_3": {
                    "operator": "airflow.operators.bash_operator.BashOperator",
                    "bash_command": "echo 3",
                    "dependencies": ["task_1"],
                },
            },
        },
        "example_dag2": {
            "tasks": {
                "task_1": {
                    "operator": "airflow.operators.bash_operator.BashOperator",
                    "bash_command": "echo 1",
                },
                "task_2": {
                    "operator": "airflow.operators.bash_operator.BashOperator",
                    "bash_command": "echo 2",
                    "dependencies": ["task_1"],
                },
                "task_3": {
                    "operator": "airflow.operators.bash_operator.BashOperator",
                    "bash_command": "echo 3",
                    "dependencies": ["task_1"],
                },
            }
        },
    }
    actual = td.get_dag_configs()
    assert actual == expected
def test_get_default_config():
    """get_default_config returns the 'default' section verbatim."""
    td = dagfactory.DagFactory(TEST_DAG_FACTORY)
    expected = {
        "default_args": {
            "owner": "default_owner",
            "start_date": datetime.date(2018, 3, 1),
            "end_date": datetime.date(2018, 3, 5),
            "retries": 1,
            "retry_delay_sec": 300,
        },
        "concurrency": 1,
        "max_active_runs": 1,
        "dagrun_timeout_sec": 600,
        "default_view": "tree",
        "orientation": "LR",
        "schedule_interval": "0 1 * * *",
    }
    actual = td.get_default_config()
    assert actual == expected
def test_generate_dags_valid():
    """generate_dags registers every configured DAG in the namespace."""
    factory = dagfactory.DagFactory(TEST_DAG_FACTORY)
    factory.generate_dags(globals())
    for dag_id in ("example_dag", "example_dag2"):
        assert dag_id in globals()
    assert "fake_example_dag" not in globals()
def test_generate_dags_with_removal_valid():
    """DAGs removed from the config are cleaned out of the namespace."""
    factory = dagfactory.DagFactory(TEST_DAG_FACTORY)
    factory.generate_dags(globals())
    for dag_id in ("example_dag", "example_dag2"):
        assert dag_id in globals()
    assert "fake_example_dag" not in globals()

    # Drop both DAGs from the config and verify clean_dags removes them.
    del factory.config['example_dag']
    del factory.config['example_dag2']
    factory.clean_dags(globals())
    for dag_id in ("example_dag", "example_dag2"):
        assert dag_id not in globals()
    assert "fake_example_dag" not in globals()
def test_generate_dags_invalid():
    """An invalid factory config makes DAG generation raise."""
    factory = dagfactory.DagFactory(INVALID_DAG_FACTORY)
    with pytest.raises(Exception):
        factory.generate_dags(globals())
| 33.037634
| 79
| 0.523678
|
4a05360d8ccb06f1e2662826f1085fc5a034833d
| 521
|
py
|
Python
|
cacahuate/indexes.py
|
categulario/cacahuate
|
830b1656c8b5d183af637a47f9f3bcd04287f6b4
|
[
"MIT"
] | null | null | null |
cacahuate/indexes.py
|
categulario/cacahuate
|
830b1656c8b5d183af637a47f9f3bcd04287f6b4
|
[
"MIT"
] | null | null | null |
cacahuate/indexes.py
|
categulario/cacahuate
|
830b1656c8b5d183af637a47f9f3bcd04287f6b4
|
[
"MIT"
] | null | null | null |
from pymongo import MongoClient
def create_indexes(config):
    """Create the MongoDB indexes used by cacahuate's common queries.

    :param config: mapping providing ``MONGO_URI`` and ``MONGO_DBNAME``.
    """
    mongo = MongoClient(config['MONGO_URI'])
    try:
        # Subscript lookup is the documented way to select a database and,
        # unlike getattr(), also works for names that are not identifiers.
        db = mongo[config['MONGO_DBNAME']]

        # Executions are fetched by id and filtered by status/time window.
        db.execution.create_index("id", unique=True)
        db.execution.create_index("status")
        db.execution.create_index("started_at")
        db.execution.create_index("finished_at")

        # Pointers are filtered by status, owning execution and time window.
        db.pointer.create_index("status")
        db.pointer.create_index("execution.id")
        db.pointer.create_index("started_at")
        db.pointer.create_index("finished_at")
    finally:
        # Release the connection pool; the original leaked the client.
        mongo.close()
| 28.944444
| 48
| 0.725528
|
4a05364f872b9177150debf55d79c3194a6b0438
| 17,479
|
py
|
Python
|
doc/conf.py
|
richardsimko/salt
|
a5fef269068c5bf8dba654b9060ef6ba0ccca31e
|
[
"Apache-2.0"
] | null | null | null |
doc/conf.py
|
richardsimko/salt
|
a5fef269068c5bf8dba654b9060ef6ba0ccca31e
|
[
"Apache-2.0"
] | null | null | null |
doc/conf.py
|
richardsimko/salt
|
a5fef269068c5bf8dba654b9060ef6ba0ccca31e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=C0103,W0622
'''
Sphinx documentation for Salt
'''
import sys
import os
import re
import types
import time
from sphinx.directives import TocTree
class Mock(object):
    '''
    Mock out specified imports.

    This allows autodoc to do its thing without having oodles of req'd
    installed libs. This doesn't work with ``import *`` imports.

    This Mock class can be configured to return specific values at specific
    names, if required.

    http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
    '''
    def __init__(self, mapping=None, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Mapping allows autodoc to bypass the Mock object, but actually assign
        a specific value, expected by a specific attribute returned.
        """
        self.__mapping = mapping or {}

    # Class attribute: a mocked module exports nothing via ``import *``.
    __all__ = []

    def __call__(self, *args, **kwargs):
        # If mocked function is used as a decorator, expose decorated function.
        # if args and callable(args[-1]):
        #     functools.update_wrapper(ret, args[0])
        # Calling a mock yields another mock that shares the same mapping.
        return Mock(mapping=self.__mapping)

    def __getattr__(self, name):
        # Serve mapped names verbatim; fake the module-introspection
        # attributes Sphinx touches; refuse the two attributes whose mere
        # presence would change how Python treats the object.
        if name in self.__mapping:
            data = self.__mapping.get(name)
        elif name in ('__file__', '__path__'):
            data = '/dev/null'
        elif name in ('__mro_entries__', '__qualname__'):
            raise AttributeError("'Mock' object has no attribute '%s'" % (name))
        else:
            data = Mock(mapping=self.__mapping)
        return data

    def __iter__(self):
        return self

    @staticmethod
    def __next__():
        # Iterating a mock terminates immediately.
        raise StopIteration

    # For Python 2
    next = __next__
def mock_decorator_with_params(*oargs, **okwargs):  # pylint: disable=unused-argument
    '''
    Optionally mock a decorator that takes parameters

    E.g.:

    @blah(stuff=True)
    def things():
        pass
    '''
    def _apply(decorated, *d_args, **d_kwargs):  # pylint: disable=unused-argument
        # Genuine callables pass straight through; anything else is mocked.
        return decorated if hasattr(decorated, '__call__') else Mock()
    return _apply
# Modules to replace with Mock instances before autodoc imports salt.
# Duplicate entries from the original list ('concurrent', 'msgpack',
# 'Crypto.Signature') have been removed; order is otherwise preserved.
MOCK_MODULES = [
    # Python stdlib
    'user',
    # salt core
    'concurrent',
    'Crypto',
    'Crypto.Signature',
    'Crypto.Cipher',
    'Crypto.Hash',
    'Crypto.PublicKey',
    'Crypto.Random',
    'Crypto.Signature.PKCS1_v1_5',
    'M2Crypto',
    'msgpack',
    'yaml',
    'yaml.constructor',
    'yaml.nodes',
    'yaml.parser',
    'yaml.scanner',
    'zmq',
    'zmq.eventloop',
    'zmq.eventloop.ioloop',
    # third-party libs for cloud modules
    'libcloud',
    'libcloud.compute',
    'libcloud.compute.base',
    'libcloud.compute.deployment',
    'libcloud.compute.providers',
    'libcloud.compute.types',
    'libcloud.loadbalancer',
    'libcloud.loadbalancer.types',
    'libcloud.loadbalancer.providers',
    'libcloud.common',
    'libcloud.common.google',
    # third-party libs for netapi modules
    'cherrypy',
    'cherrypy.lib',
    'cherrypy.process',
    'cherrypy.wsgiserver',
    'cherrypy.wsgiserver.ssl_builtin',
    'tornado',
    'tornado.concurrent',
    'tornado.escape',
    'tornado.gen',
    'tornado.httpclient',
    'tornado.httpserver',
    'tornado.httputil',
    'tornado.ioloop',
    'tornado.iostream',
    'tornado.netutil',
    'tornado.simple_httpclient',
    'tornado.stack_context',
    'tornado.web',
    'tornado.websocket',
    'tornado.locks',
    'ws4py',
    'ws4py.server',
    'ws4py.server.cherrypyserver',
    'ws4py.websocket',
    # modules, renderers, states, returners, et al
    'ClusterShell',
    'ClusterShell.NodeSet',
    'MySQLdb',
    'MySQLdb.cursors',
    'OpenSSL',
    'avahi',
    'boto.regioninfo',
    'dbus',
    'django',
    'dns',
    'dns.resolver',
    'dson',
    'hjson',
    'jnpr',
    'jnpr.junos',
    'jnpr.junos.utils',
    'jnpr.junos.utils.config',
    'jnpr.junos.utils.sw',
    'keyring',
    'libvirt',
    'lxml',
    'lxml.etree',
    'nagios_json',
    'napalm',
    'netaddr',
    'netaddr.IPAddress',
    'netaddr.core',
    'netaddr.core.AddrFormatError',
    'ntsecuritycon',
    'psutil',
    'pycassa',
    'pyconnman',
    'pyiface',
    'pymongo',
    'pyroute2',
    'pyroute2.ipdb',
    'rabbitmq_server',
    'redis',
    'rpm',
    'rpmUtils',
    'rpmUtils.arch',
    'salt.ext.six.moves.winreg',
    'twisted',
    'twisted.internet',
    'twisted.internet.protocol',
    'twisted.internet.protocol.DatagramProtocol',
    'win32security',
    'yum',
    'zfs',
]
# Per-module attribute overrides fed to Mock(mapping=...): these specific
# names must resolve to real values for the documentation build to work.
MOCK_MODULES_MAPPING = {
    'cherrypy': {'config': mock_decorator_with_params},
    'ntsecuritycon': {
        'STANDARD_RIGHTS_REQUIRED': 0,
        'SYNCHRONIZE': 0,
    },
    'psutil': {'total': 0},  # Otherwise it will crash Sphinx
}
# Install the mocks before anything tries to import the real libraries.
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock(mapping=MOCK_MODULES_MAPPING.get(mod_name))

# Define a fake version attribute for the following libs.
sys.modules['libcloud'].__version__ = '0.0.0'
sys.modules['msgpack'].version = (1, 0, 0)
sys.modules['psutil'].version_info = (3, 0, 0)
sys.modules['pymongo'].version = '0.0.0'
sys.modules['tornado'].version_info = (0, 0, 0)
sys.modules['boto.regioninfo']._load_json_file = {'endpoints': None}
# -- Add paths to PYTHONPATH ---------------------------------------------------
try:
    docs_basepath = os.path.abspath(os.path.dirname(__file__))
except NameError:
    # sphinx-intl and six execute some code which will raise this NameError
    # assume we're in the doc/ directory
    docs_basepath = os.path.abspath(os.path.dirname('.'))

addtl_paths = (
    os.pardir,  # salt itself (for autodoc)
    '_ext',  # custom Sphinx extensions
)

for addtl_path in addtl_paths:
    sys.path.insert(0, os.path.abspath(os.path.join(docs_basepath, addtl_path)))

# We're now able to import salt
import salt.version

# NOTE(review): os.path.join discards components preceding an absolute
# path, so the leading os.pardir here has no effect -- confirm intent.
formulas_dir = os.path.join(os.pardir, docs_basepath, 'formulas')
# ----- Intersphinx Settings ------------------------------------------------>
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None)
}
# <---- Intersphinx Settings -------------------------------------------------

# -- General Configuration -----------------------------------------------------

# Set a var if we're building docs for the live site or not
on_saltstack = 'SALT_ON_SALTSTACK' in os.environ

project = 'Salt'

version = salt.version.__version__
latest_release = '2019.2.0'  # latest release
previous_release = '2018.3.4'  # latest release from previous branch
previous_release_dir = '2018.3'  # path on web server for previous branch
next_release = ''  # next release
next_release_dir = ''  # path on web server for next release branch

# Timestamp/copyright only shown on official builds for the live site.
today = ''
copyright = ''
if on_saltstack:
    today = "Generated on " + time.strftime("%B %d, %Y") + " at " + time.strftime("%X %Z") + "."
    copyright = time.strftime("%Y")

# < --- START do not merge these settings to other branches START ---> #
build_type = 'latest'  # latest, previous, develop, next
release = latest_release
# < --- END do not merge these settings to other branches END ---> #

# Set google custom search engine
if release == latest_release:
    search_cx = '011515552685726825874:ht0p8miksrm'  # latest
elif release.startswith('2018.3'):
    search_cx = '011515552685726825874:vadptdpvyyu'  # 2018.3
elif release.startswith('2017.7'):
    search_cx = '011515552685726825874:w-hxmnbcpou'  # 2017.7
elif release.startswith('2016.11'):
    search_cx = '011515552685726825874:dlsj745pvhq'  # 2016.11
else:
    search_cx = '011515552685726825874:x17j5zl74g8'  # develop

needs_sphinx = '1.3'

spelling_lang = 'en_US'
language = 'en'
locale_dirs = [
    '_locale',
]

master_doc = 'contents'
templates_path = ['_templates']
exclude_patterns = ['_build', '_incl/*', 'ref/cli/_includes/*.rst']

extensions = [
    'saltdomain',  # Must come early
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.autosummary',
    'sphinx.ext.extlinks',
    'sphinx.ext.intersphinx',
    'httpdomain',
    'youtube',
    #'saltautodoc', # Must be AFTER autodoc
    #'shorturls',
]

# Spell-checking support is optional; skip silently when unavailable.
try:
    import sphinxcontrib.spelling  # false positive, pylint: disable=unused-import
except ImportError:
    pass
else:
    extensions += ['sphinxcontrib.spelling']

modindex_common_prefix = ['salt.']

autosummary_generate = True

# strip git rev as there won't necessarily be a release based on it
stripped_release = re.sub(r'-\d+-g[0-9a-f]+$', '', release)
# Define a substitution for linking to the latest release tarball
rst_prolog = """\
.. |current_release_doc| replace:: :doc:`/topics/releases/{release}`
.. |saltrepo| replace:: https://github.com/saltstack/salt
.. _`salt-users`: https://groups.google.com/forum/#!forum/salt-users
.. _`salt-announce`: https://groups.google.com/forum/#!forum/salt-announce
.. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers
.. _`salt-slack`: https://saltstackcommunity.herokuapp.com/
.. |windownload| raw:: html
<p>Python2 x86: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python2 AMD64: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python3 x86: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python3 AMD64: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
.. |osxdownloadpy2| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg"><strong>salt-{release}-py2-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg.md5"><strong>md5</strong></a></p>
.. |osxdownloadpy3| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg"><strong>salt-{release}-py3-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg.md5"><strong>md5</strong></a></p>
""".format(release=stripped_release)
# A shortcut for linking to tickets on the GitHub issue tracker
extlinks = {
'blob': ('https://github.com/saltstack/salt/blob/%s/%%s' % 'develop', None),
'issue': ('https://github.com/saltstack/salt/issues/%s', 'issue #'),
'pull': ('https://github.com/saltstack/salt/pull/%s', 'PR #'),
'formula_url': ('https://github.com/saltstack-formulas/%s', ''),
}
# ----- Localization -------------------------------------------------------->
locale_dirs = ['locale/']
gettext_compact = False
# <---- Localization ---------------------------------------------------------
### HTML options
# set 'HTML_THEME=saltstack' to use previous theme
html_theme = os.environ.get('HTML_THEME', 'saltstack2')
html_theme_path = ['_themes']
html_title = u''
html_short_title = 'Salt'
html_static_path = ['_static']
html_logo = None # specified in the theme layout.html
html_favicon = 'favicon.ico'
smartquotes = False
# Use Google customized search or use Sphinx built-in JavaScript search
if on_saltstack:
html_search_template = 'googlesearch.html'
else:
html_search_template = 'searchbox.html'
html_additional_pages = {
'404': '404.html',
}
html_default_sidebars = [
html_search_template,
'version.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
]
html_sidebars = {
'ref/**/all/salt.*': [
html_search_template,
'version.html',
'modules-sidebar.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
],
'ref/formula/all/*': [
],
}
html_context = {
'on_saltstack': on_saltstack,
'html_default_sidebars': html_default_sidebars,
'github_base': 'https://github.com/saltstack/salt',
'github_issues': 'https://github.com/saltstack/salt/issues',
'github_downloads': 'https://github.com/saltstack/salt/downloads',
'latest_release': latest_release,
'previous_release': previous_release,
'previous_release_dir': previous_release_dir,
'next_release': next_release,
'next_release_dir': next_release_dir,
'search_cx': search_cx,
'build_type': build_type,
'today': today,
'copyright': copyright,
}
html_use_index = True
html_last_updated_fmt = '%b %d, %Y'
html_show_sourcelink = False
html_show_sphinx = True
html_show_copyright = True
### Latex options
latex_documents = [
('contents', 'Salt.tex', 'Salt Documentation', 'SaltStack, Inc.', 'manual'),
]
latex_logo = '_static/salt-logo.png'
latex_elements = {
'inputenc': '', # use XeTeX instead of the inputenc LaTeX package.
'utf8extra': '',
'preamble': r'''
\usepackage{fontspec}
\setsansfont{Linux Biolinum O}
\setromanfont{Linux Libertine O}
\setmonofont{Source Code Pro}
''',
}
### Linux Biolinum, Linux Libertine: http://www.linuxlibertine.org/
### Source Code Pro: https://github.com/adobe-fonts/source-code-pro/releases
### Linkcheck options
linkcheck_ignore = [
r'http://127.0.0.1',
r'http://salt:\d+',
r'http://local:\d+',
r'https://console.aws.amazon.com',
r'http://192.168.33.10',
r'http://domain:\d+',
r'http://123.456.789.012:\d+',
r'http://localhost',
r'https://groups.google.com/forum/#!forum/salt-users',
r'http://logstash.net/docs/latest/inputs/udp',
r'http://logstash.net/docs/latest/inputs/zeromq',
r'http://www.youtube.com/saltstack',
r'https://raven.readthedocs.io',
r'https://getsentry.com',
r'https://salt-cloud.readthedocs.io',
r'https://salt.readthedocs.io',
r'http://www.pip-installer.org/',
r'http://www.windowsazure.com/',
r'https://github.com/watching',
r'dash-feed://',
r'https://github.com/saltstack/salt/',
r'http://bootstrap.saltstack.org',
r'https://bootstrap.saltstack.com',
r'https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh',
r'media.readthedocs.org/dash/salt/latest/salt.xml',
r'https://portal.aws.amazon.com/gp/aws/securityCredentials',
r'https://help.github.com/articles/fork-a-repo',
r'dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml',
]
linkcheck_anchors = False
### Manpage options
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
authors = [
'Thomas S. Hatch <thatch45@gmail.com> and many others, please see the Authors file',
]
man_pages = [
('contents', 'salt', 'Salt Documentation', authors, 7),
('ref/cli/salt', 'salt', 'salt', authors, 1),
('ref/cli/salt-master', 'salt-master', 'salt-master Documentation', authors, 1),
('ref/cli/salt-minion', 'salt-minion', 'salt-minion Documentation', authors, 1),
('ref/cli/salt-key', 'salt-key', 'salt-key Documentation', authors, 1),
('ref/cli/salt-cp', 'salt-cp', 'salt-cp Documentation', authors, 1),
('ref/cli/salt-call', 'salt-call', 'salt-call Documentation', authors, 1),
('ref/cli/salt-proxy', 'salt-proxy', 'salt-proxy Documentation', authors, 1),
('ref/cli/salt-syndic', 'salt-syndic', 'salt-syndic Documentation', authors, 1),
('ref/cli/salt-run', 'salt-run', 'salt-run Documentation', authors, 1),
('ref/cli/salt-ssh', 'salt-ssh', 'salt-ssh Documentation', authors, 1),
('ref/cli/salt-cloud', 'salt-cloud', 'Salt Cloud Command', authors, 1),
('ref/cli/salt-api', 'salt-api', 'salt-api Command', authors, 1),
('ref/cli/salt-unity', 'salt-unity', 'salt-unity Command', authors, 1),
('ref/cli/spm', 'spm', 'Salt Package Manager Command', authors, 1),
]
### epub options
epub_title = 'Salt Documentation'
epub_author = 'SaltStack, Inc.'
epub_publisher = epub_author
epub_copyright = copyright
epub_scheme = 'URL'
epub_identifier = 'http://saltstack.com/'
epub_tocdup = False
#epub_tocdepth = 3
def skip_mod_init_member(app, what, name, obj, skip, options):
    # pylint: disable=too-many-arguments,unused-argument
    """autodoc-skip-member hook: omit private names (leading underscore)
    and any function object literally named ``mod_init`` from the docs.

    Returns True when the member should be skipped, False otherwise.
    """
    is_private = name.startswith('_')
    is_mod_init_fn = (
        isinstance(obj, types.FunctionType) and obj.__name__ == 'mod_init'
    )
    return is_private or is_mod_init_fn
def _normalize_version(args):
_, path = args
return '.'.join([x.zfill(4) for x in (path.split('/')[-1].split('.'))])
class ReleasesTree(TocTree):
    """A ``toctree`` directive variant that sorts its entries as version
    numbers, newest first, instead of in listed order."""

    # Accept exactly the same options as the stock toctree directive.
    option_spec = dict(TocTree.option_spec)

    def run(self):
        # Build the ordinary toctree node, then reorder its entries
        # in place by normalized version (descending).
        rst = super(ReleasesTree, self).run()
        entries = rst[0][0]['entries'][:]
        entries.sort(key=_normalize_version, reverse=True)
        rst[0][0]['entries'][:] = entries
        return rst
def setup(app):
    """Sphinx extension entry point: register the custom releases
    directive and the autodoc member-skipping hook."""
    app.add_directive('releasestree', ReleasesTree)
    app.connect('autodoc-skip-member', skip_mod_init_member)
| 30.664912
| 147
| 0.647863
|
4a05369a804d14051c7af57ebfb320fd22e1c098
| 71,982
|
py
|
Python
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/operations/_database_accounts_operations.py
|
ConnectionMaster/azure-cli-extensions
|
08d184f4efeac397c1ffcd21a83d651f4fad2782
|
[
"MIT"
] | 2
|
2021-06-05T17:51:26.000Z
|
2021-11-17T11:17:56.000Z
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/operations/_database_accounts_operations.py
|
ConnectionMaster/azure-cli-extensions
|
08d184f4efeac397c1ffcd21a83d651f4fad2782
|
[
"MIT"
] | 1
|
2020-06-12T01:39:40.000Z
|
2020-06-12T01:39:40.000Z
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/operations/_database_accounts_operations.py
|
ConnectionMaster/azure-cli-extensions
|
08d184f4efeac397c1ffcd21a83d651f4fad2782
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class DatabaseAccountsOperations(object):
"""DatabaseAccountsOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to use for this operation. Constant value: "2020-06-01-preview".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        # msrest serializer/deserializer pair for URL, header, and body handling.
        self._serialize = serializer
        self._deserialize = deserializer
        # API version sent with every request; constant for this client.
        self.api_version = "2020-06-01-preview"
        self.config = config
    def get(
            self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
        """Retrieves the properties of an existing Azure Cosmos DB database
        account.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: DatabaseAccountGetResults or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.cosmosdb.models.DatabaseAccountGetResults or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL (serializing path arguments also validates their
        # length/pattern constraints and raises on bad input).
        url = self.get.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            # Per-request correlation id for service-side diagnostics.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        # Any status other than 200 is surfaced as a CloudError carrying
        # the service request id.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DatabaseAccountGetResults', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}'}
    def _update_initial(
            self, resource_group_name, account_name, update_parameters, custom_headers=None, raw=False, **operation_config):
        """Internal helper: sends the PATCH that starts the long-running
        account update; the public ``update`` wraps this in an LROPoller."""
        # Construct URL (shares the URL template stored on ``update``).
        url = self.update.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(update_parameters, 'DatabaseAccountUpdateParameters')

        # Construct and send request
        request = self._client.patch(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DatabaseAccountGetResults', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def update(
            self, resource_group_name, account_name, update_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Updates the properties of an existing Azure Cosmos DB database account.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param update_parameters: The parameters to provide for the current
         database account.
        :type update_parameters:
         ~azure.mgmt.cosmosdb.models.DatabaseAccountUpdateParameters
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns
         DatabaseAccountGetResults or
         ClientRawResponse<DatabaseAccountGetResults> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.cosmosdb.models.DatabaseAccountGetResults]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.cosmosdb.models.DatabaseAccountGetResults]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the operation; raw=True keeps the initial HTTP response
        # so the poller can follow the LRO headers.
        raw_result = self._update_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            update_parameters=update_parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            # Deserialize the final response once the operation completes.
            deserialized = self._deserialize('DatabaseAccountGetResults', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Polling strategy: default ARM LRO polling, no polling (return
        # immediately), or a caller-supplied polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}'}
    def _create_or_update_initial(
            self, resource_group_name, account_name, create_update_parameters, custom_headers=None, raw=False, **operation_config):
        """Internal helper: sends the PUT that starts the long-running
        create-or-update; ``create_or_update`` wraps this in an LROPoller."""
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(create_update_parameters, 'DatabaseAccountCreateUpdateParameters')

        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DatabaseAccountGetResults', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def create_or_update(
            self, resource_group_name, account_name, create_update_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Creates or updates an Azure Cosmos DB database account. The "Update"
        method is preferred when performing updates on an account.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param create_update_parameters: The parameters to provide for the
         current database account.
        :type create_update_parameters:
         ~azure.mgmt.cosmosdb.models.DatabaseAccountCreateUpdateParameters
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns
         DatabaseAccountGetResults or
         ClientRawResponse<DatabaseAccountGetResults> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.cosmosdb.models.DatabaseAccountGetResults]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.cosmosdb.models.DatabaseAccountGetResults]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Start the LRO; raw=True preserves the initial response for polling.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            create_update_parameters=create_update_parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            # Deserialize the final response once the operation completes.
            deserialized = self._deserialize('DatabaseAccountGetResults', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Polling strategy selection (ARM default / none / caller-supplied).
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}'}
    def _delete_initial(
            self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
        """Internal helper: sends the DELETE that starts the long-running
        account deletion; ``delete`` wraps this in an LROPoller."""
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)

        # Construct headers
        header_parameters = {}
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        # 202 = deletion accepted (async); 204 = already gone / no content.
        if response.status_code not in [202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, account_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes an existing Azure Cosmos DB database account.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Start the LRO; raw=True preserves the initial response for polling.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            # Deletion has no body to deserialize; only honor ``raw``.
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Polling strategy selection (ARM default / none / caller-supplied).
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}'}
    def _failover_priority_change_initial(
            self, resource_group_name, account_name, failover_policies, custom_headers=None, raw=False, **operation_config):
        """Internal helper: POSTs the failover-priority change request;
        ``failover_priority_change`` wraps this in an LROPoller."""
        # Wrap the raw policy list in the request model expected by the API.
        failover_parameters = models.FailoverPolicies(failover_policies=failover_policies)

        # Construct URL
        url = self.failover_priority_change.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(failover_parameters, 'FailoverPolicies')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def failover_priority_change(
            self, resource_group_name, account_name, failover_policies, custom_headers=None, raw=False, polling=True, **operation_config):
        """Changes the failover priority for the Azure Cosmos DB database account.
        A failover priority of 0 indicates a write region. The maximum value
        for a failover priority = (total number of regions - 1). Failover
        priority values must be unique for each of the regions in which the
        database account exists.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param failover_policies: List of failover policies.
        :type failover_policies:
         list[~azure.mgmt.cosmosdb.models.FailoverPolicy]
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Start the LRO; raw=True preserves the initial response for polling.
        raw_result = self._failover_priority_change_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            failover_policies=failover_policies,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            # No body to deserialize; only honor ``raw``.
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Polling strategy selection (ARM default / none / caller-supplied).
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    failover_priority_change.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/failoverPriorityChange'}
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Lists all the Azure Cosmos DB database accounts available under the
        subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of DatabaseAccountGetResults
        :rtype:
         ~azure.mgmt.cosmosdb.models.DatabaseAccountGetResultsPaged[~azure.mgmt.cosmosdb.models.DatabaseAccountGetResults]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def prepare_request(next_link=None):
            # First page uses the operation's URL template; subsequent
            # pages follow the service-provided next_link verbatim.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1)
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            # Fetch one page; the Paged wrapper calls this repeatedly.
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response into a lazy paged iterator.
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.DatabaseAccountGetResultsPaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/databaseAccounts'}
    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Lists all the Azure Cosmos DB database accounts available under the
        given resource group.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of DatabaseAccountGetResults
        :rtype:
         ~azure.mgmt.cosmosdb.models.DatabaseAccountGetResultsPaged[~azure.mgmt.cosmosdb.models.DatabaseAccountGetResults]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def prepare_request(next_link=None):
            # First page: build the URL from the metadata template; later
            # pages: the service-provided next link is already complete.
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1)
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            # Retrieve one page; invoked repeatedly by the paged deserializer.
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        header_dict = None
        if raw:
            # When raw=True the pager records response headers in this dict.
            header_dict = {}
        deserialized = models.DatabaseAccountGetResultsPaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts'}
def list_keys(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Lists the access keys for the specified Azure Cosmos DB database
account.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DatabaseAccountListKeysResult or ClientRawResponse if
raw=true
:rtype: ~azure.mgmt.cosmosdb.models.DatabaseAccountListKeysResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.list_keys.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DatabaseAccountListKeysResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/listKeys'}
def list_connection_strings(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Lists the connection strings for the specified Azure Cosmos DB database
account.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DatabaseAccountListConnectionStringsResult or
ClientRawResponse if raw=true
:rtype:
~azure.mgmt.cosmosdb.models.DatabaseAccountListConnectionStringsResult
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.list_connection_strings.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DatabaseAccountListConnectionStringsResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_connection_strings.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/listConnectionStrings'}
def _offline_region_initial(
self, resource_group_name, account_name, region, custom_headers=None, raw=False, **operation_config):
region_parameter_for_offline = models.RegionForOnlineOffline(region=region)
# Construct URL
url = self.offline_region.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(region_parameter_for_offline, 'RegionForOnlineOffline')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def offline_region(
self, resource_group_name, account_name, region, custom_headers=None, raw=False, polling=True, **operation_config):
"""Offline the specified region for the specified Azure Cosmos DB database
account.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param region: Cosmos DB region, with spaces between words and each
word capitalized.
:type region: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.cosmosdb.models.ErrorResponseException>`
"""
raw_result = self._offline_region_initial(
resource_group_name=resource_group_name,
account_name=account_name,
region=region,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
offline_region.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/offlineRegion'}
def _online_region_initial(
self, resource_group_name, account_name, region, custom_headers=None, raw=False, **operation_config):
region_parameter_for_online = models.RegionForOnlineOffline(region=region)
# Construct URL
url = self.online_region.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(region_parameter_for_online, 'RegionForOnlineOffline')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def online_region(
self, resource_group_name, account_name, region, custom_headers=None, raw=False, polling=True, **operation_config):
"""Online the specified region for the specified Azure Cosmos DB database
account.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param region: Cosmos DB region, with spaces between words and each
word capitalized.
:type region: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.cosmosdb.models.ErrorResponseException>`
"""
raw_result = self._online_region_initial(
resource_group_name=resource_group_name,
account_name=account_name,
region=region,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
online_region.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/onlineRegion'}
def get_read_only_keys(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Lists the read-only access keys for the specified Azure Cosmos DB
database account.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DatabaseAccountListReadOnlyKeysResult or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.cosmosdb.models.DatabaseAccountListReadOnlyKeysResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_read_only_keys.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DatabaseAccountListReadOnlyKeysResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_read_only_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/readonlykeys'}
def list_read_only_keys(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Lists the read-only access keys for the specified Azure Cosmos DB
database account.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DatabaseAccountListReadOnlyKeysResult or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.cosmosdb.models.DatabaseAccountListReadOnlyKeysResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.list_read_only_keys.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DatabaseAccountListReadOnlyKeysResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_read_only_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/readonlykeys'}
def _regenerate_key_initial(
self, resource_group_name, account_name, key_kind, custom_headers=None, raw=False, **operation_config):
key_to_regenerate = models.DatabaseAccountRegenerateKeyParameters(key_kind=key_kind)
# Construct URL
url = self.regenerate_key.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(key_to_regenerate, 'DatabaseAccountRegenerateKeyParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def regenerate_key(
self, resource_group_name, account_name, key_kind, custom_headers=None, raw=False, polling=True, **operation_config):
"""Regenerates an access key for the specified Azure Cosmos DB database
account.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param key_kind: The access key to regenerate. Possible values
include: 'primary', 'secondary', 'primaryReadonly',
'secondaryReadonly'
:type key_kind: str or ~azure.mgmt.cosmosdb.models.KeyKind
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._regenerate_key_initial(
resource_group_name=resource_group_name,
account_name=account_name,
key_kind=key_kind,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
regenerate_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/regenerateKey'}
def check_name_exists(
self, account_name, custom_headers=None, raw=False, **operation_config):
"""Checks that the Azure Cosmos DB account name already exists. A valid
account name may contain only lowercase letters, numbers, and the '-'
character, and must be between 3 and 50 characters.
:param account_name: Cosmos DB database account name.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: bool or ClientRawResponse if raw=true
:rtype: bool or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.check_name_exists.metadata['url']
path_format_arguments = {
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.head(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 404]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = (response.status_code == 200)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
check_name_exists.metadata = {'url': '/providers/Microsoft.DocumentDB/databaseAccountNames/{accountName}'}
    def list_metrics(
            self, resource_group_name, account_name, filter, custom_headers=None, raw=False, **operation_config):
        """Retrieves the metrics determined by the given filter for the given
        database account.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param filter: An OData filter expression that describes a subset of
         metrics to return. The parameters that can be filtered are name.value
         (name of the metric, can have an or of multiple names), startTime,
         endTime, and timeGrain. The supported operator is eq.
        :type filter: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Metric
        :rtype:
         ~azure.mgmt.cosmosdb.models.MetricPaged[~azure.mgmt.cosmosdb.models.Metric]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def prepare_request(next_link=None):
            # First page: build the URL and attach api-version plus the
            # required $filter; later pages use the service's next link as-is.
            if not next_link:
                # Construct URL
                url = self.list_metrics.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
                query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            # Retrieve one page; invoked repeatedly by the paged deserializer.
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        header_dict = None
        if raw:
            # When raw=True the pager records response headers in this dict.
            header_dict = {}
        deserialized = models.MetricPaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/metrics'}
    def list_usages(
            self, resource_group_name, account_name, filter=None, custom_headers=None, raw=False, **operation_config):
        """Retrieves the usages (most recent data) for the given database account.
        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param filter: An OData filter expression that describes a subset of
         usages to return. The supported parameter is name.value (name of the
         metric, can have an or of multiple names).
        :type filter: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Usage
        :rtype:
         ~azure.mgmt.cosmosdb.models.UsagePaged[~azure.mgmt.cosmosdb.models.Usage]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Autogenerated paging pattern: build either the first-page request or
        # a follow-up request from the service-supplied next_link.
        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list_usages.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
            else:
                # next_link already embeds all query parameters.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def internal_paging(next_link=None):
            # Fetch one page; any non-200 status surfaces as CloudError.
            request = prepare_request(next_link)
            response = self._client.send(request, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.UsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return deserialized
    list_usages.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/usages'}
    def list_metric_definitions(
            self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
        """Retrieves metric definitions for the given database account.
        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of MetricDefinition
        :rtype:
         ~azure.mgmt.cosmosdb.models.MetricDefinitionPaged[~azure.mgmt.cosmosdb.models.MetricDefinition]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Autogenerated paging pattern: build either the first-page request or
        # a follow-up request from the service-supplied next_link.
        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list_metric_definitions.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
            else:
                # next_link already embeds all query parameters.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def internal_paging(next_link=None):
            # Fetch one page; any non-200 status surfaces as CloudError.
            request = prepare_request(next_link)
            response = self._client.send(request, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.MetricDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return deserialized
    list_metric_definitions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/metricDefinitions'}
| 49.883576
| 202
| 0.672418
|
4a0537f68f1de664e2dd07e3231f6bad343789bc
| 1,379
|
py
|
Python
|
setup.py
|
nschloe/launchpad-tools
|
20b178cd5851c6a23b95eeb957ac98fdebd8c5a1
|
[
"MIT"
] | null | null | null |
setup.py
|
nschloe/launchpad-tools
|
20b178cd5851c6a23b95eeb957ac98fdebd8c5a1
|
[
"MIT"
] | null | null | null |
setup.py
|
nschloe/launchpad-tools
|
20b178cd5851c6a23b95eeb957ac98fdebd8c5a1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
from setuptools import setup, find_packages
import os
import codecs

# https://packaging.python.org/single_source_version/
base_dir = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(base_dir, "launchpadtools", "__about__.py"), "rb") as f:
    exec(f.read(), about)


def read(fname):
    """Return the UTF-8 decoded contents of *fname*, resolved relative to this file."""
    # Use a context manager so the handle is closed deterministically
    # (the original left the codecs.open() handle to the garbage collector).
    with codecs.open(os.path.join(base_dir, fname), encoding="utf-8") as handle:
        return handle.read()


setup(
    name="launchpadtools",
    version=about["__version__"],
    author=about["__author__"],
    author_email=about["__author_email__"],
    packages=find_packages(),
    description="Tools for submitting packages to Ubuntu Launchpad",
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    url="https://github.com/nschloe/launchpadtools",
    license=about["__license__"],
    platforms="any",
    install_requires=["GitPython", "launchpadlib", "paramiko"],
    classifiers=[
        about["__status__"],
        about["__license__"],
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Topic :: Software Development :: Build Tools",
        "Topic :: System :: Operating System",
    ],
    entry_points={"console_scripts": ["launchpad-submit = launchpadtools.cli:main"]},
)
| 32.069767
| 85
| 0.672951
|
4a053bf3aa5971c7c21c6bd256d53d0a0629b745
| 3,696
|
py
|
Python
|
webui/src/config/script/list_values.py
|
ptriolo/eve-pi
|
71d5452b83884bf0437340c2264056ebe4a27021
|
[
"MIT"
] | 8
|
2020-06-12T13:10:51.000Z
|
2022-03-12T01:24:27.000Z
|
webui/src/config/script/list_values.py
|
ptriolo/eve-pi
|
71d5452b83884bf0437340c2264056ebe4a27021
|
[
"MIT"
] | 9
|
2019-07-24T13:46:43.000Z
|
2020-07-23T02:30:34.000Z
|
webui/src/config/script/list_values.py
|
ptriolo/eve-pi
|
71d5452b83884bf0437340c2264056ebe4a27021
|
[
"MIT"
] | 17
|
2019-07-03T14:00:42.000Z
|
2022-03-12T01:25:21.000Z
|
import abc
import logging
import re
from model.model_helper import is_empty, fill_parameter_values, InvalidFileException, list_files
from utils import process_utils
from utils.file_utils import FileMatcher
LOGGER = logging.getLogger('list_values')
class ValuesProvider(metaclass=abc.ABCMeta):
    """Abstract source of selectable values for a script parameter."""

    @abc.abstractmethod
    def get_values(self, parameter_values):
        """Return the available values, given the other parameters' values."""

    def get_required_parameters(self):
        """Names of parameters this provider depends on (none by default)."""
        return []

    def map_value(self, user_value):
        """Translate a user-facing value into the script value (identity here)."""
        return user_value
class EmptyValuesProvider(ValuesProvider):
    """Provider with a fixed, empty list of values."""

    def get_values(self, parameter_values):
        # No values are ever available.
        return []
class NoneValuesProvider(ValuesProvider):
    """Provider that signals "no value list" by returning None."""

    def get_values(self, parameter_values):
        # None (as opposed to []) means the parameter has no value list at all.
        return None
class ConstValuesProvider(ValuesProvider):
    """Provider backed by a fixed collection of values."""

    def __init__(self, values) -> None:
        # Freeze the incoming collection so later mutation cannot affect us.
        self._values = tuple(values)

    def get_values(self, parameter_values):
        return self._values
class ScriptValuesProvider(ValuesProvider):
    """Provider that runs a script once; each non-empty output line is a value."""

    def __init__(self, script) -> None:
        # Execute eagerly at construction time and cache the result.
        output = process_utils.invoke(script).rstrip('\n')
        self._values = [line for line in output.split('\n') if not is_empty(line)]

    def get_values(self, parameter_values):
        return self._values
class DependantScriptValuesProvider(ValuesProvider):
    """Provider whose script references other parameters via ``${name}``
    placeholders; values become available once those parameters are set.
    """

    # Matches ${param_name} placeholders inside the script text.
    _PARAM_PATTERN = re.compile(r'\$\{([^}]+)\}')

    def __init__(self, script, parameters_supplier) -> None:
        # Collect every distinct placeholder name. findall() fixes two issues
        # in the original hand-rolled scanner: adjacent placeholders
        # ("${a}${b}") were skipped by "search_start = match.end() + 1", and
        # the accumulated script_template was built but then discarded.
        self._required_parameters = tuple(set(self._PARAM_PATTERN.findall(script)))
        self._script_template = script
        self._parameters_supplier = parameters_supplier

    def get_required_parameters(self):
        return self._required_parameters

    def get_values(self, parameter_values):
        # Until every referenced parameter has a value, nothing can be run.
        for param_name in self._required_parameters:
            if is_empty(parameter_values.get(param_name)):
                return []
        parameters = self._parameters_supplier()
        script = fill_parameter_values(parameters, self._script_template, parameter_values)
        try:
            script_output = process_utils.invoke(script)
        except Exception as e:
            # Script failure yields no values rather than breaking the caller.
            LOGGER.warning('Failed to execute script. ' + str(e))
            return []
        script_output = script_output.rstrip('\n')
        return [line for line in script_output.split('\n') if not is_empty(line)]
class FilesProvider(ValuesProvider):
    """Provider that lists files from a directory, with optional type,
    extension, and exclusion filtering."""
    def __init__(self,
                 file_dir,
                 file_type=None,
                 file_extensions=None,
                 excluded_files_matcher: FileMatcher = None) -> None:
        self._file_dir = file_dir
        try:
            # Listing is done once, eagerly, and the result cached.
            self._values = list_files(file_dir,
                                      file_type=file_type,
                                      file_extensions=file_extensions,
                                      excluded_files_matcher=excluded_files_matcher)
        except InvalidFileException as e:
            # A bad directory yields an empty value list instead of failing.
            LOGGER.warning('Failed to list files for ' + file_dir + ': ' + str(e))
            self._values = []
    def get_values(self, parameter_values):
        return self._values
| 29.333333
| 96
| 0.639069
|
4a053d8a7fd09677f52e71295e6cafef2fddbe1a
| 1,219
|
py
|
Python
|
qutipy/Pauli/nQubit_Pauli_coeff.py
|
sumeetkhatri/QuTIPy
|
ca2a3344c1caa818504425496ea37278d80b1c44
|
[
"Apache-2.0"
] | null | null | null |
qutipy/Pauli/nQubit_Pauli_coeff.py
|
sumeetkhatri/QuTIPy
|
ca2a3344c1caa818504425496ea37278d80b1c44
|
[
"Apache-2.0"
] | null | null | null |
qutipy/Pauli/nQubit_Pauli_coeff.py
|
sumeetkhatri/QuTIPy
|
ca2a3344c1caa818504425496ea37278d80b1c44
|
[
"Apache-2.0"
] | null | null | null |
'''
This code is part of QuTIpy.
(c) Copyright Sumeet Khatri, 2021
This code is licensed under the Apache License, Version 2.0. You may
obtain a copy of this license in the LICENSE.txt file in the root directory
of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
Any modifications or derivative works of this code must retain this
copyright notice, and modified files need to carry a notice indicating
that they have been altered from the originals.
'''
import itertools
import numpy as np
from qutipy.Pauli import generate_nQubit_Pauli
from qutipy.general_functions import dag,Tr
def nQubit_Pauli_coeff(X,n,return_dict=False):
    r'''
    Generates the coefficients of the matrix X in the n-qubit Pauli basis.

    The coefficients c_{alpha} are such that

        X=(1/2^n)\sum_{alpha} c_alpha \sigma_alpha

    The coefficients are returned in lexicographical ordering. When
    return_dict is True the result is a dict keyed by the index tuples;
    otherwise it is a flat list.
    '''

    # All n-tuples over {0,1,2,3}, in lexicographical order.
    index_tuples = itertools.product(range(4), repeat=n)

    if return_dict:
        coeffs = {}
        for index in index_tuples:
            coeffs[index] = Tr(dag(generate_nQubit_Pauli(index)) @ X)
    else:
        coeffs = [Tr(dag(generate_nQubit_Pauli(index)) @ X) for index in index_tuples]

    return coeffs
| 25.395833
| 75
| 0.704676
|
4a053e03109d6f84e72810ac7620d1add74532ec
| 3,438
|
py
|
Python
|
lapsolver/tests/test_dense.py
|
cheind/py-hungarian-c
|
69f6e55efe1c1cd2e30139cc68c49e20a4fc3f49
|
[
"MIT"
] | 110
|
2018-02-10T18:28:41.000Z
|
2022-03-23T19:26:13.000Z
|
lapsolver/tests/test_dense.py
|
cheind/py-hungarian-c
|
69f6e55efe1c1cd2e30139cc68c49e20a4fc3f49
|
[
"MIT"
] | 16
|
2018-02-12T09:18:25.000Z
|
2021-04-29T02:07:23.000Z
|
lapsolver/tests/test_dense.py
|
cheind/py-hungarian-c
|
69f6e55efe1c1cd2e30139cc68c49e20a4fc3f49
|
[
"MIT"
] | 20
|
2018-07-18T15:05:53.000Z
|
2022-03-18T09:36:27.000Z
|
import pytest
import numpy as np
import lapsolver as lap
@pytest.mark.parametrize('dtype', ['float', 'int', 'float32', 'float64', 'int32', 'int64'])
def test_small(dtype):
    """Solve a 3x3 assignment problem for every supported dtype."""
    costs = np.array([[6, 9, 1],[10, 3, 2],[8, 7, 4]], dtype=dtype)
    r = lap.solve_dense(costs)
    expected = np.array([[0, 1, 2], [2, 1, 0]])
    np.testing.assert_equal(r, expected)
def test_plain_array():
    """solve_dense accepts a plain nested list (not only ndarrays)."""
    costs = [[6, 9, 1],[10, 3, 2],[8, 7, 4.]]
    r = lap.solve_dense(costs)
    expected = np.array([[0, 1, 2], [2, 1, 0]])
    np.testing.assert_allclose(r, expected)
def test_plain_array_integer():
    """All-integer input still solves correctly."""
    # Integer problem whose solution is changed by fractional modification.
    costs = [[6, 9, 1],[10, 3, 2],[8, 5, 4]]
    r = lap.solve_dense(costs)
    expected = np.array([[0, 1, 2], [2, 1, 0]])
    np.testing.assert_allclose(r, expected)
def test_plain_array_fractional():
    """Fractional costs must not be truncated to int by the bindings."""
    # Add fractional costs that change the solution.
    # Before: (1 + 3 + 8) = 12 < 13 = (6 + 5 + 2)
    # After: (1.4 + 3.4 + 8.4) = 13.2 < 13
    # This confirms that pylib11 did not cast float to int.
    costs = [[6, 9, 1.4],[10, 3.4, 2],[8.4, 5, 4]]
    r = lap.solve_dense(costs)
    expected = np.array([[0, 1, 2], [0, 2, 1]])
    np.testing.assert_allclose(r, expected)
def test_nonsquare():
    """Rectangular cost matrices (both orientations) are supported."""
    costs = np.array([[6, 9],[10, 3],[8, 7]], dtype=float)
    r = lap.solve_dense(costs)
    expected = np.array([[0, 1], [0, 1]])
    np.testing.assert_allclose(r, expected)
    r = lap.solve_dense(costs.T) # view test
    expected = np.array([[0, 1], [0, 1]])
    np.testing.assert_allclose(r, expected)
    costs = np.array(
        [[ -17.13614455, -536.59009819],
         [ 292.64662837, 187.49841358],
         [ 664.70501771, 948.09658792]])
    expected = np.array([[0, 1], [1, 0]])
    r = lap.solve_dense(costs)
    np.testing.assert_allclose(r, expected)
def test_views():
    """Non-contiguous numpy views (transpose + slice) solve correctly."""
    costs = np.array([[6, 9],[10, 3],[8, 7]], dtype=float)
    np.testing.assert_allclose(lap.solve_dense(costs.T[1:, :]), [[0], [1]])
def test_large():
    """Smoke test: a 5000x5000 random problem completes without error.
    The result is deliberately not checked, only that solving terminates."""
    costs = np.random.uniform(size=(5000,5000))
    r = lap.solve_dense(costs)
def test_solve_nan():
    """NaN entries are treated as missing (forbidden) edges."""
    costs = np.array([[5, 9, np.nan],[10, np.nan, 2],[8, 7, 4.]])
    r = lap.solve_dense(costs)
    expected = np.array([[0, 1, 2], [0, 2, 1]])
    np.testing.assert_allclose(r, expected)
def test_solve_inf():
    """Infinite entries are treated as missing (forbidden) edges."""
    costs = np.array([[5, 9, np.inf],[10, np.inf, 2],[8, 7, 4.]])
    r = lap.solve_dense(costs)
    expected = np.array([[0, 1, 2], [0, 2, 1]])
    np.testing.assert_allclose(r, expected)
def test_missing_edge_negative():
    """Missing edges must not be beaten by large negative costs."""
    costs = np.array([[-1000, -1], [-1, np.nan]])
    r = lap.solve_dense(costs)
    # The optimal solution is (0, 1), (1, 0) with cost -1 + -1.
    # If the implementation does not use a large enough constant, it may choose
    # (0, 0), (1, 1) with cost -1000 + L.
    expected = np.array([[0, 1], [1, 0]])
    np.testing.assert_allclose(r, expected)
def test_missing_edge_positive():
    """Missing edges must stay costlier than any sum of finite costs."""
    costs = np.array([
        [np.nan, 1000, np.nan],
        [np.nan, 1, 1000],
        [1000, np.nan, 1],
    ])
    costs_copy = costs.copy()
    r = lap.solve_dense(costs)
    # The optimal solution is (0, 1), (1, 2), (2, 0) with cost 1000 + 1000 + 1000.
    # If the implementation does not use a large enough constant, it may choose
    # (0, 0), (1, 1), (2, 2) with cost (L + 1 + 1) instead.
    expected = np.array([[0, 1, 2], [1, 2, 0]])
    np.testing.assert_allclose(r, expected)
| 35.443299
| 91
| 0.588424
|
4a053e3acc06b9fdacb9c2c6e9d947a458371885
| 1,433
|
py
|
Python
|
bakery_lint/tests/upstream/__init__.py
|
anthrotype/fontbakery
|
5717c39e9c999b62cacbdfde1fc1ee96ae049e5a
|
[
"Apache-2.0"
] | null | null | null |
bakery_lint/tests/upstream/__init__.py
|
anthrotype/fontbakery
|
5717c39e9c999b62cacbdfde1fc1ee96ae049e5a
|
[
"Apache-2.0"
] | null | null | null |
bakery_lint/tests/upstream/__init__.py
|
anthrotype/fontbakery
|
5717c39e9c999b62cacbdfde1fc1ee96ae049e5a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
"""
Load test methods from each file in the same directory as this file,
that start with 'test_', and have 'Test' in their class name
"""
import os
import importlib
import inspect

# Import every test_*.py module in this directory and re-export any class
# whose name contains 'Test' into this package's namespace.
for testfile in os.listdir(os.path.dirname(__file__)):
    if testfile.startswith('test_'):
        try:
            module_name, _ = os.path.splitext(testfile)
            module = importlib.import_module(
                'bakery_lint.tests.upstream.%s' % module_name)
            for name, obj in inspect.getmembers(module):
                if 'Test' in name:
                    # Bind the discovered object directly. Equivalent to the
                    # original Python-2-only "exec 'from ... import name'"
                    # statement, but also valid Python 3.
                    globals()[name] = obj
        except (ImportError, AttributeError, IndexError):
            # Best-effort discovery: skip modules that fail to import.
            pass
| 38.72973
| 93
| 0.699232
|
4a053f2f264d4588cae54475ad6a02736657882a
| 973
|
py
|
Python
|
models/gfnn.py
|
0penDynamic/GNN_models_pytorch_geometric
|
2e4c6f39ff28744245cc8ba4e31a6e8e512565be
|
[
"MIT"
] | 16
|
2020-07-18T01:01:17.000Z
|
2021-10-22T03:19:07.000Z
|
models/gfnn.py
|
0penDynamic/GNN_models_pytorch_geometric
|
2e4c6f39ff28744245cc8ba4e31a6e8e512565be
|
[
"MIT"
] | 1
|
2020-07-17T09:20:00.000Z
|
2020-08-06T00:26:06.000Z
|
models/gfnn.py
|
0penDynamic/GNN_models_pytorch_geometric
|
2e4c6f39ff28744245cc8ba4e31a6e8e512565be
|
[
"MIT"
] | 2
|
2020-07-23T02:22:10.000Z
|
2020-09-16T14:58:20.000Z
|
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from torch_geometric.nn import SGConv
from datasets import get_planetoid_dataset
class GFNN(nn.Module):
    """Graph Filter Neural Network: one K-hop simplified graph convolution
    followed by a fully-connected classification head."""

    def __init__(self, dataset, nhid, K):
        super(GFNN, self).__init__()
        # cached=True: the propagated features are reused since the graph is static.
        self.gc1 = SGConv(dataset.num_features, nhid, K=K, cached=True)
        self.fc1 = nn.Linear(nhid, dataset.num_classes)

    def reset_parameters(self):
        self.gc1.reset_parameters()
        self.fc1.reset_parameters()

    def forward(self, data):
        features, edge_index = data.x, data.edge_index
        hidden = F.relu(self.gc1(features, edge_index))
        # Log-probabilities per class, per node.
        return F.log_softmax(self.fc1(hidden), dim=1)
def create_gfnn_model(data_name, nhid=32, lr=0.2, weight_decay=5e-4):
    """Build the Planetoid dataset, a GFNN model (K=2), and its Adam optimizer."""
    dataset = get_planetoid_dataset(data_name, True)
    model = GFNN(dataset, nhid, 2)
    return dataset, model, Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
| 30.40625
| 74
| 0.681398
|
4a053f33179499ff3912ce11d20c3d7699fbd949
| 1,886
|
py
|
Python
|
sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/nlp/nlp_limit_settings.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/nlp/nlp_limit_settings.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/nlp/nlp_limit_settings.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from azure.ai.ml._restclient.v2022_02_01_preview.models import (
NlpVerticalLimitSettings as RestNlpLimitSettings,
)
from azure.ai.ml._utils.utils import (
to_iso_duration_format_mins,
from_iso_duration_format_mins,
)
from azure.ai.ml.entities._mixins import RestTranslatableMixin
class NlpLimitSettings(RestTranslatableMixin):
    """Limit settings for all AutoML NLP Verticals.

    :param max_concurrent_trials: Maximum number of trials to run concurrently.
    :param max_trials: Maximum total number of trials (defaults to 1).
    :param timeout_minutes: Overall job timeout, in minutes.
    """

    def __init__(
        self,
        *,
        max_concurrent_trials: int = None,
        max_trials: int = 1,
        timeout_minutes: int = None,
    ):
        self.max_concurrent_trials = max_concurrent_trials
        self.max_trials = max_trials
        self.timeout_minutes = timeout_minutes

    def _to_rest_object(self) -> RestNlpLimitSettings:
        # Minutes are converted to an ISO-8601 duration for the REST payload.
        return RestNlpLimitSettings(
            max_concurrent_trials=self.max_concurrent_trials,
            max_trials=self.max_trials,
            timeout=to_iso_duration_format_mins(self.timeout_minutes),
        )

    @classmethod
    def _from_rest_object(cls, obj: RestNlpLimitSettings) -> "NlpLimitSettings":
        return cls(
            max_concurrent_trials=obj.max_concurrent_trials,
            max_trials=obj.max_trials,
            timeout_minutes=from_iso_duration_format_mins(obj.timeout),
        )

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, NlpLimitSettings):
            return NotImplemented
        return (
            self.max_concurrent_trials == other.max_concurrent_trials
            and self.max_trials == other.max_trials
            and self.timeout_minutes == other.timeout_minutes
        )

    def __ne__(self, other: object) -> bool:
        # BUG FIX: delegate to __eq__ but propagate NotImplemented instead of
        # negating it — NotImplemented is truthy, so the original returned
        # False (i.e. "equal") for incomparable operands.
        result = self.__eq__(other)
        return NotImplemented if result is NotImplemented else not result
| 33.678571
| 80
| 0.64316
|
4a053f3d13e307eef0e2b7a7a645141ce0453b1a
| 397
|
py
|
Python
|
randomuuid/wsgi.py
|
danielcoker/random-uuid
|
0cf459eb2a838923e9ee5f17e20dbd9ece27a4ce
|
[
"MIT"
] | null | null | null |
randomuuid/wsgi.py
|
danielcoker/random-uuid
|
0cf459eb2a838923e9ee5f17e20dbd9ece27a4ce
|
[
"MIT"
] | null | null | null |
randomuuid/wsgi.py
|
danielcoker/random-uuid
|
0cf459eb2a838923e9ee5f17e20dbd9ece27a4ce
|
[
"MIT"
] | null | null | null |
"""
WSGI config for randomuuid project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'randomuuid.settings')
application = get_wsgi_application()
| 23.352941
| 78
| 0.788413
|
4a053f9cd75b45ff164af226015ac8fde0011308
| 4,785
|
py
|
Python
|
autolens/mock/mock.py
|
agarwalutkarsh554/PyAutoLens
|
72d2f5c39834446e72879fd119b591e52b36cac4
|
[
"MIT"
] | null | null | null |
autolens/mock/mock.py
|
agarwalutkarsh554/PyAutoLens
|
72d2f5c39834446e72879fd119b591e52b36cac4
|
[
"MIT"
] | null | null | null |
autolens/mock/mock.py
|
agarwalutkarsh554/PyAutoLens
|
72d2f5c39834446e72879fd119b591e52b36cac4
|
[
"MIT"
] | null | null | null |
import autofit as af
from autofit.mock.mock import MockSearch, MockSamples
class MockResult(af.MockResult):
    """Mock of a lens-fitting search result, carrying the hyper images,
    positions and tracer attributes that downstream phases read."""

    def __init__(
        self,
        samples=None,
        instance=None,
        model=None,
        analysis=None,
        search=None,
        mask=None,
        model_image=None,
        max_log_likelihood_tracer=None,
        hyper_galaxy_image_path_dict=None,
        hyper_model_image=None,
        hyper_galaxy_visibilities_path_dict=None,
        hyper_model_visibilities=None,
        pixelization=None,
        positions=None,
        updated_positions=None,
        updated_positions_threshold=None,
        stochastic_log_evidences=None,
        use_as_hyper_dataset=False,
    ):
        super().__init__(
            samples=samples,
            instance=instance,
            model=model,
            analysis=analysis,
            search=search,
        )
        self.previous_model = model
        self.gaussian_tuples = None
        # NOTE: the original also assigned self.mask = None and
        # self.positions = None here; both were immediately overwritten
        # below, so the dead assignments are removed.
        self.mask = mask
        self.hyper_galaxy_image_path_dict = hyper_galaxy_image_path_dict
        self.hyper_model_image = hyper_model_image
        self.hyper_galaxy_visibilities_path_dict = hyper_galaxy_visibilities_path_dict
        self.hyper_model_visibilities = hyper_model_visibilities
        self.model_image = model_image
        self.unmasked_model_image = model_image
        self.max_log_likelihood_tracer = max_log_likelihood_tracer
        self.pixelization = pixelization
        self.use_as_hyper_dataset = use_as_hyper_dataset
        self.positions = positions
        # Default to an empty list so callers can iterate without None checks.
        self.updated_positions = (
            updated_positions if updated_positions is not None else []
        )
        self.updated_positions_threshold = updated_positions_threshold
        self._stochastic_log_evidences = stochastic_log_evidences

    def stochastic_log_evidences(self):
        return self._stochastic_log_evidences

    @property
    def image_plane_multiple_image_positions_of_source_plane_centres(self):
        # Mocks treat the updated positions as the multiple-image positions.
        return self.updated_positions
class MockResults(af.ResultsCollection):
    """Mock results collection holding a single MockResult built from the
    constructor's keyword arguments."""
    def __init__(
        self,
        samples=None,
        instance=None,
        model=None,
        analysis=None,
        search=None,
        mask=None,
        model_image=None,
        max_log_likelihood_tracer=None,
        hyper_galaxy_image_path_dict=None,
        hyper_model_image=None,
        hyper_galaxy_visibilities_path_dict=None,
        hyper_model_visibilities=None,
        pixelization=None,
        positions=None,
        updated_positions=None,
        updated_positions_threshold=None,
        stochastic_log_evidences=None,
        use_as_hyper_dataset=False,
    ):
        """
        A collection of results from previous phases. Results can be obtained using an index or the name of the phase
        from whence they came.
        """
        super().__init__()
        # All keyword arguments are forwarded into one MockResult, which
        # becomes the collection's single entry.
        result = MockResult(
            samples=samples,
            instance=instance,
            model=model,
            analysis=analysis,
            search=search,
            mask=mask,
            model_image=model_image,
            max_log_likelihood_tracer=max_log_likelihood_tracer,
            hyper_galaxy_image_path_dict=hyper_galaxy_image_path_dict,
            hyper_model_image=hyper_model_image,
            hyper_galaxy_visibilities_path_dict=hyper_galaxy_visibilities_path_dict,
            hyper_model_visibilities=hyper_model_visibilities,
            pixelization=pixelization,
            positions=positions,
            updated_positions=updated_positions,
            updated_positions_threshold=updated_positions_threshold,
            stochastic_log_evidences=stochastic_log_evidences,
            use_as_hyper_dataset=use_as_hyper_dataset,
        )
        self.__result_list = [result]
    @property
    def last(self):
        """
        The result of the last phase
        """
        if len(self.__result_list) > 0:
            return self.__result_list[-1]
        return None
    def __getitem__(self, item):
        """
        Get the result of a previous phase by index
        Parameters
        ----------
        item: int
            The index of the result
        Returns
        -------
        result: Result
            The result of a previous phase
        """
        return self.__result_list[item]
    def __len__(self):
        return len(self.__result_list)
class MockPositionsSolver:
    """Positions-solver stub: ignores the tracer and always returns the
    preset model positions."""

    def __init__(self, model_positions):
        self.model_positions = model_positions

    def solve_from_tracer(self, tracer):
        # The tracer is deliberately unused; mocks return canned data.
        return self.model_positions
| 31.480263
| 118
| 0.627795
|
4a0544d7ff32bcbf3697dbbe98dfd3cf50721237
| 829
|
py
|
Python
|
tests/utils/tensorflow/test_numpy.py
|
orcaformation/chatbot_widget
|
cdbc0db5103a5a701878804ba183d5448823c798
|
[
"Apache-2.0"
] | 37
|
2019-06-07T07:39:00.000Z
|
2022-01-27T08:32:57.000Z
|
tests/utils/tensorflow/test_numpy.py
|
orcaformation/chatbot_widget
|
cdbc0db5103a5a701878804ba183d5448823c798
|
[
"Apache-2.0"
] | 50
|
2021-03-05T04:13:48.000Z
|
2022-03-01T13:36:06.000Z
|
tests/utils/tensorflow/test_numpy.py
|
orcaformation/chatbot_widget
|
cdbc0db5103a5a701878804ba183d5448823c798
|
[
"Apache-2.0"
] | 65
|
2019-05-21T12:16:53.000Z
|
2022-02-23T10:54:15.000Z
|
import pytest
import tensorflow as tf
import numpy as np
import rasa.utils.tensorflow.numpy
from typing import Optional, Dict, Any
@pytest.mark.parametrize(
    "value, expected_result",
    [
        ({}, {}),
        ({"a": 1}, {"a": 1}),
        ({"a": tf.zeros((2, 3))}, {"a": np.zeros((2, 3))}),
    ],
)
def test_values_to_numpy(
    value: Optional[Dict[Any, Any]], expected_result: Optional[Dict[Any, Any]]
):
    """values_to_numpy should convert tensor dict values to numpy arrays."""
    actual_result = rasa.utils.tensorflow.numpy.values_to_numpy(value)
    actual_result_value_types = [
        type(value) for value in sorted(actual_result.values())
    ]
    # BUG FIX: the original iterated actual_result.values() here too, so the
    # type comparison below was a tautology and could never fail.
    expected_result_value_types = [
        type(value) for value in sorted(expected_result.values())
    ]
    assert actual_result_value_types == expected_result_value_types
    np.testing.assert_equal(actual_result, expected_result)
| 29.607143
| 78
| 0.670688
|
4a054595b354fc56a93e4664562ef67d04d9e103
| 1,765
|
py
|
Python
|
esp32_ulp/definesdb.py
|
wnienhaus/micropython-esp32-ulp
|
19d4b98d41fae74062d855760d7aaef1988804f6
|
[
"MIT"
] | 63
|
2018-03-11T20:28:18.000Z
|
2022-02-03T15:03:24.000Z
|
esp32_ulp/definesdb.py
|
wnienhaus/micropython-esp32-ulp
|
19d4b98d41fae74062d855760d7aaef1988804f6
|
[
"MIT"
] | 56
|
2018-03-11T18:48:18.000Z
|
2022-03-01T00:16:26.000Z
|
esp32_ulp/definesdb.py
|
wnienhaus/micropython-esp32-ulp
|
19d4b98d41fae74062d855760d7aaef1988804f6
|
[
"MIT"
] | 14
|
2018-03-13T07:33:39.000Z
|
2022-02-03T15:03:27.000Z
|
import os
import btree
from .util import file_exists
DBNAME = 'defines.db'
class DefinesDB:
    """Persistent str->str mapping of preprocessor defines, backed by a
    btree database file (MicroPython's ``btree`` module). Keys and values
    are stored as bytes; this wrapper converts to/from str lazily."""
    def __init__(self):
        self._file = None         # underlying file object backing the btree
        self._db = None           # open btree handle, or None when closed
        self._db_exists = None    # cached "does the db file exist" flag
    def clear(self):
        # Close and delete the database file; a missing file is not an error.
        self.close()
        try:
            os.remove(DBNAME)
            self._db_exists = False
        except OSError:
            pass
    def is_open(self):
        return self._db is not None
    def open(self):
        if self.is_open():
            return
        try:
            # 'r+b' requires the file to exist; fall back to creating it.
            self._file = open(DBNAME, 'r+b')
        except OSError:
            self._file = open(DBNAME, 'w+b')
        self._db = btree.open(self._file)
        self._db_exists = True
    def close(self):
        if not self.is_open():
            return
        self._db.close()
        self._db = None
        self._file.close()
        self._file = None
    def db_exists(self):
        # Lazily check (and cache) whether the database file is on disk.
        if self._db_exists is None:
            self._db_exists = file_exists(DBNAME)
        return self._db_exists
    def update(self, dictionary):
        # Copy all key/value pairs into the database (dict.update semantics).
        for k, v in dictionary.items():
            self.__setitem__(k, v)
    def get(self, key, default):
        # dict.get semantics: missing keys yield the supplied default.
        try:
            result = self.__getitem__(key)
        except KeyError:
            result = default
        return result
    def keys(self):
        # Avoid creating the database file just to list keys.
        if not self.db_exists():
            return []
        self.open()
        return [k.decode() for k in self._db.keys()]
    def __getitem__(self, key):
        if not self.db_exists():
            raise KeyError
        self.open()
        return self._db[key.encode()].decode()
    def __setitem__(self, key, value):
        self.open()
        self._db[key.encode()] = str(value).encode()
    def __iter__(self):
        return iter(self.keys())
| 22.341772
| 52
| 0.538244
|
4a0545ca86f36df53587ac4dd543ad4514c3f2fb
| 2,990
|
py
|
Python
|
cogs/quotes.py
|
darklink2458/Poggy
|
5a28aa0ea2dd16b323e377906d9eae456dbfda0e
|
[
"MIT"
] | 1
|
2020-12-28T09:53:16.000Z
|
2020-12-28T09:53:16.000Z
|
cogs/quotes.py
|
darklink2458/Poggy
|
5a28aa0ea2dd16b323e377906d9eae456dbfda0e
|
[
"MIT"
] | 4
|
2020-12-28T09:49:11.000Z
|
2020-12-29T22:49:36.000Z
|
cogs/quotes.py
|
darklink2458/Poggy
|
5a28aa0ea2dd16b323e377906d9eae456dbfda0e
|
[
"MIT"
] | null | null | null |
import discord
import random
import json
import os
from discord.ext import commands,tasks
class Quotes(commands.Cog):
    """Commands for saving and retrieving per-server quotes, persisted in
    quotes.json (keyed by guild name)."""

    def __init__(self, client):
        self.client = client

    @commands.Cog.listener()
    async def on_ready(self):
        print('quotes Loaded!')

    @commands.command()
    async def ting(self, ctx):
        await ctx.send(ctx.message.guild.name)

    @commands.command(aliases=['addquote'], help = 'Adds a quote to the quote list.')
    async def addQuote(self, ctx, *, quote):
        # Load the quote database; 'with' closes the handle promptly (the
        # original leaked a read handle before reopening for write).
        with open('quotes.json', 'r') as file:
            ServerQuotes = json.load(file)
        server = ctx.message.guild.name
        if server not in ServerQuotes:
            ServerQuotes[server] = {'quoteNum':0,'quotes':{}}
        curQuoteNum = ServerQuotes[server]['quoteNum'] + 1
        ServerQuotes[server]['quoteNum'] = curQuoteNum
        ServerQuotes[server]['quotes'][curQuoteNum] = quote
        # Write the updated database back to disk.
        with open('quotes.json', 'w') as file:
            json.dump(ServerQuotes, file)
        await ctx.send('Quote Saved!')

    @commands.command(aliases=['randQuote'], help = 'Produces a random quote from the ones saved with the addQuote command')
    async def randquote(self, ctx):
        with open('quotes.json', 'r') as file:
            ServerQuotes = json.load(file)
        server = ctx.message.guild.name
        quotenum = ServerQuotes[server]['quoteNum']
        # JSON round-tripping turns the integer quote numbers into strings,
        # hence the f-string key lookup.
        myline = ServerQuotes[server]['quotes'][f'{random.randint(1, quotenum)}']
        await ctx.send(myline)

    @commands.command(aliases=['getQuote'], help = 'Grabs the specified quote from the server quote list')
    async def getquote(self, ctx, input):
        with open('quotes.json', 'r') as file:
            ServerQuotes = json.load(file)
        server = ctx.message.guild.name
        quotenum = ServerQuotes[server]['quoteNum']
        if input not in ServerQuotes[server]['quotes']:
            await ctx.send(f'You entered {input}, this is not a valid quote number.\nThere are {quotenum} quotes saved for this server.')
        else:
            myline = ServerQuotes[server]['quotes'][input]
            await ctx.send(f'Quote {input}: {myline}')

    @commands.command(aliases=['getQuoteList', 'quotelist'], help = 'Grabs the server quote list')
    async def quoteList(self, ctx):
        with open('quotes.json', 'r') as file:
            ServerQuotes = json.load(file)
        server = ctx.message.guild.name
        # Join once instead of quadratic string concatenation.
        content = ''.join(
            ServerQuotes[server]['quotes'][quote] + '\n'
            for quote in ServerQuotes[server]['quotes']
        )
        with open(f'{server}.txt', 'w') as file:
            file.write(content)
        # Keep the file open while discord uploads it, then delete it.
        with open(f'{server}.txt', 'rb') as file:
            await ctx.send(file = discord.File(file))
        os.remove(f'{server}.txt')
def setup(client):
    # discord.py extension entry point: registers the Quotes cog on load.
    client.add_cog(Quotes(client))
| 34.367816
| 137
| 0.619064
|
4a05476f63a5e010cc63750ca80bb63e37a079c2
| 3,306
|
py
|
Python
|
src1/profiles/cmb_functions.py
|
mlares/CBR_CrossCorr
|
f3599aed997e003d2d838ba5ad345d2f783a5bda
|
[
"MIT"
] | null | null | null |
src1/profiles/cmb_functions.py
|
mlares/CBR_CrossCorr
|
f3599aed997e003d2d838ba5ad345d2f783a5bda
|
[
"MIT"
] | null | null | null |
src1/profiles/cmb_functions.py
|
mlares/CBR_CrossCorr
|
f3599aed997e003d2d838ba5ad345d2f783a5bda
|
[
"MIT"
] | null | null | null |
def readmap(filename):
    """Read a HEALPix map file and return (map data, mask).

    Reads field 0 (the map values) and field 3 (the mask) from *filename*.
    """
    import healpy as hp
    columns, header = hp.read_map(filename, h=True, field=(0, 3))
    map_values = columns[0]  # field 0: selected data column
    mask = columns[1]        # field 3 of the file: mask column
    return (map_values, mask)
def profdata(nside, fac, rprof, Dsel, vec, hp_data_sel, hp_mask):
    """Stacked radial profile of the map around the positions in *vec*.

    For each radial bin rprof[k], averages the unmasked map pixels inside
    the annulus between rprof[k-1] and rprof[k], scaled per object by
    10**Dsel['r_ext'][i] / 206264.99992965 (the constant is arcsec per
    radian, so r_ext is presumably log10 of an angular size in arcsec --
    confirm against the catalog definition).

    Returns (Kr, serr): per-bin mean profile and standard error of the
    per-object means, both scaled by 10**fac.
    """
    import numpy as np
    import healpy as hp
    from scipy.stats import sem
    Kr = np.zeros(len(rprof))
    serr = np.zeros(len(rprof))
    for k in range(len(Kr)):
        Kd = []
        for i in range(len(vec)):
            scale = (10.**(Dsel['r_ext'][i])) / 206264.99992965  # radians
            listpix = hp.query_disc(nside, vec[i], scale * rprof[k],
                                    inclusive=True, fact=4, nest=False)
            # Exclude the previous (inner) disc so each bin is an annulus.
            # Improvements over the original: the inner query is skipped
            # for k == 0 (the original computed it with rprof[-1] and then
            # discarded it), and membership tests use a set instead of
            # O(n) list scans.
            if k == 0:
                inner = set()
            else:
                inner = set(hp.query_disc(nside, vec[i], scale * rprof[k - 1],
                                          inclusive=True, fact=4, nest=False))
            listpix_mask = [p for p in listpix
                            if hp_mask[p] == 1 and p not in inner]
            Kd.append(np.nanmean(hp_data_sel[listpix_mask]))
        serr[k] = sem(Kd, nan_policy='omit') * 10.**fac
        Kr[k] = np.nanmean(Kd) * (10.**fac)
    return (Kr, serr)
def profran(nran, nside, fac, rprof, Dsel, vec, hp_data_sel, hp_mask):
    """Monte-Carlo control profiles: same stacking as profdata, but around
    *nran* independent sets of random positions.

    Returns Kr_ran with shape (nran, len(rprof)): one random profile per
    realization, scaled by 10**fac.
    """
    import numpy as np
    import healpy as hp
    from scipy.stats import sem
    Kr_ran = np.zeros((nran, len(rprof)))
    for r in range(nran):
        np.random.seed()
        # BUG FIX: the original did `vec_ran = vec`, aliasing the caller's
        # array and overwriting it in place with random positions. Copy so
        # the input is left untouched.
        vec_ran = np.copy(vec)
        for rr in range(len(vec_ran)):
            # Uniform random point inside the unit ball (angles uniform on
            # the sphere, radius weighted by u**(1/3)).
            phiran = np.random.uniform(0, 2 * np.pi, 1)
            costheta = np.random.uniform(-1, 1, 1)
            u = np.random.uniform(0, 1)
            thetaran = np.arccos(costheta)
            rran = u**(1 / 3)
            vec_ran[rr, 0] = rran * np.sin(thetaran) * np.cos(phiran)
            vec_ran[rr, 1] = rran * np.sin(thetaran) * np.sin(phiran)
            vec_ran[rr, 2] = rran * np.cos(thetaran)
        for k in range(len(rprof)):
            Kd_ran = []
            for i in range(len(vec_ran)):
                scale = (10.**(Dsel['r_ext'][i])) / 206264.99992965  # radians
                listpix_ran = hp.query_disc(nside, vec_ran[i], scale * rprof[k],
                                            inclusive=True, fact=4, nest=False)
                # Annulus: exclude the inner disc; skip the query at k == 0
                # (the original evaluated it with rprof[-1] and discarded
                # it) and use a set for O(1) membership tests.
                if k == 0:
                    inner = set()
                else:
                    inner = set(hp.query_disc(nside, vec_ran[i],
                                              scale * rprof[k - 1],
                                              inclusive=True, fact=4,
                                              nest=False))
                listpix_mask_ran = [p for p in listpix_ran
                                    if hp_mask[p] == 1 and p not in inner]
                Kd_ran.append(np.nanmean(hp_data_sel[listpix_mask_ran]))
            Kr_ran[r, k] = np.nanmean(Kd_ran) * (10.**fac)
    return (Kr_ran)
| 26.238095
| 161
| 0.548094
|
4a05493dfc73a6a73a86d1b58e58d9cf424f2ad0
| 5,141
|
py
|
Python
|
cqr/get_meps_data/meps_dataset_panel19_fy2015_reg.py
|
allenbai01/cp-gen
|
41c81feda79400979a4b9fd6fc33c68d8a8d6fd8
|
[
"MIT"
] | 86
|
2019-05-15T10:23:11.000Z
|
2022-03-05T23:10:17.000Z
|
get_meps_data/meps_dataset_panel19_fy2015_reg.py
|
chrinide/cqr
|
27f56e274fac9b41a7bc639490868c3ae7d627dc
|
[
"MIT"
] | 2
|
2021-06-03T17:19:24.000Z
|
2021-11-25T17:26:19.000Z
|
get_meps_data/meps_dataset_panel19_fy2015_reg.py
|
chrinide/cqr
|
27f56e274fac9b41a7bc639490868c3ae7d627dc
|
[
"MIT"
] | 30
|
2019-05-10T07:11:52.000Z
|
2022-03-10T15:34:51.000Z
|
# This code is a variant of
# https://github.com/IBM/AIF360/blob/master/aif360/datasets/meps_dataset_panel19_fy2015.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pandas as pd
from save_dataset import SaveDataset
# Metadata passed to SaveDataset: human-readable labels for the binarized
# protected attribute RACE (1.0 -> 'White', 0.0 -> 'Non-White').
default_mappings = {
    'protected_attribute_maps': [{1.0: 'White', 0.0: 'Non-White'}]
}
def default_preprocessing(df):
    """Preprocess raw MEPS h181 data for the Panel 19 / FY2015 regression task.

    1. Create RACE: 'White' if RACEV2X == 1 and HISPANX == 2 (non-Hispanic
       White), 'Non-White' otherwise.
    2. Restrict to Panel 19.
    3. Rename all panel/round-specific columns to generic names.
    4. Drop rows whose feature values encode missing/unknown (generally < -1).
    5. Compute UTILIZATION_reg as the sum of the five utilization counts.
    """
    # Vectorized replacement for the original row-wise df.apply: one pass
    # instead of a Python-level call per row, same 'White'/'Non-White'
    # assignment (NaNs compare unequal and therefore map to 'Non-White',
    # exactly as the original row function did).
    is_white = (df['HISPANX'] == 2) & (df['RACEV2X'] == 1)
    df['RACEV2X'] = is_white.map({True: 'White', False: 'Non-White'})
    df = df.rename(columns = {'RACEV2X' : 'RACE'})

    df = df[df['PANEL'] == 19]

    # RENAME COLUMNS (panel/round specific -> generic)
    df = df.rename(columns = {'FTSTU53X' : 'FTSTU', 'ACTDTY53' : 'ACTDTY', 'HONRDC53' : 'HONRDC', 'RTHLTH53' : 'RTHLTH',
                              'MNHLTH53' : 'MNHLTH', 'CHBRON53' : 'CHBRON', 'JTPAIN53' : 'JTPAIN', 'PREGNT53' : 'PREGNT',
                              'WLKLIM53' : 'WLKLIM', 'ACTLIM53' : 'ACTLIM', 'SOCLIM53' : 'SOCLIM', 'COGLIM53' : 'COGLIM',
                              'EMPST53' : 'EMPST', 'REGION53' : 'REGION', 'MARRY53X' : 'MARRY', 'AGE53X' : 'AGE',
                              'POVCAT15' : 'POVCAT', 'INSCOV15' : 'INSCOV'})

    df = df[df['REGION'] >= 0] # remove values -1
    df = df[df['AGE'] >= 0] # remove values -1
    df = df[df['MARRY'] >= 0] # remove values -1, -7, -8, -9
    df = df[df['ASTHDX'] >= 0] # remove values -1, -7, -8, -9
    # For all other categorical features, remove values < -1.
    df = df[(df[['FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX','EDUCYR','HIDEG',
                 'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
                 'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
                 'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42',
                 'PHQ242','EMPST','POVCAT','INSCOV']] >= -1).all(1)]
    df = df[(df[['OBTOTV15', 'OPTOTV15', 'ERTOT15', 'IPNGTD15', 'HHTOTD15']]>=0).all(1)]

    # Vectorized replacement for the original row-wise utilization sum:
    # office + outpatient + ER visits + inpatient nights + home-health days.
    df['TOTEXP15'] = df[['OBTOTV15', 'OPTOTV15', 'ERTOT15', 'IPNGTD15', 'HHTOTD15']].sum(axis=1)
    df = df.rename(columns = {'TOTEXP15' : 'UTILIZATION_reg'})
    return df
class MEPSDataset19Reg(SaveDataset):
    """MEPS Panel 19 / FY2015 dataset for regression on utilization.

    Thin wrapper around SaveDataset: reads the raw h181.csv consolidated
    file, applies default_preprocessing, and wires in the standard feature
    selection and protected-attribute configuration (RACE, privileged
    class 'White', instance weights PERWT15F).

    NOTE(review): the list-valued parameter defaults are shared mutable
    objects; they appear to be only read here, but None-sentinel defaults
    would be safer -- confirm before changing the public signature.
    """
    def __init__(self, label_name='UTILIZATION_reg', favorable_classes=[1.0],
                 protected_attribute_names=['RACE'],
                 privileged_classes=[['White']],
                 instance_weights_name='PERWT15F',
                 categorical_features=['REGION','SEX','MARRY',
                     'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX',
                     'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
                     'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
                     'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42',
                     'PHQ242','EMPST','POVCAT','INSCOV'],
                 features_to_keep=['REGION','AGE','SEX','RACE','MARRY',
                     'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX',
                     'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
                     'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
                     'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42','PCS42',
                     'MCS42','K6SUM42','PHQ242','EMPST','POVCAT','INSCOV','UTILIZATION_reg','PERWT15F'],
                 features_to_drop=[],
                 na_values=[], custom_preprocessing=default_preprocessing,
                 metadata=default_mappings):
        # Raw MEPS consolidated file, expected in the current working
        # directory.
        filepath = './h181.csv'
        df = pd.read_csv(filepath, sep=',', na_values=na_values)
        super(MEPSDataset19Reg, self).__init__(df=df, label_name=label_name,
            favorable_classes=favorable_classes,
            protected_attribute_names=protected_attribute_names,
            privileged_classes=privileged_classes,
            instance_weights_name=instance_weights_name,
            categorical_features=categorical_features,
            features_to_keep=features_to_keep,
            features_to_drop=features_to_drop, na_values=na_values,
            custom_preprocessing=custom_preprocessing, metadata=metadata, dataset_name='meps_19_reg')
| 49.432692
| 137
| 0.573429
|
4a0549ca9e298615c94ca69550bda72e7390bff9
| 6,864
|
py
|
Python
|
docker/context/context.py
|
wpjunior/docker-py
|
030af62dca982e62ed418314c857da1f90ce4282
|
[
"Apache-2.0"
] | 2
|
2020-03-09T03:39:05.000Z
|
2020-03-23T09:56:09.000Z
|
docker/context/context.py
|
wpjunior/docker-py
|
030af62dca982e62ed418314c857da1f90ce4282
|
[
"Apache-2.0"
] | 6
|
2021-03-02T01:30:46.000Z
|
2022-02-26T03:15:16.000Z
|
docker/context/context.py
|
wpjunior/docker-py
|
030af62dca982e62ed418314c857da1f90ce4282
|
[
"Apache-2.0"
] | 3
|
2020-03-23T09:56:08.000Z
|
2020-06-14T05:30:17.000Z
|
import os
import json
from shutil import copyfile, rmtree
from docker.tls import TLSConfig
from docker.errors import ContextException
from docker.context.config import get_meta_dir
from docker.context.config import get_meta_file
from docker.context.config import get_tls_dir
from docker.context.config import get_context_host
class Context:
    """A docker CLI context: a named set of endpoints (host, TLS material,
    orchestrator) that can be persisted to and reloaded from the docker
    metadata directory."""

    def __init__(self, name, orchestrator="swarm", host=None, endpoints=None):
        if not name:
            raise Exception("Name not provided")
        self.name = name
        self.orchestrator = orchestrator
        if not endpoints:
            # Build a single default endpoint: keyed "docker" for swarm,
            # otherwise keyed by the orchestrator name.
            default_endpoint = "docker" if (
                orchestrator == "swarm"
            ) else orchestrator
            self.endpoints = {
                default_endpoint: {
                    "Host": get_context_host(host),
                    "SkipTLSVerify": False
                }
            }
        else:
            # Validate caller-supplied endpoints up front.
            for k, v in endpoints.items():
                ekeys = v.keys()
                for param in ["Host", "SkipTLSVerify"]:
                    if param not in ekeys:
                        raise ContextException(
                            "Missing parameter {} from endpoint {}".format(
                                param, k))
            self.endpoints = endpoints
        self.tls_cfg = {}
        # Placeholder paths until save() persists the context to disk.
        self.meta_path = "IN MEMORY"
        self.tls_path = "IN MEMORY"

    def set_endpoint(
            self, name="docker", host=None, tls_cfg=None,
            skip_tls_verify=False, def_namespace=None):
        """Add or replace endpoint *name*, optionally attaching a TLS
        configuration and a default namespace."""
        self.endpoints[name] = {
            "Host": get_context_host(host),
            "SkipTLSVerify": skip_tls_verify
        }
        if def_namespace:
            self.endpoints[name]["DefaultNamespace"] = def_namespace
        if tls_cfg:
            self.tls_cfg[name] = tls_cfg

    def inspect(self):
        """Return the full serializable description of this context."""
        return self.__call__()

    @classmethod
    def load_context(cls, name):
        """Load a persisted context by name; returns None when no metadata
        exists for it."""
        name, orchestrator, endpoints = Context._load_meta(name)
        if name:
            instance = cls(name, orchestrator, endpoints=endpoints)
            instance._load_certs()
            instance.meta_path = get_meta_dir(name)
            return instance
        return None

    @classmethod
    def _load_meta(cls, name):
        """Read the metadata JSON for *name*.

        Returns (name, orchestrator, endpoints) on success, or
        (None, None, None) when no metadata file exists. Raises on a
        corrupted/unreadable file.
        """
        metadata = {}
        meta_file = get_meta_file(name)
        if os.path.isfile(meta_file):
            try:
                # FIX: the original opened meta_file twice (a redundant
                # nested `with open(...)` inside the outer one); a single
                # open is sufficient.
                with open(meta_file) as f:
                    metadata = json.load(f)
                # Normalize SkipTLSVerify to a real boolean.
                for k, v in metadata["Endpoints"].items():
                    metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
                        v["SkipTLSVerify"])
            except (IOError, KeyError, ValueError) as e:
                # unknown format
                raise Exception("""Detected corrupted meta file for
                    context {} : {}""".format(name, e))
            return (
                metadata["Name"], metadata["Metadata"]["StackOrchestrator"],
                metadata["Endpoints"])
        return None, None, None

    def _load_certs(self):
        """Populate self.tls_cfg from the per-endpoint cert files under the
        context's TLS directory (files named ca*/cert*/key*)."""
        certs = {}
        tls_dir = get_tls_dir(self.name)
        for endpoint in self.endpoints.keys():
            if not os.path.isdir(os.path.join(tls_dir, endpoint)):
                continue
            ca_cert = None
            cert = None
            key = None
            for filename in os.listdir(os.path.join(tls_dir, endpoint)):
                if filename.startswith("ca"):
                    ca_cert = os.path.join(tls_dir, endpoint, filename)
                elif filename.startswith("cert"):
                    cert = os.path.join(tls_dir, endpoint, filename)
                elif filename.startswith("key"):
                    key = os.path.join(tls_dir, endpoint, filename)
            # Only build a TLSConfig when the full triple is present.
            if all([ca_cert, cert, key]):
                certs[endpoint] = TLSConfig(
                    client_cert=(cert, key), ca_cert=ca_cert)
        self.tls_cfg = certs
        self.tls_path = tls_dir

    def save(self):
        """Persist metadata and TLS material to the docker context dirs,
        updating meta_path/tls_path to the on-disk locations."""
        meta_dir = get_meta_dir(self.name)
        if not os.path.isdir(meta_dir):
            os.makedirs(meta_dir)
        with open(get_meta_file(self.name), "w") as f:
            f.write(json.dumps(self.Metadata))

        tls_dir = get_tls_dir(self.name)
        for endpoint, tls in self.tls_cfg.items():
            if not os.path.isdir(os.path.join(tls_dir, endpoint)):
                os.makedirs(os.path.join(tls_dir, endpoint))
            ca_file = tls.ca_cert
            if ca_file:
                copyfile(ca_file, os.path.join(
                    tls_dir, endpoint, os.path.basename(ca_file)))
            if tls.cert:
                cert_file, key_file = tls.cert
                copyfile(cert_file, os.path.join(
                    tls_dir, endpoint, os.path.basename(cert_file)))
                copyfile(key_file, os.path.join(
                    tls_dir, endpoint, os.path.basename(key_file)))

        self.meta_path = get_meta_dir(self.name)
        self.tls_path = get_tls_dir(self.name)

    def remove(self):
        """Delete this context's persisted metadata and TLS directories."""
        if os.path.isdir(self.meta_path):
            rmtree(self.meta_path)
        if os.path.isdir(self.tls_path):
            rmtree(self.tls_path)

    def __repr__(self):
        return "<%s: '%s'>" % (self.__class__.__name__, self.name)

    def __str__(self):
        return json.dumps(self.__call__(), indent=2)

    def __call__(self):
        # Merge the three property views into one inspect-style dict.
        result = self.Metadata
        result.update(self.TLSMaterial)
        result.update(self.Storage)
        return result

    @property
    def Name(self):
        return self.name

    @property
    def Host(self):
        # Swarm contexts store their endpoint under the "docker" key.
        if self.orchestrator == "swarm":
            return self.endpoints["docker"]["Host"]
        return self.endpoints[self.orchestrator]["Host"]

    @property
    def Orchestrator(self):
        return self.orchestrator

    @property
    def Metadata(self):
        return {
            "Name": self.name,
            "Metadata": {
                "StackOrchestrator": self.orchestrator
            },
            "Endpoints": self.endpoints
        }

    @property
    def TLSConfig(self):
        """TLS configuration for the primary endpoint, or None."""
        key = self.orchestrator
        if key == "swarm":
            key = "docker"
        if key in self.tls_cfg.keys():
            return self.tls_cfg[key]
        return None

    @property
    def TLSMaterial(self):
        # NOTE(review): assumes every stored TLSConfig has a (cert, key)
        # client_cert pair; a CA-only config would raise here -- confirm.
        certs = {}
        for endpoint, tls in self.tls_cfg.items():
            cert, key = tls.cert
            certs[endpoint] = list(
                map(os.path.basename, [tls.ca_cert, cert, key]))
        return {
            "TLSMaterial": certs
        }

    @property
    def Storage(self):
        return {
            "Storage": {
                "MetadataPath": self.meta_path,
                "TLSPath": self.tls_path
            }}
| 32.842105
| 78
| 0.540938
|
4a0549f31e0495e639a599202b126a13266033ba
| 6,281
|
py
|
Python
|
bcbio/ngsalign/novoalign.py
|
markdunning/bcbio-nextgen
|
37b69efcc5b2b3713b8d5cd207cece4cb343380d
|
[
"MIT"
] | 1
|
2021-11-11T18:49:15.000Z
|
2021-11-11T18:49:15.000Z
|
bcbio/ngsalign/novoalign.py
|
karegapauline/bcbio-nextgen
|
9be442305a54c601e4d3f5c7b09d8f9835c677fd
|
[
"MIT"
] | null | null | null |
bcbio/ngsalign/novoalign.py
|
karegapauline/bcbio-nextgen
|
9be442305a54c601e4d3f5c7b09d8f9835c677fd
|
[
"MIT"
] | null | null | null |
"""Next-gen sequencing alignment with Novoalign: http://www.novocraft.com
For BAM input handling this requires:
novoalign (with license for multicore)
samtools
"""
import os
import subprocess
from bcbio import bam, utils
from bcbio.ngsalign import alignprep, postalign
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.distributed.transaction import tx_tmpdir
from bcbio.utils import (memoize_outfile, file_exists)
# ## BAM realignment
def get_rg_info(names):
    """Build a SAM @RG header argument from the lane metadata dict.

    The separators are literal backslash-t sequences (not tab characters),
    which is how novoalign expects the read group on the command line.
    An LB (library) field is appended only when names provides a truthy
    "lb" entry.
    """
    fields = [r"@RG", r"ID:{rg}", r"PL:{pl}", r"PU:{pu}", r"SM:{sample}"]
    if names.get("lb"):
        fields.append(r"LB:{lb}")
    return r"\t".join(fields).format(**names)
def align_bam(in_bam, ref_file, names, align_dir, data):
    """Perform realignment of an input BAM file; uses unix pipes to avoid IO.

    Name-sorts the BAM with samtools, streams it into novoalign as paired
    BAM input (-F BAMPE) and pipes the SAM output into the bcbio
    post-alignment command, producing <lane>-sort.bam in *align_dir*.
    Skips all work when the output file already exists.
    """
    config = data["config"]
    out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
    novoalign = config_utils.get_program("novoalign", config)
    samtools = config_utils.get_program("samtools", config)
    resources = config_utils.get_resources("novoalign", config)
    num_cores = config["algorithm"].get("num_cores", 1)
    # samtools expects upper-case memory units (e.g. "4G").
    max_mem = resources.get("memory", "4G").upper()
    extra_novo_args = " ".join(_novoalign_args_from_config(config, False))

    if not file_exists(out_file):
        with tx_tmpdir(data, base_dir=align_dir) as work_dir:
            with postalign.tobam_cl(data, out_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file):
                rg_info = get_rg_info(names)
                tx_out_prefix = os.path.splitext(tx_out_file)[0]
                prefix1 = "%s-in1" % tx_out_prefix
                # Single shell pipeline: name-sort -> novoalign -> tobam_cl.
                # NOTE(review): JAVA_HOME is unset before running --
                # presumably to avoid environment interference; confirm.
                cmd = ("unset JAVA_HOME && "
                       "{samtools} sort -n -o -l 1 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} "
                       "| {novoalign} -o SAM '{rg_info}' -d {ref_file} -f /dev/stdin "
                       " -F BAMPE -c {num_cores} {extra_novo_args} | ")
                # The format(**locals()) relies on the local names above.
                cmd = (cmd + tobam_cl).format(**locals())
                do.run(cmd, "Novoalign: %s" % names["sample"], None,
                       [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)])
    return out_file
# ## Fastq to BAM alignment
def align_pipe(fastq_file, pair_file, ref_file, names, align_dir, data):
    """Perform piped alignment of fastq input files, generating sorted output BAM.

    Supports both single and paired fastq input; when alignment splitting
    is configured (or the input is an .sdf archive), the split inputs are
    prepared via alignprep and recombined into final_file. Stores the
    resulting BAM path in data["work_bam"] and returns the updated data.
    """
    pair_file = pair_file if pair_file else ""
    # back compatible -- older files were named with lane information, use sample name now
    out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
    if not utils.file_exists(out_file):
        out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data)))
    if data.get("align_split") or fastq_file.endswith(".sdf"):
        # Split alignment: out_file becomes the per-split output and
        # final_file the combined result (presumably via named pipes
        # feeding a subset of reads -- see alignprep for details).
        final_file = out_file
        out_file, data = alignprep.setup_combine(final_file, data)
        fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data)
    else:
        final_file = None
    samtools = config_utils.get_program("samtools", data["config"])
    novoalign = config_utils.get_program("novoalign", data["config"])
    resources = config_utils.get_resources("novoalign", data["config"])
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    max_mem = resources.get("memory", "1G")
    extra_novo_args = " ".join(_novoalign_args_from_config(data["config"]))
    rg_info = get_rg_info(names)
    # Only run when neither the split output nor the combined output exists.
    if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
        with tx_tmpdir(data) as work_dir:
            with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
                tx_out_prefix = os.path.splitext(tx_out_file)[0]
                # Novoalign writes SAM to stdout, piped straight into the
                # bcbio post-alignment (sort/convert) command.
                cmd = ("unset JAVA_HOME && "
                       "{novoalign} -o SAM '{rg_info}' -d {ref_file} -f {fastq_file} {pair_file} "
                       "  -c {num_cores} {extra_novo_args} | ")
                cmd = (cmd + tobam_cl).format(**locals())
                do.run(cmd, "Novoalign: %s" % names["sample"], None,
                       [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, fastq_file)])
    data["work_bam"] = out_file
    return data
def _novoalign_args_from_config(config, need_quality=True):
    """Select novoalign options based on configuration parameters.

    Returns a flat list of command-line arguments: quality-format flags
    (when *need_quality* is true), multiple-mapper handling (-r), and any
    extra resource options.
    """
    if need_quality:
        qual_format = config["algorithm"].get("quality_format", "").lower()
        qual_flags = ["-F", "ILMFQ" if qual_format == "illumina" else "STDFQ"]
    else:
        qual_flags = []
    multi_mappers = config["algorithm"].get("multiple_mappers")
    if multi_mappers is True:
        multi_flag = "Random"
    elif isinstance(multi_mappers, str):
        # BUG FIX: the original tested isinstance(..., basestring), which
        # raises NameError on Python 3; str covers the string case there.
        multi_flag = multi_mappers
    else:
        multi_flag = "None"
    multi_flags = ["-r"] + multi_flag.split()
    resources = config_utils.get_resources("novoalign", config)
    # default arguments for improved variant calling based on
    # comparisons to reference materials: turn off soft clipping and recalibrate
    if resources.get("options") is None:
        extra_args = ["-o", "FullNW", "-k"]
    else:
        extra_args = [str(x) for x in resources.get("options", [])]
    return qual_flags + multi_flags + extra_args
# Tweaks to add
# -k -t 200 -K quality calibration metrics
# paired end sizes
# ## Indexing
@memoize_outfile(ext=".ndx")
def refindex(ref_file, kmer_size=None, step_size=None, out_file=None):
    """Build a novoalign index (.ndx) for *ref_file* via novoindex.

    kmer_size/step_size map to novoindex -k/-s when given; out_file is
    supplied by the memoize_outfile decorator.
    """
    cmd = ["novoindex"]
    if kmer_size:
        cmd.extend(["-k", str(kmer_size)])
    if step_size:
        cmd.extend(["-s", str(step_size)])
    cmd.extend([out_file, ref_file])
    subprocess.check_call(cmd)
# ## Galaxy integration
# Optional galaxy location file. Falls back on remap_index_fn if not found
# (presumably resolved against the galaxy tool-data directory -- confirm
# against the ngsalign loader).
galaxy_location_file = "novoalign_indices.loc"
def remap_index_fn(ref_file):
    """Map a sequence reference path to an equivalent novoalign index.

    Tries, in order: the extension-less path with /seq/ swapped for
    /novoalign/, the extension-less path + .ndx, then ref_file + .bs.ndx
    and ref_file + .ndx. Returns the first existing candidate, or the
    first candidate when none exist.
    """
    base = os.path.splitext(ref_file)[0]
    candidates = [
        base.replace("/seq/", "/novoalign/"),
        base + ".ndx",
        ref_file + ".bs.ndx",
        ref_file + ".ndx",
    ]
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return candidates[0]
| 43.020548
| 105
| 0.649578
|
4a054acfd79f55324a66a45f692cccaf924458fa
| 834
|
py
|
Python
|
users/forms.py
|
cantoniazzi/me-too
|
557987d4863061b81c82e38875edeeb05fcae8c9
|
[
"MIT"
] | null | null | null |
users/forms.py
|
cantoniazzi/me-too
|
557987d4863061b81c82e38875edeeb05fcae8c9
|
[
"MIT"
] | null | null | null |
users/forms.py
|
cantoniazzi/me-too
|
557987d4863061b81c82e38875edeeb05fcae8c9
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib.auth.models import User
class UserRegisterForm(forms.Form):
    """Registration form: all fields are mandatory plain CharFields."""
    name = forms.CharField(required = True)
    email = forms.CharField(required = True)
    password = forms.CharField(required = True)
    fone = forms.CharField(required = True)
    company_name = forms.CharField(required = True)

    def is_valid(self):
        """Run base Form validation plus a username-uniqueness check.

        Returns True only when both pass; error messages are accumulated
        as non-field errors instead of short-circuiting.
        """
        valid = True
        if not super(UserRegisterForm, self).is_valid():
            self.add_errorForm("Please, check the informed data")
            valid = False
        # NOTE(review): self.data['name'] raises KeyError when the form is
        # submitted without a 'name' key -- confirm callers always bind it.
        user_exists = User.objects.filter(username=self.data['name']).exists()
        if user_exists:
            self.add_errorForm("User already exists")
            valid = False
        return valid

    def add_errorForm(self, message):
        # Append *message* to the non-field error list, creating the list
        # on first use.
        errors = self._errors.setdefault(forms.forms.NON_FIELD_ERRORS,forms.utils.ErrorList())
        errors.append(message)
| 30.888889
| 88
| 0.726619
|
4a054b9f2be2085448d8a80d0c40d59d271bfffd
| 7,516
|
py
|
Python
|
datasets/loaders.py
|
fbuchert/mixmatch-pytorch
|
5dc989432bf26561b3c40ea03b319d12c7ace20b
|
[
"MIT"
] | null | null | null |
datasets/loaders.py
|
fbuchert/mixmatch-pytorch
|
5dc989432bf26561b3c40ea03b319d12c7ace20b
|
[
"MIT"
] | null | null | null |
datasets/loaders.py
|
fbuchert/mixmatch-pytorch
|
5dc989432bf26561b3c40ea03b319d12c7ace20b
|
[
"MIT"
] | null | null | null |
from typing import Union, Tuple, NamedTuple, List
import numpy as np
from torch.utils.data import (
Dataset,
DataLoader,
Sampler,
RandomSampler,
WeightedRandomSampler,
)
def get_sampler(
    dataset: Dataset,
    num_samples: int,
    reweighted: bool = False,
    median_freq: bool = False,
    replacement: bool = True,
):
    """Return a torch sampler over *dataset*.

    With ``reweighted=True`` this builds a WeightedRandomSampler whose
    per-sample weights are inversely proportional to the (median)
    frequency of each class in ``dataset.targets``; otherwise a uniform
    RandomSampler is returned.

    Parameters
    ----------
    dataset: Dataset
        Torch dataset to sample from (must expose ``targets`` when
        *reweighted* is true).
    num_samples: int
        Number of samples drawn per epoch (honored when sampling with
        replacement).
    reweighted: bool
        Select inverse-frequency weighting (see get_reweighted_sampler).
    median_freq: bool
        Use median-frequency weighting (see get_reweighted_sampler).
    replacement: bool
        Draw with replacement.
    """
    if not reweighted:
        return get_uniform_sampler(dataset, replacement, num_samples)
    return get_reweighted_sampler(
        dataset.targets, num_samples, replacement, median_freq
    )
def get_uniform_sampler(dataset: Dataset, replacement: bool = False, num_samples: int = None):
    """Return a RandomSampler drawing uniformly from *dataset*.

    *num_samples* is only passed through when *replacement* is True;
    without replacement the sampler performs one full permutation of the
    dataset (RandomSampler rejects num_samples in that mode).
    """
    if not replacement:
        return RandomSampler(dataset, replacement=False)
    return RandomSampler(dataset, replacement=True, num_samples=num_samples)
def get_reweighted_sampler(targets: List, num_samples: int, replacement: bool = True, median_freq: bool = False):
    """Build a WeightedRandomSampler with inverse-frequency class weights.

    Each sample's weight is 1 / (count(class) / total) -- or, with
    *median_freq*, 1 / (count(class) / median(counts)) -- so rare classes
    are over-sampled. Useful for highly imbalanced datasets.

    Parameters
    ----------
    targets: List
        Per-sample class labels (any hashable values np.unique handles).
    num_samples: int
        Samples drawn per epoch; only honored with replacement, otherwise
        len(targets) is used.
    replacement: bool
        Draw with replacement.
    median_freq: bool
        Use median-frequency instead of total-frequency weighting.
    """
    labels, counts = np.unique(targets, return_counts=True)
    if median_freq:
        class_weights = 1 / (counts / np.median(counts))
    else:
        class_weights = 1 / (counts / np.sum(counts))

    sample_weights = np.zeros(len(targets))
    targets_arr = np.array(targets)
    # BUG FIX: the original indexed `class_weights[class_label]`, which
    # assumes labels are exactly 0..n_classes-1 and raises IndexError for
    # non-contiguous or non-zero-based labels. np.unique returns labels
    # and counts in matching order, so pair them positionally instead.
    for idx, class_label in enumerate(labels):
        sample_weights[targets_arr == class_label] = class_weights[idx]

    num_samples = num_samples if replacement else len(targets)
    return WeightedRandomSampler(sample_weights, num_samples=num_samples, replacement=replacement)
def create_loaders(
    args,
    train_labeled: Dataset,
    train_unlabeled: Dataset,
    validation_dataset: Dataset,
    test_dataset: Dataset,
    batch_size: int,
    num_workers: int = 0,
    mu: int = 1,
    total_iters: int = None,
) -> (DataLoader, DataLoader, DataLoader):
    """Assemble the data loaders used for semi-supervised training.

    The labeled loader draws ``total_iters * batch_size`` samples per
    epoch (or one pass over *train_labeled* when *total_iters* is None;
    this accommodates MixMatch-style "epochs" of a fixed iteration count).
    The unlabeled loader draws ``mu`` times as many samples with batch
    size ``batch_size * mu``. Validation and test loaders are plain
    shuffled loaders. ``args`` supplies ``weighted_sampling`` (labeled
    sampler reweighting) and ``pin_memory``.

    Returns
    -------
    ``((labeled_loader, unlabeled_loader), validation_loader, test_loader)``
    """
    if total_iters is None:
        num_labeled_samples = len(train_labeled)
    else:
        num_labeled_samples = total_iters * batch_size

    labeled_sampler = get_sampler(
        train_labeled,
        num_samples=num_labeled_samples,
        reweighted=args.weighted_sampling,
    )
    train_loader_labeled = DataLoader(
        train_labeled,
        batch_size=batch_size,
        sampler=labeled_sampler,
        num_workers=num_workers,
        drop_last=False,
        pin_memory=args.pin_memory,
    )

    unlabeled_sampler = get_sampler(
        train_unlabeled, num_samples=num_labeled_samples * mu, reweighted=False
    )
    train_loader_unlabeled = DataLoader(
        train_unlabeled,
        batch_size=batch_size * mu,
        sampler=unlabeled_sampler,
        num_workers=num_workers,
        drop_last=False,
        pin_memory=args.pin_memory,
    )

    # Validation and test share identical loader settings.
    eval_kwargs = dict(
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=True,
        pin_memory=args.pin_memory,
    )
    validation_loader = DataLoader(validation_dataset, **eval_kwargs)
    test_loader = DataLoader(test_dataset, **eval_kwargs)

    return (
        (train_loader_labeled, train_loader_unlabeled),
        validation_loader,
        test_loader,
    )
| 37.58
| 120
| 0.696381
|
4a054bb8b23dcaa57064a88846828347c1119eae
| 3,017
|
py
|
Python
|
ttp/src/iam_backdoor_users_password_src.py
|
blackbotinc/AWS-Attack
|
ad4668ab60173aabce3c6b9c7685160be5e3f14d
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 26
|
2021-03-29T13:39:28.000Z
|
2022-03-21T10:57:58.000Z
|
ttp/src/iam_backdoor_users_password_src.py
|
blackbotinc/AWS-Attack
|
ad4668ab60173aabce3c6b9c7685160be5e3f14d
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
ttp/src/iam_backdoor_users_password_src.py
|
blackbotinc/AWS-Attack
|
ad4668ab60173aabce3c6b9c7685160be5e3f14d
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 8
|
2021-02-23T12:17:04.000Z
|
2022-02-25T13:28:14.000Z
|
#!/usr/bin/env python3
import datetime
import argparse
from random import choice
import string
from botocore.exceptions import ClientError
def main(args, awsattack_main, data=None):
    """Backdoor IAM users by setting console login passwords.

    Iterates over *data* (a list of IAM user names) and calls either
    create_login_profile (default) or update_login_profile (when
    args.update) with a freshly generated, policy-compliant password.
    Per-user ClientError failures are reported and skipped.

    Returns a summary dict with 'backdoored_password_count'.
    """
    session = awsattack_main.get_active_session()
    print = awsattack_main.print
    users = data
    summary_data = {}
    client = awsattack_main.get_boto3_client('iam')
    try:
        password_policy = client.get_account_password_policy()
    except:
        # Policy unable to be fetched, set to None so that a 128 char password
        # with all types of characters gets created below
        password_policy = None
    # NOTE(review): target_user appears unused -- confirm before removing.
    target_user = ''
    # Default password used when no explicit usernames were requested.
    password = create_valid_password(password_policy)
    summary_data['backdoored_password_count'] = 0
    if args.update:
        # Overwrite the password of users that already have a login profile.
        func = 'update_login_profile'
        print('Modifying an IAM user\'s current password')
    else:
        # Create a login profile (console password) for users without one.
        func = 'create_login_profile'
        print('Creating an IAM user password')
    caller = getattr(client, func)
    for user in users:
        if args.usernames is None:
            pass
        else:
            # Explicit user list: announce each user and generate a fresh
            # password per user.
            print(' User: {}'.format(user))
            password = create_valid_password(password_policy)
        try:
            caller(
                UserName=user,
                Password=password,
                PasswordResetRequired=False)
        except ClientError as error:
            code = error.response['Error']['Code']
            if code == 'AccessDenied':
                print(' FAILURE: MISSING NEEDED PERMISSIONS')
            elif code == 'EntityAlreadyExists':
                print(' FAILURE: LOGIN PROFILE ALREADY EXISTS')
            else:
                print(' FAILURE: {}'.format(code))
            continue
        print(' Password successfully changed')
        print(' Password: {}'.format(password))
        summary_data['backdoored_password_count'] += 1
    return summary_data
def create_valid_password(password_policy):
    """Generate a random password satisfying the account password policy.

    Args:
        password_policy: Mapping exposing the IAM policy fields
            (``RequireNumbers``, ``RequireSymbols``,
            ``RequireUppercaseCharacters``, ``MinimumPasswordLength``), or
            ``None``/an incomplete mapping when the policy is unknown.

    Returns:
        str: A password meeting the given policy; when the policy cannot be
        read, a 128-character password drawn from every character class so
        it is accepted regardless of the real policy.
    """
    symbols = '!@#$%^&*()_+=-\][{}|;:",./?><`~'
    password = ''.join(choice(string.ascii_lowercase) for _ in range(3))
    try:
        if password_policy['RequireNumbers'] is True:
            password += ''.join(choice(string.digits) for _ in range(3))
        if password_policy['RequireSymbols'] is True:
            password += ''.join(choice(symbols) for _ in range(3))
        if password_policy['RequireUppercaseCharacters'] is True:
            # BUG FIX: was `string.uppercase`, which only exists on Python 2.
            # The AttributeError it raised on Python 3 was swallowed by a bare
            # except, so uppercase-requiring policies were never honoured.
            password += ''.join(choice(string.ascii_uppercase) for _ in range(3))
        if password_policy['MinimumPasswordLength'] > 0:
            while len(password) < password_policy['MinimumPasswordLength']:
                password += choice(string.digits)
    except (TypeError, KeyError):
        # Policy is None or missing fields: build a max-length password using
        # every character class so it is accepted no matter what the real
        # policy requires.
        characters = string.ascii_lowercase + string.ascii_uppercase + string.digits + symbols
        password = ''.join(choice(characters) for _ in range(128))
    return password
| 37.246914
| 94
| 0.628107
|
4a054be41d8b24b50078921e01a3437e85b8ea2a
| 204,121
|
py
|
Python
|
git_cl.py
|
iamanother/depot_tools
|
3b563091836025e39b6b44ec0fbdde2d7ba36024
|
[
"BSD-3-Clause"
] | null | null | null |
git_cl.py
|
iamanother/depot_tools
|
3b563091836025e39b6b44ec0fbdde2d7ba36024
|
[
"BSD-3-Clause"
] | null | null | null |
git_cl.py
|
iamanother/depot_tools
|
3b563091836025e39b6b44ec0fbdde2d7ba36024
|
[
"BSD-3-Clause"
] | 1
|
2021-06-29T00:41:44.000Z
|
2021-06-29T00:41:44.000Z
|
#!/usr/bin/env vpython
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copyright (C) 2008 Evan Martin <martine@danga.com>
"""A git-command for integrating reviews on Gerrit."""
from __future__ import print_function
import base64
import collections
import datetime
import fnmatch
import httplib2
import itertools
import json
import logging
import multiprocessing
import optparse
import os
import re
import shutil
import stat
import sys
import tempfile
import textwrap
import time
import uuid
import webbrowser
import zlib
from third_party import colorama
import auth
import clang_format
import fix_encoding
import gclient_utils
import gerrit_util
import git_common
import git_footers
import git_new_branch
import metrics
import metrics_utils
import owners_client
import owners_finder
import presubmit_canned_checks
import presubmit_support
import scm
import setup_color
import split_cl
import subcommand
import subprocess2
import watchlists
from third_party import six
from six.moves import urllib
# Python 3 removed `basestring`; recreate it as a tuple so that
# isinstance(x, basestring) checks below keep working on both major versions.
if sys.version_info.major == 3:
  basestring = (str,)  # pylint: disable=redefined-builtin
__version__ = '2.0'

# Traces for git push will be stored in a traces directory inside the
# depot_tools checkout.
DEPOT_TOOLS = os.path.dirname(os.path.abspath(__file__))
TRACES_DIR = os.path.join(DEPOT_TOOLS, 'traces')
PRESUBMIT_SUPPORT = os.path.join(DEPOT_TOOLS, 'presubmit_support.py')

# When collecting traces, Git hashes will be reduced to 6 characters to reduce
# the size after compression.
GIT_HASH_RE = re.compile(r'\b([a-f0-9]{6})[a-f0-9]{34}\b', flags=re.I)
# Used to redact the cookies from the gitcookies file.
GITCOOKIES_REDACT_RE = re.compile(r'1/.*')

# Retry budget for Buildbucket RPCs (see _call_buildbucket).
MAX_ATTEMPTS = 3

# The maximum number of traces we will keep. Multiplied by 3 since we store
# 3 files per trace.
MAX_TRACES = 3 * 10
# Message to be displayed to the user to inform where to find the traces for a
# git-cl upload execution.
TRACES_MESSAGE = (
    '\n'
    'The traces of this git-cl execution have been recorded at:\n'
    ' %(trace_name)s-traces.zip\n'
    'Copies of your gitcookies file and git config have been recorded at:\n'
    ' %(trace_name)s-git-info.zip\n')
# Format of the message to be stored as part of the traces to give developers a
# better context when they go through traces.
TRACES_README_FORMAT = (
    'Date: %(now)s\n'
    '\n'
    'Change: https://%(gerrit_host)s/q/%(change_id)s\n'
    'Title: %(title)s\n'
    '\n'
    '%(description)s\n'
    '\n'
    'Execution time: %(execution_time)s\n'
    'Exit code: %(exit_code)s\n') + TRACES_MESSAGE

# Repo hook executed after a CL lands, if present.
POSTUPSTREAM_HOOK = '.git/hooks/post-cl-land'
DESCRIPTION_BACKUP_FILE = '.git_cl_description_backup'
REFS_THAT_ALIAS_TO_OTHER_REFS = {
    'refs/remotes/origin/lkgr': 'refs/remotes/origin/master',
    'refs/remotes/origin/lkcr': 'refs/remotes/origin/master',
}

DEFAULT_OLD_BRANCH = 'refs/remotes/origin/master'
DEFAULT_NEW_BRANCH = 'refs/remotes/origin/main'

# Valid extensions for files we want to lint.
DEFAULT_LINT_REGEX = r"(.*\.cpp|.*\.cc|.*\.h)"
DEFAULT_LINT_IGNORE_REGEX = r"$^"

# File name for yapf style config files.
YAPF_CONFIG_FILENAME = '.style.yapf'

# The issue, patchset and codereview server are stored on git config for each
# branch under branch.<branch-name>.<config-key>.
ISSUE_CONFIG_KEY = 'gerritissue'
PATCHSET_CONFIG_KEY = 'gerritpatchset'
CODEREVIEW_SERVER_CONFIG_KEY = 'gerritserver'

# Shortcut since it quickly becomes repetitive.
Fore = colorama.Fore

# Initialized in main()
settings = None

# Used by tests/git_cl_test.py to add extra logging.
# Inside the weirdly failing test, add this:
# >>> self.mock(git_cl, '_IS_BEING_TESTED', True)
# And scroll up to see the stack trace printed.
_IS_BEING_TESTED = False

_KNOWN_GERRIT_TO_SHORT_URLS = {
    'https://chrome-internal-review.googlesource.com': 'https://crrev.com/i',
    'https://chromium-review.googlesource.com': 'https://crrev.com/c',
}
# Short-URL rewriting relies on every host mapping to a distinct prefix.
assert len(_KNOWN_GERRIT_TO_SHORT_URLS) == len(
    set(_KNOWN_GERRIT_TO_SHORT_URLS.values())), 'must have unique values'
class GitPushError(Exception):
  """Raised when pushing a change to the remote fails."""
  pass
def DieWithError(message, change_desc=None):
  """Print an error and exit(1), preserving the CL description if given.

  When change_desc is provided, its text is backed up to disk and echoed to
  stdout first, so the user's work is not lost.
  """
  if change_desc:
    SaveDescriptionBackup(change_desc)
    banner = '=' * 72
    print('\n ** Content of CL description **\n' +
          banner + '\n' +
          change_desc.description + '\n' +
          banner + '\n')
  print(message, file=sys.stderr)
  sys.exit(1)
def SaveDescriptionBackup(change_desc):
  """Write the CL description to a backup file in the depot_tools checkout."""
  destination = os.path.join(DEPOT_TOOLS, DESCRIPTION_BACKUP_FILE)
  print('\nsaving CL description to %s\n' % destination)
  with open(destination, 'w') as fh:
    fh.write(change_desc.description)
def GetNoGitPagerEnv():
  """Return a copy of the current environment with git's pager disabled."""
  environment = dict(os.environ)
  # 'cat' is a magical git string that disables pagers on all platforms.
  environment['GIT_PAGER'] = 'cat'
  return environment
def RunCommand(args, error_ok=False, error_message=None, shell=False, **kwargs):
  """Run a command and return its decoded stdout.

  On a non-zero exit: dies with an error message unless error_ok is set, in
  which case the failing command's stdout is returned instead.
  """
  try:
    raw = subprocess2.check_output(args, shell=shell, **kwargs)
  except subprocess2.CalledProcessError as e:
    logging.debug('Failed running %s', args)
    failing_output = e.stdout.decode('utf-8', 'replace')
    if not error_ok:
      message = error_message or failing_output or ''
      DieWithError('Command "%s" failed.\n%s' % (' '.join(args), message))
    return failing_output
  return raw.decode('utf-8', 'replace')
def RunGit(args, **kwargs):
  """Run `git <args>` via RunCommand and return its stdout as text."""
  cmd = ['git'] + list(args)
  return RunCommand(cmd, **kwargs)
def RunGitWithCode(args, suppress_stderr=False):
  """Run git and return a (exit_code, stdout_text) tuple."""
  stderr = subprocess2.DEVNULL if suppress_stderr else sys.stderr
  cmd = ['git'] + args
  try:
    (out, _), code = subprocess2.communicate(cmd,
                                             env=GetNoGitPagerEnv(),
                                             stdout=subprocess2.PIPE,
                                             stderr=stderr)
  except subprocess2.CalledProcessError as e:
    logging.debug('Failed running %s', cmd)
    return e.returncode, e.stdout.decode('utf-8', 'replace')
  return code, out.decode('utf-8', 'replace')
def RunGitSilent(args):
  """Run git discarding stderr; ignore the exit code and return stdout."""
  _, output = RunGitWithCode(args, suppress_stderr=True)
  return output
def time_sleep(seconds):
  """Sleep for `seconds`; indirection kept so tests can mock it out."""
  # Use this so that it can be mocked in tests without interfering with python
  # system machinery.
  return time.sleep(seconds)
def time_time():
  """Return time.time(); indirection kept so tests can mock it out."""
  # Use this so that it can be mocked in tests without interfering with python
  # system machinery.
  return time.time()
def datetime_now():
  """Return datetime.datetime.now(); indirection so tests can mock it out."""
  # Use this so that it can be mocked in tests without interfering with python
  # system machinery.
  return datetime.datetime.now()
def confirm_or_exit(prefix='', action='confirm'):
  """Asks user to press enter to continue or press Ctrl+C to abort."""
  # Choose the capitalisation/leading space of the join word so the prompt
  # reads naturally after whatever `prefix` ends with.
  if not prefix or prefix.endswith('\n'):
    joiner = 'Press'
  elif prefix.endswith(('.', '?')):
    joiner = ' Press'
  elif prefix.endswith(' '):
    joiner = 'press'
  else:
    joiner = ' press'
  prompt = '%s%s Enter to %s, or Ctrl+C to abort' % (prefix, joiner, action)
  gclient_utils.AskForData(prompt)
def ask_for_explicit_yes(prompt):
  """Returns whether user typed 'y' or 'yes' to confirm the given prompt."""
  result = gclient_utils.AskForData(prompt + ' [Yes/No]: ').lower()
  while True:
    # NOTE(review): any prefix of 'yes' is accepted — including the empty
    # string from a bare Enter, since 'yes'.startswith('') is True. Confirm
    # that "Enter means yes" is the intended default.
    if 'yes'.startswith(result):
      return True
    if 'no'.startswith(result):
      return False
    result = gclient_utils.AskForData('Please, type yes or no: ').lower()
def _get_properties_from_options(options):
prop_list = getattr(options, 'properties', [])
properties = dict(x.split('=', 1) for x in prop_list)
for key, val in properties.items():
try:
properties[key] = json.loads(val)
except ValueError:
pass # If a value couldn't be evaluated, treat it as a string.
return properties
def _call_buildbucket(http, buildbucket_host, method, request):
  """Calls a buildbucket v2 method and returns the parsed json response.

  Retries non-4xx failures with exponential backoff up to MAX_ATTEMPTS tries;
  4xx responses fail immediately.

  Raises:
    BuildbucketResponseException: on a 4xx response or once retries run out.
  """
  headers = {
    'Accept': 'application/json',
    'Content-Type': 'application/json',
  }
  request = json.dumps(request)
  url = 'https://%s/prpc/buildbucket.v2.Builds/%s' % (buildbucket_host, method)
  logging.info('POST %s with %s' % (url, request))
  attempts = 1
  time_to_sleep = 1
  while True:
    response, content = http.request(url, 'POST', body=request, headers=headers)
    if response.status == 200:
      # Skip the 4-character prefix pRPC prepends before the JSON payload.
      return json.loads(content[4:])
    if attempts >= MAX_ATTEMPTS or 400 <= response.status < 500:
      msg = '%s error when calling POST %s with %s: %s' % (
          response.status, url, request, content)
      raise BuildbucketResponseException(msg)
    logging.debug(
        '%s error when calling POST %s with %s. '
        'Sleeping for %d seconds and retrying...' % (
            response.status, url, request, time_to_sleep))
    time.sleep(time_to_sleep)
    # Exponential backoff between attempts.
    time_to_sleep *= 2
    attempts += 1
  assert False, 'unreachable'
def _parse_bucket(raw_bucket):
legacy = True
project = bucket = None
if '/' in raw_bucket:
legacy = False
project, bucket = raw_bucket.split('/', 1)
# Assume luci.<project>.<bucket>.
elif raw_bucket.startswith('luci.'):
project, bucket = raw_bucket[len('luci.'):].split('.', 1)
# Otherwise, assume prefix is also the project name.
elif '.' in raw_bucket:
project = raw_bucket.split('.')[0]
bucket = raw_bucket
# Legacy buckets.
if legacy and project and bucket:
print('WARNING Please use %s/%s to specify the bucket.' % (project, bucket))
return project, bucket
def _trigger_tryjobs(changelist, jobs, options, patchset):
  """Sends a request to Buildbucket to trigger tryjobs for a changelist.

  Args:
    changelist: Changelist that the tryjobs are associated with.
    jobs: A list of (project, bucket, builder).
    options: Command-line options.
    patchset: Patchset number the tryjobs should be triggered against.

  Raises:
    BuildbucketResponseException: if any individual schedule request in the
      batch came back with an error.
  """
  print('Scheduling jobs on:')
  for project, bucket, builder in jobs:
    print(' %s/%s: %s' % (project, bucket, builder))
  print('To see results here, run: git cl try-results')
  print('To see results in browser, run: git cl web')
  requests = _make_tryjob_schedule_requests(changelist, jobs, options, patchset)
  if not requests:
    return
  http = auth.Authenticator().authorize(httplib2.Http())
  http.force_exception_to_status_code = True
  # All schedule requests go out as a single Batch RPC.
  batch_request = {'requests': requests}
  batch_response = _call_buildbucket(
      http, options.buildbucket_host, 'Batch', batch_request)
  errors = [
      ' ' + response['error']['message']
      for response in batch_response.get('responses', [])
      if 'error' in response
  ]
  if errors:
    raise BuildbucketResponseException(
        'Failed to schedule builds for some bots:\n%s' % '\n'.join(errors))
def _make_tryjob_schedule_requests(changelist, jobs, options, patchset):
  """Constructs requests for Buildbucket to trigger tryjobs.

  Args:
    changelist: Changelist the builds are for.
    jobs: List of (project, bucket, builder) tuples to schedule.
    options: Command-line options; category, clobber, retry_failed, revision,
      project and properties are consulted when present.
    patchset: Patchset number the builds should apply to.

  Returns:
    A list of scheduleBuild request dicts, one per job.
  """
  gerrit_changes = [changelist.GetGerritChange(patchset)]
  shared_properties = {
      'category': options.ensure_value('category', 'git_cl_try')
  }
  if options.ensure_value('clobber', False):
    shared_properties['clobber'] = True
  shared_properties.update(_get_properties_from_options(options) or {})
  shared_tags = [{'key': 'user_agent', 'value': 'git_cl_try'}]
  if options.ensure_value('retry_failed', False):
    shared_tags.append({'key': 'retry_failed',
                        'value': '1'})
  requests = []
  for (project, bucket, builder) in jobs:
    properties = shared_properties.copy()
    # Presubmit builders run as dry runs when manually scheduled.
    if 'presubmit' in builder.lower():
      properties['dry_run'] = 'true'
    requests.append({
        'scheduleBuild': {
            'requestId': str(uuid.uuid4()),
            'builder': {
                'project': getattr(options, 'project', None) or project,
                'bucket': bucket,
                'builder': builder,
            },
            'gerritChanges': gerrit_changes,
            'properties': properties,
            'tags': [
                {'key': 'builder', 'value': builder},
            ] + shared_tags,
        }
    })
    # An explicit --revision pins the build to a specific gitiles commit.
    if options.ensure_value('revision', None):
      remote, remote_branch = changelist.GetRemoteBranch()
      requests[-1]['scheduleBuild']['gitilesCommit'] = {
          'host': gerrit_changes[0]['host'],
          'project': gerrit_changes[0]['project'],
          'id': options.revision,
          'ref': GetTargetRef(remote, remote_branch, None)
      }
  return requests
def _fetch_tryjobs(changelist, buildbucket_host, patchset=None):
  """Fetches tryjobs from buildbucket.

  Returns list of buildbucket.v2.Build with the try jobs for the changelist.
  """
  # Only request the handful of fields the callers actually use.
  fields = ['id', 'builder', 'status', 'createTime', 'tags']
  request = {
      'predicate': {
          'gerritChanges': [changelist.GetGerritChange(patchset)],
      },
      'fields': ','.join('builds.*.' + field for field in fields),
  }
  authenticator = auth.Authenticator()
  if authenticator.has_cached_credentials():
    http = authenticator.authorize(httplib2.Http())
  else:
    # Fall back to anonymous access; builds not visible anonymously will be
    # missing from the results.
    print('Warning: Some results might be missing because %s' %
          # Get the message on how to login.
          (str(auth.LoginRequiredError()),))
    http = httplib2.Http()
  http.force_exception_to_status_code = True
  response = _call_buildbucket(http, buildbucket_host, 'SearchBuilds', request)
  return response.get('builds', [])
def _fetch_latest_builds(changelist, buildbucket_host, latest_patchset=None):
  """Fetches builds from the latest patchset that has builds (within
  the last few patchsets).

  Args:
    changelist (Changelist): The CL to fetch builds for
    buildbucket_host (str): Buildbucket host, e.g. "cr-buildbucket.appspot.com"
    lastest_patchset(int|NoneType): the patchset to start fetching builds from.
      If None (default), starts with the latest available patchset.
  Returns:
    A tuple (builds, patchset) where builds is a list of buildbucket.v2.Build,
    and patchset is the patchset number where those builds came from.
    Returns ([], 0) when no patchset in the scanned window has builds.
  """
  assert buildbucket_host
  assert changelist.GetIssue(), 'CL must be uploaded first'
  assert changelist.GetCodereviewServer(), 'CL must be uploaded first'
  if latest_patchset is None:
    assert changelist.GetMostRecentPatchset()
    ps = changelist.GetMostRecentPatchset()
  else:
    assert latest_patchset > 0, latest_patchset
    ps = latest_patchset
  # Scan back through at most 5 patchsets looking for one that has builds.
  min_ps = max(1, ps - 5)
  while ps >= min_ps:
    builds = _fetch_tryjobs(changelist, buildbucket_host, patchset=ps)
    if len(builds):
      return builds, ps
    ps -= 1
  return [], 0
def _filter_failed_for_retry(all_builds):
"""Returns a list of buckets/builders that are worth retrying.
Args:
all_builds (list): Builds, in the format returned by _fetch_tryjobs,
i.e. a list of buildbucket.v2.Builds which includes status and builder
info.
Returns:
A dict {(proj, bucket): [builders]}. This is the same format accepted by
_trigger_tryjobs.
"""
grouped = {}
for build in all_builds:
builder = build['builder']
key = (builder['project'], builder['bucket'], builder['builder'])
grouped.setdefault(key, []).append(build)
jobs = []
for (project, bucket, builder), builds in grouped.items():
if 'triggered' in builder:
print('WARNING: Not scheduling %s. Triggered bots require an initial job '
'from a parent. Please schedule a manual job for the parent '
'instead.')
continue
if any(b['status'] in ('STARTED', 'SCHEDULED') for b in builds):
# Don't retry if any are running.
continue
# If builder had several builds, retry only if the last one failed.
# This is a bit different from CQ, which would re-use *any* SUCCESS-full
# build, but in case of retrying failed jobs retrying a flaky one makes
# sense.
builds = sorted(builds, key=lambda b: b['createTime'])
if builds[-1]['status'] not in ('FAILURE', 'INFRA_FAILURE'):
continue
# Don't retry experimental build previously triggered by CQ.
if any(t['key'] == 'cq_experimental' and t['value'] == 'true'
for t in builds[-1]['tags']):
continue
jobs.append((project, bucket, builder))
# Sort the jobs to make testing easier.
return sorted(jobs)
def _print_tryjobs(options, builds):
  """Prints nicely result of _fetch_tryjobs, grouped and colored by status."""
  if not builds:
    print('No tryjobs scheduled.')
    return
  # Compute column widths so builder (and optionally bucket) names align.
  longest_builder = max(len(b['builder']['builder']) for b in builds)
  name_fmt = '{builder:<%d}' % longest_builder
  if options.print_master:
    longest_bucket = max(len(b['builder']['bucket']) for b in builds)
    name_fmt = ('{bucket:>%d} ' % longest_bucket) + name_fmt
  builds_by_status = {}
  for b in builds:
    builds_by_status.setdefault(b['status'], []).append({
        'id': b['id'],
        'name': name_fmt.format(
            builder=b['builder']['builder'], bucket=b['builder']['bucket']),
    })
  sort_key = lambda b: (b['name'], b['id'])
  def print_builds(title, builds, fmt=None, color=None):
    """Pop matching builds from `builds` dict and print them."""
    if not builds:
      return
    fmt = fmt or '{name} https://ci.chromium.org/b/{id}'
    # Colorize only when enabled and a color was requested for this group.
    if not options.color or color is None:
      colorize = lambda x: x
    else:
      colorize = lambda x: '%s%s%s' % (color, x, Fore.RESET)
    print(colorize(title))
    for b in sorted(builds, key=sort_key):
      print(' ', colorize(fmt.format(**b)))
  total = len(builds)
  print_builds(
      'Successes:', builds_by_status.pop('SUCCESS', []), color=Fore.GREEN)
  print_builds(
      'Infra Failures:', builds_by_status.pop('INFRA_FAILURE', []),
      color=Fore.MAGENTA)
  print_builds('Failures:', builds_by_status.pop('FAILURE', []), color=Fore.RED)
  print_builds('Canceled:', builds_by_status.pop('CANCELED', []), fmt='{name}',
               color=Fore.MAGENTA)
  print_builds('Started:', builds_by_status.pop('STARTED', []),
               color=Fore.YELLOW)
  print_builds(
      'Scheduled:', builds_by_status.pop('SCHEDULED', []), fmt='{name} id={id}')
  # The last section is just in case buildbucket API changes OR there is a bug.
  print_builds(
      'Other:', sum(builds_by_status.values(), []), fmt='{name} id={id}')
  print('Total: %d tryjobs' % total)
def _ComputeDiffLineRanges(files, upstream_commit):
  """Gets the changed line ranges for each file since upstream_commit.

  Parses a git diff on provided files and returns a dict that maps a file name
  to an ordered list of range tuples in the form (start_line, count).
  Ranges are in the same format as a git diff.
  """
  # If files is empty then diff_output will be a full diff.
  if len(files) == 0:
    return {}
  # Take the git diff and find the line ranges where there are changes.
  # -U0 drops context lines so hunk headers describe exactly the changes.
  diff_cmd = BuildGitDiffCmd('-U0', upstream_commit, files, allow_prefix=True)
  diff_output = RunGit(diff_cmd)
  pattern = r'(?:^diff --git a/(?:.*) b/(.*))|(?:^@@.*\+(.*) @@)'
  # 2 capture groups
  # 0 == fname of diff file
  # 1 == 'diff_start,diff_count' or 'diff_start'
  # will match each of
  # diff --git a/foo.foo b/foo.py
  # @@ -12,2 +14,3 @@
  # @@ -12,2 +17 @@
  # running re.findall on the above string with pattern will give
  # [('foo.py', ''), ('', '14,3'), ('', '17')]
  curr_file = None
  line_diffs = {}
  for match in re.findall(pattern, diff_output, flags=re.MULTILINE):
    if match[0] != '':
      # Will match the second filename in diff --git a/a.py b/b.py.
      curr_file = match[0]
      line_diffs[curr_file] = []
    else:
      # Matches +14,3
      if ',' in match[1]:
        diff_start, diff_count = match[1].split(',')
      else:
        # Single line changes are of the form +12 instead of +12,1.
        diff_start = match[1]
        diff_count = 1
      diff_start = int(diff_start)
      diff_count = int(diff_count)
      # If diff_count == 0 this is a removal we can ignore.
      line_diffs[curr_file].append((diff_start, diff_count))
  return line_diffs
def _FindYapfConfigFile(fpath, yapf_config_cache, top_dir=None):
  """Checks if a yapf file is in any parent directory of fpath until top_dir.

  Recursively checks parent directories to find yapf file and if no yapf file
  is found returns None. Uses yapf_config_cache as a cache for previously found
  configs. The cache is also populated for every intermediate path visited.
  """
  fpath = os.path.abspath(fpath)
  # Return result if we've already computed it.
  if fpath in yapf_config_cache:
    return yapf_config_cache[fpath]
  parent_dir = os.path.dirname(fpath)
  if os.path.isfile(fpath):
    ret = _FindYapfConfigFile(parent_dir, yapf_config_cache, top_dir)
  else:
    # Otherwise fpath is a directory
    yapf_file = os.path.join(fpath, YAPF_CONFIG_FILENAME)
    if os.path.isfile(yapf_file):
      ret = yapf_file
    elif fpath == top_dir or parent_dir == fpath:
      # If we're at the top level directory, or if we're at root
      # (dirname of root is root itself) there is no provided style.
      ret = None
    else:
      # Otherwise recurse on the current directory.
      ret = _FindYapfConfigFile(parent_dir, yapf_config_cache, top_dir)
  yapf_config_cache[fpath] = ret
  return ret
def _GetYapfIgnorePatterns(top_dir):
  """Returns all patterns in the .yapfignore file.

  yapf is supposed to handle the ignoring of files listed in .yapfignore
  itself, but this functionality appears to break when explicitly passing
  files to yapf for formatting, so the patterns are applied manually.
  The .yapfignore file is expected in top_dir, the directory yapf is
  invoked from.

  Args:
    top_dir: The top level directory for the repository being formatted.

  Returns:
    A set of all fnmatch patterns to be ignored.
  """
  path = os.path.join(top_dir, '.yapfignore')
  if not os.path.exists(path):
    return set()
  stripped = (line.strip() for line in
              gclient_utils.FileRead(path).split('\n'))
  # Blank lines and '#' comments carry no pattern.
  return {line for line in stripped if line and not line.startswith('#')}
def _FilterYapfIgnoredFiles(filepaths, patterns):
"""Filters out any filepaths that match any of the given patterns.
Args:
filepaths: An iterable of strings containing filepaths to filter.
patterns: An iterable of strings containing fnmatch patterns to filter on.
Returns:
A list of strings containing all the elements of |filepaths| that did not
match any of the patterns in |patterns|.
"""
# Not inlined so that tests can use the same implementation.
return [f for f in filepaths
if not any(fnmatch.fnmatch(f, p) for p in patterns)]
def print_stats(args):
  """Prints statistics about the change to the user."""
  # --no-ext-diff is broken in some versions of Git, so try to work around
  # this by overriding the environment (but there is still a problem if the
  # git config key "diff.external" is used).
  env = GetNoGitPagerEnv()
  env.pop('GIT_EXTERNAL_DIFF', None)
  cmd = ['git', 'diff', '--no-ext-diff', '--stat', '-l100000', '-C50']
  return subprocess2.call(cmd + args, env=env)
class BuildbucketResponseException(Exception):
  """Raised when a Buildbucket RPC fails or returns an error response."""
  pass
class Settings(object):
  """Lazily-loaded per-checkout configuration.

  Values come from git config (rietveld.* / gerrit.* keys) and are refreshed
  at most once per process from a codereview.settings file via
  _LazyUpdateIfNeeded().
  """
  def __init__(self):
    self.cc = None
    self.root = None
    self.tree_status_url = None
    self.viewvc_url = None
    self.updated = False
    self.is_gerrit = None
    self.squash_gerrit_uploads = None
    self.gerrit_skip_ensure_authenticated = None
    self.git_editor = None
    self.format_full_by_default = None
  def _LazyUpdateIfNeeded(self):
    """Updates the settings from a codereview.settings file, if available."""
    if self.updated:
      return
    # The only value that actually changes the behavior is
    # autoupdate = "false". Everything else means "true".
    autoupdate = (
        scm.GIT.GetConfig(self.GetRoot(), 'rietveld.autoupdate', '').lower())
    cr_settings_file = FindCodereviewSettingsFile()
    if autoupdate != 'false' and cr_settings_file:
      LoadCodereviewSettingsFromFile(cr_settings_file)
      cr_settings_file.close()
    self.updated = True
  @staticmethod
  def GetRelativeRoot():
    """Returns the checkout root relative to the current directory."""
    return scm.GIT.GetCheckoutRoot('.')
  def GetRoot(self):
    """Returns (and caches) the absolute checkout root."""
    if self.root is None:
      self.root = os.path.abspath(self.GetRelativeRoot())
    return self.root
  def GetTreeStatusUrl(self, error_ok=False):
    """Returns the tree status URL; dies when unset unless error_ok."""
    if not self.tree_status_url:
      self.tree_status_url = self._GetConfig('rietveld.tree-status-url')
      if self.tree_status_url is None and not error_ok:
        DieWithError(
            'You must configure your tree status URL by running '
            '"git cl config".')
    return self.tree_status_url
  def GetViewVCUrl(self):
    """Returns (and caches) the viewvc URL from config."""
    if not self.viewvc_url:
      self.viewvc_url = self._GetConfig('rietveld.viewvc-url')
    return self.viewvc_url
  def GetBugPrefix(self):
    """Returns the prefix to prepend to bare bug numbers."""
    return self._GetConfig('rietveld.bug-prefix')
  def GetRunPostUploadHook(self):
    """Returns True when the post-upload hook should run after uploads."""
    run_post_upload_hook = self._GetConfig(
        'rietveld.run-post-upload-hook')
    return run_post_upload_hook == "True"
  def GetDefaultCCList(self):
    """Returns the default CC list configured for this checkout."""
    return self._GetConfig('rietveld.cc')
  def GetSquashGerritUploads(self):
    """Returns True if uploads to Gerrit should be squashed by default."""
    if self.squash_gerrit_uploads is None:
      self.squash_gerrit_uploads = self.GetSquashGerritUploadsOverride()
      if self.squash_gerrit_uploads is None:
        # Default is squash now (http://crbug.com/611892#c23).
        self.squash_gerrit_uploads = self._GetConfig(
            'gerrit.squash-uploads').lower() != 'false'
    return self.squash_gerrit_uploads
  def GetSquashGerritUploadsOverride(self):
    """Return True or False if codereview.settings should be overridden.

    Returns None if no override has been defined.
    """
    # See also http://crbug.com/611892#c23
    result = self._GetConfig('gerrit.override-squash-uploads').lower()
    if result == 'true':
      return True
    if result == 'false':
      return False
    return None
  def GetGerritSkipEnsureAuthenticated(self):
    """Return True if EnsureAuthenticated should not be done for Gerrit
    uploads."""
    if self.gerrit_skip_ensure_authenticated is None:
      self.gerrit_skip_ensure_authenticated = self._GetConfig(
          'gerrit.skip-ensure-authenticated').lower() == 'true'
    return self.gerrit_skip_ensure_authenticated
  def GetGitEditor(self):
    """Returns the editor specified in the git config, or None if none is."""
    if self.git_editor is None:
      # Git requires single quotes for paths with spaces. We need to replace
      # them with double quotes for Windows to treat such paths as a single
      # path.
      self.git_editor = self._GetConfig('core.editor').replace('\'', '"')
    return self.git_editor or None
  def GetLintRegex(self):
    """Returns the regex selecting files to cpplint."""
    return self._GetConfig('rietveld.cpplint-regex', DEFAULT_LINT_REGEX)
  def GetLintIgnoreRegex(self):
    """Returns the regex selecting files to exclude from cpplint."""
    return self._GetConfig(
        'rietveld.cpplint-ignore-regex', DEFAULT_LINT_IGNORE_REGEX)
  def GetFormatFullByDefault(self):
    """Returns True when `git cl format` should default to full-file mode."""
    if self.format_full_by_default is None:
      result = (
          RunGit(['config', '--bool', 'rietveld.format-full-by-default'],
                 error_ok=True).strip())
      self.format_full_by_default = (result == 'true')
    return self.format_full_by_default
  def _GetConfig(self, key, default=''):
    # All reads funnel through here so the codereview.settings refresh
    # happens at most once, on first access.
    self._LazyUpdateIfNeeded()
    return scm.GIT.GetConfig(self.GetRoot(), key, default)
class _CQState(object):
  """Enum for states of CL with respect to CQ."""
  NONE = 'none'
  QUICK_RUN = 'quick_run'
  DRY_RUN = 'dry_run'
  COMMIT = 'commit'
  # Every recognised state, in one place for validation.
  ALL_STATES = [NONE, QUICK_RUN, DRY_RUN, COMMIT]
class _ParsedIssueNumberArgument(object):
  """Result of parsing an issue argument: issue/patchset numbers and host."""
  def __init__(self, issue=None, patchset=None, hostname=None):
    self.issue = issue
    self.patchset = patchset
    self.hostname = hostname
  @property
  def valid(self):
    # Only the issue number is mandatory for a parse result to be usable.
    return self.issue is not None
def ParseIssueNumberArgument(arg):
  """Parses the issue argument and returns _ParsedIssueNumberArgument.

  Accepts a bare int, a digit string, or an http(s) Gerrit URL (new UI path,
  old GWT '#/c/...' fragment, known crrev.com short links, or a bare
  /<issue> path). Returns an invalid (issue=None) result when nothing
  matches.
  """
  fail_result = _ParsedIssueNumberArgument()
  if isinstance(arg, int):
    return _ParsedIssueNumberArgument(issue=arg)
  if not isinstance(arg, basestring):
    return fail_result
  if arg.isdigit():
    return _ParsedIssueNumberArgument(issue=int(arg))
  if not arg.startswith('http'):
    return fail_result
  url = gclient_utils.UpgradeToHttps(arg)
  # Rewrite known short-link hosts (crrev.com) onto their Gerrit host.
  for gerrit_url, short_url in _KNOWN_GERRIT_TO_SHORT_URLS.items():
    if url.startswith(short_url):
      url = gerrit_url + url[len(short_url):]
      break
  try:
    parsed_url = urllib.parse.urlparse(url)
  except ValueError:
    return fail_result
  # Gerrit's new UI is https://domain/c/project/+/<issue_number>[/[patchset]]
  # But old GWT UI is https://domain/#/c/project/+/<issue_number>[/[patchset]]
  # Short urls like https://domain/<issue_number> can be used, but don't allow
  # specifying the patchset (you'd 404), but we allow that here.
  if parsed_url.path == '/':
    part = parsed_url.fragment
  else:
    part = parsed_url.path
  match = re.match(
      r'(/c(/.*/\+)?)?/(?P<issue>\d+)(/(?P<patchset>\d+)?/?)?$', part)
  if not match:
    return fail_result
  issue = int(match.group('issue'))
  patchset = match.group('patchset')
  return _ParsedIssueNumberArgument(
      issue=issue,
      patchset=int(patchset) if patchset else None,
      hostname=parsed_url.netloc)
def _create_description_from_log(args):
  """Pulls out the commit log to use as a base for the CL description."""
  if len(args) == 1:
    ref = args[0]
    if not ref.endswith('.'):
      # A bare ref: log everything made since it.
      log_args = [ref + '..']
    elif ref.endswith('...'):
      # Symmetric-difference spec: convert to the two-dot form git log wants.
      log_args = [ref[:-1]]
    else:
      log_args = list(args)  # Hope for the best!
  elif len(args) == 2:
    log_args = [args[0] + '..' + args[1]]
  else:
    log_args = list(args)  # Hope for the best!
  return RunGit(['log', '--pretty=format:%B%n'] + log_args)
class GerritChangeNotExists(Exception):
  """Raised when a Gerrit change cannot be found or accessed."""
  def __init__(self, issue, url):
    super(GerritChangeNotExists, self).__init__()
    self.issue = issue
    self.url = url
  def __str__(self):
    return 'change %s at %s does not exist or you have no access to it' % (
        self.issue, self.url)
# Lightweight record describing one review message on a change.
_CommentSummary = collections.namedtuple(
    '_CommentSummary', ['date', 'message', 'sender', 'autogenerated',
                        # TODO(tandrii): these two aren't known in Gerrit.
                        'approval', 'disapproval'])
class Changelist(object):
"""Changelist works with one changelist in local branch.
Notes:
* Not safe for concurrent multi-{thread,process} use.
* Caches values from current branch. Therefore, re-use after branch change
with great care.
"""
def __init__(self,
branchref=None,
issue=None,
codereview_host=None,
commit_date=None):
"""Create a new ChangeList instance.
**kwargs will be passed directly to Gerrit implementation.
"""
# Poke settings so we get the "configure your server" message if necessary.
global settings
if not settings:
# Happens when git_cl.py is used as a utility library.
settings = Settings()
self.branchref = branchref
if self.branchref:
assert branchref.startswith('refs/heads/')
self.branch = scm.GIT.ShortBranchName(self.branchref)
else:
self.branch = None
self.commit_date = commit_date
self.upstream_branch = None
self.lookedup_issue = False
self.issue = issue or None
self.description = None
self.lookedup_patchset = False
self.patchset = None
self.cc = None
self.more_cc = []
self._remote = None
self._cached_remote_url = (False, None) # (is_cached, value)
# Lazily cached values.
self._gerrit_host = None # e.g. chromium-review.googlesource.com
self._gerrit_server = None # e.g. https://chromium-review.googlesource.com
self._owners_client = None
# Map from change number (issue) to its detail cache.
self._detail_cache = {}
if codereview_host is not None:
assert not codereview_host.startswith('https://'), codereview_host
self._gerrit_host = codereview_host
self._gerrit_server = 'https://%s' % codereview_host
@property
def owners_client(self):
if self._owners_client is None:
remote, remote_branch = self.GetRemoteBranch()
branch = GetTargetRef(remote, remote_branch, None)
self._owners_client = owners_client.GetCodeOwnersClient(
root=settings.GetRoot(),
upstream=self.GetCommonAncestorWithUpstream(),
host=self.GetGerritHost(),
project=self.GetGerritProject(),
branch=branch)
return self._owners_client
def GetCCList(self):
"""Returns the users cc'd on this CL.
The return value is a string suitable for passing to git cl with the --cc
flag.
"""
if self.cc is None:
base_cc = settings.GetDefaultCCList()
more_cc = ','.join(self.more_cc)
self.cc = ','.join(filter(None, (base_cc, more_cc))) or ''
return self.cc
def ExtendCC(self, more_cc):
"""Extends the list of users to cc on this CL based on the changed files."""
self.more_cc.extend(more_cc)
def GetCommitDate(self):
"""Returns the commit date as provided in the constructor"""
return self.commit_date
def GetBranch(self):
"""Returns the short branch name, e.g. 'main'."""
if not self.branch:
branchref = scm.GIT.GetBranchRef(settings.GetRoot())
if not branchref:
return None
self.branchref = branchref
self.branch = scm.GIT.ShortBranchName(self.branchref)
return self.branch
def GetBranchRef(self):
"""Returns the full branch name, e.g. 'refs/heads/main'."""
self.GetBranch() # Poke the lazy loader.
return self.branchref
def _GitGetBranchConfigValue(self, key, default=None):
return scm.GIT.GetBranchConfig(
settings.GetRoot(), self.GetBranch(), key, default)
def _GitSetBranchConfigValue(self, key, value):
action = 'set %s to %r' % (key, value)
if not value:
action = 'unset %s' % key
assert self.GetBranch(), 'a branch is needed to ' + action
return scm.GIT.SetBranchConfig(
settings.GetRoot(), self.GetBranch(), key, value)
@staticmethod
def FetchUpstreamTuple(branch):
"""Returns a tuple containing remote and remote ref,
e.g. 'origin', 'refs/heads/main'
"""
remote, upstream_branch = scm.GIT.FetchUpstreamTuple(
settings.GetRoot(), branch)
if not remote or not upstream_branch:
DieWithError(
'Unable to determine default branch to diff against.\n'
'Verify this branch is set up to track another \n'
'(via the --track argument to "git checkout -b ..."). \n'
'or pass complete "git diff"-style arguments if supported, like\n'
' git cl upload origin/main\n')
return remote, upstream_branch
def GetCommonAncestorWithUpstream(self):
upstream_branch = self.GetUpstreamBranch()
if not scm.GIT.IsValidRevision(settings.GetRoot(), upstream_branch):
DieWithError('The upstream for the current branch (%s) does not exist '
'anymore.\nPlease fix it and try again.' % self.GetBranch())
return git_common.get_or_create_merge_base(self.GetBranch(),
upstream_branch)
def GetUpstreamBranch(self):
if self.upstream_branch is None:
remote, upstream_branch = self.FetchUpstreamTuple(self.GetBranch())
if remote != '.':
upstream_branch = upstream_branch.replace('refs/heads/',
'refs/remotes/%s/' % remote)
upstream_branch = upstream_branch.replace('refs/branch-heads/',
'refs/remotes/branch-heads/')
self.upstream_branch = upstream_branch
return self.upstream_branch
def GetRemoteBranch(self):
if not self._remote:
remote, branch = None, self.GetBranch()
seen_branches = set()
while branch not in seen_branches:
seen_branches.add(branch)
remote, branch = self.FetchUpstreamTuple(branch)
branch = scm.GIT.ShortBranchName(branch)
if remote != '.' or branch.startswith('refs/remotes'):
break
else:
remotes = RunGit(['remote'], error_ok=True).split()
if len(remotes) == 1:
remote, = remotes
elif 'origin' in remotes:
remote = 'origin'
logging.warning('Could not determine which remote this change is '
'associated with, so defaulting to "%s".' %
self._remote)
else:
logging.warning('Could not determine which remote this change is '
'associated with.')
branch = 'HEAD'
if branch.startswith('refs/remotes'):
self._remote = (remote, branch)
elif branch.startswith('refs/branch-heads/'):
self._remote = (remote, branch.replace('refs/', 'refs/remotes/'))
else:
self._remote = (remote, 'refs/remotes/%s/%s' % (remote, branch))
return self._remote
  def GetRemoteUrl(self):
    """Return the configured remote URL, e.g. 'git://example.org/foo.git/'.

    Returns None if there is no remote. Handles the git-cache case where the
    configured remote is a local directory whose own remote points at the
    real git host. The result is cached on the instance.
    """
    is_cached, value = self._cached_remote_url
    if is_cached:
      return value
    remote, _ = self.GetRemoteBranch()
    url = scm.GIT.GetConfig(settings.GetRoot(), 'remote.%s.url' % remote, '')
    # Check if the remote url can be parsed as an URL.
    host = urllib.parse.urlparse(url).netloc
    if host:
      self._cached_remote_url = (True, url)
      return url
    # If it cannot be parsed as an url, assume it is a local directory,
    # probably a git cache.
    logging.warning('"%s" doesn\'t appear to point to a git host. '
                    'Interpreting it as a local directory.', url)
    if not os.path.isdir(url):
      logging.error(
          'Remote "%(remote)s" for branch "%(branch)s" points to "%(url)s", '
          'but it doesn\'t exist.',
          {'remote': remote, 'branch': self.GetBranch(), 'url': url})
      return None
    cache_path = url
    # Resolve the cache directory's own remote to find the real host URL.
    url = scm.GIT.GetConfig(url, 'remote.%s.url' % remote, '')
    host = urllib.parse.urlparse(url).netloc
    if not host:
      logging.error(
          'Remote "%(remote)s" for branch "%(branch)s" points to '
          '"%(cache_path)s", but it is misconfigured.\n'
          '"%(cache_path)s" must be a git repo and must have a remote named '
          '"%(remote)s" pointing to the git host.', {
              'remote': remote,
              'cache_path': cache_path,
              'branch': self.GetBranch()})
      return None
    self._cached_remote_url = (True, url)
    return url
def GetIssue(self):
"""Returns the issue number as a int or None if not set."""
if self.issue is None and not self.lookedup_issue:
self.issue = self._GitGetBranchConfigValue(ISSUE_CONFIG_KEY)
if self.issue is not None:
self.issue = int(self.issue)
self.lookedup_issue = True
return self.issue
def GetIssueURL(self, short=False):
"""Get the URL for a particular issue."""
issue = self.GetIssue()
if not issue:
return None
server = self.GetCodereviewServer()
if short:
server = _KNOWN_GERRIT_TO_SHORT_URLS.get(server, server)
return '%s/%s' % (server, issue)
def FetchDescription(self, pretty=False):
assert self.GetIssue(), 'issue is required to query Gerrit'
if self.description is None:
data = self._GetChangeDetail(['CURRENT_REVISION', 'CURRENT_COMMIT'])
current_rev = data['current_revision']
self.description = data['revisions'][current_rev]['commit']['message']
if not pretty:
return self.description
# Set width to 72 columns + 2 space indent.
wrapper = textwrap.TextWrapper(width=74, replace_whitespace=True)
wrapper.initial_indent = wrapper.subsequent_indent = ' '
lines = self.description.splitlines()
return '\n'.join([wrapper.fill(line) for line in lines])
def GetPatchset(self):
"""Returns the patchset number as a int or None if not set."""
if self.patchset is None and not self.lookedup_patchset:
self.patchset = self._GitGetBranchConfigValue(PATCHSET_CONFIG_KEY)
if self.patchset is not None:
self.patchset = int(self.patchset)
self.lookedup_patchset = True
return self.patchset
def GetAuthor(self):
return scm.GIT.GetConfig(settings.GetRoot(), 'user.email')
def SetPatchset(self, patchset):
"""Set this branch's patchset. If patchset=0, clears the patchset."""
assert self.GetBranch()
if not patchset:
self.patchset = None
else:
self.patchset = int(patchset)
self._GitSetBranchConfigValue(PATCHSET_CONFIG_KEY, str(self.patchset))
  def SetIssue(self, issue=None):
    """Set this branch's issue. If issue isn't given, clears the issue.

    Clearing also resets the related per-branch config keys and, when the
    patched-in commit carries a Change-Id footer, amends HEAD to remove it
    so a later upload creates a fresh change.
    """
    assert self.GetBranch()
    if issue:
      issue = int(issue)
      self._GitSetBranchConfigValue(ISSUE_CONFIG_KEY, str(issue))
      self.issue = issue
      # Remember which server the issue lives on, if known.
      codereview_server = self.GetCodereviewServer()
      if codereview_server:
        self._GitSetBranchConfigValue(
            CODEREVIEW_SERVER_CONFIG_KEY, codereview_server)
    else:
      # Reset all of these just to be clean.
      reset_suffixes = [
          'last-upload-hash',
          ISSUE_CONFIG_KEY,
          PATCHSET_CONFIG_KEY,
          CODEREVIEW_SERVER_CONFIG_KEY,
          'gerritsquashhash',
      ]
      for prop in reset_suffixes:
        try:
          self._GitSetBranchConfigValue(prop, None)
        except subprocess2.CalledProcessError:
          # Best effort: the key may not exist.
          pass
      msg = RunGit(['log', '-1', '--format=%B']).strip()
      if msg and git_footers.get_footer_change_id(msg):
        print('WARNING: The change patched into this branch has a Change-Id. '
              'Removing it.')
        RunGit(['commit', '--amend', '-m',
                git_footers.remove_footer(msg, 'Change-Id')])
      self.lookedup_issue = True
      self.issue = None
      self.patchset = None
def GetAffectedFiles(self, upstream):
try:
return [f for _, f in scm.GIT.CaptureStatus(settings.GetRoot(), upstream)]
except subprocess2.CalledProcessError:
DieWithError(
('\nFailed to diff against upstream branch %s\n\n'
'This branch probably doesn\'t exist anymore. To reset the\n'
'tracking branch, please run\n'
' git branch --set-upstream-to origin/main %s\n'
'or replace origin/main with the relevant branch') %
(upstream, self.GetBranch()))
  def UpdateDescription(self, description, force=False):
    """Pushes a new commit message for this change to Gerrit.

    A pending (unpublished) change edit blocks the update; unless force is
    True, the user is asked before the pending edit is deleted.
    """
    assert self.GetIssue(), 'issue is required to update description'
    if gerrit_util.HasPendingChangeEdit(
        self.GetGerritHost(), self._GerritChangeIdentifier()):
      if not force:
        confirm_or_exit(
            'The description cannot be modified while the issue has a pending '
            'unpublished edit. Either publish the edit in the Gerrit web UI '
            'or delete it.\n\n', action='delete the unpublished edit')
      gerrit_util.DeletePendingChangeEdit(
          self.GetGerritHost(), self._GerritChangeIdentifier())
    gerrit_util.SetCommitMessage(
        self.GetGerritHost(), self._GerritChangeIdentifier(),
        description, notify='NONE')
    # Keep the locally cached description in sync with what was pushed.
    self.description = description
def _GetCommonPresubmitArgs(self, verbose, upstream):
args = [
'--root', settings.GetRoot(),
'--upstream', upstream,
]
args.extend(['--verbose'] * verbose)
remote, remote_branch = self.GetRemoteBranch()
target_ref = GetTargetRef(remote, remote_branch, None)
args.extend(['--gerrit_url', self.GetCodereviewServer()])
args.extend(['--gerrit_project', self.GetGerritProject()])
args.extend(['--gerrit_branch', target_ref])
author = self.GetAuthor()
issue = self.GetIssue()
patchset = self.GetPatchset()
if author:
args.extend(['--author', author])
if issue:
args.extend(['--issue', str(issue)])
if patchset:
args.extend(['--patchset', str(patchset)])
return args
  def RunHook(self, committing, may_prompt, verbose, parallel, upstream,
              description, all_files, resultdb=False, realm=None):
    """Calls sys.exit() if the hook fails; returns a HookResults otherwise.

    Presubmit is run twice, under Python 2 and Python 3, and the two JSON
    result dicts are merged via _MergePresubmitResults.
    """
    args = self._GetCommonPresubmitArgs(verbose, upstream)
    args.append('--commit' if committing else '--upload')
    if may_prompt:
      args.append('--may_prompt')
    if parallel:
      args.append('--parallel')
    if all_files:
      args.append('--all_files')
    if resultdb and not realm:
      # TODO (crbug.com/1113463): store realm somewhere and look it up so
      # it is not required to pass the realm flag
      print('Note: ResultDB reporting will NOT be performed because --realm'
            ' was not specified. To enable ResultDB, please run the command'
            ' again with the --realm argument to specify the LUCI realm.')
    py2_results = self._RunPresubmit(args, resultdb, realm, description,
                                     use_python3=False)
    py3_results = self._RunPresubmit(args, resultdb, realm, description,
                                     use_python3=True)
    return self._MergePresubmitResults(py2_results, py3_results)
  def _RunPresubmit(self, args, resultdb, realm, description, use_python3):
    """Runs a single presubmit pass and returns its parsed JSON output.

    Exits the whole process with the child's exit code on failure.
    """
    args = args[:]  # Do not mutate the caller's argument list.
    vpython = 'vpython3' if use_python3 else 'vpython'
    with gclient_utils.temporary_file() as description_file:
      with gclient_utils.temporary_file() as json_output:
        gclient_utils.FileWrite(description_file, description)
        args.extend(['--json_output', json_output])
        args.extend(['--description_file', description_file])
        start = time_time()
        cmd = [vpython, PRESUBMIT_SUPPORT] + args
        if resultdb and realm:
          # Wrap the presubmit run in ResultDB streaming.
          cmd = ['rdb', 'stream', '-new', '-realm', realm, '--'] + cmd
        p = subprocess2.Popen(cmd)
        exit_code = p.wait()
        # Record timing/exit metrics for this sub-command.
        metrics.collector.add_repeated('sub_commands', {
            'command': 'presubmit',
            'execution_time': time_time() - start,
            'exit_code': exit_code,
        })
        if exit_code:
          sys.exit(exit_code)
        json_results = gclient_utils.FileRead(json_output)
        return json.loads(json_results)
def _MergePresubmitResults(self, py2_results, py3_results):
return {
'more_cc': sorted(set(py2_results.get('more_cc', []) +
py3_results.get('more_cc', []))),
'errors': (
py2_results.get('errors', []) + py3_results.get('errors', [])),
'notifications': (
py2_results.get('notifications', []) +
py3_results.get('notifications', [])),
'warnings': (
py2_results.get('warnings', []) + py3_results.get('warnings', []))
}
def RunPostUploadHook(self, verbose, upstream, description):
args = self._GetCommonPresubmitArgs(verbose, upstream)
args.append('--post_upload')
with gclient_utils.temporary_file() as description_file:
gclient_utils.FileWrite(description_file, description)
args.extend(['--description_file', description_file])
p = subprocess2.Popen(['vpython', PRESUBMIT_SUPPORT] + args)
p.wait()
  def _GetDescriptionForUpload(self, options, git_diff_args, files):
    """Builds the ChangeDescription for an upload.

    Sources the text from the existing issue, --message, or the git log;
    extracts bug/fixed numbers from the branch name; and optionally fills
    OWNERS coverage gaps into options.reviewers/options.tbrs (note: this
    mutates the options object in place on purpose, so presubmit sees the
    final reviewer lists).
    """
    # Get description message for upload.
    if self.GetIssue():
      description = self.FetchDescription()
    elif options.message:
      description = options.message
    else:
      description = _create_description_from_log(git_diff_args)
      if options.title and options.squash:
        description = options.title + '\n\n' + description
    # Extract bug number from branch name.
    bug = options.bug
    fixed = options.fixed
    match = re.match(r'(?P<type>bug|fix(?:e[sd])?)[_-]?(?P<bugnum>\d+)',
                     self.GetBranch())
    if not bug and not fixed and match:
      if match.group('type') == 'bug':
        bug = match.group('bugnum')
      else:
        fixed = match.group('bugnum')
    change_description = ChangeDescription(description, bug, fixed)
    # Fill gaps in OWNERS coverage to tbrs/reviewers if requested.
    if options.add_owners_to:
      assert options.add_owners_to in ('TBR', 'R'), options.add_owners_to
      status = self.owners_client.GetFilesApprovalStatus(
          files, [], options.tbrs + options.reviewers)
      missing_files = [
          f for f in files
          if status[f] == self._owners_client.INSUFFICIENT_REVIEWERS
      ]
      owners = self.owners_client.SuggestOwners(
          missing_files, exclude=[self.GetAuthor()])
      if options.add_owners_to == 'TBR':
        assert isinstance(options.tbrs, list), options.tbrs
        options.tbrs.extend(owners)
      else:
        assert isinstance(options.reviewers, list), options.reviewers
        options.reviewers.extend(owners)
    # Set the reviewer list now so that presubmit checks can access it.
    if options.reviewers or options.tbrs:
      change_description.update_reviewers(options.reviewers, options.tbrs)
    return change_description
def _GetTitleForUpload(self, options):
# When not squashing, just return options.title.
if not options.squash:
return options.title
# On first upload, patchset title is always this string, while options.title
# gets converted to first line of message.
if not self.GetIssue():
return 'Initial upload'
# When uploading subsequent patchsets, options.message is taken as the title
# if options.title is not provided.
if options.title:
return options.title
if options.message:
return options.message.strip()
# Use the subject of the last commit as title by default.
title = RunGit(['show', '-s', '--format=%s', 'HEAD']).strip()
if options.force or options.skip_title:
return title
user_title = gclient_utils.AskForData('Title for patchset [%s]: ' % title)
# Use the default title if the user confirms the default with a 'y'.
if user_title.lower() == 'y':
return title
return user_title or title
  def CMDUpload(self, options, git_diff_args, orig_args):
    """Uploads a change to codereview.

    Determines the diff base, runs auth/upload preflight checks and the
    presubmit hooks, uploads via CMDUploadChange, then handles post-upload
    hooks and optional dependent-branch re-uploads. Returns the exit code
    from CMDUploadChange (or upload_branch_deps).
    """
    custom_cl_base = None
    if git_diff_args:
      custom_cl_base = base_branch = git_diff_args[0]
    else:
      if self.GetBranch() is None:
        DieWithError('Can\'t upload from detached HEAD state. Get on a branch!')
      # Default to diffing against common ancestor of upstream branch
      base_branch = self.GetCommonAncestorWithUpstream()
      git_diff_args = [base_branch, 'HEAD']
    # Fast best-effort checks to abort before running potentially expensive
    # hooks if uploading is likely to fail anyway. Passing these checks does
    # not guarantee that uploading will not fail.
    self.EnsureAuthenticated(force=options.force)
    self.EnsureCanUploadPatchset(force=options.force)
    # Apply watchlists on upload.
    watchlist = watchlists.Watchlists(settings.GetRoot())
    files = self.GetAffectedFiles(base_branch)
    if not options.bypass_watchlists:
      self.ExtendCC(watchlist.GetWatchersForPaths(files))
    change_desc = self._GetDescriptionForUpload(options, git_diff_args, files)
    if not options.bypass_hooks:
      hook_results = self.RunHook(
          committing=False,
          may_prompt=not options.force,
          verbose=options.verbose,
          parallel=options.parallel,
          upstream=base_branch,
          description=change_desc.description,
          all_files=False,
          resultdb=options.resultdb,
          realm=options.realm)
      self.ExtendCC(hook_results['more_cc'])
    print_stats(git_diff_args)
    ret = self.CMDUploadChange(
        options, git_diff_args, custom_cl_base, change_desc)
    if not ret:
      # Remember what was uploaded so CMDLand can detect local drift later.
      self._GitSetBranchConfigValue(
          'last-upload-hash', scm.GIT.ResolveCommit(settings.GetRoot(), 'HEAD'))
      # Run post upload hooks, if specified.
      if settings.GetRunPostUploadHook():
        self.RunPostUploadHook(
            options.verbose, base_branch, change_desc.description)
      # Upload all dependencies if specified.
      if options.dependencies:
        print()
        print('--dependencies has been specified.')
        print('All dependent local branches will be re-uploaded.')
        print()
        # Remove the dependencies flag from args so that we do not end up in a
        # loop.
        orig_args.remove('--dependencies')
        ret = upload_branch_deps(self, orig_args, options.force)
    return ret
  def SetCQState(self, new_state):
    """Updates the CQ state for the latest patchset.

    Issue must have been already uploaded and known. Optionally allows for
    updating Quick-Run (QR) state. Returns 0 on success; any failure prints
    a diagnostic and re-raises.
    """
    assert new_state in _CQState.ALL_STATES
    assert self.GetIssue()
    try:
      # Gerrit label votes corresponding to each CQ state.
      vote_map = {
          _CQState.NONE: 0,
          _CQState.QUICK_RUN: 1,
          _CQState.DRY_RUN: 1,
          _CQState.COMMIT: 2,
      }
      if new_state == _CQState.QUICK_RUN:
        # Quick-Run needs both the Commit-Queue and Quick-Run labels set.
        labels = {
            'Commit-Queue': vote_map[_CQState.DRY_RUN],
            'Quick-Run': vote_map[_CQState.QUICK_RUN],
        }
      else:
        labels = {'Commit-Queue': vote_map[new_state]}
      # Avoid spamming reviewers with email for dry runs.
      notify = False if new_state == _CQState.DRY_RUN else None
      gerrit_util.SetReview(
          self.GetGerritHost(), self._GerritChangeIdentifier(),
          labels=labels, notify=notify)
      return 0
    except KeyboardInterrupt:
      raise
    except:
      # Intentionally broad: print guidance for any failure, then re-raise
      # so the stack trace is still shown.
      print('WARNING: Failed to %s.\n'
            'Either:\n'
            ' * Your project has no CQ,\n'
            ' * You don\'t have permission to change the CQ state,\n'
            ' * There\'s a bug in this code (see stack trace below).\n'
            'Consider specifying which bots to trigger manually or asking your '
            'project owners for permissions or contacting Chrome Infra at:\n'
            'https://www.chromium.org/infra\n\n' %
            ('cancel CQ' if new_state == _CQState.NONE else 'trigger CQ'))
      # Still raise exception so that stack trace is printed.
      raise
  def GetGerritHost(self):
    """Returns the Gerrit host, expanding abbreviated internal host names."""
    # Lazy load of configs.
    self.GetCodereviewServer()
    if self._gerrit_host and '.' not in self._gerrit_host:
      # Abbreviated domain like "chromium" instead of chromium.googlesource.com.
      # This happens for internal stuff http://crbug.com/614312.
      parsed = urllib.parse.urlparse(self.GetRemoteUrl())
      if parsed.scheme == 'sso':
        print('WARNING: using non-https URLs for remote is likely broken\n'
              '  Your current remote is: %s' % self.GetRemoteUrl())
        self._gerrit_host = '%s.googlesource.com' % self._gerrit_host
        self._gerrit_server = 'https://%s' % self._gerrit_host
    return self._gerrit_host
def _GetGitHost(self):
"""Returns git host to be used when uploading change to Gerrit."""
remote_url = self.GetRemoteUrl()
if not remote_url:
return None
return urllib.parse.urlparse(remote_url).netloc
  def GetCodereviewServer(self):
    """Returns the Gerrit server URL, deriving and caching it if needed."""
    if not self._gerrit_server:
      # If we're on a branch then get the server potentially associated
      # with that branch.
      if self.GetIssue() and self.GetBranch():
        self._gerrit_server = self._GitGetBranchConfigValue(
            CODEREVIEW_SERVER_CONFIG_KEY)
        if self._gerrit_server:
          self._gerrit_host = urllib.parse.urlparse(self._gerrit_server).netloc
      if not self._gerrit_server:
        # We assume repo to be hosted on Gerrit, and hence Gerrit server
        # has "-review" suffix for lowest level subdomain.
        parts = self._GetGitHost().split('.')
        parts[0] = parts[0] + '-review'
        self._gerrit_host = '.'.join(parts)
        self._gerrit_server = 'https://%s' % self._gerrit_host
    return self._gerrit_server
def GetGerritProject(self):
"""Returns Gerrit project name based on remote git URL."""
remote_url = self.GetRemoteUrl()
if remote_url is None:
logging.warning('can\'t detect Gerrit project.')
return None
project = urllib.parse.urlparse(remote_url).path.strip('/')
if project.endswith('.git'):
project = project[:-len('.git')]
# *.googlesource.com hosts ensure that Git/Gerrit projects don't start with
# 'a/' prefix, because 'a/' prefix is used to force authentication in
# gitiles/git-over-https protocol. E.g.,
# https://chromium.googlesource.com/a/v8/v8 refers to the same repo/project
# as
# https://chromium.googlesource.com/v8/v8
if project.startswith('a/'):
project = project[len('a/'):]
return project
def _GerritChangeIdentifier(self):
"""Handy method for gerrit_util.ChangeIdentifier for a given CL.
Not to be confused by value of "Change-Id:" footer.
If Gerrit project can be determined, this will speed up Gerrit HTTP API RPC.
"""
project = self.GetGerritProject()
if project:
return gerrit_util.ChangeIdentifier(project, self.GetIssue())
# Fall back on still unique, but less efficient change number.
return str(self.GetIssue())
  def EnsureAuthenticated(self, force, refresh=None):
    """Best effort check that user is authenticated with Gerrit server.

    Only applies to cookies-based authentication over https remotes; warns
    (or dies) when the Gerrit and git hosts have missing or mismatched
    credentials.
    """
    if settings.GetGerritSkipEnsureAuthenticated():
      # For projects with unusual authentication schemes.
      # See http://crbug.com/603378.
      return
    # Check presence of cookies only if using cookies-based auth method.
    cookie_auth = gerrit_util.Authenticator.get()
    if not isinstance(cookie_auth, gerrit_util.CookiesAuthenticator):
      return
    remote_url = self.GetRemoteUrl()
    if remote_url is None:
      logging.warning('invalid remote')
      return
    if urllib.parse.urlparse(remote_url).scheme != 'https':
      logging.warning('Ignoring branch %(branch)s with non-https remote '
                      '%(remote)s', {
                          'branch': self.branch,
                          'remote': self.GetRemoteUrl()
                      })
      return
    # Lazy-loader to identify Gerrit and Git hosts.
    self.GetCodereviewServer()
    git_host = self._GetGitHost()
    assert self._gerrit_server and self._gerrit_host and git_host
    gerrit_auth = cookie_auth.get_auth_header(self._gerrit_host)
    git_auth = cookie_auth.get_auth_header(git_host)
    if gerrit_auth and git_auth:
      if gerrit_auth == git_auth:
        return
      # A wildcard .googlesource.com credential can mask per-host creds.
      all_gsrc = cookie_auth.get_auth_header('d0esN0tEx1st.googlesource.com')
      print(
          'WARNING: You have different credentials for Gerrit and git hosts:\n'
          '           %s\n'
          '           %s\n'
          '  Consider running the following command:\n'
          '    git cl creds-check\n'
          '  %s\n'
          '  %s' %
          (git_host, self._gerrit_host,
           ('Hint: delete creds for .googlesource.com' if all_gsrc else ''),
           cookie_auth.get_new_password_message(git_host)))
      if not force:
        confirm_or_exit('If you know what you are doing', action='continue')
      return
    else:
      missing = (
          ([] if gerrit_auth else [self._gerrit_host]) +
          ([] if git_auth else [git_host]))
      DieWithError('Credentials for the following hosts are required:\n'
                   '  %s\n'
                   'These are read from %s (or legacy %s)\n'
                   '%s' % (
                       '\n  '.join(missing),
                       cookie_auth.get_gitcookies_path(),
                       cookie_auth.get_netrc_path(),
                       cookie_auth.get_new_password_message(git_host)))
  def EnsureCanUploadPatchset(self, force):
    """Aborts upload when the change is closed; warns when owned by another.

    No-op without an issue. Dies if the change was merged or abandoned;
    otherwise, for cookies-based auth, compares the change owner against
    the authenticated Gerrit account and prompts unless force is True.
    """
    if not self.GetIssue():
      return
    status = self._GetChangeDetail()['status']
    if status in ('MERGED', 'ABANDONED'):
      DieWithError('Change %s has been %s, new uploads are not allowed' %
                   (self.GetIssueURL(),
                    'submitted' if status == 'MERGED' else 'abandoned'))
    # TODO(vadimsh): For some reason the chunk of code below was skipped if
    # 'is_gce' is True. I'm just refactoring it to be 'skip if not cookies'.
    # Apparently this check is not very important? Otherwise get_auth_email
    # could have been added to other implementations of Authenticator.
    cookies_auth = gerrit_util.Authenticator.get()
    if not isinstance(cookies_auth, gerrit_util.CookiesAuthenticator):
      return
    cookies_user = cookies_auth.get_auth_email(self.GetGerritHost())
    if self.GetIssueOwner() == cookies_user:
      return
    logging.debug('change %s owner is %s, cookies user is %s',
                  self.GetIssue(), self.GetIssueOwner(), cookies_user)
    # Maybe user has linked accounts or something like that,
    # so ask what Gerrit thinks of this user.
    details = gerrit_util.GetAccountDetails(self.GetGerritHost(), 'self')
    if details['email'] == self.GetIssueOwner():
      return
    if not force:
      print('WARNING: Change %s is owned by %s, but you authenticate to Gerrit '
            'as %s.\n'
            'Uploading may fail due to lack of permissions.' %
            (self.GetIssue(), self.GetIssueOwner(), details['email']))
      confirm_or_exit(action='upload')
  def GetStatus(self):
    """Applies a rough heuristic to give a simple summary of an issue's review
    or CQ status, assuming adherence to a common workflow.

    Returns None if no issue for this branch, or one of the following keywords:
      * 'error' - error from review tool (including deleted issues)
      * 'unsent' - no reviewers added
      * 'waiting' - waiting for review
      * 'reply' - waiting for uploader to reply to review
      * 'lgtm' - Code-Review label has been set
      * 'dry-run' - dry-running in the CQ
      * 'commit' - in the CQ
      * 'closed' - successfully submitted or abandoned
    """
    if not self.GetIssue():
      return None
    try:
      data = self._GetChangeDetail([
          'DETAILED_LABELS', 'CURRENT_REVISION', 'SUBMITTABLE'])
    except GerritChangeNotExists:
      return 'error'
    if data['status'] in ('ABANDONED', 'MERGED'):
      return 'closed'
    # CQ vote 2 means full run, 1 means dry run; take the max across voters.
    cq_label = data['labels'].get('Commit-Queue', {})
    max_cq_vote = 0
    for vote in cq_label.get('all', []):
      max_cq_vote = max(max_cq_vote, vote.get('value', 0))
    if max_cq_vote == 2:
      return 'commit'
    if max_cq_vote == 1:
      return 'dry-run'
    if data['labels'].get('Code-Review', {}).get('approved'):
      return 'lgtm'
    if not data.get('reviewers', {}).get('REVIEWER', []):
      return 'unsent'
    # Walk messages newest-first to determine whose turn it is.
    owner = data['owner'].get('_account_id')
    messages = sorted(data.get('messages', []), key=lambda m: m.get('date'))
    while messages:
      m = messages.pop()
      if m.get('tag', '').startswith('autogenerated:cq:'):
        # Ignore replies from CQ.
        continue
      if m.get('author', {}).get('_account_id') == owner:
        # Most recent message was by owner.
        return 'waiting'
      else:
        # Some reply from non-owner.
        return 'reply'
    # Somehow there are no messages even though there are reviewers.
    return 'unsent'
def GetMostRecentPatchset(self):
if not self.GetIssue():
return None
data = self._GetChangeDetail(['CURRENT_REVISION'])
patchset = data['revisions'][data['current_revision']]['_number']
self.SetPatchset(patchset)
return patchset
  def GetMostRecentDryRunPatchset(self):
    """Get patchsets equivalent to the most recent patchset and return
    the patchset with the latest dry run. If none have been dry run, return
    the latest patchset."""
    if not self.GetIssue():
      return None
    data = self._GetChangeDetail(['ALL_REVISIONS'])
    patchset = data['revisions'][data['current_revision']]['_number']
    # Patchset numbers whose messages carry a dry-run tag.
    dry_run = set([int(m['_revision_number'])
                   for m in data.get('messages', [])
                   if m.get('tag', '').endswith('dry-run')])
    for revision_info in sorted(data.get('revisions', {}).values(),
                                key=lambda c: c['_number'], reverse=True):
      # Walk from the newest patchset downwards; stop at the first dry-run.
      if revision_info['_number'] in dry_run:
        patchset = revision_info['_number']
        break
      # Stop once a patchset is no longer trivially equivalent to the
      # current one (a real code change invalidates older dry runs).
      if revision_info.get('kind', '') not in \
          ('NO_CHANGE', 'NO_CODE_CHANGE', 'TRIVIAL_REBASE'):
        break
    self.SetPatchset(patchset)
    return patchset
def AddComment(self, message, publish=None):
gerrit_util.SetReview(
self.GetGerritHost(), self._GerritChangeIdentifier(),
msg=message, ready=publish)
  def GetCommentsSummary(self, readable=True):
    """Returns a list of _CommentSummary objects for the change's messages.

    File-level and robot comments are folded into the summary of the message
    they belong to (keyed by author email + timestamp); robot comments are
    restricted to the latest patchset.
    """
    # DETAILED_ACCOUNTS is to get emails in accounts.
    # CURRENT_REVISION is included to get the latest patchset so that
    # only the robot comments from the latest patchset can be shown.
    messages = self._GetChangeDetail(
        options=['MESSAGES', 'DETAILED_ACCOUNTS',
                 'CURRENT_REVISION']).get('messages', [])
    file_comments = gerrit_util.GetChangeComments(
        self.GetGerritHost(), self._GerritChangeIdentifier())
    robot_file_comments = gerrit_util.GetChangeRobotComments(
        self.GetGerritHost(), self._GerritChangeIdentifier())
    # Add the robot comments onto the list of comments, but only
    # keep those that are from the latest patchset.
    latest_patch_set = self.GetMostRecentPatchset()
    for path, robot_comments in robot_file_comments.items():
      line_comments = file_comments.setdefault(path, [])
      line_comments.extend(
          [c for c in robot_comments if c['patch_set'] == latest_patch_set])
    # Build dictionary of file comments for easy access and sorting later.
    # {author+date: {path: {patchset: {line: url+message}}}}
    comments = collections.defaultdict(
        lambda: collections.defaultdict(lambda: collections.defaultdict(dict)))
    server = self.GetCodereviewServer()
    if server in _KNOWN_GERRIT_TO_SHORT_URLS:
      # /c/ is automatically added by short URL server.
      url_prefix = '%s/%s' % (_KNOWN_GERRIT_TO_SHORT_URLS[server],
                              self.GetIssue())
    else:
      url_prefix = '%s/c/%s' % (server, self.GetIssue())
    for path, line_comments in file_comments.items():
      for comment in line_comments:
        # Skip autogenerated comments unless they come from a robot.
        tag = comment.get('tag', '')
        if tag.startswith('autogenerated') and 'robot_id' not in comment:
          continue
        key = (comment['author']['email'], comment['updated'])
        if comment.get('side', 'REVISION') == 'PARENT':
          patchset = 'Base'
        else:
          patchset = 'PS%d' % comment['patch_set']
        line = comment.get('line', 0)
        # The 'b' fragment prefix anchors base-side (parent) comments.
        url = ('%s/%s/%s#%s%s' %
               (url_prefix, comment['patch_set'], path,
                'b' if comment.get('side') == 'PARENT' else '',
                str(line) if line else ''))
        comments[key][path][patchset][line] = (url, comment['message'])
    summaries = []
    for msg in messages:
      summary = self._BuildCommentSummary(msg, comments, readable)
      if summary:
        summaries.append(summary)
    return summaries
  @staticmethod
  def _BuildCommentSummary(msg, comments, readable):
    """Folds one Gerrit message and its file comments into a _CommentSummary.

    Returns None for autogenerated messages that carry no file comments.
    """
    key = (msg['author']['email'], msg['date'])
    # Don't bother showing autogenerated messages that don't have associated
    # file or line comments. this will filter out most autogenerated
    # messages, but will keep robot comments like those from Tricium.
    is_autogenerated = msg.get('tag', '').startswith('autogenerated')
    if is_autogenerated and not comments.get(key):
      return None
    message = msg['message']
    # Gerrit spits out nanoseconds.
    assert len(msg['date'].split('.')[-1]) == 9
    # Truncate to microseconds so strptime's %f can parse it.
    date = datetime.datetime.strptime(msg['date'][:-3],
                                      '%Y-%m-%d %H:%M:%S.%f')
    if key in comments:
      message += '\n'
    for path, patchsets in sorted(comments.get(key, {}).items()):
      if readable:
        message += '\n%s' % path
      for patchset, lines in sorted(patchsets.items()):
        for line, (url, content) in sorted(lines.items()):
          # line == 0 denotes a whole-file comment.
          if line:
            line_str = 'Line %d' % line
            path_str = '%s:%d:' % (path, line)
          else:
            line_str = 'File comment'
            path_str = '%s:0:' % path
          if readable:
            message += '\n  %s, %s: %s' % (patchset, line_str, url)
            message += '\n  %s\n' % content
          else:
            message += '\n%s ' % path_str
            message += '\n%s\n' % content
    return _CommentSummary(
        date=date,
        message=message,
        sender=msg['author']['email'],
        autogenerated=is_autogenerated,
        # These could be inferred from the text messages and correlated with
        # Code-Review label maximum, however this is not reliable.
        # Leaving as is until the need arises.
        approval=False,
        disapproval=False,
    )
def CloseIssue(self):
gerrit_util.AbandonChange(
self.GetGerritHost(), self._GerritChangeIdentifier(), msg='')
def SubmitIssue(self, wait_for_merge=True):
gerrit_util.SubmitChange(
self.GetGerritHost(), self._GerritChangeIdentifier(),
wait_for_merge=wait_for_merge)
def _GetChangeDetail(self, options=None):
"""Returns details of associated Gerrit change and caching results."""
options = options or []
assert self.GetIssue(), 'issue is required to query Gerrit'
# Optimization to avoid multiple RPCs:
if 'CURRENT_REVISION' in options or 'ALL_REVISIONS' in options:
options.append('CURRENT_COMMIT')
# Normalize issue and options for consistent keys in cache.
cache_key = str(self.GetIssue())
options_set = frozenset(o.upper() for o in options)
for cached_options_set, data in self._detail_cache.get(cache_key, []):
# Assumption: data fetched before with extra options is suitable
# for return for a smaller set of options.
# For example, if we cached data for
# options=[CURRENT_REVISION, DETAILED_FOOTERS]
# and request is for options=[CURRENT_REVISION],
# THEN we can return prior cached data.
if options_set.issubset(cached_options_set):
return data
try:
data = gerrit_util.GetChangeDetail(
self.GetGerritHost(), self._GerritChangeIdentifier(), options_set)
except gerrit_util.GerritError as e:
if e.http_status == 404:
raise GerritChangeNotExists(self.GetIssue(), self.GetCodereviewServer())
raise
self._detail_cache.setdefault(cache_key, []).append((options_set, data))
return data
def _GetChangeCommit(self):
assert self.GetIssue(), 'issue must be set to query Gerrit'
try:
data = gerrit_util.GetChangeCommit(
self.GetGerritHost(), self._GerritChangeIdentifier())
except gerrit_util.GerritError as e:
if e.http_status == 404:
raise GerritChangeNotExists(self.GetIssue(), self.GetCodereviewServer())
raise
return data
def _IsCqConfigured(self):
detail = self._GetChangeDetail(['LABELS'])
return u'Commit-Queue' in detail.get('labels', {})
def CMDLand(self, force, bypass_hooks, verbose, parallel, resultdb, realm):
  """Submits the latest uploaded patchset directly, bypassing the CQ.

  Args:
    force: skip all interactive confirmation prompts.
    bypass_hooks: skip running presubmit hooks before landing.
    verbose: passed through to the presubmit runner.
    parallel: run presubmit checks in parallel.
    resultdb: passed through to presubmit for ResultDB integration.
    realm: LUCI realm passed through to presubmit.

  Returns:
    0 on success, 1 if the working tree is dirty.
  """
  # Refuse to land from a dirty tree.
  if git_common.is_dirty_git_tree('land'):
    return 1

  detail = self._GetChangeDetail(['CURRENT_REVISION', 'LABELS'])
  # If the project has a CQ, landing directly is usually a mistake.
  if not force and self._IsCqConfigured():
    confirm_or_exit('\nIt seems this repository has a CQ, '
                    'which can test and land changes for you. '
                    'Are you sure you wish to bypass it?\n',
                    action='bypass CQ')
  differs = True
  last_upload = self._GitGetBranchConfigValue('gerritsquashhash')
  # Note: git diff outputs nothing if there is no diff.
  if not last_upload or RunGit(['diff', last_upload]).strip():
    print('WARNING: Some changes from local branch haven\'t been uploaded.')
  else:
    if detail['current_revision'] == last_upload:
      differs = False
    else:
      print('WARNING: Local branch contents differ from latest uploaded '
            'patchset.')
  if differs:
    if not force:
      confirm_or_exit(
          'Do you want to submit latest Gerrit patchset and bypass hooks?\n',
          action='submit')
    print('WARNING: Bypassing hooks and submitting latest uploaded patchset.')
  elif not bypass_hooks:
    # Local tree matches the uploaded patchset; run presubmit before landing.
    upstream = self.GetCommonAncestorWithUpstream()
    if self.GetIssue():
      description = self.FetchDescription()
    else:
      description = _create_description_from_log([upstream])
    self.RunHook(
        committing=True,
        may_prompt=not force,
        verbose=verbose,
        parallel=parallel,
        upstream=upstream,
        description=description,
        all_files=False,
        resultdb=resultdb,
        realm=realm)

  self.SubmitIssue(wait_for_merge=True)
  print('Issue %s has been submitted.' % self.GetIssueURL())
  # Print the gitiles link of the landed commit, if Gerrit exposes one.
  links = self._GetChangeCommit().get('web_links', [])
  for link in links:
    if link.get('name') == 'gitiles' and link.get('url'):
      print('Landed as: %s' % link.get('url'))
      break
  return 0
def CMDPatchWithParsedIssue(self, parsed_issue_arg, nocommit, force,
                            newbranch):
  """Fetches a Gerrit patchset and applies it to the local checkout.

  Args:
    parsed_issue_arg: parsed issue argument; must be valid. May carry an
        explicit hostname and/or patchset.
    nocommit: apply the patch to the index only (cherry-pick --no-commit).
    force: hard-reset the checkout to the fetched commit instead of
        cherry-picking.
    newbranch: True if the caller just created a fresh branch for this patch.

  Returns:
    0 on success; dies with an error message otherwise.
  """
  assert parsed_issue_arg.valid

  self.issue = parsed_issue_arg.issue

  if parsed_issue_arg.hostname:
    self._gerrit_host = parsed_issue_arg.hostname
    self._gerrit_server = 'https://%s' % self._gerrit_host

  try:
    detail = self._GetChangeDetail(['ALL_REVISIONS'])
  except GerritChangeNotExists as e:
    DieWithError(str(e))

  if not parsed_issue_arg.patchset:
    # Use current revision by default.
    revision_info = detail['revisions'][detail['current_revision']]
    patchset = int(revision_info['_number'])
  else:
    patchset = parsed_issue_arg.patchset
    # Find the revision matching the requested patchset number.
    for revision_info in detail['revisions'].values():
      if int(revision_info['_number']) == parsed_issue_arg.patchset:
        break
    else:
      DieWithError('Couldn\'t find patchset %i in change %i' %
                   (parsed_issue_arg.patchset, self.GetIssue()))

  # Normalize both URLs before comparing, to guard against patching a
  # change that belongs to a different repository.
  remote_url = self.GetRemoteUrl()
  if remote_url.endswith('.git'):
    remote_url = remote_url[:-len('.git')]
  remote_url = remote_url.rstrip('/')
  fetch_info = revision_info['fetch']['http']
  fetch_info['url'] = fetch_info['url'].rstrip('/')

  if remote_url != fetch_info['url']:
    DieWithError('Trying to patch a change from %s but this repo appears '
                 'to be %s.' % (fetch_info['url'], remote_url))

  RunGit(['fetch', fetch_info['url'], fetch_info['ref']])

  # If we have created a new branch then do the "set issue" immediately in
  # case the cherry-pick fails, which happens when resolving conflicts.
  if newbranch:
    self.SetIssue(parsed_issue_arg.issue)

  if force:
    RunGit(['reset', '--hard', 'FETCH_HEAD'])
    print('Checked out commit for change %i patchset %i locally' %
          (parsed_issue_arg.issue, patchset))
  elif nocommit:
    RunGit(['cherry-pick', '--no-commit', 'FETCH_HEAD'])
    print('Patch applied to index.')
  else:
    RunGit(['cherry-pick', 'FETCH_HEAD'])
    print('Committed patch for change %i patchset %i locally.' %
          (parsed_issue_arg.issue, patchset))
    print('Note: this created a local commit which does not have '
          'the same hash as the one uploaded for review. This will make '
          'uploading changes based on top of this branch difficult.\n'
          'If you want to do that, use "git cl patch --force" instead.')

  if self.GetBranch():
    # Record issue/patchset and the fetched hash on the branch config so
    # subsequent uploads know what was last synced.
    self.SetIssue(parsed_issue_arg.issue)
    self.SetPatchset(patchset)
    fetched_hash = scm.GIT.ResolveCommit(settings.GetRoot(), 'FETCH_HEAD')
    self._GitSetBranchConfigValue('last-upload-hash', fetched_hash)
    self._GitSetBranchConfigValue('gerritsquashhash', fetched_hash)
  else:
    print('WARNING: You are in detached HEAD state.\n'
          'The patch has been applied to your checkout, but you will not be '
          'able to upload a new patch set to the gerrit issue.\n'
          'Try using the \'-b\' option if you would like to work on a '
          'branch and/or upload a new patch set.')

  return 0
def _GerritCommitMsgHookCheck(self, offer_removal):
  """Warns about (and optionally removes) Gerrit's commit-msg hook.

  Args:
    offer_removal: if True, interactively offer to delete the hook.
  """
  hook_path = os.path.join(settings.GetRoot(), '.git', 'hooks', 'commit-msg')
  if not os.path.exists(hook_path):
    return
  contents = gclient_utils.FileRead(hook_path)
  # Crude heuristic: only the official Gerrit hook contains both markers.
  # Leave potentially custom, developer-made hooks alone.
  is_gerrit_hook = ('From Gerrit Code Review' in contents and
                    'add_ChangeId()' in contents)
  if not is_gerrit_hook:
    return
  print('WARNING: You have Gerrit commit-msg hook installed.\n'
        'It is not necessary for uploading with git cl in squash mode, '
        'and may interfere with it in subtle ways.\n'
        'We recommend you remove the commit-msg hook.')
  if not offer_removal:
    return
  if ask_for_explicit_yes('Do you want to remove it now?'):
    gclient_utils.rm_file_or_tree(hook_path)
    print('Gerrit commit-msg hook removed.')
  else:
    print('OK, will keep Gerrit commit-msg hook in place.')
def _CleanUpOldTraces(self):
  """Keep only the last |MAX_TRACES| traces.

  Deletes the oldest trace files from TRACES_DIR; temporary files
  (prefixed 'tmp') are never touched. Failures are reported but not fatal,
  since trace cleanup is best-effort housekeeping.
  """
  try:
    # Trace file names embed a sortable timestamp, so lexicographic order
    # is chronological order.
    traces = sorted([
        os.path.join(TRACES_DIR, f)
        for f in os.listdir(TRACES_DIR)
        if (os.path.isfile(os.path.join(TRACES_DIR, f))
            and not f.startswith('tmp'))
    ])
    traces_to_delete = traces[:-MAX_TRACES]
    for trace in traces_to_delete:
      os.remove(trace)
  except OSError:
    # Bug fix: the middle fragment previously lacked a trailing '\n', so the
    # directory path and 'Consider removing...' were fused into one line.
    print('WARNING: Failed to remove old git traces from\n'
          '  %s\n'
          'Consider removing them manually.' % TRACES_DIR)
def _WriteGitPushTraces(self, trace_name, traces_dir, git_push_metadata):
  """Zip and write the git push traces stored in traces_dir.

  Produces three artifacts next to each other in TRACES_DIR:
  <trace_name>-traces.zip, <trace_name>-README, and
  <trace_name>-git-info.zip (git config + redacted gitcookies).

  Args:
    trace_name: path prefix (inside TRACES_DIR) for the artifacts.
    traces_dir: directory holding raw traces from the push.
    git_push_metadata: dict used to render the README; mutated in place
        to add 'now' and 'trace_name'.
  """
  gclient_utils.safe_makedirs(TRACES_DIR)
  traces_zip = trace_name + '-traces'
  traces_readme = trace_name + '-README'
  # Create a temporary dir to store git config and gitcookies in. It will be
  # compressed and stored next to the traces.
  git_info_dir = tempfile.mkdtemp()
  git_info_zip = trace_name + '-git-info'

  git_push_metadata['now'] = datetime_now().strftime('%Y-%m-%dT%H:%M:%S.%f')

  git_push_metadata['trace_name'] = trace_name
  gclient_utils.FileWrite(
      traces_readme, TRACES_README_FORMAT % git_push_metadata)

  # Keep only the first 6 characters of the git hashes on the packet
  # trace. This greatly decreases size after compression.
  packet_traces = os.path.join(traces_dir, 'trace-packet')
  if os.path.isfile(packet_traces):
    contents = gclient_utils.FileRead(packet_traces)
    gclient_utils.FileWrite(
        packet_traces, GIT_HASH_RE.sub(r'\1', contents))
  shutil.make_archive(traces_zip, 'zip', traces_dir)

  # Collect and compress the git config and gitcookies.
  git_config = RunGit(['config', '-l'])
  gclient_utils.FileWrite(
      os.path.join(git_info_dir, 'git-config'),
      git_config)

  cookie_auth = gerrit_util.Authenticator.get()
  if isinstance(cookie_auth, gerrit_util.CookiesAuthenticator):
    gitcookies_path = cookie_auth.get_gitcookies_path()
    if os.path.isfile(gitcookies_path):
      # Redact credential values before archiving.
      gitcookies = gclient_utils.FileRead(gitcookies_path)
      gclient_utils.FileWrite(
          os.path.join(git_info_dir, 'gitcookies'),
          GITCOOKIES_REDACT_RE.sub('REDACTED', gitcookies))
  shutil.make_archive(git_info_zip, 'zip', git_info_dir)

  gclient_utils.rmtree(git_info_dir)
def _RunGitPushWithTraces(self,
                          refspec,
                          refspec_opts,
                          git_push_metadata,
                          git_push_options=None):
  """Run git push and collect the traces resulting from the execution.

  Traces (trace2 events, curl, packet) are always written and archived —
  on success and on failure — via _WriteGitPushTraces in the finally block.

  Args:
    refspec: full refspec to push (e.g. 'hash:refs/for/main%opts').
    refspec_opts: list of refspec option strings, used for metrics only.
    git_push_metadata: dict describing the push; mutated with timing and
        exit code, then passed to the trace writer.
    git_push_options: optional list of '-o' push options.

  Returns:
    Decoded stdout of the git push.

  Raises:
    GitPushError: if the push fails.
  """
  # Create a temporary directory to store traces in. Traces will be compressed
  # and stored in a 'traces' dir inside depot_tools.
  traces_dir = tempfile.mkdtemp()
  trace_name = os.path.join(
      TRACES_DIR, datetime_now().strftime('%Y%m%dT%H%M%S.%f'))

  env = os.environ.copy()
  env['GIT_REDACT_COOKIES'] = 'o,SSO,GSSO_Uberproxy'
  # GIT_TR2_EVENT is the pre-2.22 spelling of GIT_TRACE2_EVENT; set both.
  env['GIT_TR2_EVENT'] = os.path.join(traces_dir, 'tr2-event')
  env['GIT_TRACE2_EVENT'] = os.path.join(traces_dir, 'tr2-event')
  env['GIT_TRACE_CURL'] = os.path.join(traces_dir, 'trace-curl')
  env['GIT_TRACE_CURL_NO_DATA'] = '1'
  env['GIT_TRACE_PACKET'] = os.path.join(traces_dir, 'trace-packet')

  try:
    push_returncode = 0
    remote_url = self.GetRemoteUrl()
    before_push = time_time()
    push_cmd = ['git', 'push', remote_url, refspec]
    if git_push_options:
      for opt in git_push_options:
        push_cmd.extend(['-o', opt])

    push_stdout = gclient_utils.CheckCallAndFilter(
        push_cmd,
        env=env,
        print_stdout=True,
        # Flush after every line: useful for seeing progress when running as
        # recipe.
        filter_fn=lambda _: sys.stdout.flush())
    push_stdout = push_stdout.decode('utf-8', 'replace')
  except subprocess2.CalledProcessError as e:
    push_returncode = e.returncode
    if 'blocked keyword' in str(e.stdout):
      raise GitPushError(
          'Failed to create a change, very likely due to blocked keyword. '
          'Please examine output above for the reason of the failure.\n'
          'If this is a false positive, you can try to bypass blocked '
          'keyword by using push option '
          '-o uploadvalidator~skip, e.g.:\n'
          'git cl upload -o uploadvalidator~skip\n\n'
          'If git-cl is not working correctly, file a bug under the '
          'Infra>SDK component.')

    raise GitPushError(
        'Failed to create a change. Please examine output above for the '
        'reason of the failure.\n'
        'For emergencies, Googlers can escalate to '
        'go/gob-support or go/notify#gob\n'
        'Hint: run command below to diagnose common Git/Gerrit '
        'credential problems:\n'
        '  git cl creds-check\n'
        '\n'
        'If git-cl is not working correctly, file a bug under the Infra>SDK '
        'component including the files below.\n'
        'Review the files before upload, since they might contain sensitive '
        'information.\n'
        'Set the Restrict-View-Google label so that they are not publicly '
        'accessible.\n' + TRACES_MESSAGE % {'trace_name': trace_name})
  finally:
    # Always record metrics and persist traces, whether the push succeeded
    # or not.
    execution_time = time_time() - before_push
    metrics.collector.add_repeated('sub_commands', {
      'command': 'git push',
      'execution_time': execution_time,
      'exit_code': push_returncode,
      'arguments': metrics_utils.extract_known_subcommand_args(refspec_opts),
    })

    git_push_metadata['execution_time'] = execution_time
    git_push_metadata['exit_code'] = push_returncode
    self._WriteGitPushTraces(trace_name, traces_dir, git_push_metadata)

    self._CleanUpOldTraces()
    gclient_utils.rmtree(traces_dir)

  return push_stdout
def CMDUploadChange(self, options, git_diff_args, custom_cl_base,
                    change_desc):
  """Upload the current branch to Gerrit, retry if new remote HEAD is
  found. options and change_desc may be mutated.

  The retry handles repositories transitioning their default branch
  (e.g. master -> main): if the push to the old default fails and the
  project HEAD now points elsewhere, the upload is retried against the
  new default branch after refreshing remote state.
  """
  remote, remote_branch = self.GetRemoteBranch()
  branch = GetTargetRef(remote, remote_branch, options.target_branch)

  try:
    return self._CMDUploadChange(options, git_diff_args, custom_cl_base,
                                 change_desc, branch)
  except GitPushError as e:
    # Repository might be in the middle of transition to main branch as
    # default, and uploads to old default might be blocked.
    if remote_branch not in [DEFAULT_OLD_BRANCH, DEFAULT_NEW_BRANCH]:
      DieWithError(str(e), change_desc)

    project_head = gerrit_util.GetProjectHead(self._gerrit_host,
                                              self.GetGerritProject())
    if project_head == branch:
      # The default branch didn't move; the failure was real.
      DieWithError(str(e), change_desc)
    branch = project_head

  print("WARNING: Fetching remote state and retrying upload to default "
        "branch...")
  RunGit(['fetch', '--prune', remote])
  # The retry must be non-interactive and must not re-edit the description.
  options.edit_description = False
  options.force = True
  try:
    self._CMDUploadChange(options, git_diff_args, custom_cl_base,
                          change_desc, branch)
  except GitPushError as e:
    DieWithError(str(e), change_desc)
def _CMDUploadChange(self, options, git_diff_args, custom_cl_base,
                     change_desc, branch):
  """Upload the current branch to Gerrit.

  Builds the commit to push (squashed commit-tree in squash mode, HEAD
  otherwise), assembles the refs/for/ refspec with its options (reviewers,
  CCs, labels, topic, hashtags, wip/ready), runs the push with tracing,
  and records the resulting issue number on the branch config.

  Args:
    options: parsed upload options; may be mutated (e.g. title).
    git_diff_args: args identifying the diff being uploaded.
    custom_cl_base: optional explicit base commit for the CL.
    change_desc: ChangeDescription; may be mutated (Change-Id, footers).
    branch: target branch name on the remote.

  Returns:
    0 on success.

  Raises:
    GitPushError: if the git push is rejected.
  """
  if options.squash:
    self._GerritCommitMsgHookCheck(offer_removal=not options.force)
    if self.GetIssue():
      # User requested to change description
      if options.edit_description:
        change_desc.prompt()
      change_id = self._GetChangeDetail()['change_id']
      change_desc.ensure_change_id(change_id)
    else:  # if not self.GetIssue()
      if not options.force and not options.message_file:
        change_desc.prompt()
      change_ids = git_footers.get_footer_change_id(change_desc.description)
      if len(change_ids) == 1:
        change_id = change_ids[0]
      else:
        change_id = GenerateGerritChangeId(change_desc.description)
        change_desc.ensure_change_id(change_id)

    if options.preserve_tryjobs:
      change_desc.set_preserve_tryjobs()

    remote, upstream_branch = self.FetchUpstreamTuple(self.GetBranch())
    parent = self._ComputeParent(
        remote, upstream_branch, custom_cl_base, options.force, change_desc)
    tree = RunGit(['rev-parse', 'HEAD:']).strip()
    # Build the squashed commit out-of-band with commit-tree so the working
    # branch itself is never rewritten.
    with gclient_utils.temporary_file() as desc_tempfile:
      gclient_utils.FileWrite(desc_tempfile, change_desc.description)
      ref_to_push = RunGit(
          ['commit-tree', tree, '-p', parent, '-F', desc_tempfile]).strip()
  else:  # if not options.squash
    if options.no_add_changeid:
      pass
    else:  # adding Change-Ids is okay.
      if not git_footers.get_footer_change_id(change_desc.description):
        DownloadGerritHook(False)
        change_desc.set_description(
            self._AddChangeIdToCommitMessage(change_desc.description,
                                             git_diff_args))
    ref_to_push = 'HEAD'
    # For no-squash mode, we assume the remote called "origin" is the one we
    # want. It is not worthwhile to support different workflows for
    # no-squash mode.
    parent = 'origin/%s' % branch
    # attempt to extract the changeid from the current description
    # fail informatively if not possible.
    change_id_candidates = git_footers.get_footer_change_id(
        change_desc.description)
    if not change_id_candidates:
      DieWithError("Unable to extract change-id from message.")
    change_id = change_id_candidates[0]

  SaveDescriptionBackup(change_desc)
  commits = RunGitSilent(['rev-list', '%s..%s' % (parent,
                                                  ref_to_push)]).splitlines()
  if len(commits) > 1:
    print('WARNING: This will upload %d commits. Run the following command '
          'to see which commits will be uploaded: ' % len(commits))
    print('git log %s..%s' % (parent, ref_to_push))
    print('You can also use `git squash-branch` to squash these into a '
          'single commit.')
    confirm_or_exit(action='upload')

  reviewers = sorted(change_desc.get_reviewers())
  cc = []
  # Add CCs from WATCHLISTS and rietveld.cc git config unless this is
  # the initial upload, the CL is private, or auto-CCing has ben disabled.
  if not (self.GetIssue() or options.private or options.no_autocc):
    cc = self.GetCCList().split(',')
    if len(cc) > 100:
      lsc = ('https://chromium.googlesource.com/chromium/src/+/HEAD/docs/'
             'process/lsc/lsc_workflow.md')
      print('WARNING: This will auto-CC %s users.' % len(cc))
      print('LSC may be more appropriate: %s' % lsc)
      print('You can also use the --no-autocc flag to disable auto-CC.')
      confirm_or_exit(action='continue')
  # Add cc's from the --cc flag.
  if options.cc:
    cc.extend(options.cc)
  cc = [email.strip() for email in cc if email.strip()]
  if change_desc.get_cced():
    cc.extend(change_desc.get_cced())
  if self.GetGerritHost() == 'chromium-review.googlesource.com':
    valid_accounts = set(reviewers + cc)
    # TODO(crbug/877717): relax this for all hosts.
  else:
    valid_accounts = gerrit_util.ValidAccounts(
        self.GetGerritHost(), reviewers + cc)
  logging.info('accounts %s are recognized, %s invalid',
               sorted(valid_accounts),
               set(reviewers + cc).difference(set(valid_accounts)))

  # Extra options that can be specified at push time. Doc:
  # https://gerrit-review.googlesource.com/Documentation/user-upload.html
  refspec_opts = []

  # By default, new changes are started in WIP mode, and subsequent patchsets
  # don't send email. At any time, passing --send-mail will mark the change
  # ready and send email for that particular patch.
  if options.send_mail:
    refspec_opts.append('ready')
    refspec_opts.append('notify=ALL')
  elif not self.GetIssue() and options.squash:
    refspec_opts.append('wip')
  else:
    refspec_opts.append('notify=NONE')

  # TODO(tandrii): options.message should be posted as a comment
  # if --send-mail is set on non-initial upload as Rietveld used to do it.

  # Set options.title in case user was prompted in _GetTitleForUpload and
  # _CMDUploadChange needs to be called again.
  options.title = self._GetTitleForUpload(options)
  if options.title:
    # Punctuation and whitespace in |title| must be percent-encoded.
    refspec_opts.append(
        'm=' + gerrit_util.PercentEncodeForGitRef(options.title))

  if options.private:
    refspec_opts.append('private')

  for r in sorted(reviewers):
    if r in valid_accounts:
      refspec_opts.append('r=%s' % r)
      reviewers.remove(r)
    else:
      # TODO(tandrii): this should probably be a hard failure.
      print('WARNING: reviewer %s doesn\'t have a Gerrit account, skipping'
            % r)
  for c in sorted(cc):
    # refspec option will be rejected if cc doesn't correspond to an
    # account, even though REST call to add such arbitrary cc may succeed.
    if c in valid_accounts:
      refspec_opts.append('cc=%s' % c)
      cc.remove(c)

  if options.topic:
    # Documentation on Gerrit topics is here:
    # https://gerrit-review.googlesource.com/Documentation/user-upload.html#topic
    refspec_opts.append('topic=%s' % options.topic)

  if options.enable_auto_submit:
    refspec_opts.append('l=Auto-Submit+1')
  if options.set_bot_commit:
    refspec_opts.append('l=Bot-Commit+1')
  if options.use_commit_queue:
    refspec_opts.append('l=Commit-Queue+2')
  elif options.cq_dry_run:
    refspec_opts.append('l=Commit-Queue+1')
  elif options.cq_quick_run:
    refspec_opts.append('l=Commit-Queue+1')
    refspec_opts.append('l=Quick-Run+1')

  if change_desc.get_reviewers(tbr_only=True):
    score = gerrit_util.GetCodeReviewTbrScore(
        self.GetGerritHost(),
        self.GetGerritProject())
    refspec_opts.append('l=Code-Review+%s' % score)

  # Gerrit sorts hashtags, so order is not important.
  hashtags = {change_desc.sanitize_hash_tag(t) for t in options.hashtags}
  if not self.GetIssue():
    hashtags.update(change_desc.get_hash_tags())
  refspec_opts += ['hashtag=%s' % t for t in sorted(hashtags)]

  refspec_suffix = ''
  if refspec_opts:
    refspec_suffix = '%' + ','.join(refspec_opts)
    assert ' ' not in refspec_suffix, (
        'spaces not allowed in refspec: "%s"' % refspec_suffix)
  refspec = '%s:refs/for/%s%s' % (ref_to_push, branch, refspec_suffix)

  git_push_metadata = {
      'gerrit_host': self.GetGerritHost(),
      'title': options.title or '<untitled>',
      'change_id': change_id,
      'description': change_desc.description,
  }

  push_stdout = self._RunGitPushWithTraces(refspec, refspec_opts,
                                           git_push_metadata,
                                           options.push_options)

  if options.squash:
    # Parse the new change number out of git push output.
    regex = re.compile(r'remote:\s+https?://[\w\-\.\+\/#]*/(\d+)\s.*')
    change_numbers = [m.group(1)
                      for m in map(regex.match, push_stdout.splitlines())
                      if m]
    if len(change_numbers) != 1:
      DieWithError(
        ('Created|Updated %d issues on Gerrit, but only 1 expected.\n'
         'Change-Id: %s') % (len(change_numbers), change_id), change_desc)
    self.SetIssue(change_numbers[0])
    self._GitSetBranchConfigValue('gerritsquashhash', ref_to_push)

  if self.GetIssue() and (reviewers or cc):
    # GetIssue() is not set in case of non-squash uploads according to tests.
    # TODO(crbug.com/751901): non-squash uploads in git cl should be removed.
    gerrit_util.AddReviewers(
        self.GetGerritHost(),
        self._GerritChangeIdentifier(),
        reviewers, cc,
        notify=bool(options.send_mail))

  return 0
def _ComputeParent(self, remote, upstream_branch, custom_cl_base, force,
                   change_desc):
  """Computes parent of the generated commit to be uploaded to Gerrit.

  Args:
    remote: remote name of the upstream, or '.' when upstream is local.
    upstream_branch: ref of the upstream branch.
    custom_cl_base: optional explicit base commit chosen by the user.
    force: if True, skip the confirmation prompt for a risky custom base.
    change_desc: ChangeDescription, passed along to DieWithError so the
        description is not lost on failure.

  Returns:
    Revision or a ref name to use as the parent of the squashed commit.
  """
  if custom_cl_base:
    # Try to avoid creating additional unintended CLs when uploading, unless
    # user wants to take this risk.
    local_ref_of_target_remote = self.GetRemoteBranch()[1]
    code, _ = RunGitWithCode(['merge-base', '--is-ancestor', custom_cl_base,
                              local_ref_of_target_remote])
    if code == 1:
      print('\nWARNING: Manually specified base of this CL `%s` '
            'doesn\'t seem to belong to target remote branch `%s`.\n\n'
            'If you proceed with upload, more than 1 CL may be created by '
            'Gerrit as a result, in turn confusing or crashing git cl.\n\n'
            'If you are certain that specified base `%s` has already been '
            'uploaded to Gerrit as another CL, you may proceed.\n' %
            (custom_cl_base, local_ref_of_target_remote, custom_cl_base))
      if not force:
        confirm_or_exit(
            'Do you take responsibility for cleaning up potential mess '
            'resulting from proceeding with upload?',
            action='upload')
    return custom_cl_base

  if remote != '.':
    return self.GetCommonAncestorWithUpstream()

  # If our upstream branch is local, we base our squashed commit on its
  # squashed version.
  upstream_branch_name = scm.GIT.ShortBranchName(upstream_branch)

  # The default branches are never squash-uploaded themselves; use the
  # common ancestor directly. (Merged the previously duplicated 'master'
  # and 'main' checks into one membership test.)
  if upstream_branch_name in ('master', 'main'):
    return self.GetCommonAncestorWithUpstream()

  # Check the squashed hash of the parent.
  # TODO(tandrii): consider checking parent change in Gerrit and using its
  # hash if tree hash of latest parent revision (patchset) in Gerrit matches
  # the tree hash of the parent branch. The upside is less likely bogus
  # requests to reupload parent change just because it's uploadhash is
  # missing, yet the downside likely exists, too (albeit unknown to me yet).
  parent = scm.GIT.GetBranchConfig(
      settings.GetRoot(), upstream_branch_name, 'gerritsquashhash')
  # Verify that the upstream branch has been uploaded too, otherwise
  # Gerrit will create additional CLs when uploading.
  if not parent or (RunGitSilent(['rev-parse', upstream_branch + ':']) !=
                    RunGitSilent(['rev-parse', parent + ':'])):
    DieWithError(
        '\nUpload upstream branch %s first.\n'
        'It is likely that this branch has been rebased since its last '
        'upload, so you just need to upload it again.\n'
        '(If you uploaded it with --no-squash, then branch dependencies '
        'are not supported, and you should reupload with --squash.)'
        % upstream_branch_name,
        change_desc)
  return parent
def _AddChangeIdToCommitMessage(self, log_desc, args):
  """Amends HEAD so the Gerrit commit-msg hook inserts a Change-Id.

  Assumes the commit hook is in place; dies with an error if the amended
  message still lacks a Change-Id footer.
  """
  RunGit(['commit', '--amend', '-m', log_desc])
  amended_desc = _create_description_from_log(args)
  if not git_footers.get_footer_change_id(amended_desc):
    DieWithError('ERROR: Gerrit commit-msg hook not installed.')
  print('git-cl: Added Change-Id to commit message.')
  return amended_desc
def CannotTriggerTryJobReason(self):
  """Returns a human-readable reason try jobs can't run, or None if they can."""
  try:
    detail = self._GetChangeDetail()
  except GerritChangeNotExists:
    return 'Gerrit doesn\'t know about your change %s' % self.GetIssue()

  if detail['status'] in ('ABANDONED', 'MERGED'):
    return 'CL %s is closed' % self.GetIssue()
  return None
def GetGerritChange(self, patchset=None):
  """Returns a buildbucket.v2.GerritChange message for the current issue.

  Args:
    patchset: patchset number to reference; defaults to the CL's current one.

  Raises:
    Exception: if the requested patchset is unknown to Gerrit.
  """
  host = urllib.parse.urlparse(self.GetCodereviewServer()).hostname
  issue = self.GetIssue()
  patchset = int(patchset or self.GetPatchset())
  data = self._GetChangeDetail(['ALL_REVISIONS'])

  assert host and issue and patchset, 'CL must be uploaded first'

  # Verify the patchset exists among the change's revisions.
  for revision_data in data['revisions'].values():
    if int(revision_data['_number']) == patchset:
      break
  else:
    raise Exception('Patchset %d is not known in Gerrit change %d' %
                    (patchset, self.GetIssue()))

  return {
      'host': host,
      'change': issue,
      'project': data['project'],
      'patchset': patchset,
  }
def GetIssueOwner(self):
  """Returns the email address of the change's owner, as reported by Gerrit."""
  detail = self._GetChangeDetail(['DETAILED_ACCOUNTS'])
  return detail['owner']['email']
def GetReviewers(self):
  """Returns the emails of all accounts currently in the REVIEWER state."""
  details = self._GetChangeDetail(['DETAILED_ACCOUNTS'])
  reviewer_infos = details['reviewers'].get('REVIEWER', [])
  return [info['email'] for info in reviewer_infos]
def _get_bug_line_values(default_project_prefix, bugs):
"""Given default_project_prefix and comma separated list of bugs, yields bug
line values.
Each bug can be either:
* a number, which is combined with default_project_prefix
* string, which is left as is.
This function may produce more than one line, because bugdroid expects one
project per line.
>>> list(_get_bug_line_values('v8:', '123,chromium:789'))
['v8:123', 'chromium:789']
"""
default_bugs = []
others = []
for bug in bugs.split(','):
bug = bug.strip()
if bug:
try:
default_bugs.append(int(bug))
except ValueError:
others.append(bug)
if default_bugs:
default_bugs = ','.join(map(str, default_bugs))
if default_project_prefix:
if not default_project_prefix.endswith(':'):
default_project_prefix += ':'
yield '%s%s' % (default_project_prefix, default_bugs)
else:
yield default_bugs
for other in sorted(others):
# Don't bother finding common prefixes, CLs with >2 bugs are very very rare.
yield other
class ChangeDescription(object):
  """Contains a parsed form of the change description.

  Stores the description as a list of lines and provides helpers to manage
  R=/TBR=/CC= lines, Bug:/Fixed: footers, Gerrit footers, and hashtags.
  """
  R_LINE = r'^[ \t]*(TBR|R)[ \t]*=[ \t]*(.*?)[ \t]*$'
  CC_LINE = r'^[ \t]*(CC)[ \t]*=[ \t]*(.*?)[ \t]*$'
  BUG_LINE = r'^[ \t]*(?:(BUG)[ \t]*=|Bug:)[ \t]*(.*?)[ \t]*$'
  FIXED_LINE = r'^[ \t]*Fixed[ \t]*:[ \t]*(.*?)[ \t]*$'
  CHERRY_PICK_LINE = r'^\(cherry picked from commit [a-fA-F0-9]{40}\)$'
  STRIP_HASH_TAG_PREFIX = r'^(\s*(revert|reland)( "|:)?\s*)*'
  BRACKET_HASH_TAG = r'\s*\[([^\[\]]+)\]'
  COLON_SEPARATED_HASH_TAG = r'^([a-zA-Z0-9_\- ]+):($|[^:])'
  BAD_HASH_TAG_CHUNK = r'[^a-zA-Z0-9]+'

  def __init__(self, description, bug=None, fixed=None):
    self._description_lines = (description or '').strip().splitlines()
    # Append Bug:/Fixed: footers only when not already present.
    if bug:
      regexp = re.compile(self.BUG_LINE)
      prefix = settings.GetBugPrefix()
      if not any((regexp.match(line) for line in self._description_lines)):
        values = list(_get_bug_line_values(prefix, bug))
        self.append_footer('Bug: %s' % ', '.join(values))
    if fixed:
      regexp = re.compile(self.FIXED_LINE)
      prefix = settings.GetBugPrefix()
      if not any((regexp.match(line) for line in self._description_lines)):
        values = list(_get_bug_line_values(prefix, fixed))
        self.append_footer('Fixed: %s' % ', '.join(values))

  @property               # www.logilab.org/ticket/89786
  def description(self):  # pylint: disable=method-hidden
    return '\n'.join(self._description_lines)

  def set_description(self, desc):
    # Accepts either a string or an iterable of lines; strips leading and
    # trailing blank lines.
    if isinstance(desc, basestring):
      lines = desc.splitlines()
    else:
      lines = [line.rstrip() for line in desc]
    while lines and not lines[0]:
      lines.pop(0)
    while lines and not lines[-1]:
      lines.pop(-1)
    self._description_lines = lines

  def ensure_change_id(self, change_id):
    description = self.description
    footer_change_ids = git_footers.get_footer_change_id(description)
    # Make sure that the Change-Id in the description matches the given one.
    if footer_change_ids != [change_id]:
      if footer_change_ids:
        # Remove any existing Change-Id footers since they don't match the
        # expected change_id footer.
        description = git_footers.remove_footer(description, 'Change-Id')
        # Bug fix: the format argument was previously missing, so users saw
        # a literal '%s' in the warning.
        print('WARNING: Change-Id has been set to %s. Use `git cl issue 0` '
              'if you want to set a new one.' % change_id)
      # Add the expected Change-Id footer.
      description = git_footers.add_footer_change_id(description, change_id)
      self.set_description(description)

  def update_reviewers(self, reviewers, tbrs):
    """Rewrites the R=/TBR= line(s) as a single line each.

    Args:
      reviewers (list(str)) - list of additional emails to use for reviewers.
      tbrs (list(str)) - list of additional emails to use for TBRs.
    """
    if not reviewers and not tbrs:
      return

    reviewers = set(reviewers)
    tbrs = set(tbrs)

    LOOKUP = {
        'TBR': tbrs,
        'R': reviewers,
    }

    # Get the set of R= and TBR= lines and remove them from the description.
    regexp = re.compile(self.R_LINE)
    matches = [regexp.match(line) for line in self._description_lines]
    new_desc = [l for i, l in enumerate(self._description_lines)
                if not matches[i]]
    self.set_description(new_desc)

    # Construct new unified R= and TBR= lines.

    # First, update tbrs/reviewers with names from the R=/TBR= lines (if any).
    for match in matches:
      if not match:
        continue
      LOOKUP[match.group(1)].update(cleanup_list([match.group(2).strip()]))

    # If any folks ended up in both groups, remove them from tbrs.
    tbrs -= reviewers

    new_r_line = 'R=' + ', '.join(sorted(reviewers)) if reviewers else None
    new_tbr_line = 'TBR=' + ', '.join(sorted(tbrs)) if tbrs else None

    # Put the new lines in the description where the old first R= line was.
    line_loc = next((i for i, match in enumerate(matches) if match), -1)
    if 0 <= line_loc < len(self._description_lines):
      if new_tbr_line:
        self._description_lines.insert(line_loc, new_tbr_line)
      if new_r_line:
        self._description_lines.insert(line_loc, new_r_line)
    else:
      if new_r_line:
        self.append_footer(new_r_line)
      if new_tbr_line:
        self.append_footer(new_tbr_line)

  def set_preserve_tryjobs(self):
    """Ensures description footer contains 'Cq-Do-Not-Cancel-Tryjobs: true'."""
    footers = git_footers.parse_footers(self.description)
    for v in footers.get('Cq-Do-Not-Cancel-Tryjobs', []):
      if v.lower() == 'true':
        return
    self.append_footer('Cq-Do-Not-Cancel-Tryjobs: true')

  def prompt(self):
    """Asks the user to update the description."""
    self.set_description([
        '# Enter a description of the change.',
        '# This will be displayed on the codereview site.',
        '# The first line will also be used as the subject of the review.',
        '#--------------------This line is 72 characters long'
        '--------------------',
    ] + self._description_lines)
    bug_regexp = re.compile(self.BUG_LINE)
    fixed_regexp = re.compile(self.FIXED_LINE)
    prefix = settings.GetBugPrefix()
    has_issue = lambda l: bug_regexp.match(l) or fixed_regexp.match(l)
    if not any((has_issue(line) for line in self._description_lines)):
      self.append_footer('Bug: %s' % prefix)

    print('Waiting for editor...')
    content = gclient_utils.RunEditor(self.description, True,
                                      git_editor=settings.GetGitEditor())
    if not content:
      DieWithError('Running editor failed')
    lines = content.splitlines()

    # Strip off comments and default inserted "Bug:" line.
    clean_lines = [line.rstrip() for line in lines if not
                   (line.startswith('#') or
                    line.rstrip() == "Bug:" or
                    line.rstrip() == "Bug: " + prefix)]
    if not clean_lines:
      DieWithError('No CL description, aborting')
    self.set_description(clean_lines)

  def append_footer(self, line):
    """Adds a footer line to the description.

    Differentiates legacy "KEY=xxx" footers (used to be called tags) and
    Gerrit's footers in the form of "Footer-Key: footer any value" and ensures
    that Gerrit footers are always at the end.
    """
    parsed_footer_line = git_footers.parse_footer(line)
    if parsed_footer_line:
      # Line is a gerrit footer in the form: Footer-Key: any value.
      # Thus, must be appended observing Gerrit footer rules.
      self.set_description(
          git_footers.add_footer(self.description,
                                 key=parsed_footer_line[0],
                                 value=parsed_footer_line[1]))
      return

    if not self._description_lines:
      self._description_lines.append(line)
      return

    top_lines, gerrit_footers, _ = git_footers.split_footers(self.description)
    if gerrit_footers:
      # git_footers.split_footers ensures that there is an empty line before
      # actual (gerrit) footers, if any. We have to keep it that way.
      assert top_lines and top_lines[-1] == ''
      top_lines, separator = top_lines[:-1], top_lines[-1:]
    else:
      separator = []  # No need for separator if there are no gerrit_footers.

    prev_line = top_lines[-1] if top_lines else ''
    if (not presubmit_support.Change.TAG_LINE_RE.match(prev_line) or
        not presubmit_support.Change.TAG_LINE_RE.match(line)):
      top_lines.append('')
    top_lines.append(line)
    self._description_lines = top_lines + separator + gerrit_footers

  def get_reviewers(self, tbr_only=False):
    """Retrieves the list of reviewers."""
    matches = [re.match(self.R_LINE, line) for line in self._description_lines]
    reviewers = [match.group(2).strip()
                 for match in matches
                 if match and (not tbr_only or match.group(1).upper() == 'TBR')]
    return cleanup_list(reviewers)

  def get_cced(self):
    """Retrieves the list of CC'ed emails from CC= lines."""
    matches = [re.match(self.CC_LINE, line) for line in self._description_lines]
    cced = [match.group(2).strip() for match in matches if match]
    return cleanup_list(cced)

  def get_hash_tags(self):
    """Extracts and sanitizes a list of Gerrit hashtags."""
    subject = (self._description_lines or ('',))[0]
    subject = re.sub(
        self.STRIP_HASH_TAG_PREFIX, '', subject, flags=re.IGNORECASE)

    tags = []
    start = 0
    bracket_exp = re.compile(self.BRACKET_HASH_TAG)
    while True:
      m = bracket_exp.match(subject, start)
      if not m:
        break
      tags.append(self.sanitize_hash_tag(m.group(1)))
      start = m.end()

    if not tags:
      # Try "Tag: " prefix.
      m = re.match(self.COLON_SEPARATED_HASH_TAG, subject)
      if m:
        tags.append(self.sanitize_hash_tag(m.group(1)))
    return tags

  @classmethod
  def sanitize_hash_tag(cls, tag):
    """Returns a sanitized Gerrit hash tag.

    A sanitized hashtag can be used as a git push refspec parameter value.
    """
    return re.sub(cls.BAD_HASH_TAG_CHUNK, '-', tag).strip('-').lower()
def FindCodereviewSettingsFile(filename='codereview.settings'):
  """Finds the given file starting in the cwd and going up.

  Only looks up to the top of the repository unless an
  'inherit-review-settings-ok' file exists in the root of the repository.
  Returns an open file object, or None when the file is not found.
  """
  stop_dir = settings.GetRoot()
  # A marker file at the repo root permits the search to continue past the
  # repository boundary all the way to '/'.
  if os.path.isfile(os.path.join(stop_dir, 'inherit-review-settings-ok')):
    stop_dir = '/'

  search_dir = os.getcwd()
  while True:
    candidate = os.path.join(search_dir, filename)
    # Keep the original's directory-listing membership check in addition to
    # the isfile() test.
    if filename in os.listdir(search_dir) and os.path.isfile(candidate):
      return open(candidate)
    if search_dir == stop_dir:
      return None
    search_dir = os.path.dirname(search_dir)
def LoadCodereviewSettingsFromFile(fileobj):
  """Parses a codereview.settings file and updates hooks."""
  keyvals = gclient_utils.ParseCodereviewSettingsContent(fileobj.read())

  def SetProperty(name, setting, unset_error_ok=False):
    # Writes rietveld.<name> from keyvals[setting], or unsets it when absent.
    fullname = 'rietveld.' + name
    if setting in keyvals:
      RunGit(['config', fullname, keyvals[setting]])
    else:
      RunGit(['config', '--unset-all', fullname], error_ok=unset_error_ok)

  if not keyvals.get('GERRIT_HOST', False):
    SetProperty('server', 'CODE_REVIEW_SERVER')
  # Only server setting is required. Other settings can be absent.
  # In that case, we ignore errors raised during option deletion attempt.
  for name, setting in (
      ('cc', 'CC_LIST'),
      ('tree-status-url', 'STATUS'),
      ('viewvc-url', 'VIEW_VC'),
      ('bug-prefix', 'BUG_PREFIX'),
      ('cpplint-regex', 'LINT_REGEX'),
      ('cpplint-ignore-regex', 'LINT_IGNORE_REGEX'),
      ('run-post-upload-hook', 'RUN_POST_UPLOAD_HOOK'),
      ('format-full-by-default', 'FORMAT_FULL_BY_DEFAULT')):
    SetProperty(name, setting, unset_error_ok=True)

  if 'GERRIT_HOST' in keyvals:
    RunGit(['config', 'gerrit.host', keyvals['GERRIT_HOST']])

  if 'GERRIT_SQUASH_UPLOADS' in keyvals:
    RunGit(['config', 'gerrit.squash-uploads',
            keyvals['GERRIT_SQUASH_UPLOADS']])

  if 'GERRIT_SKIP_ENSURE_AUTHENTICATED' in keyvals:
    RunGit(['config', 'gerrit.skip-ensure-authenticated',
            keyvals['GERRIT_SKIP_ENSURE_AUTHENTICATED']])

  if 'PUSH_URL_CONFIG' in keyvals and 'ORIGIN_URL_CONFIG' in keyvals:
    # should be of the form
    # PUSH_URL_CONFIG: url.ssh://gitrw.chromium.org.pushinsteadof
    # ORIGIN_URL_CONFIG: http://src.chromium.org/git
    RunGit(['config', keyvals['PUSH_URL_CONFIG'],
            keyvals['ORIGIN_URL_CONFIG']])
def urlretrieve(source, destination):
  """Downloads a network object to a local file, like urllib.urlretrieve.

  This is necessary because urllib is broken for SSL connections via a proxy.
  """
  # urlopen().read() returns bytes, so the destination must be opened in
  # binary mode; text mode ('w') raises TypeError on the write in Python 3.
  with open(destination, 'wb') as f:
    f.write(urllib.request.urlopen(source).read())
def hasSheBang(fname):
  """Returns True if fname begins with a '#!' interpreter line."""
  with open(fname) as script:
    first_two = script.read(2)
  return first_two == '#!'
def DownloadGerritHook(force):
  """Downloads and installs a Gerrit commit-msg hook.

  Args:
    force: True to update hooks. False to install hooks if not present.
  """
  src = 'https://gerrit-review.googlesource.com/tools/hooks/commit-msg'
  dst = os.path.join(settings.GetRoot(), '.git', 'hooks', 'commit-msg')
  if os.access(dst, os.X_OK):
    # An executable hook is already installed; nothing to do.
    return
  if os.path.exists(dst) and not force:
    # A non-executable hook exists but updating was not requested.
    return
  try:
    urlretrieve(src, dst)
    if not hasSheBang(dst):
      DieWithError('Not a script: %s\n'
                   'You need to download from\n%s\n'
                   'into .git/hooks/commit-msg and '
                   'chmod +x .git/hooks/commit-msg' % (dst, src))
    os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
  except Exception:
    # Remove a partially-written hook before bailing out.
    if os.path.exists(dst):
      os.remove(dst)
    DieWithError('\nFailed to download hooks.\n'
                 'You need to download from\n%s\n'
                 'into .git/hooks/commit-msg and '
                 'chmod +x .git/hooks/commit-msg' % src)
class _GitCookiesChecker(object):
  """Provides facilities for validating and suggesting fixes to .gitcookies."""

  # Suffix shared by all Git/Gerrit hosts this checker cares about.
  _GOOGLESOURCE = 'googlesource.com'

  def __init__(self):
    # Cached list of [host, identity, source], where source is either
    # .gitcookies or .netrc.
    self._all_hosts = None

  def ensure_configured_gitcookies(self):
    """Runs checks and suggests fixes to make git use .gitcookies from default
    path."""
    default = gerrit_util.CookiesAuthenticator.get_gitcookies_path()
    configured_path = RunGitSilent(
        ['config', '--global', 'http.cookiefile']).strip()
    configured_path = os.path.expanduser(configured_path)
    if configured_path:
      self._ensure_default_gitcookies_path(configured_path, default)
    else:
      self._configure_gitcookies_path(default)

  @staticmethod
  def _ensure_default_gitcookies_path(configured_path, default_path):
    """Reconciles a custom http.cookiefile setting with the default path."""
    assert configured_path
    if configured_path == default_path:
      print('git is already configured to use your .gitcookies from %s' %
            configured_path)
      return

    print('WARNING: You have configured custom path to .gitcookies: %s\n'
          'Gerrit and other depot_tools expect .gitcookies at %s\n' %
          (configured_path, default_path))

    if not os.path.exists(configured_path):
      print('However, your configured .gitcookies file is missing.')
      confirm_or_exit('Reconfigure git to use default .gitcookies?',
                      action='reconfigure')
      RunGit(['config', '--global', 'http.cookiefile', default_path])
      return

    if os.path.exists(default_path):
      # Refuse to merge/overwrite two cookie files automatically.
      print('WARNING: default .gitcookies file already exists %s' %
            default_path)
      DieWithError('Please delete %s manually and re-run git cl creds-check' %
                   default_path)

    confirm_or_exit('Move existing .gitcookies to default location?',
                    action='move')
    shutil.move(configured_path, default_path)
    RunGit(['config', '--global', 'http.cookiefile', default_path])
    print('Moved and reconfigured git to use .gitcookies from %s' %
          default_path)

  @staticmethod
  def _configure_gitcookies_path(default_path):
    """Points git's http.cookiefile at the default .gitcookies path."""
    netrc_path = gerrit_util.CookiesAuthenticator.get_netrc_path()
    if os.path.exists(netrc_path):
      print('You seem to be using outdated .netrc for git credentials: %s' %
            netrc_path)
    print('This tool will guide you through setting up recommended '
          '.gitcookies store for git credentials.\n'
          '\n'
          'IMPORTANT: If something goes wrong and you decide to go back, do:\n'
          '  git config --global --unset http.cookiefile\n'
          '  mv %s %s.backup\n\n' % (default_path, default_path))
    confirm_or_exit(action='setup .gitcookies')
    RunGit(['config', '--global', 'http.cookiefile', default_path])
    print('Configured git to use .gitcookies from %s' % default_path)

  def get_hosts_with_creds(self, include_netrc=False):
    """Returns cached (host, identity, source-file) tuples for *.googlesource.com.

    Entries sourced from .netrc are filtered out unless include_netrc is True.
    """
    if self._all_hosts is None:
      a = gerrit_util.CookiesAuthenticator()
      self._all_hosts = [
          (h, u, s)
          for h, u, s in itertools.chain(
              ((h, u, '.netrc') for h, (u, _, _) in a.netrc.hosts.items()),
              ((h, u, '.gitcookies') for h, (u, _) in a.gitcookies.items())
          )
          if h.endswith(self._GOOGLESOURCE)
      ]

    if include_netrc:
      return self._all_hosts
    return [(h, u, s) for h, u, s in self._all_hosts if s != '.netrc']

  def print_current_creds(self, include_netrc=False):
    """Prints an aligned table of known credentials: host, user, source file."""
    hosts = sorted(self.get_hosts_with_creds(include_netrc=include_netrc))
    if not hosts:
      print('No Git/Gerrit credentials found')
      return
    # Size each column to its widest entry.
    lengths = [max(map(len, (row[i] for row in hosts))) for i in range(3)]
    header = [('Host', 'User', 'Which file'),
              ['=' * l for l in lengths]]
    for row in (header + hosts):
      print('\t'.join((('%%+%ds' % l) % s)
                      for l, s in zip(lengths, row)))

  @staticmethod
  def _parse_identity(identity):
    """Parses identity "git-<username>.domain" into <username> and domain."""
    # Special case: usernames that contain ".", which are generally not
    # distinguishable from sub-domains. But we do know typical domains:
    if identity.endswith('.chromium.org'):
      domain = 'chromium.org'
      username = identity[:-len('.chromium.org')]
    else:
      # NOTE(review): an identity with no '.' at all would raise ValueError
      # here on unpacking; assumed not to occur in practice.
      username, domain = identity.split('.', 1)
    if username.startswith('git-'):
      username = username[len('git-'):]
    return username, domain

  def _canonical_git_googlesource_host(self, host):
    """Normalizes Gerrit hosts (with '-review') to Git host."""
    assert host.endswith(self._GOOGLESOURCE)
    # Prefix doesn't include '.' at the end.
    prefix = host[:-(1 + len(self._GOOGLESOURCE))]
    if prefix.endswith('-review'):
      prefix = prefix[:-len('-review')]
    return prefix + '.' + self._GOOGLESOURCE

  def _canonical_gerrit_googlesource_host(self, host):
    """Normalizes a host to its Gerrit ('-review') counterpart."""
    git_host = self._canonical_git_googlesource_host(host)
    prefix = git_host.split('.', 1)[0]
    return prefix + '-review.' + self._GOOGLESOURCE

  def _get_counterpart_host(self, host):
    """Returns the Gerrit host for a Git host and vice versa."""
    assert host.endswith(self._GOOGLESOURCE)
    git = self._canonical_git_googlesource_host(host)
    gerrit = self._canonical_gerrit_googlesource_host(git)
    return git if gerrit == host else gerrit

  def has_generic_host(self):
    """Returns whether generic .googlesource.com has been configured.

    Chrome Infra recommends to use explicit ${host}.googlesource.com instead.
    """
    for host, _, _ in self.get_hosts_with_creds(include_netrc=False):
      if host == '.' + self._GOOGLESOURCE:
        return True
    return False

  def _get_git_gerrit_identity_pairs(self):
    """Returns map from canonic host to pair of identities (Git, Gerrit).

    One of identities might be None, meaning not configured.
    """
    host_to_identity_pairs = {}
    for host, identity, _ in self.get_hosts_with_creds():
      canonical = self._canonical_git_googlesource_host(host)
      pair = host_to_identity_pairs.setdefault(canonical, [None, None])
      # Slot 0 holds the Git identity, slot 1 the Gerrit ('-review') one.
      idx = 0 if canonical == host else 1
      pair[idx] = identity
    return host_to_identity_pairs

  def get_partially_configured_hosts(self):
    """Returns hosts that have a credential for only one of Git/Gerrit."""
    return set(
        (host if i1 else self._canonical_gerrit_googlesource_host(host))
        for host, (i1, i2) in self._get_git_gerrit_identity_pairs().items()
        if None in (i1, i2) and host != '.' + self._GOOGLESOURCE)

  def get_conflicting_hosts(self):
    """Returns Git hosts whose identity differs from the Gerrit host's."""
    return set(
        host
        for host, (i1, i2) in self._get_git_gerrit_identity_pairs().items()
        if None not in (i1, i2) and i1 != i2)

  def get_duplicated_hosts(self):
    """Returns hosts that appear more than once across credential sources."""
    counters = collections.Counter(h for h, _, _ in self.get_hosts_with_creds())
    return set(host for host, count in counters.items() if count > 1)

  @staticmethod
  def _format_hosts(hosts, extra_column_func=None):
    """Formats hosts (plus an optional extra column) as aligned report lines."""
    hosts = sorted(hosts)
    assert hosts
    if extra_column_func is None:
      extras = [''] * len(hosts)
    else:
      extras = [extra_column_func(host) for host in hosts]
    tmpl = '%%-%ds %%-%ds' % (max(map(len, hosts)), max(map(len, extras)))
    lines = []
    for he in zip(hosts, extras):
      lines.append(tmpl % he)
    return lines

  def _find_problems(self):
    """Yields (title, detail-lines, affected-hosts) tuples for each problem."""
    if self.has_generic_host():
      yield ('.googlesource.com wildcard record detected',
             ['Chrome Infrastructure team recommends to list full host names '
              'explicitly.'],
             None)

    dups = self.get_duplicated_hosts()
    if dups:
      yield ('The following hosts were defined twice',
             self._format_hosts(dups),
             None)

    partial = self.get_partially_configured_hosts()
    if partial:
      yield ('Credentials should come in pairs for Git and Gerrit hosts. '
             'These hosts are missing',
             self._format_hosts(partial, lambda host: 'but %s defined' %
                                self._get_counterpart_host(host)),
             partial)

    conflicting = self.get_conflicting_hosts()
    if conflicting:
      yield ('The following Git hosts have differing credentials from their '
             'Gerrit counterparts',
             self._format_hosts(conflicting, lambda host: '%s vs %s' %
                                tuple(self._get_git_gerrit_identity_pairs()[host])),
             conflicting)

  def find_and_report_problems(self):
    """Returns True if there was at least one problem, else False."""
    found = False
    bad_hosts = set()
    for title, sublines, hosts in self._find_problems():
      if not found:
        found = True
        print('\n\n.gitcookies problem report:\n')
      bad_hosts.update(hosts or [])
      print('  %s%s' % (title, (':' if sublines else '')))
      if sublines:
        print()
        print('    %s' % '\n    '.join(sublines))
      print()

    if bad_hosts:
      assert found
      print('  You can manually remove corresponding lines in your %s file and '
            'visit the following URLs with correct account to generate '
            'correct credential lines:\n' %
            gerrit_util.CookiesAuthenticator.get_gitcookies_path())
      print('  %s' % '\n  '.join(sorted(set(
          gerrit_util.CookiesAuthenticator().get_new_password_url(
              self._canonical_git_googlesource_host(host))
          for host in bad_hosts
      ))))
    return found
@metrics.collector.collect_metrics('git cl creds-check')
def CMDcreds_check(parser, args):
  """Checks credentials and suggests changes."""
  _, _ = parser.parse_args(args)

  # Code below checks .gitcookies. Abort if using something else.
  authn = gerrit_util.Authenticator.get()
  if not isinstance(authn, gerrit_util.CookiesAuthenticator):
    message = (
        'This command is not designed for bot environment. It checks '
        '~/.gitcookies file not generally used on bots.')
    # TODO(crbug.com/1059384): Automatically detect when running on cloudtop.
    if isinstance(authn, gerrit_util.GceAuthenticator):
      message += (
          '\n'
          'If you need to run this on GCE or a cloudtop instance, '
          'export SKIP_GCE_AUTH_FOR_GIT=1 in your env.')
    DieWithError(message)

  checker = _GitCookiesChecker()
  checker.ensure_configured_gitcookies()

  print('Your .netrc and .gitcookies have credentials for these hosts:')
  checker.print_current_creds(include_netrc=True)

  if not checker.find_and_report_problems():
    print('\nNo problems detected in your .gitcookies file.')
    return 0
  # Non-zero exit signals that problems were found and reported above.
  return 1
@metrics.collector.collect_metrics('git cl baseurl')
def CMDbaseurl(parser, args):
  """Gets or sets base-url for this branch."""
  branchref = scm.GIT.GetBranchRef(settings.GetRoot())
  branch = scm.GIT.ShortBranchName(branchref)
  _, args = parser.parse_args(args)
  config_key = 'branch.%s.base-url' % branch
  if args:
    # An argument was given: store it as the new base-url.
    print('Setting base-url to %s' % args[0])
    return RunGit(['config', config_key, args[0]],
                  error_ok=False).strip()
  # No argument: report the currently configured base-url.
  print('Current base-url:')
  return RunGit(['config', config_key],
                error_ok=False).strip()
def color_for_status(status):
  """Maps a Changelist status to color, for CMDstatus and other tools."""
  BOLD = '\033[1m'
  palette = {
      'unsent': Fore.YELLOW,
      'waiting': Fore.RED,
      'reply': Fore.YELLOW,
      'not lgtm': Fore.RED,
      'lgtm': Fore.GREEN,
      'commit': Fore.MAGENTA,
      'closed': Fore.CYAN,
      'error': Fore.WHITE,
  }
  if status in palette:
    return BOLD + palette[status]
  # Unknown statuses render plain white without bold.
  return Fore.WHITE
def get_cl_statuses(changes, fine_grained, max_processes=None):
  """Returns a blocking iterable of (cl, status) for given branches.

  If fine_grained is true, this will fetch CL statuses from the server.
  Otherwise, simply indicate if there's a matching url for the given branches.

  If max_processes is specified, it is used as the maximum number of processes
  to spawn to fetch CL status from the server. Otherwise 1 process per branch is
  spawned.

  See GetStatus() for a list of possible statuses.
  """
  if not changes:
    return

  if not fine_grained:
    # Fast path which doesn't involve querying codereview servers.
    # Do not use get_approving_reviewers(), since it requires an HTTP request.
    for cl in changes:
      yield (cl, 'waiting' if cl.GetIssueURL() else 'error')
    return

  # First, sort out authentication issues.
  logging.debug('ensuring credentials exist')
  for cl in changes:
    cl.EnsureAuthenticated(force=False, refresh=True)

  def fetch(cl):
    # Worker run on the thread pool; returns (cl, status) for one CL.
    try:
      return (cl, cl.GetStatus())
    except:
      # See http://crbug.com/629863.
      logging.exception('failed to fetch status for cl %s:', cl.GetIssue())
      raise

  threads_count = len(changes)
  if max_processes:
    threads_count = max(1, min(threads_count, max_processes))
  logging.debug('querying %d CLs using %d threads', len(changes), threads_count)

  pool = multiprocessing.pool.ThreadPool(threads_count)
  fetched_cls = set()
  try:
    it = pool.imap_unordered(fetch, changes).__iter__()
    while True:
      try:
        # IMapIterator.next() accepts a timeout (unlike builtin next());
        # a timeout or exhaustion both terminate the loop.
        cl, status = it.next(timeout=5)
      except (multiprocessing.TimeoutError, StopIteration):
        break
      fetched_cls.add(cl)
      yield cl, status
  finally:
    pool.close()

  # Add any branches that failed to fetch.
  for cl in set(changes) - fetched_cls:
    yield (cl, 'error')
def upload_branch_deps(cl, args, force=False):
  """Uploads CLs of local branches that are dependents of the current branch.

  If the local branch dependency tree looks like:

    test1 -> test2.1 -> test3.1
                     -> test3.2
          -> test2.2 -> test3.3

  and you run "git cl upload --dependencies" from test1 then "git cl upload" is
  run on the dependent branches in this order:
  test2.1, test3.1, test3.2, test2.2, test3.3

  Note: This function does not rebase your local dependent branches. Use it
        when you make a change to the parent branch that will not conflict
        with its dependent branches, and you would like their dependencies
        updated in Rietveld.
  """
  if git_common.is_dirty_git_tree('upload-branch-deps'):
    return 1

  root_branch = cl.GetBranch()
  if root_branch is None:
    DieWithError('Can\'t find dependent branches from detached HEAD state. '
                 'Get on a branch!')
  if not cl.GetIssue():
    DieWithError('Current branch does not have an uploaded CL. We cannot set '
                 'patchset dependencies without an uploaded CL.')

  branches = RunGit(['for-each-ref',
                     '--format=%(refname:short) %(upstream:short)',
                     'refs/heads'])
  if not branches:
    print('No local branches found.')
    return 0

  # Create a dictionary of all local branches to the branches that are
  # dependent on it.
  tracked_to_dependents = collections.defaultdict(list)
  for b in branches.splitlines():
    tokens = b.split()
    if len(tokens) == 2:
      branch_name, tracked = tokens
      tracked_to_dependents[tracked].append(branch_name)

  print()
  print('The dependent local branches of %s are:' % root_branch)
  dependents = []

  def traverse_dependents_preorder(branch, padding=''):
    # Depth-first preorder walk; appends to the enclosing 'dependents' list.
    dependents_to_process = tracked_to_dependents.get(branch, [])
    padding += '  '
    for dependent in dependents_to_process:
      print('%s%s' % (padding, dependent))
      dependents.append(dependent)
      traverse_dependents_preorder(dependent, padding)

  traverse_dependents_preorder(root_branch)
  print()

  if not dependents:
    print('There are no dependent local branches for %s' % root_branch)
    return 0

  if not force:
    confirm_or_exit('This command will checkout all dependent branches and run '
                    '"git cl upload".', action='continue')

  # Record all dependents that failed to upload.
  failures = {}
  # Go through all dependents, checkout the branch and upload.
  try:
    for dependent_branch in dependents:
      print()
      print('--------------------------------------')
      print('Running "git cl upload" from %s:' % dependent_branch)
      RunGit(['checkout', '-q', dependent_branch])
      print()
      try:
        if CMDupload(OptionParser(), args) != 0:
          print('Upload failed for %s!' % dependent_branch)
          failures[dependent_branch] = 1
      except:  # pylint: disable=bare-except
        failures[dependent_branch] = 1
      print()
  finally:
    # Swap back to the original root branch.
    RunGit(['checkout', '-q', root_branch])

  print()
  print('Upload complete for dependent branches!')
  for dependent_branch in dependents:
    upload_status = 'failed' if failures.get(dependent_branch) else 'succeeded'
    print('  %s : %s' % (dependent_branch, upload_status))
  print()
  return 0
def GetArchiveTagForBranch(issue_num, branch_name, existing_tags, pattern):
  """Returns a unique archive tag name for a branch.

  The proposed name comes from expanding pattern with the issue number and
  branch name. If that name already exists in existing_tags, numeric suffixes
  '-2', '-3', and so on are tried until a free name is found.
  """
  base_tag = pattern.format(issue=issue_num, branch=branch_name)
  if base_tag not in existing_tags:
    return base_tag
  suffix = 2
  while '%s-%d' % (base_tag, suffix) in existing_tags:
    suffix += 1
  return '%s-%d' % (base_tag, suffix)
@metrics.collector.collect_metrics('git cl archive')
def CMDarchive(parser, args):
  """Archives and deletes branches associated with closed changelists."""
  parser.add_option(
      '-j', '--maxjobs', action='store', type=int,
      help='The maximum number of jobs to use when retrieving review status.')
  parser.add_option(
      '-f', '--force', action='store_true',
      help='Bypasses the confirmation prompt.')
  parser.add_option(
      '-d', '--dry-run', action='store_true',
      help='Skip the branch tagging and removal steps.')
  parser.add_option(
      '-t', '--notags', action='store_true',
      help='Do not tag archived branches. '
           'Note: local commit history may be lost.')
  parser.add_option(
      '-p',
      '--pattern',
      default='git-cl-archived-{issue}-{branch}',
      help='Format string for archive tags. '
           'E.g. \'archived-{issue}-{branch}\'.')

  options, args = parser.parse_args(args)
  if args:
    parser.error('Unsupported args: %s' % ' '.join(args))

  branches = RunGit(['for-each-ref', '--format=%(refname)', 'refs/heads'])
  if not branches:
    return 0

  # Strip tag refs to their short names so they compare against proposed tags.
  tags = RunGit(['for-each-ref', '--format=%(refname)',
                 'refs/tags']).splitlines() or []
  tags = [t.split('/')[-1] for t in tags]

  print('Finding all branches associated with closed issues...')
  changes = [Changelist(branchref=b)
             for b in branches.splitlines()]
  alignment = max(5, max(len(c.GetBranch()) for c in changes))
  statuses = get_cl_statuses(changes,
                             fine_grained=True,
                             max_processes=options.maxjobs)
  # Pair each closed branch with a unique archive tag name.
  proposal = [(cl.GetBranch(),
               GetArchiveTagForBranch(cl.GetIssue(), cl.GetBranch(), tags,
                                      options.pattern))
              for cl, status in statuses
              if status in ('closed', 'rietveld-not-supported')]
  proposal.sort()

  if not proposal:
    print('No branches with closed codereview issues found.')
    return 0

  current_branch = scm.GIT.GetBranch(settings.GetRoot())

  print('\nBranches with closed issues that will be archived:\n')
  if options.notags:
    for next_item in proposal:
      print('  ' + next_item[0])
  else:
    print('%*s | %s' % (alignment, 'Branch name', 'Archival tag name'))
    for next_item in proposal:
      print('%*s %s' % (alignment, next_item[0], next_item[1]))

  # Quit now on precondition failure or if instructed by the user, either
  # via an interactive prompt or by command line flags.
  if options.dry_run:
    print('\nNo changes were made (dry run).\n')
    return 0
  elif any(branch == current_branch for branch, _ in proposal):
    print('You are currently on a branch \'%s\' which is associated with a '
          'closed codereview issue, so archive cannot proceed. Please '
          'checkout another branch and run this command again.' %
          current_branch)
    return 1
  elif not options.force:
    answer = gclient_utils.AskForData('\nProceed with deletion (Y/n)? ').lower()
    if answer not in ('y', ''):
      print('Aborted.')
      return 1

  for branch, tagname in proposal:
    if not options.notags:
      RunGit(['tag', tagname, branch])

    if RunGitWithCode(['branch', '-D', branch])[0] != 0:
      # Clean up the tag if we failed to delete the branch.
      RunGit(['tag', '-d', tagname])

  print('\nJob\'s done!')
  return 0
@metrics.collector.collect_metrics('git cl status')
def CMDstatus(parser, args):
  """Show status of changelists.

  Colors are used to tell the state of the CL unless --fast is used:
    - Blue waiting for review
    - Yellow waiting for you to reply to review, or not yet sent
    - Green LGTM'ed
    - Red 'not LGTM'ed
    - Magenta in the CQ
    - Cyan was committed, branch can be deleted
    - White error, or unknown status

  Also see 'git cl comments'.
  """
  parser.add_option(
      '--no-branch-color',
      action='store_true',
      help='Disable colorized branch names')
  parser.add_option('--field',
                    help='print only specific field (desc|id|patch|status|url)')
  parser.add_option('-f', '--fast', action='store_true',
                    help='Do not retrieve review status')
  parser.add_option(
      '-j', '--maxjobs', action='store', type=int,
      help='The maximum number of jobs to use when retrieving review status')
  parser.add_option(
      '-i', '--issue', type=int,
      help='Operate on this issue instead of the current branch\'s implicit '
           'issue. Requires --field to be set.')
  parser.add_option('-d',
                    '--date-order',
                    action='store_true',
                    help='Order branches by committer date.')
  options, args = parser.parse_args(args)
  if args:
    parser.error('Unsupported args: %s' % args)

  if options.issue is not None and not options.field:
    parser.error('--field must be given when --issue is set.')

  if options.field:
    # --field mode: print a single attribute of one CL and exit.
    cl = Changelist(issue=options.issue)
    if options.field.startswith('desc'):
      if cl.GetIssue():
        print(cl.FetchDescription())
    elif options.field == 'id':
      issueid = cl.GetIssue()
      if issueid:
        print(issueid)
    elif options.field == 'patch':
      patchset = cl.GetMostRecentPatchset()
      if patchset:
        print(patchset)
    elif options.field == 'status':
      print(cl.GetStatus())
    elif options.field == 'url':
      url = cl.GetIssueURL()
      if url:
        print(url)
    return 0

  branches = RunGit([
      'for-each-ref', '--format=%(refname) %(committerdate:unix)', 'refs/heads'
  ])
  if not branches:
    print('No local branch found.')
    return 0

  changes = [
      Changelist(branchref=b, commit_date=ct)
      for b, ct in map(lambda line: line.split(' '), branches.splitlines())
  ]

  print('Branches associated with reviews:')
  # Statuses are yielded lazily (and possibly out of order) by a thread pool.
  output = get_cl_statuses(changes,
                           fine_grained=not options.fast,
                           max_processes=options.maxjobs)

  current_branch = scm.GIT.GetBranch(settings.GetRoot())

  def FormatBranchName(branch, colorize=False):
    """Simulates 'git branch' behavior. Colorizes and prefixes branch name with
    an asterisk when it is the current branch."""

    asterisk = ""
    color = Fore.RESET
    if branch == current_branch:
      asterisk = "* "
      color = Fore.GREEN
    branch_name = scm.GIT.ShortBranchName(branch)

    if colorize:
      return asterisk + color + branch_name + Fore.RESET
    return asterisk + branch_name

  branch_statuses = {}

  alignment = max(5, max(len(FormatBranchName(c.GetBranch())) for c in changes))
  if options.date_order:
    sorted_changes = sorted(changes,
                            key=lambda c: c.GetCommitDate(),
                            reverse=True)
  else:
    sorted_changes = sorted(changes, key=lambda c: c.GetBranch())
  for cl in sorted_changes:
    branch = cl.GetBranch()
    # Drain the status generator until this branch's status arrives; statuses
    # for other branches are cached in branch_statuses for their turn.
    while branch not in branch_statuses:
      c, status = next(output)
      branch_statuses[c.GetBranch()] = status
    status = branch_statuses.pop(branch)
    url = cl.GetIssueURL(short=True)
    if url and (not status or status == 'error'):
      # The issue probably doesn't exist anymore.
      url += ' (broken)'

    color = color_for_status(status)
    # Turn off bold as well as colors.
    END = '\033[0m'
    reset = Fore.RESET + END
    if not setup_color.IS_TTY:
      color = ''
      reset = ''
    status_str = '(%s)' % status if status else ''

    branch_display = FormatBranchName(branch)
    padding = ' ' * (alignment - len(branch_display))
    if not options.no_branch_color:
      branch_display = FormatBranchName(branch, colorize=True)

    print('  %s : %s%s %s%s' % (padding + branch_display, color, url,
                                status_str, reset))

  print()
  print('Current branch: %s' % current_branch)
  for cl in changes:
    if cl.GetBranch() == current_branch:
      break
  if not cl.GetIssue():
    print('No issue assigned.')
    return 0
  print('Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL()))
  if not options.fast:
    print('Issue description:')
    print(cl.FetchDescription(pretty=True))
  return 0
def colorize_CMDstatus_doc():
  """To be called once in main() to add colors to git cl status help."""
  color_names = [attr for attr in dir(Fore) if attr[0].isupper()]

  def colorize_line(line):
    upper_line = line.upper()
    for color_name in color_names:
      if color_name not in upper_line:
        continue
      # Extract whitespace first and the leading '-'.
      indent = len(line) - len(line.lstrip(' ')) + 1
      return (line[:indent] + getattr(Fore, color_name) +
              line[indent:] + Fore.RESET)
    return line

  CMDstatus.__doc__ = '\n'.join(
      colorize_line(l) for l in CMDstatus.__doc__.splitlines())
def write_json(path, contents):
  """Serializes contents as JSON to the given path, or to stdout for '-'."""
  out = sys.stdout if path == '-' else open(path, 'w')
  try:
    json.dump(contents, out)
  finally:
    # Only close streams this function opened.
    if out is not sys.stdout:
      out.close()
@subcommand.usage('[issue_number]')
@metrics.collector.collect_metrics('git cl issue')
def CMDissue(parser, args):
  """Sets or displays the current code review issue number.

  Pass issue number 0 to clear the current issue.
  """
  parser.add_option('-r', '--reverse', action='store_true',
                    help='Lookup the branch(es) for the specified issues. If '
                         'no issues are specified, all branches with mapped '
                         'issues will be listed.')
  parser.add_option('--json',
                    help='Path to JSON output file, or "-" for stdout.')
  options, args = parser.parse_args(args)

  if options.reverse:
    branches = RunGit(['for-each-ref', 'refs/heads',
                       '--format=%(refname)']).splitlines()
    # Reverse issue lookup.
    issue_branch_map = {}

    # Read every branch.<name>.<issue-key> config entry in one git call.
    git_config = {}
    for config in RunGit(['config', '--get-regexp',
                          r'branch\..*issue']).splitlines():
      name, _space, val = config.partition(' ')
      git_config[name] = val

    for branch in branches:
      issue = git_config.get(
          'branch.%s.%s' % (scm.GIT.ShortBranchName(branch), ISSUE_CONFIG_KEY))
      if issue:
        issue_branch_map.setdefault(int(issue), []).append(branch)
    if not args:
      args = sorted(issue_branch_map.keys())
    result = {}
    for issue in args:
      try:
        issue_num = int(issue)
      except ValueError:
        print('ERROR cannot parse issue number: %s' % issue, file=sys.stderr)
        continue
      result[issue_num] = issue_branch_map.get(issue_num)
      print('Branch for issue number %s: %s' % (
          issue, ', '.join(issue_branch_map.get(issue_num) or ('None',))))
    if options.json:
      write_json(options.json, result)
    return 0

  if len(args) > 0:
    issue = ParseIssueNumberArgument(args[0])
    if not issue.valid:
      DieWithError('Pass a url or number to set the issue, 0 to unset it, '
                   'or no argument to list it.\n'
                   'Maybe you want to run git cl status?')
    cl = Changelist()
    cl.SetIssue(issue.issue)
  else:
    cl = Changelist()
  print('Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL()))
  if options.json:
    write_json(options.json, {
        'gerrit_host': cl.GetGerritHost(),
        'gerrit_project': cl.GetGerritProject(),
        'issue_url': cl.GetIssueURL(),
        'issue': cl.GetIssue(),
    })
  return 0
@metrics.collector.collect_metrics('git cl comments')
def CMDcomments(parser, args):
  """Shows or posts review comments for any changelist."""
  parser.add_option('-a', '--add-comment', dest='comment',
                    help='comment to add to an issue')
  parser.add_option('-p', '--publish', action='store_true',
                    help='marks CL as ready and sends comment to reviewers')
  parser.add_option('-i', '--issue', dest='issue',
                    help='review issue id (defaults to current issue).')
  parser.add_option('-m', '--machine-readable', dest='readable',
                    action='store_false', default=True,
                    help='output comments in a format compatible with '
                         'editor parsing')
  parser.add_option('-j', '--json-file',
                    help='File to write JSON summary to, or "-" for stdout')
  options, args = parser.parse_args(args)

  issue = None
  if options.issue:
    try:
      issue = int(options.issue)
    except ValueError:
      DieWithError('A review issue ID is expected to be a number.')

  cl = Changelist(issue=issue)

  if options.comment:
    cl.AddComment(options.comment, options.publish)
    return 0

  summary = sorted(cl.GetCommentsSummary(readable=options.readable),
                   key=lambda c: c.date)
  for comment in summary:
    # Color-code each comment; disapproval takes precedence over approval.
    if comment.disapproval:
      color = Fore.RED
    elif comment.approval:
      color = Fore.GREEN
    elif comment.sender == cl.GetIssueOwner():
      color = Fore.MAGENTA
    elif comment.autogenerated:
      color = Fore.CYAN
    else:
      color = Fore.BLUE
    print('\n%s%s %s%s\n%s' % (
        color,
        comment.date.strftime('%Y-%m-%d %H:%M:%S UTC'),
        comment.sender,
        Fore.RESET,
        '\n'.join('  ' + l for l in comment.message.strip().splitlines())))

  if options.json_file:
    def pre_serialize(c):
      # namedtuple -> dict, with the datetime flattened to a string.
      dct = c._asdict().copy()
      dct['date'] = dct['date'].strftime('%Y-%m-%d %H:%M:%S.%f')
      return dct
    write_json(options.json_file, [pre_serialize(x) for x in summary])
  return 0
@subcommand.usage('[codereview url or issue id]')
@metrics.collector.collect_metrics('git cl description')
def CMDdescription(parser, args):
  """Brings up the editor for the current CL's description."""
  parser.add_option('-d', '--display', action='store_true',
                    help='Display the description instead of opening an editor')
  parser.add_option('-n', '--new-description',
                    help='New description to set for this issue (- for stdin, '
                         '+ to load from local commit HEAD)')
  parser.add_option('-f', '--force', action='store_true',
                    help='Delete any unpublished Gerrit edits for this issue '
                         'without prompting')
  options, args = parser.parse_args(args)

  target_issue_arg = None
  if len(args) > 0:
    target_issue_arg = ParseIssueNumberArgument(args[0])
    if not target_issue_arg.valid:
      parser.error('Invalid issue ID or URL.')

  kwargs = {}
  if target_issue_arg:
    kwargs['issue'] = target_issue_arg.issue
    kwargs['codereview_host'] = target_issue_arg.hostname

  cl = Changelist(**kwargs)
  if not cl.GetIssue():
    DieWithError('This branch has no associated changelist.')

  if args and not args[0].isdigit():
    logging.info('canonical issue/change URL: %s\n', cl.GetIssueURL())

  description = ChangeDescription(cl.FetchDescription())

  if options.display:
    print(description.description)
    return 0

  if options.new_description:
    text = options.new_description
    if text == '-':
      # '-': read the replacement description from stdin.
      text = '\n'.join(l.rstrip() for l in sys.stdin)
    elif text == '+':
      # '+': build the description from the local commit log.
      base_branch = cl.GetCommonAncestorWithUpstream()
      text = _create_description_from_log([base_branch])

    description.set_description(text)
  else:
    description.prompt()
  if cl.FetchDescription().strip() != description.description:
    # Only push an update when the text actually changed.
    cl.UpdateDescription(description.description, force=options.force)
  return 0
@metrics.collector.collect_metrics('git cl lint')
def CMDlint(parser, args):
  """Runs cpplint on the current changelist."""
  parser.add_option('--filter', action='append', metavar='-x,+y',
                    help='Comma-separated list of cpplint\'s category-filters')
  options, args = parser.parse_args(args)

  # Access to a protected member _XX of a client class
  # pylint: disable=protected-access
  try:
    import cpplint
    import cpplint_chromium
  except ImportError:
    print('Your depot_tools is missing cpplint.py and/or cpplint_chromium.py.')
    return 1

  # Change the current working directory before calling lint so that it
  # shows the correct base.
  previous_cwd = os.getcwd()
  os.chdir(settings.GetRoot())
  try:
    cl = Changelist()
    files = cl.GetAffectedFiles(cl.GetCommonAncestorWithUpstream())
    if not files:
      print('Cannot lint an empty CL')
      return 1

    # Process cpplint arguments, if any.
    filters = presubmit_canned_checks.GetCppLintFilters(options.filter)
    command = ['--filter=' + ','.join(filters)] + args + files
    filenames = cpplint.ParseArguments(command)

    include_regex = re.compile(settings.GetLintRegex())
    ignore_regex = re.compile(settings.GetLintIgnoreRegex())
    extra_check_functions = [cpplint_chromium.CheckPointerDeclarationWhitespace]
    for filename in filenames:
      if not include_regex.match(filename):
        print('Skipping file %s' % filename)
        continue

      if ignore_regex.match(filename):
        print('Ignoring file %s' % filename)
        continue

      cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level,
                          extra_check_functions)
  finally:
    # Always restore the caller's working directory.
    os.chdir(previous_cwd)
  print('Total errors found: %d\n' % cpplint._cpplint_state.error_count)
  if cpplint._cpplint_state.error_count != 0:
    return 1
  return 0
@metrics.collector.collect_metrics('git cl presubmit')
def CMDpresubmit(parser, args):
  """Runs presubmit tests on the current changelist.

  An optional positional argument names the base branch to diff against;
  otherwise the common ancestor with the upstream branch is used.
  """
  parser.add_option('-u', '--upload', action='store_true',
                    help='Run upload hook instead of the push hook')
  parser.add_option('-f', '--force', action='store_true',
                    help='Run checks even if tree is dirty')
  parser.add_option('--all', action='store_true',
                    help='Run checks against all files, not just modified ones')
  parser.add_option('--parallel', action='store_true',
                    help='Run all tests specified by input_api.RunTests in all '
                         'PRESUBMIT files in parallel.')
  parser.add_option('--resultdb', action='store_true',
                    help='Run presubmit checks in the ResultSink environment '
                         'and send results to the ResultDB database.')
  parser.add_option('--realm', help='LUCI realm if reporting to ResultDB')
  options, args = parser.parse_args(args)

  if not options.force and git_common.is_dirty_git_tree('presubmit'):
    print('use --force to check even if tree is dirty.')
    return 1

  cl = Changelist()
  if args:
    base_branch = args[0]
  else:
    # Default to diffing against the common ancestor of the upstream branch.
    base_branch = cl.GetCommonAncestorWithUpstream()

  # Prefer the server-side description when an issue exists; otherwise
  # synthesize one from the local commit log.
  if cl.GetIssue():
    description = cl.FetchDescription()
  else:
    description = _create_description_from_log([base_branch])

  cl.RunHook(
      committing=not options.upload,
      may_prompt=False,
      verbose=options.verbose,
      parallel=options.parallel,
      upstream=base_branch,
      description=description,
      all_files=options.all,
      resultdb=options.resultdb,
      realm=options.realm)
  return 0
def GenerateGerritChangeId(message):
  """Returns the Change ID footer value (Ixxxxxx...xxx).

  Works the same way as
  https://gerrit-review.googlesource.com/tools/hooks/commit-msg
  but can be called on demand on all platforms.

  The basic idea is to generate git hash of a state of the tree, original
  commit message, author/committer info and timestamps.

  Args:
    message: the commit message to fold into the hash.
  """
  lines = []
  tree_hash = RunGitSilent(['write-tree'])
  lines.append('tree %s' % tree_hash.strip())
  # Include the parent commit only if HEAD resolves (exit code 0).
  code, parent = RunGitWithCode(['rev-parse', 'HEAD~0'], suppress_stderr=False)
  if code == 0:
    lines.append('parent %s' % parent.strip())
  author = RunGitSilent(['var', 'GIT_AUTHOR_IDENT'])
  lines.append('author %s' % author.strip())
  committer = RunGitSilent(['var', 'GIT_COMMITTER_IDENT'])
  lines.append('committer %s' % committer.strip())
  # Blank line separates the pseudo-commit header from the message body.
  lines.append('')
  # Note: Gerrit's commit-hook actually cleans message of some lines and
  # whitespace. This code is not doing this, but it clearly won't decrease
  # entropy.
  lines.append(message)
  change_hash = RunCommand(['git', 'hash-object', '-t', 'commit', '--stdin'],
                           stdin=('\n'.join(lines)).encode())
  return 'I%s' % change_hash.strip()
def GetTargetRef(remote, remote_branch, target_branch):
  """Computes the remote branch ref to use for the CL.

  Args:
    remote (str): The git remote for the CL.
    remote_branch (str): The git remote branch for the CL.
    target_branch (str): The target branch specified by the user.

  Returns:
    The full remote ref (e.g. 'refs/heads/main'), or None when either
    remote or remote_branch is missing.
  """
  if not (remote and remote_branch):
    return None

  if target_branch:
    # Canonicalize branch references to the equivalent local full symbolic
    # refs, which are then translated into the remote full symbolic refs
    # below.
    if '/' not in target_branch:
      remote_branch = 'refs/remotes/%s/%s' % (remote, target_branch)
    else:
      # Ordered prefix rewrites; the first matching pattern wins.
      prefix_replacements = (
          ('^((refs/)?remotes/)?branch-heads/', 'refs/remotes/branch-heads/'),
          ('^((refs/)?remotes/)?%s/' % remote, 'refs/remotes/%s/' % remote),
          ('^(refs/)?heads/', 'refs/remotes/%s/' % remote),
      )
      match = None
      for regex, replacement in prefix_replacements:
        match = re.search(regex, target_branch)
        if match:
          remote_branch = target_branch.replace(match.group(0), replacement)
          break
      if not match:
        # This is a branch path but not one we recognize; use as-is.
        remote_branch = target_branch
  elif remote_branch in REFS_THAT_ALIAS_TO_OTHER_REFS:
    # Handle the refs that need to land in different refs.
    remote_branch = REFS_THAT_ALIAS_TO_OTHER_REFS[remote_branch]

  # Migration to new default branch, only if available on remote.
  allow_push_on_master = bool(os.environ.get("ALLOW_PUSH_TO_MASTER", None))
  if remote_branch == DEFAULT_OLD_BRANCH and not allow_push_on_master:
    # show-branch returns non-empty output only when the new branch exists.
    if RunGit(['show-branch', DEFAULT_NEW_BRANCH], error_ok=True,
              stderr=subprocess2.PIPE):
      # TODO(crbug.com/ID): Print location to local git migration script.
      print("WARNING: Using new branch name %s instead of %s" % (
          DEFAULT_NEW_BRANCH, DEFAULT_OLD_BRANCH))
      remote_branch = DEFAULT_NEW_BRANCH

  # Create the true path to the remote branch.
  # Does the following translation:
  # * refs/remotes/origin/refs/diff/test -> refs/diff/test
  # * refs/remotes/origin/main -> refs/heads/main
  # * refs/remotes/branch-heads/test -> refs/branch-heads/test
  if remote_branch.startswith('refs/remotes/%s/refs/' % remote):
    remote_branch = remote_branch.replace('refs/remotes/%s/' % remote, '')
  elif remote_branch.startswith('refs/remotes/%s/' % remote):
    remote_branch = remote_branch.replace('refs/remotes/%s/' % remote,
                                          'refs/heads/')
  elif remote_branch.startswith('refs/remotes/branch-heads'):
    remote_branch = remote_branch.replace('refs/remotes/', 'refs/')

  return remote_branch
def cleanup_list(l):
  """Fixes a list so that comma separated items are put as individual items.

  So that "--reviewers joe@c,john@c --reviewers joa@c" results in
  options.reviewers == sorted(['joe@c', 'john@c', 'joa@c']).

  Args:
    l: list of strings, each possibly containing comma-separated items.

  Returns:
    Sorted list of the individual non-empty, whitespace-stripped items.
  """
  # A nested comprehension flattens in one pass; the previous
  # sum((...), []) form rebuilt the accumulator list per element
  # (quadratic in the number of items).
  stripped_items = (piece.strip() for item in l for piece in item.split(','))
  # filter(None, ...) drops empty strings left by stray/trailing commas.
  return sorted(filter(None, stripped_items))
@subcommand.usage('[flags]')
@metrics.collector.collect_metrics('git cl upload')
def CMDupload(parser, args):
  """Uploads the current changelist to codereview.

  Can skip dependency patchset uploads for a branch by running:
    git config branch.branch_name.skip-deps-uploads True
  To unset, run:
    git config --unset branch.branch_name.skip-deps-uploads
  Can also set the above globally by using the --global flag.

  If the name of the checked out branch starts with "bug-" or "fix-" followed
  by a bug number, this bug number is automatically populated in the CL
  description.

  If subject contains text in square brackets or has "<text>: " prefix, such
  text(s) is treated as Gerrit hashtags. For example, CLs with subjects:
    [git-cl] add support for hashtags
    Foo bar: implement foo
  will be hashtagged with "git-cl" and "foo-bar" respectively.
  """
  parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
                    help='bypass upload presubmit hook')
  parser.add_option('--bypass-watchlists', action='store_true',
                    dest='bypass_watchlists',
                    help='bypass watchlists auto CC-ing reviewers')
  parser.add_option('-f', '--force', action='store_true', dest='force',
                    help="force yes to questions (don't prompt)")
  parser.add_option('--message', '-m', dest='message',
                    help='message for patchset')
  parser.add_option('-b', '--bug',
                    help='pre-populate the bug number(s) for this issue. '
                         'If several, separate with commas')
  parser.add_option('--message-file', dest='message_file',
                    help='file which contains message for patchset')
  parser.add_option('--title', '-t', dest='title',
                    help='title for patchset')
  parser.add_option('-T', '--skip-title', action='store_true',
                    dest='skip_title',
                    help='Use the most recent commit message as the title of '
                         'the patchset')
  parser.add_option('-r', '--reviewers',
                    action='append', default=[],
                    help='reviewer email addresses')
  parser.add_option('--tbrs',
                    action='append', default=[],
                    help='TBR email addresses')
  parser.add_option('--cc',
                    action='append', default=[],
                    help='cc email addresses')
  parser.add_option('--hashtag', dest='hashtags',
                    action='append', default=[],
                    help=('Gerrit hashtag for new CL; '
                          'can be applied multiple times'))
  parser.add_option('-s', '--send-mail', action='store_true',
                    help='send email to reviewer(s) and cc(s) immediately')
  parser.add_option('--target_branch',
                    '--target-branch',
                    metavar='TARGET',
                    help='Apply CL to remote ref TARGET. ' +
                         'Default: remote branch head, or main')
  parser.add_option('--squash', action='store_true',
                    help='Squash multiple commits into one')
  parser.add_option('--no-squash', action='store_false', dest='squash',
                    help='Don\'t squash multiple commits into one')
  parser.add_option('--topic', default=None,
                    help='Topic to specify when uploading')
  parser.add_option('--tbr-owners', dest='add_owners_to', action='store_const',
                    const='TBR', help='add a set of OWNERS to TBR')
  parser.add_option('--r-owners', dest='add_owners_to', action='store_const',
                    const='R', help='add a set of OWNERS to R')
  parser.add_option('-c', '--use-commit-queue', action='store_true',
                    default=False,
                    help='tell the CQ to commit this patchset; '
                         'implies --send-mail')
  parser.add_option('-d', '--cq-dry-run',
                    action='store_true', default=False,
                    help='Send the patchset to do a CQ dry run right after '
                         'upload.')
  parser.add_option(
      '-q',
      '--cq-quick-run',
      action='store_true',
      default=False,
      help='Send the patchset to do a CQ quick run right after '
           'upload (https://source.chromium.org/chromium/chromium/src/+/main:do'
           'cs/cq_quick_run.md) (chromium only).')
  parser.add_option('--set-bot-commit', action='store_true',
                    help=optparse.SUPPRESS_HELP)
  parser.add_option('--preserve-tryjobs', action='store_true',
                    help='instruct the CQ to let tryjobs running even after '
                         'new patchsets are uploaded instead of canceling '
                         'prior patchset\' tryjobs')
  parser.add_option('--dependencies', action='store_true',
                    help='Uploads CLs of all the local branches that depend on '
                         'the current branch')
  parser.add_option('-a', '--enable-auto-submit', action='store_true',
                    help='Sends your change to the CQ after an approval. Only '
                         'works on repos that have the Auto-Submit label '
                         'enabled')
  parser.add_option('--parallel', action='store_true',
                    help='Run all tests specified by input_api.RunTests in all '
                         'PRESUBMIT files in parallel.')
  parser.add_option('--no-autocc', action='store_true',
                    help='Disables automatic addition of CC emails')
  parser.add_option('--private', action='store_true',
                    help='Set the review private. This implies --no-autocc.')
  parser.add_option('-R', '--retry-failed', action='store_true',
                    help='Retry failed tryjobs from old patchset immediately '
                         'after uploading new patchset. Cannot be used with '
                         '--use-commit-queue or --cq-dry-run.')
  parser.add_option('--buildbucket-host', default='cr-buildbucket.appspot.com',
                    help='Host of buildbucket. The default host is %default.')
  parser.add_option('--fixed', '-x',
                    help='List of bugs that will be commented on and marked '
                         'fixed (pre-populates "Fixed:" tag). Same format as '
                         '-b option / "Bug:" tag. If fixing several issues, '
                         'separate with commas.')
  parser.add_option('--edit-description', action='store_true', default=False,
                    help='Modify description before upload. Cannot be used '
                         'with --force. It is a noop when --no-squash is set '
                         'or a new commit is created.')
  parser.add_option('--git-completion-helper', action="store_true",
                    help=optparse.SUPPRESS_HELP)
  parser.add_option('--resultdb', action='store_true',
                    help='Run presubmit checks in the ResultSink environment '
                         'and send results to the ResultDB database.')
  parser.add_option('--realm', help='LUCI realm if reporting to ResultDB')
  parser.add_option('-o',
                    '--push-options',
                    action='append',
                    default=[],
                    help='Transmit the given string to the server when '
                         'performing git push (pass-through). See git-push '
                         'documentation for more details.')
  parser.add_option('--no-add-changeid',
                    action='store_true',
                    dest='no_add_changeid',
                    help='Do not add change-ids to messages.')

  # Keep the raw args around: CMDUpload needs them for dependent-branch
  # uploads triggered by --dependencies.
  orig_args = args
  (options, args) = parser.parse_args(args)

  if options.git_completion_helper:
    # Shell-completion mode: print visible flags and exit.
    print(' '.join(opt.get_opt_string() for opt in parser.option_list
                   if opt.help != optparse.SUPPRESS_HELP))
    return

  if git_common.is_dirty_git_tree('upload'):
    return 1

  options.reviewers = cleanup_list(options.reviewers)
  options.tbrs = cleanup_list(options.tbrs)
  options.cc = cleanup_list(options.cc)

  # Validate mutually-exclusive flag combinations before any real work.
  if options.edit_description and options.force:
    parser.error('Only one of --force and --edit-description allowed')

  if options.message_file:
    if options.message:
      parser.error('Only one of --message and --message-file allowed.')
    options.message = gclient_utils.FileRead(options.message_file)

  if ([options.cq_dry_run,
       options.cq_quick_run,
       options.use_commit_queue,
       options.retry_failed].count(True) > 1):
    parser.error('Only one of --use-commit-queue, --cq-dry-run, --cq-quick-run '
                 'or --retry-failed is allowed.')

  if options.skip_title and options.title:
    parser.error('Only one of --title and --skip-title allowed.')

  if options.use_commit_queue:
    options.send_mail = True

  if options.squash is None:
    # Load default for user, repo, squash=true, in this order.
    options.squash = settings.GetSquashGerritUploads()

  cl = Changelist(branchref=options.target_branch)
  # Warm change details cache now to avoid RPCs later, reducing latency for
  # developers.
  if cl.GetIssue():
    cl._GetChangeDetail(
        ['DETAILED_ACCOUNTS', 'CURRENT_REVISION', 'CURRENT_COMMIT', 'LABELS'])

  if options.retry_failed and not cl.GetIssue():
    print('No previous patchsets, so --retry-failed has no effect.')
    options.retry_failed = False

  # cl.GetMostRecentPatchset uses cached information, and can return the last
  # patchset before upload. Calling it here makes it clear that it's the
  # last patchset before upload. Note that GetMostRecentPatchset will fail
  # if no CL has been uploaded yet.
  if options.retry_failed:
    patchset = cl.GetMostRecentPatchset()

  ret = cl.CMDUpload(options, args, orig_args)

  if options.retry_failed:
    if ret != 0:
      print('Upload failed, so --retry-failed has no effect.')
      return ret
    # Retrigger the tryjobs that failed on the pre-upload patchset against
    # the newly uploaded one (patchset + 1).
    builds, _ = _fetch_latest_builds(
        cl, options.buildbucket_host, latest_patchset=patchset)
    jobs = _filter_failed_for_retry(builds)
    if len(jobs) == 0:
      print('No failed tryjobs, so --retry-failed has no effect.')
      return ret
    _trigger_tryjobs(cl, jobs, options, patchset + 1)

  return ret
@subcommand.usage('--description=<description file>')
@metrics.collector.collect_metrics('git cl split')
def CMDsplit(parser, args):
  """Splits a branch into smaller branches and uploads CLs.

  Creates a branch and uploads a CL for each group of files modified in the
  current branch that share a common OWNERS file. In the CL description and
  comment, the string '$directory', is replaced with the directory containing
  the shared OWNERS file.
  """
  parser.add_option('-d', '--description', dest='description_file',
                    help='A text file containing a CL description in which '
                         '$directory will be replaced by each CL\'s directory.')
  parser.add_option('-c', '--comment', dest='comment_file',
                    help='A text file containing a CL comment.')
  parser.add_option('-n', '--dry-run', dest='dry_run', action='store_true',
                    default=False,
                    help='List the files and reviewers for each CL that would '
                         'be created, but don\'t create branches or CLs.')
  parser.add_option('--cq-dry-run', action='store_true',
                    help='If set, will do a cq dry run for each uploaded CL. '
                         'Please be careful when doing this; more than ~10 CLs '
                         'has the potential to overload our build '
                         'infrastructure. Try to upload these not during high '
                         'load times (usually 11-3 Mountain View time). Email '
                         'infra-dev@chromium.org with any questions.')
  parser.add_option('-a', '--enable-auto-submit', action='store_true',
                    default=True,
                    help='Sends your change to the CQ after an approval. Only '
                         'works on repos that have the Auto-Submit label '
                         'enabled')
  options, _ = parser.parse_args(args)

  if not options.description_file:
    parser.error('No --description flag specified.')

  # Adapter so split_cl can upload each sub-CL through the regular
  # `git cl upload` entry point with a fresh parser.
  def WrappedCMDupload(args):
    return CMDupload(OptionParser(), args)

  return split_cl.SplitCl(
      options.description_file, options.comment_file, Changelist,
      WrappedCMDupload, options.dry_run, options.cq_dry_run,
      options.enable_auto_submit, settings.GetRoot())
@subcommand.usage('DEPRECATED')
@metrics.collector.collect_metrics('git cl commit')
def CMDdcommit(parser, args):
  """DEPRECATED: Used to commit the current changelist via git-svn."""
  # git-svn landing is gone; tell the user what to run instead and fail.
  print('git-cl no longer supports committing to SVN repositories via '
        'git-svn. You probably want to use `git cl land` instead.')
  return 1
@subcommand.usage('[upstream branch to apply against]')
@metrics.collector.collect_metrics('git cl land')
def CMDland(parser, args):
  """Commits the current changelist via git.

  In case of Gerrit, uses Gerrit REST api to "submit" the issue, which pushes
  upstream and closes the issue automatically and atomically.
  """
  parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
                    help='bypass upload presubmit hook')
  parser.add_option('-f', '--force', action='store_true', dest='force',
                    help="force yes to questions (don't prompt)")
  parser.add_option('--parallel', action='store_true',
                    help='Run all tests specified by input_api.RunTests in all '
                         'PRESUBMIT files in parallel.')
  parser.add_option('--resultdb', action='store_true',
                    help='Run presubmit checks in the ResultSink environment '
                         'and send results to the ResultDB database.')
  parser.add_option('--realm', help='LUCI realm if reporting to ResultDB')
  (options, args) = parser.parse_args(args)

  cl = Changelist()

  # Landing requires an already-uploaded Gerrit change.
  if not cl.GetIssue():
    DieWithError('You must upload the change first to Gerrit.\n'
                 '  If you would rather have `git cl land` upload '
                 'automatically for you, see http://crbug.com/642759')
  return cl.CMDLand(options.force, options.bypass_hooks, options.verbose,
                    options.parallel, options.resultdb, options.realm)
@subcommand.usage('<patch url or issue id or issue url>')
@metrics.collector.collect_metrics('git cl patch')
def CMDpatch(parser, args):
  """Patches in a code review.

  Either applies the given issue/URL onto the current (or a new) branch, or
  with --reapply resets the current branch and re-applies its own issue.
  """
  parser.add_option('-b', dest='newbranch',
                    help='create a new branch off trunk for the patch')
  parser.add_option('-f', '--force', action='store_true',
                    help='overwrite state on the current or chosen branch')
  parser.add_option('-n', '--no-commit', action='store_true', dest='nocommit',
                    help='don\'t commit after patch applies.')

  group = optparse.OptionGroup(
      parser,
      'Options for continuing work on the current issue uploaded from a '
      'different clone (e.g. different machine). Must be used independently '
      'from the other options. No issue number should be specified, and the '
      'branch must have an issue number associated with it')
  group.add_option('--reapply', action='store_true', dest='reapply',
                   help='Reset the branch and reapply the issue.\n'
                        'CAUTION: This will undo any local changes in this '
                        'branch')
  group.add_option('--pull', action='store_true', dest='pull',
                   help='Performs a pull before reapplying.')
  parser.add_option_group(group)

  (options, args) = parser.parse_args(args)

  if options.reapply:
    # --reapply operates only on the current branch's own issue.
    if options.newbranch:
      parser.error('--reapply works on the current branch only.')
    if len(args) > 0:
      parser.error('--reapply implies no additional arguments.')

    cl = Changelist()
    if not cl.GetIssue():
      parser.error('Current branch must have an associated issue.')

    upstream = cl.GetUpstreamBranch()
    if upstream is None:
      parser.error('No upstream branch specified. Cannot reset branch.')

    # Discard local work, optionally sync, then re-apply the issue.
    RunGit(['reset', '--hard', upstream])
    if options.pull:
      RunGit(['pull'])

    target_issue_arg = ParseIssueNumberArgument(cl.GetIssue())
    return cl.CMDPatchWithParsedIssue(target_issue_arg, options.nocommit, False,
                                      False)

  if len(args) != 1 or not args[0]:
    parser.error('Must specify issue number or URL.')

  target_issue_arg = ParseIssueNumberArgument(args[0])
  if not target_issue_arg.valid:
    parser.error('Invalid issue ID or URL.')

  # We don't want uncommitted changes mixed up with the patch.
  if git_common.is_dirty_git_tree('patch'):
    return 1

  if options.newbranch:
    if options.force:
      # Delete a pre-existing branch of the same name; errors are ignored.
      RunGit(['branch', '-D', options.newbranch],
             stderr=subprocess2.PIPE, error_ok=True)
    git_new_branch.create_new_branch(options.newbranch)

  cl = Changelist(
      codereview_host=target_issue_arg.hostname, issue=target_issue_arg.issue)

  if not args[0].isdigit():
    print('canonical issue/change URL: %s\n' % cl.GetIssueURL())

  return cl.CMDPatchWithParsedIssue(target_issue_arg, options.nocommit,
                                    options.force, options.newbranch)
def GetTreeStatus(url=None):
  """Fetches the tree status and returns either 'open', 'closed',
  'unknown' or 'unset'.

  Args:
    url: tree-status URL; defaults to the configured tree status URL.
        Returns 'unset' when no URL is configured.
  """
  url = url or settings.GetTreeStatusUrl(error_ok=True)
  if url:
    # read() returns bytes; the previous str(...) wrapping produced a
    # "b'...'" repr, so the exact comparisons against '0'/'1' could never
    # match. Decode the payload instead, and close the connection via
    # the context manager.
    with urllib.request.urlopen(url) as connection:
      status = connection.read().decode('utf-8', 'replace').lower()
    if status.find('closed') != -1 or status == '0':
      return 'closed'
    elif status.find('open') != -1 or status == '1':
      return 'open'
    return 'unknown'
  return 'unset'
def GetTreeStatusReason():
  """Fetches the tree status from a json url and returns the message
  with the reason for the tree to be opened or closed.

  Returns:
    The 'message' field of the tree-status JSON payload.
  """
  url = settings.GetTreeStatusUrl()
  json_url = urllib.parse.urljoin(url, '/current?format=json')
  # 'with' guarantees the connection is closed even when read()/json.loads
  # raises; the previous explicit close() leaked the connection on error.
  with urllib.request.urlopen(json_url) as connection:
    status = json.loads(connection.read())
  return status['message']
@metrics.collector.collect_metrics('git cl tree')
def CMDtree(parser, args):
  """Shows the status of the tree."""
  _, args = parser.parse_args(args)
  tree_state = GetTreeStatus()
  if tree_state == 'unset':
    # Nothing to report without a configured tree-status URL.
    print('You must configure your tree status URL by running "git cl config".')
    return 2

  print('The tree is %s' % tree_state)
  print()
  print(GetTreeStatusReason())
  # Exit non-zero for anything other than an open tree.
  return 0 if tree_state == 'open' else 1
@metrics.collector.collect_metrics('git cl try')
def CMDtry(parser, args):
  """Triggers tryjobs using either Buildbucket or CQ dry run.

  With --bot, schedules the named builders directly on Buildbucket; with
  --retry-failed, re-triggers the failed jobs of the latest patchset;
  otherwise falls back to a CQ quick/dry run.
  """
  group = optparse.OptionGroup(parser, 'Tryjob options')
  group.add_option(
      '-b', '--bot', action='append',
      help=('IMPORTANT: specify ONE builder per --bot flag. Use it multiple '
            'times to specify multiple builders. ex: '
            '"-b win_rel -b win_layout". See '
            'the try server waterfall for the builders name and the tests '
            'available.'))
  group.add_option(
      '-B', '--bucket', default='',
      help=('Buildbucket bucket to send the try requests.'))
  group.add_option(
      '-r', '--revision',
      help='Revision to use for the tryjob; default: the revision will '
           'be determined by the try recipe that builder runs, which usually '
           'defaults to HEAD of origin/master or origin/main')
  group.add_option(
      '-c', '--clobber', action='store_true', default=False,
      help='Force a clobber before building; that is don\'t do an '
           'incremental build')
  group.add_option(
      '-q',
      '--quick-run',
      action='store_true',
      default=False,
      help='trigger in quick run mode '
           '(https://source.chromium.org/chromium/chromium/src/+/main:docs/cq_q'
           'uick_run.md) (chromium only).')
  group.add_option(
      '--category', default='git_cl_try', help='Specify custom build category.')
  group.add_option(
      '--project',
      help='Override which project to use. Projects are defined '
           'in recipe to determine to which repository or directory to '
           'apply the patch')
  group.add_option(
      '-p', '--property', dest='properties', action='append', default=[],
      help='Specify generic properties in the form -p key1=value1 -p '
           'key2=value2 etc. The value will be treated as '
           'json if decodable, or as string otherwise. '
           'NOTE: using this may make your tryjob not usable for CQ, '
           'which will then schedule another tryjob with default properties')
  group.add_option(
      '--buildbucket-host', default='cr-buildbucket.appspot.com',
      help='Host of buildbucket. The default host is %default.')
  parser.add_option_group(group)
  parser.add_option(
      '-R', '--retry-failed', action='store_true', default=False,
      help='Retry failed jobs from the latest set of tryjobs. '
           'Not allowed with --bucket and --bot options.')
  parser.add_option(
      '-i', '--issue', type=int,
      help='Operate on this issue instead of the current branch\'s implicit '
           'issue.')
  options, args = parser.parse_args(args)

  # Make sure that all properties are prop=value pairs.
  bad_params = [x for x in options.properties if '=' not in x]
  if bad_params:
    parser.error('Got properties with missing "=": %s' % bad_params)

  if args:
    parser.error('Unknown arguments: %s' % args)

  cl = Changelist(issue=options.issue)
  if not cl.GetIssue():
    parser.error('Need to upload first.')

  # HACK: warm up Gerrit change detail cache to save on RPCs.
  cl._GetChangeDetail(['DETAILED_ACCOUNTS', 'ALL_REVISIONS'])

  error_message = cl.CannotTriggerTryJobReason()
  if error_message:
    parser.error('Can\'t trigger tryjobs: %s' % error_message)

  if options.bot:
    if options.retry_failed:
      parser.error('--bot is not compatible with --retry-failed.')
    if not options.bucket:
      parser.error('A bucket (e.g. "chromium/try") is required.')

    triggered = [b for b in options.bot if 'triggered' in b]
    if triggered:
      parser.error(
          'Cannot schedule builds on triggered bots: %s.\n'
          'This type of bot requires an initial job from a parent (usually a '
          'builder). Schedule a job on the parent instead.\n' % triggered)

    if options.bucket.startswith('.master'):
      parser.error('Buildbot masters are not supported.')

    project, bucket = _parse_bucket(options.bucket)
    if project is None or bucket is None:
      parser.error('Invalid bucket: %s.' % options.bucket)
    jobs = sorted((project, bucket, bot) for bot in options.bot)
  elif options.retry_failed:
    print('Searching for failed tryjobs...')
    builds, patchset = _fetch_latest_builds(cl, options.buildbucket_host)
    if options.verbose:
      print('Got %d builds in patchset #%d' % (len(builds), patchset))
    jobs = _filter_failed_for_retry(builds)
    if not jobs:
      print('There are no failed jobs in the latest set of jobs '
            '(patchset #%d), doing nothing.' % patchset)
      return 0
    # Guard against accidentally re-triggering a very large set of builders.
    num_builders = len(jobs)
    if num_builders > 10:
      confirm_or_exit('There are %d builders with failed builds.'
                      % num_builders, action='continue')
  elif options.quick_run:
    print('Scheduling CQ quick run on: %s' % cl.GetIssueURL())
    return cl.SetCQState(_CQState.QUICK_RUN)
  else:
    # No bots and no retry request: default to a CQ dry run.
    if options.verbose:
      print('git cl try with no bots now defaults to CQ dry run.')
    print('Scheduling CQ dry run on: %s' % cl.GetIssueURL())
    return cl.SetCQState(_CQState.DRY_RUN)

  patchset = cl.GetMostRecentPatchset()
  try:
    _trigger_tryjobs(cl, jobs, options, patchset)
  except BuildbucketResponseException as ex:
    print('ERROR: %s' % ex)
    return 1
  return 0
@metrics.collector.collect_metrics('git cl try-results')
def CMDtry_results(parser, args):
  """Prints info about results for tryjobs associated with the current CL."""
  group = optparse.OptionGroup(parser, 'Tryjob results options')
  group.add_option(
      '-p', '--patchset', type=int, help='patchset number if not current.')
  group.add_option(
      '--print-master', action='store_true', help='print master name as well.')
  group.add_option(
      '--color', action='store_true', default=setup_color.IS_TTY,
      help='force color output, useful when piping output.')
  group.add_option(
      '--buildbucket-host', default='cr-buildbucket.appspot.com',
      help='Host of buildbucket. The default host is %default.')
  group.add_option(
      '--json', help=('Path of JSON output file to write tryjob results to,'
                      'or "-" for stdout.'))
  parser.add_option_group(group)
  parser.add_option(
      '-i', '--issue', type=int,
      help='Operate on this issue instead of the current branch\'s implicit '
           'issue.')
  options, args = parser.parse_args(args)
  if args:
    parser.error('Unrecognized args: %s' % ' '.join(args))

  cl = Changelist(issue=options.issue)
  if not cl.GetIssue():
    parser.error('Need to upload first.')

  # Without --patchset, fall back to the most recent dry-run patchset.
  patchset = options.patchset
  if not patchset:
    patchset = cl.GetMostRecentDryRunPatchset()
    if not patchset:
      parser.error('Code review host doesn\'t know about issue %s. '
                   'No access to issue or wrong issue number?\n'
                   'Either upload first, or pass --patchset explicitly.' %
                   cl.GetIssue())

  try:
    jobs = _fetch_tryjobs(cl, options.buildbucket_host, patchset)
  except BuildbucketResponseException as ex:
    print('Buildbucket error: %s' % ex)
    return 1
  # Either dump machine-readable JSON or pretty-print to the terminal.
  if options.json:
    write_json(options.json, jobs)
  else:
    _print_tryjobs(options, jobs)
  return 0
@subcommand.usage('[new upstream branch]')
@metrics.collector.collect_metrics('git cl upstream')
def CMDupstream(parser, args):
  """Prints or sets the name of the upstream branch, if any."""
  _, args = parser.parse_args(args)
  if len(args) > 1:
    parser.error('Unrecognized args: %s' % ' '.join(args))

  cl = Changelist()
  if not args:
    # No argument: just report the current upstream branch.
    print(cl.GetUpstreamBranch())
    return 0

  # One arg means set upstream branch.
  current_branch = cl.GetBranch()
  RunGit(['branch', '--set-upstream-to', args[0], current_branch])
  # Re-read branch state so the confirmation reflects the new upstream.
  cl = Changelist()
  print('Upstream branch set to %s' % (cl.GetUpstreamBranch(),))

  # Clear configured merge-base, if there is one.
  git_common.remove_merge_base(current_branch)
  return 0
@metrics.collector.collect_metrics('git cl web')
def CMDweb(parser, args):
  """Opens the current CL in the web browser."""
  _, args = parser.parse_args(args)
  if args:
    parser.error('Unrecognized args: %s' % ' '.join(args))

  issue_url = Changelist().GetIssueURL()
  if not issue_url:
    print('ERROR No issue to open', file=sys.stderr)
    return 1

  # Redirect I/O before invoking browser to hide its output. For example, this
  # allows us to hide the "Created new window in existing browser session."
  # message from Chrome. Based on https://stackoverflow.com/a/2323563.
  saved_stdout = os.dup(1)
  saved_stderr = os.dup(2)
  os.close(1)
  os.close(2)
  # os.open returns the lowest available descriptor, i.e. 1 (stdout).
  # NOTE(review): fd 2 appears to remain closed until restored below —
  # confirm that leaving stderr closed while the browser launches is
  # intentional.
  os.open(os.devnull, os.O_RDWR)
  try:
    webbrowser.open(issue_url)
  finally:
    # Restore the original stdout/stderr even if the browser launch raised.
    os.dup2(saved_stdout, 1)
    os.dup2(saved_stderr, 2)
  return 0
@metrics.collector.collect_metrics('git cl set-commit')
def CMDset_commit(parser, args):
  """Sets the commit bit to trigger the CQ.

  Exactly one of --dry-run/--quick-run/--clear may be given; with none of
  them the full commit is requested.
  """
  parser.add_option('-d', '--dry-run', action='store_true',
                    help='trigger in dry run mode')
  parser.add_option(
      '-q',
      '--quick-run',
      action='store_true',
      help='trigger in quick run mode '
           '(https://source.chromium.org/chromium/chromium/src/+/main:docs/cq_qu'
           'ick_run.md) (chromium only).')
  parser.add_option('-c', '--clear', action='store_true',
                    help='stop CQ run, if any')
  parser.add_option(
      '-i', '--issue', type=int,
      help='Operate on this issue instead of the current branch\'s implicit '
           'issue.')
  options, args = parser.parse_args(args)
  if args:
    parser.error('Unrecognized args: %s' % ' '.join(args))
  if [options.dry_run, options.quick_run, options.clear].count(True) > 1:
    parser.error('Only one of --dry-run, --quick-run, and --clear are allowed.')

  cl = Changelist(issue=options.issue)
  if not cl.GetIssue():
    parser.error('Must upload the issue first.')

  # Map the mutually-exclusive flags onto the CQ state to request.
  if options.clear:
    state = _CQState.NONE
  elif options.quick_run:
    state = _CQState.QUICK_RUN
  elif options.dry_run:
    state = _CQState.DRY_RUN
  else:
    state = _CQState.COMMIT
  cl.SetCQState(state)
  return 0
@metrics.collector.collect_metrics('git cl set-close')
def CMDset_close(parser, args):
  """Closes the issue."""
  parser.add_option(
      '-i', '--issue', type=int,
      help='Operate on this issue instead of the current branch\'s implicit '
           'issue.')
  options, extra = parser.parse_args(args)
  if extra:
    parser.error('Unrecognized args: %s' % ' '.join(extra))

  changelist = Changelist(issue=options.issue)
  # Ensure there actually is an issue to close.
  if not changelist.GetIssue():
    DieWithError('ERROR: No issue to close.')

  changelist.CloseIssue()
  return 0
@metrics.collector.collect_metrics('git cl diff')
def CMDdiff(parser, args):
  """Shows differences between local tree and last upload."""
  parser.add_option(
      '--stat',
      action='store_true',
      dest='stat',
      help='Generate a diffstat')
  options, extra = parser.parse_args(args)
  if extra:
    parser.error('Unrecognized args: %s' % ' '.join(extra))

  changelist = Changelist()
  issue = changelist.GetIssue()
  branch = changelist.GetBranch()
  if not issue:
    DieWithError('No issue found for current branch (%s)' % branch)

  # Prefer the recorded upload hashes; fall back to fetching the patchset
  # from Gerrit when neither config value is present.
  base_rev = None
  for config_key in ('last-upload-hash', 'gerritsquashhash'):
    base_rev = changelist._GitGetBranchConfigValue(config_key)
    if base_rev:
      break
  if not base_rev:
    detail = changelist._GetChangeDetail(['CURRENT_REVISION', 'CURRENT_COMMIT'])
    revision_info = detail['revisions'][detail['current_revision']]
    fetch_info = revision_info['fetch']['http']
    RunGit(['fetch', fetch_info['url'], fetch_info['ref']])
    base_rev = 'FETCH_HEAD'

  diff_args = ['git', 'diff']
  if options.stat:
    diff_args.append('--stat')
  diff_args.append(base_rev)
  subprocess2.check_call(diff_args)

  return 0
@metrics.collector.collect_metrics('git cl owners')
def CMDowners(parser, args):
  """Finds potential owners for reviewing."""
  parser.add_option(
      '--ignore-current', action='store_true',
      help='Ignore the CL\'s current reviewers and start from scratch.')
  parser.add_option(
      '--ignore-self', action='store_true',
      help='Do not consider CL\'s author as an owners.')
  parser.add_option(
      '--no-color', action='store_true',
      help='Use this option to disable color output')
  parser.add_option(
      '--batch', action='store_true',
      help='Do not run interactively, just suggest some')
  # TODO: Consider moving this to another command, since other
  # git-cl owners commands deal with owners for a given CL.
  parser.add_option(
      '--show-all', action='store_true',
      help='Show all owners for a particular file')
  options, args = parser.parse_args(args)

  changelist = Changelist()
  author = changelist.GetAuthor()

  if options.show_all:
    # --show-all only lists the owners of each named file, then exits.
    if not args:
      print('No files specified for --show-all. Nothing to do.')
      return 0
    owners_by_path = changelist.owners_client.BatchListOwners(args)
    for path in args:
      print('Owners for %s:' % path)
      listed = owners_by_path.get(path, ['No owners found'])
      print('\n'.join(' - %s' % owner for owner in listed))
    return 0

  if args:
    if len(args) > 1:
      parser.error('Unknown args.')
    base_branch = args[0]
  else:
    # Default to diffing against the common ancestor of the upstream branch.
    base_branch = changelist.GetCommonAncestorWithUpstream()

  affected_files = changelist.GetAffectedFiles(base_branch)

  if options.batch:
    suggested = changelist.owners_client.SuggestOwners(
        affected_files, exclude=[author])
    print('\n'.join(suggested))
    return 0

  return owners_finder.OwnersFinder(
      affected_files,
      author,
      [] if options.ignore_current else changelist.GetReviewers(),
      changelist.owners_client,
      disable_color=options.no_color,
      ignore_author=options.ignore_self).run()
def BuildGitDiffCmd(diff_type, upstream_commit, args, allow_prefix=False):
  """Generates a diff command.

  Args:
    diff_type: git-diff mode flag, e.g. '--name-only' or '-U0'.
    upstream_commit: the revision to diff against.
    args: optional list of file or directory paths that restrict the diff;
        dies if an entry is neither an existing file nor a directory.
    allow_prefix: keep the conventional a/ and b/ prefixes. Explicitly
        setting --src-prefix/--dst-prefix is necessary in case diff.noprefix
        is set in the user's git config.

  Returns:
    The argument list to pass to git (without the leading 'git').
  """
  prefix_args = (['--src-prefix=a/', '--dst-prefix=b/']
                 if allow_prefix else ['--no-prefix'])
  diff_cmd = (['-c', 'core.quotePath=false', 'diff', '--no-ext-diff'] +
              prefix_args + [diff_type, upstream_commit, '--'])
  for path_arg in (args or []):
    if not (os.path.isdir(path_arg) or os.path.isfile(path_arg)):
      DieWithError('Argument "%s" is not a file or a directory' % path_arg)
    diff_cmd.append(path_arg)
  return diff_cmd
def _RunClangFormatDiff(opts, clang_diff_files, top_dir, upstream_commit):
  """Runs clang-format-diff and sets a return value if necessary.

  Args:
    opts: parsed CMDformat options (full/dry_run/diff flags are read here).
    clang_diff_files: files eligible for clang-format; no-op when empty.
    top_dir: repository root, used as cwd for the spawned tools.
    upstream_commit: revision to diff against when formatting only changed
        line ranges.

  Returns:
    0 on success, 2 when --dry-run detected unformatted files.
  """
  if not clang_diff_files:
    return 0

  # Set to 2 to signal to CheckPatchFormatted() that this patch isn't
  # formatted. This is used to block during the presubmit.
  return_value = 0

  # Locate the clang-format binary in the checkout
  try:
    clang_format_tool = clang_format.FindClangFormatToolInChromiumTree()
  except clang_format.NotFoundError as e:
    DieWithError(e)

  if opts.full or settings.GetFormatFullByDefault():
    # Whole-file mode: run clang-format directly on each touched file.
    cmd = [clang_format_tool]
    if not opts.dry_run and not opts.diff:
      cmd.append('-i')
    if opts.dry_run:
      # Dry run: compare each file's current content against the tool's
      # output; any difference means the patch is not formatted.
      for diff_file in clang_diff_files:
        with open(diff_file, 'r') as myfile:
          # Normalize line endings so the comparison is content-only.
          code = myfile.read().replace('\r\n', '\n')
          stdout = RunCommand(cmd + [diff_file], cwd=top_dir)
          stdout = stdout.replace('\r\n', '\n')
          if opts.diff:
            sys.stdout.write(stdout)
          if code != stdout:
            return_value = 2
    else:
      stdout = RunCommand(cmd + clang_diff_files, cwd=top_dir)
      if opts.diff:
        sys.stdout.write(stdout)
  else:
    # Changed-lines mode: pipe a -U0 diff through clang-format-diff.py so
    # only the modified ranges get reformatted.
    try:
      script = clang_format.FindClangFormatScriptInChromiumTree(
          'clang-format-diff.py')
    except clang_format.NotFoundError as e:
      DieWithError(e)

    cmd = ['vpython', script, '-p0']
    if not opts.dry_run and not opts.diff:
      cmd.append('-i')

    diff_cmd = BuildGitDiffCmd('-U0', upstream_commit, clang_diff_files)
    diff_output = RunGit(diff_cmd).encode('utf-8')

    # clang-format-diff.py finds clang-format via PATH, so prepend the
    # directory containing the checked-in binary.
    env = os.environ.copy()
    env['PATH'] = (
        str(os.path.dirname(clang_format_tool)) + os.pathsep + env['PATH'])
    stdout = RunCommand(
        cmd, stdin=diff_output, cwd=top_dir, env=env,
        shell=sys.platform.startswith('win32'))
    if opts.diff:
      sys.stdout.write(stdout)
    # Any output in dry-run mode means a non-empty formatting diff.
    if opts.dry_run and len(stdout) > 0:
      return_value = 2

  return return_value
def MatchingFileType(file_name, extensions):
  """Returns True if the file name ends with one of the given extensions.

  The comparison lower-cases the file name but not the extensions, matching
  the original behavior (callers pass lowercase extensions).
  """
  # str.endswith accepts a tuple of suffixes, which avoids building a
  # throwaway list just to test emptiness.
  return file_name.lower().endswith(tuple(extensions))
@subcommand.usage('[files or directories to diff]')
@metrics.collector.collect_metrics('git cl format')
def CMDformat(parser, args):
  """Runs auto-formatting tools (clang-format etc.) on the diff.

  Dispatches to clang-format (C-family/JS), yapf (Python), `gn format`
  (GN build files) and the metrics pretty-printers (XML), operating on the
  files changed relative to the upstream merge-base.

  Returns 0 on success, 2 when --dry-run found unformatted files.
  """
  CLANG_EXTS = ['.cc', '.cpp', '.h', '.m', '.mm', '.proto', '.java']
  GN_EXTS = ['.gn', '.gni', '.typemap']
  parser.add_option('--full', action='store_true',
                    help='Reformat the full content of all touched files')
  parser.add_option('--dry-run', action='store_true',
                    help='Don\'t modify any file on disk.')
  parser.add_option(
      '--no-clang-format',
      dest='clang_format',
      action='store_false',
      default=True,
      help='Disables formatting of various file types using clang-format.')
  parser.add_option(
      '--python',
      action='store_true',
      default=None,
      help='Enables python formatting on all python files.')
  parser.add_option(
      '--no-python',
      action='store_true',
      default=False,
      help='Disables python formatting on all python files. '
      'If neither --python or --no-python are set, python files that have a '
      '.style.yapf file in an ancestor directory will be formatted. '
      'It is an error to set both.')
  parser.add_option(
      '--js',
      action='store_true',
      help='Format javascript code with clang-format. '
      'Has no effect if --no-clang-format is set.')
  parser.add_option('--diff', action='store_true',
                    help='Print diff to stdout rather than modifying files.')
  parser.add_option('--presubmit', action='store_true',
                    help='Used when running the script from a presubmit.')
  opts, args = parser.parse_args(args)

  # --python defaults to None (auto-detect via .style.yapf); --no-python
  # forces it off. Setting both is contradictory.
  if opts.python is not None and opts.no_python:
    raise parser.error('Cannot set both --python and --no-python')
  if opts.no_python:
    opts.python = False

  # Normalize any remaining args against the current path, so paths relative to
  # the current directory are still resolved as expected.
  args = [os.path.join(os.getcwd(), arg) for arg in args]

  # git diff generates paths against the root of the repository. Change
  # to that directory so clang-format can find files even within subdirs.
  rel_base_path = settings.GetRelativeRoot()
  if rel_base_path:
    os.chdir(rel_base_path)

  # Grab the merge-base commit, i.e. the upstream commit of the current
  # branch when it was created or the last time it was rebased. This is
  # to cover the case where the user may have called "git fetch origin",
  # moving the origin branch to a newer commit, but hasn't rebased yet.
  upstream_commit = None
  cl = Changelist()
  upstream_branch = cl.GetUpstreamBranch()
  if upstream_branch:
    upstream_commit = RunGit(['merge-base', 'HEAD', upstream_branch])
    upstream_commit = upstream_commit.strip()

  if not upstream_commit:
    DieWithError('Could not find base commit for this branch. '
                 'Are you in detached state?')

  changed_files_cmd = BuildGitDiffCmd('--name-only', upstream_commit, args)
  diff_output = RunGit(changed_files_cmd)
  diff_files = diff_output.splitlines()
  # Filter out files deleted by this CL
  diff_files = [x for x in diff_files if os.path.isfile(x)]

  if opts.js:
    CLANG_EXTS.extend(['.js', '.ts'])

  clang_diff_files = []
  if opts.clang_format:
    clang_diff_files = [
        x for x in diff_files if MatchingFileType(x, CLANG_EXTS)
    ]
  python_diff_files = [x for x in diff_files if MatchingFileType(x, ['.py'])]
  gn_diff_files = [x for x in diff_files if MatchingFileType(x, GN_EXTS)]

  top_dir = settings.GetRoot()

  return_value = _RunClangFormatDiff(opts, clang_diff_files, top_dir,
                                     upstream_commit)

  # Similar code to above, but using yapf on .py files rather than clang-format
  # on C/C++ files
  py_explicitly_disabled = opts.python is not None and not opts.python
  if python_diff_files and not py_explicitly_disabled:
    depot_tools_path = os.path.dirname(os.path.abspath(__file__))
    yapf_tool = os.path.join(depot_tools_path, 'yapf')

    # Used for caching.
    yapf_configs = {}
    for f in python_diff_files:
      # Find the yapf style config for the current file, defaults to depot
      # tools default.
      _FindYapfConfigFile(f, yapf_configs, top_dir)

    # Turn on python formatting by default if a yapf config is specified.
    # This breaks in the case of this repo though since the specified
    # style file is also the global default.
    if opts.python is None:
      filtered_py_files = []
      for f in python_diff_files:
        if _FindYapfConfigFile(f, yapf_configs, top_dir) is not None:
          filtered_py_files.append(f)
    else:
      filtered_py_files = python_diff_files

    # Note: yapf still seems to fix indentation of the entire file
    # even if line ranges are specified.
    # See https://github.com/google/yapf/issues/499
    if not opts.full and filtered_py_files:
      py_line_diffs = _ComputeDiffLineRanges(filtered_py_files, upstream_commit)

    yapfignore_patterns = _GetYapfIgnorePatterns(top_dir)
    filtered_py_files = _FilterYapfIgnoredFiles(filtered_py_files,
                                                yapfignore_patterns)

    for f in filtered_py_files:
      yapf_style = _FindYapfConfigFile(f, yapf_configs, top_dir)
      # Default to pep8 if not .style.yapf is found.
      if not yapf_style:
        yapf_style = 'pep8'

      # Pick the vpython matching the file's shebang line.
      with open(f, 'r') as py_f:
        if 'python3' in py_f.readline():
          vpython_script = 'vpython3'
        else:
          vpython_script = 'vpython'

      cmd = [vpython_script, yapf_tool, '--style', yapf_style, f]

      has_formattable_lines = False
      if not opts.full:
        # Only run yapf over changed line ranges.
        for diff_start, diff_len in py_line_diffs[f]:
          diff_end = diff_start + diff_len - 1
          # Yapf errors out if diff_end < diff_start but this
          # is a valid line range diff for a removal.
          if diff_end >= diff_start:
            has_formattable_lines = True
            cmd += ['-l', '{}-{}'.format(diff_start, diff_end)]
        # If all line diffs were removals we have nothing to format.
        if not has_formattable_lines:
          continue

      if opts.diff or opts.dry_run:
        cmd += ['--diff']
        # Will return non-zero exit code if non-empty diff.
        stdout = RunCommand(cmd,
                            error_ok=True,
                            cwd=top_dir,
                            shell=sys.platform.startswith('win32'))
        if opts.diff:
          sys.stdout.write(stdout)
        elif len(stdout) > 0:
          return_value = 2
      else:
        cmd += ['-i']
        RunCommand(cmd, cwd=top_dir, shell=sys.platform.startswith('win32'))

  # Format GN build files. Always run on full build files for canonical form.
  if gn_diff_files:
    cmd = ['gn', 'format']
    if opts.dry_run or opts.diff:
      cmd.append('--dry-run')
    for gn_diff_file in gn_diff_files:
      gn_ret = subprocess2.call(cmd + [gn_diff_file],
                                shell=sys.platform.startswith('win'),
                                cwd=top_dir)
      if opts.dry_run and gn_ret == 2:
        return_value = 2  # Not formatted.
      elif opts.diff and gn_ret == 2:
        # TODO this should compute and print the actual diff.
        print('This change has GN build file diff for ' + gn_diff_file)
      elif gn_ret != 0:
        # For non-dry run cases (and non-2 return values for dry-run), a
        # nonzero error code indicates a failure, probably because the file
        # doesn't parse.
        DieWithError('gn format failed on ' + gn_diff_file +
                     '\nTry running `gn format` on this file manually.')

  # Skip the metrics formatting from the global presubmit hook. These files have
  # a separate presubmit hook that issues an error if the files need formatting,
  # whereas the top-level presubmit script merely issues a warning. Formatting
  # these files is somewhat slow, so it's important not to duplicate the work.
  if not opts.presubmit:
    for diff_xml in GetDiffXMLs(diff_files):
      xml_dir = GetMetricsDir(diff_xml)
      if not xml_dir:
        continue

      tool_dir = os.path.join(top_dir, xml_dir)
      pretty_print_tool = os.path.join(tool_dir, 'pretty_print.py')
      cmd = ['vpython', pretty_print_tool, '--non-interactive']

      # If the XML file is histograms.xml or enums.xml, add the xml path to the
      # command as histograms/pretty_print.py now needs a relative path argument
      # after splitting the histograms into multiple directories.
      # For example, in tools/metrics/ukm, pretty-print could be run using:
      #   $ python pretty_print.py
      # But in tools/metrics/histogrmas, pretty-print should be run with an
      # additional relative path argument, like:
      #   $ python pretty_print.py histograms_xml/UMA/histograms.xml
      #   $ python pretty_print.py enums.xml
      # TODO (crbug/1116488): Remove this check after ensuring that the updated
      # version of histograms/pretty_print.py is released.
      filepath_required = os.path.exists(
          os.path.join(tool_dir, 'validate_prefix.py'))

      if (diff_xml.endswith('histograms.xml') or diff_xml.endswith('enums.xml')
          or diff_xml.endswith('histogram_suffixes_list.xml')
          ) and filepath_required:
        cmd.append(diff_xml)

      if opts.dry_run or opts.diff:
        cmd.append('--diff')

      # TODO(isherman): Once this file runs only on Python 3.3+, drop the
      # `shell` param and instead replace `'vpython'` with
      # `shutil.which('frob')` above: https://stackoverflow.com/a/32799942
      stdout = RunCommand(cmd,
                          cwd=top_dir,
                          shell=sys.platform.startswith('win32'))
      if opts.diff:
        sys.stdout.write(stdout)
      if opts.dry_run and stdout:
        return_value = 2  # Not formatted.

  return return_value
def GetDiffXMLs(diff_files):
  """Returns the normalized paths of the .xml files in diff_files."""
  xml_files = []
  for path in diff_files:
    if MatchingFileType(path, ['.xml']):
      xml_files.append(os.path.normpath(path))
  return xml_files
def GetMetricsDir(diff_xml):
  """Returns the tools/metrics subdirectory diff_xml lives under, or None."""
  metrics_root = os.path.join('tools', 'metrics')
  for subdir in ('actions', 'histograms', 'rappor', 'structured', 'ukm'):
    candidate = os.path.join(metrics_root, subdir)
    if diff_xml.startswith(candidate):
      return candidate
  return None
@subcommand.usage('<codereview url or issue id>')
@metrics.collector.collect_metrics('git cl checkout')
def CMDcheckout(parser, args):
  """Checks out a branch associated with a given Gerrit issue."""
  _, args = parser.parse_args(args)

  if len(args) != 1:
    parser.print_help()
    return 1

  issue_arg = ParseIssueNumberArgument(args[0])
  if not issue_arg.valid:
    parser.error('Invalid issue ID or URL.')

  target_issue = str(issue_arg.issue)

  # Scan the local git config for branches annotated with this issue number.
  config_output = RunGit(['config', '--local', '--get-regexp',
                          r'branch\..*\.' + ISSUE_CONFIG_KEY],
                         error_ok=True)

  matching_branches = []
  for line in config_output.splitlines():
    key, issue = line.split()
    if issue == target_issue:
      matching_branches.append(
          re.sub(r'branch\.(.*)\.' + ISSUE_CONFIG_KEY, r'\1', key))

  if not matching_branches:
    print('No branch found for issue %s.' % target_issue)
    return 1

  if len(matching_branches) == 1:
    RunGit(['checkout', matching_branches[0]])
    return 0

  # Several branches carry this issue: let the user pick one by index.
  print('Multiple branches match issue %s:' % target_issue)
  for index, branch_name in enumerate(matching_branches):
    print('%d: %s' % (index, branch_name))
  which = gclient_utils.AskForData('Choose by index: ')
  try:
    RunGit(['checkout', matching_branches[int(which)]])
  except (IndexError, ValueError):
    print('Invalid selection, not checking out any branch.')
    return 1

  return 0
def CMDlol(parser, args):
  # This command is intentionally undocumented.
  # Easter egg: decodes and prints a zlib-compressed, base64-encoded blob.
  print(zlib.decompress(base64.b64decode(
      'eNptkLEOwyAMRHe+wupCIqW57v0Vq84WqWtXyrcXnCBsmgMJ+/SSAxMZgRB6NzE'
      'E2ObgCKJooYdu4uAQVffUEoE1sRQLxAcqzd7uK2gmStrll1ucV3uZyaY5sXyDd9'
      'JAnN+lAXsOMJ90GANAi43mq5/VeeacylKVgi8o6F1SC63FxnagHfJUTfUYdCR/W'
      'Ofe+0dHL7PicpytKP750Fh1q2qnLVof4w8OZWNY')).decode('utf-8'))
  return 0
class OptionParser(optparse.OptionParser):
  """Creates the option parse and add --verbose support."""

  def __init__(self, *args, **kwargs):
    optparse.OptionParser.__init__(
        self, *args, prog='git cl', version=__version__, **kwargs)
    self.add_option(
        '-v', '--verbose', action='count', default=0,
        help='Use 2 times for more debugging info')

  def parse_args(self, args=None, _values=None):
    """Parses args and, as a side effect, initializes the global settings
    object and reports the project URL to the metrics collector."""
    try:
      return self._parse_args(args)
    finally:
      # Regardless of success or failure of args parsing, we want to report
      # metrics, but only after logging has been initialized (if parsing
      # succeeded).
      global settings
      settings = Settings()

      if metrics.collector.config.should_collect_metrics:
        # GetViewVCUrl ultimately calls logging method.
        project_url = settings.GetViewVCUrl().strip('/+')
        if project_url in metrics_utils.KNOWN_PROJECT_URLS:
          metrics.collector.add('project_urls', [project_url])

  def _parse_args(self, args=None):
    """Parses args, records which option names the user passed (for metrics),
    and configures logging verbosity from the -v count."""
    # Create an optparse.Values object that will store only the actual passed
    # options, without the defaults.
    actual_options = optparse.Values()
    _, args = optparse.OptionParser.parse_args(self, args, actual_options)
    # Create an optparse.Values object with the default options.
    options = optparse.Values(self.get_default_values().__dict__)
    # Update it with the options passed by the user.
    options._update_careful(actual_options.__dict__)
    # Store the options passed by the user in an _actual_options attribute.
    # We store only the keys, and not the values, since the values can contain
    # arbitrary information, which might be PII.
    metrics.collector.add('arguments', list(actual_options.__dict__.keys()))

    # -v maps to INFO, -vv (or more) to DEBUG; default is WARNING.
    levels = [logging.WARNING, logging.INFO, logging.DEBUG]
    logging.basicConfig(
        level=levels[min(options.verbose, len(levels) - 1)],
        format='[%(levelname).1s%(asctime)s %(process)d %(thread)d '
               '%(filename)s] %(message)s')

    return options, args
def main(argv):
  """Entry point: dispatches to the CMD* subcommands."""
  if sys.hexversion < 0x02060000:
    version = sys.version.split(' ', 1)[0]
    print('\nYour Python version %s is unsupported, please upgrade.\n' %
          (version,), file=sys.stderr)
    return 2

  colorize_CMDstatus_doc()
  dispatcher = subcommand.CommandDispatcher(__name__)
  try:
    return dispatcher.execute(OptionParser(), argv)
  except auth.LoginRequiredError as login_err:
    DieWithError(str(login_err))
  except urllib.error.HTTPError as http_err:
    # Anything other than a 500 is unexpected: let it propagate.
    if http_err.code != 500:
      raise
    DieWithError(
        ('App Engine is misbehaving and returned HTTP %d, again. Keep faith '
         'and retry or visit go/isgaeup.\n%s') % (http_err.code, str(http_err)))
  return 0
if __name__ == '__main__':
  # These affect sys.stdout, so do it outside of main() to simplify mocks in
  # the unit tests.
  fix_encoding.fix_encoding()
  setup_color.init()
  # print_notice_and_exit() wraps execution so the metrics notice is shown
  # and the process exit code is propagated.
  with metrics.collector.print_notice_and_exit():
    sys.exit(main(sys.argv[1:]))
| 37.460268
| 97
| 0.653485
|
4a054d21a70c794b56caa69afea63198cb3f598a
| 59,918
|
py
|
Python
|
androguard/core/bytecodes/axml.py
|
xiaopiandao/androguard
|
abed877951cd91cb2cf523447ff12f3bb38f8d2a
|
[
"Apache-2.0"
] | 3
|
2015-10-23T13:36:07.000Z
|
2021-07-22T23:30:41.000Z
|
androguard/core/bytecodes/axml.py
|
xiaopiandao/androguard
|
abed877951cd91cb2cf523447ff12f3bb38f8d2a
|
[
"Apache-2.0"
] | null | null | null |
androguard/core/bytecodes/axml.py
|
xiaopiandao/androguard
|
abed877951cd91cb2cf523447ff12f3bb38f8d2a
|
[
"Apache-2.0"
] | 1
|
2019-11-28T05:03:18.000Z
|
2019-11-28T05:03:18.000Z
|
from __future__ import division
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import chr
from builtins import range
from builtins import object
from androguard.core import bytecode
from androguard.core.resources import public
from struct import pack, unpack
from xml.sax.saxutils import escape
import collections
from collections import defaultdict
import lxml.sax
from lxml import etree
import logging
log = logging.getLogger("androguard.axml")
################################## AXML FORMAT ########################################
# Translated from
# http://code.google.com/p/android4me/source/browse/src/android/content/res/AXmlResourceParser.java

# Flags in the STRING Section
# SORTED_FLAG: the string pool entries are sorted.
# UTF8_FLAG: the string pool is UTF-8 encoded (otherwise UTF-16LE).
SORTED_FLAG = 1 << 0
UTF8_FLAG = 1 << 8
class StringBlock(object):
    """
    StringBlock is a CHUNK inside an AXML File
    It contains all strings, which are used by referecing to ID's

    TODO might migrate this block into the ARSCParser, as it it not a "special" block but a normal tag.
    """

    def __init__(self, buff, header):
        """
        :param buff: buffer positioned just after the chunk header
        :param header: the already-read chunk header (provides .size)
        """
        self._cache = {}
        self.header = header
        # We already read the header (which was chunk_type and chunk_size
        # Now, we read the string_count:
        self.stringCount = unpack('<i', buff.read(4))[0]
        # style_count
        self.styleOffsetCount = unpack('<i', buff.read(4))[0]

        # flags
        self.flags = unpack('<i', buff.read(4))[0]
        self.m_isUTF8 = ((self.flags & UTF8_FLAG) != 0)

        # string_pool_offset
        # The string offset is counted from the beginning of the string section
        self.stringsOffset = unpack('<i', buff.read(4))[0]
        # style_pool_offset
        # The styles offset is counted as well from the beginning of the string section
        self.stylesOffset = unpack('<i', buff.read(4))[0]

        # Check if they supplied a stylesOffset even if the count is 0:
        if self.styleOffsetCount == 0 and self.stylesOffset > 0:
            log.warning("Styles Offset given, but styleCount is zero.")

        self.m_stringOffsets = []
        self.m_styleOffsets = []
        self.m_charbuff = ""
        self.m_styles = []

        # Next, there is a list of string following
        # This is only a list of offsets (4 byte each)
        for i in range(0, self.stringCount):
            self.m_stringOffsets.append(unpack('<i', buff.read(4))[0])

        # And a list of styles
        # again, a list of offsets
        for i in range(0, self.styleOffsetCount):
            self.m_styleOffsets.append(unpack('<i', buff.read(4))[0])

        # FIXME it is probably better to parse n strings and not calculate the size
        size = self.header.size - self.stringsOffset

        # if there are styles as well, we do not want to read them too.
        # Only read them, if no
        if self.stylesOffset != 0 and self.styleOffsetCount != 0:
            size = self.stylesOffset - self.stringsOffset

        # FIXME unaligned
        if (size % 4) != 0:
            log.warning("Size of strings is not aligned by four bytes.")

        self.m_charbuff = buff.read(size)

        if self.stylesOffset != 0 and self.styleOffsetCount != 0:
            size = self.header.size - self.stylesOffset

            # FIXME unaligned
            if (size % 4) != 0:
                log.warning("Size of styles is not aligned by four bytes.")

            for i in range(0, size // 4):
                self.m_styles.append(unpack('<i', buff.read(4))[0])

    def getString(self, idx):
        """Return the decoded string with index idx, or "" if out of range."""
        if idx in self._cache:
            return self._cache[idx]

        if idx < 0 or not self.m_stringOffsets or idx >= len(
                self.m_stringOffsets):
            return ""

        offset = self.m_stringOffsets[idx]

        if self.m_isUTF8:
            self._cache[idx] = self.decode8(offset)
        else:
            self._cache[idx] = self.decode16(offset)

        return self._cache[idx]

    def getStyle(self, idx):
        # FIXME
        return self.m_styles[idx]

    def decode8(self, offset):
        """Decode an UTF-8 string entry at the given buffer offset.

        Layout: <char-count prefix><byte-count prefix><utf8 bytes>.
        """
        str_len, skip = self.decodeLength(offset, 1)
        offset += skip

        encoded_bytes, skip = self.decodeLength(offset, 1)
        offset += skip

        data = self.m_charbuff[offset: offset + encoded_bytes]

        return self.decode_bytes(data, 'utf-8', str_len)

    def decode16(self, offset):
        """Decode an UTF-16LE string entry at the given buffer offset.

        Layout: <char-count prefix><utf16 code units>.
        """
        str_len, skip = self.decodeLength(offset, 2)
        offset += skip

        # Two bytes per UTF-16 code unit.
        encoded_bytes = str_len * 2

        data = self.m_charbuff[offset: offset + encoded_bytes]

        return self.decode_bytes(data, 'utf-16', str_len)

    def decode_bytes(self, data, encoding, str_len):
        """Decode data with the given codec and sanity-check the length."""
        string = data.decode(encoding, 'replace')
        if len(string) != str_len:
            log.warning("invalid decoded string length")
        return string

    def decodeLength(self, offset, sizeof_char):
        """Decode a string-pool length prefix.

        The length is stored in one unit of sizeof_char bytes; if the high
        bit of that unit is set, the length did not fit and is continued in
        a second unit: length = ((first & ~highbit) << (8*sizeof_char)) | second.

        :returns: (length, number of bytes consumed by the prefix)
        """
        sizeof_2chars = sizeof_char << 1
        fmt_chr = 'B' if sizeof_char == 1 else 'H'
        fmt = "<2" + fmt_chr

        length1, length2 = unpack(fmt, self.m_charbuff[offset:(offset + sizeof_2chars)])

        highbit = 0x80 << (8 * (sizeof_char - 1))

        # BUGFIX: the high-bit test must be done on the full first unit
        # (length1), not on the raw first byte of the buffer -- otherwise
        # the two-unit form can never be detected for UTF-16 strings.
        if (length1 & highbit) != 0:
            return ((length1 & ~highbit) << (8 * sizeof_char)) | length2, sizeof_2chars
        else:
            return length1, sizeof_char

    def show(self):
        """Print a debug dump of this string pool."""
        # BUGFIX: the previous implementation referenced attributes that do
        # not exist on this class (self.start, self.header_size,
        # self.chunkSize) and always raised AttributeError.
        print("StringBlock(stringCount=0x%x, styleOffsetCount=0x%x, "
              "stringsOffset=0x%x, stylesOffset=0x%x, flags=0x%x)" % (
                  self.stringCount,
                  self.styleOffsetCount,
                  self.stringsOffset,
                  self.stylesOffset,
                  self.flags))
        for i in range(0, len(self.m_stringOffsets)):
            print(i, repr(self.getString(i)))
# Position of the fields inside an attribute
# (each attribute is a run of 5 consecutive u32 values in m_attributes)
ATTRIBUTE_IX_NAMESPACE_URI = 0
ATTRIBUTE_IX_NAME = 1
ATTRIBUTE_IX_VALUE_STRING = 2
ATTRIBUTE_IX_VALUE_TYPE = 3
ATTRIBUTE_IX_VALUE_DATA = 4
# NOTE: "LENGHT" typo is long-standing public API of this module; kept as-is.
ATTRIBUTE_LENGHT = 5

# Chunk Headers
CHUNK_AXML_FILE = 0x00080003
CHUNK_STRING = 0x001C0001
CHUNK_RESOURCEIDS = 0x00080180
# CHUNK_XML_FIRST..CHUNK_XML_LAST delimit the valid XML chunk type range.
CHUNK_XML_FIRST = 0x00100100
CHUNK_XML_START_NAMESPACE = 0x00100100
CHUNK_XML_END_NAMESPACE = 0x00100101
CHUNK_XML_START_TAG = 0x00100102
CHUNK_XML_END_TAG = 0x00100103
CHUNK_XML_TEXT = 0x00100104
CHUNK_XML_LAST = 0x00100104

# Parser event codes produced by AXMLParser.doNext()
START_DOCUMENT = 0
END_DOCUMENT = 1
START_TAG = 2
END_TAG = 3
TEXT = 4
class AXMLParser(object):
    def __init__(self, raw_buff):
        """Parse the AXML file header and string pool from raw_buff (bytes).

        Sets self.valid_axml False (and returns early) on a bad magic value;
        sets self.axml_tampered when only the lower header half matches.
        """
        self.reset()

        self.valid_axml = True
        self.axml_tampered = False
        self.packerwarning = False
        self.buff = bytecode.BuffHandle(raw_buff)

        axml_file, = unpack('<L', self.buff.read(4))

        if axml_file != CHUNK_AXML_FILE:
            # It looks like the header is wrong.
            # need some other checks.
            # We noted, that a some of files start with 0x0008NNNN, where NNNN is some random number
            if axml_file >> 16 == 0x0008:
                self.axml_tampered = True
                log.warning("AXML file has an unusual header, most malwares like doing such stuff to anti androguard! But we try to parse it anyways. Header: 0x{:08x}".format(axml_file))
            else:
                self.valid_axml = False
                log.error("Not a valid AXML file. Header 0x{:08x}".format(axml_file))
                return

        # Next is the filesize
        self.filesize, = unpack('<L', self.buff.read(4))
        # NOTE: assert is used for validation here; it is stripped under -O.
        assert self.filesize <= self.buff.size(), "Declared filesize does not match real size: {} vs {}".format(self.filesize, self.buff.size())

        # Now we parse the STRING POOL
        header = ARSCHeader(self.buff)  # read 8 byte = String header + chunk_size
        assert header.type == RES_STRING_POOL_TYPE, "Expected String Pool header, got %x" % header.type

        self.sb = StringBlock(self.buff, header)

        self.m_resourceIDs = []
        # prefix --> uri is a 1:1 mapping
        self.m_prefixuri = {}
        # uri --> prefix is a 1:n mapping
        self.m_uriprefix = defaultdict(list)
        # Contains a list of current prefix/uri pairs
        self.m_prefixuriL = []

        # Store which namespaces are already printed
        self.visited_ns = []
    def is_valid(self):
        """Return True if the header identified this as a parsable AXML file."""
        return self.valid_axml
    def reset(self):
        """Clear all per-event state before parsing the next chunk."""
        self.m_event = -1
        self.m_lineNumber = -1
        self.m_name = -1
        self.m_namespaceUri = -1
        self.m_attributes = []
        self.m_idAttribute = -1
        self.m_classAttribute = -1
        self.m_styleAttribute = -1
    def __next__(self):
        """Advance to the next parse event and return its event code."""
        self.doNext()
        return self.m_event
    def doNext(self):
        """Advance the parser by one event.

        Reads chunks from the buffer until one of the XML events
        (START_DOCUMENT, START_TAG, END_TAG, TEXT, END_DOCUMENT) is produced
        and stored in self.m_event; namespace and resource-ID chunks are
        consumed as side effects without emitting an event.
        """
        if self.m_event == END_DOCUMENT:
            return

        event = self.m_event

        self.reset()
        while True:
            chunkType = -1
            # General notes:
            # * chunkSize is from start of chunk, including the tag type

            # Fake END_DOCUMENT event.
            if event == END_TAG:
                pass

            # START_DOCUMENT
            if event == START_DOCUMENT:
                chunkType = CHUNK_XML_START_TAG
            else:
                # Stop at the declared filesize or at the end of the file
                if self.buff.end() or self.buff.get_idx() == self.filesize:
                    self.m_event = END_DOCUMENT
                    break
                chunkType = unpack('<L', self.buff.read(4))[0]

            # Parse ResourceIDs. This chunk is after the String section
            if chunkType == CHUNK_RESOURCEIDS:
                chunkSize = unpack('<L', self.buff.read(4))[0]

                # Check size: < 8 bytes mean that the chunk is not complete
                # Should be aligned to 4 bytes.
                if chunkSize < 8 or chunkSize % 4 != 0:
                    log.warning("Invalid chunk size in chunk RESOURCEIDS")

                # chunkSize covers the 8 header bytes, hence the "- 2" u32s.
                for i in range(0, (chunkSize // 4) - 2):
                    self.m_resourceIDs.append(unpack('<L', self.buff.read(4))[0])

                continue

            # FIXME, unknown chunk types might cause problems
            if chunkType < CHUNK_XML_FIRST or chunkType > CHUNK_XML_LAST:
                log.warning("invalid chunk type 0x{:08x}".format(chunkType))

            # Fake START_DOCUMENT event.
            if chunkType == CHUNK_XML_START_TAG and event == -1:
                self.m_event = START_DOCUMENT
                break

            # After the chunk_type, there are always 3 fields for the remaining tags we need to parse:
            # Chunk Size (we do not need it)
            # TODO for sanity checks, we should use it and check if the chunks are correct in size
            self.buff.read(4)
            # Line Number
            self.m_lineNumber = unpack('<L', self.buff.read(4))[0]
            # Comment_Index (usually 0xFFFFFFFF, we do not need it)
            self.buff.read(4)

            # Now start to parse the field
            # There are five (maybe more) types of Chunks:
            # * START_NAMESPACE
            # * END_NAMESPACE
            # * START_TAG
            # * END_TAG
            # * TEXT
            if chunkType == CHUNK_XML_START_NAMESPACE or chunkType == CHUNK_XML_END_NAMESPACE:
                if chunkType == CHUNK_XML_START_NAMESPACE:
                    prefix = unpack('<L', self.buff.read(4))[0]
                    uri = unpack('<L', self.buff.read(4))[0]

                    # FIXME We will get a problem here, if the same uri is used with different prefixes!
                    # prefix --> uri is a 1:1 mapping
                    self.m_prefixuri[prefix] = uri
                    # but uri --> prefix is a 1:n mapping!
                    self.m_uriprefix[uri].append(prefix)
                    self.m_prefixuriL.append((prefix, uri))
                    self.ns = uri

                    # Workaround for closing tags
                    if (uri, prefix) in self.visited_ns:
                        self.visited_ns.remove((uri, prefix))
                else:
                    self.ns = -1
                    # END_PREFIX contains again prefix and uri field
                    prefix, = unpack('<L', self.buff.read(4))
                    uri, = unpack('<L', self.buff.read(4))

                    # We can then remove those from the prefixuriL
                    if (prefix, uri) in self.m_prefixuriL:
                        self.m_prefixuriL.remove((prefix, uri))

                    # We also remove the entry from prefixuri and uriprefix:
                    if prefix in self.m_prefixuri:
                        del self.m_prefixuri[prefix]
                    if uri in self.m_uriprefix:
                        self.m_uriprefix[uri].remove(prefix)

                    # Need to remove them from visisted namespaces as well, as it might pop up later
                    # FIXME we need to remove it also if we leave a tag which closes it namespace
                    # Workaround for now: remove it on a START_NAMESPACE tag
                    if (uri, prefix) in self.visited_ns:
                        self.visited_ns.remove((uri, prefix))
                    else:
                        log.warning("Reached a NAMESPACE_END without having the namespace stored before? Prefix ID: {}, URI ID: {}".format(prefix, uri))

                continue

            # START_TAG is the start of a new tag.
            if chunkType == CHUNK_XML_START_TAG:
                # The TAG consists of some fields:
                # * (chunk_size, line_number, comment_index - we read before)
                # * namespace_uri
                # * name
                # * flags
                # * attribute_count
                # * class_attribute
                # After that, there are two lists of attributes, 20 bytes each
                self.m_namespaceUri = unpack('<L', self.buff.read(4))[0]
                self.m_name = unpack('<L', self.buff.read(4))[0]

                # FIXME
                self.buff.read(4)  # flags

                attributeCount = unpack('<L', self.buff.read(4))[0]
                # Upper half carries the (1-based) id attribute index.
                self.m_idAttribute = (attributeCount >> 16) - 1
                attributeCount = attributeCount & 0xFFFF
                self.m_classAttribute = unpack('<L', self.buff.read(4))[0]
                self.m_styleAttribute = (self.m_classAttribute >> 16) - 1

                self.m_classAttribute = (self.m_classAttribute & 0xFFFF) - 1

                # Now, we parse the attributes.
                # Each attribute has 5 fields of 4 byte
                for i in range(0, attributeCount * ATTRIBUTE_LENGHT):
                    # Each field is linearly parsed into the array
                    self.m_attributes.append(unpack('<L', self.buff.read(4))[0])

                # Then there are class_attributes
                # (keep only the high byte: the value type of each attribute)
                for i in range(ATTRIBUTE_IX_VALUE_TYPE, len(self.m_attributes),
                               ATTRIBUTE_LENGHT):
                    self.m_attributes[i] = self.m_attributes[i] >> 24

                self.m_event = START_TAG
                break

            if chunkType == CHUNK_XML_END_TAG:
                self.m_namespaceUri = unpack('<L', self.buff.read(4))[0]
                self.m_name = unpack('<L', self.buff.read(4))[0]
                self.m_event = END_TAG
                break

            if chunkType == CHUNK_XML_TEXT:
                # TODO we do not know what the TEXT field does...
                self.m_name = unpack('<L', self.buff.read(4))[0]

                # FIXME
                # Raw_value
                self.buff.read(4)
                # typed_value, is an enum
                self.buff.read(4)

                self.m_event = TEXT
                break
def getPrefixByUri(self, uri):
# As uri --> prefix is 1:n mapping,
# We will just return the first one we match.
if uri not in self.m_uriprefix:
return -1
else:
if len(self.m_uriprefix[uri]) == 0:
return -1
return self.m_uriprefix[uri][0]
def getPrefix(self):
# The default is, that the namespaceUri is 0xFFFFFFFF
# Then we know, there is none
if self.m_namespaceUri == 0xFFFFFFFF:
return u''
# FIXME this could be problematic. Need to find the correct namespace prefix
if self.m_namespaceUri in self.m_uriprefix:
candidate = self.m_uriprefix[self.m_namespaceUri][0]
try:
return self.sb.getString(candidate)
except KeyError:
return u''
else:
return u''
def getName(self):
if self.m_name == -1 or (self.m_event != START_TAG and
self.m_event != END_TAG):
return u''
return self.sb.getString(self.m_name)
def getText(self):
if self.m_name == -1 or self.m_event != TEXT:
return u''
return self.sb.getString(self.m_name)
def getNamespacePrefix(self, pos):
prefix = self.m_prefixuriL[pos][0]
return self.sb.getString(prefix)
def getNamespaceUri(self, pos):
uri = self.m_prefixuriL[pos][1]
return self.sb.getString(uri)
def getXMLNS(self):
buff = ""
for prefix, uri in self.m_prefixuri.items():
if (uri, prefix) not in self.visited_ns:
prefix_str = self.sb.getString(prefix)
prefix_uri = self.sb.getString(self.m_prefixuri[prefix])
# FIXME Packers like Liapp use empty uri to fool XML Parser
# FIXME they also mess around with the Manifest, thus it can not be parsed easily
if prefix_uri == '':
log.warning("Empty Namespace URI for Namespace {}.".format(prefix_str))
self.packerwarning = True
# if prefix is (null), which is indicated by an empty str, then do not print :
if prefix_str != '':
prefix_str = ":" + prefix_str
buff += 'xmlns{}="{}"\n'.format(prefix_str, prefix_uri)
self.visited_ns.append((uri, prefix))
return buff
    def getNamespaceCount(self, pos):
        # Not implemented: namespace counting is never used by this parser.
        pass
def getAttributeOffset(self, index):
# FIXME
if self.m_event != START_TAG:
log.warning("Current event is not START_TAG.")
offset = index * 5
# FIXME
if offset >= len(self.m_attributes):
log.warning("Invalid attribute index")
return offset
def getAttributeCount(self):
if self.m_event != START_TAG:
return -1
return len(self.m_attributes) // ATTRIBUTE_LENGHT
def getAttributePrefix(self, index):
offset = self.getAttributeOffset(index)
uri = self.m_attributes[offset + ATTRIBUTE_IX_NAMESPACE_URI]
prefix = self.getPrefixByUri(uri)
if prefix == -1:
return ""
return self.sb.getString(prefix)
def getAttributeName(self, index):
offset = self.getAttributeOffset(index)
name = self.m_attributes[offset + ATTRIBUTE_IX_NAME]
if name == -1:
return ""
res = self.sb.getString(name)
# If the result is a (null) string, we need to look it up.
if not res:
attr = self.m_resourceIDs[name]
if attr in public.SYSTEM_RESOURCES['attributes']['inverse']:
res = 'android:' + public.SYSTEM_RESOURCES['attributes']['inverse'][
attr
]
else:
# Attach the HEX Number, so for multiple missing attributes we do not run
# into problems.
res = 'android:UNKNOWN_SYSTEM_ATTRIBUTE_{:08x}'.format(attr)
return res
def getAttributeValueType(self, index):
offset = self.getAttributeOffset(index)
return self.m_attributes[offset + ATTRIBUTE_IX_VALUE_TYPE]
def getAttributeValueData(self, index):
offset = self.getAttributeOffset(index)
return self.m_attributes[offset + ATTRIBUTE_IX_VALUE_DATA]
def getAttributeValue(self, index):
"""
This function is only used to look up strings
All other work is made by format_value
# FIXME should unite those functions
:param index:
:return:
"""
offset = self.getAttributeOffset(index)
valueType = self.m_attributes[offset + ATTRIBUTE_IX_VALUE_TYPE]
if valueType == TYPE_STRING:
valueString = self.m_attributes[offset + ATTRIBUTE_IX_VALUE_STRING]
return self.sb.getString(valueString)
return ""
# FIXME there are duplicates and missing values...
# Resource value types as defined by Res_value::dataType in Android's
# ResourceTypes.h.
TYPE_NULL = 0
TYPE_REFERENCE = 1
TYPE_ATTRIBUTE = 2
TYPE_STRING = 3
TYPE_FLOAT = 4
TYPE_DIMENSION = 5
TYPE_FRACTION = 6
# Integer-like types occupy the range [TYPE_FIRST_INT, TYPE_LAST_INT].
TYPE_FIRST_INT = 16
TYPE_INT_DEC = 16
TYPE_INT_HEX = 17
TYPE_INT_BOOLEAN = 18
# Color types are a sub-range of the integer types.
TYPE_FIRST_COLOR_INT = 28
TYPE_INT_COLOR_ARGB8 = 28
TYPE_INT_COLOR_RGB8 = 29
TYPE_INT_COLOR_ARGB4 = 30
TYPE_INT_COLOR_RGB4 = 31
TYPE_LAST_COLOR_INT = 31
TYPE_LAST_INT = 31
# Human-readable type names (used e.g. by ARSCResStringPoolRef.__repr__).
TYPE_TABLE = {
    TYPE_ATTRIBUTE: "attribute",
    TYPE_DIMENSION: "dimension",
    TYPE_FLOAT: "float",
    TYPE_FRACTION: "fraction",
    TYPE_INT_BOOLEAN: "int_boolean",
    TYPE_INT_COLOR_ARGB4: "int_color_argb4",
    TYPE_INT_COLOR_ARGB8: "int_color_argb8",
    TYPE_INT_COLOR_RGB4: "int_color_rgb4",
    TYPE_INT_COLOR_RGB8: "int_color_rgb8",
    TYPE_INT_DEC: "int_dec",
    TYPE_INT_HEX: "int_hex",
    TYPE_NULL: "null",
    TYPE_REFERENCE: "reference",
    TYPE_STRING: "string",
}
# Tables used to decode Android "complex" values (dimensions and fractions).
RADIX_MULTS = [0.00390625, 3.051758E-005, 1.192093E-007, 4.656613E-010]
DIMENSION_UNITS = ["px", "dip", "sp", "pt", "in", "mm"]
FRACTION_UNITS = ["%", "%p"]
COMPLEX_UNIT_MASK = 15


def complexToFloat(xcomplex):
    """Decode a complex value's mantissa/radix encoding into a float."""
    mantissa = float(xcomplex & 0xFFFFFF00)
    radix = RADIX_MULTS[(xcomplex >> 4) & 3]
    return mantissa * radix
def long2int(l):
    """Reinterpret an unsigned 32-bit value as a signed 32-bit integer."""
    if l > 0x7fffffff:
        return (l & 0x7fffffff) - 0x80000000
    return l
def long2str(l):
    """Convert a non-negative integer to a string, least-significant byte first.

    Each byte of `l` becomes one character via chr().

    :param l: non-negative integer
    :raises ValueError: if `l` is not an integer or is negative
    """
    # BUGFIX: the original checked `type(l) in (types.IntType, types.LongType)`,
    # which only exists on Python 2 and raises AttributeError on Python 3.
    if not isinstance(l, int):
        raise ValueError('the input must be an integer')
    if l < 0:
        raise ValueError('the input must be greater than 0')
    s = ''
    while l:
        s = s + chr(l & 255)
        l >>= 8
    return s
def str2long(s):
    """Convert a string to an integer, first character most significant.

    Inverse of long2str up to byte order.

    :param s: string whose characters are folded into the result via ord()
    :raises ValueError: if `s` is not a string
    """
    # BUGFIX: the original checked `type(s) in (types.StringType, types.UnicodeType)`,
    # which only exists on Python 2 and raises AttributeError on Python 3.
    if not isinstance(s, str):
        raise ValueError('the input must be a string')
    l = 0
    for i in s:
        l <<= 8
        l |= ord(i)
    return l
def getPackage(i):
    """Return the 'android:' prefix for system resource IDs (package 0x01)."""
    return "android:" if (i >> 24) == 1 else ""
def format_value(_type, _data, lookup_string=lambda ix: "<string>"):
    """Render a typed resource value the way aapt would print it.

    :param _type: one of the TYPE_* constants
    :param _data: raw 32-bit payload of the value
    :param lookup_string: resolver used for TYPE_STRING indices
    """
    if _type == TYPE_STRING:
        return lookup_string(_data)
    if _type == TYPE_ATTRIBUTE:
        return "?%s%08X" % (getPackage(_data), _data)
    if _type == TYPE_REFERENCE:
        return "@%s%08X" % (getPackage(_data), _data)
    if _type == TYPE_FLOAT:
        # Reinterpret the 32-bit payload as an IEEE-754 float.
        return "%f" % unpack("=f", pack("=L", _data))[0]
    if _type == TYPE_INT_HEX:
        return "0x%08X" % _data
    if _type == TYPE_INT_BOOLEAN:
        return "false" if _data == 0 else "true"
    if _type == TYPE_DIMENSION:
        return "%f%s" % (complexToFloat(_data), DIMENSION_UNITS[_data & COMPLEX_UNIT_MASK])
    if _type == TYPE_FRACTION:
        return "%f%s" % (complexToFloat(_data) * 100, FRACTION_UNITS[_data & COMPLEX_UNIT_MASK])
    if TYPE_FIRST_COLOR_INT <= _type <= TYPE_LAST_COLOR_INT:
        return "#%08X" % _data
    if TYPE_FIRST_INT <= _type <= TYPE_LAST_INT:
        return "%d" % long2int(_data)
    # Unknown type: dump the raw payload and the type byte for debugging.
    return "<0x%X, type 0x%02X>" % (_data, _type)
class AXMLPrinter(object):
    """
    Converter for AXML Files into a XML string
    """
    def __init__(self, raw_buff):
        """Parse `raw_buff` and render it into self.buff as XML text.

        :param raw_buff: bytes of the binary AXML document
        """
        self.axml = AXMLParser(raw_buff)
        self.xmlns = False
        self.buff = u''
        while self.axml.is_valid():
            _type = next(self.axml)
            if _type == START_DOCUMENT:
                self.buff += u'<?xml version="1.0" encoding="utf-8"?>\n'
            elif _type == START_TAG:
                self.buff += u'<' + self.getPrefix(self.axml.getPrefix()) + self.axml.getName() + u'\n'
                self.buff += self.axml.getXMLNS()
                for i in range(0, self.axml.getAttributeCount()):
                    prefix = self.getPrefix(self.axml.getAttributePrefix(i))
                    name = self.axml.getAttributeName(i)
                    value = self._escape(self.getAttributeValue(i))
                    # Some system attribute names already carry the prefix;
                    # drop the redundant prefix in that case.
                    if name.startswith(prefix):
                        prefix = u''
                    self.buff += u'{}{}="{}"\n'.format(prefix, name, value)
                self.buff += u'>\n'
            elif _type == END_TAG:
                self.buff += u"</%s%s>\n" % (
                    self.getPrefix(self.axml.getPrefix()), self.axml.getName())
            elif _type == TEXT:
                self.buff += u"%s\n" % self._escape(self.axml.getText())
            elif _type == END_DOCUMENT:
                break
    def _escape(self, s):
        """Escape a string for embedding into the XML output.

        Null bytes are stripped first (a C parser would terminate there
        anyway). BUGFIX: escaping is now delegated entirely to
        xml.sax.saxutils.escape; the previous code manually replaced
        &, <, > and quotes and THEN called escape(), which double-escaped
        every entity (e.g. '&' became '&amp;amp;').
        """
        s = s.replace("\x00", "")
        return escape(s, entities={'"': "&quot;", "'": "&apos;"})
    def is_packed(self):
        """
        Return True if we believe that the AXML file is packed
        If it is, we can not be sure that the AXML file can be read by a XML Parser
        :return: boolean
        """
        return self.axml.packerwarning
    def get_buff(self):
        """Return the rendered XML document as UTF-8 bytes."""
        return self.buff.encode('utf-8')
    def get_xml(self):
        """
        Get the XML as an UTF-8 string
        :return: str
        """
        return etree.tostring(self.get_xml_obj(), encoding="utf-8", pretty_print=True)
    def get_xml_obj(self):
        """
        Get the XML as an ElementTree object
        :return: :class:`~lxml.etree.Element`
        """
        # recover=True tolerates the slightly malformed output packers produce.
        parser = etree.XMLParser(recover=True, resolve_entities=False)
        tree = etree.fromstring(self.get_buff(), parser=parser)
        return tree
    def getPrefix(self, prefix):
        """Append ':' to a non-empty namespace prefix; '' and None pass through."""
        if prefix is None or len(prefix) == 0:
            return u''
        return prefix + u':'
    def getAttributeValue(self, index):
        """
        Wrapper function for format_value
        to resolve the actual value of an attribute in a tag
        :param index:
        :return:
        """
        _type = self.axml.getAttributeValueType(index)
        _data = self.axml.getAttributeValueData(index)
        return format_value(_type, _data, lambda _: self.axml.getAttributeValue(index))
# Constants for ARSC Files
# Top-level chunk types of a resources.arsc / binary XML file
# (ResChunk_header::type in Android's ResourceTypes.h).
RES_NULL_TYPE = 0x0000
RES_STRING_POOL_TYPE = 0x0001
RES_TABLE_TYPE = 0x0002
RES_XML_TYPE = 0x0003
# Chunk types in RES_XML_TYPE
RES_XML_FIRST_CHUNK_TYPE = 0x0100
RES_XML_START_NAMESPACE_TYPE = 0x0100
RES_XML_END_NAMESPACE_TYPE = 0x0101
RES_XML_START_ELEMENT_TYPE = 0x0102
RES_XML_END_ELEMENT_TYPE = 0x0103
RES_XML_CDATA_TYPE = 0x0104
RES_XML_LAST_CHUNK_TYPE = 0x017f
# This contains a uint32_t array mapping strings in the string
# pool back to resource identifiers. It is optional.
RES_XML_RESOURCE_MAP_TYPE = 0x0180
# Chunk types in RES_TABLE_TYPE
RES_TABLE_PACKAGE_TYPE = 0x0200
RES_TABLE_TYPE_TYPE = 0x0201
RES_TABLE_TYPE_SPEC_TYPE = 0x0202
RES_TABLE_LIBRARY_TYPE = 0x0203
# Bit flags naming the configuration axes a resource value may vary on.
ACONFIGURATION_MCC = 0x0001
ACONFIGURATION_MNC = 0x0002
ACONFIGURATION_LOCALE = 0x0004
ACONFIGURATION_TOUCHSCREEN = 0x0008
ACONFIGURATION_KEYBOARD = 0x0010
ACONFIGURATION_KEYBOARD_HIDDEN = 0x0020
ACONFIGURATION_NAVIGATION = 0x0040
ACONFIGURATION_ORIENTATION = 0x0080
ACONFIGURATION_DENSITY = 0x0100
ACONFIGURATION_SCREEN_SIZE = 0x0200
ACONFIGURATION_VERSION = 0x0400
ACONFIGURATION_SCREEN_LAYOUT = 0x0800
ACONFIGURATION_UI_MODE = 0x1000
class ARSCParser(object):
    """
    Parser for resource.arsc files
    """
    def __init__(self, raw_buff):
        """Parse the whole resource table from `raw_buff`.

        Populates self.packages (flat chunk lists per package) and the
        lookup tables used later by _analyse(). Parsing is eager; value
        analysis is deferred until _analyse() is first needed.

        :param raw_buff: bytes of a resources.arsc file
        """
        # Deferred-analysis state (see _analyse).
        self.analyzed = False
        self._resolved_strings = None
        self.buff = bytecode.BuffHandle(raw_buff)
        self.header = ARSCHeader(self.buff)
        self.packageCount = unpack('<i', self.buff.read(4))[0]
        self.packages = {}
        self.values = {}
        # mResId -> {config: entry} and package -> type -> key -> mResId maps.
        self.resource_values = collections.defaultdict(collections.defaultdict)
        self.resource_configs = collections.defaultdict(lambda: collections.defaultdict(set))
        self.resource_keys = collections.defaultdict(
            lambda: collections.defaultdict(collections.defaultdict))
        self.stringpool_main = None
        # skip to the start of the first chunk
        self.buff.set_idx(self.header.start + self.header.header_size)
        data_end = self.header.start + self.header.size
        while self.buff.get_idx() <= data_end - ARSCHeader.SIZE:
            res_header = ARSCHeader(self.buff)
            if res_header.start + res_header.size > data_end:
                # this inner chunk crosses the boundary of the table chunk
                break
            if res_header.type == RES_STRING_POOL_TYPE and not self.stringpool_main:
                self.stringpool_main = StringBlock(self.buff, res_header)
            elif res_header.type == RES_TABLE_PACKAGE_TYPE:
                assert len(self.packages) < self.packageCount, "Got more packages than expected"
                current_package = ARSCResTablePackage(self.buff, res_header)
                package_name = current_package.get_name()
                package_data_end = res_header.start + res_header.size
                self.packages[package_name] = []
                # After the Header, we have the resource type symbol table
                self.buff.set_idx(current_package.header.start + current_package.typeStrings)
                type_sp_header = ARSCHeader(self.buff)
                assert type_sp_header.type == RES_STRING_POOL_TYPE, \
                    "Expected String Pool header, got %x" % type_sp_header.type
                mTableStrings = StringBlock(self.buff, type_sp_header)
                # Next, we should have the resource key symbol table
                self.buff.set_idx(current_package.header.start + current_package.keyStrings)
                key_sp_header = ARSCHeader(self.buff)
                assert key_sp_header.type == RES_STRING_POOL_TYPE, \
                    "Expected String Pool header, got %x" % key_sp_header.type
                mKeyStrings = StringBlock(self.buff, key_sp_header)
                # Add them to the dict of read packages
                self.packages[package_name].append(current_package)
                self.packages[package_name].append(mTableStrings)
                self.packages[package_name].append(mKeyStrings)
                pc = PackageContext(current_package, self.stringpool_main,
                                    mTableStrings, mKeyStrings)
                # skip to the first header in this table package chunk
                # FIXME is this correct? We have already read the first two sections!
                # self.buff.set_idx(res_header.start + res_header.header_size)
                # this looks more like we want: (???)
                self.buff.set_idx(res_header.start + res_header.header_size + type_sp_header.size + key_sp_header.size)
                # Read all other headers
                while self.buff.get_idx() <= package_data_end - ARSCHeader.SIZE:
                    pkg_chunk_header = ARSCHeader(self.buff)
                    log.debug("Found a header: {}".format(pkg_chunk_header))
                    if pkg_chunk_header.start + pkg_chunk_header.size > package_data_end:
                        # we are way off the package chunk; bail out
                        break
                    self.packages[package_name].append(pkg_chunk_header)
                    if pkg_chunk_header.type == RES_TABLE_TYPE_SPEC_TYPE:
                        self.packages[package_name].append(ARSCResTypeSpec(self.buff, pc))
                    elif pkg_chunk_header.type == RES_TABLE_TYPE_TYPE:
                        a_res_type = ARSCResType(self.buff, pc)
                        self.packages[package_name].append(a_res_type)
                        self.resource_configs[package_name][a_res_type].add(a_res_type.config)
                        log.debug("Config: {}".format(a_res_type.config))
                        # Entry offset table: (offset, resource id) pairs;
                        # offset -1 marks a hole (no entry for that id).
                        entries = []
                        for i in range(0, a_res_type.entryCount):
                            current_package.mResId = current_package.mResId & 0xffff0000 | i
                            entries.append((unpack('<i', self.buff.read(4))[0], current_package.mResId))
                        self.packages[package_name].append(entries)
                        for entry, res_id in entries:
                            if self.buff.end():
                                break
                            if entry != -1:
                                ate = ARSCResTableEntry(self.buff, res_id, pc)
                                self.packages[package_name].append(ate)
                                if ate.is_weak():
                                    # FIXME we are not sure how to implement the FLAG_WEAk!
                                    # We saw the following: There is just a single Res_value after the ARSCResTableEntry
                                    # and then comes the next ARSCHeader.
                                    # Therefore we think this means all entries are somehow replicated?
                                    # So we do some kind of hack here. We set the idx to the entry again...
                                    # Now we will read all entries!
                                    # Not sure if this is a good solution though
                                    self.buff.set_idx(ate.start)
                    elif pkg_chunk_header.type == RES_TABLE_LIBRARY_TYPE:
                        log.warning("RES_TABLE_LIBRARY_TYPE chunk is not supported")
                    else:
                        # silently skip other chunk types
                        pass
                    # skip to the next chunk
                    self.buff.set_idx(pkg_chunk_header.start + pkg_chunk_header.size)
            # move to the next resource chunk
            self.buff.set_idx(res_header.start + res_header.size)
    def _analyse(self):
        """Walk the flat per-package chunk lists once and build lookup maps.

        Fills self.values[package][locale][type], self.resource_values and
        self.resource_keys. Idempotent: subsequent calls return immediately.
        The index arithmetic relies on the exact append order used in
        __init__ (header, res_type, entries, then one entry object per
        non-hole offset).
        """
        if self.analyzed:
            return
        self.analyzed = True
        for package_name in self.packages:
            self.values[package_name] = {}
            # Skip the three leading items (package, table strings, key strings).
            nb = 3
            while nb < len(self.packages[package_name]):
                header = self.packages[package_name][nb]
                if isinstance(header, ARSCHeader):
                    if header.type == RES_TABLE_TYPE_TYPE:
                        a_res_type = self.packages[package_name][nb + 1]
                        language = a_res_type.config.get_language()
                        region = a_res_type.config.get_country()
                        # "\x00\x00" region means the plain language locale.
                        if region == "\x00\x00":
                            locale = language
                        else:
                            locale = "{}-r{}".format(language, region)
                        c_value = self.values[package_name].setdefault(locale, {"public":[]})
                        entries = self.packages[package_name][nb + 2]
                        # nb_i counts the entry objects actually present
                        # (holes in `entries` have no matching object).
                        nb_i = 0
                        for entry, res_id in entries:
                            if entry != -1:
                                ate = self.packages[package_name][nb + 3 + nb_i]
                                self.resource_values[ate.mResId][a_res_type.config] = ate
                                self.resource_keys[package_name][a_res_type.get_type()][ate.get_value()] = ate.mResId
                                if ate.get_index() != -1:
                                    c_value["public"].append(
                                        (a_res_type.get_type(), ate.get_value(),
                                         ate.mResId))
                                if a_res_type.get_type() not in c_value:
                                    c_value[a_res_type.get_type()] = []
                                if a_res_type.get_type() == "string":
                                    c_value["string"].append(
                                        self.get_resource_string(ate))
                                elif a_res_type.get_type() == "id":
                                    if not ate.is_complex():
                                        c_value["id"].append(
                                            self.get_resource_id(ate))
                                elif a_res_type.get_type() == "bool":
                                    if not ate.is_complex():
                                        c_value["bool"].append(
                                            self.get_resource_bool(ate))
                                elif a_res_type.get_type() == "integer":
                                    c_value["integer"].append(
                                        self.get_resource_integer(ate))
                                elif a_res_type.get_type() == "color":
                                    c_value["color"].append(
                                        self.get_resource_color(ate))
                                elif a_res_type.get_type() == "dimen":
                                    c_value["dimen"].append(
                                        self.get_resource_dimen(ate))
                                nb_i += 1
                        nb += 3 + nb_i - 1  # -1 to account for the nb+=1 on the next line
                nb += 1
def get_resource_string(self, ate):
return [ate.get_value(), ate.get_key_data()]
def get_resource_id(self, ate):
x = [ate.get_value()]
if ate.key.get_data() == 0:
x.append("false")
elif ate.key.get_data() == 1:
x.append("true")
return x
def get_resource_bool(self, ate):
x = [ate.get_value()]
if ate.key.get_data() == 0:
x.append("false")
elif ate.key.get_data() == -1:
x.append("true")
return x
def get_resource_integer(self, ate):
return [ate.get_value(), ate.key.get_data()]
def get_resource_color(self, ate):
entry_data = ate.key.get_data()
return [
ate.get_value(),
"#%02x%02x%02x%02x" % (
((entry_data >> 24) & 0xFF),
((entry_data >> 16) & 0xFF),
((entry_data >> 8) & 0xFF),
(entry_data & 0xFF))
]
def get_resource_dimen(self, ate):
try:
return [
ate.get_value(), "%s%s" % (
complexToFloat(ate.key.get_data()),
DIMENSION_UNITS[ate.key.get_data() & COMPLEX_UNIT_MASK])
]
except IndexError:
log.debug("Out of range dimension unit index for %s: %s" % (
complexToFloat(ate.key.get_data()),
ate.key.get_data() & COMPLEX_UNIT_MASK))
return [ate.get_value(), ate.key.get_data()]
    # FIXME
    def get_resource_style(self, ate):
        # Style resources are not decoded yet; the placeholder keeps the
        # return shape consistent with the other get_resource_* helpers.
        return ["", ""]
def get_packages_names(self):
return list(self.packages.keys())
def get_locales(self, package_name):
self._analyse()
return list(self.values[package_name].keys())
def get_types(self, package_name, locale):
self._analyse()
return list(self.values[package_name][locale].keys())
def get_public_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["public"]:
buff += '<public type="%s" name="%s" id="0x%08x" />\n' % (
i[0], i[1], i[2])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_string_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["string"]:
if any(map(i[1].__contains__, '<&>')):
value = '<![CDATA[%s]]>' % i[1]
else:
value = i[1]
buff += '<string name="%s">%s</string>\n' % (i[0], value)
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_strings_resources(self):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += "<packages>\n"
for package_name in self.get_packages_names():
buff += "<package name=\"%s\">\n" % package_name
for locale in self.get_locales(package_name):
buff += "<locale value=%s>\n" % repr(locale)
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["string"]:
buff += '<string name="%s">%s</string>\n' % (i[0], escape(i[1]))
except KeyError:
pass
buff += '</resources>\n'
buff += '</locale>\n'
buff += "</package>\n"
buff += "</packages>\n"
return buff.encode('utf-8')
def get_id_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["id"]:
if len(i) == 1:
buff += '<item type="id" name="%s"/>\n' % (i[0])
else:
buff += '<item type="id" name="%s">%s</item>\n' % (i[0],
escape(i[1]))
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_bool_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["bool"]:
buff += '<bool name="%s">%s</bool>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_integer_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["integer"]:
buff += '<integer name="%s">%s</integer>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_color_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["color"]:
buff += '<color name="%s">%s</color>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_dimen_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["dimen"]:
buff += '<dimen name="%s">%s</dimen>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_id(self, package_name, rid, locale='\x00\x00'):
self._analyse()
try:
for i in self.values[package_name][locale]["public"]:
if i[2] == rid:
return i
except KeyError:
return None
class ResourceResolver(object):
def __init__(self, android_resources, config=None):
self.resources = android_resources
self.wanted_config = config
def resolve(self, res_id):
result = []
self._resolve_into_result(result, res_id, self.wanted_config)
return result
def _resolve_into_result(self, result, res_id, config):
configs = self.resources.get_res_configs(res_id, config)
if configs:
for config, ate in configs:
self.put_ate_value(result, ate, config)
def put_ate_value(self, result, ate, config):
if ate.is_complex():
complex_array = []
result.append((config, complex_array))
for _, item in ate.item.items:
self.put_item_value(complex_array, item, config, complex_=True)
else:
self.put_item_value(result, ate.key, config, complex_=False)
def put_item_value(self, result, item, config, complex_):
if item.is_reference():
res_id = item.get_data()
if res_id:
self._resolve_into_result(
result,
item.get_data(),
self.wanted_config)
else:
if complex_:
result.append(item.format_value())
else:
result.append((config, item.format_value()))
def get_resolved_res_configs(self, rid, config=None):
resolver = ARSCParser.ResourceResolver(self, config)
return resolver.resolve(rid)
def get_resolved_strings(self):
self._analyse()
if self._resolved_strings:
return self._resolved_strings
r = {}
for package_name in self.get_packages_names():
r[package_name] = {}
k = {}
for locale in self.values[package_name]:
v_locale = locale
if v_locale == '\x00\x00':
v_locale = 'DEFAULT'
r[package_name][v_locale] = {}
try:
for i in self.values[package_name][locale]["public"]:
if i[0] == 'string':
r[package_name][v_locale][i[2]] = None
k[i[1]] = i[2]
except KeyError:
pass
try:
for i in self.values[package_name][locale]["string"]:
if i[0] in k:
r[package_name][v_locale][k[i[0]]] = i[1]
except KeyError:
pass
self._resolved_strings = r
return r
def get_res_configs(self, rid, config=None):
self._analyse()
if not rid:
raise ValueError("'rid' should be set")
try:
res_options = self.resource_values[rid]
if len(res_options) > 1 and config:
return [(
config,
res_options[config])]
else:
return list(res_options.items())
except KeyError:
return []
def get_string(self, package_name, name, locale='\x00\x00'):
self._analyse()
try:
for i in self.values[package_name][locale]["string"]:
if i[0] == name:
return i
except KeyError:
return None
    def get_res_id_by_key(self, package_name, resource_type, key):
        # Reverse lookup: the resource id registered for (package, type, key)
        # by _analyse(); None when unknown. Keeps the try/except form because
        # resource_keys is a nested defaultdict and [] access has
        # entry-creating side effects a .get() chain would not replicate.
        try:
            return self.resource_keys[package_name][resource_type][key]
        except KeyError:
            return None
    def get_items(self, package_name):
        # Return the raw, flat list of parsed chunks for `package_name`
        # (package, string pools, headers, types, entry lists, entries).
        self._analyse()
        return self.packages[package_name]
def get_type_configs(self, package_name, type_name=None):
if package_name is None:
package_name = self.get_packages_names()[0]
result = collections.defaultdict(list)
for res_type, configs in list(self.resource_configs[package_name].items()):
if res_type.get_package_name() == package_name and (
type_name is None or res_type.get_type() == type_name):
result[res_type.get_type()].extend(configs)
return result
class PackageContext(object):
    """Bundles the string pools needed while parsing one resource package."""
    def __init__(self, current_package, stringpool_main, mTableStrings,
                 mKeyStrings):
        """
        :param current_package: the ARSCResTablePackage being parsed
        :param stringpool_main: table-wide value string pool
        :param mTableStrings: string pool of resource type names
        :param mKeyStrings: string pool of resource key names
        """
        self.stringpool_main = stringpool_main
        self.mTableStrings = mTableStrings
        self.mKeyStrings = mKeyStrings
        self.current_package = current_package
    def get_mResId(self):
        """Return the running resource id of the current package."""
        return self.current_package.mResId
    def set_mResId(self, mResId):
        """Update the running resource id of the current package."""
        self.current_package.mResId = mResId
    def get_package_name(self):
        """Return the decoded package name."""
        return self.current_package.get_name()
class ARSCHeader(object):
    """The 8-byte chunk header preceding every ARSC resource chunk."""
    # type (2 bytes) + header size (2 bytes) + total chunk size (4 bytes)
    SIZE = 2 + 2 + 4
    def __init__(self, buff):
        self.start = buff.get_idx()
        self.type, = unpack('<h', buff.read(2))
        self.header_size, = unpack('<h', buff.read(2))
        self.size, = unpack('<I', buff.read(4))
    def __repr__(self):
        return "<ARSCHeader idx='0x{:08x}' type='{}' header_size='{}' size='{}'>".format(self.start, self.type, self.header_size, self.size)
class ARSCResTablePackage(object):
    """A ResTable_package chunk: package id, name and string-pool offsets."""
    def __init__(self, buff, header):
        self.header = header
        self.start = buff.get_idx()
        self.id = unpack('<I', buff.read(4))[0]
        # The package name is a fixed 256-byte UTF-16 field.
        self.name = buff.readNullString(256)
        self.typeStrings = unpack('<I', buff.read(4))[0]
        self.lastPublicType = unpack('<I', buff.read(4))[0]
        self.keyStrings = unpack('<I', buff.read(4))[0]
        self.lastPublicKey = unpack('<I', buff.read(4))[0]
        # Resource ids of this package carry the package id in the top byte.
        self.mResId = self.id << 24
    def get_name(self):
        """Decode the UTF-16 package name up to the first NUL character."""
        decoded = self.name.decode("utf-16", 'replace')
        return decoded[:decoded.find("\x00")]
class ARSCResTypeSpec(object):
    """A ResTable_typeSpec chunk: per-entry configuration-change flags."""
    def __init__(self, buff, parent=None):
        self.start = buff.get_idx()
        self.parent = parent
        self.id = unpack('<b', buff.read(1))[0]
        self.res0 = unpack('<b', buff.read(1))[0]
        self.res1 = unpack('<h', buff.read(2))[0]
        self.entryCount = unpack('<I', buff.read(4))[0]
        # One uint32 flag word per entry follows the header.
        self.typespec_entries = [unpack('<I', buff.read(4))[0]
                                 for _ in range(self.entryCount)]
class ARSCResType(object):
    """A ResTable_type chunk header: one resource type in one configuration."""
    def __init__(self, buff, parent=None):
        self.start = buff.get_idx()
        self.parent = parent
        self.id = unpack('<b', buff.read(1))[0]
        self.res0 = unpack('<b', buff.read(1))[0]
        self.res1 = unpack('<h', buff.read(2))[0]
        self.entryCount = unpack('<i', buff.read(4))[0]
        self.entriesStart = unpack('<i', buff.read(4))[0]
        # Fold this type id into the package's running resource id.
        self.mResId = (0xff000000 & self.parent.get_mResId()) | self.id << 16
        self.parent.set_mResId(self.mResId)
        self.config = ARSCResTableConfig(buff)
    def get_type(self):
        """Resolve the type name; type ids are 1-based into the type strings."""
        return self.parent.mTableStrings.getString(self.id - 1)
    def get_package_name(self):
        """Return the name of the package this type belongs to."""
        return self.parent.get_package_name()
    def __repr__(self):
        return "ARSCResType(%x, %x, %x, %x, %x, %x, %x, %s)" % (
            self.start,
            self.id,
            self.res0,
            self.res1,
            self.entryCount,
            self.entriesStart,
            self.mResId,
            "table:" + self.parent.mTableStrings.getString(self.id - 1)
        )
class ARSCResTableConfig(object):
    """A ResTable_config: the configuration axes one resource value applies to.

    Either parsed from a buffer or synthesized from keyword arguments
    (mcc, mnc, locale, orientation, density, sdkVersion, ...).
    """
    @classmethod
    def default_config(cls):
        """Return the shared all-zero default configuration (created lazily)."""
        if not hasattr(cls, 'DEFAULT'):
            cls.DEFAULT = ARSCResTableConfig(None)
        return cls.DEFAULT
    def __init__(self, buff=None, **kwargs):
        if buff is not None:
            self.start = buff.get_idx()
            self.size = unpack('<I', buff.read(4))[0]
            # Each field packs several sub-fields into one little-endian uint32.
            self.imsi = unpack('<I', buff.read(4))[0]
            self.locale = unpack('<I', buff.read(4))[0]
            self.screenType = unpack('<I', buff.read(4))[0]
            self.input = unpack('<I', buff.read(4))[0]
            self.screenSize = unpack('<I', buff.read(4))[0]
            self.version = unpack('<I', buff.read(4))[0]
            # Newer ARSC versions append more fields; honour the declared size.
            self.screenConfig = 0
            self.screenSizeDp = 0
            if self.size >= 32:
                self.screenConfig = unpack('<I', buff.read(4))[0]
                if self.size >= 36:
                    self.screenSizeDp = unpack('<I', buff.read(4))[0]
            self.exceedingSize = self.size - 36
            if self.exceedingSize > 0:
                log.debug("Skipping padding bytes!")
                self.padding = buff.read(self.exceedingSize)
        else:
            self.start = 0
            self.size = 0
            self.imsi = \
                ((kwargs.pop('mcc', 0) & 0xffff) << 0) + \
                ((kwargs.pop('mnc', 0) & 0xffff) << 16)
            self.locale = 0
            # BUGFIX: the loop previously iterated the locale string directly
            # (`for char_ix, char in "..."[0:4]`), raising ValueError for any
            # non-empty locale; enumerate() supplies the missing index.
            for char_ix, char in enumerate(kwargs.pop('locale', "")[0:4]):
                self.locale += (ord(char) << (char_ix * 8))
            self.screenType = \
                ((kwargs.pop('orientation', 0) & 0xff) << 0) + \
                ((kwargs.pop('touchscreen', 0) & 0xff) << 8) + \
                ((kwargs.pop('density', 0) & 0xffff) << 16)
            self.input = \
                ((kwargs.pop('keyboard', 0) & 0xff) << 0) + \
                ((kwargs.pop('navigation', 0) & 0xff) << 8) + \
                ((kwargs.pop('inputFlags', 0) & 0xff) << 16) + \
                ((kwargs.pop('inputPad0', 0) & 0xff) << 24)
            self.screenSize = \
                ((kwargs.pop('screenWidth', 0) & 0xffff) << 0) + \
                ((kwargs.pop('screenHeight', 0) & 0xffff) << 16)
            self.version = \
                ((kwargs.pop('sdkVersion', 0) & 0xffff) << 0) + \
                ((kwargs.pop('minorVersion', 0) & 0xffff) << 16)
            self.screenConfig = \
                ((kwargs.pop('screenLayout', 0) & 0xff) << 0) + \
                ((kwargs.pop('uiMode', 0) & 0xff) << 8) + \
                ((kwargs.pop('smallestScreenWidthDp', 0) & 0xffff) << 16)
            self.screenSizeDp = \
                ((kwargs.pop('screenWidthDp', 0) & 0xffff) << 0) + \
                ((kwargs.pop('screenHeightDp', 0) & 0xffff) << 16)
            self.exceedingSize = 0
    def get_language(self):
        """Return the two-character language code packed in the low 16 bits."""
        x = self.locale & 0x0000ffff
        return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
    def get_country(self):
        """Return the two-character country code packed in the high 16 bits."""
        x = (self.locale & 0xffff0000) >> 16
        return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
    def get_density(self):
        """Return the density sub-field of screenType."""
        x = ((self.screenType >> 16) & 0xffff)
        return x
    def _get_tuple(self):
        # Canonical field tuple used for equality and hashing.
        return (
            self.imsi,
            self.locale,
            self.screenType,
            self.input,
            self.screenSize,
            self.version,
            self.screenConfig,
            self.screenSizeDp,
        )
    def __hash__(self):
        return hash(self._get_tuple())
    def __eq__(self, other):
        return self._get_tuple() == other._get_tuple()
    def __repr__(self):
        return "<ARSCResTableConfig '{}'>".format(repr(self._get_tuple()))
class ARSCResTableEntry(object):
    """
    A single resource entry (ResTable_entry): either a plain Res_value or,
    when FLAG_COMPLEX is set, a map of (reference, value) pairs.

    See https://github.com/LineageOS/android_frameworks_base/blob/df2898d9ce306bb2fe922d3beaa34a9cf6873d27/include/androidfw/ResourceTypes.h#L1370
    """
    # Entry flags from ResTable_entry.
    FLAG_COMPLEX = 1
    FLAG_PUBLIC = 2
    FLAG_WEAK = 4
    def __init__(self, buff, mResId, parent=None):
        self.start = buff.get_idx()
        self.mResId = mResId
        self.parent = parent
        self.size = unpack('<H', buff.read(2))[0]
        self.flags = unpack('<H', buff.read(2))[0]
        # Index into the key string pool (the resource's name).
        self.index = unpack('<I', buff.read(4))[0]
        if self.is_complex():
            self.item = ARSCComplex(buff, parent)
        else:
            # If FLAG_COMPLEX is not set, a Res_value structure will follow
            self.key = ARSCResStringPoolRef(buff, self.parent)
    def get_index(self):
        return self.index
    def get_value(self):
        """Resolve the entry's name from the key string pool."""
        return self.parent.mKeyStrings.getString(self.index)
    def get_key_data(self):
        """Resolve the entry's value via the main string pool."""
        return self.key.get_data_value()
    def is_public(self):
        # BUGFIX: was `self.FLAG.PUBLIC`, which raised AttributeError as no
        # `FLAG` attribute exists; the class constant is FLAG_PUBLIC.
        return (self.flags & self.FLAG_PUBLIC) != 0
    def is_complex(self):
        return (self.flags & self.FLAG_COMPLEX) != 0
    def is_weak(self):
        return (self.flags & self.FLAG_WEAK) != 0
    def __repr__(self):
        return "<ARSCResTableEntry idx='0x{:08x}' mResId='0x{:08x}' size='{}' flags='0x{:02x}' index='0x{:x}' holding={}>".format(
            self.start,
            self.mResId,
            self.size,
            self.flags,
            self.index,
            self.item if self.is_complex() else self.key)
class ARSCComplex(object):
    """A ResTable_map_entry payload: a list of (key-reference, value) pairs."""
    def __init__(self, buff, parent=None):
        self.start = buff.get_idx()
        self.parent = parent
        self.id_parent = unpack('<I', buff.read(4))[0]
        self.count = unpack('<I', buff.read(4))[0]
        self.items = [(unpack('<I', buff.read(4))[0],
                       ARSCResStringPoolRef(buff, self.parent))
                      for _ in range(self.count)]
    def __repr__(self):
        return "<ARSCComplex idx='0x{:08x}' parent='{}' count='{}'>".format(self.start, self.id_parent, self.count)
class ARSCResStringPoolRef(object):
    """A Res_value: 16-bit size, reserved byte, type byte, 32-bit payload."""
    def __init__(self, buff, parent=None):
        self.start = buff.get_idx()
        self.parent = parent
        self.size, = unpack("<H", buff.read(2))
        self.res0, = unpack("<B", buff.read(1))
        assert self.res0 == 0, "res0 must be always zero!"
        self.data_type, = unpack('<B', buff.read(1))
        self.data, = unpack('<I', buff.read(4))
    def get_data_value(self):
        """Resolve the payload as an index into the main string pool."""
        return self.parent.stringpool_main.getString(self.data)
    def get_data(self):
        return self.data
    def get_data_type(self):
        return self.data_type
    def get_data_type_string(self):
        return TYPE_TABLE[self.data_type]
    def format_value(self):
        """Render this value the way aapt would print it."""
        return format_value(
            self.data_type,
            self.data,
            self.parent.stringpool_main.getString
        )
    def is_reference(self):
        """True when the payload references another resource id."""
        return self.data_type == TYPE_REFERENCE
    def __repr__(self):
        return "<ARSCResStringPoolRef idx='0x{:08x}' size='{}' type='{}' data='0x{:08x}'>".format(
            self.start,
            self.size,
            TYPE_TABLE.get(self.data_type, "0x%x" % self.data_type),
            self.data)
def get_arsc_info(arscobj):
    """Return a human-readable dump of every resource in *arscobj*,
    grouped by package, locale and resource type (tab-indented)."""
    parts = []
    for package in arscobj.get_packages_names():
        parts.append(package + ":\n")
        for locale in arscobj.get_locales(package):
            parts.append("\t" + repr(locale) + ":\n")
            for ttype in arscobj.get_types(package, locale):
                parts.append("\t\t" + ttype + ":\n")
                # Not every type has a matching get_<type>_resources method;
                # silently skip the ones that don't.
                try:
                    dump = getattr(arscobj, "get_" + ttype + "_resources")(
                        package, locale).decode("utf-8", 'replace').split("\n")
                    for line in dump:
                        parts.append("\t\t\t" + line + "\n")
                except AttributeError:
                    pass
    return "".join(parts)
| 34.836047
| 186
| 0.551537
|
4a054e62161e2e97a1cee6995013ebee68189c19
| 1,517
|
py
|
Python
|
src/selectedtests/task_mappings/version_limit.py
|
isabella232/selected-tests
|
890cd5f39f5571d50f0406b4c25a1a2eef1006a3
|
[
"Apache-2.0"
] | 2
|
2020-04-13T11:26:57.000Z
|
2022-01-21T00:03:52.000Z
|
src/selectedtests/task_mappings/version_limit.py
|
mongodb/selected-tests
|
467f71f1d45b06ac3cc5db252f18658f8cd93083
|
[
"Apache-2.0"
] | 54
|
2019-09-26T18:56:34.000Z
|
2022-03-12T01:07:00.000Z
|
src/selectedtests/task_mappings/version_limit.py
|
isabella232/selected-tests
|
890cd5f39f5571d50f0406b4c25a1a2eef1006a3
|
[
"Apache-2.0"
] | 6
|
2019-10-01T14:24:27.000Z
|
2020-02-13T15:53:47.000Z
|
"""VersionLimit class used to determine whether an Evergreen version is out of the desired range."""
from datetime import datetime
from typing import Optional
from evergreen.api import Version
class VersionLimit(object):
    """Represents the point in time at which to start analyzing versions of an evergreen project."""
    def __init__(
        self, stop_at_date: Optional[datetime] = None, stop_at_version_id: Optional[str] = None
    ):
        """
        Create a VersionLimit object.
        :param stop_at_date: The date at which to start analyzing versions of the repo.
        :param stop_at_version_id: The id of the version at which to start analyzing versions.
        """
        self.stop_at_date = stop_at_date
        self.stop_at_version_id = stop_at_version_id
    def __repr__(self) -> str:
        """Return the object representation of VersionLimit."""
        return "VersionLimit({}, {})".format(self.stop_at_date, self.stop_at_version_id)
    def check_version_before_limit(self, version: Version) -> bool:
        """
        Check whether a version comes before the limit.
        A date limit, when set, takes precedence over a version-id limit.
        :param version: The version to compare against.
        :return: Whether or not the version comes before the limit.
        """
        if self.stop_at_date:
            return version.create_time < self.stop_at_date
        return version.version_id == self.stop_at_version_id
| 37
| 100
| 0.675016
|
4a054e7b889a780c685e8a2fffca50f286e86d5a
| 10,493
|
py
|
Python
|
corehq/apps/reports/standard/cases/basic.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2020-05-05T13:10:01.000Z
|
2020-05-05T13:10:01.000Z
|
corehq/apps/reports/standard/cases/basic.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2019-12-09T14:00:14.000Z
|
2019-12-09T14:00:14.000Z
|
corehq/apps/reports/standard/cases/basic.py
|
MaciejChoromanski/commcare-hq
|
fd7f65362d56d73b75a2c20d2afeabbc70876867
|
[
"BSD-3-Clause"
] | 5
|
2015-11-30T13:12:45.000Z
|
2019-07-01T19:27:07.000Z
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.contrib import messages
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from elasticsearch import TransportError
from corehq.apps.locations.permissions import location_safe
from corehq.apps.reports.standard.cases.filters import CaseSearchFilter
from corehq.apps.reports.standard.cases.utils import (
query_all_project_data,
query_deactivated_data,
get_case_owners,
query_location_restricted_cases,
)
from corehq.const import SERVER_DATETIME_FORMAT
from corehq.util.timezones.conversions import PhoneTime
from memoized import memoized
from corehq.apps.es import cases as case_es
from corehq.apps.reports.api import ReportDataSource
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.exceptions import BadRequestError
from corehq.apps.reports.filters.select import SelectOpenCloseFilter
from corehq.apps.reports.filters.case_list import CaseListFilter as EMWF
from corehq.apps.reports.generic import ElasticProjectInspectionReport
from corehq.apps.reports.standard import ProjectReportParametersMixin
from corehq.apps.reports.standard.inspect import ProjectInspectionReport
from corehq.elastic import ESError
from corehq.toggles import CASE_LIST_EXPLORER
from .data_sources import CaseInfo, CaseDisplay
class CaseListMixin(ElasticProjectInspectionReport, ProjectReportParametersMixin):
    """Shared plumbing for case-list reports: filter fields, the
    elasticsearch query builder, and case-owner resolution."""
    fields = [
        'corehq.apps.reports.filters.case_list.CaseListFilter',
        'corehq.apps.reports.filters.select.CaseTypeFilter',
        'corehq.apps.reports.filters.select.SelectOpenCloseFilter',
        'corehq.apps.reports.standard.cases.filters.CaseSearchFilter',
    ]
    case_filter = {}  # extra ES filter; subclasses may override
    ajax_pagination = True
    asynchronous = True
    search_class = case_es.CaseES
    def _build_query(self):
        """Assemble the elasticsearch case query from the request's filters."""
        query = (self.search_class()
                 .domain(self.domain)
                 .size(self.pagination.count)
                 .start(self.pagination.start))
        query.es_query['sort'] = self.get_sorting_block()
        mobile_user_and_group_slugs = self.request.GET.getlist(EMWF.slug)
        if self.case_filter:
            query = query.filter(self.case_filter)
        # System bookkeeping cases are never shown in the case list.
        query = query.NOT(case_es.case_type("user-owner-mapping-case"))
        if self.case_type:
            query = query.case_type(self.case_type)
        if self.case_status:
            query = query.is_closed(self.case_status == 'closed')
        # Owner scoping: unrestricted users may see everything, all project
        # data, or deactivated data; everyone else only explicit owners.
        if self.request.can_access_all_locations and (
                EMWF.show_all_data(mobile_user_and_group_slugs)
                or EMWF.no_filters_selected(mobile_user_and_group_slugs)
        ):
            pass
        elif (self.request.can_access_all_locations
              and EMWF.show_project_data(mobile_user_and_group_slugs)):
            query = query_all_project_data(
                query, self.domain, mobile_user_and_group_slugs
            )
        elif (self.request.can_access_all_locations
              and EMWF.show_deactivated_data(mobile_user_and_group_slugs)):
            query = query_deactivated_data(query, self.domain)
        else: # Only show explicit matches
            query = query.owner(self.case_owners)
        if not self.request.can_access_all_locations:
            query = query_location_restricted_cases(query, self.request)
        search_string = CaseSearchFilter.get_value(self.request, self.domain)
        if search_string:
            query = query.set_query({"query_string": {"query": search_string}})
        return query
    @property
    @memoized
    def es_results(self):
        """Run the query and return the raw ES response; an ES HTTP 400
        (e.g. a malformed query_string) is surfaced as BadRequestError."""
        try:
            return self._build_query().run().raw
        except ESError as e:
            original_exception = e.args[0]
            if isinstance(original_exception, TransportError):
                if hasattr(original_exception.info, "get"):
                    if original_exception.info.get('status') == 400:
                        raise BadRequestError()
            raise e
    @property
    @memoized
    def case_owners(self):
        """
        For unrestricted user
        :return:
        user ids for selected user types
        for selected reporting group ids, returns user_ids belonging to these groups
        also finds the sharing groups which has any user from the above reporting group
        selected sharing group ids
        selected user ids
        also finds the sharing groups which has any user from the above selected users
        ids and descendants ids of assigned locations to these users
        ids and descendants ids of selected locations
        assigned users at selected locations and their descendants
        For restricted user
        :return:
        selected user ids
        also finds the sharing groups which has any user from the above selected users
        ids and descendants ids of assigned locations to these users
        ids and descendants ids of selected locations
        assigned users at selected locations and their descendants
        """
        # Get user ids for each user that match the demo_user, admin,
        # Unknown Users, or All Mobile Workers filters
        mobile_user_and_group_slugs = self.request.GET.getlist(EMWF.slug)
        return get_case_owners(self.request, self.domain, mobile_user_and_group_slugs)
    def get_case(self, row):
        """Extract the case dict from an ES hit, sanity-checking the domain."""
        if '_source' in row:
            case_dict = row['_source']
        else:
            raise ValueError("Case object is not in search result %s" % row)
        if case_dict['domain'] != self.domain:
            raise Exception("case.domain != self.domain; %r and %r, respectively" % (case_dict['domain'], self.domain))
        return case_dict
    @property
    def shared_pagination_GET_params(self):
        """Carry the open/closed filter through paginated AJAX requests."""
        shared_params = super(CaseListMixin, self).shared_pagination_GET_params
        shared_params.append(dict(
            name=SelectOpenCloseFilter.slug,
            value=self.request.GET.get(SelectOpenCloseFilter.slug, '')
        ))
        return shared_params
@location_safe
class CaseListReport(CaseListMixin, ProjectInspectionReport, ReportDataSource):
    """The standard Case List report; also usable as a ReportDataSource."""
    # note that this class is not true to the spirit of ReportDataSource; the whole
    # point is the decouple generating the raw report data from the report view/django
    # request. but currently these are too tightly bound to decouple
    name = ugettext_lazy('Case List')
    slug = 'case_list'
    @classmethod
    def get_subpages(cls):
        """Register the case-detail view as a subpage of this report."""
        def _get_case_name(request=None, **context):
            # Use the real case name in breadcrumbs when the case is available.
            if 'case' in context:
                return mark_safe(context['case'].name)
            else:
                return _('View Case')
        from corehq.apps.reports.views import CaseDataView
        return [
            {
                'title': _get_case_name,
                'urlname': CaseDataView.urlname,
            },
        ]
    @property
    def view_response(self):
        """Render the report; nudge Dimagi users toward the Case List Explorer."""
        if self.request.couch_user.is_dimagi and not CASE_LIST_EXPLORER.enabled(self.domain):
            messages.warning(
                self.request,
                'Hey Dimagi User! Have you tried out the <a href="https://confluence.dimagi.com/display/ccinternal/Case+List+Explorer" target="_blank">Case List Explorer</a> yet? It might be just what you are looking for!',
                extra_tags='html',
            )
        return super(CaseListReport, self).view_response
    @classmethod
    def display_in_dropdown(cls, domain=None, project=None, user=None):
        # CommTrack projects have their own case list, so hide this one there.
        if project and project.commtrack_enabled:
            return False
        else:
            return True
    def slugs(self):
        """Column slugs exposed when used as a ReportDataSource."""
        return [
            '_case',
            'case_id',
            'case_name',
            'case_type',
            'detail_url',
            'is_open',
            'opened_on',
            'modified_on',
            'closed_on',
            'creator_id',
            'creator_name',
            'owner_type',
            'owner_id',
            'owner_name',
            'external_id',
        ]
    def get_data(self):
        """Yield one flat dict per case hit returned by elasticsearch."""
        for row in self.es_results['hits'].get('hits', []):
            case = self.get_case(row)
            ci = CaseInfo(self, case)
            data = {
                '_case': case,
                'detail_url': ci.case_detail_url,
            }
            data.update((prop, getattr(ci, prop)) for prop in (
                'case_type', 'case_name', 'case_id', 'external_id',
                'is_closed', 'opened_on', 'modified_on', 'closed_on',
            ))
            creator = ci.creating_user or {}
            data.update({
                'creator_id': creator.get('id'),
                'creator_name': creator.get('name'),
            })
            owner = ci.owner
            data.update({
                'owner_type': owner[0],
                'owner_id': owner[1]['id'],
                'owner_name': owner[1]['name'],
            })
            yield data
    @property
    def headers(self):
        """Datatables column definitions for the rendered report."""
        headers = DataTablesHeader(
            DataTablesColumn(_("Case Type"), prop_name="type.exact"),
            DataTablesColumn(_("Name"), prop_name="name.exact", css_class="case-name-link"),
            DataTablesColumn(_("Owner"), prop_name="owner_display", sortable=False),
            DataTablesColumn(_("Created Date"), prop_name="opened_on"),
            DataTablesColumn(_("Created By"), prop_name="opened_by_display", sortable=False),
            DataTablesColumn(_("Modified Date"), prop_name="modified_on"),
            DataTablesColumn(_("Status"), prop_name="get_status_display", sortable=False)
        )
        # Default sort: most recently modified first (column index 5).
        headers.custom_sort = [[5, 'desc']]
        return headers
    @property
    def rows(self):
        """Render each case dict into the displayed table columns."""
        for data in self.get_data():
            display = CaseDisplay(self, data['_case'])
            yield [
                display.case_type,
                display.case_link,
                display.owner_display,
                display.opened_on,
                display.creating_user,
                display.modified_on,
                display.closed_display
            ]
    def date_to_json(self, date):
        """Format a datetime in the phone/user timezone; '' for falsy dates."""
        if date:
            return (PhoneTime(date, self.timezone).user_time(self.timezone)
                    .ui_string(SERVER_DATETIME_FORMAT))
        else:
            return ''
| 36.947183
| 223
| 0.635852
|
4a054ec76f9b310f6f902721917ae7bc225bf8fa
| 2,312
|
py
|
Python
|
PiCN/Layers/ICNLayer/ForwardingInformationBase/ForwardingInformationBaseMemoryPrefix.py
|
NikolaiRutz/PiCN
|
7775c61caae506a88af2e4ec34349e8bd9098459
|
[
"BSD-3-Clause"
] | null | null | null |
PiCN/Layers/ICNLayer/ForwardingInformationBase/ForwardingInformationBaseMemoryPrefix.py
|
NikolaiRutz/PiCN
|
7775c61caae506a88af2e4ec34349e8bd9098459
|
[
"BSD-3-Clause"
] | null | null | null |
PiCN/Layers/ICNLayer/ForwardingInformationBase/ForwardingInformationBaseMemoryPrefix.py
|
NikolaiRutz/PiCN
|
7775c61caae506a88af2e4ec34349e8bd9098459
|
[
"BSD-3-Clause"
] | null | null | null |
""" A in memory Forwarding Information Base using longest matching"""
from typing import List
from PiCN.Layers.ICNLayer.ForwardingInformationBase.BaseForwardingInformationBase import BaseForwardingInformationBase, \
ForwardingInformationBaseEntry
from PiCN.Packets import Name
class ForwardingInformationBaseMemoryPrefix(BaseForwardingInformationBase):
    """In-memory Forwarding Information Base using longest-prefix matching."""
    def __init__(self):
        super().__init__()
    def find_fib_entry(self, name: Name, already_used: List[ForwardingInformationBaseEntry] = None,
                       incoming_faceids: List[int]=None) -> ForwardingInformationBaseEntry:
        """Find the FIB entry with the longest matching prefix of *name*.

        :param name: name to look up; components are stripped from the end
            until some entry matches.
        :param already_used: entries to skip entirely.
        :param incoming_faceids: face ids to exclude from the result, so an
            interest is never forwarded back where it came from.
        :return: a new ForwardingInformationBaseEntry with the matched name
            and the permitted faces, or None if nothing matches.
        """
        components = name.components[:]
        for i in range(0, len(name.components)):
            complen = len(components)
            for fib_entry in self._container:
                if already_used and fib_entry in already_used:
                    continue
                if fib_entry.name.components != components:
                    continue
                # Keep only faces the interest did not arrive on.
                forward_faceids = [fid for fid in fib_entry.faceid
                                   if not incoming_faceids or fid not in incoming_faceids]
                if not forward_faceids:
                    continue
                # NOTE(review): returns a fresh entry (the stored one is not
                # mutated); the `static` flag is not carried over — confirm
                # callers don't rely on it.
                return ForwardingInformationBaseEntry(fib_entry.name, forward_faceids)
            # No entry at this prefix length: drop the last component and retry.
            components = components[:complen - 1]
        return None
    def add_fib_entry(self, name: Name, faceid: List[int], static: bool=False):
        """Add a FIB entry mapping *name* to the list of face ids *faceid*."""
        assert (isinstance(faceid, List))
        fib_entry = ForwardingInformationBaseEntry(name, faceid, static)
        if fib_entry not in self._container:
            # New entries go to the front so they win subsequent lookups.
            self._container.insert(0, fib_entry)
    def remove_fib_entry(self, name: Name):
        """Remove every FIB entry whose name equals *name*.

        Rebuilds the container in place: the previous ``remove``-inside-a-loop
        mutated the list while iterating it, which skips the element following
        each removal, so consecutive matching entries were left behind.
        """
        self._container[:] = [entry for entry in self._container if entry.name != name]
    def add_faceid_to_entry(self, name, fid):
        """Add face id *fid* to the entry for *name* and move it to the front."""
        entry = self.find_fib_entry(name)
        self.remove_fib_entry(name)
        if entry is None:
            return
        if fid not in entry.faceid:
            entry.faceid.append(fid)
        self._container.insert(0, entry)
    def clear(self):
        """Remove all non-static entries.

        Rebuilds in place for the same iterate-while-mutating reason as
        :meth:`remove_fib_entry`; the old loop skipped every entry that
        immediately followed a removed one.
        """
        self._container[:] = [entry for entry in self._container if entry.static]
| 39.186441
| 121
| 0.639706
|
4a054f1862debf693dd99b6edd4f4c2685cae27b
| 2,087
|
py
|
Python
|
thesis/introspect_i2a.py
|
Driesssens/ppo-a2c-thesis
|
4f7b8c8290940bb4dc40cf067a99b890655c55ec
|
[
"MIT"
] | 1
|
2021-03-22T15:01:04.000Z
|
2021-03-22T15:01:04.000Z
|
thesis/introspect_i2a.py
|
Driesssens/ppo-a2c-thesis
|
4f7b8c8290940bb4dc40cf067a99b890655c55ec
|
[
"MIT"
] | null | null | null |
thesis/introspect_i2a.py
|
Driesssens/ppo-a2c-thesis
|
4f7b8c8290940bb4dc40cf067a99b890655c55ec
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import gym
import time
from gym_minigrid.minigrid import Grid
try:
import gym_minigrid
except ImportError:
pass
import utils
from thesis.environments import SmallUnlock8x8
from thesis.wrappers import MyFullyObservableWrapperBroadcast, MyFullyObservableWrapper, MyFullyObservableWrapperEgo, ReducedActionWrapper, UndiscountedRewards, HastyRewards
from gym_minigrid.envs import EmptyEnv8x8
import torch
def introspect_i2a(environment, model, seed=0, argmax=False, pause=0.1):
    """Run a trained I2A model in *environment* indefinitely, rendering each
    step and showing the model's imagined rollouts in the render window.

    :param environment: gym(-minigrid) environment instance to play in
    :param model: name of the saved model directory (string)
    :param seed: RNG seed applied to utils and the environment
    :param argmax: pick the most probable action instead of sampling
    :param pause: seconds to sleep between rendered steps
    """
    utils.seed(seed)
    # Generate environment
    environment.seed(seed)
    # Define agent
    model_dir = utils.get_model_dir(model)
    preprocess_obss = utils.ObssPreprocessor(model_dir, environment.observation_space)
    model = utils.load_model(model_dir)
    # Run the agent
    done = True
    while True:
        if done:
            obs = environment.reset()
            print("Instr:", obs["mission"])
        time.sleep(pause)
        renderer = environment.render("human")
        preprocessed_obss = preprocess_obss([obs])
        # Query the policy plus introspection outputs: the predicted
        # actions/observations/rewards of the model's imagined rollouts.
        with torch.no_grad():
            dist, _, pred_actions, pred_observations, pred_rewards = model(preprocessed_obss, introspect=True)
        renderer.window.update_imagination_display(pred_observations, pred_actions, pred_rewards)
        if argmax:
            actions = dist.probs.max(1, keepdim=True)[1]
        else:
            actions = dist.sample()
        if torch.cuda.is_available():
            actions = actions.cpu().numpy()
        obs, reward, done, _ = environment.step(actions.item())
        # Stop once the user closes the render window.
        if renderer.window is None:
            break
def enjoy_i2a_empty():
    # Visualize a trained I2A agent on the (haste-rewarded, ego-view) EmptyEnv8x8.
    environment_class = HastyRewards(MyFullyObservableWrapperEgo(EmptyEnv8x8()))
    introspect_i2a(environment_class, "I2A-3_Haste-Ego-EmptyEnv8x8_s1_18-11-08-07-45-08", pause=1)
def enjoy_i2a_unlocked():
    # Visualize a trained I2A agent on the (haste-rewarded, ego-view) SmallUnlock8x8.
    environment_class = HastyRewards(MyFullyObservableWrapperEgo(SmallUnlock8x8()))
    introspect_i2a(environment_class, "I2A-3_Haste-Ego-SmallUnlock8x8_s1_18-11-08-08-04-06", pause=1)
enjoy_i2a_unlocked()
| 27.103896
| 173
| 0.714902
|
4a054f2a580870ad82eb41ebd8eeb9a0da85cd92
| 3,704
|
py
|
Python
|
Lib/distutils/tests/test_build_py.py
|
ystk/debian-python2.6
|
17d77164dc5d5748e54aeaa5adc89ac511fc71ae
|
[
"PSF-2.0"
] | 3
|
2015-09-22T14:04:54.000Z
|
2021-07-15T07:07:11.000Z
|
Lib/distutils/tests/test_build_py.py
|
ystk/debian-python2.6
|
17d77164dc5d5748e54aeaa5adc89ac511fc71ae
|
[
"PSF-2.0"
] | 1
|
2020-09-07T15:33:56.000Z
|
2020-09-07T15:33:56.000Z
|
Lib/distutils/tests/test_build_py.py
|
ystk/debian-python2.6
|
17d77164dc5d5748e54aeaa5adc89ac511fc71ae
|
[
"PSF-2.0"
] | 2
|
2015-09-22T14:05:27.000Z
|
2015-11-08T20:15:42.000Z
|
"""Tests for distutils.command.build_py."""
import os
import sys
import StringIO
import unittest
from distutils.command.build_py import build_py
from distutils.core import Distribution
from distutils.errors import DistutilsFileError
from distutils.tests import support
class BuildPyTestCase(support.TempdirManager,
                      support.LoggingSilencer,
                      unittest.TestCase):
    # Tests for the distutils `build_py` command (Python 2 era code:
    # StringIO module, deprecated assert_ alias).
    def test_package_data(self):
        # Build a throwaway package with one module and one data file.
        sources = self.mkdtemp()
        f = open(os.path.join(sources, "__init__.py"), "w")
        f.write("# Pretend this is a package.")
        f.close()
        f = open(os.path.join(sources, "README.txt"), "w")
        f.write("Info about this package")
        f.close()
        destination = self.mkdtemp()
        dist = Distribution({"packages": ["pkg"],
                             "package_dir": {"pkg": sources}})
        # script_name need not exist, it just need to be initialized
        dist.script_name = os.path.join(sources, "setup.py")
        dist.command_obj["build"] = support.DummyCommand(
            force=0,
            build_lib=destination)
        dist.packages = ["pkg"]
        dist.package_data = {"pkg": ["README.txt"]}
        dist.package_dir = {"pkg": sources}
        cmd = build_py(dist)
        cmd.compile = 1
        cmd.ensure_finalized()
        self.assertEqual(cmd.package_data, dist.package_data)
        cmd.run()
        # This makes sure the list of outputs includes byte-compiled
        # files for Python modules but not for package data files
        # (there shouldn't *be* byte-code files for those!).
        #
        self.assertEqual(len(cmd.get_outputs()), 3)
        pkgdest = os.path.join(destination, "pkg")
        files = os.listdir(pkgdest)
        self.assert_("__init__.py" in files)
        self.assert_("__init__.pyc" in files)
        self.assert_("README.txt" in files)
    def test_empty_package_dir (self):
        # See SF 1668596/1720897.
        cwd = os.getcwd()
        # create the distribution files.
        sources = self.mkdtemp()
        open(os.path.join(sources, "__init__.py"), "w").close()
        testdir = os.path.join(sources, "doc")
        os.mkdir(testdir)
        open(os.path.join(testdir, "testfile"), "w").close()
        os.chdir(sources)
        # Silence the build commands' output while they run.
        old_stdout = sys.stdout
        sys.stdout = StringIO.StringIO()
        try:
            dist = Distribution({"packages": ["pkg"],
                                 "package_dir": {"pkg": ""},
                                 "package_data": {"pkg": ["doc/*"]}})
            # script_name need not exist, it just need to be initialized
            dist.script_name = os.path.join(sources, "setup.py")
            dist.script_args = ["build"]
            dist.parse_command_line()
            try:
                dist.run_commands()
            except DistutilsFileError:
                self.fail("failed package_data test when package_dir is ''")
        finally:
            # Restore state.
            os.chdir(cwd)
            sys.stdout = old_stdout
    def test_dont_write_bytecode(self):
        # makes sure byte_compile is not used
        pkg_dir, dist = self.create_dist()
        cmd = build_py(dist)
        cmd.compile = 1
        cmd.optimize = 1
        old_dont_write_bytecode = sys.dont_write_bytecode
        sys.dont_write_bytecode = True
        try:
            cmd.byte_compile([])
        finally:
            sys.dont_write_bytecode = old_dont_write_bytecode
        self.assertTrue('byte-compiling is disabled' in self.logs[0][1])
def test_suite():
    # Classic distutils-tests entry point; makeSuite is the legacy
    # unittest suite factory used throughout this test package.
    return unittest.makeSuite(BuildPyTestCase)
if __name__ == "__main__":
    unittest.main(defaultTest="test_suite")
| 32.491228
| 76
| 0.590173
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.