hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c2d0f849557aee866125c4da8f8a94fe9de46f2 | 7,081 | py | Python | src/configparserenhanced/TypedProperty.py | sandialabs/ConfigParserEnhanced | 93c2b32fa67c47bc2194a95a2464529c4adfaa01 | [
"BSD-3-Clause"
] | 2 | 2021-12-08T15:34:03.000Z | 2021-12-21T21:54:19.000Z | src/configparserenhanced/TypedProperty.py | sandialabs/ConfigParserEnhanced | 93c2b32fa67c47bc2194a95a2464529c4adfaa01 | [
"BSD-3-Clause"
] | null | null | null | src/configparserenhanced/TypedProperty.py | sandialabs/ConfigParserEnhanced | 93c2b32fa67c47bc2194a95a2464529c4adfaa01 | [
"BSD-3-Clause"
] | 4 | 2021-12-08T01:02:15.000Z | 2022-01-31T14:08:57.000Z | #!/usr/bin/env python3
# -*- mode: python; py-indent-offset: 4; py-continuation-offset: 4 -*-
#===============================================================================
# Copyright Notice
# ----------------
# Copyright 2021 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# License (3-Clause BSD)
# ----------------------
# Copyright 2021 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""
"""
import copy
import typing
class SENTINEL:
    """Marker class distinguishing "no default supplied" from a ``None`` default."""
    pass
def typed_property(
    name: str,
    expected_type=(int, str),
    default=SENTINEL,
    default_factory=lambda: None,
    req_assign_before_use=False,
    internal_type=None,
    validator=None,
    transform=None
):
    """
    Implements a typed property in a class using the pattern
    `"9.21 Avoiding Repetitive Property Methods" from the O'Reilly
    Python Cookbook, 3rd Edition <https://learning.oreilly.com/library/view/python-cookbook-3rd/9781449357337/>`_.
    Args:
        name (str): The name of the property to create.
        expected_type (type,tuple): The *type* or an *iterable of types* enumerating allowable
            types to be assigned to the property. Default is ``(int,str)``.
        default: A default value returned when the property is read before any assignment.
            It is deep-copied on first access and assigned without type checking.
            Default is the ``SENTINEL`` class (i.e., *no default*), in which case
            ``default_factory`` is used instead.
        default_factory: Default factory method. This must be callable but is used
            when we need a complex type that can't use ``deepcopy``. Default: ``lambda: None``.
        req_assign_before_use (bool): If ``True`` then raise an exception if the value is
            used before assigned. Otherwise, the *default* value is used. Default: ``False``
        internal_type (<type>): Sets the ``<type>`` that the value is stored as (via typecast)
            internally. This is done during *assignment* (and re-applied after ``transform``).
        validator (func): A special validation function that can be called during assignment
            to provide additional checks such as list size, allowable values, etc.
            If the validator's return value is *truthy* the check succeeds, otherwise
            the check has failed and a ``ValueError`` will be raised.
            Default=None (i.e., no extra validation).
        transform (func): A function that can be used to transform the value before assignment.
    Raises:
        TypeError: if the assigned value is of the wrong type on assignment, or if
            ``default_factory``, ``transform``, or ``validator`` must be called but
            is not callable.
        ValueError: if a *validator* is provided and the check fails (is Falsy).
        UnboundLocalError: If ``req_assign_before_use`` is True and an attempt to read
            the property is made before it's been assigned.
    """
    varname = "_" + name
    varname_set = varname + "_is_set"
    # Normalize the allowed types ONCE at property-creation time. The original
    # deep-copied and re-wrapped `expected_type` on every single assignment,
    # which was pure overhead (deepcopy of type objects is a no-op anyway).
    if isinstance(expected_type, typing.Iterable):
        expected_type = tuple(expected_type)
    else:
        expected_type = (expected_type, )
    @property
    def prop(self):
        # Track per-instance whether this property has ever been assigned.
        if not hasattr(self, varname_set):
            setattr(self, varname_set, False)
        if req_assign_before_use is True and getattr(self, varname_set) is False:
            raise UnboundLocalError("Property {} referenced before assigned.".format(name))
        # Lazily install the default on first read.
        if not hasattr(self, varname):
            if default is not SENTINEL:
                # Deep-copy so a mutable default is never shared across instances.
                setattr(self, varname, copy.deepcopy(default))
            else:
                if not callable(default_factory):
                    raise TypeError(
                        "default_factory `{}` in `{}` must be callable.".format(default_factory, name)
                    )
                setattr(self, varname, default_factory())
        return getattr(self, varname)
    @prop.setter
    def prop(self, value):
        # Type check: accept the value if it matches ANY of the allowed types.
        if not isinstance(value, expected_type):
            type_names = [i.__name__ for i in expected_type]
            raise TypeError("'{}' must be in ({})".format(name, ",".join(type_names)))
        if internal_type is not None:
            value = internal_type(value)
        if transform is not None:
            if callable(transform):
                value = transform(value)
                # Re-apply the internal typecast: the transform may change the type.
                if internal_type is not None:
                    value = internal_type(value)
            else:
                raise TypeError(f"transform '{transform}' for property '{name}' is not callable.")
        if validator is not None:
            if callable(validator):
                if not validator(value):
                    raise ValueError(
                        f"Assignment of `{value}` to property `{name}` " +
                        f"failed validation check in `{validator}`"
                    )
            else:
                raise TypeError(f"Validator '{validator}' for property '{name}' is not callable.")
        # Assign the value to the property
        setattr(self, varname, value)
        # Save that we've assigned the value to something
        setattr(self, varname_set, True)
    @prop.deleter
    def prop(self):
        # Remove both the stored value and the "has been set" flag so the
        # property reverts to its default / unassigned behavior.
        if hasattr(self, varname):
            delattr(self, varname)
        if hasattr(self, varname_set):
            delattr(self, varname_set)
    return prop
| 42.915152 | 114 | 0.642847 |
import copy
import typing
class SENTINEL:
pass
def typed_property(
name: str,
expected_type=(int, str),
default=SENTINEL,
default_factory=lambda: None,
req_assign_before_use=False,
internal_type=None,
validator=None,
transform=None
):
varname = "_" + name
varname_set = varname + "_is_set"
expected_type = expected_type
validator = validator
@property
def prop(self):
if not hasattr(self, varname_set):
setattr(self, varname_set, False)
if req_assign_before_use is True and getattr(self, varname_set) is False:
raise UnboundLocalError("Property {} referenced before assigned.".format(name))
if not hasattr(self, varname):
if default is not SENTINEL:
setattr(self, varname, copy.deepcopy(default))
else:
if not callable(default_factory):
raise TypeError(
"default_factory `{}` in `{}` must be callable.".format(default_factory, name)
)
setattr(self, varname, default_factory())
return getattr(self, varname)
@prop.setter
def prop(self, value):
_expected_type = copy.deepcopy(expected_type)
if not isinstance(_expected_type, typing.Iterable):
_expected_type = (_expected_type, )
for expected_type_i in _expected_type:
if isinstance(value, expected_type_i):
break
else:
type_names = [i.__name__ for i in _expected_type]
raise TypeError("'{}' must be in ({})".format(name, ",".join(type_names)))
if internal_type is not None:
value = internal_type(value)
if transform is not None:
if callable(transform):
value = transform(value)
if internal_type is not None:
value = internal_type(value)
else:
raise TypeError(f"transform '{transform}' for property '{name}' is not callable.")
if validator is not None:
if callable(validator):
if not validator(value):
raise ValueError(
f"Assignment of `{value}` to property `{name}` " +
f"failed validation check in `{validator}`"
)
else:
raise TypeError(f"Validator '{validator}' for property '{name}' is not callable.")
setattr(self, varname, value)
setattr(self, varname_set, True)
@prop.deleter
def prop(self):
if hasattr(self, varname):
delattr(self, varname)
if hasattr(self, varname_set):
delattr(self, varname_set)
return prop
| true | true |
1c2d105e7db966f407ca10d5da75611063f8eb1a | 16,206 | py | Python | utils/stable_baselines_plotter.py | malikasng/Bbox_HGG_with_CTR_and_RRTstarFND | 2b1aae6c347f544fefface0c9f26dc4ecde51108 | [
"MIT"
] | 1 | 2020-09-16T06:15:17.000Z | 2020-09-16T06:15:17.000Z | utils/stable_baselines_plotter.py | malikasng/Bbox_HGG_with_CTR_and_RRTstarFND | 2b1aae6c347f544fefface0c9f26dc4ecde51108 | [
"MIT"
] | 5 | 2020-09-26T01:30:01.000Z | 2022-01-13T03:15:42.000Z | utils/stable_baselines_plotter.py | malikasng/Bbox_HGG_with_CTR_and_RRTstarFND | 2b1aae6c347f544fefface0c9f26dc4ecde51108 | [
"MIT"
] | null | null | null | # from https://github.com/hill-a/stable-baselines/blob/master/stable_baselines/results_plotter.py
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
# matplotlib.use('TkAgg') # Can change to 'Agg' for non-interactive mode
# Render text in SVG exports as real text elements instead of outlined paths.
plt.rcParams['svg.fonttype'] = 'none'
# Supported choices for the plot's x axis.
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
# Per-curve palette: curve i is drawn with COLORS[i] in plot_curves (24 entries).
# NOTE(review): 'lightpurple' does not look like a standard matplotlib named
# color — confirm before a 22nd curve is ever plotted.
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
          'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
          'darkgreen', 'tan', 'salmon', 'gold', 'lightpurple', 'darkred', 'darkblue']
def rolling_window(array, window):
    """
    Build a zero-copy rolling-window view over the last axis of an array.
    :param array: (np.ndarray) the input array
    :param window: (int) length of the rolling window
    :return: (np.ndarray) view with an extra trailing axis of length ``window``
    """
    # The last axis of length n becomes (n - window + 1, window); strides reuse
    # the original memory, so no data is copied.
    out_shape = (*array.shape[:-1], array.shape[-1] - window + 1, window)
    out_strides = (*array.strides, array.strides[-1])
    return np.lib.stride_tricks.as_strided(array, shape=out_shape, strides=out_strides)
def window_func(var_1, var_2, window, func):
    """
    Reduce ``var_2`` over a rolling window and trim ``var_1`` to match.
    :param var_1: (np.ndarray) x values (e.g. timesteps)
    :param var_2: (np.ndarray) y values to smooth
    :param window: (int) length of the rolling window
    :param func: (numpy function) reduction applied per window (e.g. ``np.mean``)
    :return: (np.ndarray, np.ndarray) trimmed x values and reduced y values
    """
    windowed = rolling_window(var_2, window)
    reduced = func(windowed, axis=-1)
    # The first (window - 1) x entries have no complete window, so drop them.
    trimmed_x = var_1[window - 1:]
    return trimmed_x, reduced
'''def ts2xy(timesteps, xaxis):
"""
Decompose a timesteps variable to x ans ys
:param timesteps: (Pandas DataFrame) the input data
:param xaxis: (str) the axis for the x and y output
(can be X_TIMESTEPS='timesteps', X_EPISODES='episodes' or X_WALLTIME='walltime_hrs')
:return: (np.ndarray, np.ndarray) the x and y output
"""
if xaxis == X_TIMESTEPS:
x_var = np.cumsum(timesteps.l.values)
y_var = timesteps.r.values
elif xaxis == X_EPISODES:
x_var = np.arange(len(timesteps))
y_var = timesteps.r.values
elif xaxis == X_WALLTIME:
x_var = timesteps.t.values / 3600.
y_var = timesteps.r.values
else:
raise NotImplementedError
return x_var, y_var'''
def plot_curves(xy_list, xlabel, ylabel, window=1, labels=None, title=None, filename=None):
    """
    Plot one rolling-mean-smoothed curve per (x, y) pair and optionally save the figure.
    :param xy_list: ([(np.ndarray, np.ndarray)]) the x and y coordinates of each curve
    :param xlabel: (str) label for the x axis
    :param ylabel: (str) label for the y axis
    :param window: (int) rolling-mean window size; curves shorter than this are skipped
    :param labels: ([str] or None) optional legend label per curve, aligned with xy_list
    :param title: (str or None) the title of the plot
    :param filename: (str or None) if given, the figure is saved to this path
    """
    plt.figure(figsize=(16, 8))
    maxx = max(xy[0][-1] for xy in xy_list)
    minx = 0
    for (i, (x, y)) in enumerate(xy_list):
        # Cycle through the palette so more than len(COLORS) curves do not
        # raise IndexError (original used COLORS[i] directly).
        color = COLORS[i % len(COLORS)]
        # Do not plot the smoothed curve at all if the timeseries is shorter than window size.
        if x.shape[0] >= window:
            # Compute and plot rolling mean with the requested window size.
            x, y_mean = window_func(x, y, window, np.mean)
            if labels is None:
                plt.plot(x, y_mean, color=color)
            else:
                plt.plot(x, y_mean, color=color, label=labels[i])
    plt.xlim(minx, maxx)
    if title is not None:
        plt.title(title)
    if labels is not None:
        # Only draw a legend when curves actually carry labels; calling
        # plt.legend() with no labeled artists just emits a warning.
        plt.legend(loc="upper left")
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.tight_layout()
    if filename is not None:
        plt.savefig(filename)
'''def plot_results(dirs, num_timesteps, xaxis, task_name):
"""
plot the results
:param dirs: ([str]) the save location of the results to plot
:param num_timesteps: (int or None) only plot the points below this value
:param xaxis: (str) the axis for the x and y output
(can be X_TIMESTEPS='timesteps', X_EPISODES='episodes' or X_WALLTIME='walltime_hrs')
:param task_name: (str) the title of the task to plot
"""
tslist = []
for folder in dirs:
timesteps = load_results(folder)
if num_timesteps is not None:
timesteps = timesteps[timesteps.l.cumsum() <= num_timesteps]
tslist.append(timesteps)
xy_list = [ts2xy(timesteps_item, xaxis) for timesteps_item in tslist]
plot_curves(xy_list, xaxis, task_name)'''
'''def main():
"""
Example usage in jupyter-notebook
.. code-block:: python
from stable_baselines import results_plotter
%matplotlib inline
results_plotter.plot_results(["./log"], 10e6, results_plotter.X_TIMESTEPS, "Breakout")
Here ./log is a directory containing the monitor.csv files
"""
import argparse
import os
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dirs', help='List of log directories', nargs='*', default=['./log'])
parser.add_argument('--num_timesteps', type=int, default=int(10e6))
parser.add_argument('--xaxis', help='Varible on X-axis', default=X_TIMESTEPS)
parser.add_argument('--task_name', help='Title of plot', default='Breakout')
args = parser.parse_args()
args.dirs = [os.path.abspath(folder) for folder in args.dirs]
plot_results(args.dirs, args.num_timesteps, args.xaxis, args.task_name)
plt.show()'''
def tdm_training_plot():
    """Read one hard-coded TDM training log CSV and save three figures
    (epoch wall time, training/eval losses, distance-to-goal) under
    ``../logsdir/csv_logs/``."""
    df = pd.read_csv('../logsdir/csv_logs/2020_07_02_12_18_26tdm_training_log.csv')
    indexes = df.columns.values
    '''df.dropna(subset=[i for i in indexes if i != 'epoch'],
              inplace=True, how='all')#this is just because logger creates empty row after before each epoch'''
    # Per-epoch wall time: take the last logged time of each epoch.
    epoch_time = df[['epoch', 'time']].groupby(by=['epoch'])
    epoch_time = epoch_time.last()
    y = epoch_time[['time']].values[:, 0]
    x = np.arange(0, len(y))
    plot_curves([(x, y)], 'epoch', 'time needed', title='(mixed)',
                filename='../logsdir/csv_logs/time.png')
    # Loss curves: keep only rows that belong to an episode, treat missing
    # loss entries as zero.
    epAndEpoch_losses = df[
        ['epoch', 'episode', 'actor_tr_loss', 'critic_tr_loss', 'actor_eval_loss', 'critic_eval_loss']
    ].dropna(subset=['episode']).fillna(0.)
    x = np.arange(0, len(epAndEpoch_losses))
    losses_labels = ['actor_tr_loss', 'critic_tr_loss', 'actor_eval_loss', 'critic_eval_loss']
    xs_ys = [(x, epAndEpoch_losses[[k]].values[:, 0])
             for k in losses_labels]
    plot_curves(xs_ys, 'episode (each 100 timesteps)', 'loss', window=20, title='accumulated loss (mixed)',
                labels=losses_labels, filename='../logsdir/csv_logs/log_losses.png')
    # NOTE(review): epAndEpoch_reward is computed but never used/plotted.
    epAndEpoch_reward = df[['epoch', 'episode', 'episode_reward']].dropna(subset=['episode'])
    epAndEpoch_distance = df[['epoch', 'episode', 'distance_to_goal']].dropna(subset=['episode'])
    y = epAndEpoch_distance[['distance_to_goal']].values[:, 0]
    x = np.arange(0, len(y))
    plot_curves([(x, y)], 'episode', 'distance to goal (euclidean distance)', title='distance after episode (mixed)',
                window=20, filename='../logsdir/csv_logs/distance.png')
def plot_distances(df, columns_names, labels, name):
    """Plot the named columns of ``df`` against the row index and save the
    figure as ``distance_evaluation_<name>.png`` under ``../logsdir/csv_logs/``."""
    steps = np.arange(0, len(df))
    curves = [(steps, df[[col]].values[:, 0]) for col in columns_names]
    plot_curves(curves, 'step', 'distance ', title='distance ' + name,
                labels=labels, filename='../logsdir/csv_logs/distance_evaluation_' + name + '.png')
def plot_against(path_to_folder, csv_filename_prefix, route1_name, route2_name, epoch):
    """Overlay the distance curves of two routes (read from
    ``<path_to_folder><csv_filename_prefix><route>.csv``) in one figure per
    metric and save the comparison plots into ``path_to_folder``."""
    df1 = pd.read_csv(path_to_folder + csv_filename_prefix+route1_name + '.csv')
    df2 = pd.read_csv(path_to_folder +csv_filename_prefix+ route2_name + '.csv')
    def plot_ag(columns_names, labels):
        # Plot the same columns from both routes on shared step indices;
        # legend entries are suffixed with the route name.
        x = np.arange(0, len(df1))
        xs_ys = [(x, df1[[k]].values[:, 0]) for k in columns_names]
        xs_ys2 = [(x, df2[[k]].values[:, 0]) for k in columns_names]
        l1 = [l + '_'+route1_name for l in labels]
        l2 = [l + '_'+route2_name for l in labels]
        plot_curves(xs_ys + xs_ys2, 'step', 'distance ', title='distance ' +route1_name+'_against_'+route2_name,
                    labels=l1 + l2,
                    filename=path_to_folder + route1_name+'_against_'+
                    route2_name+'_'+'_'.join(labels) +'_epoch{}'.format(epoch)+'.png')
    # One figure per metric: true L2 distance, then each learned estimator.
    columns_names_goal = ['from_achieved_goal_to_desired_goal_l2_dist']
    labels_goal = ['l2_distance']
    plot_ag(columns_names_goal, labels_goal)
    columns_names_goal = ['from_state_latent_to_goal_latent_estimator1']
    labels_goal = ['estimator1']
    plot_ag(columns_names_goal, labels_goal)
    columns_names_goal = ['from_state_latent_to_goal_latent_estimator2']
    labels_goal = ['estimator2']
    plot_ag(columns_names_goal, labels_goal)
def plot_distance_estimators(df, name_prefix):
    """Save distance-comparison plots (true L2 distance vs. the two learned
    estimators) for four metrics: distance to goal, distance to last state,
    cumulated distance along the trajectory, and distance to next state.
    Figure names are ``distance_evaluation_<name_prefix><suffix>.png``."""
    # to goal
    plot_distances(df, ['from_achieved_goal_to_desired_goal_l2_dist', 'from_state_latent_to_goal_latent_estimator1',
                        'from_state_latent_to_goal_latent_estimator2'],
                   ['l2_distance', 'estimator1', 'estimator2'],
                   name_prefix + '_distance_to_goal')
    plot_distances(df, ['from_achieved_goal_to_desired_goal_l2_dist', 'from_state_latent_to_goal_latent_estimator1'],
                   ['l2_distance', 'estimator1'],
                   name_prefix + '_distance_to_goal_estimator_1')
    plot_distances(df, ['from_achieved_goal_to_desired_goal_l2_dist', 'from_state_latent_to_goal_latent_estimator2'],
                   ['l2_distance', 'estimator2'],
                   name_prefix + '_distance_to_goal_estimator_2')
    # to last state
    plot_distances(df, ['from_achieved_goal_to_achieved_goal_l2_dist', 'from_state_latent_to_state_latent_estimator1',
                        'from_state_latent_to_state_latent_estimator2', 'to_last_steps'],
                   ['l2_distance', 'estimator1', 'estimator2', 'steps_distance'],
                   name_prefix + '_distance_to_last_state')
    plot_distances(df, ['from_achieved_goal_to_achieved_goal_l2_dist', 'from_state_latent_to_state_latent_estimator1',
                        'to_last_steps'],
                   ['l2_distance', 'estimator1', 'steps_distance'],
                   name_prefix + '_distance_to_last_state_estimator_1')
    plot_distances(df, ['from_achieved_goal_to_achieved_goal_l2_dist', 'from_state_latent_to_state_latent_estimator2',
                        'to_last_steps'],
                   ['l2_distance', 'estimator2', 'steps_distance'],
                   name_prefix + '_distance_to_last_state_estimator_2')
    # to last trajectory
    plot_distances(df, ['achieved_goal_l2_dist_traj', 'state_latent_estimator1_traj',
                        'state_latent_estimator2_traj', 'to_last_steps'],
                   ['l2_distance', 'estimator1', 'estimator2', 'steps_distance'],
                   name_prefix + '_along_trajectory_cumulated')
    plot_distances(df, ['achieved_goal_l2_dist_traj', 'state_latent_estimator1_traj',
                        'to_last_steps'],
                   ['l2_distance', 'estimator1', 'steps_distance'],
                   name_prefix + '_along_trajectory_cumulated_estimator_1')
    plot_distances(df, ['achieved_goal_l2_dist_traj', 'state_latent_estimator2_traj',
                        'to_last_steps'],
                   ['l2_distance', 'estimator2', 'steps_distance'],
                   name_prefix + '_along_trajectory_cumulated_estimator_2')
    # to next
    plot_distances(df, ['achieved_goal_to_next_l2_dist', 'state_latent_to_next_estimator1',
                        'state_latent_to_next_estimator2'],
                   ['l2_distance', 'estimator1', 'estimator2'],
                   name_prefix + '_to_next')
    # BUGFIX: the two single-estimator "to next" plots previously paired the
    # estimator2 column with the estimator1 label (and vice versa), unlike
    # every other section here and plot_distance_estimators_from_training.
    plot_distances(df, ['achieved_goal_to_next_l2_dist', 'state_latent_to_next_estimator1'],
                   ['l2_distance', 'estimator1'],
                   name_prefix + '_to_next_estimator_1')
    plot_distances(df, ['achieved_goal_to_next_l2_dist', 'state_latent_to_next_estimator2'],
                   ['l2_distance', 'estimator2'],
                   name_prefix + '_to_next_estimator_2')
def plot_distance_estimators_from_training(df, name_prefix):
    """Save distance-comparison plots (true L2 distance vs. the two learned
    estimators) from a training-time CSV for four metrics: distance to goal,
    distance to last state, cumulated distance along the trajectory, and
    distance to next state."""
    # Each entry: (dataframe columns, legend labels, figure-name suffix).
    # Order mirrors the original call sequence exactly.
    plot_specs = [
        # to goal
        (['to_goal_l2', 'to_goal_est', 'to_goal_2est'],
         ['l2_distance', 'estimator1', 'estimator2'], '_distance_to_goal'),
        (['to_goal_l2', 'to_goal_est'],
         ['l2_distance', 'estimator1'], '_distance_to_goal_estimator_1'),
        (['to_goal_l2', 'to_goal_2est'],
         ['l2_distance', 'estimator2'], '_distance_to_goal_estimator_2'),
        # to last state
        (['to_last_l2', 'to_last_est', 'to_last_2est', 'to_last_steps'],
         ['l2_distance', 'estimator1', 'estimator2', 'steps_distance'], '_distance_to_last_state'),
        (['to_last_l2', 'to_last_est', 'to_last_steps'],
         ['l2_distance', 'estimator1', 'steps_distance'], '_distance_to_last_state_estimator_1'),
        (['to_last_l2', 'to_last_2est', 'to_last_steps'],
         ['l2_distance', 'estimator2', 'steps_distance'], '_distance_to_last_state_estimator_2'),
        # along trajectory (cumulated)
        (['to_last_l2_traj', 'to_last_est_traj', 'to_last_2est_traj', 'to_last_steps'],
         ['l2_distance', 'estimator1', 'estimator2', 'steps_distance'], '_along_trajectory_cumulated'),
        (['to_last_l2_traj', 'to_last_est_traj', 'to_last_steps'],
         ['l2_distance', 'estimator1', 'steps_distance'], '_along_trajectory_cumulated_estimator_1'),
        (['to_last_l2_traj', 'to_last_2est_traj', 'to_last_steps'],
         ['l2_distance', 'estimator2', 'steps_distance'], '_along_trajectory_cumulated_estimator_2'),
        # to next
        (['to_next_l2', 'to_next_est', 'to_next_2est'],
         ['l2_distance', 'estimator1', 'estimator2'], '_to_next'),
        (['to_next_l2', 'to_next_est'],
         ['l2_distance', 'estimator1'], '_to_next_estimator_1'),
        (['to_next_l2', 'to_next_2est'],
         ['l2_distance', 'estimator2'], '_to_next_estimator_2'),
    ]
    for columns, labels, suffix in plot_specs:
        plot_distances(df, columns, labels, name_prefix + suffix)
def plot_distance_group(df, name_prefix):
    """Plot each distance metric of a group CSV (true L2 distance and the two
    estimators) against the pair index, one figure per metric, saved as
    ``distance_evaluation_<name_prefix><metric>.png``."""
    def _plot_single(column, label, name):
        # One curve per figure: the column values over their row (pair) index.
        xs = np.arange(0, len(df))
        plot_curves([(xs, df[[column]].values[:, 0])], 'pair', 'distance',
                    title='distance ' + name, labels=[label],
                    filename='../logsdir/csv_logs/distance_evaluation_' + name + '.png')
    for column, label in (('l2_dist', 'l2_distance'),
                          ('estimator1', 'estimator1'),
                          ('estimator2', 'estimator2')):
        _plot_single(column, label, name_prefix + label)
if __name__ == "__main__":
    # Epoch tag embedded in the output figure names.
    epoch = 390
    '''
    df = pd.read_csv('../logsdir/csv_logs/dist_no_obstacle_epoch_120_it_0.csv')
    plot_distance_estimators_from_training(df,'from_training_epoch_120_it_0')
    '''
    # Per-route evaluation CSVs -> full set of estimator comparison plots.
    l = ['random','route_1','route_2','route_3_1','route_3_2','route_3_3','route_4_1','route_4_2']
    for r in l:
        df = pd.read_csv('../logsdir/csv_logs/distance_evaluation_'+r+'.csv')
        plot_distance_estimators(df, r+'_epoch_{}_'.format(epoch))
    # Head-to-head comparisons between route variants.
    plot_against('../logsdir/csv_logs/','distance_evaluation_','route_3_1','route_3_2', epoch)
    plot_against('../logsdir/csv_logs/', 'distance_evaluation_', 'route_4_1', 'route_4_2', epoch)
    # Per-group CSVs -> single-metric plots per group.
    for gr in ['a','b','c','d','e','f', 'g', 'h', 'i']:
        df = pd.read_csv('../logsdir/csv_logs/group_'+gr+'.csv')
        plot_distance_group(df, 'group_'+gr+'_epoch_{}_'.format(epoch))
| 48.960725 | 118 | 0.648895 |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
# BUGFIX: comment stripping corrupted "X_TIMESTEPS" into "STEPS", but
# POSSIBLE_X_AXES below still references X_TIMESTEPS -> NameError at import.
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
# Per-curve palette used by plot_curves (24 entries).
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
          'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
          'darkgreen', 'tan', 'salmon', 'gold', 'lightpurple', 'darkred', 'darkblue']
def rolling_window(array, window):
shape = array.shape[:-1] + (array.shape[-1] - window + 1, window)
strides = array.strides + (array.strides[-1],)
return np.lib.stride_tricks.as_strided(array, shape=shape, strides=strides)
def window_func(var_1, var_2, window, func):
var_2_window = rolling_window(var_2, window)
function_on_var2 = func(var_2_window, axis=-1)
return var_1[window - 1:], function_on_var2
def plot_curves(xy_list, xlabel, ylabel, window=1, labels=None,title=None, filename=None):
plt.figure(figsize=(16, 8))
maxx = max(xy[0][-1] for xy in xy_list)
minx = 0
for (i, (x, y)) in enumerate(xy_list):
color = COLORS[i]
if x.shape[0] >= window:
x, y_mean = window_func(x, y, window, np.mean)
if labels is None:
plt.plot(x, y_mean, color=color)
else:
plt.plot(x, y_mean, color=color, label =labels[i])
plt.xlim(minx, maxx)
if title is not None:
plt.title(title)
plt.legend(loc="upper left")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.tight_layout()
if filename is not None:
plt.savefig(filename)
def tdm_training_plot():
df = pd.read_csv('../logsdir/csv_logs/2020_07_02_12_18_26tdm_training_log.csv')
indexes = df.columns.values
epoch_time = df[['epoch', 'time']].groupby(by=['epoch'])
epoch_time = epoch_time.last()
y = epoch_time[['time']].values[:, 0]
x = np.arange(0, len(y))
plot_curves([(x, y)], 'epoch', 'time needed', title='(mixed)',
filename='../logsdir/csv_logs/time.png')
epAndEpoch_losses = df[
['epoch', 'episode', 'actor_tr_loss', 'critic_tr_loss', 'actor_eval_loss', 'critic_eval_loss']
].dropna(subset=['episode']).fillna(0.)
x = np.arange(0, len(epAndEpoch_losses))
losses_labels = ['actor_tr_loss', 'critic_tr_loss', 'actor_eval_loss', 'critic_eval_loss']
xs_ys = [(x, epAndEpoch_losses[[k]].values[:, 0])
for k in losses_labels]
plot_curves(xs_ys, 'episode (each 100 timesteps)', 'loss', window=20, title='accumulated loss (mixed)',
labels=losses_labels, filename='../logsdir/csv_logs/log_losses.png')
epAndEpoch_reward = df[['epoch', 'episode', 'episode_reward']].dropna(subset=['episode'])
epAndEpoch_distance = df[['epoch', 'episode', 'distance_to_goal']].dropna(subset=['episode'])
y = epAndEpoch_distance[['distance_to_goal']].values[:, 0]
x = np.arange(0, len(y))
plot_curves([(x, y)], 'episode', 'distance to goal (euclidean distance)', title='distance after episode (mixed)',
window=20, filename='../logsdir/csv_logs/distance.png')
def plot_distances(df, columns_names, labels, name):
x = np.arange(0, len(df))
xs_ys = [(x, df[[k]].values[:, 0]) for k in columns_names]
plot_curves(xs_ys, 'step', 'distance ', title='distance '+name,
labels=labels, filename='../logsdir/csv_logs/distance_evaluation_'+name+'.png')
def plot_against(path_to_folder,csv_filename_prefix, route1_name, route2_name, epoch):
df1 = pd.read_csv(path_to_folder + csv_filename_prefix+route1_name + '.csv')
df2 = pd.read_csv(path_to_folder +csv_filename_prefix+ route2_name + '.csv')
def plot_ag(columns_names, labels):
x = np.arange(0, len(df1))
xs_ys = [(x, df1[[k]].values[:, 0]) for k in columns_names]
xs_ys2 = [(x, df2[[k]].values[:, 0]) for k in columns_names]
l1 = [l + '_'+route1_name for l in labels]
l2 = [l + '_'+route2_name for l in labels]
plot_curves(xs_ys + xs_ys2, 'step', 'distance ', title='distance ' +route1_name+'_against_'+route2_name,
labels=l1 + l2,
filename=path_to_folder + route1_name+'_against_'+
route2_name+'_'+'_'.join(labels) +'_epoch{}'.format(epoch)+'.png')
columns_names_goal = ['from_achieved_goal_to_desired_goal_l2_dist']
labels_goal = ['l2_distance']
plot_ag(columns_names_goal, labels_goal)
columns_names_goal = ['from_state_latent_to_goal_latent_estimator1']
labels_goal = ['estimator1']
plot_ag(columns_names_goal, labels_goal)
columns_names_goal = ['from_state_latent_to_goal_latent_estimator2']
labels_goal = ['estimator2']
plot_ag(columns_names_goal, labels_goal)
def plot_distance_estimators(df, name_prefix):
plot_distances(df, ['from_achieved_goal_to_desired_goal_l2_dist', 'from_state_latent_to_goal_latent_estimator1',
'from_state_latent_to_goal_latent_estimator2'],
['l2_distance', 'estimator1', 'estimator2'],
name_prefix + '_distance_to_goal')
plot_distances(df, ['from_achieved_goal_to_desired_goal_l2_dist', 'from_state_latent_to_goal_latent_estimator1'],
['l2_distance', 'estimator1'],
name_prefix + '_distance_to_goal_estimator_1')
plot_distances(df, ['from_achieved_goal_to_desired_goal_l2_dist', 'from_state_latent_to_goal_latent_estimator2'],
['l2_distance', 'estimator2'],
name_prefix + '_distance_to_goal_estimator_2')
plot_distances(df, ['from_achieved_goal_to_achieved_goal_l2_dist', 'from_state_latent_to_state_latent_estimator1',
'from_state_latent_to_state_latent_estimator2', 'to_last_steps'],
['l2_distance', 'estimator1', 'estimator2', 'steps_distance'],
name_prefix + '_distance_to_last_state')
plot_distances(df, ['from_achieved_goal_to_achieved_goal_l2_dist', 'from_state_latent_to_state_latent_estimator1',
'to_last_steps'],
['l2_distance', 'estimator1', 'steps_distance'],
name_prefix + '_distance_to_last_state_estimator_1')
plot_distances(df, ['from_achieved_goal_to_achieved_goal_l2_dist', 'from_state_latent_to_state_latent_estimator2',
'to_last_steps'],
['l2_distance', 'estimator2', 'steps_distance'],
name_prefix + '_distance_to_last_state_estimator_2')
plot_distances(df, ['achieved_goal_l2_dist_traj', 'state_latent_estimator1_traj',
'state_latent_estimator2_traj', 'to_last_steps'],
['l2_distance', 'estimator1', 'estimator2', 'steps_distance'],
name_prefix + '_along_trajectory_cumulated')
plot_distances(df, ['achieved_goal_l2_dist_traj', 'state_latent_estimator1_traj',
'to_last_steps'],
['l2_distance', 'estimator1', 'steps_distance'],
name_prefix + '_along_trajectory_cumulated_estimator_1')
plot_distances(df, ['achieved_goal_l2_dist_traj', 'state_latent_estimator2_traj',
'to_last_steps'],
['l2_distance', 'estimator2', 'steps_distance'],
name_prefix + '_along_trajectory_cumulated_estimator_2')
plot_distances(df, ['achieved_goal_to_next_l2_dist', 'state_latent_to_next_estimator1',
'state_latent_to_next_estimator2'],
['l2_distance', 'estimator1', 'estimator2'],
name_prefix + '_to_next')
plot_distances(df, ['achieved_goal_to_next_l2_dist', 'state_latent_to_next_estimator2'],
['l2_distance', 'estimator1'],
name_prefix + '_to_next_estimator_1')
plot_distances(df, ['achieved_goal_to_next_l2_dist', 'state_latent_to_next_estimator1'],
['l2_distance', 'estimator2'],
name_prefix + '_to_next_estimator_2')
def plot_distance_estimators_from_training(df, name_prefix):
    """Plot l2/estimator distance curves recorded during training.

    For each (columns, labels, name suffix) triple below this delegates to
    ``plot_distances``, producing the figure named ``name_prefix + suffix``.
    Behaviour is identical to issuing the twelve calls inline.
    """
    plot_specs = [
        (['to_goal_l2', 'to_goal_est', 'to_goal_2est'],
         ['l2_distance', 'estimator1', 'estimator2'],
         '_distance_to_goal'),
        (['to_goal_l2', 'to_goal_est'],
         ['l2_distance', 'estimator1'],
         '_distance_to_goal_estimator_1'),
        (['to_goal_l2', 'to_goal_2est'],
         ['l2_distance', 'estimator2'],
         '_distance_to_goal_estimator_2'),
        (['to_last_l2', 'to_last_est', 'to_last_2est', 'to_last_steps'],
         ['l2_distance', 'estimator1', 'estimator2', 'steps_distance'],
         '_distance_to_last_state'),
        (['to_last_l2', 'to_last_est', 'to_last_steps'],
         ['l2_distance', 'estimator1', 'steps_distance'],
         '_distance_to_last_state_estimator_1'),
        (['to_last_l2', 'to_last_2est', 'to_last_steps'],
         ['l2_distance', 'estimator2', 'steps_distance'],
         '_distance_to_last_state_estimator_2'),
        (['to_last_l2_traj', 'to_last_est_traj', 'to_last_2est_traj', 'to_last_steps'],
         ['l2_distance', 'estimator1', 'estimator2', 'steps_distance'],
         '_along_trajectory_cumulated'),
        (['to_last_l2_traj', 'to_last_est_traj', 'to_last_steps'],
         ['l2_distance', 'estimator1', 'steps_distance'],
         '_along_trajectory_cumulated_estimator_1'),
        (['to_last_l2_traj', 'to_last_2est_traj', 'to_last_steps'],
         ['l2_distance', 'estimator2', 'steps_distance'],
         '_along_trajectory_cumulated_estimator_2'),
        (['to_next_l2', 'to_next_est', 'to_next_2est'],
         ['l2_distance', 'estimator1', 'estimator2'],
         '_to_next'),
        (['to_next_l2', 'to_next_est'],
         ['l2_distance', 'estimator1'],
         '_to_next_estimator_1'),
        (['to_next_l2', 'to_next_2est'],
         ['l2_distance', 'estimator2'],
         '_to_next_estimator_2'),
    ]
    for columns, labels, suffix in plot_specs:
        plot_distances(df, columns, labels, name_prefix + suffix)
def plot_distance_group(df, name_prefix):
    """Render one curve plot per distance column of a group dataframe.

    Writes three PNG files under ../logsdir/csv_logs/, one each for the
    'l2_dist', 'estimator1' and 'estimator2' columns of *df*.
    """
    def _plot_columns(column_names, labels, name):
        # One (x, y) pair per requested column, all sharing a common x axis.
        xs = np.arange(0, len(df))
        pairs = []
        for column in column_names:
            pairs.append((xs, df[[column]].values[:, 0]))
        plot_curves(pairs, 'pair', 'distance', title='distance ' + name,
                    labels=labels,
                    filename='../logsdir/csv_logs/distance_evaluation_' + name + '.png')
    for column, label in (('l2_dist', 'l2_distance'),
                          ('estimator1', 'estimator1'),
                          ('estimator2', 'estimator2')):
        _plot_columns([column], [label], name_prefix + label)
if __name__ == "__main__":
    # Epoch of the evaluation logs being visualised; appears in output names.
    epoch = 390
    # One distance-evaluation CSV per evaluation route.
    l = ['random','route_1','route_2','route_3_1','route_3_2','route_3_3','route_4_1','route_4_2']
    for r in l:
        df = pd.read_csv('../logsdir/csv_logs/distance_evaluation_'+r+'.csv')
        plot_distance_estimators(df, r+'_epoch_{}_'.format(epoch))
    # Presumably compares the two variants of each paired route -- see plot_against.
    plot_against('../logsdir/csv_logs/','distance_evaluation_','route_3_1','route_3_2', epoch)
    plot_against('../logsdir/csv_logs/', 'distance_evaluation_', 'route_4_1', 'route_4_2', epoch)
    # Per-group point plots from the group_<x>.csv logs.
    for gr in ['a','b','c','d','e','f', 'g', 'h', 'i']:
        df = pd.read_csv('../logsdir/csv_logs/group_'+gr+'.csv')
        plot_distance_group(df, 'group_'+gr+'_epoch_{}_'.format(epoch))
| true | true |
1c2d11ff8e57664d9a10a9757b4355520a675e8a | 3,325 | py | Python | mlio/resources/manager.py | Workable/milo | 66c6425188cd20efaadfea7dfb31d730e68fe84b | [
"MIT"
] | 5 | 2018-07-17T07:09:08.000Z | 2020-03-14T21:06:37.000Z | mlio/resources/manager.py | Workable/mlio | 66c6425188cd20efaadfea7dfb31d730e68fe84b | [
"MIT"
] | null | null | null | mlio/resources/manager.py | Workable/mlio | 66c6425188cd20efaadfea7dfb31d730e68fe84b | [
"MIT"
] | null | null | null | import logging as _logging
from mlio.resources.exceptions import ResourceNotFoundError
from .repositories import RepositoriesContainer
logger = _logging.getLogger(__name__)
class ResourceManager:
    """
    A registry of resource objects that are dynamically discovered and loaded from a prioritized list of
    sources (called repositories).
    """
    def __init__(self):
        """
        Instantiate a new, empty resource manager with its own repositories container.
        """
        # Maps resource id -> resource object registered through add_resource().
        self._resources = {} # type: dict[str, mlio.resources.resource_types.ResourceBase]
        self._repositories = RepositoriesContainer()
    @property
    def repositories(self):
        """ Prioritized container of sources that resources are loaded from.
        :rtype: RepositoriesContainer """
        return self._repositories
    @property
    def resources(self):
        """ All registered resources, keyed by their id.
        :rtype: dict[str, mlio.resources.resource_types.ResourceBase]"""
        return self._resources
    def has_resource(self, resource_id):
        """
        Check if a resource with this id is registered in the manager.
        :param str resource_id: The identifier of the resource
        :rtype: bool
        """
        return resource_id in self._resources
    __contains__ = has_resource
    def add_resource(self, resource):
        """
        Add a new resource in the manager. The id of the resource must be unique in this manager and the resource
        object must not be registered in any other manager.
        :param mlio.resources.resource_types.ResourceBase resource: The resource object to be added.
        :raises KeyError: If a resource with the same id is already registered.
        """
        if self.has_resource(resource.id):
            logger.warning("There is already a resource with this resource repository_id: {}".format(resource.id))
            raise KeyError("There is already a resource with this resource repository_id: {}".format(resource.id))
        self._resources[resource.id] = resource
        # Back-reference so the resource can reach this manager.
        resource.bind_manager(self)
        logger.info("Resource '{}' has been added to the resource manager {}".format(resource.id, self))
    def __getitem__(self, resource_id):
        """
        Return the ``object`` attribute of the resource registered under ``resource_id``.
        :raises ResourceNotFoundError: If no resource with this id is registered.
        """
        if resource_id not in self._resources:
            raise ResourceNotFoundError("Cannot find resource '{}' in resource manager".format(resource_id))
        return self._resources[resource_id].object
    def load_resources(self, resource_ids=None):
        """
        Load resource from the repository in memory. Resources that are already loaded will be skipped.
        :param None|List[str] resource_ids: If None (or any falsy value, e.g. an empty list) all registered
        resources are loaded, otherwise only the listed resource ids are loaded.
        :raises ResourceNotFoundError: If any requested id is not registered.
        """
        if not resource_ids:
            resource_ids = self._resources.keys()
        else:
            # Validate that all keys exist before loading anything.
            for resource_id in resource_ids:
                if resource_id not in self:
                    raise ResourceNotFoundError("Cannot find resource '{}' in resource manager".format(resource_id))
        for resource_id in resource_ids:
            self.resources[resource_id].load()
    def __str__(self):
        return "<ResourceManager: #{total_resources} resources in #{total_repos} repositories>".format(
            total_repos=len(self.repositories),
            total_resources=len(self._resources)
        )
    def __repr__(self):
        return self.__str__()
| 37.359551 | 116 | 0.667669 | import logging as _logging
from mlio.resources.exceptions import ResourceNotFoundError
from .repositories import RepositoriesContainer
logger = _logging.getLogger(__name__)
class ResourceManager:
def __init__(self):
self._resources = {}
self._repositories = RepositoriesContainer()
@property
def repositories(self):
return self._repositories
@property
def resources(self):
return self._resources
def has_resource(self, resource_id):
return resource_id in self._resources
__contains__ = has_resource
def add_resource(self, resource):
if self.has_resource(resource.id):
logger.warning("There is already a resource with this resource repository_id: {}".format(resource.id))
raise KeyError("There is already a resource with this resource repository_id: {}".format(resource.id))
self._resources[resource.id] = resource
resource.bind_manager(self)
logger.info("Resource '{}' has been added to the resource manager {}".format(resource.id, self))
def __getitem__(self, resource_id):
if resource_id not in self._resources:
raise ResourceNotFoundError("Cannot find resource '{}' in resource manager".format(resource_id))
return self._resources[resource_id].object
def load_resources(self, resource_ids=None):
if not resource_ids:
resource_ids = self._resources.keys()
else:
for resource_id in resource_ids:
if resource_id not in self:
raise ResourceNotFoundError("Cannot find resource '{}' in resource manager".format(resource_id))
for resource_id in resource_ids:
self.resources[resource_id].load()
def __str__(self):
return "<ResourceManager: #{total_resources} resources in #{total_repos} repositories>".format(
total_repos=len(self.repositories),
total_resources=len(self._resources)
)
def __repr__(self):
return self.__str__()
| true | true |
1c2d120d9830a54c809476f7ceb13e76d62d5b68 | 1,510 | py | Python | server/benchmarkdataset/permissions.py | johnugeorge/medperf | 5bc3f643064df14e9476bd4d4c1a4c0cce5337d5 | [
"Apache-2.0"
] | 1 | 2021-09-24T18:09:53.000Z | 2021-09-24T18:09:53.000Z | server/benchmarkdataset/permissions.py | johnugeorge/medperf | 5bc3f643064df14e9476bd4d4c1a4c0cce5337d5 | [
"Apache-2.0"
] | 2 | 2021-09-27T16:14:04.000Z | 2021-11-03T14:24:54.000Z | server/benchmarkdataset/permissions.py | johnugeorge/medperf | 5bc3f643064df14e9476bd4d4c1a4c0cce5337d5 | [
"Apache-2.0"
] | null | null | null | from rest_framework.permissions import BasePermission
from benchmark.models import Benchmark
from dataset.models import Dataset
class IsAdmin(BasePermission):
    """DRF permission: grants access only to superusers."""
    def has_permission(self, request, view):
        # Truthy return value allows the request.
        return request.user.is_superuser
class IsDatasetOwner(BasePermission):
    """DRF permission: grants access only to the owner of the referenced dataset."""
    def get_object(self, pk):
        """Return the Dataset with primary key ``pk``, or None when it does not exist."""
        try:
            return Dataset.objects.get(pk=pk)
        except Dataset.DoesNotExist:
            # Bug fix: this used to be `raise None`, which raises a TypeError
            # (only BaseException subclasses may be raised) instead of
            # signalling "not found" to has_permission().
            return None
    def has_permission(self, request, view):
        """Allow the request only when the requesting user owns the dataset.
        The dataset pk is read from the request body on POST and from the
        ``pk`` URL kwarg otherwise; missing or unknown pks are denied.
        """
        if request.method == "POST":
            pk = request.data.get("dataset", None)
        else:
            pk = view.kwargs.get("pk", None)
        if not pk:
            return False
        dataset = self.get_object(pk)
        if not dataset:
            return False
        # Owner check; equivalent to the previous if/else returning True/False.
        return dataset.owner.id == request.user.id
class IsBenchmarkOwner(BasePermission):
    """DRF permission: grants access only to the owner of the referenced benchmark."""
    def get_object(self, pk):
        """Return the Benchmark with primary key ``pk``, or None when it does not exist."""
        try:
            return Benchmark.objects.get(pk=pk)
        except Benchmark.DoesNotExist:
            # Bug fix: this used to be `raise None`, which raises a TypeError
            # (only BaseException subclasses may be raised) instead of
            # signalling "not found" to has_permission().
            return None
    def has_permission(self, request, view):
        """Allow the request only when the requesting user owns the benchmark.
        The benchmark pk is read from the request body on POST and from the
        ``bid`` URL kwarg otherwise; missing or unknown pks are denied.
        """
        if request.method == "POST":
            pk = request.data.get("benchmark", None)
        else:
            pk = view.kwargs.get("bid", None)
        if not pk:
            return False
        benchmark = self.get_object(pk)
        if not benchmark:
            return False
        # Owner check; equivalent to the previous if/else returning True/False.
        return benchmark.owner.id == request.user.id
| 27.454545 | 53 | 0.588742 | from rest_framework.permissions import BasePermission
from benchmark.models import Benchmark
from dataset.models import Dataset
class IsAdmin(BasePermission):
def has_permission(self, request, view):
return request.user.is_superuser
class IsDatasetOwner(BasePermission):
def get_object(self, pk):
try:
return Dataset.objects.get(pk=pk)
except Dataset.DoesNotExist:
raise None
def has_permission(self, request, view):
if request.method == "POST":
pk = request.data.get("dataset", None)
else:
pk = view.kwargs.get("pk", None)
if not pk:
return False
dataset = self.get_object(pk)
if not dataset:
return False
if dataset.owner.id == request.user.id:
return True
else:
return False
class IsBenchmarkOwner(BasePermission):
def get_object(self, pk):
try:
return Benchmark.objects.get(pk=pk)
except Benchmark.DoesNotExist:
raise None
def has_permission(self, request, view):
if request.method == "POST":
pk = request.data.get("benchmark", None)
else:
pk = view.kwargs.get("bid", None)
if not pk:
return False
benchmark = self.get_object(pk)
if not benchmark:
return False
if benchmark.owner.id == request.user.id:
return True
else:
return False
| true | true |
1c2d123bb9f3f859a243b8a2d858280e6889d55f | 169 | py | Python | Helloapp/venv/Scripts/django-admin.py | ramadevim/Hello-World | df85d1c8d9650c2f1fb04955ac293a47cdba05bd | [
"MIT"
] | null | null | null | Helloapp/venv/Scripts/django-admin.py | ramadevim/Hello-World | df85d1c8d9650c2f1fb04955ac293a47cdba05bd | [
"MIT"
] | null | null | null | Helloapp/venv/Scripts/django-admin.py | ramadevim/Hello-World | df85d1c8d9650c2f1fb04955ac293a47cdba05bd | [
"MIT"
] | 1 | 2019-10-02T11:56:36.000Z | 2019-10-02T11:56:36.000Z | #!C:\Users\maxgen\django proj\Helloapp\venv\Scripts\python.exe
from django.core import management
# Dispatch the command-line arguments straight to Django's management utility.
if __name__ == "__main__":
    management.execute_from_command_line()
| 28.166667 | 62 | 0.786982 |
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| true | true |
1c2d144146c5d13357b58eb927531a2dbd9a0908 | 1,322 | py | Python | main.py | zero-bugs/littlepytest | c3929dee3d5d40ba9c33200d9f441ae61ff36673 | [
"MIT"
] | null | null | null | main.py | zero-bugs/littlepytest | c3929dee3d5d40ba9c33200d9f441ae61ff36673 | [
"MIT"
] | null | null | null | main.py | zero-bugs/littlepytest | c3929dee3d5d40ba9c33200d9f441ae61ff36673 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
import threading
from common.common_config import CommonConstant
from func.kc_scrawl import KcScrawlImpl, historyImgList
if __name__ == "__main__":
    # Phase 1: pull the data via the API.
    kcScrawlImpl = KcScrawlImpl()
    kcScrawlImpl.init()
    # kcScrawlImpl.scrawPicUseApiAll()
    print(len(historyImgList))
    try:
        # Download images: worker thread 1, args (1, 20, cutoff timestamp).
        t1 = threading.Thread(
            target=kcScrawlImpl.scrawPicUseApiAllLatest,
            args=(1, 20, '2021-05-10 00:00:00'),
            name="FirstThread-1",
        )
        t1.start()
        # Download images: worker thread 2, args (21, 40).
        t2 = threading.Thread(
            target=kcScrawlImpl.scrawPicUseApiAllLatest,
            args=(21, 40, '2021-05-10 00:00:00'),
            name="FirstThread-2",
        )
        t2.start()
        # Download images: worker thread 3, args (41, 60).
        t3 = threading.Thread(
            target=kcScrawlImpl.scrawPicUseApiAllLatest,
            args=(41, 60, '2021-05-10 00:00:00'),
            name="FirstThread-3",
        )
        t3.start()
        #
        # # Download images: disabled fourth worker.
        # t4 = threading.Thread(
        #     target=kcScrawlImpl.scrawPicUseApiAllLatest,
        #     args=(301, 400, '2020-11-30 00:00:00'),
        #     name="FirstThread-4",
        # )
        # t4.start()
    except Exception as e:
        # NOTE(review): workers are started but never joined; confirm the main
        # thread is meant to exit while downloads continue in the background.
        print("start thread error.")
        print(e)
| 25.921569 | 58 | 0.549924 |
import threading
from common.common_config import CommonConstant
from func.kc_scrawl import KcScrawlImpl, historyImgList
if __name__ == "__main__":
kcScrawlImpl = KcScrawlImpl()
kcScrawlImpl.init()
print(len(historyImgList))
try:
t1 = threading.Thread(
target=kcScrawlImpl.scrawPicUseApiAllLatest,
args=(1, 20, '2021-05-10 00:00:00'),
name="FirstThread-1",
)
t1.start()
t2 = threading.Thread(
target=kcScrawlImpl.scrawPicUseApiAllLatest,
args=(21, 40, '2021-05-10 00:00:00'),
name="FirstThread-2",
)
t2.start()
t3 = threading.Thread(
target=kcScrawlImpl.scrawPicUseApiAllLatest,
args=(41, 60, '2021-05-10 00:00:00'),
name="FirstThread-3",
)
t3.start()
except Exception as e:
print("start thread error.")
print(e)
| true | true |
1c2d1454c5b45a992c0ec7434b5643be04552823 | 6,846 | py | Python | stock_plots.py | Preston5789/Stock_Regression_Algorithm | f65dd0c710be777628a0c03ce0fa851880cf0d81 | [
"MIT"
] | 1 | 2019-05-27T19:39:35.000Z | 2019-05-27T19:39:35.000Z | stock_plots.py | Preston5789/Stock_Regression_Algorithm | f65dd0c710be777628a0c03ce0fa851880cf0d81 | [
"MIT"
] | null | null | null | stock_plots.py | Preston5789/Stock_Regression_Algorithm | f65dd0c710be777628a0c03ce0fa851880cf0d81 | [
"MIT"
] | 2 | 2019-05-03T03:33:26.000Z | 2020-05-15T23:59:55.000Z |
import matplotlib.pyplot as plt
import numpy
import time
from matplotlib.widgets import Button
from stock_trade import Jesus
from stock_connect import Connection
from stock_reg import Errors
# Module-level collaborators: regression-error helper, data connection,
# and trading engine; con.socket() opens the data connection.
err = Errors()
graphswitcheroo = 0
con = Connection()
jes = Jesus()
con.socket()
# Interactive mode so the update loop below can redraw the figure in place.
plt.ion()
fig, axs = plt.subplots(4, 1,figsize=(8,10), constrained_layout=True)
# Point buffers: live prices (x1/y1), fitted-line window (x2/y2),
# slope estimators (x3/y3, x4/y4) and executed buys/sells.
x1, y1 = [],[]
x2, y2 = [], []
x3, y3 = [], []
x4, y4 = [], []
buyx, buyy = [], []
sellx, selly = [], []
s=2
livedata = axs[0].scatter(x1,y1,s=s, c = 'blue')
selldata = axs[0].scatter(sellx, selly, s = 15, c = 'r')
buydata = axs[0].scatter(buyx, buyy, s = 15, c = 'g')
lnn, = axs[0].plot([],[], 'green', linewidth = 1.0)
ln, = axs[0].plot([],[],'red', linewidth = 1.0)
axs[0].set_title('Live Data')
axs[1].set_title('Linear Regression Analysis')
axs[1].set_ylabel('Slope Value')
axs[0].set_ylabel('Price Over Time')
axs[0].set_xlabel('time (s)')
axs[1].set_xlabel('time (s)')
# Error-bound lines on the slope axis: red for estimator 1, green for estimator 2.
ln1, = axs[1].plot([],[], 'r', linewidth = 0.2,)
ln2, = axs[1].plot([],[], 'r', linewidth=0.2)
ln11, = axs[1].plot([],[], 'g', linewidth=0.4)
ln22, = axs[1].plot([],[], 'g', linewidth=0.4)
slopedata = axs[1].scatter(x3,y3, s=s, c = 'black')
slopedata2 = axs[1].scatter(x4,y4, s=s, c = 'blue')
# Initial 20-second x window, expressed in milliseconds since the epoch.
maxtime = time.time()*1000.0 + 20000
begintime = time.time()*1000.0
axs[0].set_xlim(begintime,maxtime)
axs[1].set_xlim(begintime,maxtime)
axs[0].grid()
axs[1].grid()
plt.draw()
axs[2].set_title('Regression Trader: Control Panel')
axs[3].set_title('Historical Data')
axs[2].set_ylabel('Price')
axs[2].set_xlabel('Server Time')
# Figure-level status labels, refreshed from the update loop.
ticker_text = fig.text(0.5, 0.43, '' , multialignment="left")
time_text = fig.text(0.5, 0.40, '' , multialignment="left")
yield_text = fig.text(0.5, 0.37, '', multialignment="left")
lossyield_text = fig.text(0.5, 0.34, '', multialignment="left")
totalsell_text = fig.text(0.5, 0.31, '', multialignment="left")
def _closeshop(event, tickerlist=None):
    """Button callback: hand the ticker list to the trading engine's shutdown.

    Bug fix: matplotlib invokes ``Button.on_clicked`` callbacks with only the
    click event, so the previously mandatory ``tickerlist`` argument made the
    "Wrap It Up" button raise a TypeError. It now defaults to the connection's
    full ticker list while still accepting an explicit list from direct calls.
    """
    jes.closeshop(con.tickerlist if tickerlist is None else tickerlist)
def on_click(event):
    """Mouse-press handler registered on the figure canvas; debug logging only."""
    message = 'doubleclick'
    print(message)
#definition to cycle through graphs
def _yes(event):
    """Button callback: advance the displayed ticker index, wrapping after 13.

    Mutates the module-level ``graphswitcheroo`` selector (kept in 0..13).
    """
    global graphswitcheroo
    next_index = graphswitcheroo + 1
    graphswitcheroo = 0 if next_index >= 14 else next_index
#Renders the graph for all of the trading data
def _rendergraph(event):
    """Button callback: redraw the 'Historical Data' panel (axs[3]).

    Replots the full recorded price history of the currently selected ticker
    (index ``graphswitcheroo``) and overlays the buy/sell markers.
    """
    print('Generating Graph')
    axs[3].clear()
    # Fixed typo ('Controll' -> 'Control') so the title matches the one set at
    # startup instead of visibly changing when the button is pressed.
    axs[2].set_title('Regression Trader: Control Panel')
    axs[3].set_title('Historical Data')
    axs[3].set_ylabel('Price')
    axs[3].set_xlabel('Server Time')
    axs[3].grid()
    x5 = [float(i) for i in con.trackertime_list[graphswitcheroo]]
    y5 = [float(i) for i in con.tracker_list[graphswitcheroo]]
    axs[3].plot(x5,y5)
    # Sell markers in red, buy markers in green, matching the live panel.
    axs[3].scatter(sellx, selly,c='red', linewidths=2.0,edgecolors='black',s=60)
    axs[3].scatter(buyx,buyy,c='green',linewidths=2.0,edgecolors='black',s=60)
# Wire up the UI controls: a canvas-wide mouse handler plus three buttons.
plt.connect('button_press_event', on_click)
nexcut = plt.axes([0.15, 0.40, .17, .05], facecolor='k')
graphcut = plt.axes([0.15, 0.32, .3,.06], facecolor = 'k')
wrapcut = plt.axes([0.78, 0.38, .17,.05], facecolor = 'k')
bnexcut = Button(nexcut, 'Next Stock', color='red', hovercolor='white')
bgraphcut = Button(graphcut, 'Generate Graph', color='blue', hovercolor='white')
bwrapcut = Button(wrapcut, 'Wrap It Up', color='yellow',hovercolor='white')
# NOTE(review): Button.on_clicked calls each callback with only the click
# event, so these callbacks must not require extra positional arguments.
bgraphcut.on_clicked(_rendergraph)
bnexcut.on_clicked(_yes)
bwrapcut.on_clicked(_closeshop)
while(True):
#print(con.tracker_list)
x1 = [float(i) for i in con.trackertime_list[graphswitcheroo][-1000:]]
y1 = [float(i) for i in con.tracker_list[graphswitcheroo][-1000:]]
x2 =[float(i) for i in con.temptime_list[graphswitcheroo]]
y2 =[float(i) for i in con.temp_list[graphswitcheroo]]
buyx = [float(i) for i in jes.buytime_list[graphswitcheroo]]
buyy = [float(i) for i in jes.buy_list[graphswitcheroo]]
sellx =[float(i) for i in jes.selltime_list[graphswitcheroo]]
selly =[float(i) for i in jes.sell_list[graphswitcheroo]]
##Make sure the graphs are all tidy like
if((len(y1)>0)):
axs[0].set_ylim(0.98*float(min(y1)) ,1.02*float(max(y1)))
time_text.set_text('Current Price: ' + con.temp_list[graphswitcheroo][0].rstrip('0'))
ticker_text.set_text(con.tickerlist[graphswitcheroo])
yield_text.set_text('Permanent Yield: ' + str(jes.tdyyield))
lossyield_text.set_text('Unsold Yield: ' + str(sum(jes.lssyield)))
totalsell_text.set_text('Amount of Trades Holding Assets: ' + str(sum(jes.totalsell)))
axs[0].set_xlim(float(x1[0]), float(x1[-1]))
axs[1].set_xlim(float(x1[0]), float(x1[-1]))
if(len(buyx) == len(buyy)):
buydata.set_offsets(numpy.c_[buyx, buyy])
if(len(sellx)==len(selly)):
selldata.set_offsets(numpy.c_[sellx, selly])
if(numpy.shape(numpy.array(x1))==numpy.shape(numpy.array(y1))):
livedata.set_offsets(numpy.c_[x1, y1])
toparray = [float(i) for i in con.toperror_list[graphswitcheroo]]
print(toparray)
bottomarray = [float(i) for i in con.bottomerror_list[graphswitcheroo]]
toparray2 = [float(i) for i in con.toperror2_list[graphswitcheroo]]
bottomarray2 = [float(i) for i in con.bottomerror2_list[graphswitcheroo]]
if(len(con.tracker_list[graphswitcheroo])>=35):
x3 =[float(i) for i in con.slopetime_list[graphswitcheroo]]
y3 =[float(i) for i in con.slope_list[graphswitcheroo]]
ln1.set_data(x3, toparray)
ln2.set_data(x3, bottomarray)
print("The dimensin of x2 is: {}".format(len(x2)))
print("The dimensin of of line1 is: {}".format(len(con.line1_list[graphswitcheroo])))
print(con.line1_list[graphswitcheroo])
ln.set_data(x2,con.line1_list[graphswitcheroo])
slopedata.set_offsets(numpy.c_[x3,y3])
axs[1].set_xlim(x3[0], x3[-1])
axs[1].set_ylim(min(bottomarray),max(toparray))
if(len(con.tracker_list[graphswitcheroo])>=95):
x4 =[float(i) for i in con.slopetime2_list[graphswitcheroo]]
y4 =[float(i) for i in con.slope2_list[graphswitcheroo]]
y60 = [float(i) for i in con.line2_list[graphswitcheroo]]
x60 = [float(i) for i in con.temptime2_list[graphswitcheroo][0:64]]
ln11.set_data(x4, toparray2)
ln22.set_data(x4, bottomarray2)
lnn.set_data(x60 ,y60)
slopedata2.set_offsets(numpy.c_[x4,y4])
for i in range(len(con.tickerlist)):
if len(con.tracker_list[i])>100:
jes.defender[i] = True;
Jesus().decide(con.startbalnace, con.toperror_list, con.bottomerror_list, con.toperror2_list, con.bottomerror2_list, con.temp_list, con.temptime_list, con.tickerlist, con.tracker_list)
plt.pause(0.01)
| 36.222222 | 193 | 0.647239 |
import matplotlib.pyplot as plt
import numpy
import time
from matplotlib.widgets import Button
from stock_trade import Jesus
from stock_connect import Connection
from stock_reg import Errors
err = Errors()
graphswitcheroo = 0
con = Connection()
jes = Jesus()
con.socket()
plt.ion()
fig, axs = plt.subplots(4, 1,figsize=(8,10), constrained_layout=True)
x1, y1 = [],[]
x2, y2 = [], []
x3, y3 = [], []
x4, y4 = [], []
buyx, buyy = [], []
sellx, selly = [], []
s=2
livedata = axs[0].scatter(x1,y1,s=s, c = 'blue')
selldata = axs[0].scatter(sellx, selly, s = 15, c = 'r')
buydata = axs[0].scatter(buyx, buyy, s = 15, c = 'g')
lnn, = axs[0].plot([],[], 'green', linewidth = 1.0)
ln, = axs[0].plot([],[],'red', linewidth = 1.0)
axs[0].set_title('Live Data')
axs[1].set_title('Linear Regression Analysis')
axs[1].set_ylabel('Slope Value')
axs[0].set_ylabel('Price Over Time')
axs[0].set_xlabel('time (s)')
axs[1].set_xlabel('time (s)')
ln1, = axs[1].plot([],[], 'r', linewidth = 0.2,)
ln2, = axs[1].plot([],[], 'r', linewidth=0.2)
ln11, = axs[1].plot([],[], 'g', linewidth=0.4)
ln22, = axs[1].plot([],[], 'g', linewidth=0.4)
slopedata = axs[1].scatter(x3,y3, s=s, c = 'black')
slopedata2 = axs[1].scatter(x4,y4, s=s, c = 'blue')
maxtime = time.time()*1000.0 + 20000
begintime = time.time()*1000.0
axs[0].set_xlim(begintime,maxtime)
axs[1].set_xlim(begintime,maxtime)
axs[0].grid()
axs[1].grid()
plt.draw()
axs[2].set_title('Regression Trader: Control Panel')
axs[3].set_title('Historical Data')
axs[2].set_ylabel('Price')
axs[2].set_xlabel('Server Time')
ticker_text = fig.text(0.5, 0.43, '' , multialignment="left")
time_text = fig.text(0.5, 0.40, '' , multialignment="left")
yield_text = fig.text(0.5, 0.37, '', multialignment="left")
lossyield_text = fig.text(0.5, 0.34, '', multialignment="left")
totalsell_text = fig.text(0.5, 0.31, '', multialignment="left")
def _closeshop(event,tickerlist):
jes.closeshop(tickerlist)
def on_click(event):
print('doubleclick')
def _yes(event):
global graphswitcheroo
graphswitcheroo += 1
if(graphswitcheroo>=14):
graphswitcheroo = 0
def _rendergraph(event):
print('Generating Graph')
axs[3].clear()
axs[2].set_title('Regression Trader: Controll Panel')
axs[3].set_title('Historical Data')
axs[3].set_ylabel('Price')
axs[3].set_xlabel('Server Time')
axs[3].grid()
x5 = [float(i) for i in con.trackertime_list[graphswitcheroo]]
y5 = [float(i) for i in con.tracker_list[graphswitcheroo]]
axs[3].plot(x5,y5)
axs[3].scatter(sellx, selly,c='red', linewidths=2.0,edgecolors='black',s=60)
axs[3].scatter(buyx,buyy,c='green',linewidths=2.0,edgecolors='black',s=60)
plt.connect('button_press_event', on_click)
nexcut = plt.axes([0.15, 0.40, .17, .05], facecolor='k')
graphcut = plt.axes([0.15, 0.32, .3,.06], facecolor = 'k')
wrapcut = plt.axes([0.78, 0.38, .17,.05], facecolor = 'k')
bnexcut = Button(nexcut, 'Next Stock', color='red', hovercolor='white')
bgraphcut = Button(graphcut, 'Generate Graph', color='blue', hovercolor='white')
bwrapcut = Button(wrapcut, 'Wrap It Up', color='yellow',hovercolor='white')
bgraphcut.on_clicked(_rendergraph)
bnexcut.on_clicked(_yes)
bwrapcut.on_clicked(_closeshop)
while(True):
x1 = [float(i) for i in con.trackertime_list[graphswitcheroo][-1000:]]
y1 = [float(i) for i in con.tracker_list[graphswitcheroo][-1000:]]
x2 =[float(i) for i in con.temptime_list[graphswitcheroo]]
y2 =[float(i) for i in con.temp_list[graphswitcheroo]]
buyx = [float(i) for i in jes.buytime_list[graphswitcheroo]]
buyy = [float(i) for i in jes.buy_list[graphswitcheroo]]
sellx =[float(i) for i in jes.selltime_list[graphswitcheroo]]
selly =[float(i) for i in jes.sell_list[graphswitcheroo]]
t_ylim(0.98*float(min(y1)) ,1.02*float(max(y1)))
time_text.set_text('Current Price: ' + con.temp_list[graphswitcheroo][0].rstrip('0'))
ticker_text.set_text(con.tickerlist[graphswitcheroo])
yield_text.set_text('Permanent Yield: ' + str(jes.tdyyield))
lossyield_text.set_text('Unsold Yield: ' + str(sum(jes.lssyield)))
totalsell_text.set_text('Amount of Trades Holding Assets: ' + str(sum(jes.totalsell)))
axs[0].set_xlim(float(x1[0]), float(x1[-1]))
axs[1].set_xlim(float(x1[0]), float(x1[-1]))
if(len(buyx) == len(buyy)):
buydata.set_offsets(numpy.c_[buyx, buyy])
if(len(sellx)==len(selly)):
selldata.set_offsets(numpy.c_[sellx, selly])
if(numpy.shape(numpy.array(x1))==numpy.shape(numpy.array(y1))):
livedata.set_offsets(numpy.c_[x1, y1])
toparray = [float(i) for i in con.toperror_list[graphswitcheroo]]
print(toparray)
bottomarray = [float(i) for i in con.bottomerror_list[graphswitcheroo]]
toparray2 = [float(i) for i in con.toperror2_list[graphswitcheroo]]
bottomarray2 = [float(i) for i in con.bottomerror2_list[graphswitcheroo]]
if(len(con.tracker_list[graphswitcheroo])>=35):
x3 =[float(i) for i in con.slopetime_list[graphswitcheroo]]
y3 =[float(i) for i in con.slope_list[graphswitcheroo]]
ln1.set_data(x3, toparray)
ln2.set_data(x3, bottomarray)
print("The dimensin of x2 is: {}".format(len(x2)))
print("The dimensin of of line1 is: {}".format(len(con.line1_list[graphswitcheroo])))
print(con.line1_list[graphswitcheroo])
ln.set_data(x2,con.line1_list[graphswitcheroo])
slopedata.set_offsets(numpy.c_[x3,y3])
axs[1].set_xlim(x3[0], x3[-1])
axs[1].set_ylim(min(bottomarray),max(toparray))
if(len(con.tracker_list[graphswitcheroo])>=95):
x4 =[float(i) for i in con.slopetime2_list[graphswitcheroo]]
y4 =[float(i) for i in con.slope2_list[graphswitcheroo]]
y60 = [float(i) for i in con.line2_list[graphswitcheroo]]
x60 = [float(i) for i in con.temptime2_list[graphswitcheroo][0:64]]
ln11.set_data(x4, toparray2)
ln22.set_data(x4, bottomarray2)
lnn.set_data(x60 ,y60)
slopedata2.set_offsets(numpy.c_[x4,y4])
for i in range(len(con.tickerlist)):
if len(con.tracker_list[i])>100:
jes.defender[i] = True;
Jesus().decide(con.startbalnace, con.toperror_list, con.bottomerror_list, con.toperror2_list, con.bottomerror2_list, con.temp_list, con.temptime_list, con.tickerlist, con.tracker_list)
plt.pause(0.01)
| true | true |
1c2d157ae31bc30ab6e3cd6918790e543c9f2f6c | 1,196 | py | Python | setup.py | troywilson/so-lazy | bb5a884f2af2db975c065bb39874af9a47ff1f3b | [
"Apache-2.0"
] | null | null | null | setup.py | troywilson/so-lazy | bb5a884f2af2db975c065bb39874af9a47ff1f3b | [
"Apache-2.0"
] | null | null | null | setup.py | troywilson/so-lazy | bb5a884f2af2db975c065bb39874af9a47ff1f3b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
'''Setup configuration for so_lazy package'''
import json
import setuptools
PKG_NAME = 'so_lazy'
# Shared package metadata (version, author, URLs, ...) lives in
# <package>/pkg_info.json and is read here at build time.
with open(PKG_NAME + '/pkg_info.json') as fh:
    _pkg_info = json.load(fh)
# The PyPI long description is the project README, rendered as Markdown.
with open('README.md', 'r') as fh:
    long_description = fh.read()
setuptools.setup(
    name=PKG_NAME,
    version=_pkg_info['version'],
    author=_pkg_info['author'],
    description=_pkg_info['description'],
    long_description=long_description,
    long_description_content_type='text/markdown',
    url=_pkg_info['homepage'],
    download_url=_pkg_info['download'],
    license=_pkg_info['license'],
    packages=setuptools.find_packages(),
    include_package_data=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    python_requires='>=3.5'
)
| 29.170732 | 70 | 0.655518 |
import json
import setuptools
PKG_NAME = 'so_lazy'
with open(PKG_NAME + '/pkg_info.json') as fh:
_pkg_info = json.load(fh)
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name=PKG_NAME,
version=_pkg_info['version'],
author=_pkg_info['author'],
description=_pkg_info['description'],
long_description=long_description,
long_description_content_type='text/markdown',
url=_pkg_info['homepage'],
download_url=_pkg_info['download'],
license=_pkg_info['license'],
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development :: Libraries :: Python Modules'
],
python_requires='>=3.5'
)
| true | true |
1c2d15a29bbaf8b5ddc10f33905d8c185a433674 | 1,941 | py | Python | test/test_management_entity_all_of.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 21 | 2018-03-29T14:20:35.000Z | 2021-10-13T05:11:41.000Z | test/test_management_entity_all_of.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 14 | 2018-01-30T15:45:46.000Z | 2022-02-23T14:23:21.000Z | test/test_management_entity_all_of.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 18 | 2018-01-03T15:09:56.000Z | 2021-07-16T02:21:54.000Z | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.management_entity_all_of import ManagementEntityAllOf # noqa: E501
from intersight.rest import ApiException
class TestManagementEntityAllOf(unittest.TestCase):
    """ManagementEntityAllOf unit test stubs (generated scaffolding)."""
    def setUp(self):
        # No fixtures needed yet; kept for symmetry with tearDown.
        pass
    def tearDown(self):
        pass
    def testManagementEntityAllOf(self):
        """Test ManagementEntityAllOf"""
        # FIXME: construct object with mandatory attributes with example values
        # model = intersight.models.management_entity_all_of.ManagementEntityAllOf() # noqa: E501
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 51.078947 | 1,052 | 0.783101 |
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.management_entity_all_of import ManagementEntityAllOf
from intersight.rest import ApiException
class TestManagementEntityAllOf(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testManagementEntityAllOf(self):
        pass
if __name__ == '__main__':
unittest.main()
| true | true |
1c2d15c565c541b8309285469165d695092f9e23 | 117,050 | py | Python | emsdk.py | Hillsie/emsdk | bb61bc17bd9183fbe691c2b3d945731f63525958 | [
"MIT"
] | null | null | null | emsdk.py | Hillsie/emsdk | bb61bc17bd9183fbe691c2b3d945731f63525958 | [
"MIT"
] | null | null | null | emsdk.py | Hillsie/emsdk | bb61bc17bd9183fbe691c2b3d945731f63525958 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright 2019 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import copy
import errno
import json
import multiprocessing
import os
import os.path
import platform
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import zipfile
if sys.version_info >= (3,):
from urllib.parse import urljoin
from urllib.request import urlopen
import functools
else:
from urlparse import urljoin
from urllib2 import urlopen
# EMSDK_DEV is a developer mode flag, which, if true, the SDK is downloaded from a 'staging' online source,
# instead of the public source. New releases are first deployed to the staging source for testing, before
# being published to the public. Don't enable this unless you develop EMSDK itself and need to access the
# staging source repository instead.
EMSDK_DEV = bool(os.getenv('EMSDK_DEV')) if os.getenv('EMSDK_DEV') is not None else False

if EMSDK_DEV:
    print('EMSDK_DEV active.')
    emsdk_master_server = 'http://clb.demon.fi/emscripten_dev/packages/'
else:
    emsdk_master_server = 'https://storage.googleapis.com/webassembly/emscripten-releases-builds/deps/'

emsdk_packages_url = emsdk_master_server

emscripten_releases_repo = 'https://chromium.googlesource.com/emscripten-releases'

emscripten_releases_download_url_template = "https://storage.googleapis.com/webassembly/emscripten-releases-builds/%s/%s/wasm-binaries.%s"

emsdk_zip_download_url = 'https://github.com/emscripten-core/emsdk/archive/master.zip'

# Subdirectory (relative to the emsdk root) where downloaded archives are cached.
zips_subdir = 'zips/'

# Enable this to do very verbose printing about the different steps that are being run. Useful for debugging.
VERBOSE = int(os.getenv('EMSDK_VERBOSE', '0'))
# True when stdout is a real terminal (and EMSDK_NOTTY is unset): enables in-place progress updates.
TTY_OUTPUT = not os.getenv('EMSDK_NOTTY', not sys.stdout.isatty())
POWERSHELL = bool(os.getenv('EMSDK_POWERSHELL'))

# Host platform detection; at most one of WINDOWS/OSX/LINUX ends up True.
WINDOWS = False
if os.name == 'nt' or (os.getenv('SYSTEMROOT') is not None and 'windows' in os.getenv('SYSTEMROOT').lower()) or (os.getenv('COMSPEC') is not None and 'windows' in os.getenv('COMSPEC').lower()):
    WINDOWS = True
    ENVPATH_SEPARATOR = ';'

MSYS = False
if os.getenv('MSYSTEM'):
    MSYS = True
    # https://stackoverflow.com/questions/37460073/msys-vs-mingw-internal-environment-variables
    if os.getenv('MSYSTEM') != 'MSYS' and os.getenv('MSYSTEM') != 'MINGW64':
        print('Warning: MSYSTEM environment variable is present, and is set to "' + os.getenv('MSYSTEM') + '". This shell has not been tested with emsdk and may not work.')

OSX = False
if platform.mac_ver()[0] != '':
    OSX = True
    ENVPATH_SEPARATOR = ':'

LINUX = False
if not OSX and (platform.system() == 'Linux' or os.name == 'posix'):
    LINUX = True
    ENVPATH_SEPARATOR = ':'

UNIX = (OSX or LINUX)

# Host CPU architecture detection.
ARCH = 'unknown'
# platform.machine() may return AMD64 on windows, so standardize the case.
machine = platform.machine().lower()
if machine.startswith('x64') or machine.startswith('amd64') or machine.startswith('x86_64'):
    ARCH = 'x86_64'
elif machine.endswith('86'):
    ARCH = 'x86'
elif machine.startswith('aarch64') or machine.lower().startswith('arm64'):
    ARCH = 'aarch64'
elif platform.machine().startswith('arm'):
    ARCH = 'arm'
else:
    print("Warning: unknown machine architecture " + machine)
    print()

# Don't saturate all cores to not steal the whole system, but be aggressive.
CPU_CORES = int(os.environ.get('EMSDK_NUM_CORES', max(multiprocessing.cpu_count() - 1, 1)))

CMAKE_BUILD_TYPE_OVERRIDE = None

# If true, perform a --shallow clone of git.
GIT_CLONE_SHALLOW = False

# If true, LLVM backend is built with tests enabled, and Binaryen is built with Visual Studio static analyzer enabled.
BUILD_FOR_TESTING = False

# If 'auto', assertions are decided by the build type (Release&MinSizeRel=disabled, Debug&RelWithDebInfo=enabled)
# Other valid values are 'ON' and 'OFF'
ENABLE_LLVM_ASSERTIONS = 'auto'
def os_name():
    """Return the emsdk identifier of the host OS: 'win', 'linux' or 'osx'."""
    for is_host, name in ((WINDOWS, 'win'), (LINUX, 'linux'), (OSX, 'osx')):
        if is_host:
            return name
    raise Exception('unknown OS')
def os_name_for_emscripten_releases():
    """Return the OS name used by emscripten-releases download buckets ('mac', not 'osx')."""
    for is_host, name in ((WINDOWS, 'win'), (LINUX, 'linux'), (OSX, 'mac')):
        if is_host:
            return name
    raise Exception('unknown OS')
def debug_print(msg, **args):
    """Print 'msg' (forwarding any print() keyword arguments) only in verbose mode."""
    if not VERBOSE:
        return
    print(msg, **args)
def to_unix_path(p):
    """Return 'p' with every Windows backslash separator turned into a forward slash."""
    return '/'.join(p.split('\\'))
def emsdk_path():
    """Absolute, forward-slash path of the directory containing this script."""
    here = os.path.dirname(os.path.realpath(__file__))
    return to_unix_path(here)
# Directory where the user's .emscripten configuration file lives; defaults to $HOME.
emscripten_config_directory = os.path.expanduser("~/")
# If .emscripten exists, we are configuring as embedded inside the emsdk directory.
if os.path.exists(os.path.join(emsdk_path(), '.emscripten')):
    emscripten_config_directory = emsdk_path()

# Name of the generated environment-setup script, chosen to match the active shell.
EMSDK_SET_ENV = 'emsdk_set_env.ps1' if POWERSHELL else 'emsdk_set_env.bat' if (WINDOWS and not MSYS) else 'emsdk_set_env.sh'

# File suffixes recognized as downloadable archives.
ARCHIVE_SUFFIXES = ('zip', '.tar', '.gz', '.xz', '.tbz2', '.bz2')
# Finds the given executable 'program' in PATH. Operates like the Unix tool 'which'.
def which(program):
    """Search PATH for 'program' and return its full path, or None if not found.

    On Windows, when 'program' carries no extension, the extensions .exe, .cmd
    and .bat are also tried for every PATH entry.
    """
    def is_exe(fpath):
        # The executable bit is meaningless on Windows, so only existence is tested there.
        return os.path.isfile(fpath) and (WINDOWS or os.access(fpath, os.X_OK))

    fpath, fname = os.path.split(program)
    if fpath:
        # 'program' already contains a directory component: no PATH search.
        if is_exe(program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            path = path.strip('"')
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
            if WINDOWS and '.' not in fname:
                if is_exe(exe_file + '.exe'):
                    return exe_file + '.exe'
                if is_exe(exe_file + '.cmd'):
                    return exe_file + '.cmd'
                if is_exe(exe_file + '.bat'):
                    return exe_file + '.bat'
    return None


def vswhere(version):
    """Locate a Visual Studio install of the given major 'version' via vswhere.exe.

    Returns the installation path, or '' if not found or on any error.
    """
    try:
        program_files = os.environ['ProgramFiles(x86)'] if 'ProgramFiles(x86)' in os.environ else os.environ['ProgramFiles']
        vswhere_path = os.path.join(program_files, 'Microsoft Visual Studio', 'Installer', 'vswhere.exe')
        output = json.loads(subprocess.check_output([vswhere_path, '-latest', '-version', '[%s.0,%s.0)' % (version, version + 1), '-requires', 'Microsoft.VisualStudio.Component.VC.Tools.x86.x64', '-property', 'installationPath', '-format', 'json']))
        # Visual Studio 2017 Express is not included in the above search, and it does not have the VC.Tools.x86.x64 tool, so do a catch-all attempt as a fallback, to detect Express version.
        if len(output) == 0:
            output = json.loads(subprocess.check_output([vswhere_path, '-latest', '-version', '[%s.0,%s.0)' % (version, version + 1), '-products', '*', '-property', 'installationPath', '-format', 'json']))
        return str(output[0]['installationPath']) if len(output) > 0 else ''
    except Exception:
        return ''


def vs_filewhere(installation_path, platform, file):
    """Find the directory containing 'file' inside a VS install for the given target platform.

    Runs vcvarsall.bat followed by 'where' in a single shell and returns the
    directory portion of the first match; '' on any failure.
    NOTE(review): subprocess.check_output returns bytes on Python 3, so the str
    split below would raise there and be swallowed into '' — confirm intent.
    """
    try:
        vcvarsall = os.path.join(installation_path, 'VC\\Auxiliary\\Build\\vcvarsall.bat')
        env = subprocess.check_output('cmd /c "%s" %s & where %s' % (vcvarsall, platform, file))
        paths = [path[:-len(file)] for path in env.split('\r\n') if path.endswith(file)]
        return paths[0]
    except Exception:
        return ''
# CMake generator used when building tools from source; overridable from the command line.
CMAKE_GENERATOR = 'Unix Makefiles'
if WINDOWS:
    # Detect which CMake generator to use when building on Windows
    if '--mingw' in sys.argv:
        CMAKE_GENERATOR = 'MinGW Makefiles'
    elif '--vs2013' in sys.argv:
        CMAKE_GENERATOR = 'Visual Studio 12'
    elif '--vs2015' in sys.argv:
        CMAKE_GENERATOR = 'Visual Studio 14'
    elif '--vs2017' in sys.argv:
        CMAKE_GENERATOR = 'Visual Studio 15'
    else:
        # No explicit flag given: probe the system for installed toolchains.
        program_files = os.environ['ProgramFiles(x86)'] if 'ProgramFiles(x86)' in os.environ else os.environ['ProgramFiles']
        vs2017_exists = len(vswhere(15)) > 0
        vs2015_exists = 'VS140COMNTOOLS' in os.environ or 'VSSDK140Install' in os.environ or os.path.isdir(os.path.join(program_files, 'Microsoft Visual Studio 14.0'))
        vs2013_exists = 'VS120COMNTOOLS' in os.environ or os.path.isdir(os.path.join(program_files, 'Microsoft Visual Studio 12.0'))
        mingw_exists = which('mingw32-make') is not None and which('g++') is not None
        if vs2015_exists:
            CMAKE_GENERATOR = 'Visual Studio 14'
        elif vs2017_exists:
            CMAKE_GENERATOR = 'Visual Studio 15' # VS2017 has an LLVM build issue, see https://github.com/kripken/emscripten-fastcomp/issues/185
        elif mingw_exists:
            CMAKE_GENERATOR = 'MinGW Makefiles'
        elif vs2013_exists:
            CMAKE_GENERATOR = 'Visual Studio 12' # VS2013 is no longer supported, so attempt it as a last resort if someone might want to insist using it.
        else:
            CMAKE_GENERATOR = '' # No detected generator

# The generator flags were consumed above; strip them so later argument parsing does not see them.
sys.argv = [a for a in sys.argv if a not in ('--mingw', '--vs2013', '--vs2015', '--vs2017')]
# Computes a suitable path prefix to use when building with a given generator.
def cmake_generator_prefix():
    """Return the directory-name suffix that encodes the active CMake generator."""
    prefixes = {
        'Visual Studio 15': '_vs2017',
        'Visual Studio 14': '_vs2015',
        'MinGW Makefiles': '_mingw',
    }
    # Unix Makefiles and Visual Studio 2013 use no prefix, for backwards path compatibility.
    return prefixes.get(CMAKE_GENERATOR, '')
# Removes a directory tree even if it was readonly, and doesn't throw exception on failure.
def remove_tree(d):
    """Recursively delete directory 'd'; read-only entries are force-removed, errors ignored."""
    debug_print('remove_tree(' + str(d) + ')')
    try:
        def remove_readonly_and_try_again(func, path, exc_info):
            # shutil.rmtree error hook: clear the read-only bit and retry the failed op once.
            if not (os.stat(path).st_mode & stat.S_IWRITE):
                os.chmod(path, stat.S_IWRITE)
                func(path)
            else:
                raise
        shutil.rmtree(d, onerror=remove_readonly_and_try_again)
    except Exception as e:
        debug_print('remove_tree threw an exception, ignoring: ' + str(e))
def import_pywin32():
    """Import and return the (win32api, win32con) PyWin32 modules on Windows.

    Exits the program with an explanatory message if they cannot be imported.
    Returns None on non-Windows hosts.
    """
    if WINDOWS:
        try:
            import win32api
            import win32con
            return win32api, win32con
        except Exception:
            print('Failed to import Python Windows extensions win32api and win32con. Make sure you are using the version of python available in emsdk, or install PyWin extensions to the distribution of Python you are attempting to use. (This script was launched in python instance from "' + sys.executable + '")')
            sys.exit(1)
def win_set_environment_variable_direct(key, value, system=True):
    """Write key=value permanently into the Windows registry environment section.

    'system' selects the machine-wide (all users) section, which requires
    administrator rights; otherwise the current user's section is used.
    Broadcasts WM_SETTINGCHANGE afterwards so running shells see the change.
    """
    prev_path = os.environ['PATH']
    try:
        # Temporarily extend PATH with the emsdk-activated Python so PyWin32 DLLs can load.
        py = find_used_python()
        if py:
            py_path = to_native_path(py.expand_vars(py.activated_path))
            os.environ['PATH'] = os.environ['PATH'] + ';' + py_path
        win32api, win32con = import_pywin32()
        if system:
            # Read globally from ALL USERS section.
            folder = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, 'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment', 0, win32con.KEY_ALL_ACCESS)
        else:
            # Register locally from CURRENT USER section.
            folder = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER, 'Environment', 0, win32con.KEY_ALL_ACCESS)
        win32api.RegSetValueEx(folder, key, 0, win32con.REG_EXPAND_SZ, value)
        debug_print('Set key=' + key + ' with value ' + value + ' in registry.')
    except Exception as e:
        if e.args[0] == 5: # 'Access is denied.'
            print('Error! Failed to set the environment variable \'' + key + '\'! Setting environment variables permanently requires administrator access. Please rerun this command with administrative privileges. This can be done for example by holding down the Ctrl and Shift keys while opening a command prompt in start menu.')
            sys.exit(1)
        print('Failed to write environment variable ' + key + ':', file=sys.stderr)
        print(str(e), file=sys.stderr)
        # NOTE(review): if RegOpenKeyEx itself raised, 'folder' is unbound here — confirm.
        win32api.RegCloseKey(folder)
        os.environ['PATH'] = prev_path
        return None

    win32api.RegCloseKey(folder)
    os.environ['PATH'] = prev_path
    win32api.PostMessage(win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 'Environment')
def win_get_environment_variable(key, system=True):
    """Read environment variable 'key' from the Windows registry, unexpanded if possible.

    'system' selects the machine-wide section instead of the current user's.
    Returns the value, or None if the variable does not exist or cannot be read.
    Falls back to os.environ when PyWin32 is unavailable (values arrive expanded).
    """
    prev_path = os.environ['PATH']
    try:
        # Temporarily extend PATH with the emsdk-activated Python so PyWin32 DLLs can load.
        py = find_used_python()
        if py:
            py_path = to_native_path(py.expand_vars(py.activated_path))
            os.environ['PATH'] = os.environ['PATH'] + ';' + py_path
        try:
            import win32api
            import win32con
            if system: # Read globally from ALL USERS section.
                folder = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, 'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment')
            else: # Register locally from CURRENT USER section.
                folder = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, 'Environment')
            value = str(win32api.RegQueryValueEx(folder, key)[0])
        except Exception:
            # PyWin32 is not available - read via os.environ. This has the drawback that expansion items such as %PROGRAMFILES% will have been expanded, so
            # need to be precise not to set these back to system registry, or expansion items would be lost.
            return os.environ[key]
    except Exception as e:
        if e.args[0] != 2: # 'The system cannot find the file specified.'
            print('Failed to read environment variable ' + key + ':', file=sys.stderr)
            print(str(e), file=sys.stderr)
        try:
            win32api.RegCloseKey(folder)
        except Exception:
            pass
        os.environ['PATH'] = prev_path
        return None

    win32api.RegCloseKey(folder)
    os.environ['PATH'] = prev_path
    return value
def win_environment_variable_exists(key, system=True):
    """Return True when the given registry environment variable has a non-empty value."""
    # bool() collapses both the missing (None) and empty-string cases to False.
    return bool(win_get_environment_variable(key, system))
def win_get_active_environment_variable(key):
    """Read 'key' from the user environment first, falling back to the system one."""
    user_value = win_get_environment_variable(key, False)
    if user_value is None:
        return win_get_environment_variable(key, True)
    return user_value
def win_set_environment_variable(key, value, system=True):
    """Set (or, when 'value' is falsy, delete) a persistent Windows environment variable.

    System-wide writes go directly through the registry (needs admin rights);
    per-user writes use SETX. No-op when the variable already has this value.
    """
    debug_print('set ' + str(key) + '=' + str(value) + ', in system=' + str(system), file=sys.stderr)
    previous_value = win_get_environment_variable(key, system)
    if previous_value == value:
        debug_print(' no need to set, since same value already exists.')
        return # No need to elevate UAC for nothing to set the same value, skip.

    if not value:
        # Deletion path: drop the registry value via REG DELETE.
        try:
            if system:
                cmd = ['REG', 'DELETE', 'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment', '/V', key, '/f']
            else:
                cmd = ['REG', 'DELETE', 'HKCU\\Environment', '/V', key, '/f']
            debug_print(str(cmd))
            value = subprocess.call(cmd, stdout=subprocess.PIPE)
        except Exception:
            return
        return

    try:
        if system:
            win_set_environment_variable_direct(key, value, system)
            return
        value = value.replace('%', '^%') # Escape % signs so that we don't expand references to environment variables.
        # SETX silently truncates values of 1024 characters or more; refuse instead.
        if len(value) >= 1024:
            print('ERROR! The new environment variable ' + key + ' is more than 1024 characters long! A value this long cannot be set via command line: please add the environment variable specified above to system environment manually via Control Panel.', file=sys.stderr)
            sys.exit(1)
        cmd = ['SETX', key, value]
        debug_print(str(cmd))
        retcode = subprocess.call(cmd, stdout=subprocess.PIPE)
        if retcode != 0:
            print('ERROR! Failed to set environment variable ' + key + '=' + value + '. You may need to set it manually.', file=sys.stderr)
    except Exception as e:
        print('ERROR! Failed to set environment variable ' + key + '=' + value + ':', file=sys.stderr)
        print(str(e), file=sys.stderr)
        print('You may need to set it manually.', file=sys.stderr)


def win_delete_environment_variable(key, system=True):
    """Permanently remove the given environment variable from the Windows registry."""
    debug_print('win_delete_environment_variable(key=' + key + ', system=' + str(system) + ')')
    # A None value triggers the deletion path inside win_set_environment_variable.
    win_set_environment_variable(key, None, system)
# Returns the absolute pathname to the given path inside the Emscripten SDK.
def sdk_path(path):
    """Resolve 'path' relative to the emsdk root directory; absolute paths pass through."""
    if os.path.isabs(path):
        return path
    root = os.path.dirname(os.path.realpath(__file__))
    return to_unix_path(os.path.join(root, path))
# Modifies the given file in-place to contain '\r\n' line endings.
def file_to_crlf(filename):
    """Rewrite 'filename' in place so that every line ending is CRLF ('\\r\\n')."""
    # Use binary mode for both read and write: the original read text but wrote the
    # str to a 'wb' handle, which raises TypeError on Python 3, and binary mode also
    # prevents the platform's automatic newline translation from mangling the result.
    with open(filename, 'rb') as f:
        text = f.read()
    # Normalize to LF first so existing CRLF sequences are not doubled to CRCRLF.
    text = text.replace(b'\r\n', b'\n').replace(b'\n', b'\r\n')
    with open(filename, 'wb') as f:
        f.write(text)
# Modifies the given file in-place to contain '\n' line endings.
def file_to_lf(filename):
    """Rewrite 'filename' in place so that every line ending is a lone LF ('\\n')."""
    # Binary mode throughout: the original read text but wrote the str to a 'wb'
    # handle, which raises TypeError on Python 3.
    with open(filename, 'rb') as f:
        text = f.read()
    text = text.replace(b'\r\n', b'\n')
    with open(filename, 'wb') as f:
        f.write(text)
# Removes a single file, suppressing exceptions on failure.
def rmfile(filename):
    """Delete 'filename', silently ignoring filesystem errors (e.g. file missing)."""
    debug_print('rmfile(' + filename + ')')
    try:
        os.remove(filename)
    except OSError:
        # Only swallow filesystem errors; the original bare 'except' would also
        # hide KeyboardInterrupt/SystemExit and genuine programming errors.
        pass
def fix_lineendings(filename):
    """Convert the given file in place to the host platform's native line endings."""
    converter = file_to_crlf if WINDOWS else file_to_lf
    converter(filename)
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
def mkdir_p(path):
    """Create directory 'path' and any missing parents; no-op when it already exists."""
    debug_print('mkdir_p(' + path + ')')
    if os.path.exists(path):
        return
    try:
        os.makedirs(path)
    except OSError as exc: # Python >2.5
        # Tolerate a race where another process created the directory first.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def num_files_in_directory(path):
    """Count the entries of directory 'path' that still exist on disk (0 if not a directory)."""
    if not os.path.isdir(path):
        return 0
    entries = os.listdir(path)
    return sum(1 for name in entries if os.path.exists(os.path.join(path, name)))
def run(cmd, cwd=None):
    """Run command list 'cmd' in directory 'cwd', streaming its output; return the exit code."""
    debug_print('run(cmd=' + str(cmd) + ', cwd=' + str(cwd) + ')')
    proc = subprocess.Popen(cmd, cwd=cwd, env=os.environ.copy())
    proc.communicate()
    retcode = proc.returncode
    if retcode:
        print(str(cmd) + ' failed with error code ' + str(retcode) + '!')
    return retcode
# http://pythonicprose.blogspot.fi/2009/10/python-extract-targz-archive.html
def untargz(source_filename, dest_dir, unpack_even_if_exists=False):
    """Unpack a tar archive into dest_dir using the external 'tar' tool.

    The archive's single top-level directory is stripped ('--strip 1').
    Skips the work when dest_dir already has content, unless unpack_even_if_exists.
    Always returns True (a tar failure is not propagated).
    """
    debug_print('untargz(source_filename=' + source_filename + ', dest_dir=' + dest_dir + ')')
    if not unpack_even_if_exists and num_files_in_directory(dest_dir) > 0:
        print("File '" + source_filename + "' has already been unpacked, skipping.")
        return True
    print("Unpacking '" + source_filename + "' to '" + dest_dir + "'")
    mkdir_p(dest_dir)
    run(['tar', '-xvf' if VERBOSE else '-xf', sdk_path(source_filename), '--strip', '1'], cwd=dest_dir)
    # tfile = tarfile.open(source_filename, 'r:gz')
    # tfile.extractall(dest_dir)
    return True
# On Windows, it is not possible to reference path names that are longer than ~260 characters, unless the path is referenced via a "\\?\" prefix.
# See https://msdn.microsoft.com/en-us/library/aa365247.aspx#maxpath and http://stackoverflow.com/questions/3555527/python-win32-filename-length-workaround
# In that mode, forward slashes cannot be used as delimiters.
def fix_potentially_long_windows_pathname(pathname):
    """On Windows, return 'pathname' in the '\\\\?\\' long-path form; elsewhere return it unchanged."""
    if not WINDOWS:
        return pathname
    # A long *relative* path cannot be expressed in the '\\?\' form; warn so emsdk gets refactored.
    if len(pathname) > 200 and not os.path.isabs(pathname):
        print('Warning: Seeing a relative path "' + pathname + '" which is dangerously long for being referenced as a short Windows path name. Refactor emsdk to be able to handle this!')
    if pathname.startswith('\\\\?\\'):
        return pathname
    return '\\\\?\\' + os.path.normpath(pathname.replace('/', '\\'))
# On windows, rename/move will fail if the destination exists, and there is no
# race-free way to do it. This method removes the destination if it exists, so
# the move always works
def move_with_overwrite(src, dest):
    """Rename 'src' to 'dest', deleting any pre-existing 'dest' first."""
    if os.path.exists(dest):
        os.remove(dest)
    os.rename(src, dest)
# http://stackoverflow.com/questions/12886768/simple-way-to-unzip-file-in-python-on-all-oses
def unzip(source_filename, dest_dir, unpack_even_if_exists=False):
    """Extract a zip archive into dest_dir, stripping a single common top-level directory.

    Skips the work if dest_dir already has content, unless unpack_even_if_exists.
    Unix file permissions stored in the zip are restored after extraction.
    Returns True on success; on failure prints the reason and returns False
    (a corrupted zip file is additionally deleted so it will be re-downloaded).
    """
    debug_print('unzip(source_filename=' + source_filename + ', dest_dir=' + dest_dir + ')')
    if not unpack_even_if_exists and num_files_in_directory(dest_dir) > 0:
        print("File '" + source_filename + "' has already been unpacked, skipping.")
        return True
    print("Unpacking '" + source_filename + "' to '" + dest_dir + "'")
    mkdir_p(dest_dir)
    common_subdir = None
    try:
        with zipfile.ZipFile(source_filename) as zf:
            # Implement '--strip 1' behavior to unzipping by testing if all the files in the zip reside in a common subdirectory, and if so,
            # we move the output tree at the end of uncompression step.
            for member in zf.infolist():
                words = member.filename.split('/')
                if len(words) > 1: # If there is a directory component?
                    if common_subdir is None:
                        common_subdir = words[0]
                    elif common_subdir != words[0]:
                        common_subdir = None
                        break
                else:
                    common_subdir = None
                    break

            unzip_to_dir = dest_dir
            if common_subdir:
                # Extract into a sibling temp directory first; files are moved into dest_dir below.
                unzip_to_dir = os.path.join('/'.join(dest_dir.split('/')[:-1]), 'unzip_temp')

            # Now do the actual decompress.
            for member in zf.infolist():
                zf.extract(member, fix_potentially_long_windows_pathname(unzip_to_dir))
                dst_filename = os.path.join(unzip_to_dir, member.filename)

                # See: https://stackoverflow.com/questions/42326428/zipfile-in-python-file-permission
                unix_attributes = member.external_attr >> 16
                if unix_attributes:
                    os.chmod(dst_filename, unix_attributes)

                # Move the extracted file to its final location without the base directory name, if we are stripping that away.
                if common_subdir:
                    if not member.filename.startswith(common_subdir):
                        raise Exception('Unexpected filename "' + member.filename + '"!')
                    stripped_filename = '.' + member.filename[len(common_subdir):]
                    final_dst_filename = os.path.join(dest_dir, stripped_filename)
                    if stripped_filename.endswith('/'): # Directory?
                        d = fix_potentially_long_windows_pathname(final_dst_filename)
                        if not os.path.isdir(d):
                            os.mkdir(d)
                    else:
                        parent_dir = os.path.dirname(fix_potentially_long_windows_pathname(final_dst_filename))
                        if parent_dir and not os.path.exists(parent_dir):
                            os.makedirs(parent_dir)
                        move_with_overwrite(fix_potentially_long_windows_pathname(dst_filename), fix_potentially_long_windows_pathname(final_dst_filename))

        if common_subdir:
            # Best-effort cleanup of the temporary extraction tree.
            try:
                remove_tree(unzip_to_dir)
            except:
                pass
    except zipfile.BadZipfile as e:
        print("Unzipping file '" + source_filename + "' failed due to reason: " + str(e) + "! Removing the corrupted zip file.")
        rmfile(source_filename)
        return False
    except Exception as e:
        print("Unzipping file '" + source_filename + "' failed due to reason: " + str(e))
        return False

    return True
# This function interprets whether the given string looks like a path to a directory instead of a file, without looking at the actual filesystem.
# 'a/b/c' points to directory, so does 'a/b/c/', but 'a/b/c.x' is parsed as a filename
def path_points_to_directory(path):
    """Guess, from the string alone, whether 'path' names a directory rather than a file."""
    if path == '.':
        return True
    last_sep = max(path.rfind('/'), path.rfind('\\'))
    last_dot = path.rfind('.')
    if last_dot == -1 or last_dot < last_sep:
        # No suffix on the final path component -> treated as a directory.
        return True
    # Very simple logic for the only file suffixes used by emsdk downloader. Other
    # suffixes, like 'clang-3.2' are treated as dirs.
    return path[last_dot:] not in ('.exe', '.zip', '.txt')
def get_content_length(download):
    """Best-effort read of the Content-Length header from a urlopen() response.

    Tries the Python 2 and Python 3 header APIs in turn; returns 0 when the
    length cannot be determined.
    NOTE(review): hasattr(meta.getheaders, "Content-Length") tests an attribute
    on the bound method object, which looks like it is always False — confirm.
    """
    try:
        meta = download.info()
        if hasattr(meta, "getheaders") and hasattr(meta.getheaders, "Content-Length"):
            return int(meta.getheaders("Content-Length")[0])
        elif hasattr(download, "getheader") and download.getheader('Content-Length'):
            return int(download.getheader('Content-Length'))
        elif hasattr(meta, "getheader") and meta.getheader('Content-Length'):
            return int(meta.getheader('Content-Length'))
    except Exception:
        pass

    return 0


def get_download_target(url, dstpath, filename_prefix=''):
    """Compute the local filename that downloading 'url' into 'dstpath' would produce.

    If 'dstpath' looks like a directory, the URL's basename (with the optional
    prefix prepended) is appended to it; otherwise 'dstpath' itself is the file.
    """
    file_name = filename_prefix + url.split('/')[-1]
    if path_points_to_directory(dstpath):
        file_name = os.path.join(dstpath, file_name)
    else:
        file_name = dstpath

    # Treat all relative destination paths as relative to the SDK root directory, not the current working directory.
    file_name = sdk_path(file_name)
    return file_name
# On success, returns the filename on the disk pointing to the destination file that was produced
# On failure, returns None.
def download_file(url, dstpath, download_even_if_exists=False, filename_prefix=''):
    """Download 'url' to 'dstpath' with progress output; return the local filename or None.

    An already-downloaded file is reused unless download_even_if_exists is set.
    A partially-written file is deleted on error; Ctrl-C aborts the whole program.
    """
    debug_print('download_file(url=' + url + ', dstpath=' + dstpath + ')')
    file_name = get_download_target(url, dstpath, filename_prefix)

    if os.path.exists(file_name) and not download_even_if_exists:
        print("File '" + file_name + "' already downloaded, skipping.")
        return file_name
    try:
        u = urlopen(url)
        mkdir_p(os.path.dirname(file_name))
        with open(file_name, 'wb') as f:
            file_size = get_content_length(u)
            if file_size > 0:
                print("Downloading: %s from %s, %s Bytes" % (file_name, url, file_size))
            else:
                print("Downloading: %s from %s" % (file_name, url))

            file_size_dl = 0
            # Draw a progress bar 80 chars wide (in non-TTY mode)
            progress_max = 80 - 4
            progress_shown = 0
            block_sz = 8192
            if not TTY_OUTPUT:
                print(' [', end='')
            while True:
                buffer = u.read(block_sz)
                if not buffer:
                    break

                file_size_dl += len(buffer)
                f.write(buffer)
                if file_size:
                    percent = file_size_dl * 100.0 / file_size
                    if TTY_OUTPUT:
                        # Terminal: rewrite the same line with a live byte/percent counter.
                        status = r" %10d [%3.02f%%]" % (file_size_dl, percent)
                        print(status, end='\r')
                    else:
                        # Non-TTY: emit one '-' per unit of progress, no line rewriting.
                        while progress_shown < progress_max * percent / 100:
                            print('-', end='')
                            sys.stdout.flush()
                            progress_shown += 1
            if not TTY_OUTPUT:
                print(']')
                sys.stdout.flush()
    except Exception as e:
        print("Error: Downloading URL '" + url + "': " + str(e))
        if "SSL: CERTIFICATE_VERIFY_FAILED" in str(e) or "urlopen error unknown url type: https" in str(e):
            print("Warning: Possibly SSL/TLS issue. Update or install Python SSL root certificates (2048-bit or greater) supplied in Python folder or https://pypi.org/project/certifi/ and try again.")
        rmfile(file_name)
        return None
    except KeyboardInterrupt:
        print("Aborted by User, exiting")
        rmfile(file_name)
        sys.exit(1)
    return file_name
def run_get_output(cmd, cwd=None):
    """Run 'cmd' capturing its output; return (returncode, stdout, stderr) as text."""
    debug_print('run_get_output(cmd=' + str(cmd) + ', cwd=' + str(cwd) + ')')
    proc = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, env=os.environ.copy(), universal_newlines=True)
    out, err = proc.communicate()
    return (proc.returncode, out, err)
# must_succeed: If false, the search is performed silently without printing out errors if not found. Empty string is returned if git is not found.
# If true, the search is required to succeed, and the execution will terminate with sys.exit(1) if not found.
def GIT(must_succeed=True):
    """Locate a working git executable and return its path ('' when absent and not must_succeed)."""
    # The order in the following is important, and specifies the preferred order of using the git tools.
    # Primarily use git from emsdk if installed. If not, use system git.
    gits = ['git/1.9.4/bin/git.exe', which('git')]
    for git in gits:
        try:
            ret, stdout, stderr = run_get_output([git, '--version'])
            if ret == 0:
                return git
        except:
            # Candidate missing or not runnable; fall through to the next one.
            pass

    if must_succeed:
        if WINDOWS:
            print("ERROR: git executable was not found. Please install it by typing 'emsdk install git-1.9.4', or alternatively by installing it manually from http://git-scm.com/downloads . If you install git manually, remember to add it to PATH")
        elif OSX:
            print("ERROR: git executable was not found. Please install git for this operation! This can be done from http://git-scm.com/ , or by installing XCode and then the XCode Command Line Tools (see http://stackoverflow.com/questions/9329243/xcode-4-4-command-line-tools )")
        elif LINUX:
            print("ERROR: git executable was not found. Please install git for this operation! This can be probably be done using your package manager, see http://git-scm.com/book/en/Getting-Started-Installing-Git")
        else:
            print("ERROR: git executable was not found. Please install git for this operation!")
        sys.exit(1)

    return '' # Not found
def git_repo_version(repo_path):
    """Return the date and hash of the repo's HEAD commit, or '' on failure."""
    returncode, stdout, stderr = run_get_output([GIT(), 'log', '-n', '1', '--pretty="%aD %H"'], cwd=repo_path)
    return stdout.strip() if returncode == 0 else ""
def git_recent_commits(repo_path, n=20):
    """List the hashes of the n most recent commits in 'repo_path', newest first ([] on failure)."""
    returncode, stdout, stderr = run_get_output([GIT(), 'log', '-n', str(n), '--pretty="%H"'], cwd=repo_path)
    if returncode != 0:
        return []
    cleaned = stdout.strip().replace('\r', '').replace('"', '')
    return cleaned.split('\n')
def git_clone(url, dstpath):
    """Clone 'url' into 'dstpath' unless a checkout already exists there; return success."""
    debug_print('git_clone(url=' + url + ', dstpath=' + dstpath + ')')
    if os.path.isdir(os.path.join(dstpath, '.git')):
        print("Repository '" + url + "' already cloned to directory '" + dstpath + "', skipping.")
        return True
    mkdir_p(dstpath)
    # A shallow (depth 1) clone saves bandwidth when history is not needed.
    clone_args = ['--depth', '1'] if GIT_CLONE_SHALLOW else []
    return run([GIT(), 'clone'] + clone_args + [url, dstpath]) == 0
def git_checkout_and_pull(repo_path, branch):
    """Fetch, check out and fast-forward 'branch' in an existing clone; return success."""
    debug_print('git_checkout_and_pull(repo_path=' + repo_path + ', branch=' + branch + ')')
    ret = run([GIT(), 'fetch', 'origin'], repo_path)
    if ret != 0:
        return False
    try:
        print("Fetching latest changes to the branch '" + branch + "' for '" + repo_path + "'...")
        ret = run([GIT(), 'fetch', 'origin'], repo_path)
        if ret != 0:
            return False
        # run([GIT, 'checkout', '-b', branch, '--track', 'origin/'+branch], repo_path)
        ret = run([GIT(), 'checkout', '--quiet', branch], repo_path) # this line assumes that the user has not gone and manually messed with the repo and added new remotes to ambiguate the checkout.
        if ret != 0:
            return False
        ret = run([GIT(), 'merge', '--ff-only', 'origin/' + branch], repo_path) # this line assumes that the user has not gone and made local changes to the repo
        if ret != 0:
            return False
    except:
        print('git operation failed!')
        return False
    print("Successfully updated and checked out branch '" + branch + "' on repository '" + repo_path + "'")
    print("Current repository version: " + git_repo_version(repo_path))
    return True
def git_clone_checkout_and_pull(url, dstpath, branch):
    """Clone 'url' into 'dstpath' (if needed) and update it to the tip of 'branch'."""
    debug_print('git_clone_checkout_and_pull(url=' + url + ', dstpath=' + dstpath + ', branch=' + branch + ')')
    if not git_clone(url, dstpath):
        return False
    return git_checkout_and_pull(dstpath, branch)
# Each tool can have its own build type, or it can be overridden on the command line.
def decide_cmake_build_type(tool):
    """Return the CMake build type to use: the global override wins over the tool's own."""
    return CMAKE_BUILD_TYPE_OVERRIDE or tool.cmake_build_type
# The root directory of the build.
def fastcomp_build_dir(tool):
    """Name of the tool's root build directory, encoding generator, version/branch and bitness."""
    generator_suffixes = {
        'Visual Studio 10': '_vs2010',
        'Visual Studio 11': '_vs2012',
        'Visual Studio 12': '_vs2013',
        'Visual Studio 14': '_vs2015',
        'Visual Studio 15': '_vs2017',
        'MinGW Makefiles': '_mingw',
    }
    generator_suffix = generator_suffixes.get(CMAKE_GENERATOR, '')
    bitness_suffix = '_32' if tool.bitness == 32 else '_64'
    # Tools tracking a git branch use the branch name instead of a version number.
    if hasattr(tool, 'git_branch'):
        version_part = tool.git_branch.replace(os.sep, '-')
    else:
        version_part = tool.version
    return 'build_' + version_part + generator_suffix + bitness_suffix
def exe_suffix(filename):
    """Append '.exe' to 'filename' on Windows when it is not already present."""
    if WINDOWS and not filename.endswith('.exe'):
        return filename + '.exe'
    return filename
# The directory where the binaries are produced. (relative to the installation root directory of the tool)
def fastcomp_build_bin_dir(tool):
    """Relative path of the directory where the tool's built binaries end up.

    Visual Studio generators place binaries in a per-configuration subdirectory:
    probe the known configurations for an existing clang binary, fall back to
    the legacy 'bin/<config>' layout, then to the default configuration's path.
    """
    build_dir = fastcomp_build_dir(tool)
    if WINDOWS and 'Visual Studio' in CMAKE_GENERATOR:
        old_llvm_bin_dir = os.path.join(build_dir, 'bin', decide_cmake_build_type(tool))

        new_llvm_bin_dir = None
        default_cmake_build_type = decide_cmake_build_type(tool)
        cmake_build_types = [default_cmake_build_type, 'Release', 'RelWithDebInfo', 'MinSizeRel', 'Debug']
        for build_type in cmake_build_types:
            d = os.path.join(build_dir, build_type, 'bin')
            if os.path.isfile(os.path.join(tool.installation_path(), d, exe_suffix('clang'))):
                new_llvm_bin_dir = d
                break

        if new_llvm_bin_dir and os.path.exists(os.path.join(tool.installation_path(), new_llvm_bin_dir)):
            return new_llvm_bin_dir
        elif os.path.exists(os.path.join(tool.installation_path(), old_llvm_bin_dir)):
            return old_llvm_bin_dir
        return os.path.join(build_dir, default_cmake_build_type, 'bin')
    else:
        return os.path.join(build_dir, 'bin')
def build_env(generator):
    """Return a copy of os.environ augmented for building with the given CMake generator.

    On OSX, appends -stdlib=libc++ to CXXFLAGS; on Windows with Visual Studio
    generators, sets VCTargetsPath and extends PATH so cl.exe and its support
    DLLs (mspdb140.dll et al.) can be found.
    """
    build_env = os.environ.copy()

    # To work around a build issue with older Mac OS X builds, add -stdlib=libc++ to all builds.
    # See https://groups.google.com/forum/#!topic/emscripten-discuss/5Or6QIzkqf0
    if OSX:
        # Bug fix: the original used hasattr() on the environment dict, which never
        # reports dict keys, so a pre-existing CXXFLAGS was always clobbered instead
        # of appended to. Membership ('in') is the correct test for a dict key.
        build_env['CXXFLAGS'] = ((build_env['CXXFLAGS'] + ' ') if 'CXXFLAGS' in build_env else '') + '-stdlib=libc++'
    elif 'Visual Studio 15' in generator:
        path = vswhere(15)
        build_env['VCTargetsPath'] = os.path.join(path, 'Common7\\IDE\\VC\\VCTargets')

        # CMake and VS2017 cl.exe needs to have mspdb140.dll et al. in its PATH.
        vc_bin_paths = [vs_filewhere(path, 'amd64', 'cl.exe'),
                        vs_filewhere(path, 'x86', 'cl.exe')]
        for path in vc_bin_paths:
            if os.path.isdir(path):
                build_env['PATH'] = build_env['PATH'] + ';' + path
    elif 'Visual Studio 14' in generator or 'Visual Studio 2015' in generator:
        build_env['VCTargetsPath'] = os.path.join(os.environ['ProgramFiles(x86)'], 'MSBuild/Microsoft.Cpp/v4.0/V140')

        # CMake and VS2015 cl.exe needs to have mspdb140.dll et al. in its PATH.
        vc_bin_paths = [os.path.join(os.environ['ProgramFiles'], 'Microsoft Visual Studio 14.0\\VC\\bin'),
                        os.path.join(os.environ['ProgramFiles(x86)'], 'Microsoft Visual Studio 14.0\\VC\\bin')]
        for path in vc_bin_paths:
            if os.path.isdir(path):
                build_env['PATH'] = build_env['PATH'] + ';' + path
    elif 'Visual Studio 12' in generator or 'Visual Studio 2013' in generator:
        build_env['VCTargetsPath'] = os.path.join(os.environ['ProgramFiles(x86)'], 'MSBuild/Microsoft.Cpp/v4.0/V120')
    return build_env
def get_generator_for_sln_file(sln_file):
  """Infer which Visual Studio CMake generator produced the given .sln file.

  Reads the solution file and matches the version comment markers Visual Studio
  writes into it. Raises Exception when no known marker is found.
  """
  # BUGFIX: close the file handle deterministically instead of leaking it.
  with open(sln_file, 'r') as f:
    contents = f.read()
  if '# Visual Studio 15' in contents:
    return 'Visual Studio 15'
  if '# Visual Studio Express 2015' in contents or '# Visual Studio 2015' in contents or '# Visual Studio 14' in contents:
    return 'Visual Studio 14'
  if '# Visual Studio Express 2013' in contents or '# Visual Studio 2013' in contents or '# Visual Studio 12' in contents:
    return 'Visual Studio 12'
  raise Exception('Unknown generator used to build solution file ' + sln_file)
def find_msbuild(sln_file):
  """Locate an MSBuild.exe compatible with the Visual Studio version that produced sln_file.

  Returns the absolute path to MSBuild.exe, or whatever `which('MSBuild.exe')`
  yields (possibly None) when none of the known install locations match.
  Raises Exception for an unrecognized generator.
  """
  # The following logic attempts to find a Visual Studio version specific MSBuild.exe from a list of known locations. This logic
  # exists because it was detected that when multiple Visual Studio versions exist (VS2013 & VS2015), their MSBuild.exes might not
  # be able to drive a build proper. This search is messy, and perhaps in VS >= 2017 or similar none of this logic would be needed.
  # Ideally would be able to do "cmake --build path/to/cmake/build/directory --config Debug|RelWithDebInfo|MinSizeRel|Release" across
  # all platforms, but around VS2013 era this did not work. This could be reattempted when support for VS 2015 is dropped.
  # Candidate locations, preferring the 64-bit MSBuild binaries first.
  search_paths_vs2015 = [os.path.join(os.environ['ProgramFiles'], 'MSBuild/14.0/Bin/amd64'),
                         os.path.join(os.environ['ProgramFiles(x86)'], 'MSBuild/14.0/Bin/amd64'),
                         os.path.join(os.environ['ProgramFiles'], 'MSBuild/14.0/Bin'),
                         os.path.join(os.environ['ProgramFiles(x86)'], 'MSBuild/14.0/Bin')]
  search_paths_vs2013 = [os.path.join(os.environ['ProgramFiles'], 'MSBuild/12.0/Bin/amd64'),
                         os.path.join(os.environ['ProgramFiles(x86)'], 'MSBuild/12.0/Bin/amd64'),
                         os.path.join(os.environ['ProgramFiles'], 'MSBuild/12.0/Bin'),
                         os.path.join(os.environ['ProgramFiles(x86)'], 'MSBuild/12.0/Bin')]
  # Very old .NET-framework-bundled MSBuild as a last resort for VS2013-era builds.
  search_paths_old = [os.path.join(os.environ["WINDIR"], 'Microsoft.NET/Framework/v4.0.30319')]
  generator = get_generator_for_sln_file(sln_file)
  debug_print('find_msbuild looking for generator ' + str(generator))
  if generator == 'Visual Studio 15':
    # VS2017 no longer has fixed install paths; ask vswhere for the install root.
    path = vswhere(15)
    search_paths = [os.path.join(path, 'MSBuild/15.0/Bin/amd64'),
                    os.path.join(path, 'MSBuild/15.0/Bin')]
  elif generator == 'Visual Studio 14':
    search_paths = search_paths_vs2015
  elif generator == 'Visual Studio 12':
    search_paths = search_paths_vs2013 + search_paths_old
  else:
    raise Exception('Unknown generator!')

  for path in search_paths:
    p = os.path.join(path, 'MSBuild.exe')
    debug_print('Searching for MSBuild.exe: ' + p)
    if os.path.isfile(p):
      return p
  debug_print('MSBuild.exe in PATH? ' + str(which('MSBuild.exe')))
  return which('MSBuild.exe') # Last fallback, try any MSBuild from PATH (might not be compatible, but best effort)
def make_build(build_root, build_type, build_target_platform='x64'):
  """Run the platform-appropriate build tool (MSBuild / mingw32-make / cmake --build) in build_root.

  build_type: CMake configuration name (e.g. 'Release', 'RelWithDebInfo').
  build_target_platform: MSBuild platform ('x64' or 'Win32'); ignored on non-Windows.
  Returns True on success, False on failure.
  """
  debug_print('make_build(build_root=' + build_root + ', build_type=' + build_type + ', build_target_platform=' + build_target_platform + ')')
  global CPU_CORES
  if CPU_CORES > 1:
    print('Performing a parallel build with ' + str(CPU_CORES) + ' cores.')
  else:
    print('Performing a singlethreaded build.')

  generator_to_use = CMAKE_GENERATOR

  if WINDOWS:
    if 'Visual Studio' in CMAKE_GENERATOR:
      # Find the generated solution file so the matching MSBuild can be chosen.
      solution_name = str(subprocess.check_output(['dir', '/b', '*.sln'], shell=True, cwd=build_root).decode('utf-8').strip())
      generator_to_use = get_generator_for_sln_file(os.path.join(build_root, solution_name))
      # Disabled for now: Don't pass /maxcpucount argument to msbuild, since it looks like when building, msbuild already automatically spawns the full amount of logical
      # cores the system has, and passing the number of logical cores here has been observed to give a quadratic N*N explosion on the number of spawned processes
      # (e.g. on a Core i7 5960X with 16 logical cores, it would spawn 16*16=256 cl.exe processes, which would start crashing when running out of system memory)
      # make = [find_msbuild(os.path.join(build_root, solution_name)), '/maxcpucount:' + str(CPU_CORES), '/t:Build', '/p:Configuration=' + build_type, '/nologo', '/verbosity:minimal', solution_name]
      make = [find_msbuild(os.path.join(build_root, solution_name)), '/t:Build', '/p:Configuration=' + build_type, '/p:Platform=' + build_target_platform, '/nologo', '/verbosity:minimal', solution_name]
    else:
      make = ['mingw32-make', '-j' + str(CPU_CORES)]
  else:
    make = ['cmake', '--build', '.', '--', '-j' + str(CPU_CORES)]

  # Build
  try:
    print('Running build: ' + str(make))
    ret = subprocess.check_call(make, cwd=build_root, env=build_env(generator_to_use))
    if ret != 0:
      # BUGFIX: the original concatenated the integer exit code directly to a str,
      # which raises TypeError; stringify it first.
      # (NOTE(review): check_call raises CalledProcessError on nonzero exit, so this
      # branch is defensive; kept for safety.)
      print('Build failed with exit code ' + str(ret) + '!', file=sys.stderr)
      print('Working directory: ' + build_root, file=sys.stderr)
      return False
  except Exception as e:
    print('Build failed due to exception!', file=sys.stderr)
    print('Working directory: ' + build_root, file=sys.stderr)
    print(str(e), file=sys.stderr)
    return False

  return True
def cmake_configure(generator, build_root, src_root, build_type, extra_cmake_args=None):
  """Run the CMake configure step for src_root into build_root.

  generator: CMake generator name, or a falsy value to let CMake pick its default.
  build_type: CMake build configuration (e.g. 'Release').
  extra_cmake_args: optional list of additional -D... arguments.
  Returns True on success, False on failure.
  """
  # BUGFIX: avoid a mutable default argument; the list was never mutated, but
  # None-with-fallback is the safe idiom.
  if extra_cmake_args is None:
    extra_cmake_args = []
  debug_print('cmake_configure(generator=' + str(generator) + ', build_root=' + str(build_root) + ', src_root=' + str(src_root) + ', build_type=' + str(build_type) + ', extra_cmake_args=' + str(extra_cmake_args) + ')')
  # Configure
  if not os.path.isdir(build_root):
    os.mkdir(build_root) # Create build output directory if it doesn't yet exist.
  try:
    if generator:
      generator = ['-G', generator]
    else:
      generator = []
    cmdline = ['cmake'] + generator + ['-DCMAKE_BUILD_TYPE=' + build_type, '-DPYTHON_EXECUTABLE=' + sys.executable] + extra_cmake_args + [src_root]
    print('Running CMake: ' + str(cmdline))

    # Surround argument with quotes (escaping embedded quotes) if it contains spaces.
    def quote_parens(x):
      if ' ' in x:
        return '"' + x.replace('"', '\\"') + '"'
      else:
        return x

    # Create a file 'recmake.bat/sh' in the build root that user can call to manually recmake the build tree with the previous build params
    # BUGFIX: close the script file deterministically via a context manager instead of leaking the handle.
    with open(os.path.join(build_root, 'recmake.' + ('bat' if WINDOWS else 'sh')), 'w') as recmake_file:
      recmake_file.write(' '.join(map(quote_parens, cmdline)))
    ret = subprocess.check_call(cmdline, cwd=build_root, env=build_env(CMAKE_GENERATOR))
    if ret != 0:
      # BUGFIX: the original concatenated the integer exit code to a str (TypeError);
      # stringify it first. (check_call raises on nonzero exit, so this is defensive.)
      print('CMake invocation failed with exit code ' + str(ret) + '!', file=sys.stderr)
      print('Working directory: ' + build_root, file=sys.stderr)
      return False
  except OSError as e:
    if e.errno == errno.ENOENT:
      print(str(e), file=sys.stderr)
      print('Could not run CMake, perhaps it has not been installed?', file=sys.stderr)
      if WINDOWS:
        print('Installing this package requires CMake. Get it from http://www.cmake.org/', file=sys.stderr)
      elif LINUX:
        print('Installing this package requires CMake. Get it via your system package manager (e.g. sudo apt-get install cmake), or from http://www.cmake.org/', file=sys.stderr)
      elif OSX:
        print('Installing this package requires CMake. Get it via a OSX package manager (Homebrew: "brew install cmake", or MacPorts: "sudo port install cmake"), or from http://www.cmake.org/', file=sys.stderr)
      return False
    raise
  except Exception as e:
    print('CMake invocation failed due to exception!', file=sys.stderr)
    print('Working directory: ' + build_root, file=sys.stderr)
    print(str(e), file=sys.stderr)
    return False

  return True
def xcode_sdk_version():
  """Return the active Xcode SDK version as a list of version component strings.

  Example: ['10', '13']. Falls back to the macOS release version when
  `xcrun` is unavailable or fails.
  """
  try:
    output = subprocess.check_output(['xcrun', '--show-sdk-version'])
    if sys.version_info >= (3,):
      output = output.decode('utf8')
    return output.strip().split('.')
  except:
    # BUGFIX: the original fallback called the nonexistent attribute
    # `subprocess.checkplatform.mac_ver()` (AttributeError); the intent is
    # clearly platform.mac_ver(). Local import keeps the file's top-level
    # imports untouched.
    import platform
    return platform.mac_ver()[0].split('.')
def build_llvm_tool(tool):
  """Fetch the fastcomp LLVM+Clang sources for `tool` and build them with CMake.

  Sources come either from git (when the manifest declares `git_branch`) or
  from packaged release archives. Returns True on success, False on failure.
  """
  debug_print('build_llvm_tool(' + str(tool) + ')')
  fastcomp_root = tool.installation_path()
  fastcomp_src_root = os.path.join(fastcomp_root, 'src')
  if hasattr(tool, 'git_branch'): # Does this tool want to be git cloned from github?
    success = git_clone_checkout_and_pull(tool.download_url(), fastcomp_src_root, tool.git_branch)
    if not success:
      return False
    # Clang (and optionally lld) live inside the LLVM tree under tools/.
    clang_root = os.path.join(fastcomp_src_root, 'tools/clang')
    success = git_clone_checkout_and_pull(tool.clang_url, clang_root, tool.git_branch)
    if not success:
      return False
    if hasattr(tool, 'lld_url'):
      lld_root = os.path.join(fastcomp_src_root, 'tools/lld')
      success = git_clone_checkout_and_pull(tool.lld_url, lld_root, tool.git_branch)
      if not success:
        return False
  else: # Not a git cloned tool, so instead download from git tagged releases
    success = download_and_unzip(tool.download_url(), fastcomp_src_root, filename_prefix='llvm-e')
    if not success:
      return False
    success = download_and_unzip(tool.windows_clang_url if WINDOWS else tool.unix_clang_url, os.path.join(fastcomp_src_root, 'tools/clang'), filename_prefix='clang-e')
    if not success:
      return False

  # 64-bit Visual Studio builds require the ' Win64' generator suffix.
  cmake_generator = CMAKE_GENERATOR
  if 'Visual Studio' in CMAKE_GENERATOR and tool.bitness == 64:
    cmake_generator += ' Win64'

  build_dir = fastcomp_build_dir(tool)
  build_root = os.path.join(fastcomp_root, build_dir)

  build_type = decide_cmake_build_type(tool)

  # Configure
  global BUILD_FOR_TESTING, ENABLE_LLVM_ASSERTIONS
  tests_arg = 'ON' if BUILD_FOR_TESTING else 'OFF'

  # Assertions are enabled when explicitly requested, or in 'auto' mode for any
  # non-release build type.
  enable_assertions = ENABLE_LLVM_ASSERTIONS.lower() == 'on' or (ENABLE_LLVM_ASSERTIONS == 'auto' and build_type.lower() != 'release' and build_type.lower() != 'minsizerel')

  only_supports_wasm = hasattr(tool, 'only_supports_wasm')
  # Restrict the LLVM target backends to the host architecture (plus JSBackend below).
  if ARCH == 'x86' or ARCH == 'x86_64':
    targets_to_build = 'X86'
  elif ARCH == 'arm':
    targets_to_build = 'ARM'
  elif ARCH == 'aarch64':
    targets_to_build = 'AArch64'
  else:
    # May have problems with emconfigure
    targets_to_build = ''
  if not only_supports_wasm:
    if targets_to_build != '':
      targets_to_build += ';'
    targets_to_build += 'JSBackend'
  args = ['-DLLVM_TARGETS_TO_BUILD=' + targets_to_build, '-DLLVM_INCLUDE_EXAMPLES=OFF', '-DCLANG_INCLUDE_EXAMPLES=OFF', '-DLLVM_INCLUDE_TESTS=' + tests_arg, '-DCLANG_INCLUDE_TESTS=' + tests_arg, '-DLLVM_ENABLE_ASSERTIONS=' + ('ON' if enable_assertions else 'OFF')]
  # The user can inject arbitrary extra CMake arguments via the environment.
  if os.environ.get('LLVM_CMAKE_ARGS'):
    extra_args = os.environ['LLVM_CMAKE_ARGS'].split(',')
    print('Passing the following extra arguments to LLVM CMake configuration: ' + str(extra_args))
    args += extra_args

  # MacOS < 10.13 workaround for LLVM build bug https://github.com/kripken/emscripten/issues/5418:
  # specify HAVE_FUTIMENS=0 in the build if building with target SDK that is older than 10.13.
  if OSX and (not os.environ.get('LLVM_CMAKE_ARGS') or 'HAVE_FUTIMENS' not in os.environ.get('LLVM_CMAKE_ARGS')) and xcode_sdk_version() < ['10', '13']:
    print('Passing -DHAVE_FUTIMENS=0 to LLVM CMake configure to workaround https://github.com/kripken/emscripten/issues/5418. Please update to macOS 10.13 or newer')
    args += ['-DHAVE_FUTIMENS=0']

  success = cmake_configure(cmake_generator, build_root, fastcomp_src_root, build_type, args)
  if not success:
    return False

  # Make
  success = make_build(build_root, build_type, 'x64' if tool.bitness == 64 else 'Win32')
  return success
# Emscripten asm.js optimizer build scripts:
def optimizer_build_root(tool):
  """Return the directory where the asm.js optimizer of `tool` is built."""
  root = tool.installation_path().strip()
  # Drop a single trailing path separator, if present.
  if root.endswith(('/', '\\')):
    root = root[:-1]
  return '%s%s_%sbit_optimizer' % (root, cmake_generator_prefix(), tool.bitness)
def uninstall_optimizer(tool):
  """Delete the asm.js optimizer build directory of `tool` (best effort)."""
  debug_print('uninstall_optimizer(' + str(tool) + ')')
  target = optimizer_build_root(tool)
  print("Deleting path '" + target + "'")
  # Best effort: try removing both a directory tree and a plain file; ignore failures.
  try:
    remove_tree(target)
    os.remove(target)
  except:
    pass
def is_optimizer_installed(tool):
  """True when the asm.js optimizer build directory for `tool` exists."""
  return os.path.exists(optimizer_build_root(tool))
def build_optimizer_tool(tool):
  """Configure and build the emscripten asm.js optimizer for `tool` with CMake.

  Returns True on success, False on failure.
  """
  debug_print('build_optimizer_tool(' + str(tool) + ')')
  src_root = os.path.join(tool.installation_path(), 'tools', 'optimizer')
  build_root = optimizer_build_root(tool)
  build_type = decide_cmake_build_type(tool)

  # Configure
  # 64-bit Visual Studio builds require the ' Win64' generator suffix.
  cmake_generator = CMAKE_GENERATOR
  if 'Visual Studio' in CMAKE_GENERATOR and tool.bitness == 64:
    cmake_generator += ' Win64'
  success = cmake_configure(cmake_generator, build_root, src_root, build_type)
  if not success:
    return False

  # Make
  success = make_build(build_root, build_type, 'x64' if tool.bitness == 64 else 'Win32')
  return success
# Binaryen build scripts:
def binaryen_build_root(tool):
  """Return the directory where Binaryen for `tool` is built."""
  root = tool.installation_path().strip()
  # Drop a single trailing path separator, if present.
  if root.endswith(('/', '\\')):
    root = root[:-1]
  return '%s%s_%sbit_binaryen' % (root, cmake_generator_prefix(), tool.bitness)
def uninstall_binaryen(tool):
  """Delete the Binaryen build directory of `tool` (best effort)."""
  debug_print('uninstall_binaryen(' + str(tool) + ')')
  target = binaryen_build_root(tool)
  print("Deleting path '" + target + "'")
  # Best effort: try removing both a directory tree and a plain file; ignore failures.
  try:
    remove_tree(target)
    os.remove(target)
  except:
    pass
def is_binaryen_installed(tool):
  """True when the Binaryen build directory for `tool` exists."""
  return os.path.exists(binaryen_build_root(tool))
def build_binaryen_tool(tool):
  """Configure and build Binaryen for `tool` with CMake, then copy helper scripts.

  Returns True on success, False on failure.
  """
  debug_print('build_binaryen_tool(' + str(tool) + ')')
  src_root = tool.installation_path()
  build_root = binaryen_build_root(tool)
  build_type = decide_cmake_build_type(tool)

  # Configure
  args = []

  # 64-bit Visual Studio builds require the ' Win64' generator suffix.
  cmake_generator = CMAKE_GENERATOR
  if 'Visual Studio' in CMAKE_GENERATOR:
    if tool.bitness == 64:
      cmake_generator += ' Win64'

  if BUILD_FOR_TESTING:
    args += ['-DRUN_STATIC_ANALYZER=1']

  success = cmake_configure(cmake_generator, build_root, src_root, build_type, args)
  if not success:
    return False

  # Make
  success = make_build(build_root, build_type, 'x64' if tool.bitness == 64 else 'Win32')

  # Deploy scripts needed from source repository to build directory
  remove_tree(os.path.join(build_root, 'scripts'))
  shutil.copytree(os.path.join(src_root, 'scripts'), os.path.join(build_root, 'scripts'))
  remove_tree(os.path.join(build_root, 'src', 'js'))
  shutil.copytree(os.path.join(src_root, 'src', 'js'), os.path.join(build_root, 'src', 'js'))

  return success
def download_and_unzip(zipfile, dest_dir, download_even_if_exists=False, filename_prefix=''):
  """Download archive `zipfile` from the emsdk packages URL and unpack it into dest_dir.

  zipfile: archive filename relative to emsdk_packages_url (.zip or tar-family).
  filename_prefix: prefix prepended to the cached download filename.
  Returns True on success, False on failure.
  """
  debug_print('download_and_unzip(zipfile=' + zipfile + ', dest_dir=' + dest_dir + ')')

  url = urljoin(emsdk_packages_url, zipfile)
  download_target = get_download_target(url, zips_subdir, filename_prefix)

  # If the archive was already downloaded, and the directory it would be
  # unpacked to has contents, assume it's the same contents and skip.
  if not download_even_if_exists and os.path.exists(download_target) and num_files_in_directory(dest_dir) > 0:
    print("The contents of file '" + zipfile + "' already exist in destination '" + dest_dir + "', skipping.")
    return True
  # Otherwise, if the archive must be downloaded, always write into the
  # target directory, since it may be a new version of a tool that gets
  # installed to the same place (that is, a different download name
  # indicates different contents).
  download_even_if_exists = True
  received_download_target = download_file(url, zips_subdir, download_even_if_exists, filename_prefix)
  if not received_download_target:
    return False
  assert received_download_target == download_target

  # Pick the unpacker by archive extension.
  if zipfile.endswith('.zip'):
    return unzip(download_target, dest_dir, unpack_even_if_exists=download_even_if_exists)
  else:
    return untargz(download_target, dest_dir, unpack_even_if_exists=download_even_if_exists)
def to_native_path(p):
  """Convert path `p` to this OS's native form (backslashes on plain Windows, forward slashes elsewhere/MSYS)."""
  unix_form = to_unix_path(p)
  if WINDOWS and not MSYS:
    return unix_form.replace('/', '\\')
  return unix_form
# Finds and returns a list of the directories that need to be added to PATH for the given set of tools.
def get_required_path(active_tools):
  """Return the list of directories that must be added to PATH for the given set of tools."""
  path_add = [to_native_path(emsdk_path())]
  # Each tool may contribute its own activated PATH directory.
  path_add.extend(to_native_path(tool.expand_vars(tool.activated_path))
                  for tool in active_tools if hasattr(tool, 'activated_path'))
  return path_add
# Returns the absolute path to the file '.emscripten' for the current user on this system.
def dot_emscripten_path():
  """Absolute path to the '.emscripten' configuration file for the current user on this system."""
  return os.path.join(emscripten_config_directory, '.emscripten')
# Parsed key/value contents of the user's .emscripten file; populated by load_dot_emscripten().
dot_emscripten = {}
def parse_key_value(line):
  """Split a "KEY = value" configuration line into a (key, value) tuple of stripped strings.

  Returns ('', '') for an empty/falsy line, and (line, '') for a line that
  contains no '=' separator.
  """
  if not line:
    return ('', '')
  eq = line.find('=')
  if eq != -1:
    key = line[0:eq].strip()
    value = line[eq + 1:].strip()
    return (key, value)
  else:
    # BUGFIX: the original returned the undefined name 'key' here, raising
    # NameError for any line without '='. Treat such a line as a bare key
    # with an empty value.
    return (line, '')
def load_dot_emscripten():
  """(Re)parse the user's .emscripten file into the global dict `dot_emscripten`.

  Best effort: a missing or unreadable configuration file simply results in an
  empty dict; malformed lines are skipped.
  """
  global dot_emscripten
  dot_emscripten = {}
  lines = []
  try:
    # BUGFIX: use a context manager so the file handle is closed deterministically
    # (the original leaked the handle from open(...).read()).
    with open(dot_emscripten_path(), "r") as f:
      lines = f.read().split('\n')
  except:
    # Deliberate best-effort: no config file means nothing to load.
    pass
  for line in lines:
    try:
      key, value = parse_key_value(line)
      if value != '':
        dot_emscripten[key] = value
      # print("Got '" + key + "' = '" + value + "'")
    except:
      pass
def generate_dot_emscripten(active_tools):
  """Write a fresh .emscripten configuration file reflecting the given active tools.

  Backs up an existing config to '.emscripten.old', clears stale emscripten
  caches, prints the new config, and advises the user about PATH additions.
  """
  global emscripten_config_directory
  # 'Embedded' mode: the config lives inside the emsdk directory itself, and
  # paths are written relative to a dynamically computed emsdk_path.
  if emscripten_config_directory == emsdk_path():
    temp_dir = sdk_path('tmp')
    mkdir_p(temp_dir)
    embedded = True
  else:
    temp_dir = tempfile.gettempdir().replace('\\', '/')
    embedded = False

  has_spidermonkey = False
  has_node = False

  cfg = 'import os\n'
  if embedded:
    cfg += "emsdk_path=os.path.dirname(os.environ.get('EM_CONFIG')).replace('\\\\', '/')\n"

  # Different tools may provide the same activated configs; the latest to be
  # activated is the relevant one.
  activated_keys_in_order = []
  activated_key_values = {}
  for tool in active_tools:
    tool_cfg = tool.activated_config()
    if tool_cfg:
      # Entries are 'NAME=value' pairs separated by ';'.
      for specific_cfg in tool_cfg.split(';'):
        name, value = specific_cfg.split('=')
        if name not in activated_keys_in_order:
          activated_keys_in_order.append(name)
        activated_key_values[name] = value

  for name in activated_keys_in_order:
    if name == 'SPIDERMONKEY_ENGINE':
      has_spidermonkey = True
    if name == 'NODE_JS':
      has_node = True
    cfg += name + ' = ' + activated_key_values[name] + '\n'

  # These two vars must always be defined, even though they might not exist.
  if not has_spidermonkey:
    cfg += "SPIDERMONKEY_ENGINE = ''\n"
  if not has_node:
    # Prefer a 'nodejs' binary from PATH (Debian naming), else plain 'node'.
    node_fallback = which('nodejs')
    if not node_fallback:
      node_fallback = 'node'
    cfg += "NODE_JS = '" + node_fallback + "'\n"

  cfg += '''V8_ENGINE = ''
TEMP_DIR = ''' + "'" + temp_dir + "'" + '''
COMPILER_ENGINE = NODE_JS
JS_ENGINES = [NODE_JS]
'''

  if embedded:
    # Rewrite absolute config-directory paths into dynamic emsdk_path expressions.
    cfg = cfg.replace(emscripten_config_directory, "' + emsdk_path + '")

  if os.path.exists(dot_emscripten_path()):
    backup_path = dot_emscripten_path() + ".old"
    print("Backing up old Emscripten configuration file in " + os.path.normpath(backup_path))
    move_with_overwrite(dot_emscripten_path(), backup_path)

  with open(dot_emscripten_path(), "w") as text_file:
    text_file.write(cfg)

  # Clear old cached emscripten content.
  try:
    remove_tree(os.path.join(emscripten_config_directory, ".emscripten_cache"))
    os.remove(os.path.join(emscripten_config_directory, ".emscripten_sanity"))
    os.remove(os.path.join(emscripten_config_directory, ".emscripten_cache__last_clear"))
  except:
    pass

  print("The Emscripten configuration file " + os.path.normpath(dot_emscripten_path()) + " has been rewritten with the following contents:")
  print('')
  print(cfg.strip())
  print('')

  path_add = get_required_path(active_tools)
  if not WINDOWS:
    emsdk_env = os.path.relpath(sdk_path('emsdk_env.sh'))
    if '/' not in emsdk_env:
      emsdk_env = './emsdk_env.sh'
    print("To conveniently access the selected set of tools from the command line, consider adding the following directories to PATH, or call 'source " + emsdk_env + "' to do this for you.")
    print('')
    print('   ' + ENVPATH_SEPARATOR.join(path_add))
def find_msbuild_dir():
  """Locate the MSBuild VC++ 'Platforms' directory, or '' when MSBuild is not installed."""
  program_files = os.environ.get('ProgramFiles') or 'C:/Program Files'
  program_files_x86 = os.environ.get('ProgramFiles(x86)') or 'C:/Program Files (x86)'
  # Prefer the 32-bit Program Files location, matching classic MSBuild installs.
  candidates = [
    os.path.join(program_files_x86, "MSBuild/Microsoft.Cpp/v4.0/Platforms"),
    os.path.join(program_files, "MSBuild/Microsoft.Cpp/v4.0/Platforms"),
  ]
  for candidate in candidates:
    if os.path.exists(candidate):
      return candidate
  return '' # No MSbuild installed.
def get_installed_vstool_version(installed_path):
  """Return the contents of version.txt under installed_path, or None if it cannot be read."""
  try:
    # BUGFIX: close the file handle deterministically instead of leaking it.
    with open(installed_path + "/version.txt", "r") as f:
      return f.read()
  except:
    # Deliberate best-effort: any failure means 'no version installed'.
    return None
class Tool(object):
def __init__(self, data):
# Convert the dictionary representation of the tool in 'data' to members of this class for convenience.
for key, value in data.items():
# Python2 compat, convert unicode to str
if sys.version_info < (3,) and isinstance(value, unicode): # noqa
value = value.encode('Latin-1')
setattr(self, key, value)
# Cache the name ID of this Tool (these are read very often)
self.name = self.id + '-' + self.version
if hasattr(self, 'bitness'):
self.name += '-' + str(self.bitness) + 'bit'
def __str__(self):
return self.name
def __repr__(self):
return self.name
def expand_vars(self, str):
if WINDOWS and '%MSBuildPlatformsDir%' in str:
str = str.replace('%MSBuildPlatformsDir%', find_msbuild_dir())
if '%cmake_build_type_on_win%' in str:
str = str.replace('%cmake_build_type_on_win%', (decide_cmake_build_type(self) + '/') if WINDOWS else '')
if '%installation_dir%' in str:
str = str.replace('%installation_dir%', sdk_path(self.installation_dir()))
if '%generator_prefix%' in str:
str = str.replace('%generator_prefix%', cmake_generator_prefix())
str = str.replace('%.exe%', '.exe' if WINDOWS else '')
if '%fastcomp_build_dir%' in str:
str = str.replace('%fastcomp_build_dir%', fastcomp_build_dir(self))
if '%fastcomp_build_bin_dir%' in str:
str = str.replace('%fastcomp_build_bin_dir%', fastcomp_build_bin_dir(self))
return str
# Return true if this tool requires building from source, and false if this is a precompiled tool.
def needs_compilation(self):
if hasattr(self, 'cmake_build_type'):
return True
if hasattr(self, 'uses'):
for tool_name in self.uses:
tool = find_tool(tool_name)
if not tool:
debug_print('Tool ' + str(self) + ' depends on ' + tool_name + ' which does not exist!')
continue
if tool.needs_compilation():
return True
return False
# Specifies the target path where this tool will be installed to. This could either be a directory or a filename (e.g. in case of node.js)
def installation_path(self):
if WINDOWS and hasattr(self, 'windows_install_path'):
pth = self.expand_vars(self.windows_install_path)
return sdk_path(pth)
if hasattr(self, 'install_path'):
pth = self.expand_vars(self.install_path)
return sdk_path(pth)
p = self.version
if hasattr(self, 'bitness') and (not hasattr(self, 'append_bitness') or self.append_bitness):
p += '_' + str(self.bitness) + 'bit'
return sdk_path(os.path.join(self.id, p))
# Specifies the target directory this tool will be installed to.
def installation_dir(self):
dir = self.installation_path()
if path_points_to_directory(dir):
return dir
else:
return os.path.dirname(dir)
# Returns the configuration item that needs to be added to .emscripten to make this Tool active for the current user.
def activated_config(self):
if hasattr(self, 'activated_cfg'):
return to_unix_path(self.expand_vars(self.activated_cfg))
else:
return ''
def activated_environment(self):
if hasattr(self, 'activated_env'):
return self.expand_vars(self.activated_env).split(';')
else:
return []
def compatible_with_this_arch(self):
if hasattr(self, 'arch'):
if self.arch != ARCH:
return False
return True
def compatible_with_this_os(self):
if hasattr(self, 'os'):
if self.os == 'all':
return True
if self.compatible_with_this_arch() and ((WINDOWS and 'win' in self.os) or (LINUX and ('linux' in self.os or 'unix' in self.os)) or (OSX and ('osx' in self.os or 'unix' in self.os))):
return True
else:
return False
else:
if not hasattr(self, 'osx_url') and not hasattr(self, 'windows_url') and not hasattr(self, 'unix_url') and not hasattr(self, 'linux_url'):
return True
if OSX and hasattr(self, 'osx_url') and self.compatible_with_this_arch():
return True
if LINUX and hasattr(self, 'linux_url') and self.compatible_with_this_arch():
return True
if WINDOWS and (hasattr(self, 'windows_url') or hasattr(self, 'windows_install_path')) and self.compatible_with_this_arch():
return True
if UNIX and hasattr(self, 'unix_url'):
return True
return hasattr(self, 'url')
def is_installed(self):
# If this tool/sdk depends on other tools, require that all dependencies are installed for this tool to count as being installed.
if hasattr(self, 'uses'):
for tool_name in self.uses:
tool = find_tool(tool_name)
if tool is None:
print("Manifest error: No tool by name '" + tool_name + "' found! This may indicate an internal SDK error!")
return False
if not tool.is_installed():
return False
if self.download_url() is not None:
# For e.g. fastcomp clang from git repo, the activated PATH is the directory where the compiler is built to, and installation_path is
# the directory where the source tree exists. To distinguish between multiple packages sharing the same source
# (clang-master-32bit, clang-master-64bit, clang-incoming-32bit and clang-incoming-64bit each share the same git repo), require
# that in addition to the installation directory, each item in the activated PATH must exist.
activated_path = self.expand_vars(self.activated_path).split(';') if hasattr(self, 'activated_path') else [self.installation_path()]
def each_path_exists(pathlist):
for path in pathlist:
if not os.path.exists(path):
return False
return True
content_exists = os.path.exists(self.installation_path()) and each_path_exists(activated_path) and (os.path.isfile(self.installation_path()) or num_files_in_directory(self.installation_path()) > 0)
if self.id == 'vs-tool': # vs-tool is a special tool since all versions must be installed to the same dir, so dir name will not differentiate the version.
return content_exists and get_installed_vstool_version(self.installation_path()) == self.version
elif hasattr(self, 'custom_is_installed_script'):
if self.custom_is_installed_script == 'is_optimizer_installed':
return is_optimizer_installed(self)
elif self.custom_is_installed_script == 'is_binaryen_installed':
return is_binaryen_installed(self)
else:
raise Exception('Unknown custom_is_installed_script directive "' + self.custom_is_installed_script + '"!')
else:
return content_exists
else:
return True # This tool does not contain downloadable elements, so it is installed by default.
def is_active(self):
if not self.is_installed():
return False
if self.id == 'vs-tool':
return True # vs-tool is a special tool since all versions must be installed to the same dir, which means that if this tool is installed, it is also active.
# All dependencies of this tool must be active as well.
deps = self.dependencies()
for tool in deps:
if not tool.is_active():
return False
activated_cfg = self.activated_config()
if activated_cfg == '':
return len(deps) > 0
activated_cfg = activated_cfg.split(';')
for cfg in activated_cfg:
cfg = cfg.strip()
key, value = parse_key_value(cfg)
if key not in dot_emscripten:
debug_print(str(self) + ' is not active, because key="' + key + '" does not exist in .emscripten')
return False
# If running in embedded mode, all paths are stored dynamically relative to the emsdk root, so normalize those first.
dot_emscripten_key = dot_emscripten[key].replace("' + emsdk_path + '", emsdk_path())
if dot_emscripten_key != value:
debug_print(str(self) + ' is not active, because key="' + key + '" has value "' + dot_emscripten_key + '" but should have value "' + value + '"')
return False
return True
# Returns true if the system environment variables requires by this tool are currently active.
def is_env_active(self):
envs = self.activated_environment()
for env in envs:
key, value = parse_key_value(env)
if key not in os.environ or to_unix_path(os.environ[key]) != to_unix_path(value):
debug_print(str(self) + ' is not active, because environment variable key="' + key + '" has value "' + str(os.getenv(key)) + '" but should have value "' + value + '"')
return False
if hasattr(self, 'activated_path'):
path = self.expand_vars(self.activated_path).replace('\\', '/')
path = path.split(ENVPATH_SEPARATOR)
for p in path:
path_items = os.environ['PATH'].replace('\\', '/').split(ENVPATH_SEPARATOR)
if not normalized_contains(path_items, p):
debug_print(str(self) + ' is not active, because environment variable PATH item "' + p + '" is not present (PATH=' + os.environ['PATH'] + ')')
return False
return True
def win_activate_env_vars(self, permanently_activate):
if WINDOWS:
envs = self.activated_environment()
for env in envs:
key, value = parse_key_value(env)
if permanently_activate:
win_delete_environment_variable(key, False) # If there is an env var for the LOCAL USER with same name, it will hide the system var, so must remove that first.
win_set_environment_variable(key, value, permanently_activate)
# If this tool can be installed on this system, this function returns True.
# Otherwise, this function returns a string that describes the reason why this tool is not available.
def can_be_installed(self):
if hasattr(self, 'bitness'):
if self.bitness == 64 and not is_os_64bit():
return "this tool is only provided for 64-bit OSes"
if self.id == 'vs-tool':
msbuild_dir = find_msbuild_dir()
if len(msbuild_dir) > 0:
return True
else:
return "Visual Studio 2010 was not found"
else:
return True
def download_url(self):
if WINDOWS and hasattr(self, 'windows_url'):
return self.windows_url
elif OSX and hasattr(self, 'osx_url'):
return self.osx_url
elif LINUX and hasattr(self, 'linux_url'):
return self.linux_url
elif UNIX and hasattr(self, 'unix_url'):
return self.unix_url
elif hasattr(self, 'url'):
return self.url
else:
return None
def install(self):
if self.can_be_installed() is not True:
print("The tool '" + str(self) + "' is not available due to the reason: " + self.can_be_installed())
return False
if self.id == 'sdk':
print("Installing SDK '" + str(self) + "'..")
for tool_name in self.uses:
tool = find_tool(tool_name)
if tool is None:
print("Manifest error: No tool by name '" + tool_name + "' found! This may indicate an internal SDK error!")
success = tool.install()
if not success:
return False
print("Done installing SDK '" + str(self) + "'.")
return True
else:
print("Installing tool '" + str(self) + "'..")
url = self.download_url()
if hasattr(self, 'custom_install_script') and self.custom_install_script == 'build_fastcomp':
success = build_llvm_tool(self)
elif hasattr(self, 'git_branch'):
success = git_clone_checkout_and_pull(url, self.installation_path(), self.git_branch)
elif url.endswith(ARCHIVE_SUFFIXES):
# TODO: explain the vs-tool special-casing
download_even_if_exists = (self.id == 'vs-tool')
# if we are downloading a zip, we will unpack and delete it after immediately anyhow,
# so there is no need to look for an existing one (which may have been left behind
# due to an error in the past)
if url.endswith(ARCHIVE_SUFFIXES):
download_even_if_exists = True
filename_prefix = getattr(self, 'zipfile_prefix', '')
success = download_and_unzip(url, self.installation_path(), download_even_if_exists=download_even_if_exists, filename_prefix=filename_prefix)
else:
dst_file = download_file(urljoin(emsdk_packages_url, self.download_url()), self.installation_path())
if dst_file:
success = True
else:
success = False
if success:
if hasattr(self, 'custom_install_script'):
if self.custom_install_script == 'build_optimizer':
success = build_optimizer_tool(self)
elif self.custom_install_script == 'build_fastcomp':
pass # 'build_fastcomp' is a special one that does the download on its own, others do the download manually.
elif self.custom_install_script == 'build_binaryen':
success = build_binaryen_tool(self)
else:
raise Exception('Unknown custom_install_script command "' + self.custom_install_script + '"!')
# Install an emscripten-version.txt file if told to, and if there is one.
# (If this is not an actual release, but some other build, then we do not
# write anything.)
if hasattr(self, 'emscripten_releases_hash'):
emscripten_version_file_path = os.path.join(to_native_path(self.expand_vars(self.activated_path)), 'emscripten-version.txt')
version = get_emscripten_release_version(self.emscripten_releases_hash)
if version:
open(emscripten_version_file_path, 'w').write('"%s"' % version)
if not success:
print("Installation failed!")
return False
print("Done installing tool '" + str(self) + "'.")
# Sanity check that the installation succeeded, and if so, remove unneeded leftover installation files.
if self.is_installed():
self.cleanup_temp_install_files()
else:
print("Warning: The installation of '" + str(self) + "' seems to have failed, but no error was detected. Either something went wrong with the installation, or this may indicate an internal emsdk error.")
return True
def cleanup_temp_install_files(self):
url = self.download_url()
if url.endswith(ARCHIVE_SUFFIXES):
download_target = get_download_target(url, zips_subdir, getattr(self, 'zipfile_prefix', ''))
debug_print("Deleting temporary zip file " + download_target)
rmfile(download_target)
def uninstall(self):
    """Remove this tool from disk, running its custom uninstall hook first.

    No-op (with a message) if the tool is not installed. Deletion is
    best-effort: failures while removing files are logged, not raised.

    Raises:
        Exception: if 'custom_uninstall_script' names an unknown directive.
    """
    if not self.is_installed():
        print("Tool '" + str(self) + "' was not installed. No need to uninstall.")
        return
    print("Uninstalling tool '" + str(self) + "'..")
    if hasattr(self, 'custom_uninstall_script'):
        if self.custom_uninstall_script == 'uninstall_optimizer':
            uninstall_optimizer(self)
        elif self.custom_uninstall_script == 'uninstall_binaryen':
            uninstall_binaryen(self)
        else:
            raise Exception('Unknown custom_uninstall_script directive "' + self.custom_uninstall_script + '"!')
    try:
        print("Deleting path '" + self.installation_path() + "'")
        remove_tree(self.installation_path())
        # installation_path() may be a single file rather than a directory;
        # os.remove() covers that case (one of the two calls is expected to
        # fail, which is why this is wrapped in a best-effort try block).
        os.remove(self.installation_path())
    except Exception as e:
        # Narrowed from a bare 'except' so KeyboardInterrupt/SystemExit
        # still propagate; removal remains deliberately best-effort.
        debug_print('Ignored error while deleting installation: ' + str(e))
    print("Done uninstalling '" + str(self) + "'.")
def dependencies(self):
    """Return this tool's direct dependencies as Tool objects.

    Names listed in 'uses' that are unknown to the registry are skipped.
    """
    if not hasattr(self, 'uses'):
        return []
    return [t for t in map(find_tool, self.uses) if t]
def recursive_dependencies(self):
    """Return this tool's dependencies, depth-first, including transitive ones.

    Unknown names in 'uses' are skipped, matching dependencies().
    """
    if not hasattr(self, 'uses'):
        return []
    result = []
    for name in self.uses:
        dep = find_tool(name)
        if dep:
            result.append(dep)
            result.extend(dep.recursive_dependencies())
    return result
# A global registry of all known Emscripten SDK tools available in the SDK manifest.
# 'tools' preserves insertion order (newest entries last, per the convention
# noted below); 'tools_map' maps str(tool) -> Tool for O(1) lookup by name.
tools = []
tools_map = {}
def add_tool(tool):
    """Register a (non-SDK) tool in the global registry.

    Raises an Exception if a tool with the same full name already exists.
    """
    tool.is_sdk = False
    tools.append(tool)
    existing = find_tool(str(tool))
    if existing:
        raise Exception('Duplicate tool ' + str(tool) + '! Existing:\n{' + ', '.join("%s: %s" % item for item in vars(existing).items()) + '}, New:\n{' + ', '.join("%s: %s" % item for item in vars(tool).items()) + '}')
    tools_map[str(tool)] = tool
# A global registry of all known SDK toolsets.
# 'sdks' preserves insertion order (newest entries last, per the convention
# noted below); 'sdks_map' maps str(sdk) -> Tool for O(1) lookup by name.
sdks = []
sdks_map = {}
def add_sdk(sdk):
    """Register an SDK toolset in the global registry.

    Raises an Exception if an SDK with the same full name already exists.
    """
    sdk.is_sdk = True
    sdks.append(sdk)
    existing = find_sdk(str(sdk))
    if existing:
        raise Exception('Duplicate sdk ' + str(sdk) + '! Existing:\n{' + ', '.join("%s: %s" % item for item in vars(existing).items()) + '}, New:\n{' + ', '.join("%s: %s" % item for item in vars(sdk).items()) + '}')
    sdks_map[str(sdk)] = sdk
# N.B. In both tools and sdks list above, we take the convention that the newest items are at the back of the list (ascending chronological order)
def find_tool(name):
    """Look up a registered tool by its full name; None if unknown."""
    if name in tools_map:
        return tools_map[name]
    return None


def find_sdk(name):
    """Look up a registered SDK by its full name; None if unknown."""
    if name in sdks_map:
        return sdks_map[name]
    return None


def is_os_64bit():
    """Return True when the host reports a 64-bit machine architecture.

    See http://stackoverflow.com/questions/2208828/detect-64bit-os-windows-in-python
    """
    machine = platform.machine()
    return machine.endswith('64')
def find_latest_releases_version():
    """Return the version string of the newest emscripten-releases build."""
    return load_releases_info()['latest']


def find_latest_releases_hash():
    """Return the git hash of the newest emscripten-releases build."""
    info = load_releases_info()
    return info['releases'][find_latest_releases_version()]


def find_latest_releases_sdk(which):
    """Return the SDK package name for the latest release of the given backend."""
    return 'sdk-releases-%s-%s-64bit' % (which, find_latest_releases_hash())
def find_tot_sdk(which):
    """Return the SDK package name for the current tip-of-tree build of the
    given backend ('upstream' or 'fastcomp').

    Exits the process if tip-of-tree information has not been fetched
    (it is populated by 'emsdk update-tags').
    """
    if not os.path.exists(tot_path()):
        print('Tip-of-tree information was not found, run emsdk update-tags')
        sys.exit(1)
    # Use a context manager so the file handle is closed promptly
    # (the previous revision leaked it via open(...).read()).
    with open(tot_path()) as f:
        tot = f.read()
    if not tot:
        print('Tip-of-tree build was not found, run emsdk update-tags (however, if there is no recent tip-of-tree build, you may need to wait)')
        sys.exit(1)
    return 'sdk-releases-%s-%s-64bit' % (which, tot)
# Given a git hash in emscripten-releases, find the emscripten
# version for it. There may not be one if this is not the hash of
# a release, in which case we return None.
def get_emscripten_release_version(emscripten_releases_hash):
    """Map a release git hash back to its emscripten version string, or None."""
    releases = load_releases_info()['releases']
    for version, release_hash in dict(releases).items():
        if release_hash == emscripten_releases_hash:
            return version
    return None


def tot_path():
    """Path of the file caching the tip-of-tree build hash."""
    return sdk_path('emscripten-releases-tot.txt')
# Get the tip-of-tree build identifier.
def get_emscripten_releases_tot():
    """Return the newest emscripten-releases git hash that has a downloadable
    prebuilt binary, or '' if none of the recent commits has one yet."""
    git_clone_checkout_and_pull(emscripten_releases_repo, sdk_path('releases'), 'master')
    recent_releases = git_recent_commits(sdk_path('releases'))
    # The recent releases are the latest hashes in the git repo. There
    # may not be a build for the most recent ones yet; find the last
    # that does.
    for release in recent_releases:
        url = emscripten_releases_download_url_template % (
            os_name_for_emscripten_releases(),
            release,
            'tbz2' if not WINDOWS else 'zip'
        )
        try:
            urlopen(url)
        except Exception:
            # No build uploaded for this commit (or a transient network
            # error) - try the next older commit. Narrowed from a bare
            # 'except' so KeyboardInterrupt/SystemExit still propagate.
            continue
        return release
    return ''
# Finds the best-matching python tool for use.
def find_used_python():
    """Return the best-matching installed 'python' tool, or None.

    Preference order: installed+active+env-active, then installed+active,
    then merely installed. Each pass scans in reverse because the newest
    tools are at the end of the 'tools' list.
    """
    # One predicate per preference tier; previously three near-identical
    # loops were spelled out by hand.
    preference_order = [
        lambda t: t.is_active() and t.is_env_active(),
        lambda t: t.is_active(),
        lambda t: True,
    ]
    for accept in preference_order:
        for t in reversed(tools):
            if t.id == 'python' and t.is_installed() and accept(t):
                return t
    return None
def version_key(ver):
    """Split a version string like '1.38.11' (separators '.', '_' or '-')
    into a list of ints suitable for numeric comparison/sorting."""
    return [int(part) for part in re.split('[._-]', ver)]
# A sort function that is compatible with both Python 2 and Python 3 using a custom comparison function.
def python_2_3_sorted(arr, cmp):
    """Sort arr with an old-style comparison function on both Python 2 and 3."""
    if sys.version_info < (3,):
        return sorted(arr, cmp=cmp)
    return sorted(arr, key=functools.cmp_to_key(cmp))
def fetch_emscripten_tags():
    """Refresh the cached tip-of-tree information from the network.

    Requires git; when git is unavailable, prints platform-specific install
    instructions instead of failing.
    """
    git = GIT(must_succeed=False)
    if git:
        print('Fetching emscripten-releases repository...')
        emscripten_releases_tot = get_emscripten_releases_tot()
        if emscripten_releases_tot:
            # Context manager added so the file handle is closed promptly.
            with open(tot_path(), 'w') as f:
                f.write(emscripten_releases_tot)
    else:
        print('Update complete, however skipped fetching the Emscripten tags, since git was not found, which is necessary for update-tags.')
        if WINDOWS:
            print("Please install git by typing 'emsdk install git-1.9.4', or alternatively by installing it manually from http://git-scm.com/downloads . If you install git manually, remember to add it to PATH.")
        elif OSX:
            print("Please install git from http://git-scm.com/ , or by installing XCode and then the XCode Command Line Tools (see http://stackoverflow.com/questions/9329243/xcode-4-4-command-line-tools ).")
        elif LINUX:
            # Fixed typo in the user-facing message: 'Pease' -> 'Please'.
            print("Please install git using your package manager, see http://git-scm.com/book/en/Getting-Started-Installing-Git .")
        else:
            print("Please install git.")
    return
def is_emsdk_sourced_from_github():
    """True when this emsdk checkout is a git clone (a .git directory exists)."""
    git_dir = os.path.join(emsdk_path(), '.git')
    return os.path.exists(git_dir)
def update_emsdk():
    """Update the emsdk scripts by downloading the latest release zip, then
    refresh the tag information. Refuses to run on a git checkout."""
    if is_emsdk_sourced_from_github():
        print('You seem to have bootstrapped Emscripten SDK by cloning from GitHub. In this case, use "git pull" instead of "emsdk update" to update emsdk. (Not doing that automatically in case you have local changes)', file=sys.stderr)
        print('Alternatively, use "emsdk update-tags" to refresh the latest list of tags from the different Git repositories.', file=sys.stderr)
        sys.exit(1)
    ok = download_and_unzip(emsdk_zip_download_url, emsdk_path(), download_even_if_exists=True)
    if not ok:
        sys.exit(1)
    fetch_emscripten_tags()
# Lists all legacy (pre-emscripten-releases) tagged versions directly in the Git repositories. These we can pull and compile from source.
def load_legacy_emscripten_tags():
    """Return the list of legacy Emscripten tag names, or [] if the tag
    file is missing or unreadable."""
    try:
        # Context manager closes the handle; previously leaked via
        # open(...).read().
        with open(sdk_path('legacy-emscripten-tags.txt'), 'r') as f:
            return f.read().split('\n')
    except Exception:
        # Narrowed from a bare 'except'; a missing/unreadable file simply
        # means there are no legacy tags available.
        return []


def load_legacy_binaryen_tags():
    """Return the list of legacy Binaryen tag names, or [] if the tag file
    is missing or unreadable."""
    try:
        with open(sdk_path('legacy-binaryen-tags.txt'), 'r') as f:
            return f.read().split('\n')
    except Exception:
        return []
def remove_prefix(s, prefix):
    """Return s with prefix stripped from the front, if present."""
    return s[len(prefix):] if s.startswith(prefix) else s


def remove_suffix(s, suffix):
    """Return s with suffix stripped from the end, if present."""
    return s[:len(s) - len(suffix)] if s.endswith(suffix) else s
# filename should be one of: 'llvm-nightlies-32bit.txt', 'llvm-nightlies-64bit.txt', 'llvm-precompiled-tags-32bit.txt', 'llvm-precompiled-tags-64bit.txt', 'emscripten-nightlies.txt'
def load_file_index_list(filename):
    """Load a newline-separated version index file and return the bare
    version strings, newest first. Returns [] when the file is missing or
    cannot be parsed."""
    try:
        # Context manager closes the handle; previously leaked via
        # open(...).read().
        with open(sdk_path(filename), 'r') as f:
            items = f.read().split('\n')
        # Strip the known filename decorations so only the version remains.
        items = map(lambda x: remove_suffix(remove_suffix(remove_prefix(remove_prefix(x, 'emscripten-llvm-e'), 'emscripten-nightly-'), '.tar.gz'), '.zip').strip(), items)
        items = filter(lambda x: 'latest' not in x and len(x) > 0, items)
        # Sort versions from oldest to newest (the default sort would be lexicographic, i.e. '1.37.1 < 1.37.10 < 1.37.2')
        items = sorted(items, key=version_key)[::-1]
        return items
    except Exception:
        # Narrowed from a bare 'except'; any IO/parse failure means no
        # versions are available from this index.
        return []
def load_llvm_32bit_nightlies():
    # Nightly 32-bit LLVM build identifiers, newest first.
    return load_file_index_list('llvm-nightlies-32bit.txt')


def load_llvm_64bit_nightlies():
    # Nightly 64-bit LLVM build identifiers, newest first.
    return load_file_index_list('llvm-nightlies-64bit.txt')


def load_emscripten_nightlies():
    # Nightly Emscripten build identifiers, newest first.
    return load_file_index_list('emscripten-nightlies.txt')


def load_llvm_precompiled_tags_32bit():
    # Tagged 32-bit LLVM builds with precompiled binaries, newest first.
    return load_file_index_list('llvm-tags-32bit.txt')


def load_llvm_precompiled_tags_64bit():
    # Tagged 64-bit LLVM builds with precompiled binaries, newest first.
    return load_file_index_list('llvm-tags-64bit.txt')
# Load the json info for emscripten-releases.
def load_releases_info():
    """Parse emscripten-releases-tags.txt and return it as a dict.

    Exits the process if the file is missing or contains invalid JSON,
    since nothing else can work without it.
    """
    try:
        # Context manager closes the handle; previously leaked via
        # open(...).read().
        with open(sdk_path('emscripten-releases-tags.txt'), 'r') as f:
            text = f.read()
        return json.loads(text)
    except Exception as e:
        print('Error parsing emscripten-releases-tags.txt!')
        print(str(e))
        sys.exit(1)
# Get a list of tags for emscripten-releases.
def load_releases_tags():
    """Return all known emscripten-releases git hashes, plus the cached
    tip-of-tree hash when one has been fetched."""
    info = load_releases_info()
    tags = list(info['releases'].values())
    # Add the tip-of-tree, if it exists.
    if os.path.exists(tot_path()):
        # Context manager closes the handle; previously leaked via
        # open(...).read().
        with open(tot_path()) as f:
            tot = f.read()
        if tot:
            tags.append(tot)
    return tags
def load_releases_versions():
    """Return all known emscripten-releases version strings."""
    return list(load_releases_info()['releases'].keys())
def is_string(s):
    """True if s is a text string on both Python 2 (str/unicode) and Python 3 (str)."""
    if sys.version_info[0] < 3:
        return isinstance(s, basestring) # noqa
    return isinstance(s, str)
def load_sdk_manifest():
    """Parse emsdk_manifest.json and populate the global 'tools' and 'sdks'
    registries, expanding %tag%/%nightly-...%/%releases-tag% metapackage
    placeholders into one concrete entry per known version.
    """
    global tools, sdks
    try:
        manifest = json.loads(open(sdk_path("emsdk_manifest.json"), "r").read())
    except Exception as e:
        # Without a readable manifest there is nothing to register.
        print('Error parsing emsdk_manifest.json!')
        print(str(e))
        return
    # Version lists used to expand the placeholders below. Where a loader
    # returns newest-first, reversed() puts the newest entries at the END of
    # the list, which is what expand_category_param's is_old logic expects.
    emscripten_tags = load_legacy_emscripten_tags()
    llvm_precompiled_tags_32bit = list(reversed(load_llvm_precompiled_tags_32bit()))
    llvm_precompiled_tags_64bit = list(reversed(load_llvm_precompiled_tags_64bit()))
    llvm_precompiled_tags = llvm_precompiled_tags_32bit + llvm_precompiled_tags_64bit
    binaryen_tags = load_legacy_binaryen_tags()
    llvm_32bit_nightlies = list(reversed(load_llvm_32bit_nightlies()))
    llvm_64bit_nightlies = list(reversed(load_llvm_64bit_nightlies()))
    emscripten_nightlies = list(reversed(load_emscripten_nightlies()))
    releases_tags = load_releases_tags()

    def dependencies_exist(sdk):
        # True when every tool named in sdk.uses is known to the registry.
        for tool_name in sdk.uses:
            tool = find_tool(tool_name)
            if not tool:
                return False
        return True

    def cmp_version(ver, cmp_operand, reference):
        # Compare two version strings numerically with the given operator.
        if cmp_operand == '<=':
            return version_key(ver) <= version_key(reference)
        if cmp_operand == '<':
            return version_key(ver) < version_key(reference)
        if cmp_operand == '>=':
            return version_key(ver) >= version_key(reference)
        if cmp_operand == '>':
            return version_key(ver) > version_key(reference)
        if cmp_operand == '==':
            return version_key(ver) == version_key(reference)
        if cmp_operand == '!=':
            return version_key(ver) != version_key(reference)
        raise Exception('Invalid cmp_operand "' + cmp_operand + '"!')

    def passes_filters(param, ver, filters):
        # Filters are triples like ["tag", "<=", "1.37.22"]; only triples
        # whose first element matches the current placeholder apply.
        for v in filters:
            if v[0] == param and not cmp_version(ver, v[1], v[2]):
                return False
        return True

    # A 'category parameter' is a %foo%-encoded identifier that specifies
    # a class of tools instead of just one tool, e.g. %tag% or %nightly..%
    def expand_category_param(param, category_list, t, is_sdk):
        for i, ver in enumerate(category_list):
            if not ver.strip():
                continue
            # Shallow-copy the template entry, then substitute the
            # placeholder with this concrete version in every string field.
            t2 = copy.copy(t)
            found_param = False
            for p, v in vars(t2).items():
                if is_string(v) and param in v:
                    t2.__dict__[p] = v.replace(param, ver)
                    found_param = True
            if not found_param:
                continue
            # Everything except the last two entries is considered 'old'.
            t2.is_old = i < len(category_list) - 2
            if hasattr(t2, 'uses'):
                t2.uses = [x.replace(param, ver) for x in t2.uses]
            # Filter out expanded tools by version requirements, such as ["tag", "<=", "1.37.22"]
            if hasattr(t2, 'version_filter'):
                passes = passes_filters(param, ver, t2.version_filter)
                if not passes:
                    continue
            if is_sdk:
                if dependencies_exist(t2):
                    if not find_sdk(t2.name):
                        add_sdk(t2)
                    else:
                        debug_print('SDK ' + str(t2) + ' already existed in manifest, not adding twice')
            else:
                if not find_tool(t2.name):
                    add_tool(t2)
                else:
                    debug_print('Tool ' + str(t2) + ' already existed in manifest, not adding twice')

    for tool in manifest['tools']:
        t = Tool(tool)
        if t.compatible_with_this_os():
            if not hasattr(t, 'is_old'):
                t.is_old = False
            # Expand the metapackages that refer to tags or nightlies.
            if '%tag%' in t.version:
                expand_category_param('%tag%', emscripten_tags, t, is_sdk=False)
            elif '%precompiled_tag%' in t.version:
                expand_category_param('%precompiled_tag%', llvm_precompiled_tags, t, is_sdk=False)
            elif '%precompiled_tag32%' in t.version:
                expand_category_param('%precompiled_tag32%', llvm_precompiled_tags_32bit, t, is_sdk=False)
            elif '%precompiled_tag64%' in t.version:
                expand_category_param('%precompiled_tag64%', llvm_precompiled_tags_64bit, t, is_sdk=False)
            elif '%binaryen_tag%' in t.version:
                expand_category_param('%binaryen_tag%', binaryen_tags, t, is_sdk=False)
            elif '%nightly-llvm-64bit%' in t.version:
                expand_category_param('%nightly-llvm-64bit%', llvm_64bit_nightlies, t, is_sdk=False)
            elif '%nightly-llvm-32bit%' in t.version:
                expand_category_param('%nightly-llvm-32bit%', llvm_32bit_nightlies, t, is_sdk=False)
            elif '%nightly-emscripten%' in t.version:
                expand_category_param('%nightly-emscripten%', emscripten_nightlies, t, is_sdk=False)
            elif '%releases-tag%' in t.version:
                expand_category_param('%releases-tag%', releases_tags, t, is_sdk=False)
            else:
                add_tool(t)

    for sdk_str in manifest['sdks']:
        # SDK entries reuse the Tool representation; all SDKs use id 'sdk'.
        sdk_str['id'] = 'sdk'
        sdk = Tool(sdk_str)
        if sdk.compatible_with_this_os():
            if not hasattr(sdk, 'is_old'):
                sdk.is_old = False
            if '%tag%' in sdk.version:
                expand_category_param('%tag%', emscripten_tags, sdk, is_sdk=True)
            elif '%precompiled_tag%' in sdk.version:
                expand_category_param('%precompiled_tag%', llvm_precompiled_tags, sdk, is_sdk=True)
            elif '%precompiled_tag32%' in sdk.version:
                expand_category_param('%precompiled_tag32%', llvm_precompiled_tags_32bit, sdk, is_sdk=True)
            elif '%precompiled_tag64%' in sdk.version:
                expand_category_param('%precompiled_tag64%', llvm_precompiled_tags_64bit, sdk, is_sdk=True)
            elif '%nightly-llvm-64bit%' in sdk.version:
                expand_category_param('%nightly-llvm-64bit%', llvm_64bit_nightlies, sdk, is_sdk=True)
            elif '%nightly-llvm-32bit%' in sdk.version:
                expand_category_param('%nightly-llvm-32bit%', llvm_32bit_nightlies, sdk, is_sdk=True)
            elif '%nightly-emscripten%' in sdk.version:
                expand_category_param('%nightly-emscripten%', emscripten_nightlies, sdk, is_sdk=True)
            elif '%releases-tag%' in sdk.version:
                expand_category_param('%releases-tag%', releases_tags, sdk, is_sdk=True)
            else:
                add_sdk(sdk)
# Tests if the two given tools can be active at the same time.
# Currently only a simple check for name for same tool with different versions,
# possibly adds more logic in the future.
def can_simultaneously_activate(tool1, tool2):
    """Two tools conflict exactly when they share the same id."""
    return not (tool1.id == tool2.id)
def remove_nonexisting_tools(tool_list, log_errors=True):
    """Drop tools that are not installed from tool_list.

    Mutates tool_list in place (callers may rely on that) and also returns
    it. A warning is printed per skipped tool unless log_errors is False.
    """
    kept = []
    for tool in tool_list:
        if tool.is_installed():
            kept.append(tool)
        elif log_errors:
            print("Warning: The SDK/tool '" + str(tool) + "' cannot be activated since it is not installed! Skipping this tool...")
    # Slice assignment preserves list identity, mirroring the in-place
    # pop()-based removal this replaces.
    tool_list[:] = kept
    return tool_list
# Expands dependencies for each tool, and removes ones that don't exist.
def process_tool_list(tools_to_activate, log_errors=True):
    """Normalize a list of tools to activate.

    Splices each tool's recursive dependencies in immediately before the
    tool itself, drops tools that are not installed, and removes earlier
    entries that conflict with later ones (later entries win). Returns the
    processed list.
    """
    i = 0
    # Gather dependencies for each tool
    while i < len(tools_to_activate):
        tool = tools_to_activate[i]
        deps = tool.recursive_dependencies()
        # Insert the dependencies before the tool that needs them, then
        # skip past both the dependencies and the tool itself.
        tools_to_activate = tools_to_activate[:i] + deps + tools_to_activate[i:]
        i += len(deps) + 1
    tools_to_activate = remove_nonexisting_tools(tools_to_activate, log_errors=log_errors)
    # Remove conflicting tools
    i = 0
    while i < len(tools_to_activate):
        j = 0
        while j < i:
            secondary_tool = tools_to_activate[j]
            primary_tool = tools_to_activate[i]
            if not can_simultaneously_activate(primary_tool, secondary_tool):
                # Drop the earlier (secondary) entry; decrement both
                # indices to compensate for the removal.
                tools_to_activate.pop(j)
                j -= 1
                i -= 1
            j += 1
        i += 1
    return tools_to_activate
def run_emcc(tools_to_activate):
    """Invoke emcc once (for the first active emscripten tool found) so it
    initializes itself; output is suppressed. Returns after the first run."""
    for tool in tools_to_activate:
        activated_path = getattr(tool, 'activated_path', None)
        if not (activated_path and activated_path.endswith('/emscripten')):
            continue
        activated_path = to_native_path(tool.expand_vars(tool.activated_path))
        emcc_path = os.path.join(activated_path, 'emcc.py')
        if os.path.exists(emcc_path):
            debug_print('Calling emcc to initialize it')
            subprocess.call([sys.executable, emcc_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            return
def emscripten_cache_directory():
    # The emscripten cache lives alongside the active .emscripten config
    # (emscripten_config_directory is a module-level global).
    return os.path.join(emscripten_config_directory, ".emscripten_cache")
# Copy over any emscripten cache contents that were pregenerated. This avoids the user
# needing to immediately build libc etc. on first run.
def copy_pregenerated_cache(tools_to_activate):
    """Copy each tool's shipped 'pregenerated_cache' directory (if any)
    into the active emscripten cache directory."""
    for tool in tools_to_activate:
        cache_name = getattr(tool, 'pregenerated_cache', None)
        if not cache_name:
            continue
        # Finish the install of an emscripten-releases build.
        install_path = to_native_path(sdk_path(tool.expand_vars(tool.install_path)))
        in_cache = os.path.join(install_path, 'lib', cache_name)
        if not os.path.exists(in_cache):
            continue
        out_cache = os.path.join(emscripten_cache_directory(), cache_name)
        os.makedirs(out_cache)
        for filename in os.listdir(in_cache):
            debug_print('Copying ' + filename + ' to cache dir')
            shutil.copy2(os.path.join(in_cache, filename),
                         os.path.join(out_cache, filename))
# Reconfigure .emscripten to choose the currently activated toolset, set PATH and other environment variables.
# Returns the full list of deduced tools that are now active.
def set_active_tools(tools_to_activate, permanently_activate):
    """Activate the given tools: rewrite .emscripten, prime the cache, and
    (on Windows) emit/apply environment changes.

    Returns the expanded list of tools that are now active (including
    dependencies added by process_tool_list).
    """
    tools_to_activate = process_tool_list(tools_to_activate, log_errors=True)
    generate_dot_emscripten(tools_to_activate)
    # Generating .emscripten will cause emcc to clear the cache on first run (emcc sees that the file has
    # changed, since we write it here in the emsdk, and it never saw it before; so it clears the cache
    # as it assumes a new config file means system libraries may need rebuilding). To avoid emcc's clearing
    # wiping out the pregenerated cache contents we want to copy in, run emcc here, then copy the cache
    # contents.
    run_emcc(tools_to_activate)
    copy_pregenerated_cache(tools_to_activate)
    # Construct a .bat script that will be invoked to set env. vars and PATH
    if WINDOWS:
        env_string = construct_env(tools_to_activate, False)
        open(EMSDK_SET_ENV, 'w').write(env_string)
    # Apply environment variables to global all users section.
    if WINDOWS and permanently_activate:
        # Individual env. vars
        for tool in tools_to_activate:
            tool.win_activate_env_vars(permanently_activate=True)
        # PATH variable
        newpath, added_items = adjusted_path(tools_to_activate, system_path_only=True)
        if newpath != os.environ['PATH']: # Are there any actual changes?
            win_set_environment_variable('PATH', newpath, system=True)
    if len(tools_to_activate) > 0:
        # Local 'tools' deliberately shadows the global registry here; it is
        # only used for the summary printout below.
        tools = [x for x in tools_to_activate if not x.is_sdk]
        print('\nSet the following tools as active:\n ' + '\n '.join(map(lambda x: str(x), tools)))
        print('')
    return tools_to_activate
def currently_active_sdk():
    """Return the newest SDK that reports itself active, or None."""
    return next((sdk for sdk in reversed(sdks) if sdk.is_active()), None)


def currently_active_tools():
    """Return all registered tools that report themselves active."""
    return [tool for tool in tools if tool.is_active()]
# http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order
def unique_items(seq):
    """Return seq's items as a list with duplicates removed, preserving
    first-seen order."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
# Tests if a path is contained in the given list, but with separators normalized.
def normalized_contains(lst, elem):
    """True if elem equals any entry of lst after separator normalization."""
    target = to_unix_path(elem)
    return any(to_unix_path(entry) == target for entry in lst)
def to_msys_path(p):
    """Convert a Windows-style path like 'C:/foo' to MSYS form '/c/foo'."""
    unix = to_unix_path(p)
    converted = re.sub(r'([a-zA-Z]):/(.*)', r'/\1/\2', unix)
    # MSYS drive letters are lowercase: '/C/...' -> '/c/...'.
    if len(converted) > 3 and converted[0] == '/' and converted[2] == '/':
        converted = converted[0] + converted[1].lower() + converted[2:]
    return converted
# Looks at the current PATH and adds and removes entries so that the PATH reflects
# the set of given active tools.
def adjusted_path(tools_to_activate, log_additions=False, system_path_only=False):
    """Compute the PATH that activates the given tools.

    Returns (new_path_string, newly_added_emsdk_entries). Existing non-emsdk
    PATH entries are preserved; stale emsdk entries are dropped.
    """
    # These directories should be added to PATH
    path_add = get_required_path(tools_to_activate)
    # These already exist.
    if WINDOWS and not MSYS:
        # On native Windows, read PATH from the registry (system plus,
        # optionally, the per-user part) instead of the process env.
        existing_path = win_get_environment_variable('PATH', system=True)
        if not system_path_only:
            current_user_path = win_get_environment_variable('PATH', system=False)
            if current_user_path:
                existing_path += ENVPATH_SEPARATOR + current_user_path
        existing_path = existing_path.split(ENVPATH_SEPARATOR)
        # Fix up after potential changes made by bug https://github.com/kripken/emscripten/issues/4121
        system_root = os.environ['SystemRoot'].lower()
        for i in range(len(existing_path)):
            p = existing_path[i]
            if p.lower() == system_root:
                p = '%SystemRoot%'
            elif (system_root + '\\system32') in p.lower():
                p = '%SystemRoot%\\system32'
            elif (system_root + '\\system32\\wbem') in p.lower():
                p = '%SystemRoot%\\System32\\Wbem'
            # BUGFIX: this branch previously spelled the path with '\v',
            # which Python parses as a vertical-tab escape, so the match
            # could never succeed and the replacement embedded a control
            # character. Escaped to '\\v' below.
            elif (system_root + '\\system32\\windowspowershell\\v1.0') in p.lower():
                p = '%SystemRoot%\\System32\\WindowsPowerShell\\v1.0\\'
            existing_path[i] = p
    else:
        existing_path = os.environ['PATH'].split(ENVPATH_SEPARATOR)
    emsdk_root_path = to_unix_path(emsdk_path())
    existing_emsdk_tools = [item for item in existing_path if to_unix_path(item).startswith(emsdk_root_path)]
    new_emsdk_tools = [item for item in path_add if not normalized_contains(existing_emsdk_tools, item)]
    # Existing non-emsdk tools
    existing_path = [item for item in existing_path if not to_unix_path(item).startswith(emsdk_root_path)]
    new_path = [item for item in path_add if not normalized_contains(existing_path, item)]
    whole_path = unique_items(new_path + existing_path)
    if MSYS:
        # XXX Hack: If running native Windows Python in MSYS prompt where PATH entries look like "/c/Windows/System32", os.environ['PATH']
        # in Python will transform to show them as "C:\\Windows\\System32", so need to reconvert path delimiter back to forward slashes.
        whole_path = list(map(to_msys_path, whole_path))
        new_emsdk_tools = list(map(to_msys_path, new_emsdk_tools))
    return ((':' if MSYS else ENVPATH_SEPARATOR).join(whole_path), new_emsdk_tools)
def construct_env(tools_to_activate, permanent):
    """Build a shell-specific script snippet (PowerShell, Windows batch or
    POSIX sh, chosen by the POWERSHELL/WINDOWS/MSYS globals) that exports
    PATH and the environment variables required by the given active tools.
    Returns the snippet as a string; also prints a summary of the changes.
    """
    global emscripten_config_directory
    env_string = ''
    newpath, added_path = adjusted_path(tools_to_activate)
    # Dont permanently add to PATH, since this will break the whole system if there are more than 1024 chars in PATH.
    # (SETX truncates to set only 1024 chars)
    # if permanent:
    #   print('SETX PATH "' + newpath + '"')
    # else:
    if os.environ['PATH'] != newpath: # Don't bother setting the path if there are no changes.
        if POWERSHELL:
            env_string += '$env:PATH="' + newpath + '"\n'
        elif WINDOWS and not MSYS:
            env_string += 'SET PATH=' + newpath + '\n'
        else:
            env_string += 'export PATH="' + newpath + '"\n'
        if len(added_path) > 0:
            print('Adding directories to PATH:')
            for item in added_path:
                print('PATH += ' + item)
            print('')
    env_vars_to_add = []
    # A core variable EMSDK points to the root of Emscripten SDK directory.
    env_vars_to_add += [('EMSDK', to_unix_path(emsdk_path()))]
    em_config_path = os.path.normpath(dot_emscripten_path())
    if 'EM_CONFIG' not in os.environ or to_unix_path(os.environ['EM_CONFIG']) != to_unix_path(em_config_path):
        env_vars_to_add += [('EM_CONFIG', em_config_path)]
    if emscripten_config_directory == emsdk_path():
        # Embedded mode: the cache lives inside the SDK directory too.
        em_cache_dir = sdk_path('.emscripten_cache')
        if 'EM_CACHE' not in os.environ or to_unix_path(os.environ['EM_CACHE']) != to_unix_path(em_cache_dir):
            env_vars_to_add += [('EM_CACHE', em_cache_dir)]
        mkdir_p(em_cache_dir)
    # Collect each tool's declared environment (KEY=VALUE strings).
    for tool in tools_to_activate:
        envs = tool.activated_environment()
        for env in envs:
            key, value = parse_key_value(env)
            value = to_native_path(tool.expand_vars(value))
            if key not in os.environ or to_unix_path(os.environ[key]) != to_unix_path(value): # Don't set env. vars which are already set to the correct value.
                env_vars_to_add += [(key, value)]
    if len(env_vars_to_add) > 0:
        print('Setting environment variables:')
        for key, value in env_vars_to_add:
            if POWERSHELL:
                env_string += '$env:' + key + '="' + value + '"\n'
            elif WINDOWS and not MSYS:
                if permanent:
                    env_string += 'SETX ' + key + ' "' + value + '"\n'
                else:
                    env_string += 'SET ' + key + '=' + value + '\n'
            else:
                env_string += 'export ' + key + '="' + value + '"\n'
            print(key + ' = ' + value)
        print('')
    return env_string
def silentremove(filename):
    """Remove filename; a missing file is ignored, other OS errors propagate."""
    try:
        os.remove(filename)
    except OSError as err:
        if err.errno == errno.ENOENT:
            return
        raise
def main():
global emscripten_config_directory, BUILD_FOR_TESTING, ENABLE_LLVM_ASSERTIONS, TTY_OUTPUT
if len(sys.argv) <= 1 or sys.argv[1] == 'help' or sys.argv[1] == '--help':
if len(sys.argv) <= 1:
print(' emsdk: No command given. Please call one of the following:')
else:
print(' emsdk: Available commands:')
print('''
emsdk list [--old] [--uses] - Lists all available SDKs and tools and their
current installation status. With the --old
parameter, also historical versions are
shown. If --uses is passed, displays the
composition of different SDK packages and
dependencies.
emsdk update - Updates emsdk to the newest version, and also
runs 'update-tags' (below). If you have
bootstrapped emsdk via cloning directly from
GitHub, call "git pull" instead to update emsdk.
emsdk update-tags - Fetches the most up to date list of available
Emscripten tagged and other releases from the
servers.
emsdk install [options] <tool 1> <tool 2> <tool 3> ...
- Downloads and installs given tools or SDKs.
Options can contain:
-j<num>: Specifies the number of cores to use when
building the tool. Default: use one less
than the # of detected cores.
--build=<type>: Controls what kind of build of LLVM to
perform. Pass either 'Debug', 'Release',
'MinSizeRel' or 'RelWithDebInfo'. Default:
'Release' for LLVM master branch, and
'RelWithDebInfo' for LLVM incoming branch.
--generator=<type>: Specifies the CMake Generator to be used
during the build. Possible values are the
same as what your CMake supports and whether
the generator is valid depends on the tools
you have installed. Defaults to 'Unix Makefiles'
on *nix systems. If generator name is multiple
words, enclose with single or double quotes.
--shallow: When installing tools from one of the git
development branches 'master' or 'incoming',
this parameter can be passed to perform a
shallow git clone instead of a full one.
This reduces the amount of network transfer
that is needed. This option should only be
used when you are interested in downloading
one of the development branches, but are not
looking to develop Emscripten yourself.
Default: disabled, i.e. do a full clone.
--build-tests: If enabled, LLVM is built with internal tests
included. Pass this to enable running test
other.test_llvm_lit in the Emscripten test
suite. Default: disabled.
--enable-assertions: If specified, LLVM is built with assert()
checks enabled. Useful for development
purposes. Default: Enabled for 'incoming'
branch, disabled for 'master' branch.
--disable-assertions: Forces assertions off during the build.
--vs2013/--vs2015/--vs2017: If building from source, overrides to build
using the specified compiler. When installing
precompiled packages, this has no effect.
Note: The same compiler specifier must be
passed to the emsdk activate command to
activate the desired version.
Notes on building from source:
To pass custom CMake directives when configuring
LLVM build, specify the environment variable
LLVM_CMAKE_ARGS="param1=value1,param2=value2"
in the environment where the build is invoked.
See README.md for details.
emsdk uninstall <tool/sdk> - Removes the given tool or SDK from disk.''')
if WINDOWS:
print('''
emsdk activate [--global] [--embedded] [--build=type] [--vs2013/--vs2015/--vs2017] <tool/sdk>
- Activates the given tool or SDK in the
environment of the current shell. If the
--global option is passed, the registration
is done globally to all users in the system
environment. If the --embedded option is
passed, all Emcripten configuration files as
well as the temp, cache and ports directories
are located inside the Emscripten SDK
directory rather than the user home
directory. If a custom compiler version was
used to override the compiler to use, pass
the same --vs2013/--vs2015/--vs2017 parameter
here to choose which version to activate.
emcmdprompt.bat - Spawns a new command prompt window with the
Emscripten environment active.''')
else:
print(''' emsdk activate [--embedded] [--build=type] <tool/sdk>
- Activates the given tool or SDK in the
environment of the current shell. If the
--embedded option is passed, all Emcripten
configuration files as well as the temp, cache
and ports directories are located inside the
Emscripten SDK directory rather than the user
home directory.''')
print('''
Both commands 'install' and 'activate' accept an optional parameter
'--build=type', which can be used to override what kind of installation
or activation to perform. Possible values for type are Debug, Release,
MinSizeRel or RelWithDebInfo. Note: When overriding a custom build type,
be sure to match the same --build= option to both 'install' and
'activate' commands and the invocation of 'emsdk_env', or otherwise
these commands will default to operating on the default build types,
which are Release for the 'master' SDK, and RelWithDebInfo for the
'incoming' SDK.''')
return 1
# Extracts a boolean command line argument from sys.argv and returns True if it was present
def extract_bool_arg(name):
old_argv = sys.argv
sys.argv = list(filter(lambda a: a != name, sys.argv))
return len(sys.argv) != len(old_argv)
arg_old = extract_bool_arg('--old')
arg_uses = extract_bool_arg('--uses')
arg_global = extract_bool_arg('--global')
arg_embedded = extract_bool_arg('--embedded')
arg_notty = extract_bool_arg('--notty')
if arg_notty:
TTY_OUTPUT = False
cmd = sys.argv[1]
# On first run when tag list is not present, populate it to bootstrap.
if (cmd == 'install' or cmd == 'list') and not os.path.isfile(sdk_path('llvm-tags-64bit.txt')):
fetch_emscripten_tags()
load_dot_emscripten()
load_sdk_manifest()
# Process global args
for i in range(2, len(sys.argv)):
if sys.argv[i].startswith('--generator='):
build_generator = re.match(r'''^--generator=['"]?([^'"]+)['"]?$''', sys.argv[i])
if build_generator:
global CMAKE_GENERATOR
CMAKE_GENERATOR = build_generator.group(1)
sys.argv[i] = ''
else:
print("Cannot parse CMake generator string: " + sys.argv[i] + ". Try wrapping generator string with quotes", file=sys.stderr)
return 1
elif sys.argv[i].startswith('--build='):
build_type = re.match(r'^--build=(.+)$', sys.argv[i])
if build_type:
global CMAKE_BUILD_TYPE_OVERRIDE
build_type = build_type.group(1)
build_types = ['Debug', 'MinSizeRel', 'RelWithDebInfo', 'Release']
try:
build_type_index = [x.lower() for x in build_types].index(build_type.lower())
CMAKE_BUILD_TYPE_OVERRIDE = build_types[build_type_index]
sys.argv[i] = ''
except:
print('Unknown CMake build type "' + build_type + '" specified! Please specify one of ' + str(build_types), file=sys.stderr)
return 1
else:
print("Invalid command line parameter " + sys.argv[i] + ' specified!', file=sys.stderr)
return 1
sys.argv = [x for x in sys.argv if not len(x) == 0]
releases_info = load_releases_info()['releases']
# Replace meta-packages with the real package names.
if cmd in ('update', 'install', 'activate'):
for i in range(2, len(sys.argv)):
arg = sys.argv[i]
if arg in ('latest', 'sdk-latest', 'latest-64bit', 'sdk-latest-64bit', 'latest-fastcomp', 'latest-releases-fastcomp'):
sys.argv[i] = str(find_latest_releases_sdk('fastcomp'))
elif arg in ('latest-upstream', 'latest-clang-upstream', 'latest-releases-upstream'):
sys.argv[i] = str(find_latest_releases_sdk('upstream'))
elif arg == 'tot-upstream':
sys.argv[i] = str(find_tot_sdk('upstream'))
elif arg in ('tot-fastcomp', 'sdk-nightly-latest'):
sys.argv[i] = str(find_tot_sdk('fastcomp'))
else:
# check if it's a release handled by an emscripten-releases version,
# and if so use that by using the right hash. we support a few notations,
# x.y.z[-(upstream|fastcomp_])
# sdk-x.y.z[-(upstream|fastcomp_])-64bit
# TODO: support short notation for old builds too?
upstream = False
if '-upstream' in arg:
arg = arg.replace('-upstream', '')
upstream = True
elif '-fastcomp' in arg:
arg = arg.replace('-fastcomp', '')
upstream = False
arg = arg.replace('sdk-', '').replace('-64bit', '').replace('tag-', '')
release_hash = releases_info.get(arg, None) or releases_info.get('sdk-' + arg + '-64bit')
if release_hash:
sys.argv[i] = 'sdk-releases-%s-%s-64bit' % ('upstream' if upstream else 'fastcomp', release_hash)
if cmd == 'list':
print('')
if (LINUX or OSX or WINDOWS) and (ARCH == 'x86' or ARCH == 'x86_64'):
print('The *recommended* precompiled SDK download is %s (%s).' % (find_latest_releases_version(), find_latest_releases_hash()))
print()
print('To install/activate it, use one of:')
print(' latest [default (fastcomp) backend]')
print(' latest-upstream [upstream LLVM wasm backend]')
print('')
print('Those are equivalent to installing/activating the following:')
print(' %s' % find_latest_releases_version())
print(' %s-upstream' % find_latest_releases_version())
print('')
else:
print('Warning: your platform does not have precompiled SDKs available.')
print('You may install components from source.')
print('')
print('All recent (non-legacy) installable versions are:')
releases_versions = sorted(load_releases_versions())
releases_versions.reverse()
for ver in releases_versions:
print(' %s' % ver)
print(' %s-upstream' % ver)
print()
has_partially_active_tools = [False] # Use array to work around the lack of being able to mutate from enclosing function.
if len(sdks) > 0:
def find_sdks(needs_compilation):
s = []
for sdk in sdks:
if sdk.is_old and not arg_old:
continue
if sdk.needs_compilation() == needs_compilation:
s += [sdk]
return s
def print_sdks(s):
for sdk in s:
installed = '\tINSTALLED' if sdk.is_installed() else ''
active = '*' if sdk.is_active() else ' '
print(' ' + active + ' {0: <25}'.format(str(sdk)) + installed)
if arg_uses:
for dep in sdk.uses:
print(' - {0: <25}'.format(dep))
print('')
print('The additional following precompiled SDKs are also available for download:')
print_sdks(find_sdks(False))
print('The following SDKs can be compiled from source:')
print_sdks(find_sdks(True))
if len(tools) > 0:
def find_tools(needs_compilation):
t = []
for tool in tools:
if tool.is_old and not arg_old:
continue
if tool.needs_compilation() != needs_compilation:
continue
t += [tool]
return t
def print_tools(t):
for tool in t:
if tool.is_old and not arg_old:
continue
if tool.can_be_installed() is True:
installed = '\tINSTALLED' if tool.is_installed() else ''
else:
installed = '\tNot available: ' + tool.can_be_installed()
tool_is_active = tool.is_active()
tool_is_env_active = tool_is_active and tool.is_env_active()
if tool_is_env_active:
active = ' * '
elif tool_is_active:
active = '(*)'
has_partially_active_tools[0] = has_partially_active_tools[0] or True
else:
active = ' '
print(' ' + active + ' {0: <25}'.format(str(tool)) + installed)
print('')
print('The following precompiled tool packages are available for download:')
print_tools(find_tools(needs_compilation=False))
print('The following tools can be compiled from source:')
print_tools(find_tools(needs_compilation=True))
else:
if is_emsdk_sourced_from_github():
print("There are no tools available. Run 'git pull' followed by 'emsdk update-tags' to fetch the latest set of tools.")
else:
print("There are no tools available. Run 'emsdk update' to fetch the latest set of tools.")
print('')
print('Items marked with * are activated for the current user.')
if has_partially_active_tools[0]:
env_cmd = 'emsdk_env.bat' if WINDOWS else 'source ./emsdk_env.sh'
print('Items marked with (*) are selected for use, but your current shell environment is not configured to use them. Type "' + env_cmd + '" to set up your current shell to use them' + (', or call "emsdk activate --global <name_of_sdk>" to permanently activate them.' if WINDOWS else '.'))
if not arg_old:
print('')
print("To access the historical archived versions, type 'emsdk list --old'")
print('')
if is_emsdk_sourced_from_github():
print('Run "git pull" followed by "./emsdk update-tags" to pull in the latest list.')
else:
print('Run "./emsdk update" to pull in the latest list.')
return 0
elif cmd == 'construct_env':
if len(sys.argv) == 2:
outfile = EMSDK_SET_ENV
silentremove(EMSDK_SET_ENV) # Clean up old temp file up front, in case of failure later before we get to write out the new one.
else:
outfile = sys.argv[2]
tools_to_activate = currently_active_tools()
tools_to_activate = process_tool_list(tools_to_activate, log_errors=True)
env_string = construct_env(tools_to_activate, len(sys.argv) >= 3 and 'perm' in sys.argv[2])
open(outfile, 'w').write(env_string)
if UNIX:
os.chmod(outfile, 0o755)
return 0
elif cmd == 'update':
update_emsdk()
silentremove(sdk_path(EMSDK_SET_ENV)) # Clean up litter after old emsdk update which may have left this temp file around.
return 0
elif cmd == 'update-tags':
fetch_emscripten_tags()
return 0
elif cmd == 'activate':
if arg_global:
print('Registering active Emscripten environment globally for all users.')
print('')
if arg_embedded:
# Activating the emsdk tools locally relative to Emscripten SDK directory.
emscripten_config_directory = emsdk_path()
print('Writing .emscripten configuration file to Emscripten SDK directory ' + emscripten_config_directory)
else:
print('Writing .emscripten configuration file to user home directory ' + emscripten_config_directory)
# Remove .emscripten from emsdk dir, since its presence is used to detect whether emsdk is activate in embedded mode or not.
try:
os.remove(os.path.join(emsdk_path(), ".emscripten"))
except:
pass
sys.argv = [x for x in sys.argv if not x.startswith('--')]
tools_to_activate = currently_active_tools()
for i in range(2, len(sys.argv)):
tool = find_tool(sys.argv[i])
if tool is None:
tool = find_sdk(sys.argv[i])
if tool is None:
print("Error: No tool or SDK found by name '" + sys.argv[i] + "'.")
return 1
tools_to_activate += [tool]
if len(tools_to_activate) == 0:
print('No tools/SDKs specified to activate! Usage:\n emsdk activate tool/sdk1 [tool/sdk2] [...]')
return 1
tools_to_activate = set_active_tools(tools_to_activate, permanently_activate=arg_global)
if len(tools_to_activate) == 0:
print('No tools/SDKs found to activate! Usage:\n emsdk activate tool/sdk1 [tool/sdk2] [...]')
return 1
if WINDOWS and not arg_global:
print('The changes made to environment variables only apply to the currently running shell instance. Use the \'emsdk_env.bat\' to re-enter this environment later, or if you\'d like to permanently register this environment globally to all users in Windows Registry, rerun this command with the option --global.')
return 0
elif cmd == 'install':
# Process args
for i in range(2, len(sys.argv)):
if sys.argv[i].startswith('-j'):
multicore = re.match(r'^-j(\d+)$', sys.argv[i])
if multicore:
global CPU_CORES
CPU_CORES = int(multicore.group(1))
sys.argv[i] = ''
else:
print("Invalid command line parameter " + sys.argv[i] + ' specified!', file=sys.stderr)
return 1
elif sys.argv[i] == '--shallow':
global GIT_CLONE_SHALLOW
GIT_CLONE_SHALLOW = True
sys.argv[i] = ''
elif sys.argv[i] == '--build-tests':
BUILD_FOR_TESTING = True
sys.argv[i] = ''
elif sys.argv[i] == '--enable-assertions':
ENABLE_LLVM_ASSERTIONS = 'ON'
sys.argv[i] = ''
elif sys.argv[i] == '--disable-assertions':
ENABLE_LLVM_ASSERTIONS = 'OFF'
sys.argv[i] = ''
sys.argv = [x for x in sys.argv if not len(x) == 0]
if len(sys.argv) <= 2:
print("Missing parameter. Type 'emsdk install <tool name>' to install a tool or an SDK. Type 'emsdk list' to obtain a list of available tools. Type 'emsdk install latest' to automatically install the newest version of the SDK.")
return 1
for t in sys.argv[2:]:
tool = find_tool(t)
if tool is None:
tool = find_sdk(t)
if tool is None:
print("Error: No tool or SDK found by name '" + t + "'.")
return 1
success = tool.install()
if not success:
return 1
return 0
elif cmd == 'uninstall':
if len(sys.argv) <= 2:
print("Syntax error. Call 'emsdk uninstall <tool name>'. Call 'emsdk list' to obtain a list of available tools.")
return 1
tool = find_tool(sys.argv[2])
if tool is None:
print("Error: Tool by name '" + sys.argv[2] + "' was not found.")
return 1
tool.uninstall()
return 0
print("Unknown command '" + cmd + "' given! Type 'emsdk help' to get a list of commands.")
return 1
# Script entry point: run main() and propagate its return code as the process exit status.
if __name__ == '__main__':
    sys.exit(main())
| 40.656478 | 323 | 0.6712 |
from __future__ import print_function
import copy
import errno
import json
import multiprocessing
import os
import os.path
import platform
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import zipfile
if sys.version_info >= (3,):
from urllib.parse import urljoin
from urllib.request import urlopen
import functools
else:
from urlparse import urljoin
from urllib2 import urlopen
# When EMSDK_DEV is set in the environment, packages are downloaded from the
# staging source repository instead.
EMSDK_DEV = bool(os.getenv('EMSDK_DEV')) if os.getenv('EMSDK_DEV') is not None else False
if EMSDK_DEV:
    print('EMSDK_DEV active.')
    emsdk_master_server = 'http://clb.demon.fi/emscripten_dev/packages/'
else:
    emsdk_master_server = 'https://storage.googleapis.com/webassembly/emscripten-releases-builds/deps/'
emsdk_packages_url = emsdk_master_server

# Repository and download URL template used by the emscripten-releases flow.
emscripten_releases_repo = 'https://chromium.googlesource.com/emscripten-releases'
emscripten_releases_download_url_template = "https://storage.googleapis.com/webassembly/emscripten-releases-builds/%s/%s/wasm-binaries.%s"
emsdk_zip_download_url = 'https://github.com/emscripten-core/emsdk/archive/master.zip'

# Subdirectory (relative to the emsdk root) where downloaded archives are cached.
zips_subdir = 'zips/'

# Enable this to do very verbose printing about the different steps that are being run. Useful for debugging.
VERBOSE = int(os.getenv('EMSDK_VERBOSE', '0'))
# Interactive (carriage-return style) progress output; off when stdout is not a TTY or EMSDK_NOTTY is set.
TTY_OUTPUT = not os.getenv('EMSDK_NOTTY', not sys.stdout.isatty())
POWERSHELL = bool(os.getenv('EMSDK_POWERSHELL'))

# --- Host platform detection -------------------------------------------------
WINDOWS = False
if os.name == 'nt' or (os.getenv('SYSTEMROOT') is not None and 'windows' in os.getenv('SYSTEMROOT').lower()) or (os.getenv('COMSPEC') is not None and 'windows' in os.getenv('COMSPEC').lower()):
    WINDOWS = True
    ENVPATH_SEPARATOR = ';'

MSYS = False
if os.getenv('MSYSTEM'):
    MSYS = True
    # https://stackoverflow.com/questions/37460073/msys-vs-mingw-internal-environment-variables
    if os.getenv('MSYSTEM') != 'MSYS' and os.getenv('MSYSTEM') != 'MINGW64':
        print('Warning: MSYSTEM environment variable is present, and is set to "' + os.getenv('MSYSTEM') + '". This shell has not been tested with emsdk and may not work.')

OSX = False
if platform.mac_ver()[0] != '':
    OSX = True
    ENVPATH_SEPARATOR = ':'

LINUX = False
if not OSX and (platform.system() == 'Linux' or os.name == 'posix'):
    LINUX = True
    ENVPATH_SEPARATOR = ':'

UNIX = (OSX or LINUX)

# --- CPU architecture detection ----------------------------------------------
ARCH = 'unknown'
# platform.machine() may return AMD64 on windows, so standardize the case.
machine = platform.machine().lower()
if machine.startswith('x64') or machine.startswith('amd64') or machine.startswith('x86_64'):
    ARCH = 'x86_64'
elif machine.endswith('86'):
    ARCH = 'x86'
elif machine.startswith('aarch64') or machine.lower().startswith('arm64'):
    ARCH = 'aarch64'
elif platform.machine().startswith('arm'):
    ARCH = 'arm'
else:
    print("Warning: unknown machine architecture " + machine)
    print()

# Don't saturate all cores to not steal the whole system, but be aggressive.
CPU_CORES = int(os.environ.get('EMSDK_NUM_CORES', max(multiprocessing.cpu_count() - 1, 1)))

# Build-configuration knobs; overridden by command line flags parsed in main().
CMAKE_BUILD_TYPE_OVERRIDE = None
GIT_CLONE_SHALLOW = False
BUILD_FOR_TESTING = False
ENABLE_LLVM_ASSERTIONS = 'auto'
def os_name():
    """Return the short host-OS identifier: 'win', 'linux' or 'osx'."""
    for is_host, name in ((WINDOWS, 'win'), (LINUX, 'linux'), (OSX, 'osx')):
        if is_host:
            return name
    raise Exception('unknown OS')
def os_name_for_emscripten_releases():
    """Return the OS identifier used in emscripten-releases download URLs ('mac', not 'osx')."""
    for is_host, name in ((WINDOWS, 'win'), (LINUX, 'linux'), (OSX, 'mac')):
        if is_host:
            return name
    raise Exception('unknown OS')
def debug_print(msg, **args):
    """print() wrapper that stays silent unless verbose mode (EMSDK_VERBOSE) is on."""
    if not VERBOSE:
        return
    print(msg, **args)
def to_unix_path(p):
    """Return *p* with every backslash turned into a forward slash."""
    return '/'.join(p.split('\\'))
def emsdk_path():
    """Absolute, forward-slash path of the directory containing this script (the emsdk root)."""
    here = os.path.dirname(os.path.realpath(__file__))
    return to_unix_path(here)
# Location of the .emscripten configuration file: the user's home directory by
# default, or the emsdk root itself when an in-tree .emscripten exists
# (embedded-mode activation).
emscripten_config_directory = os.path.expanduser("~/")
if os.path.exists(os.path.join(emsdk_path(), '.emscripten')):
    emscripten_config_directory = emsdk_path()

# Name of the generated environment-setup script, chosen per host shell.
EMSDK_SET_ENV = 'emsdk_set_env.ps1' if POWERSHELL else 'emsdk_set_env.bat' if (WINDOWS and not MSYS) else 'emsdk_set_env.sh'

# File suffixes that are treated as downloadable/unpackable archives.
ARCHIVE_SUFFIXES = ('zip', '.tar', '.gz', '.xz', '.tbz2', '.bz2')
def which(program):
    """Locate *program* on PATH, like the Unix `which` command.

    Returns the full path of the executable, or None if not found. On Windows,
    an extension-less name is also probed with .exe/.cmd/.bat appended.
    """
    def is_exe(fpath):
        # On Windows the executable bit is meaningless, so only existence is checked there.
        return os.path.isfile(fpath) and (WINDOWS or os.access(fpath, os.X_OK))

    dirpart, fname = os.path.split(program)
    if dirpart:
        # An explicit directory was given: no PATH search.
        return program if is_exe(program) else None

    for path in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(path.strip('"'), program)
        if is_exe(candidate):
            return candidate
        if WINDOWS and '.' not in fname:
            for ext in ('.exe', '.cmd', '.bat'):
                if is_exe(candidate + ext):
                    return candidate + ext
    return None
def vswhere(version):
    """Return the installationPath of the newest Visual Studio *version* via vswhere.exe.

    Returns '' on any failure (non-Windows host, vswhere missing, no match).
    """
    try:
        program_files = os.environ['ProgramFiles(x86)'] if 'ProgramFiles(x86)' in os.environ else os.environ['ProgramFiles']
        vswhere_path = os.path.join(program_files, 'Microsoft Visual Studio', 'Installer', 'vswhere.exe')
        version_range = '[%s.0,%s.0)' % (version, version + 1)
        output = json.loads(subprocess.check_output([vswhere_path, '-latest', '-version', version_range, '-requires', 'Microsoft.VisualStudio.Component.VC.Tools.x86.x64', '-property', 'installationPath', '-format', 'json']))
        if not output:
            # Visual Studio 2017 Express is not included in the above search, and it does not have the VC.Tools.x86.x64 tool, so do a catch-all attempt as a fallback, to detect Express version.
            output = json.loads(subprocess.check_output([vswhere_path, '-latest', '-version', version_range, '-products', '*', '-property', 'installationPath', '-format', 'json']))
        if output:
            return str(output[0]['installationPath'])
        return ''
    except Exception:
        return ''
def vs_filewhere(installation_path, platform, file):
    """Return the directory containing *file* inside a VS developer environment, or ''.

    Runs vcvarsall.bat for the given VS install/*platform* and then `where`;
    any failure (including non-Windows hosts) yields ''. Note: the *platform*
    parameter shadows the `platform` module within this function.
    """
    try:
        vcvarsall = os.path.join(installation_path, 'VC\\Auxiliary\\Build\\vcvarsall.bat')
        env = subprocess.check_output('cmd /c "%s" %s & where %s' % (vcvarsall, platform, file))
        for line in env.split('\r\n'):
            if line.endswith(file):
                # Strip the trailing file name, leaving its directory.
                return line[:-len(file)]
        return ''
    except Exception:
        return ''
# CMake generator used when compiling tools from source; overridable on Windows
# via --mingw/--vs2013/--vs2015/--vs2017 command line flags (consumed below).
CMAKE_GENERATOR = 'Unix Makefiles'
if WINDOWS:
    # Detect which CMake generator to use when building on Windows
    if '--mingw' in sys.argv:
        CMAKE_GENERATOR = 'MinGW Makefiles'
    elif '--vs2013' in sys.argv:
        CMAKE_GENERATOR = 'Visual Studio 12'
    elif '--vs2015' in sys.argv:
        CMAKE_GENERATOR = 'Visual Studio 14'
    elif '--vs2017' in sys.argv:
        CMAKE_GENERATOR = 'Visual Studio 15'
    else:
        # No explicit flag given: probe which toolchains are installed.
        program_files = os.environ['ProgramFiles(x86)'] if 'ProgramFiles(x86)' in os.environ else os.environ['ProgramFiles']
        vs2017_exists = len(vswhere(15)) > 0
        vs2015_exists = 'VS140COMNTOOLS' in os.environ or 'VSSDK140Install' in os.environ or os.path.isdir(os.path.join(program_files, 'Microsoft Visual Studio 14.0'))
        vs2013_exists = 'VS120COMNTOOLS' in os.environ or os.path.isdir(os.path.join(program_files, 'Microsoft Visual Studio 12.0'))
        mingw_exists = which('mingw32-make') is not None and which('g++') is not None
        if vs2015_exists:
            CMAKE_GENERATOR = 'Visual Studio 14'
        elif vs2017_exists:
            # VS2017 has an LLVM build issue, see https://github.com/kripken/emscripten-fastcomp/issues/185
            CMAKE_GENERATOR = 'Visual Studio 15'
        elif mingw_exists:
            CMAKE_GENERATOR = 'MinGW Makefiles'
        elif vs2013_exists:
            # VS2013 is no longer supported, so attempt it as a last resort if someone might want to insist using it.
            CMAKE_GENERATOR = 'Visual Studio 12'
        else:
            # No detected generator
            CMAKE_GENERATOR = ''
# Strip the generator-selection flags so later argument parsing does not see them.
sys.argv = [a for a in sys.argv if a not in ('--mingw', '--vs2013', '--vs2015', '--vs2017')]
# Computes a suitable path prefix to use when building with a given generator.
def cmake_generator_prefix():
    """Directory-name suffix distinguishing build trees made by different CMake generators."""
    # Unix Makefiles and Visual Studio 2013 intentionally map to '' for
    # backwards path compatibility with older emsdk layouts.
    prefixes = {
        'Visual Studio 15': '_vs2017',
        'Visual Studio 14': '_vs2015',
        'MinGW Makefiles': '_mingw',
    }
    return prefixes.get(CMAKE_GENERATOR, '')
# Removes a directory tree even if it was readonly, and doesn't throw exception on failure.
def remove_tree(d):
    """Best-effort recursive delete of directory *d*; read-only entries are force-removed."""
    debug_print('remove_tree(' + str(d) + ')')

    def _clear_readonly_and_retry(func, path, exc_info):
        # If deletion failed because the entry is read-only, make it writable
        # and retry once; otherwise propagate the original error.
        if os.stat(path).st_mode & stat.S_IWRITE:
            raise
        os.chmod(path, stat.S_IWRITE)
        func(path)

    try:
        shutil.rmtree(d, onerror=_clear_readonly_and_retry)
    except Exception as e:
        debug_print('remove_tree threw an exception, ignoring: ' + str(e))
def import_pywin32():
    """On Windows, import and return (win32api, win32con); exits with a message if PyWin32 is missing.

    Returns None on non-Windows hosts.
    """
    if not WINDOWS:
        return None
    try:
        import win32api
        import win32con
    except Exception:
        print('Failed to import Python Windows extensions win32api and win32con. Make sure you are using the version of python available in emsdk, or install PyWin extensions to the distribution of Python you are attempting to use. (This script was launched in python instance from "' + sys.executable + '")')
        sys.exit(1)
    return win32api, win32con
def win_set_environment_variable_direct(key, value, system=True):
    """Write environment variable *key*=*value* straight into the Windows registry.

    system=True targets the machine-wide (HKLM) environment, which requires
    administrator rights; otherwise the current user's (HKCU) environment is
    written. On success a WM_SETTINGCHANGE broadcast notifies running apps.
    Exits the process on an access-denied error.
    """
    prev_path = os.environ['PATH']
    try:
        # NOTE(review): presumably extends PATH with the activated python so the
        # PyWin32 DLLs can be located — confirm against find_used_python().
        py = find_used_python()
        if py:
            py_path = to_native_path(py.expand_vars(py.activated_path))
            os.environ['PATH'] = os.environ['PATH'] + ';' + py_path
        win32api, win32con = import_pywin32()
        if system:
            # Read globally from ALL USERS section.
            folder = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, 'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment', 0, win32con.KEY_ALL_ACCESS)
        else:
            # Register locally from CURRENT USER section.
            folder = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER, 'Environment', 0, win32con.KEY_ALL_ACCESS)
        win32api.RegSetValueEx(folder, key, 0, win32con.REG_EXPAND_SZ, value)
        debug_print('Set key=' + key + ' with value ' + value + ' in registry.')
    except Exception as e:
        if e.args[0] == 5:  # 'Access is denied.'
            print('Error! Failed to set the environment variable \'' + key + '\'! Setting environment variables permanently requires administrator access. Please rerun this command with administrative privileges. This can be done for example by holding down the Ctrl and Shift keys while opening a command prompt in start menu.')
            sys.exit(1)
        print('Failed to write environment variable ' + key + ':', file=sys.stderr)
        print(str(e), file=sys.stderr)
        win32api.RegCloseKey(folder)
        os.environ['PATH'] = prev_path
        return None
    win32api.RegCloseKey(folder)
    # Restore PATH after the temporary PyWin32 lookup extension above.
    os.environ['PATH'] = prev_path
    win32api.PostMessage(win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 'Environment')
def win_get_environment_variable(key, system=True):
    """Read environment variable *key* from the Windows registry.

    system=True reads the machine-wide (HKLM) environment, otherwise the
    current user's (HKCU). Returns None when the variable does not exist.
    Falls back to os.environ when PyWin32 is unavailable — in that case any
    %VAR% expansion items have already been expanded.
    """
    prev_path = os.environ['PATH']
    try:
        # NOTE(review): presumably extends PATH with the activated python so the
        # PyWin32 DLLs can be located — confirm against find_used_python().
        py = find_used_python()
        if py:
            py_path = to_native_path(py.expand_vars(py.activated_path))
            os.environ['PATH'] = os.environ['PATH'] + ';' + py_path
        try:
            import win32api
            import win32con
            if system:
                # Read globally from ALL USERS section.
                folder = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, 'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment')
            else:
                # Register locally from CURRENT USER section.
                folder = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, 'Environment')
            value = str(win32api.RegQueryValueEx(folder, key)[0])
        except Exception:
            # PyWin32 is not available - read via os.environ. This has the drawback that expansion items such as %PROGRAMFILES% will have been expanded, so
            # need to be precise not to set these back to system registry, or expansion items would be lost.
            return os.environ[key]
    except Exception as e:
        if e.args[0] != 2:  # 'The system cannot find the file specified.'
            print('Failed to read environment variable ' + key + ':', file=sys.stderr)
            print(str(e), file=sys.stderr)
        try:
            win32api.RegCloseKey(folder)
        except Exception:
            pass
        os.environ['PATH'] = prev_path
        return None
    win32api.RegCloseKey(folder)
    # Restore PATH after the temporary PyWin32 lookup extension above.
    os.environ['PATH'] = prev_path
    return value
def win_environment_variable_exists(key, system=True):
    """True when *key* exists with a non-empty value in the chosen registry scope."""
    return bool(win_get_environment_variable(key, system))
def win_get_active_environment_variable(key):
    """Value of *key*, preferring the per-user environment over the system-wide one."""
    user_value = win_get_environment_variable(key, False)
    if user_value is not None:
        return user_value
    return win_get_environment_variable(key, True)
def win_set_environment_variable(key, value, system=True):
    """Set — or, when *value* is falsy, delete — environment variable *key* on Windows.

    system=True targets the machine-wide environment (may require admin
    rights); otherwise the per-user one. A write is skipped entirely when the
    stored value already matches.
    """
    debug_print('set ' + str(key) + '=' + str(value) + ', in system=' + str(system), file=sys.stderr)
    previous_value = win_get_environment_variable(key, system)
    if previous_value == value:
        debug_print(' no need to set, since same value already exists.')
        return  # No need to elevate UAC for nothing to set the same value, skip.
    if not value:
        # Deleting: SETX cannot remove a variable, so use REG DELETE directly.
        try:
            if system:
                cmd = ['REG', 'DELETE', 'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment', '/V', key, '/f']
            else:
                cmd = ['REG', 'DELETE', 'HKCU\\Environment', '/V', key, '/f']
            debug_print(str(cmd))
            value = subprocess.call(cmd, stdout=subprocess.PIPE)
        except Exception:
            return
        return
    try:
        if system:
            # System-wide writes go straight to the registry; SETX only handles the user scope.
            win_set_environment_variable_direct(key, value, system)
            return
        value = value.replace('%', '^%')  # Escape % signs so that we don't expand references to environment variables.
        if len(value) >= 1024:
            print('ERROR! The new environment variable ' + key + ' is more than 1024 characters long! A value this long cannot be set via command line: please add the environment variable specified above to system environment manually via Control Panel.', file=sys.stderr)
            sys.exit(1)
        cmd = ['SETX', key, value]
        debug_print(str(cmd))
        retcode = subprocess.call(cmd, stdout=subprocess.PIPE)
        if retcode != 0:
            print('ERROR! Failed to set environment variable ' + key + '=' + value + '. You may need to set it manually.', file=sys.stderr)
    except Exception as e:
        print('ERROR! Failed to set environment variable ' + key + '=' + value + ':', file=sys.stderr)
        print(str(e), file=sys.stderr)
        print('You may need to set it manually.', file=sys.stderr)
def win_delete_environment_variable(key, system=True):
    """Remove environment variable *key* from the chosen Windows registry scope."""
    debug_print('win_delete_environment_variable(key=' + key + ', system=' + str(system) + ')')
    # Deleting is implemented as setting a None value, which triggers the REG DELETE path.
    win_set_environment_variable(key, None, system)
# Returns the absolute pathname to the given path inside the Emscripten SDK.
def sdk_path(path):
    """Return *path* as a unix-style path; relative paths are resolved against the emsdk root."""
    if os.path.isabs(path):
        return path
    return to_unix_path(os.path.join(os.path.dirname(os.path.realpath(__file__)), path))
# Modifies the given file in-place to contain '\r\n' line endings.
def file_to_crlf(filename):
    """Convert *filename* in-place to Windows ('\\r\\n') line endings.

    Works on bytes throughout: the previous version read the file in text
    mode but wrote the resulting str to a binary-mode ('wb') handle, which
    raises TypeError on Python 3. Normalizing existing '\\r\\n' first makes
    the conversion idempotent.
    """
    with open(filename, 'rb') as f:
        data = f.read()
    data = data.replace(b'\r\n', b'\n').replace(b'\n', b'\r\n')
    with open(filename, 'wb') as f:
        f.write(data)
# Modifies the given file in-place to contain '\n' line endings.
def file_to_lf(filename):
    """Convert *filename* in-place to Unix ('\\n') line endings.

    Works on bytes throughout: the previous version read the file in text
    mode but wrote the resulting str to a binary-mode ('wb') handle, which
    raises TypeError on Python 3.
    """
    with open(filename, 'rb') as f:
        data = f.read()
    data = data.replace(b'\r\n', b'\n')
    with open(filename, 'wb') as f:
        f.write(data)
# Removes a single file, suppressing exceptions on failure.
def rmfile(filename):
    """Delete *filename* if possible; a missing file or permission error is ignored."""
    debug_print('rmfile(' + filename + ')')
    try:
        os.remove(filename)
    except OSError:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit and
        # programming errors are no longer silently swallowed.
        pass
def fix_lineendings(filename):
    """Normalize *filename* to the host platform's native line endings."""
    converter = file_to_crlf if WINDOWS else file_to_lf
    converter(filename)
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
def mkdir_p(path):
    """Create directory *path* including parents; no-op when it already exists (like `mkdir -p`)."""
    debug_print('mkdir_p(' + path + ')')
    if os.path.exists(path):
        return
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # Tolerate a concurrent creation of the same directory; re-raise anything else.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def num_files_in_directory(path):
    """Number of existing entries directly inside directory *path*; 0 if it is not a directory."""
    if not os.path.isdir(path):
        return 0
    # os.path.exists filters out dangling symlinks and entries deleted mid-listing.
    return sum(1 for name in os.listdir(path) if os.path.exists(os.path.join(path, name)))
def run(cmd, cwd=None):
    """Run *cmd* (arg list) with inherited stdio; returns the process exit code."""
    debug_print('run(cmd=' + str(cmd) + ', cwd=' + str(cwd) + ')')
    proc = subprocess.Popen(cmd, cwd=cwd, env=os.environ.copy())
    proc.communicate()
    code = proc.returncode
    if code != 0:
        print(str(cmd) + ' failed with error code ' + str(code) + '!')
    return code
# http://pythonicprose.blogspot.fi/2009/10/python-extract-targz-archive.html
def untargz(source_filename, dest_dir, unpack_even_if_exists=False):
    """Extract tarball *source_filename* into *dest_dir*, stripping the top-level directory.

    Skips the work when *dest_dir* already has content, unless
    *unpack_even_if_exists* is set. Requires an external 'tar' on PATH; note
    that tar's exit status is not checked — this always returns True.
    """
    debug_print('untargz(source_filename=' + source_filename + ', dest_dir=' + dest_dir + ')')
    if not unpack_even_if_exists and num_files_in_directory(dest_dir) > 0:
        print("File '" + source_filename + "' has already been unpacked, skipping.")
        return True
    print("Unpacking '" + source_filename + "' to '" + dest_dir + "'")
    mkdir_p(dest_dir)
    run(['tar', '-xvf' if VERBOSE else '-xf', sdk_path(source_filename), '--strip', '1'], cwd=dest_dir)
    return True
# On Windows, it is not possible to reference path names that are longer than ~260 characters, unless the path is referenced via a "\\?\" prefix.
# See https://msdn.microsoft.com/en-us/library/aa365247.aspx#maxpath and http://stackoverflow.com/questions/3555527/python-win32-filename-length-workaround
# In that mode, forward slashes cannot be used as delimiters.
def fix_potentially_long_windows_pathname(pathname):
    """On Windows, return *pathname* in '\\\\?\\' long-path form; a no-op elsewhere."""
    if not WINDOWS:
        return pathname
    # Test if emsdk calls fix_potentially_long_windows_pathname() with long relative paths (which is problematic)
    if len(pathname) > 200 and not os.path.isabs(pathname):
        print('Warning: Seeing a relative path "' + pathname + '" which is dangerously long for being referenced as a short Windows path name. Refactor emsdk to be able to handle this!')
    if pathname.startswith('\\\\?\\'):
        return pathname
    return '\\\\?\\' + os.path.normpath(pathname.replace('/', '\\'))
# On windows, rename/move will fail if the destination exists, and there is no
# race-free way to do it. This method removes the destination if it exists, so
# the move always works
def move_with_overwrite(src, dest):
    """Rename *src* to *dest*, first deleting any existing *dest* so the rename succeeds on Windows."""
    dest_already_there = os.path.exists(dest)
    if dest_already_there:
        os.remove(dest)
    os.rename(src, dest)
# http://stackoverflow.com/questions/12886768/simple-way-to-unzip-file-in-python-on-all-oses
def unzip(source_filename, dest_dir, unpack_even_if_exists=False):
    """Extract zip archive *source_filename* into *dest_dir*.

    When every entry lives under one common top-level directory, that
    directory is stripped (tar '--strip 1' semantics) via a temporary
    staging directory. Returns True on success, False on failure; a
    corrupted archive is deleted so a later retry re-downloads it. The
    work is skipped when *dest_dir* already has content, unless
    *unpack_even_if_exists* is set.
    """
    debug_print('unzip(source_filename=' + source_filename + ', dest_dir=' + dest_dir + ')')
    if not unpack_even_if_exists and num_files_in_directory(dest_dir) > 0:
        print("File '" + source_filename + "' has already been unpacked, skipping.")
        return True
    print("Unpacking '" + source_filename + "' to '" + dest_dir + "'")
    mkdir_p(dest_dir)
    common_subdir = None
    try:
        with zipfile.ZipFile(source_filename) as zf:
            # Implement '--strip 1' behavior to unzipping by testing if all the files in the zip reside in a common subdirectory, and if so,
            # we move the output tree at the end of uncompression step.
            for member in zf.infolist():
                words = member.filename.split('/')
                if len(words) > 1:  # If there is a directory component?
                    if common_subdir is None:
                        common_subdir = words[0]
                    elif common_subdir != words[0]:
                        common_subdir = None
                        break
                else:
                    common_subdir = None
                    break
            unzip_to_dir = dest_dir
            if common_subdir:
                unzip_to_dir = os.path.join('/'.join(dest_dir.split('/')[:-1]), 'unzip_temp')
            # Now do the actual decompress.
            for member in zf.infolist():
                zf.extract(member, fix_potentially_long_windows_pathname(unzip_to_dir))
                dst_filename = os.path.join(unzip_to_dir, member.filename)
                # See: https://stackoverflow.com/questions/42326428/zipfile-in-python-file-permission
                unix_attributes = member.external_attr >> 16
                if unix_attributes:
                    os.chmod(dst_filename, unix_attributes)
                # Move the extracted file to its final location without the base directory name, if we are stripping that away.
                if common_subdir:
                    if not member.filename.startswith(common_subdir):
                        raise Exception('Unexpected filename "' + member.filename + '"!')
                    stripped_filename = '.' + member.filename[len(common_subdir):]
                    final_dst_filename = os.path.join(dest_dir, stripped_filename)
                    if stripped_filename.endswith('/'):  # Directory?
                        d = fix_potentially_long_windows_pathname(final_dst_filename)
                        if not os.path.isdir(d):
                            os.mkdir(d)
                    else:
                        parent_dir = os.path.dirname(fix_potentially_long_windows_pathname(final_dst_filename))
                        if parent_dir and not os.path.exists(parent_dir):
                            os.makedirs(parent_dir)
                        move_with_overwrite(fix_potentially_long_windows_pathname(dst_filename), fix_potentially_long_windows_pathname(final_dst_filename))
        if common_subdir:
            # Best-effort cleanup of the temporary staging directory used for stripping.
            try:
                remove_tree(unzip_to_dir)
            except:
                pass
    except zipfile.BadZipfile as e:
        print("Unzipping file '" + source_filename + "' failed due to reason: " + str(e) + "! Removing the corrupted zip file.")
        rmfile(source_filename)
        return False
    except Exception as e:
        print("Unzipping file '" + source_filename + "' failed due to reason: " + str(e))
        return False
    return True
# This function interprets whether the given string looks like a path to a directory instead of a file, without looking at the actual filesystem.
# 'a/b/c' points to directory, so does 'a/b/c/', but 'a/b/c.x' is parsed as a filename
def path_points_to_directory(path):
    """Heuristically decide whether *path* names a directory (True) or a file (False).

    Purely textual — the filesystem is never consulted.
    """
    if path == '.':
        return True
    sep_pos = max(path.rfind('/'), path.rfind('\\'))
    dot_pos = path.rfind('.')
    if dot_pos == -1 or dot_pos < sep_pos:
        # No suffix after the last path separator → directory.
        return True
    # Very simple logic for the only file suffixes used by emsdk downloader. Other
    # suffixes, like 'clang-3.2' are treated as dirs.
    return path[dot_pos:] not in ('.exe', '.zip', '.txt')
def get_content_length(download):
    """Return the Content-Length of an open urlopen() response, or 0 if unavailable.

    Handles both the Python 2 (mimetools.Message.getheaders) and Python 3
    (HTTPResponse.getheader / email.message.Message.get) header interfaces.
    Fix: the old code tested hasattr(meta.getheaders, "Content-Length") —
    an attribute lookup on the bound method itself, which is always False —
    so the Python 2 getheaders branch was dead.
    """
    try:
        meta = download.info()
        if hasattr(meta, "getheaders"):
            # Python 2: mimetools.Message exposes getheaders(name) -> list.
            headers = meta.getheaders("Content-Length")
            if headers:
                return int(headers[0])
        if hasattr(download, "getheader") and download.getheader('Content-Length'):
            return int(download.getheader('Content-Length'))
        elif hasattr(meta, "getheader") and meta.getheader('Content-Length'):
            return int(meta.getheader('Content-Length'))
    except Exception:
        pass
    return 0
def get_download_target(url, dstpath, filename_prefix=''):
    """Compute the local file name that downloading *url* into *dstpath* would produce."""
    file_name = filename_prefix + url.split('/')[-1]
    if path_points_to_directory(dstpath):
        target = os.path.join(dstpath, file_name)
    else:
        target = dstpath
    # Treat all relative destination paths as relative to the SDK root directory, not the current working directory.
    return sdk_path(target)
# On success, returns the filename on the disk pointing to the destination file that was produced
# On failure, returns None.
def download_file(url, dstpath, download_even_if_exists=False, filename_prefix=''):
    """Download *url* to *dstpath*, printing progress to the console.

    Returns the local file name on success, None on failure. An already
    downloaded file is reused unless *download_even_if_exists* is set.
    Ctrl-C deletes the partial file and exits the whole program.
    """
    debug_print('download_file(url=' + url + ', dstpath=' + dstpath + ')')
    file_name = get_download_target(url, dstpath, filename_prefix)
    if os.path.exists(file_name) and not download_even_if_exists:
        print("File '" + file_name + "' already downloaded, skipping.")
        return file_name
    try:
        u = urlopen(url)
        mkdir_p(os.path.dirname(file_name))
        with open(file_name, 'wb') as f:
            file_size = get_content_length(u)
            if file_size > 0:
                print("Downloading: %s from %s, %s Bytes" % (file_name, url, file_size))
            else:
                print("Downloading: %s from %s" % (file_name, url))
            file_size_dl = 0
            # Draw a progress bar 80 chars wide (in non-TTY mode)
            progress_max = 80 - 4
            progress_shown = 0
            block_sz = 8192
            if not TTY_OUTPUT:
                print(' [', end='')
            while True:
                buffer = u.read(block_sz)
                if not buffer:
                    break
                file_size_dl += len(buffer)
                f.write(buffer)
                if file_size:
                    percent = file_size_dl * 100.0 / file_size
                    if TTY_OUTPUT:
                        # Interactive terminal: repaint a single status line in place.
                        status = r" %10d [%3.02f%%]" % (file_size_dl, percent)
                        print(status, end='\r')
                    else:
                        # Non-interactive: extend the '[----]' dash bar proportionally.
                        while progress_shown < progress_max * percent / 100:
                            print('-', end='')
                            sys.stdout.flush()
                            progress_shown += 1
            if not TTY_OUTPUT:
                print(']')
                sys.stdout.flush()
    except Exception as e:
        print("Error: Downloading URL '" + url + "': " + str(e))
        if "SSL: CERTIFICATE_VERIFY_FAILED" in str(e) or "urlopen error unknown url type: https" in str(e):
            print("Warning: Possibly SSL/TLS issue. Update or install Python SSL root certificates (2048-bit or greater) supplied in Python folder or https://pypi.org/project/certifi/ and try again.")
        rmfile(file_name)
        return None
    except KeyboardInterrupt:
        print("Aborted by User, exiting")
        rmfile(file_name)
        sys.exit(1)
    return file_name
def run_get_output(cmd, cwd=None):
    """Run *cmd* capturing its output; returns (returncode, stdout, stderr) as text."""
    debug_print('run_get_output(cmd=' + str(cmd) + ', cwd=' + str(cwd) + ')')
    proc = subprocess.Popen(cmd,
                            cwd=cwd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            stdin=subprocess.PIPE,
                            env=os.environ.copy(),
                            universal_newlines=True)
    out, err = proc.communicate()
    return (proc.returncode, out, err)
# must_succeed: If false, the search is performed silently without printing out errors if not found. Empty string is returned if git is not found.
# If true, the search is required to succeed, and the execution will terminate with sys.exit(1) if not found.
def GIT(must_succeed=True):
    """Return a working git executable, preferring the emsdk-bundled one over system git."""
    # The order in the following is important, and specifies the preferred order of using the git tools.
    # Primarily use git from emsdk if installed. If not, use system git.
    gits = ['git/1.9.4/bin/git.exe', which('git')]
    for git in gits:
        try:
            # A candidate is accepted only if 'git --version' actually runs.
            ret, stdout, stderr = run_get_output([git, '--version'])
            if ret == 0:
                return git
        except:
            pass
    if must_succeed:
        if WINDOWS:
            print("ERROR: git executable was not found. Please install it by typing 'emsdk install git-1.9.4', or alternatively by installing it manually from http://git-scm.com/downloads . If you install git manually, remember to add it to PATH")
        elif OSX:
            print("ERROR: git executable was not found. Please install git for this operation! This can be done from http://git-scm.com/ , or by installing XCode and then the XCode Command Line Tools (see http://stackoverflow.com/questions/9329243/xcode-4-4-command-line-tools )")
        elif LINUX:
            print("ERROR: git executable was not found. Please install git for this operation! This can be probably be done using your package manager, see http://git-scm.com/book/en/Getting-Started-Installing-Git")
        else:
            print("ERROR: git executable was not found. Please install git for this operation!")
        sys.exit(1)
    return ''  # Not found
def git_repo_version(repo_path):
    """Return '<date> <hash>' of the newest commit in *repo_path*, or '' on failure."""
    returncode, stdout, stderr = run_get_output([GIT(), 'log', '-n', '1', '--pretty="%aD %H"'], cwd=repo_path)
    return stdout.strip() if returncode == 0 else ""
def git_recent_commits(repo_path, n=20):
    """Return the hashes of the n most recent commits in repo_path (newest first), or [] on failure."""
    code, out, _ = run_get_output([GIT(), 'log', '-n', str(n), '--pretty="%H"'], cwd=repo_path)
    if code != 0:
        return []
    # Strip CRs and the quotes the --pretty format wraps each hash in.
    return out.strip().replace('\r', '').replace('"', '').split('\n')
def git_clone(url, dstpath):
    """Clone url into dstpath unless a git checkout already exists there. Returns True on success."""
    debug_print('git_clone(url=' + url + ', dstpath=' + dstpath + ')')
    if os.path.isdir(os.path.join(dstpath, '.git')):
        print("Repository '" + url + "' already cloned to directory '" + dstpath + "', skipping.")
        return True
    mkdir_p(dstpath)
    # Shallow clones save bandwidth when history is not needed.
    extra_args = ['--depth', '1'] if GIT_CLONE_SHALLOW else []
    return run([GIT(), 'clone'] + extra_args + [url, dstpath]) == 0
def git_checkout_and_pull(repo_path, branch):
    """Fetch 'origin' in repo_path, check out the given branch and fast-forward it.

    Returns True on success.  The merge uses --ff-only, so a locally diverged
    branch fails instead of silently creating a merge commit.
    """
    debug_print('git_checkout_and_pull(repo_path=' + repo_path + ', branch=' + branch + ')')
    try:
        print("Fetching latest changes to the branch '" + branch + "' for '" + repo_path + "'...")
        # Fix: the original ran 'git fetch origin' twice in a row (once before the
        # try block and once inside it); a single fetch is sufficient.
        ret = run([GIT(), 'fetch', 'origin'], repo_path)
        if ret != 0:
            return False
        ret = run([GIT(), 'checkout', '--quiet', branch], repo_path)
        if ret != 0:
            return False
        ret = run([GIT(), 'merge', '--ff-only', 'origin/' + branch], repo_path)
        if ret != 0:
            return False
    except:
        print('git operation failed!')
        return False
    print("Successfully updated and checked out branch '" + branch + "' on repository '" + repo_path + "'")
    print("Current repository version: " + git_repo_version(repo_path))
    return True
def git_clone_checkout_and_pull(url, dstpath, branch):
    """Clone url into dstpath (if not already cloned) and update it to the given branch."""
    debug_print('git_clone_checkout_and_pull(url=' + url + ', dstpath=' + dstpath + ', branch=' + branch + ')')
    if not git_clone(url, dstpath):
        return False
    return git_checkout_and_pull(dstpath, branch)
def decide_cmake_build_type(tool):
    """Return the CMake build type to use for tool, honoring the global override if set."""
    global CMAKE_BUILD_TYPE_OVERRIDE
    return CMAKE_BUILD_TYPE_OVERRIDE if CMAKE_BUILD_TYPE_OVERRIDE else tool.cmake_build_type
def fastcomp_build_dir(tool):
    """Compute the build output directory name for an LLVM/fastcomp tool.

    The name encodes the git branch (or version), the CMake generator and the
    target bitness, so different configurations build into separate directories.
    """
    generator_suffixes = {
        'Visual Studio 10': '_vs2010',
        'Visual Studio 11': '_vs2012',
        'Visual Studio 12': '_vs2013',
        'Visual Studio 14': '_vs2015',
        'Visual Studio 15': '_vs2017',
        'MinGW Makefiles': '_mingw',
    }
    generator_suffix = generator_suffixes.get(CMAKE_GENERATOR, '')
    bitness_suffix = '_32' if tool.bitness == 32 else '_64'
    if hasattr(tool, 'git_branch'):
        middle = tool.git_branch.replace(os.sep, '-')
    else:
        middle = tool.version
    return 'build_' + middle + generator_suffix + bitness_suffix
def exe_suffix(filename):
    """Append '.exe' to filename on Windows if it is not already present."""
    if WINDOWS and not filename.endswith('.exe'):
        return filename + '.exe'
    return filename
def fastcomp_build_bin_dir(tool):
    """Return the directory (relative to the tool's install path) holding the built LLVM binaries."""
    build_dir = fastcomp_build_dir(tool)
    # Non-MSVC builds place the binaries directly under <build>/bin.
    if not (WINDOWS and 'Visual Studio' in CMAKE_GENERATOR):
        return os.path.join(build_dir, 'bin')
    default_cmake_build_type = decide_cmake_build_type(tool)
    # Two MSVC layouts exist: older builds used <build>/bin/<config>, newer
    # ones use <build>/<config>/bin.  Probe the new layout across build types,
    # preferring the configured type.
    old_llvm_bin_dir = os.path.join(build_dir, 'bin', default_cmake_build_type)
    new_llvm_bin_dir = None
    for build_type in [default_cmake_build_type, 'Release', 'RelWithDebInfo', 'MinSizeRel', 'Debug']:
        candidate = os.path.join(build_dir, build_type, 'bin')
        if os.path.isfile(os.path.join(tool.installation_path(), candidate, exe_suffix('clang'))):
            new_llvm_bin_dir = candidate
            break
    if new_llvm_bin_dir and os.path.exists(os.path.join(tool.installation_path(), new_llvm_bin_dir)):
        return new_llvm_bin_dir
    if os.path.exists(os.path.join(tool.installation_path(), old_llvm_bin_dir)):
        return old_llvm_bin_dir
    return os.path.join(build_dir, default_cmake_build_type, 'bin')
def build_env(generator):
    """Return a copy of os.environ with variables needed to build with the given CMake generator.

    On macOS, -stdlib=libc++ is appended to CXXFLAGS; on Windows, the MSVC
    toolchain paths for the detected Visual Studio version are added.
    """
    build_env = os.environ.copy()
    if OSX:
        # Fix: this assignment had lost its 'if OSX:' header (leaving a bare
        # expression followed by an orphaned 'elif' — a syntax error).  Also,
        # the original used hasattr() on a dict, which never finds a key; a
        # membership test preserves any pre-existing CXXFLAGS value.
        build_env['CXXFLAGS'] = ((build_env['CXXFLAGS'] + ' ') if 'CXXFLAGS' in build_env else '') + '-stdlib=libc++'
    elif 'Visual Studio 15' in generator:
        path = vswhere(15)
        build_env['VCTargetsPath'] = os.path.join(path, 'Common7\\IDE\\VC\\VCTargets')
        vc_bin_paths = [vs_filewhere(path, 'amd64', 'cl.exe'),
                        vs_filewhere(path, 'x86', 'cl.exe')]
        for path in vc_bin_paths:
            if os.path.isdir(path):
                build_env['PATH'] = build_env['PATH'] + ';' + path
    elif 'Visual Studio 14' in generator or 'Visual Studio 2015' in generator:
        build_env['VCTargetsPath'] = os.path.join(os.environ['ProgramFiles(x86)'], 'MSBuild/Microsoft.Cpp/v4.0/V140')
        vc_bin_paths = [os.path.join(os.environ['ProgramFiles'], 'Microsoft Visual Studio 14.0\\VC\\bin'),
                        os.path.join(os.environ['ProgramFiles(x86)'], 'Microsoft Visual Studio 14.0\\VC\\bin')]
        for path in vc_bin_paths:
            if os.path.isdir(path):
                build_env['PATH'] = build_env['PATH'] + ';' + path
    elif 'Visual Studio 12' in generator or 'Visual Studio 2013' in generator:
        build_env['VCTargetsPath'] = os.path.join(os.environ['ProgramFiles(x86)'], 'MSBuild/Microsoft.Cpp/v4.0/V120')
    return build_env
def get_generator_for_sln_file(sln_file):
    """Inspect a Visual Studio .sln file and return the matching CMake generator name.

    Raises Exception for solution files written by an unrecognized VS version.
    """
    # Fix: close the file handle (the original leaked it via open(...).read()).
    with open(sln_file, 'r') as f:
        contents = f.read()
    if '# Visual Studio 15' in contents:
        return 'Visual Studio 15'
    if '# Visual Studio Express 2015' in contents or '# Visual Studio 2015' in contents or '# Visual Studio 14' in contents:
        return 'Visual Studio 14'
    if '# Visual Studio Express 2013' in contents or '# Visual Studio 2013' in contents or '# Visual Studio 12' in contents:
        return 'Visual Studio 12'
    raise Exception('Unknown generator used to build solution file ' + sln_file)
def find_msbuild(sln_file):
    """Locate the MSBuild.exe matching the VS version that produced sln_file.

    Returns the full path to MSBuild.exe, falling back to a PATH lookup
    (which may return None) if none of the well-known locations match.
    """
    paths_vs2015 = [os.path.join(os.environ['ProgramFiles'], 'MSBuild/14.0/Bin/amd64'),
                    os.path.join(os.environ['ProgramFiles(x86)'], 'MSBuild/14.0/Bin/amd64'),
                    os.path.join(os.environ['ProgramFiles'], 'MSBuild/14.0/Bin'),
                    os.path.join(os.environ['ProgramFiles(x86)'], 'MSBuild/14.0/Bin')]
    paths_vs2013 = [os.path.join(os.environ['ProgramFiles'], 'MSBuild/12.0/Bin/amd64'),
                    os.path.join(os.environ['ProgramFiles(x86)'], 'MSBuild/12.0/Bin/amd64'),
                    os.path.join(os.environ['ProgramFiles'], 'MSBuild/12.0/Bin'),
                    os.path.join(os.environ['ProgramFiles(x86)'], 'MSBuild/12.0/Bin')]
    # Pre-VS2013 MSBuild shipped with the .NET Framework.
    paths_net4 = [os.path.join(os.environ["WINDIR"], 'Microsoft.NET/Framework/v4.0.30319')]
    generator = get_generator_for_sln_file(sln_file)
    debug_print('find_msbuild looking for generator ' + str(generator))
    if generator == 'Visual Studio 15':
        vs_root = vswhere(15)
        search_paths = [os.path.join(vs_root, 'MSBuild/15.0/Bin/amd64'),
                        os.path.join(vs_root, 'MSBuild/15.0/Bin')]
    elif generator == 'Visual Studio 14':
        search_paths = paths_vs2015
    elif generator == 'Visual Studio 12':
        search_paths = paths_vs2013 + paths_net4
    else:
        raise Exception('Unknown generator!')
    for search_path in search_paths:
        candidate = os.path.join(search_path, 'MSBuild.exe')
        debug_print('Searching for MSBuild.exe: ' + candidate)
        if os.path.isfile(candidate):
            return candidate
    debug_print('MSBuild.exe in PATH? ' + str(which('MSBuild.exe')))
    return which('MSBuild.exe')
def make_build(build_root, build_type, build_target_platform='x64'):
    """Run the platform build tool (MSBuild / mingw32-make / cmake --build) in build_root.

    build_type is the CMake configuration (e.g. 'Release');
    build_target_platform selects the MSBuild platform ('x64' or 'Win32').
    Returns True on success.
    """
    debug_print('make_build(build_root=' + build_root + ', build_type=' + build_type + ', build_target_platform=' + build_target_platform + ')')
    global CPU_CORES
    if CPU_CORES > 1:
        print('Performing a parallel build with ' + str(CPU_CORES) + ' cores.')
    else:
        print('Performing a singlethreaded build.')
    generator_to_use = CMAKE_GENERATOR
    if WINDOWS:
        if 'Visual Studio' in CMAKE_GENERATOR:
            solution_name = str(subprocess.check_output(['dir', '/b', '*.sln'], shell=True, cwd=build_root).decode('utf-8').strip())
            generator_to_use = get_generator_for_sln_file(os.path.join(build_root, solution_name))
            # Don't pass /maxcpucount: MSBuild would multiply it by the number of
            # cores the system has, and passing the number of logical cores here has been observed to give a quadratic N*N explosion on the number of spawned processes
            # (e.g. on a Core i7 5960X with 16 logical cores, it would spawn 16*16=256 cl.exe processes, which would start crashing when running out of system memory)
            # make = [find_msbuild(os.path.join(build_root, solution_name)), '/maxcpucount:' + str(CPU_CORES), '/t:Build', '/p:Configuration=' + build_type, '/nologo', '/verbosity:minimal', solution_name]
            make = [find_msbuild(os.path.join(build_root, solution_name)), '/t:Build', '/p:Configuration=' + build_type, '/p:Platform=' + build_target_platform, '/nologo', '/verbosity:minimal', solution_name]
        else:
            make = ['mingw32-make', '-j' + str(CPU_CORES)]
    else:
        make = ['cmake', '--build', '.', '--', '-j' + str(CPU_CORES)]
    # Build
    try:
        print('Running build: ' + str(make))
        ret = subprocess.check_call(make, cwd=build_root, env=build_env(generator_to_use))
        if ret != 0:
            # Fix: str(ret) — the original concatenated the int exit code to a
            # str, which would itself raise TypeError while reporting failure.
            print('Build failed with exit code ' + str(ret) + '!', file=sys.stderr)
            print('Working directory: ' + build_root, file=sys.stderr)
            return False
    except Exception as e:
        print('Build failed due to exception!', file=sys.stderr)
        print('Working directory: ' + build_root, file=sys.stderr)
        print(str(e), file=sys.stderr)
        return False
    return True
def cmake_configure(generator, build_root, src_root, build_type, extra_cmake_args=[]):
    """Run CMake to configure the project in src_root into build_root.

    Also writes a 'recmake.bat'/'recmake.sh' replay script into the build dir
    with the exact configure command line.  Returns True on success.
    NOTE: extra_cmake_args uses a mutable default; it is never mutated here —
    keep it that way.
    """
    debug_print('cmake_configure(generator=' + str(generator) + ', build_root=' + str(build_root) + ', src_root=' + str(src_root) + ', build_type=' + str(build_type) + ', extra_cmake_args=' + str(extra_cmake_args) + ')')
    # Configure
    if not os.path.isdir(build_root):
        os.mkdir(build_root) # Create build output directory if it doesn't yet exist.
    try:
        if generator:
            generator = ['-G', generator]
        else:
            generator = []
        cmdline = ['cmake'] + generator + ['-DCMAKE_BUILD_TYPE=' + build_type, '-DPYTHON_EXECUTABLE=' + sys.executable] + extra_cmake_args + [src_root]
        print('Running CMake: ' + str(cmdline))

        # Quote arguments containing spaces when writing the replay script.
        def quote_parens(x):
            if ' ' in x:
                return '"' + x.replace('"', '\\"') + '"'
            else:
                return x

        # Fix: close the replay-script file handle (the original leaked it
        # via open(...).write()).
        with open(os.path.join(build_root, 'recmake.' + ('bat' if WINDOWS else 'sh')), 'w') as f:
            f.write(' '.join(map(quote_parens, cmdline)))
        ret = subprocess.check_call(cmdline, cwd=build_root, env=build_env(CMAKE_GENERATOR))
        if ret != 0:
            # Fix: str(ret) — the original concatenated the int exit code to a
            # str, which would itself raise TypeError while reporting failure.
            print('CMake invocation failed with exit code ' + str(ret) + '!', file=sys.stderr)
            print('Working directory: ' + build_root, file=sys.stderr)
            return False
    except OSError as e:
        if e.errno == errno.ENOENT:
            print(str(e), file=sys.stderr)
            print('Could not run CMake, perhaps it has not been installed?', file=sys.stderr)
            if WINDOWS:
                print('Installing this package requires CMake. Get it from http://www.cmake.org/', file=sys.stderr)
            elif LINUX:
                print('Installing this package requires CMake. Get it via your system package manager (e.g. sudo apt-get install cmake), or from http://www.cmake.org/', file=sys.stderr)
            elif OSX:
                print('Installing this package requires CMake. Get it via a OSX package manager (Homebrew: "brew install cmake", or MacPorts: "sudo port install cmake"), or from http://www.cmake.org/', file=sys.stderr)
            return False
        raise
    except Exception as e:
        print('CMake invocation failed due to exception!', file=sys.stderr)
        print('Working directory: ' + build_root, file=sys.stderr)
        print(str(e), file=sys.stderr)
        return False
    return True
def xcode_sdk_version():
    """Return the installed Xcode SDK version as a list of string components, e.g. ['10', '13']."""
    try:
        output = subprocess.check_output(['xcrun', '--show-sdk-version'])
        if sys.version_info >= (3,):
            output = output.decode('utf8')
        return output.strip().split('.')
    except:
        # Fallback when xcrun is unavailable: report the macOS version instead.
        # Fix: the original called the nonexistent 'subprocess.checkplatform',
        # which raised AttributeError; platform.mac_ver() was clearly intended.
        import platform
        return platform.mac_ver()[0].split('.')
def build_llvm_tool(tool):
    """Obtain LLVM+Clang (and optionally lld) sources for 'tool' and build them with CMake.

    Sources come from git when the tool tracks a branch, otherwise from release
    archives.  Returns True on success.
    """
    debug_print('build_llvm_tool(' + str(tool) + ')')
    fastcomp_root = tool.installation_path()
    fastcomp_src_root = os.path.join(fastcomp_root, 'src')
    # Git-tracked tools: clone/update LLVM, then clang (and lld if declared) into tools/.
    if hasattr(tool, 'git_branch'):
        success = git_clone_checkout_and_pull(tool.download_url(), fastcomp_src_root, tool.git_branch)
        if not success:
            return False
        clang_root = os.path.join(fastcomp_src_root, 'tools/clang')
        success = git_clone_checkout_and_pull(tool.clang_url, clang_root, tool.git_branch)
        if not success:
            return False
        if hasattr(tool, 'lld_url'):
            lld_root = os.path.join(fastcomp_src_root, 'tools/lld')
            success = git_clone_checkout_and_pull(tool.lld_url, lld_root, tool.git_branch)
            if not success:
                return False
    else:
        # Archive-based tools: download LLVM and the platform-appropriate clang package.
        success = download_and_unzip(tool.download_url(), fastcomp_src_root, filename_prefix='llvm-e')
        if not success:
            return False
        success = download_and_unzip(tool.windows_clang_url if WINDOWS else tool.unix_clang_url, os.path.join(fastcomp_src_root, 'tools/clang'), filename_prefix='clang-e')
        if not success:
            return False
    cmake_generator = CMAKE_GENERATOR
    # 64-bit builds with Visual Studio need the Win64 flavor of the generator.
    if 'Visual Studio' in CMAKE_GENERATOR and tool.bitness == 64:
        cmake_generator += ' Win64'
    build_dir = fastcomp_build_dir(tool)
    build_root = os.path.join(fastcomp_root, build_dir)
    build_type = decide_cmake_build_type(tool)
    global BUILD_FOR_TESTING, ENABLE_LLVM_ASSERTIONS
    tests_arg = 'ON' if BUILD_FOR_TESTING else 'OFF'
    # 'auto' enables assertions for any non-release build type.
    enable_assertions = ENABLE_LLVM_ASSERTIONS.lower() == 'on' or (ENABLE_LLVM_ASSERTIONS == 'auto' and build_type.lower() != 'release' and build_type.lower() != 'minsizerel')
    only_supports_wasm = hasattr(tool, 'only_supports_wasm')
    # Map the host architecture to the matching LLVM target backend.
    if ARCH == 'x86' or ARCH == 'x86_64':
        targets_to_build = 'X86'
    elif ARCH == 'arm':
        targets_to_build = 'ARM'
    elif ARCH == 'aarch64':
        targets_to_build = 'AArch64'
    else:
        targets_to_build = ''
    if not only_supports_wasm:
        if targets_to_build != '':
            targets_to_build += ';'
        # The asm.js backend is built as the JSBackend target.
        targets_to_build += 'JSBackend'
    args = ['-DLLVM_TARGETS_TO_BUILD=' + targets_to_build, '-DLLVM_INCLUDE_EXAMPLES=OFF', '-DCLANG_INCLUDE_EXAMPLES=OFF', '-DLLVM_INCLUDE_TESTS=' + tests_arg, '-DCLANG_INCLUDE_TESTS=' + tests_arg, '-DLLVM_ENABLE_ASSERTIONS=' + ('ON' if enable_assertions else 'OFF')]
    # Allow the user to inject custom CMake options via the environment.
    if os.environ.get('LLVM_CMAKE_ARGS'):
        extra_args = os.environ['LLVM_CMAKE_ARGS'].split(',')
        print('Passing the following extra arguments to LLVM CMake configuration: ' + str(extra_args))
        args += extra_args
    if OSX and (not os.environ.get('LLVM_CMAKE_ARGS') or 'HAVE_FUTIMENS' not in os.environ.get('LLVM_CMAKE_ARGS')) and xcode_sdk_version() < ['10', '13']:
        print('Passing -DHAVE_FUTIMENS=0 to LLVM CMake configure to workaround https://github.com/kripken/emscripten/issues/5418. Please update to macOS 10.13 or newer')
        args += ['-DHAVE_FUTIMENS=0']
    success = cmake_configure(cmake_generator, build_root, fastcomp_src_root, build_type, args)
    if not success:
        return False
    success = make_build(build_root, build_type, 'x64' if tool.bitness == 64 else 'Win32')
    return success
def optimizer_build_root(tool):
    """Return the build directory path for the native js optimizer of the given tool."""
    root = tool.installation_path().strip()
    # Drop a single trailing path separator, matching the original behavior.
    if root.endswith(('/', '\\')):
        root = root[:-1]
    return root + cmake_generator_prefix() + '_' + str(tool.bitness) + 'bit_optimizer'
def uninstall_optimizer(tool):
    """Delete the js optimizer build directory of the given tool (best-effort)."""
    debug_print('uninstall_optimizer(' + str(tool) + ')')
    build_root = optimizer_build_root(tool)
    print("Deleting path '" + build_root + "'")
    try:
        remove_tree(build_root)
        os.remove(build_root)
    except:
        # Best-effort cleanup: ignore paths that are already gone or not removable.
        pass
def is_optimizer_installed(tool):
    """True if the js optimizer build directory for the given tool exists on disk."""
    return os.path.exists(optimizer_build_root(tool))
def build_optimizer_tool(tool):
    """Configure and build the native js optimizer for the given tool. Returns True on success."""
    debug_print('build_optimizer_tool(' + str(tool) + ')')
    src_root = os.path.join(tool.installation_path(), 'tools', 'optimizer')
    build_root = optimizer_build_root(tool)
    build_type = decide_cmake_build_type(tool)
    cmake_generator = CMAKE_GENERATOR
    # 64-bit MSVC builds need the Win64 generator flavor.
    if 'Visual Studio' in CMAKE_GENERATOR and tool.bitness == 64:
        cmake_generator += ' Win64'
    if not cmake_configure(cmake_generator, build_root, src_root, build_type):
        return False
    return make_build(build_root, build_type, 'x64' if tool.bitness == 64 else 'Win32')
def binaryen_build_root(tool):
    """Return the build directory path for the Binaryen build of the given tool."""
    root = tool.installation_path().strip()
    # Drop a single trailing path separator, matching the original behavior.
    if root.endswith(('/', '\\')):
        root = root[:-1]
    return root + cmake_generator_prefix() + '_' + str(tool.bitness) + 'bit_binaryen'
def uninstall_binaryen(tool):
    """Delete the Binaryen build directory of the given tool (best-effort)."""
    debug_print('uninstall_binaryen(' + str(tool) + ')')
    build_root = binaryen_build_root(tool)
    print("Deleting path '" + build_root + "'")
    try:
        remove_tree(build_root)
        os.remove(build_root)
    except:
        # Best-effort cleanup: ignore paths that are already gone or not removable.
        pass
def is_binaryen_installed(tool):
    """True if the Binaryen build directory for the given tool exists on disk."""
    return os.path.exists(binaryen_build_root(tool))
def build_binaryen_tool(tool):
    """Configure and build Binaryen from its checked-out sources. Returns True on success."""
    debug_print('build_binaryen_tool(' + str(tool) + ')')
    src_root = tool.installation_path()
    build_root = binaryen_build_root(tool)
    build_type = decide_cmake_build_type(tool)
    cmake_generator = CMAKE_GENERATOR
    args = []
    if 'Visual Studio' in CMAKE_GENERATOR:
        if tool.bitness == 64:
            cmake_generator += ' Win64'
        if BUILD_FOR_TESTING:
            args += ['-DRUN_STATIC_ANALYZER=1']
    if not cmake_configure(cmake_generator, build_root, src_root, build_type, args):
        return False
    success = make_build(build_root, build_type, 'x64' if tool.bitness == 64 else 'Win32')
    # Binaryen expects its helper scripts and src/js support files to sit next
    # to the built binaries, so mirror them into the build tree.
    remove_tree(os.path.join(build_root, 'scripts'))
    shutil.copytree(os.path.join(src_root, 'scripts'), os.path.join(build_root, 'scripts'))
    remove_tree(os.path.join(build_root, 'src', 'js'))
    shutil.copytree(os.path.join(src_root, 'src', 'js'), os.path.join(build_root, 'src', 'js'))
    return success
def download_and_unzip(zipfile, dest_dir, download_even_if_exists=False, filename_prefix=''):
    """Download archive 'zipfile' from the packages server and extract it into dest_dir.

    Skips the whole operation when the archive was already fetched and dest_dir
    is non-empty, unless download_even_if_exists forces a refresh.
    """
    debug_print('download_and_unzip(zipfile=' + zipfile + ', dest_dir=' + dest_dir + ')')
    url = urljoin(emsdk_packages_url, zipfile)
    download_target = get_download_target(url, zips_subdir, filename_prefix)
    if not download_even_if_exists and os.path.exists(download_target) and num_files_in_directory(dest_dir) > 0:
        print("The contents of file '" + zipfile + "' already exist in destination '" + dest_dir + "', skipping.")
        return True
    # Otherwise, if the archive must be downloaded, always write into the
    # target directory, since it may be a new version of a tool that gets
    # installed to the same place (that is, a different download name
    # indicates different contents).
    download_even_if_exists = True
    received_download_target = download_file(url, zips_subdir, download_even_if_exists, filename_prefix)
    if not received_download_target:
        return False
    assert received_download_target == download_target
    # Pick the extractor by archive type.
    unpack = unzip if zipfile.endswith('.zip') else untargz
    return unpack(download_target, dest_dir, unpack_even_if_exists=download_even_if_exists)
def to_native_path(p):
    """Convert path p to the native form of the host OS (backslashes on non-MSYS Windows)."""
    unix_form = to_unix_path(p)
    if WINDOWS and not MSYS:
        return unix_form.replace('/', '\\')
    return unix_form
# Finds and returns a list of the directories that need to be added to PATH for the given set of tools.
def get_required_path(active_tools):
    """Collect the directories that must be added to PATH for the given active tools."""
    path_add = [to_native_path(emsdk_path())]
    for tool in active_tools:
        if hasattr(tool, 'activated_path'):
            path_add.append(to_native_path(tool.expand_vars(tool.activated_path)))
    return path_add
# Returns the absolute path to the file '.emscripten' for the current user on this system.
def dot_emscripten_path():
    """Absolute path of the current user's '.emscripten' configuration file."""
    return os.path.join(emscripten_config_directory, ".emscripten")
# Maps configuration keys (e.g. 'NODE_JS') to the values read from the user's
# existing .emscripten file; populated by load_dot_emscripten().
dot_emscripten = {}
def parse_key_value(line):
    """Parse a 'KEY=value' line into a (key, value) tuple of stripped strings.

    Empty input yields ('', ''); a line without '=' yields (line, '').
    """
    if not line:
        return ('', '')
    eq = line.find('=')
    if eq != -1:
        key = line[0:eq].strip()
        value = line[eq + 1:].strip()
        return (key, value)
    else:
        # Fix: the original returned the undefined name 'key' here, which
        # raised NameError for any line that does not contain '='.
        return (line, '')
def load_dot_emscripten():
    """(Re)populate the global dot_emscripten dict from the user's .emscripten file."""
    global dot_emscripten
    dot_emscripten = {}
    try:
        with open(dot_emscripten_path(), "r") as f:
            lines = f.read().split('\n')
    except:
        # Missing/unreadable config: treat as empty.
        lines = []
    for line in lines:
        try:
            key, value = parse_key_value(line)
            if value != '':
                dot_emscripten[key] = value
            # print("Got '" + key + "' = '" + value + "'")
        except:
            pass
def generate_dot_emscripten(active_tools):
    """Write a fresh .emscripten configuration file for the given active tools.

    Backs up any previous configuration, clears stale emscripten caches, and
    prints PATH guidance for the user.
    """
    global emscripten_config_directory
    # Embedded mode: the config lives inside the emsdk directory itself, and
    # all paths are written relative to a dynamically computed emsdk_path.
    if emscripten_config_directory == emsdk_path():
        temp_dir = sdk_path('tmp')
        mkdir_p(temp_dir)
        embedded = True
    else:
        temp_dir = tempfile.gettempdir().replace('\\', '/')
        embedded = False
    has_spidermonkey = False
    has_node = False
    cfg = 'import os\n'
    if embedded:
        cfg += "emsdk_path=os.path.dirname(os.environ.get('EM_CONFIG')).replace('\\\\', '/')\n"
    # Different tools may provide the same activated configs; the latest to be
    # activated is the relevant one.
    activated_keys_in_order = []
    activated_key_values = {}
    for tool in active_tools:
        tool_cfg = tool.activated_config()
        if tool_cfg:
            # Each tool contributes ';'-separated 'NAME=value' entries.
            for specific_cfg in tool_cfg.split(';'):
                name, value = specific_cfg.split('=')
                if name not in activated_key_values:
                    activated_keys_in_order.append(name)
                activated_key_values[name] = value
    for name in activated_keys_in_order:
        if name == 'SPIDERMONKEY_ENGINE':
            has_spidermonkey = True
        if name == 'NODE_JS':
            has_node = True
        cfg += name + ' = ' + activated_key_values[name] + '\n'
    # These two vars must always be defined, even though they might not exist.
    if not has_spidermonkey:
        cfg += "SPIDERMONKEY_ENGINE = ''\n"
    if not has_node:
        # Prefer a 'nodejs' binary if present (Debian naming), else plain 'node'.
        node_fallback = which('nodejs')
        if not node_fallback:
            node_fallback = 'node'
        cfg += "NODE_JS = '" + node_fallback + "'\n"
    cfg += '''V8_ENGINE = ''
TEMP_DIR = ''' + "'" + temp_dir + "'" + '''
COMPILER_ENGINE = NODE_JS
JS_ENGINES = [NODE_JS]
'''
    if embedded:
        # Rewrite absolute paths into expressions relative to the dynamic emsdk_path.
        cfg = cfg.replace(emscripten_config_directory, "' + emsdk_path + '")
    if os.path.exists(dot_emscripten_path()):
        backup_path = dot_emscripten_path() + ".old"
        print("Backing up old Emscripten configuration file in " + os.path.normpath(backup_path))
        move_with_overwrite(dot_emscripten_path(), backup_path)
    with open(dot_emscripten_path(), "w") as text_file:
        text_file.write(cfg)
    # Clear old cached emscripten content.
    try:
        remove_tree(os.path.join(emscripten_config_directory, ".emscripten_cache"))
        os.remove(os.path.join(emscripten_config_directory, ".emscripten_sanity"))
        os.remove(os.path.join(emscripten_config_directory, ".emscripten_cache__last_clear"))
    except:
        pass
    print("The Emscripten configuration file " + os.path.normpath(dot_emscripten_path()) + " has been rewritten with the following contents:")
    print('')
    print(cfg.strip())
    print('')
    path_add = get_required_path(active_tools)
    if not WINDOWS:
        emsdk_env = os.path.relpath(sdk_path('emsdk_env.sh'))
        if '/' not in emsdk_env:
            emsdk_env = './emsdk_env.sh'
        print("To conveniently access the selected set of tools from the command line, consider adding the following directories to PATH, or call 'source " + emsdk_env + "' to do this for you.")
        print('')
        print(' ' + ENVPATH_SEPARATOR.join(path_add))
def find_msbuild_dir():
    """Return the MSBuild VC++ Platforms directory, or '' if MSBuild is not installed."""
    program_files = os.environ.get('ProgramFiles') or 'C:/Program Files'
    program_files_x86 = os.environ.get('ProgramFiles(x86)') or 'C:/Program Files (x86)'
    # Prefer the x86 Program Files location, matching the original search order.
    candidates = [os.path.join(program_files_x86, "MSBuild/Microsoft.Cpp/v4.0/Platforms"),
                  os.path.join(program_files, "MSBuild/Microsoft.Cpp/v4.0/Platforms")]
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return '' # No MSbuild installed.
def get_installed_vstool_version(installed_path):
    """Return the contents of version.txt under installed_path, or None if unreadable."""
    try:
        # Fix: close the file handle (the original leaked it via open(...).read()).
        with open(installed_path + "/version.txt", "r") as f:
            return f.read()
    except:
        # Broad except kept deliberately: any failure means "version unknown".
        return None
class Tool(object):
def __init__(self, data):
# Convert the dictionary representation of the tool in 'data' to members of this class for convenience.
for key, value in data.items():
# Python2 compat, convert unicode to str
if sys.version_info < (3,) and isinstance(value, unicode): # noqa
value = value.encode('Latin-1')
setattr(self, key, value)
# Cache the name ID of this Tool (these are read very often)
self.name = self.id + '-' + self.version
if hasattr(self, 'bitness'):
self.name += '-' + str(self.bitness) + 'bit'
def __str__(self):
return self.name
def __repr__(self):
return self.name
    def expand_vars(self, str):
        """Expand %...% placeholder variables in the given string and return the result.

        NOTE: the parameter is (unfortunately) named 'str', shadowing the
        builtin inside this method; kept as-is for call compatibility.
        """
        if WINDOWS and '%MSBuildPlatformsDir%' in str:
            str = str.replace('%MSBuildPlatformsDir%', find_msbuild_dir())
        # On Windows, MSVC builds place outputs under a per-config subdirectory.
        if '%cmake_build_type_on_win%' in str:
            str = str.replace('%cmake_build_type_on_win%', (decide_cmake_build_type(self) + '/') if WINDOWS else '')
        if '%installation_dir%' in str:
            str = str.replace('%installation_dir%', sdk_path(self.installation_dir()))
        if '%generator_prefix%' in str:
            str = str.replace('%generator_prefix%', cmake_generator_prefix())
        str = str.replace('%.exe%', '.exe' if WINDOWS else '')
        if '%fastcomp_build_dir%' in str:
            str = str.replace('%fastcomp_build_dir%', fastcomp_build_dir(self))
        if '%fastcomp_build_bin_dir%' in str:
            str = str.replace('%fastcomp_build_bin_dir%', fastcomp_build_bin_dir(self))
        return str
# Return true if this tool requires building from source, and false if this is a precompiled tool.
def needs_compilation(self):
if hasattr(self, 'cmake_build_type'):
return True
if hasattr(self, 'uses'):
for tool_name in self.uses:
tool = find_tool(tool_name)
if not tool:
debug_print('Tool ' + str(self) + ' depends on ' + tool_name + ' which does not exist!')
continue
if tool.needs_compilation():
return True
return False
# Specifies the target path where this tool will be installed to. This could either be a directory or a filename (e.g. in case of node.js)
def installation_path(self):
if WINDOWS and hasattr(self, 'windows_install_path'):
pth = self.expand_vars(self.windows_install_path)
return sdk_path(pth)
if hasattr(self, 'install_path'):
pth = self.expand_vars(self.install_path)
return sdk_path(pth)
p = self.version
if hasattr(self, 'bitness') and (not hasattr(self, 'append_bitness') or self.append_bitness):
p += '_' + str(self.bitness) + 'bit'
return sdk_path(os.path.join(self.id, p))
# Specifies the target directory this tool will be installed to.
def installation_dir(self):
dir = self.installation_path()
if path_points_to_directory(dir):
return dir
else:
return os.path.dirname(dir)
# Returns the configuration item that needs to be added to .emscripten to make this Tool active for the current user.
def activated_config(self):
if hasattr(self, 'activated_cfg'):
return to_unix_path(self.expand_vars(self.activated_cfg))
else:
return ''
def activated_environment(self):
if hasattr(self, 'activated_env'):
return self.expand_vars(self.activated_env).split(';')
else:
return []
def compatible_with_this_arch(self):
if hasattr(self, 'arch'):
if self.arch != ARCH:
return False
return True
    def compatible_with_this_os(self):
        """Return True if this tool can run on the current OS and architecture.

        A tool declares compatibility either explicitly via an 'os' attribute
        ('all', or a string containing win/linux/osx/unix), or implicitly by
        which per-OS download URL attributes it provides.
        """
        if hasattr(self, 'os'):
            if self.os == 'all':
                return True
            # 'unix' in the os field matches both Linux and macOS hosts.
            if self.compatible_with_this_arch() and ((WINDOWS and 'win' in self.os) or (LINUX and ('linux' in self.os or 'unix' in self.os)) or (OSX and ('osx' in self.os or 'unix' in self.os))):
                return True
            else:
                return False
        else:
            # No explicit 'os' field: infer compatibility from which URL attributes exist.
            if not hasattr(self, 'osx_url') and not hasattr(self, 'windows_url') and not hasattr(self, 'unix_url') and not hasattr(self, 'linux_url'):
                return True
            if OSX and hasattr(self, 'osx_url') and self.compatible_with_this_arch():
                return True
            if LINUX and hasattr(self, 'linux_url') and self.compatible_with_this_arch():
                return True
            if WINDOWS and (hasattr(self, 'windows_url') or hasattr(self, 'windows_install_path')) and self.compatible_with_this_arch():
                return True
            if UNIX and hasattr(self, 'unix_url'):
                return True
            # Fall back to a generic 'url' attribute that applies to any OS.
            return hasattr(self, 'url')
    def is_installed(self):
        """Return True if this tool (and everything it depends on) is present on disk."""
        # If this tool/sdk depends on other tools, require that all dependencies are installed for this tool to count as being installed.
        if hasattr(self, 'uses'):
            for tool_name in self.uses:
                tool = find_tool(tool_name)
                if tool is None:
                    print("Manifest error: No tool by name '" + tool_name + "' found! This may indicate an internal SDK error!")
                    return False
                if not tool.is_installed():
                    return False
        if self.download_url() is not None:
            # For e.g. fastcomp clang from git repo, the activated PATH is the directory where the compiler is built to, and installation_path is
            # the directory where the source tree exists. To distinguish between multiple packages sharing the same source
            # (clang-master-32bit, clang-master-64bit, clang-incoming-32bit and clang-incoming-64bit each share the same git repo), require
            # that in addition to the installation directory, each item in the activated PATH must exist.
            activated_path = self.expand_vars(self.activated_path).split(';') if hasattr(self, 'activated_path') else [self.installation_path()]

            # Helper: all entries of the activated PATH list must exist on disk.
            def each_path_exists(pathlist):
                for path in pathlist:
                    if not os.path.exists(path):
                        return False
                return True

            content_exists = os.path.exists(self.installation_path()) and each_path_exists(activated_path) and (os.path.isfile(self.installation_path()) or num_files_in_directory(self.installation_path()) > 0)
            if self.id == 'vs-tool': # vs-tool is a special tool since all versions must be installed to the same dir, so dir name will not differentiate the version.
                return content_exists and get_installed_vstool_version(self.installation_path()) == self.version
            elif hasattr(self, 'custom_is_installed_script'):
                if self.custom_is_installed_script == 'is_optimizer_installed':
                    return is_optimizer_installed(self)
                elif self.custom_is_installed_script == 'is_binaryen_installed':
                    return is_binaryen_installed(self)
                else:
                    raise Exception('Unknown custom_is_installed_script directive "' + self.custom_is_installed_script + '"!')
            else:
                return content_exists
        else:
            return True # This tool does not contain downloadable elements, so it is installed by default.
    def is_active(self):
        """Return True if this tool is installed and its config entries are live in .emscripten."""
        if not self.is_installed():
            return False
        if self.id == 'vs-tool':
            return True # vs-tool is a special tool since all versions must be installed to the same dir, which means that if this tool is installed, it is also active.
        # All dependencies of this tool must be active as well.
        deps = self.dependencies()
        for tool in deps:
            if not tool.is_active():
                return False
        activated_cfg = self.activated_config()
        if activated_cfg == '':
            # A tool with no config of its own is active exactly when it has active dependencies.
            return len(deps) > 0
        activated_cfg = activated_cfg.split(';')
        # Every 'KEY=value' this tool contributes must match what .emscripten currently holds.
        for cfg in activated_cfg:
            cfg = cfg.strip()
            key, value = parse_key_value(cfg)
            if key not in dot_emscripten:
                debug_print(str(self) + ' is not active, because key="' + key + '" does not exist in .emscripten')
                return False
            # If running in embedded mode, all paths are stored dynamically relative to the emsdk root, so normalize those first.
            dot_emscripten_key = dot_emscripten[key].replace("' + emsdk_path + '", emsdk_path())
            if dot_emscripten_key != value:
                debug_print(str(self) + ' is not active, because key="' + key + '" has value "' + dot_emscripten_key + '" but should have value "' + value + '"')
                return False
        return True
# Returns true if the system environment variables requires by this tool are currently active.
    def is_env_active(self):
        """Return True if the environment variables and PATH entries this tool requires are currently set."""
        envs = self.activated_environment()
        for env in envs:
            key, value = parse_key_value(env)
            if key not in os.environ or to_unix_path(os.environ[key]) != to_unix_path(value):
                debug_print(str(self) + ' is not active, because environment variable key="' + key + '" has value "' + str(os.getenv(key)) + '" but should have value "' + value + '"')
                return False
        if hasattr(self, 'activated_path'):
            # Every directory in the tool's activated PATH must already be on PATH.
            path = self.expand_vars(self.activated_path).replace('\\', '/')
            path = path.split(ENVPATH_SEPARATOR)
            for p in path:
                path_items = os.environ['PATH'].replace('\\', '/').split(ENVPATH_SEPARATOR)
                if not normalized_contains(path_items, p):
                    debug_print(str(self) + ' is not active, because environment variable PATH item "' + p + '" is not present (PATH=' + os.environ['PATH'] + ')')
                    return False
        return True
def win_activate_env_vars(self, permanently_activate):
if WINDOWS:
envs = self.activated_environment()
for env in envs:
key, value = parse_key_value(env)
if permanently_activate:
win_delete_environment_variable(key, False) # If there is an env var for the LOCAL USER with same name, it will hide the system var, so must remove that first.
win_set_environment_variable(key, value, permanently_activate)
# If this tool can be installed on this system, this function returns True.
# Otherwise, this function returns a string that describes the reason why this tool is not available.
def can_be_installed(self):
if hasattr(self, 'bitness'):
if self.bitness == 64 and not is_os_64bit():
return "this tool is only provided for 64-bit OSes"
if self.id == 'vs-tool':
msbuild_dir = find_msbuild_dir()
if len(msbuild_dir) > 0:
return True
else:
return "Visual Studio 2010 was not found"
else:
return True
def download_url(self):
if WINDOWS and hasattr(self, 'windows_url'):
return self.windows_url
elif OSX and hasattr(self, 'osx_url'):
return self.osx_url
elif LINUX and hasattr(self, 'linux_url'):
return self.linux_url
elif UNIX and hasattr(self, 'unix_url'):
return self.unix_url
elif hasattr(self, 'url'):
return self.url
else:
return None
def install(self):
    """Download and install this tool or SDK.

    SDKs recursively install every tool they use. Tools either clone a
    git branch, download+unpack an archive, or fetch a single file, then
    optionally run a custom build step. Returns True on success.
    """
    if self.can_be_installed() is not True:
        print("The tool '" + str(self) + "' is not available due to the reason: " + self.can_be_installed())
        return False
    if self.id == 'sdk':
        print("Installing SDK '" + str(self) + "'..")
        for tool_name in self.uses:
            tool = find_tool(tool_name)
            if tool is None:
                # NOTE(review): when the tool is missing this only prints a
                # diagnostic but still calls tool.install() below, which will
                # raise AttributeError on None — likely should return False here.
                print("Manifest error: No tool by name '" + tool_name + "' found! This may indicate an internal SDK error!")
            success = tool.install()
            if not success:
                return False
        print("Done installing SDK '" + str(self) + "'.")
        return True
    else:
        print("Installing tool '" + str(self) + "'..")
        url = self.download_url()
        # 'build_fastcomp' performs its own download as part of the build;
        # all other paths download first and build afterwards.
        if hasattr(self, 'custom_install_script') and self.custom_install_script == 'build_fastcomp':
            success = build_llvm_tool(self)
        elif hasattr(self, 'git_branch'):
            success = git_clone_checkout_and_pull(url, self.installation_path(), self.git_branch)
        elif url.endswith(ARCHIVE_SUFFIXES):
            # TODO: explain the vs-tool special-casing
            download_even_if_exists = (self.id == 'vs-tool')
            # if we are downloading a zip, we will unpack and delete it after immediately anyhow,
            # so there is no need to look for an existing one (which may have been left behind
            # due to an error in the past)
            if url.endswith(ARCHIVE_SUFFIXES):
                download_even_if_exists = True
            filename_prefix = getattr(self, 'zipfile_prefix', '')
            success = download_and_unzip(url, self.installation_path(), download_even_if_exists=download_even_if_exists, filename_prefix=filename_prefix)
        else:
            # Plain single-file download (no archive, no git).
            dst_file = download_file(urljoin(emsdk_packages_url, self.download_url()), self.installation_path())
            if dst_file:
                success = True
            else:
                success = False
        if success:
            if hasattr(self, 'custom_install_script'):
                if self.custom_install_script == 'build_optimizer':
                    success = build_optimizer_tool(self)
                elif self.custom_install_script == 'build_fastcomp':
                    pass # 'build_fastcomp' is a special one that does the download on its own, others do the download manually.
                elif self.custom_install_script == 'build_binaryen':
                    success = build_binaryen_tool(self)
                else:
                    raise Exception('Unknown custom_install_script command "' + self.custom_install_script + '"!')
            # Install an emscripten-version.txt file if told to, and if there is one.
            # (If this is not an actual release, but some other build, then we do not
            # write anything.)
            if hasattr(self, 'emscripten_releases_hash'):
                emscripten_version_file_path = os.path.join(to_native_path(self.expand_vars(self.activated_path)), 'emscripten-version.txt')
                version = get_emscripten_release_version(self.emscripten_releases_hash)
                if version:
                    open(emscripten_version_file_path, 'w').write('"%s"' % version)
        if not success:
            print("Installation failed!")
            return False
        print("Done installing tool '" + str(self) + "'.")
        # Sanity check that the installation succeeded, and if so, remove unneeded leftover installation files.
        if self.is_installed():
            self.cleanup_temp_install_files()
        else:
            print("Warning: The installation of '" + str(self) + "' seems to have failed, but no error was detected. Either something went wrong with the installation, or this may indicate an internal emsdk error.")
        return True
def cleanup_temp_install_files(self):
    """Delete the downloaded archive left behind by a completed install."""
    url = self.download_url()
    if url.endswith(ARCHIVE_SUFFIXES):
        download_target = get_download_target(url, zips_subdir, getattr(self, 'zipfile_prefix', ''))
        debug_print("Deleting temporary zip file " + download_target)
        rmfile(download_target)
def uninstall(self):
    """Remove this tool's installation from disk (no-op when not installed)."""
    if not self.is_installed():
        print("Tool '" + str(self) + "' was not installed. No need to uninstall.")
        return
    print("Uninstalling tool '" + str(self) + "'..")
    if hasattr(self, 'custom_uninstall_script'):
        if self.custom_uninstall_script == 'uninstall_optimizer':
            uninstall_optimizer(self)
        elif self.custom_uninstall_script == 'uninstall_binaryen':
            uninstall_binaryen(self)
        else:
            raise Exception('Unknown custom_uninstall_script directive "' + self.custom_uninstall_script + '"!')
    try:
        print("Deleting path '" + self.installation_path() + "'")
        remove_tree(self.installation_path())
        # The installation may also be a single file rather than a directory.
        os.remove(self.installation_path())
    except:
        # Best-effort removal: ignore paths that are already gone or locked.
        pass
    print("Done uninstalling '" + str(self) + "'.")
def dependencies(self):
    """Return the direct dependency Tools named in 'uses' (unknown names skipped)."""
    dep_names = getattr(self, 'uses', [])
    return [dep for dep in (find_tool(name) for name in dep_names) if dep]
def recursive_dependencies(self):
    """Return the transitive dependency closure, depth-first; may contain duplicates."""
    if not hasattr(self, 'uses'):
        return []
    result = []
    for dep_name in self.uses:
        dep = find_tool(dep_name)
        if not dep:
            continue
        result.append(dep)
        result.extend(dep.recursive_dependencies())
    return result
# A global registry of all known Emscripten SDK tools available in the SDK manifest.
tools = []      # All Tool objects, in manifest (ascending chronological) order.
tools_map = {}  # str(tool) -> Tool, for O(1) lookup by name.
def add_tool(tool):
    """Register a tool in the global registry; raises on a duplicate name."""
    tool.is_sdk = False
    tools.append(tool)
    # Duplicate names indicate a manifest error; dump both objects' attributes.
    if find_tool(str(tool)):
        raise Exception('Duplicate tool ' + str(tool) + '! Existing:\n{' + ', '.join("%s: %s" % item for item in vars(find_tool(str(tool))).items()) + '}, New:\n{' + ', '.join("%s: %s" % item for item in vars(tool).items()) + '}')
    tools_map[str(tool)] = tool
# A global registry of all known SDK toolsets.
sdks = []      # All SDK Tool objects, in manifest (ascending chronological) order.
sdks_map = {}  # str(sdk) -> Tool, for O(1) lookup by name.
def add_sdk(sdk):
    """Register an SDK in the global registry; raises on a duplicate name."""
    sdk.is_sdk = True
    sdks.append(sdk)
    # Duplicate names indicate a manifest error; dump both objects' attributes.
    if find_sdk(str(sdk)):
        raise Exception('Duplicate sdk ' + str(sdk) + '! Existing:\n{' + ', '.join("%s: %s" % item for item in vars(find_sdk(str(sdk))).items()) + '}, New:\n{' + ', '.join("%s: %s" % item for item in vars(sdk).items()) + '}')
    sdks_map[str(sdk)] = sdk
# N.B. In both tools and sdks list above, we take the convention that the newest items are at the back of the list (ascending chronological order)
def find_tool(name):
    """Look up a registered tool by name; returns None when unknown."""
    if name in tools_map:
        return tools_map[name]
    return None
def find_sdk(name):
    """Look up a registered SDK by name; returns None when unknown."""
    if name in sdks_map:
        return sdks_map[name]
    return None
def is_os_64bit():
    """Best-effort 64-bit OS detection from the machine architecture string."""
    # http://stackoverflow.com/questions/2208828/detect-64bit-os-windows-in-python
    machine = platform.machine()
    return machine.endswith('64')
def find_latest_releases_version():
    """Version string of the newest emscripten-releases build."""
    return load_releases_info()['latest']
def find_latest_releases_hash():
    """Git hash of the newest emscripten-releases build."""
    releases = load_releases_info()['releases']
    return releases[find_latest_releases_version()]
def find_latest_releases_sdk(which):
    """Build the sdk-releases identifier for the newest release of *which*."""
    latest_hash = find_latest_releases_hash()
    return 'sdk-releases-%s-%s-64bit' % (which, latest_hash)
def find_tot_sdk(which):
    """Return the sdk-releases identifier for the cached tip-of-tree build.

    Exits the process when tip-of-tree info has not been fetched yet
    (the user must run 'emsdk update-tags' first).
    """
    if not os.path.exists(tot_path()):
        print('Tip-of-tree information was not found, run emsdk update-tags')
        sys.exit(1)
    tot = open(tot_path()).read()
    if not tot:
        print('Tip-of-tree build was not found, run emsdk update-tags (however, if there is no recent tip-of-tree build, you may need to wait)')
        sys.exit(1)
    return 'sdk-releases-%s-%s-64bit' % (which, tot)
# Given a git hash in emscripten-releases, find the emscripten
# version for it. There may not be one if this is not the hash of
# a release, in which case we return None.
def get_emscripten_release_version(emscripten_releases_hash):
    releases = load_releases_info()['releases']
    for version, release_hash in releases.items():
        if release_hash == emscripten_releases_hash:
            return version
    return None
def tot_path():
    # Location of the cached tip-of-tree build hash.
    return sdk_path('emscripten-releases-tot.txt')
# Get the tip-of-tree build identifier.
def get_emscripten_releases_tot():
    """Return the newest emscripten-releases commit that has a prebuilt
    package available for this OS, or '' when none of the recent commits do.
    Performs network access (git pull + HTTP probes).
    """
    git_clone_checkout_and_pull(emscripten_releases_repo, sdk_path('releases'), 'master')
    recent_releases = git_recent_commits(sdk_path('releases'))
    # The recent releases are the latest hashes in the git repo. There
    # may not be a build for the most recent ones yet; find the last
    # that does.
    for release in recent_releases:
        url = emscripten_releases_download_url_template % (
            os_name_for_emscripten_releases(),
            release,
            'tbz2' if not WINDOWS else 'zip'
        )
        try:
            urlopen(url)
        except:
            # Any failure (HTTP 404, network error) means no build exists
            # for this commit; try the next one.
            continue
        return release
    return ''
# Finds the best-matching python tool for use.
def find_used_python():
    """Pick the preferred installed python tool, newest first, favoring
    fully-activated ones; returns None when no python tool is installed."""
    # Preference tiers, strongest first; within each tier the newest tool
    # wins (newest entries live at the back of the global 'tools' list).
    tiers = [
        lambda t: t.is_active() and t.is_env_active(),
        lambda t: t.is_active(),
        lambda t: True,
    ]
    for qualifies in tiers:
        for t in reversed(tools):
            if t.id == 'python' and t.is_installed() and qualifies(t):
                return t
    return None
def version_key(ver):
    """Sort key for version strings: split on '.', '_' or '-' into int parts."""
    return [int(part) for part in re.split('[._-]', ver)]
# A sort function that is compatible with both Python 2 and Python 3 using a custom comparison function.
def python_2_3_sorted(arr, cmp):
    """Sort arr with an old-style cmp(a, b) comparator on either Python major version."""
    if sys.version_info < (3,):
        # Python 2's sorted() accepts the comparator directly.
        return sorted(arr, cmp=cmp)
    # Python 3 dropped cmp=; adapt the comparator into a key function.
    return sorted(arr, key=functools.cmp_to_key(cmp))
def fetch_emscripten_tags():
    """Refresh the cached tip-of-tree hash for emscripten-releases.

    Requires git; when git is not available, prints per-OS install
    instructions instead.
    """
    git = GIT(must_succeed=False)
    if git:
        print('Fetching emscripten-releases repository...')
        emscripten_releases_tot = get_emscripten_releases_tot()
        if emscripten_releases_tot:
            open(tot_path(), 'w').write(emscripten_releases_tot)
    else:
        print('Update complete, however skipped fetching the Emscripten tags, since git was not found, which is necessary for update-tags.')
        if WINDOWS:
            print("Please install git by typing 'emsdk install git-1.9.4', or alternatively by installing it manually from http://git-scm.com/downloads . If you install git manually, remember to add it to PATH.")
        elif OSX:
            print("Please install git from http://git-scm.com/ , or by installing XCode and then the XCode Command Line Tools (see http://stackoverflow.com/questions/9329243/xcode-4-4-command-line-tools ).")
        elif LINUX:
            # BUGFIX: corrected user-facing typo 'Pease install git' -> 'Please install git'.
            print("Please install git using your package manager, see http://git-scm.com/book/en/Getting-Started-Installing-Git .")
        else:
            print("Please install git.")
    return
def is_emsdk_sourced_from_github():
    """True when this emsdk checkout is a git clone (a .git entry exists)."""
    git_dir = os.path.join(emsdk_path(), '.git')
    return os.path.exists(git_dir)
def update_emsdk():
    """Update emsdk itself by downloading the latest release zip.

    Refuses to run on git clones (use 'git pull' there) and exits the
    process on any failure.
    """
    if is_emsdk_sourced_from_github():
        print('You seem to have bootstrapped Emscripten SDK by cloning from GitHub. In this case, use "git pull" instead of "emsdk update" to update emsdk. (Not doing that automatically in case you have local changes)', file=sys.stderr)
        print('Alternatively, use "emsdk update-tags" to refresh the latest list of tags from the different Git repositories.', file=sys.stderr)
        sys.exit(1)
    if not download_and_unzip(emsdk_zip_download_url, emsdk_path(), download_even_if_exists=True):
        sys.exit(1)
    fetch_emscripten_tags()
# Lists all legacy (pre-emscripten-releases) tagged versions directly in the Git repositories. These we can pull and compile from source.
def load_legacy_emscripten_tags():
    """Return cached legacy emscripten tag names, or [] when the cache
    file is missing or unreadable."""
    try:
        # Use a context manager so the handle is closed promptly, and catch
        # only file errors instead of a bare 'except' that would also
        # swallow KeyboardInterrupt/SystemExit.
        with open(sdk_path('legacy-emscripten-tags.txt'), 'r') as f:
            return f.read().split('\n')
    except (IOError, OSError):
        return []
def load_legacy_binaryen_tags():
    """Return cached legacy binaryen tag names, or [] when the cache
    file is missing or unreadable."""
    try:
        # Context manager closes the handle; narrow except avoids
        # swallowing KeyboardInterrupt/SystemExit like the bare form did.
        with open(sdk_path('legacy-binaryen-tags.txt'), 'r') as f:
            return f.read().split('\n')
    except (IOError, OSError):
        return []
def remove_prefix(s, prefix):
    """Return s without a leading prefix; unchanged when the prefix is absent."""
    return s[len(prefix):] if s.startswith(prefix) else s
def remove_suffix(s, suffix):
    """Return s without a trailing suffix; unchanged when the suffix is absent."""
    if not s.endswith(suffix):
        return s
    # Slice by length difference; s[:-len(suffix)] would misbehave for ''.
    return s[:len(s) - len(suffix)]
# filename should be one of: 'llvm-nightlies-32bit.txt', 'llvm-nightlies-64bit.txt', 'llvm-precompiled-tags-32bit.txt', 'llvm-precompiled-tags-64bit.txt', 'emscripten-nightlies.txt'
def load_file_index_list(filename):
    """Parse a cached package-index file into version strings, newest first.

    Strips the known filename prefixes/suffixes, drops 'latest' aliases and
    empty lines. Returns [] when the file is missing or any line fails to
    parse as a version.
    """
    try:
        items = open(sdk_path(filename), 'r').read().split('\n')
        items = map(lambda x: remove_suffix(remove_suffix(remove_prefix(remove_prefix(x, 'emscripten-llvm-e'), 'emscripten-nightly-'), '.tar.gz'), '.zip').strip(), items)
        items = filter(lambda x: 'latest' not in x and len(x) > 0, items)
        # Sort versions from oldest to newest (the default sort would be lexicographic, i.e. '1.37.1 < 1.37.10 < 1.37.2')
        items = sorted(items, key=version_key)[::-1]
        return items
    except:
        # Broad on purpose: also swallows version_key() failures on junk lines.
        return []
# Thin wrappers naming the individual cached index files.
def load_llvm_32bit_nightlies():
    return load_file_index_list('llvm-nightlies-32bit.txt')
def load_llvm_64bit_nightlies():
    return load_file_index_list('llvm-nightlies-64bit.txt')
def load_emscripten_nightlies():
    return load_file_index_list('emscripten-nightlies.txt')
def load_llvm_precompiled_tags_32bit():
    return load_file_index_list('llvm-tags-32bit.txt')
def load_llvm_precompiled_tags_64bit():
    return load_file_index_list('llvm-tags-64bit.txt')
# Load the json info for emscripten-releases.
def load_releases_info():
    """Parse the cached emscripten-releases-tags.txt JSON.

    Exits the process on a parse failure, since nothing else can work
    without this data.
    """
    try:
        text = open(sdk_path('emscripten-releases-tags.txt'), 'r').read()
        return json.loads(text)
    except Exception as e:
        print('Error parsing emscripten-releases-tags.txt!')
        print(str(e))
        sys.exit(1)
# Get a list of tags for emscripten-releases.
def load_releases_tags():
    """All release git hashes, plus the tip-of-tree hash when one is cached."""
    tags = list(load_releases_info()['releases'].values())
    # Add the tip-of-tree, if it exists.
    if os.path.exists(tot_path()):
        tot = open(tot_path()).read()
        if tot:
            tags.append(tot)
    return tags
def load_releases_versions():
    """All known release version strings (unsorted)."""
    return list(load_releases_info()['releases'].keys())
def is_string(s):
    """True when s is a text string on either Python 2 or Python 3."""
    if sys.version_info[0] < 3:
        return isinstance(s, basestring) # noqa
    return isinstance(s, str)
def load_sdk_manifest():
    """Parse emsdk_manifest.json and populate the global tools/sdks registries.

    Entries whose version contains a %category% placeholder (e.g. %tag%,
    %releases-tag%) are metapackages: they are expanded into one concrete
    Tool/SDK per version in the corresponding cached index list.
    """
    global tools, sdks
    try:
        manifest = json.loads(open(sdk_path("emsdk_manifest.json"), "r").read())
    except Exception as e:
        print('Error parsing emsdk_manifest.json!')
        print(str(e))
        return
    # Cached version lists, reversed so the newest entries end up last
    # (matching the registry convention of "newest at the back").
    emscripten_tags = load_legacy_emscripten_tags()
    llvm_precompiled_tags_32bit = list(reversed(load_llvm_precompiled_tags_32bit()))
    llvm_precompiled_tags_64bit = list(reversed(load_llvm_precompiled_tags_64bit()))
    llvm_precompiled_tags = llvm_precompiled_tags_32bit + llvm_precompiled_tags_64bit
    binaryen_tags = load_legacy_binaryen_tags()
    llvm_32bit_nightlies = list(reversed(load_llvm_32bit_nightlies()))
    llvm_64bit_nightlies = list(reversed(load_llvm_64bit_nightlies()))
    emscripten_nightlies = list(reversed(load_emscripten_nightlies()))
    releases_tags = load_releases_tags()

    # True when every tool named in sdk.uses is already registered.
    def dependencies_exist(sdk):
        for tool_name in sdk.uses:
            tool = find_tool(tool_name)
            if not tool:
                return False
        return True

    # Compare two version strings with the given comparison-operator string.
    def cmp_version(ver, cmp_operand, reference):
        if cmp_operand == '<=':
            return version_key(ver) <= version_key(reference)
        if cmp_operand == '<':
            return version_key(ver) < version_key(reference)
        if cmp_operand == '>=':
            return version_key(ver) >= version_key(reference)
        if cmp_operand == '>':
            return version_key(ver) > version_key(reference)
        if cmp_operand == '==':
            return version_key(ver) == version_key(reference)
        if cmp_operand == '!=':
            return version_key(ver) != version_key(reference)
        raise Exception('Invalid cmp_operand "' + cmp_operand + '"!')

    # True when ver satisfies every filter triple [param, operator, reference].
    def passes_filters(param, ver, filters):
        for v in filters:
            if v[0] == param and not cmp_version(ver, v[1], v[2]):
                return False
        return True

    # A 'category parameter' is a %foo%-encoded identifier that specifies
    # a class of tools instead of just one tool, e.g. %tag% or %nightly..%
    def expand_category_param(param, category_list, t, is_sdk):
        for i, ver in enumerate(category_list):
            if not ver.strip():
                continue
            # Shallow-copy the template entry and substitute the placeholder
            # in every string attribute.
            t2 = copy.copy(t)
            found_param = False
            for p, v in vars(t2).items():
                if is_string(v) and param in v:
                    t2.__dict__[p] = v.replace(param, ver)
                    found_param = True
            if not found_param:
                continue
            # All but the two newest versions are considered 'old'.
            t2.is_old = i < len(category_list) - 2
            if hasattr(t2, 'uses'):
                t2.uses = [x.replace(param, ver) for x in t2.uses]
            # Filter out expanded tools by version requirements, such as ["tag", "<=", "1.37.22"]
            if hasattr(t2, 'version_filter'):
                passes = passes_filters(param, ver, t2.version_filter)
                if not passes:
                    continue
            if is_sdk:
                if dependencies_exist(t2):
                    if not find_sdk(t2.name):
                        add_sdk(t2)
                    else:
                        debug_print('SDK ' + str(t2) + ' already existed in manifest, not adding twice')
            else:
                if not find_tool(t2.name):
                    add_tool(t2)
                else:
                    debug_print('Tool ' + str(t2) + ' already existed in manifest, not adding twice')

    # Tools are registered first so that SDK dependency checks can see them.
    for tool in manifest['tools']:
        t = Tool(tool)
        if t.compatible_with_this_os():
            if not hasattr(t, 'is_old'):
                t.is_old = False
            # Expand the metapackages that refer to tags or nightlies.
            if '%tag%' in t.version:
                expand_category_param('%tag%', emscripten_tags, t, is_sdk=False)
            elif '%precompiled_tag%' in t.version:
                expand_category_param('%precompiled_tag%', llvm_precompiled_tags, t, is_sdk=False)
            elif '%precompiled_tag32%' in t.version:
                expand_category_param('%precompiled_tag32%', llvm_precompiled_tags_32bit, t, is_sdk=False)
            elif '%precompiled_tag64%' in t.version:
                expand_category_param('%precompiled_tag64%', llvm_precompiled_tags_64bit, t, is_sdk=False)
            elif '%binaryen_tag%' in t.version:
                expand_category_param('%binaryen_tag%', binaryen_tags, t, is_sdk=False)
            elif '%nightly-llvm-64bit%' in t.version:
                expand_category_param('%nightly-llvm-64bit%', llvm_64bit_nightlies, t, is_sdk=False)
            elif '%nightly-llvm-32bit%' in t.version:
                expand_category_param('%nightly-llvm-32bit%', llvm_32bit_nightlies, t, is_sdk=False)
            elif '%nightly-emscripten%' in t.version:
                expand_category_param('%nightly-emscripten%', emscripten_nightlies, t, is_sdk=False)
            elif '%releases-tag%' in t.version:
                expand_category_param('%releases-tag%', releases_tags, t, is_sdk=False)
            else:
                add_tool(t)

    for sdk_str in manifest['sdks']:
        sdk_str['id'] = 'sdk'
        sdk = Tool(sdk_str)
        if sdk.compatible_with_this_os():
            if not hasattr(sdk, 'is_old'):
                sdk.is_old = False
            if '%tag%' in sdk.version:
                expand_category_param('%tag%', emscripten_tags, sdk, is_sdk=True)
            elif '%precompiled_tag%' in sdk.version:
                expand_category_param('%precompiled_tag%', llvm_precompiled_tags, sdk, is_sdk=True)
            elif '%precompiled_tag32%' in sdk.version:
                expand_category_param('%precompiled_tag32%', llvm_precompiled_tags_32bit, sdk, is_sdk=True)
            elif '%precompiled_tag64%' in sdk.version:
                expand_category_param('%precompiled_tag64%', llvm_precompiled_tags_64bit, sdk, is_sdk=True)
            elif '%nightly-llvm-64bit%' in sdk.version:
                expand_category_param('%nightly-llvm-64bit%', llvm_64bit_nightlies, sdk, is_sdk=True)
            elif '%nightly-llvm-32bit%' in sdk.version:
                expand_category_param('%nightly-llvm-32bit%', llvm_32bit_nightlies, sdk, is_sdk=True)
            elif '%nightly-emscripten%' in sdk.version:
                expand_category_param('%nightly-emscripten%', emscripten_nightlies, sdk, is_sdk=True)
            elif '%releases-tag%' in sdk.version:
                expand_category_param('%releases-tag%', releases_tags, sdk, is_sdk=True)
            else:
                add_sdk(sdk)
# Tests if the two given tools can be active at the same time.
# Currently only a simple check for name for same tool with different versions,
# possibly adds more logic in the future.
def can_simultaneously_activate(tool1, tool2):
    same_tool = (tool1.id == tool2.id)
    return not same_tool
def remove_nonexisting_tools(tool_list, log_errors=True):
    """Drop uninstalled tools from tool_list, mutating it in place.

    Returns the same (filtered) list object. When log_errors is True a
    warning is printed for every skipped tool.
    """
    kept = []
    for tool in tool_list:
        if tool.is_installed():
            kept.append(tool)
        elif log_errors:
            print("Warning: The SDK/tool '" + str(tool) + "' cannot be activated since it is not installed! Skipping this tool...")
    # Slice-assign so callers holding a reference see the filtered contents.
    tool_list[:] = kept
    return tool_list
# Expands dependencies for each tool, and removes ones that don't exist.
def process_tool_list(tools_to_activate, log_errors=True):
    """Expand dependencies into the activation list, drop uninstalled
    entries, and remove conflicting duplicates (later entries win)."""
    i = 0
    # Insert each tool's recursive dependencies immediately before the tool
    # itself, then skip past both so already-expanded entries are not revisited.
    while i < len(tools_to_activate):
        tool = tools_to_activate[i]
        deps = tool.recursive_dependencies()
        tools_to_activate = tools_to_activate[:i] + deps + tools_to_activate[i:]
        i += len(deps) + 1
    tools_to_activate = remove_nonexisting_tools(tools_to_activate, log_errors=log_errors)
    # Drop an earlier entry that cannot coexist with a later one (e.g. two
    # versions of the same tool id); the indices are adjusted after each pop.
    i = 0
    while i < len(tools_to_activate):
        j = 0
        while j < i:
            secondary_tool = tools_to_activate[j]
            primary_tool = tools_to_activate[i]
            if not can_simultaneously_activate(primary_tool, secondary_tool):
                tools_to_activate.pop(j)
                j -= 1
                i -= 1
            j += 1
        i += 1
    return tools_to_activate
def run_emcc(tools_to_activate):
    """Invoke emcc once (output suppressed) so it initializes itself before
    any pregenerated cache contents are copied in. Runs at most one emcc."""
    for tool in tools_to_activate:
        activated_path = getattr(tool, 'activated_path', None)
        if activated_path and activated_path.endswith('/emscripten'):
            activated_path = to_native_path(tool.expand_vars(tool.activated_path))
            emcc_path = os.path.join(activated_path, 'emcc.py')
            if os.path.exists(emcc_path):
                debug_print('Calling emcc to initialize it')
                subprocess.call([sys.executable, emcc_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                return
def emscripten_cache_directory():
    # The .emscripten_cache dir lives next to the active .emscripten config.
    return os.path.join(emscripten_config_directory, ".emscripten_cache")
def copy_pregenerated_cache(tools_to_activate):
    """Copy pregenerated emscripten cache libraries shipped with the given
    tools into the active .emscripten_cache directory.

    Tools without a 'pregenerated_cache' attribute are skipped.
    """
    for tool in tools_to_activate:
        pregenerated_cache = getattr(tool, 'pregenerated_cache', None)
        if not pregenerated_cache:
            continue
        install_path = to_native_path(sdk_path(tool.expand_vars(tool.install_path)))
        in_cache = os.path.join(install_path, 'lib', pregenerated_cache)
        if not os.path.exists(in_cache):
            continue
        out_cache = os.path.join(emscripten_cache_directory(), pregenerated_cache)
        # BUGFIX: os.makedirs() raises OSError when the directory already
        # exists (e.g. on re-activation), so only create it when missing.
        if not os.path.exists(out_cache):
            os.makedirs(out_cache)
        for filename in os.listdir(in_cache):
            debug_print('Copying ' + filename + ' to cache dir')
            shutil.copy2(os.path.join(in_cache, filename),
                         os.path.join(out_cache, filename))
def set_active_tools(tools_to_activate, permanently_activate):
    """Activate the given tools: write .emscripten, prime emcc's cache, and
    (on Windows, when permanently_activate) persist env vars and PATH
    system-wide. Returns the fully-expanded activation list."""
    tools_to_activate = process_tool_list(tools_to_activate, log_errors=True)
    generate_dot_emscripten(tools_to_activate)
    # NOTE(review): the original comment below appears truncated; the intent
    # is to initialize emcc before copying in pregenerated cache contents.
    # wiping out the pregenerated cache contents we want to copy in, run emcc here, then copy the cache
    # contents.
    run_emcc(tools_to_activate)
    copy_pregenerated_cache(tools_to_activate)
    # Construct a .bat script that will be invoked to set env. vars and PATH
    if WINDOWS:
        env_string = construct_env(tools_to_activate, False)
        open(EMSDK_SET_ENV, 'w').write(env_string)
    # Apply environment variables to global all users section.
    if WINDOWS and permanently_activate:
        # Individual env. vars
        for tool in tools_to_activate:
            tool.win_activate_env_vars(permanently_activate=True)
        # PATH variable
        newpath, added_items = adjusted_path(tools_to_activate, system_path_only=True)
        if newpath != os.environ['PATH']: # Are there any actual changes?
            win_set_environment_variable('PATH', newpath, system=True)
    if len(tools_to_activate) > 0:
        tools = [x for x in tools_to_activate if not x.is_sdk]
        print('\nSet the following tools as active:\n ' + '\n '.join(map(lambda x: str(x), tools)))
        print('')
    return tools_to_activate
def currently_active_sdk():
    """Return the newest registered SDK that is currently active, or None."""
    return next((sdk for sdk in reversed(sdks) if sdk.is_active()), None)
def currently_active_tools():
    """Return all registered tools that are currently active, in registry order."""
    return [tool for tool in tools if tool.is_active()]
# http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order
def unique_items(seq):
    """Return seq's items as a list with duplicates removed, order preserved."""
    seen = set()
    result = []
    for item in seq:
        if item in seen:
            continue
        seen.add(item)
        result.append(item)
    return result
# Tests if a path is contained in the given list, but with separators normalized.
def normalized_contains(lst, elem):
    needle = to_unix_path(elem)
    return any(needle == to_unix_path(entry) for entry in lst)
def to_msys_path(p):
    """Convert a Windows path like 'C:/foo' to MSYS form '/c/foo'."""
    p = to_unix_path(p)
    # Turn a drive-letter prefix 'X:/rest' into '/X/rest'.
    new_path = re.sub(r'([a-zA-Z]):/(.*)', r'/\1/\2', p)
    # Lowercase the drive letter ('/C/..' -> '/c/..'), as MSYS expects.
    if len(new_path) > 3 and new_path[0] == '/' and new_path[2] == '/':
        new_path = new_path[0] + new_path[1].lower() + new_path[2:]
    return new_path
# Looks at the current PATH and adds and removes entries so that the PATH reflects
# the set of given active tools.
def adjusted_path(tools_to_activate, log_additions=False, system_path_only=False):
    """Compute the PATH for the given active tools.

    Returns (new_path_string, added_items): the full PATH value with stale
    emsdk entries removed and required tool directories prepended, plus the
    list of emsdk directories that are new relative to the current PATH.
    """
    # These directories should be added to PATH
    path_add = get_required_path(tools_to_activate)
    # These already exist.
    if WINDOWS and not MSYS:
        existing_path = win_get_environment_variable('PATH', system=True)
        if not system_path_only:
            current_user_path = win_get_environment_variable('PATH', system=False)
            if current_user_path:
                existing_path += ENVPATH_SEPARATOR + current_user_path
        existing_path = existing_path.split(ENVPATH_SEPARATOR)
        # Fix up after potential changes made by bug https://github.com/kripken/emscripten/issues/4121
        system_root = os.environ['SystemRoot'].lower()
        for i in range(len(existing_path)):
            p = existing_path[i]
            if p.lower() == system_root:
                p = '%SystemRoot%'
            # BUGFIX: the more specific subdirectories must be tested before
            # the plain '\\system32' check, which would otherwise match (and
            # clobber) the Wbem and PowerShell entries too. Also fixed
            # '\v1.0', which in a non-raw string is a vertical-tab escape,
            # so the PowerShell branch could never match a real path.
            elif (system_root + '\\system32\\wbem') in p.lower():
                p = '%SystemRoot%\\System32\\Wbem'
            elif (system_root + '\\system32\\windowspowershell\\v1.0') in p.lower():
                p = '%SystemRoot%\\System32\\WindowsPowerShell\\v1.0\\'
            elif (system_root + '\\system32') in p.lower():
                p = '%SystemRoot%\\system32'
            existing_path[i] = p
    else:
        existing_path = os.environ['PATH'].split(ENVPATH_SEPARATOR)
    emsdk_root_path = to_unix_path(emsdk_path())
    existing_emsdk_tools = [item for item in existing_path if to_unix_path(item).startswith(emsdk_root_path)]
    new_emsdk_tools = [item for item in path_add if not normalized_contains(existing_emsdk_tools, item)]
    # Existing non-emsdk tools
    existing_path = [item for item in existing_path if not to_unix_path(item).startswith(emsdk_root_path)]
    new_path = [item for item in path_add if not normalized_contains(existing_path, item)]
    whole_path = unique_items(new_path + existing_path)
    if MSYS:
        # XXX Hack: If running native Windows Python in MSYS prompt where PATH entries look like "/c/Windows/System32", os.environ['PATH']
        # in Python will transform to show them as "C:\\Windows\\System32", so need to reconvert path delimiter back to forward slashes.
        whole_path = list(map(to_msys_path, whole_path))
        new_emsdk_tools = list(map(to_msys_path, new_emsdk_tools))
    return ((':' if MSYS else ENVPATH_SEPARATOR).join(whole_path), new_emsdk_tools)
def construct_env(tools_to_activate, permanent):
    """Build a shell-script snippet (matching the current shell flavor) that
    sets PATH and the tools' environment variables; prints what changed.

    'permanent' selects SETX over SET on plain Windows cmd.
    """
    global emscripten_config_directory  # read-only here; kept for parity with callers that assign it
    env_string = ''
    newpath, added_path = adjusted_path(tools_to_activate)
    # Dont permanently add to PATH, since this will break the whole system if there are more than 1024 chars in PATH.
    # (SETX truncates to set only 1024 chars)
    # if permanent:
    #   print('SETX PATH "' + newpath + '"')
    # else:
    if os.environ['PATH'] != newpath: # Don't bother setting the path if there are no changes.
        if POWERSHELL:
            env_string += '$env:PATH="' + newpath + '"\n'
        elif WINDOWS and not MSYS:
            env_string += 'SET PATH=' + newpath + '\n'
        else:
            env_string += 'export PATH="' + newpath + '"\n'
        if len(added_path) > 0:
            print('Adding directories to PATH:')
            for item in added_path:
                print('PATH += ' + item)
            print('')
    # Collect (key, value) pairs that differ from the current environment.
    env_vars_to_add = []
    env_vars_to_add += [('EMSDK', to_unix_path(emsdk_path()))]
    em_config_path = os.path.normpath(dot_emscripten_path())
    if 'EM_CONFIG' not in os.environ or to_unix_path(os.environ['EM_CONFIG']) != to_unix_path(em_config_path):
        env_vars_to_add += [('EM_CONFIG', em_config_path)]
    if emscripten_config_directory == emsdk_path():
        em_cache_dir = sdk_path('.emscripten_cache')
        if 'EM_CACHE' not in os.environ or to_unix_path(os.environ['EM_CACHE']) != to_unix_path(em_cache_dir):
            env_vars_to_add += [('EM_CACHE', em_cache_dir)]
        mkdir_p(em_cache_dir)
    for tool in tools_to_activate:
        envs = tool.activated_environment()
        for env in envs:
            key, value = parse_key_value(env)
            value = to_native_path(tool.expand_vars(value))
            if key not in os.environ or to_unix_path(os.environ[key]) != to_unix_path(value):
                env_vars_to_add += [(key, value)]
    if len(env_vars_to_add) > 0:
        print('Setting environment variables:')
        for key, value in env_vars_to_add:
            if POWERSHELL:
                env_string += '$env:' + key + '="' + value + '"\n'
            elif WINDOWS and not MSYS:
                if permanent:
                    env_string += 'SETX ' + key + ' "' + value + '"\n'
                else:
                    env_string += 'SET ' + key + '=' + value + '\n'
            else:
                env_string += 'export ' + key + '="' + value + '"\n'
            print(key + ' = ' + value)
        print('')
    return env_string
def silentremove(filename):
    """Delete *filename*, silently ignoring the case where it does not exist."""
    try:
        os.remove(filename)
    except OSError as err:
        # A missing file is fine; re-raise anything else.
        if err.errno == errno.ENOENT:
            return
        raise
def main():
global emscripten_config_directory, BUILD_FOR_TESTING, ENABLE_LLVM_ASSERTIONS, TTY_OUTPUT
if len(sys.argv) <= 1 or sys.argv[1] == 'help' or sys.argv[1] == '--help':
if len(sys.argv) <= 1:
print(' emsdk: No command given. Please call one of the following:')
else:
print(' emsdk: Available commands:')
print('''
emsdk list [--old] [--uses] - Lists all available SDKs and tools and their
current installation status. With the --old
parameter, also historical versions are
shown. If --uses is passed, displays the
composition of different SDK packages and
dependencies.
emsdk update - Updates emsdk to the newest version, and also
runs 'update-tags' (below). If you have
bootstrapped emsdk via cloning directly from
GitHub, call "git pull" instead to update emsdk.
emsdk update-tags - Fetches the most up to date list of available
Emscripten tagged and other releases from the
servers.
emsdk install [options] <tool 1> <tool 2> <tool 3> ...
- Downloads and installs given tools or SDKs.
Options can contain:
-j<num>: Specifies the number of cores to use when
building the tool. Default: use one less
than the # of detected cores.
--build=<type>: Controls what kind of build of LLVM to
perform. Pass either 'Debug', 'Release',
'MinSizeRel' or 'RelWithDebInfo'. Default:
'Release' for LLVM master branch, and
'RelWithDebInfo' for LLVM incoming branch.
--generator=<type>: Specifies the CMake Generator to be used
during the build. Possible values are the
same as what your CMake supports and whether
the generator is valid depends on the tools
you have installed. Defaults to 'Unix Makefiles'
on *nix systems. If generator name is multiple
words, enclose with single or double quotes.
--shallow: When installing tools from one of the git
development branches 'master' or 'incoming',
this parameter can be passed to perform a
shallow git clone instead of a full one.
This reduces the amount of network transfer
that is needed. This option should only be
used when you are interested in downloading
one of the development branches, but are not
looking to develop Emscripten yourself.
Default: disabled, i.e. do a full clone.
--build-tests: If enabled, LLVM is built with internal tests
included. Pass this to enable running test
other.test_llvm_lit in the Emscripten test
suite. Default: disabled.
--enable-assertions: If specified, LLVM is built with assert()
checks enabled. Useful for development
purposes. Default: Enabled for 'incoming'
branch, disabled for 'master' branch.
--disable-assertions: Forces assertions off during the build.
--vs2013/--vs2015/--vs2017: If building from source, overrides to build
using the specified compiler. When installing
precompiled packages, this has no effect.
Note: The same compiler specifier must be
passed to the emsdk activate command to
activate the desired version.
Notes on building from source:
To pass custom CMake directives when configuring
LLVM build, specify the environment variable
LLVM_CMAKE_ARGS="param1=value1,param2=value2"
in the environment where the build is invoked.
See README.md for details.
emsdk uninstall <tool/sdk> - Removes the given tool or SDK from disk.''')
if WINDOWS:
print('''
emsdk activate [--global] [--embedded] [--build=type] [--vs2013/--vs2015/--vs2017] <tool/sdk>
- Activates the given tool or SDK in the
environment of the current shell. If the
--global option is passed, the registration
is done globally to all users in the system
environment. If the --embedded option is
passed, all Emcripten configuration files as
well as the temp, cache and ports directories
are located inside the Emscripten SDK
directory rather than the user home
directory. If a custom compiler version was
used to override the compiler to use, pass
the same --vs2013/--vs2015/--vs2017 parameter
here to choose which version to activate.
emcmdprompt.bat - Spawns a new command prompt window with the
Emscripten environment active.''')
else:
print(''' emsdk activate [--embedded] [--build=type] <tool/sdk>
- Activates the given tool or SDK in the
environment of the current shell. If the
--embedded option is passed, all Emcripten
configuration files as well as the temp, cache
and ports directories are located inside the
Emscripten SDK directory rather than the user
home directory.''')
print('''
Both commands 'install' and 'activate' accept an optional parameter
'--build=type', which can be used to override what kind of installation
or activation to perform. Possible values for type are Debug, Release,
MinSizeRel or RelWithDebInfo. Note: When overriding a custom build type,
be sure to match the same --build= option to both 'install' and
'activate' commands and the invocation of 'emsdk_env', or otherwise
these commands will default to operating on the default build types,
which are Release for the 'master' SDK, and RelWithDebInfo for the
'incoming' SDK.''')
return 1
# Extracts a boolean command line argument from sys.argv and returns True if it was present
def extract_bool_arg(name):
old_argv = sys.argv
sys.argv = list(filter(lambda a: a != name, sys.argv))
return len(sys.argv) != len(old_argv)
arg_old = extract_bool_arg('--old')
arg_uses = extract_bool_arg('--uses')
arg_global = extract_bool_arg('--global')
arg_embedded = extract_bool_arg('--embedded')
arg_notty = extract_bool_arg('--notty')
if arg_notty:
TTY_OUTPUT = False
cmd = sys.argv[1]
# On first run when tag list is not present, populate it to bootstrap.
if (cmd == 'install' or cmd == 'list') and not os.path.isfile(sdk_path('llvm-tags-64bit.txt')):
fetch_emscripten_tags()
load_dot_emscripten()
load_sdk_manifest()
# Process global args
for i in range(2, len(sys.argv)):
if sys.argv[i].startswith('--generator='):
build_generator = re.match(r'''^--generator=['"]?([^'"]+)['"]?$''', sys.argv[i])
if build_generator:
global CMAKE_GENERATOR
CMAKE_GENERATOR = build_generator.group(1)
sys.argv[i] = ''
else:
print("Cannot parse CMake generator string: " + sys.argv[i] + ". Try wrapping generator string with quotes", file=sys.stderr)
return 1
elif sys.argv[i].startswith('--build='):
build_type = re.match(r'^--build=(.+)$', sys.argv[i])
if build_type:
global CMAKE_BUILD_TYPE_OVERRIDE
build_type = build_type.group(1)
build_types = ['Debug', 'MinSizeRel', 'RelWithDebInfo', 'Release']
try:
build_type_index = [x.lower() for x in build_types].index(build_type.lower())
CMAKE_BUILD_TYPE_OVERRIDE = build_types[build_type_index]
sys.argv[i] = ''
except:
print('Unknown CMake build type "' + build_type + '" specified! Please specify one of ' + str(build_types), file=sys.stderr)
return 1
else:
print("Invalid command line parameter " + sys.argv[i] + ' specified!', file=sys.stderr)
return 1
sys.argv = [x for x in sys.argv if not len(x) == 0]
releases_info = load_releases_info()['releases']
# Replace meta-packages with the real package names.
if cmd in ('update', 'install', 'activate'):
for i in range(2, len(sys.argv)):
arg = sys.argv[i]
if arg in ('latest', 'sdk-latest', 'latest-64bit', 'sdk-latest-64bit', 'latest-fastcomp', 'latest-releases-fastcomp'):
sys.argv[i] = str(find_latest_releases_sdk('fastcomp'))
elif arg in ('latest-upstream', 'latest-clang-upstream', 'latest-releases-upstream'):
sys.argv[i] = str(find_latest_releases_sdk('upstream'))
elif arg == 'tot-upstream':
sys.argv[i] = str(find_tot_sdk('upstream'))
elif arg in ('tot-fastcomp', 'sdk-nightly-latest'):
sys.argv[i] = str(find_tot_sdk('fastcomp'))
else:
# check if it's a release handled by an emscripten-releases version,
# and if so use that by using the right hash. we support a few notations,
# x.y.z[-(upstream|fastcomp_])
# sdk-x.y.z[-(upstream|fastcomp_])-64bit
# TODO: support short notation for old builds too?
upstream = False
if '-upstream' in arg:
arg = arg.replace('-upstream', '')
upstream = True
elif '-fastcomp' in arg:
arg = arg.replace('-fastcomp', '')
upstream = False
arg = arg.replace('sdk-', '').replace('-64bit', '').replace('tag-', '')
release_hash = releases_info.get(arg, None) or releases_info.get('sdk-' + arg + '-64bit')
if release_hash:
sys.argv[i] = 'sdk-releases-%s-%s-64bit' % ('upstream' if upstream else 'fastcomp', release_hash)
if cmd == 'list':
print('')
if (LINUX or OSX or WINDOWS) and (ARCH == 'x86' or ARCH == 'x86_64'):
print('The *recommended* precompiled SDK download is %s (%s).' % (find_latest_releases_version(), find_latest_releases_hash()))
print()
print('To install/activate it, use one of:')
print(' latest [default (fastcomp) backend]')
print(' latest-upstream [upstream LLVM wasm backend]')
print('')
print('Those are equivalent to installing/activating the following:')
print(' %s' % find_latest_releases_version())
print(' %s-upstream' % find_latest_releases_version())
print('')
else:
print('Warning: your platform does not have precompiled SDKs available.')
print('You may install components from source.')
print('')
print('All recent (non-legacy) installable versions are:')
releases_versions = sorted(load_releases_versions())
releases_versions.reverse()
for ver in releases_versions:
print(' %s' % ver)
print(' %s-upstream' % ver)
print()
has_partially_active_tools = [False] # Use array to work around the lack of being able to mutate from enclosing function.
if len(sdks) > 0:
def find_sdks(needs_compilation):
s = []
for sdk in sdks:
if sdk.is_old and not arg_old:
continue
if sdk.needs_compilation() == needs_compilation:
s += [sdk]
return s
def print_sdks(s):
for sdk in s:
installed = '\tINSTALLED' if sdk.is_installed() else ''
active = '*' if sdk.is_active() else ' '
print(' ' + active + ' {0: <25}'.format(str(sdk)) + installed)
if arg_uses:
for dep in sdk.uses:
print(' - {0: <25}'.format(dep))
print('')
print('The additional following precompiled SDKs are also available for download:')
print_sdks(find_sdks(False))
print('The following SDKs can be compiled from source:')
print_sdks(find_sdks(True))
if len(tools) > 0:
def find_tools(needs_compilation):
t = []
for tool in tools:
if tool.is_old and not arg_old:
continue
if tool.needs_compilation() != needs_compilation:
continue
t += [tool]
return t
def print_tools(t):
for tool in t:
if tool.is_old and not arg_old:
continue
if tool.can_be_installed() is True:
installed = '\tINSTALLED' if tool.is_installed() else ''
else:
installed = '\tNot available: ' + tool.can_be_installed()
tool_is_active = tool.is_active()
tool_is_env_active = tool_is_active and tool.is_env_active()
if tool_is_env_active:
active = ' * '
elif tool_is_active:
active = '(*)'
has_partially_active_tools[0] = has_partially_active_tools[0] or True
else:
active = ' '
print(' ' + active + ' {0: <25}'.format(str(tool)) + installed)
print('')
print('The following precompiled tool packages are available for download:')
print_tools(find_tools(needs_compilation=False))
print('The following tools can be compiled from source:')
print_tools(find_tools(needs_compilation=True))
else:
if is_emsdk_sourced_from_github():
print("There are no tools available. Run 'git pull' followed by 'emsdk update-tags' to fetch the latest set of tools.")
else:
print("There are no tools available. Run 'emsdk update' to fetch the latest set of tools.")
print('')
print('Items marked with * are activated for the current user.')
if has_partially_active_tools[0]:
env_cmd = 'emsdk_env.bat' if WINDOWS else 'source ./emsdk_env.sh'
print('Items marked with (*) are selected for use, but your current shell environment is not configured to use them. Type "' + env_cmd + '" to set up your current shell to use them' + (', or call "emsdk activate --global <name_of_sdk>" to permanently activate them.' if WINDOWS else '.'))
if not arg_old:
print('')
print("To access the historical archived versions, type 'emsdk list --old'")
print('')
if is_emsdk_sourced_from_github():
print('Run "git pull" followed by "./emsdk update-tags" to pull in the latest list.')
else:
print('Run "./emsdk update" to pull in the latest list.')
return 0
elif cmd == 'construct_env':
if len(sys.argv) == 2:
outfile = EMSDK_SET_ENV
silentremove(EMSDK_SET_ENV) # Clean up old temp file up front, in case of failure later before we get to write out the new one.
else:
outfile = sys.argv[2]
tools_to_activate = currently_active_tools()
tools_to_activate = process_tool_list(tools_to_activate, log_errors=True)
env_string = construct_env(tools_to_activate, len(sys.argv) >= 3 and 'perm' in sys.argv[2])
open(outfile, 'w').write(env_string)
if UNIX:
os.chmod(outfile, 0o755)
return 0
elif cmd == 'update':
update_emsdk()
silentremove(sdk_path(EMSDK_SET_ENV)) # Clean up litter after old emsdk update which may have left this temp file around.
return 0
elif cmd == 'update-tags':
fetch_emscripten_tags()
return 0
elif cmd == 'activate':
if arg_global:
print('Registering active Emscripten environment globally for all users.')
print('')
if arg_embedded:
# Activating the emsdk tools locally relative to Emscripten SDK directory.
emscripten_config_directory = emsdk_path()
print('Writing .emscripten configuration file to Emscripten SDK directory ' + emscripten_config_directory)
else:
print('Writing .emscripten configuration file to user home directory ' + emscripten_config_directory)
# Remove .emscripten from emsdk dir, since its presence is used to detect whether emsdk is activate in embedded mode or not.
try:
os.remove(os.path.join(emsdk_path(), ".emscripten"))
except:
pass
sys.argv = [x for x in sys.argv if not x.startswith('--')]
tools_to_activate = currently_active_tools()
for i in range(2, len(sys.argv)):
tool = find_tool(sys.argv[i])
if tool is None:
tool = find_sdk(sys.argv[i])
if tool is None:
print("Error: No tool or SDK found by name '" + sys.argv[i] + "'.")
return 1
tools_to_activate += [tool]
if len(tools_to_activate) == 0:
print('No tools/SDKs specified to activate! Usage:\n emsdk activate tool/sdk1 [tool/sdk2] [...]')
return 1
tools_to_activate = set_active_tools(tools_to_activate, permanently_activate=arg_global)
if len(tools_to_activate) == 0:
print('No tools/SDKs found to activate! Usage:\n emsdk activate tool/sdk1 [tool/sdk2] [...]')
return 1
if WINDOWS and not arg_global:
print('The changes made to environment variables only apply to the currently running shell instance. Use the \'emsdk_env.bat\' to re-enter this environment later, or if you\'d like to permanently register this environment globally to all users in Windows Registry, rerun this command with the option --global.')
return 0
elif cmd == 'install':
# Process args
for i in range(2, len(sys.argv)):
if sys.argv[i].startswith('-j'):
multicore = re.match(r'^-j(\d+)$', sys.argv[i])
if multicore:
global CPU_CORES
CPU_CORES = int(multicore.group(1))
sys.argv[i] = ''
else:
print("Invalid command line parameter " + sys.argv[i] + ' specified!', file=sys.stderr)
return 1
elif sys.argv[i] == '--shallow':
global GIT_CLONE_SHALLOW
GIT_CLONE_SHALLOW = True
sys.argv[i] = ''
elif sys.argv[i] == '--build-tests':
BUILD_FOR_TESTING = True
sys.argv[i] = ''
elif sys.argv[i] == '--enable-assertions':
ENABLE_LLVM_ASSERTIONS = 'ON'
sys.argv[i] = ''
elif sys.argv[i] == '--disable-assertions':
ENABLE_LLVM_ASSERTIONS = 'OFF'
sys.argv[i] = ''
sys.argv = [x for x in sys.argv if not len(x) == 0]
if len(sys.argv) <= 2:
print("Missing parameter. Type 'emsdk install <tool name>' to install a tool or an SDK. Type 'emsdk list' to obtain a list of available tools. Type 'emsdk install latest' to automatically install the newest version of the SDK.")
return 1
for t in sys.argv[2:]:
tool = find_tool(t)
if tool is None:
tool = find_sdk(t)
if tool is None:
print("Error: No tool or SDK found by name '" + t + "'.")
return 1
success = tool.install()
if not success:
return 1
return 0
elif cmd == 'uninstall':
if len(sys.argv) <= 2:
print("Syntax error. Call 'emsdk uninstall <tool name>'. Call 'emsdk list' to obtain a list of available tools.")
return 1
tool = find_tool(sys.argv[2])
if tool is None:
print("Error: Tool by name '" + sys.argv[2] + "' was not found.")
return 1
tool.uninstall()
return 0
print("Unknown command '" + cmd + "' given! Type 'emsdk help' to get a list of commands.")
return 1
if __name__ == '__main__':
    # Propagate main()'s integer return code as the process exit status.
    sys.exit(main())
| true | true |
1c2d1683276d18336a20cddf892fc118e276e093 | 15,191 | py | Python | tests/users/test_auth.py | itsmingjie/CTFd | 80df88f25d095ea7f7fc9cf8c8d0bf9cc7c2bf1f | [
"Apache-2.0"
] | 9 | 2020-05-07T01:39:06.000Z | 2022-01-21T09:32:03.000Z | tests/users/test_auth.py | itsmingjie/CTFd | 80df88f25d095ea7f7fc9cf8c8d0bf9cc7c2bf1f | [
"Apache-2.0"
] | 3 | 2021-03-11T00:50:06.000Z | 2022-02-10T23:13:17.000Z | tests/users/test_auth.py | erseco/CTFd | a6a4906ab058b79bca7af48038fa7badc1744340 | [
"Apache-2.0"
] | 7 | 2020-06-16T20:42:00.000Z | 2022-01-13T08:06:14.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import six
from freezegun import freeze_time
from mock import patch
from CTFd.models import Users, db
from CTFd.utils import get_config, set_config
from CTFd.utils.crypto import verify_password
from CTFd.utils.security.signing import serialize
from tests.helpers import create_ctfd, destroy_ctfd, login_as_user, register_user
def test_register_user():
    """Can a user be registered"""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        # Two accounts exist afterwards: the built-in admin plus the new user.
        assert Users.query.count() == 2
    destroy_ctfd(app)
def test_register_unicode_user():
    """Can a user with a unicode name be registered"""
    app = create_ctfd()
    with app.app_context():
        register_user(app, name="你好")
        # The admin account plus the freshly registered unicode-named user.
        assert Users.query.count() == 2
    destroy_ctfd(app)
def test_register_duplicate_username():
    """A user shouldn't be able to use an already registered team name"""
    app = create_ctfd()
    with app.app_context():
        register_user(
            app,
            name="user1",
            email="user1@ctfd.io",
            password="password",
            raise_for_error=False,
        )
        # Same name with a different email: registration should be rejected.
        register_user(
            app,
            name="user1",
            email="user2@ctfd.io",
            password="password",
            raise_for_error=False,
        )
        # A whitespace-padded variant of the reserved "admin" name should
        # also be rejected.
        register_user(
            app,
            name="admin ",
            email="admin2@ctfd.io",
            password="password",
            raise_for_error=False,
        )
        user_count = Users.query.count()
        assert user_count == 2  # There's the admin user and the first created user
    destroy_ctfd(app)
def test_register_duplicate_email():
    """A user shouldn't be able to use an already registered email address"""
    app = create_ctfd()
    with app.app_context():
        register_user(
            app,
            name="user1",
            email="user1@ctfd.io",
            password="password",
            raise_for_error=False,
        )
        # Different name but the same email address: should be rejected.
        register_user(
            app,
            name="user2",
            email="user1@ctfd.io",
            password="password",
            raise_for_error=False,
        )
        user_count = Users.query.count()
        assert user_count == 2  # There's the admin user and the first created user
    destroy_ctfd(app)
def test_register_whitelisted_email():
    """A user shouldn't be able to register with an email that isn't on the whitelist"""
    app = create_ctfd()
    with app.app_context():
        set_config(
            "domain_whitelist", "whitelisted.com, whitelisted.org, whitelisted.net"
        )

        # A non-whitelisted domain is rejected, leaving only the admin account.
        register_user(
            app, name="not_whitelisted", email="user@nope.com", raise_for_error=False
        )
        assert Users.query.count() == 1

        # Each whitelisted domain registers successfully, growing the count by one.
        accepted = [
            ("user1", "user@whitelisted.com"),
            ("user2", "user@whitelisted.org"),
            ("user3", "user@whitelisted.net"),
        ]
        for expected_count, (name, email) in enumerate(accepted, start=2):
            register_user(app, name=name, email=email)
            assert Users.query.count() == expected_count
    destroy_ctfd(app)
def test_user_bad_login():
    """A user should not be able to login with an incorrect password"""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        client = login_as_user(
            app, name="user", password="wrong_password", raise_for_error=False
        )
        # The failed login must not have established a session.
        with client.session_transaction() as sess:
            assert sess.get("id") is None
        r = client.get("/profile")
        assert r.location.startswith(
            "http://localhost/login"
        )  # We got redirected to login
    destroy_ctfd(app)
def test_user_login():
    """Can a registered user can login"""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        client = login_as_user(app)
        resp = client.get("/profile")
        # A logged-in user reaches the profile page without being bounced
        # back to the login screen.
        assert resp.location != "http://localhost/login"
        assert resp.status_code == 200
    destroy_ctfd(app)
def test_user_login_with_email():
    """Can a registered user can login with an email address instead of a team name"""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        # Authenticate with the registered email rather than the username.
        client = login_as_user(app, name="user@ctfd.io", password="password")
        r = client.get("/profile")
        assert (
            r.location != "http://localhost/login"
        )  # We didn't get redirected to login
        assert r.status_code == 200
    destroy_ctfd(app)
def test_user_get_logout():
    """Can a registered user load /logout"""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        client = login_as_user(app)
        client.get("/logout", follow_redirects=True)
        # After logging out, protected pages redirect back to the login form.
        resp = client.get("/challenges")
        assert resp.location == "http://localhost/login?next=%2Fchallenges%3F"
        assert resp.status_code == 302
    destroy_ctfd(app)
def test_user_isnt_admin():
    """A registered user cannot access admin pages"""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        client = login_as_user(app)
        admin_pages = (
            "pages",
            "users",
            "teams",
            "scoreboard",
            "challenges",
            "statistics",
            "config",
        )
        # Every admin view should redirect a non-admin to the login page.
        for page in admin_pages:
            resp = client.get("/admin/" + page)
            assert resp.location.startswith("http://localhost/login?next=")
            assert resp.status_code == 302
    destroy_ctfd(app)
def test_expired_confirmation_links():
    """Test that expired confirmation links are reported to the user"""
    app = create_ctfd()
    # Freeze time well past the token's embedded timestamp so the link
    # is considered expired.
    with app.app_context(), freeze_time("2019-02-24 03:21:34"):
        set_config("verify_emails", True)
        register_user(app, email="user@user.com")
        client = login_as_user(app, name="user", password="password")
        # Token signed for user@user.com at "2012-01-14 03:21:34"
        confirm_link = "http://localhost/confirm/InVzZXJAdXNlci5jb20i.TxD0vg.cAGwAy8cK1T0saEEbrDEBVF2plI"
        r = client.get(confirm_link)
        assert "Your confirmation link has expired" in r.get_data(as_text=True)
        # The expired link must not have verified the account.
        user = Users.query.filter_by(email="user@user.com").first()
        assert user.verified is not True
    destroy_ctfd(app)
def test_invalid_confirmation_links():
    """Test that invalid confirmation links are reported to the user"""
    app = create_ctfd()
    with app.app_context():
        set_config("verify_emails", True)
        register_user(app, email="user@user.com")
        client = login_as_user(app, name="user", password="password")
        # Syntactically bogus token (including an XSS probe) must be rejected.
        confirm_link = "http://localhost/confirm/a8375iyu<script>alert(1)<script>hn3048wueorighkgnsfg"
        r = client.get(confirm_link)
        assert "Your confirmation token is invalid" in r.get_data(as_text=True)
        # The bad token must not have verified the account.
        user = Users.query.filter_by(email="user@user.com").first()
        assert user.verified is not True
    destroy_ctfd(app)
def test_expired_reset_password_link():
    """Test that expired reset password links are reported to the user"""
    app = create_ctfd()
    with app.app_context():
        # A mail server must be configured for the reset-password flow.
        set_config("mail_server", "localhost")
        set_config("mail_port", 25)
        set_config("mail_useauth", True)
        set_config("mail_username", "username")
        set_config("mail_password", "password")
        register_user(app, name="user1", email="user@user.com")
        # Freeze time years past the token timestamp so the link has expired.
        with app.test_client() as client, freeze_time("2019-02-24 03:21:34"):
            # Token signed for user@user.com at "2012-01-14 03:21:34"
            forgot_link = "http://localhost/reset_password/InVzZXJAdXNlci5jb20i.TxD0vg.cAGwAy8cK1T0saEEbrDEBVF2plI"
            r = client.get(forgot_link)
            assert "Your link has expired" in r.get_data(as_text=True)
    destroy_ctfd(app)
def test_invalid_reset_password_link():
    """Test that invalid reset password links are reported to the user"""
    app = create_ctfd()
    with app.app_context():
        # A mail server must be configured for the reset-password flow.
        set_config("mail_server", "localhost")
        set_config("mail_port", 25)
        set_config("mail_useauth", True)
        set_config("mail_username", "username")
        set_config("mail_password", "password")
        register_user(app, name="user1", email="user@user.com")
        with app.test_client() as client:
            # A malformed token must be rejected with an error message.
            forgot_link = "http://localhost/reset_password/5678ytfghjiu876tyfg<INVALID DATA>hvbnmkoi9u87y6trdf"
            r = client.get(forgot_link)
            assert "Your reset token is invalid" in r.get_data(as_text=True)
    destroy_ctfd(app)
def test_contact_for_password_reset():
    """Test that if there is no mailserver configured, users should contact admins"""
    app = create_ctfd()
    with app.app_context():
        register_user(app, name="user1", email="user@user.com")
        with app.test_client() as client:
            # Without a mail server configured, the reset page tells users to
            # reach out to an organizer instead of offering an email form.
            resp = client.get("http://localhost/reset_password")
            assert "Contact a CTF organizer" in resp.get_data(as_text=True)
    destroy_ctfd(app)
@patch("smtplib.SMTP")
def test_user_can_confirm_email(mock_smtp):
"""Test that a user is capable of confirming their email address"""
app = create_ctfd()
with app.app_context(), freeze_time("2012-01-14 03:21:34"):
# Set CTFd to only allow confirmed users and send emails
set_config("verify_emails", True)
set_config("mail_server", "localhost")
set_config("mail_port", 25)
set_config("mail_useauth", True)
set_config("mail_username", "username")
set_config("mail_password", "password")
register_user(app, name="user1", email="user@user.com")
# Teams are not verified by default
user = Users.query.filter_by(email="user@user.com").first()
assert user.verified is False
client = login_as_user(app, name="user1", password="password")
r = client.get("http://localhost/confirm")
assert "Need to resend the confirmation email?" in r.get_data(as_text=True)
# smtp send message function was called
if six.PY2:
mock_smtp.return_value.sendmail.assert_called()
else:
mock_smtp.return_value.send_message.assert_called()
with client.session_transaction() as sess:
data = {"nonce": sess.get("nonce")}
r = client.post("http://localhost/confirm", data=data)
assert "confirmation email has been resent" in r.get_data(as_text=True)
r = client.get("/challenges")
assert (
r.location == "http://localhost/confirm"
) # We got redirected to /confirm
r = client.get("http://localhost/confirm/" + serialize("user@user.com"))
assert r.location == "http://localhost/challenges"
# The team is now verified
user = Users.query.filter_by(email="user@user.com").first()
assert user.verified is True
r = client.get("http://localhost/confirm")
assert r.location == "http://localhost/settings"
destroy_ctfd(app)
@patch("smtplib.SMTP")
def test_user_can_reset_password(mock_smtp):
"""Test that a user is capable of resetting their password"""
from email.mime.text import MIMEText
if six.PY3:
from email.message import EmailMessage
app = create_ctfd()
with app.app_context(), freeze_time("2012-01-14 03:21:34"):
# Set CTFd to send emails
set_config("mail_server", "localhost")
set_config("mail_port", 25)
set_config("mail_useauth", True)
set_config("mail_username", "username")
set_config("mail_password", "password")
# Create a user
register_user(app, name="user1", email="user@user.com")
with app.test_client() as client:
client.get("/reset_password")
# Build reset password data
with client.session_transaction() as sess:
data = {"nonce": sess.get("nonce"), "email": "user@user.com"}
# Issue the password reset request
client.post("/reset_password", data=data)
ctf_name = get_config("ctf_name")
from_addr = get_config("mailfrom_addr") or app.config.get("MAILFROM_ADDR")
from_addr = "{} <{}>".format(ctf_name, from_addr)
to_addr = "user@user.com"
# Build the email
msg = (
"Did you initiate a password reset? If you didn't initiate this request you can ignore this email. "
"\n\nClick the following link to reset your password:\n"
"http://localhost/reset_password/InVzZXJAdXNlci5jb20i.TxD0vg.28dY_Gzqb1TH9nrcE_H7W8YFM-U"
)
ctf_name = get_config("ctf_name")
if six.PY2:
email_msg = MIMEText(msg)
else:
email_msg = EmailMessage()
email_msg.set_content(msg)
email_msg["Subject"] = "Password Reset Request from {ctf_name}".format(
ctf_name=ctf_name
)
email_msg["From"] = from_addr
email_msg["To"] = to_addr
# Make sure that the reset password email is sent
if six.PY2:
mock_smtp.return_value.sendmail.assert_called_with(
from_addr, [to_addr], email_msg.as_string()
)
else:
mock_smtp.return_value.send_message.assert_called()
assert str(mock_smtp.return_value.send_message.call_args[0][0]) == str(
email_msg
)
# Get user's original password
user = Users.query.filter_by(email="user@user.com").first()
# Build the POST data
with client.session_transaction() as sess:
data = {"nonce": sess.get("nonce"), "password": "passwordtwo"}
# Do the password reset
client.get(
"/reset_password/InVzZXJAdXNlci5jb20i.TxD0vg.28dY_Gzqb1TH9nrcE_H7W8YFM-U"
)
client.post(
"/reset_password/InVzZXJAdXNlci5jb20i.TxD0vg.28dY_Gzqb1TH9nrcE_H7W8YFM-U",
data=data,
)
# Make sure that the user's password changed
user = Users.query.filter_by(email="user@user.com").first()
assert verify_password("passwordtwo", user.password)
destroy_ctfd(app)
def test_banned_user():
    """A banned user is denied access (HTTP 403) on public routes."""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        client = login_as_user(app)

        # Ban the freshly registered account (id=2; id=1 is the admin).
        user = Users.query.filter_by(id=2).first()
        user.banned = True
        db.session.commit()

        for route in ("/", "/challenges", "/api/v1/challenges"):
            assert client.get(route).status_code == 403
    destroy_ctfd(app)
| 34.603645 | 116 | 0.612995 |
import six
from freezegun import freeze_time
from mock import patch
from CTFd.models import Users, db
from CTFd.utils import get_config, set_config
from CTFd.utils.crypto import verify_password
from CTFd.utils.security.signing import serialize
from tests.helpers import create_ctfd, destroy_ctfd, login_as_user, register_user
def test_register_user():
    """Can a user be registered"""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        user_count = Users.query.count()
        assert user_count == 2  # the admin account plus the new user
    destroy_ctfd(app)
def test_register_unicode_user():
    """Can a user with a unicode name be registered"""
    app = create_ctfd()
    with app.app_context():
        register_user(app, name="你好")
        user_count = Users.query.count()
        assert user_count == 2  # There's the admin user and the created user
    destroy_ctfd(app)
def test_register_duplicate_username():
    """A user shouldn't be able to use an already registered team name"""
    app = create_ctfd()
    with app.app_context():
        register_user(
            app,
            name="user1",
            email="user1@ctfd.io",
            password="password",
            raise_for_error=False,
        )
        # Same name with a different email: registration should be rejected.
        register_user(
            app,
            name="user1",
            email="user2@ctfd.io",
            password="password",
            raise_for_error=False,
        )
        # A whitespace-padded variant of the reserved "admin" name should
        # also be rejected.
        register_user(
            app,
            name="admin ",
            email="admin2@ctfd.io",
            password="password",
            raise_for_error=False,
        )
        user_count = Users.query.count()
        assert user_count == 2  # the admin account plus the first created user
    destroy_ctfd(app)
def test_register_duplicate_email():
    """A user shouldn't be able to use an already registered email address"""
    app = create_ctfd()
    with app.app_context():
        register_user(
            app,
            name="user1",
            email="user1@ctfd.io",
            password="password",
            raise_for_error=False,
        )
        # Different name but the same email address: should be rejected.
        register_user(
            app,
            name="user2",
            email="user1@ctfd.io",
            password="password",
            raise_for_error=False,
        )
        user_count = Users.query.count()
        assert user_count == 2  # There's the admin user and the first created user
    destroy_ctfd(app)
def test_register_whitelisted_email():
    """A user shouldn't be able to register with an email that isn't on the whitelist"""
    app = create_ctfd()
    with app.app_context():
        set_config(
            "domain_whitelist", "whitelisted.com, whitelisted.org, whitelisted.net"
        )
        # A non-whitelisted domain is rejected; only the admin account exists.
        register_user(
            app, name="not_whitelisted", email="user@nope.com", raise_for_error=False
        )
        assert Users.query.count() == 1
        # Each whitelisted domain registers successfully.
        register_user(app, name="user1", email="user@whitelisted.com")
        assert Users.query.count() == 2
        register_user(app, name="user2", email="user@whitelisted.org")
        assert Users.query.count() == 3
        register_user(app, name="user3", email="user@whitelisted.net")
        assert Users.query.count() == 4
    destroy_ctfd(app)
def test_user_bad_login():
    """A user should not be able to login with an incorrect password"""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        client = login_as_user(
            app, name="user", password="wrong_password", raise_for_error=False
        )
        # The failed login must not have established a session.
        with client.session_transaction() as sess:
            assert sess.get("id") is None
        r = client.get("/profile")
        # We got redirected to the login page.
        assert r.location.startswith(
            "http://localhost/login"
        )
    destroy_ctfd(app)
def test_user_login():
    """Can a registered user can login"""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        client = login_as_user(app)
        r = client.get("/profile")
        # We didn't get redirected to the login page.
        assert (
            r.location != "http://localhost/login"
        )
        assert r.status_code == 200
    destroy_ctfd(app)
def test_user_login_with_email():
    """Can a registered user can login with an email address instead of a team name"""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        # Authenticate with the registered email rather than the username.
        client = login_as_user(app, name="user@ctfd.io", password="password")
        r = client.get("/profile")
        assert (
            r.location != "http://localhost/login"
        )  # We didn't get redirected to login
        assert r.status_code == 200
    destroy_ctfd(app)
def test_user_get_logout():
    """Can a registered user load /logout"""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        client = login_as_user(app)
        client.get("/logout", follow_redirects=True)
        # After logging out, protected pages redirect back to the login form.
        r = client.get("/challenges")
        assert r.location == "http://localhost/login?next=%2Fchallenges%3F"
        assert r.status_code == 302
    destroy_ctfd(app)
def test_user_isnt_admin():
    """A registered user cannot access admin pages"""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        client = login_as_user(app)
        # Every admin view should redirect a non-admin to the login page.
        for page in [
            "pages",
            "users",
            "teams",
            "scoreboard",
            "challenges",
            "statistics",
            "config",
        ]:
            r = client.get("/admin/{}".format(page))
            assert r.location.startswith("http://localhost/login?next=")
            assert r.status_code == 302
    destroy_ctfd(app)
def test_expired_confirmation_links():
    """Test that expired confirmation links are reported to the user"""
    app = create_ctfd()
    # Freeze time well past the token's embedded timestamp so the link
    # is considered expired.
    with app.app_context(), freeze_time("2019-02-24 03:21:34"):
        set_config("verify_emails", True)
        register_user(app, email="user@user.com")
        client = login_as_user(app, name="user", password="password")
        # Token signed for user@user.com at "2012-01-14 03:21:34"
        confirm_link = "http://localhost/confirm/InVzZXJAdXNlci5jb20i.TxD0vg.cAGwAy8cK1T0saEEbrDEBVF2plI"
        r = client.get(confirm_link)
        assert "Your confirmation link has expired" in r.get_data(as_text=True)
        # The expired link must not have verified the account.
        user = Users.query.filter_by(email="user@user.com").first()
        assert user.verified is not True
    destroy_ctfd(app)
def test_invalid_confirmation_links():
    """Test that invalid confirmation links are reported to the user"""
    app = create_ctfd()
    with app.app_context():
        set_config("verify_emails", True)
        register_user(app, email="user@user.com")
        client = login_as_user(app, name="user", password="password")
        # Syntactically bogus token (including an XSS probe) must be rejected.
        confirm_link = "http://localhost/confirm/a8375iyu<script>alert(1)<script>hn3048wueorighkgnsfg"
        r = client.get(confirm_link)
        assert "Your confirmation token is invalid" in r.get_data(as_text=True)
        # The bad token must not have verified the account.
        user = Users.query.filter_by(email="user@user.com").first()
        assert user.verified is not True
    destroy_ctfd(app)
def test_expired_reset_password_link():
    """Test that expired reset password links are reported to the user"""
    app = create_ctfd()
    with app.app_context():
        # A mail server must be configured for the reset-password flow.
        set_config("mail_server", "localhost")
        set_config("mail_port", 25)
        set_config("mail_useauth", True)
        set_config("mail_username", "username")
        set_config("mail_password", "password")
        register_user(app, name="user1", email="user@user.com")
        # Freeze time years past the token timestamp so the link has expired.
        with app.test_client() as client, freeze_time("2019-02-24 03:21:34"):
            # Token signed for user@user.com at "2012-01-14 03:21:34"
            forgot_link = "http://localhost/reset_password/InVzZXJAdXNlci5jb20i.TxD0vg.cAGwAy8cK1T0saEEbrDEBVF2plI"
            r = client.get(forgot_link)
            assert "Your link has expired" in r.get_data(as_text=True)
    destroy_ctfd(app)
def test_invalid_reset_password_link():
    """Test that invalid reset password links are reported to the user"""
    app = create_ctfd()
    with app.app_context():
        # A mail server must be configured for the reset-password flow.
        set_config("mail_server", "localhost")
        set_config("mail_port", 25)
        set_config("mail_useauth", True)
        set_config("mail_username", "username")
        set_config("mail_password", "password")
        register_user(app, name="user1", email="user@user.com")
        with app.test_client() as client:
            # A malformed token must be rejected with an error message.
            forgot_link = "http://localhost/reset_password/5678ytfghjiu876tyfg<INVALID DATA>hvbnmkoi9u87y6trdf"
            r = client.get(forgot_link)
            assert "Your reset token is invalid" in r.get_data(as_text=True)
    destroy_ctfd(app)
def test_contact_for_password_reset():
    """Test that if there is no mailserver configured, users should contact admins"""
    app = create_ctfd()
    with app.app_context():
        register_user(app, name="user1", email="user@user.com")
        with app.test_client() as client:
            # Without a mail server configured, the reset page tells users
            # to reach out to an organizer instead of offering a form.
            forgot_link = "http://localhost/reset_password"
            r = client.get(forgot_link)
            assert "Contact a CTF organizer" in r.get_data(as_text=True)
    destroy_ctfd(app)
@patch("smtplib.SMTP")
def test_user_can_confirm_email(mock_smtp):
app = create_ctfd()
with app.app_context(), freeze_time("2012-01-14 03:21:34"):
set_config("verify_emails", True)
set_config("mail_server", "localhost")
set_config("mail_port", 25)
set_config("mail_useauth", True)
set_config("mail_username", "username")
set_config("mail_password", "password")
register_user(app, name="user1", email="user@user.com")
user = Users.query.filter_by(email="user@user.com").first()
assert user.verified is False
client = login_as_user(app, name="user1", password="password")
r = client.get("http://localhost/confirm")
assert "Need to resend the confirmation email?" in r.get_data(as_text=True)
if six.PY2:
mock_smtp.return_value.sendmail.assert_called()
else:
mock_smtp.return_value.send_message.assert_called()
with client.session_transaction() as sess:
data = {"nonce": sess.get("nonce")}
r = client.post("http://localhost/confirm", data=data)
assert "confirmation email has been resent" in r.get_data(as_text=True)
r = client.get("/challenges")
assert (
r.location == "http://localhost/confirm"
)
r = client.get("http://localhost/confirm/" + serialize("user@user.com"))
assert r.location == "http://localhost/challenges"
user = Users.query.filter_by(email="user@user.com").first()
assert user.verified is True
r = client.get("http://localhost/confirm")
assert r.location == "http://localhost/settings"
destroy_ctfd(app)
@patch("smtplib.SMTP")
def test_user_can_reset_password(mock_smtp):
    # Verify the password-reset flow end to end: requesting a reset sends the
    # expected email, and following the token link lets the user set a new
    # password.
    from email.mime.text import MIMEText
    if six.PY3:
        from email.message import EmailMessage
    app = create_ctfd()
    # freeze_time pins the itsdangerous token so the link below is stable.
    with app.app_context(), freeze_time("2012-01-14 03:21:34"):
        # Configure a (mocked) SMTP server so reset mails can be "sent".
        set_config("mail_server", "localhost")
        set_config("mail_port", 25)
        set_config("mail_useauth", True)
        set_config("mail_username", "username")
        set_config("mail_password", "password")
        register_user(app, name="user1", email="user@user.com")
        with app.test_client() as client:
            # Request a password reset for the registered address.
            client.get("/reset_password")
            with client.session_transaction() as sess:
                data = {"nonce": sess.get("nonce"), "email": "user@user.com"}
            client.post("/reset_password", data=data)
            # Rebuild the exact email CTFd should have produced.
            ctf_name = get_config("ctf_name")
            from_addr = get_config("mailfrom_addr") or app.config.get("MAILFROM_ADDR")
            from_addr = "{} <{}>".format(ctf_name, from_addr)
            to_addr = "user@user.com"
            msg = (
                "Did you initiate a password reset? If you didn't initiate this request you can ignore this email. "
                "\n\nClick the following link to reset your password:\n"
                "http://localhost/reset_password/InVzZXJAdXNlci5jb20i.TxD0vg.28dY_Gzqb1TH9nrcE_H7W8YFM-U"
            )
            ctf_name = get_config("ctf_name")
            if six.PY2:
                email_msg = MIMEText(msg)
            else:
                email_msg = EmailMessage()
                email_msg.set_content(msg)
            email_msg["Subject"] = "Password Reset Request from {ctf_name}".format(
                ctf_name=ctf_name
            )
            email_msg["From"] = from_addr
            email_msg["To"] = to_addr
            # Make sure that the reset password email is sent
            if six.PY2:
                mock_smtp.return_value.sendmail.assert_called_with(
                    from_addr, [to_addr], email_msg.as_string()
                )
            else:
                mock_smtp.return_value.send_message.assert_called()
                assert str(mock_smtp.return_value.send_message.call_args[0][0]) == str(
                    email_msg
                )
            # Get user's original password
            user = Users.query.filter_by(email="user@user.com").first()
            # Follow the token link and submit the new password with the nonce.
            with client.session_transaction() as sess:
                data = {"nonce": sess.get("nonce"), "password": "passwordtwo"}
            client.get(
                "/reset_password/InVzZXJAdXNlci5jb20i.TxD0vg.28dY_Gzqb1TH9nrcE_H7W8YFM-U"
            )
            client.post(
                "/reset_password/InVzZXJAdXNlci5jb20i.TxD0vg.28dY_Gzqb1TH9nrcE_H7W8YFM-U",
                data=data,
            )
            # The stored password hash must now match the new password.
            user = Users.query.filter_by(email="user@user.com").first()
            assert verify_password("passwordtwo", user.password)
    destroy_ctfd(app)
def test_banned_user():
    """A banned user should receive 403 Forbidden on every page,
    including the API."""
    app = create_ctfd()
    with app.app_context():
        register_user(app)
        client = login_as_user(app)
        # Ban the freshly registered user (id=2; id=1 is the admin).
        banned_user = Users.query.filter_by(id=2).first()
        banned_user.banned = True
        db.session.commit()
        for path in ("/", "/challenges", "/api/v1/challenges"):
            response = client.get(path)
            assert response.status_code == 403
    destroy_ctfd(app)
| true | true |
1c2d18c57908c297096770b862aec97c9ff96d52 | 11,283 | py | Python | modify_weights.py | rayjyh/PyTorch_CIFAR10_rram_compenstion | 61a53c85a74abc965a5bea3e4b102e9a7ad8f03a | [
"MIT"
] | null | null | null | modify_weights.py | rayjyh/PyTorch_CIFAR10_rram_compenstion | 61a53c85a74abc965a5bea3e4b102e9a7ad8f03a | [
"MIT"
] | null | null | null | modify_weights.py | rayjyh/PyTorch_CIFAR10_rram_compenstion | 61a53c85a74abc965a5bea3e4b102e9a7ad8f03a | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import torch
'''
compensation = {
"no_comp":
"simple":
"dynamic":
"bias":
"double_sim":
}
'''
# Module-level switches consumed by multi_cell()/modify_weight() below.
quantized = True  # apply k-bit quantization before injecting device variation
visualize = False  # when True, dump weight/error histograms via matplotlib
symmetric = True  # symmetric (sign-magnitude) vs. asymmetric quantization
def asym_quant_param(w, k):
    """Derive asymmetric ``k``-bit quantization parameters for tensor ``w``.

    Returns ``(num_intervals, scale, zero_point)``: the number of quantization
    steps, the real-valued step size, and the integer level that maps to 0.
    """
    levels = 2 ** k - 1
    w_min, w_max = torch.min(w), torch.max(w)
    step = (w_max - w_min) / levels
    zero_point = int(-w_min / step)
    return levels, step, zero_point
def sym_quant_param(w, k):
    """Derive symmetric ``k``-bit quantization parameters for tensor ``w``.

    Returns ``(num_intervals, scale)`` where the scale is the largest
    absolute value of ``w`` divided by the number of steps.
    """
    levels = 2 ** k - 1
    step = torch.max(torch.abs(w)) / levels
    return levels, step
def asym_quant(w, num_intervals, scale, zero):
    """Quantize ``w`` to asymmetric integer levels in ``[0, num_intervals]``."""
    levels = torch.round(w / scale) + zero
    return torch.clamp(levels, 0, num_intervals)
def sym_quant(w, num_intervals, scale):
    """Quantize ``w`` to symmetric integer levels.

    The result is clamped to ``[-num_intervals, num_intervals - 1]``
    (one more negative level than positive, as in two's complement).
    """
    levels = torch.round(w / scale)
    return torch.clamp(levels, min=-num_intervals, max=num_intervals - 1)
def asym_de_quant(w_q, scale, zero):
    """Map asymmetric quantized levels back to real values."""
    centered = w_q - zero
    return centered * scale
def sym_de_quant(w_q, scale):
    """Map symmetric quantized levels back to real values."""
    return torch.mul(w_q, scale)
class Compensation:
    """Models RRAM write variation and several write-verify compensation schemes.

    Each method takes a tensor of target (quantized) weight values ``w`` and
    returns the simulated "programmed" values: the target perturbed by device
    variation, optionally followed by a compensation step that writes the
    observed error (scaled up by ``scale``) into an auxiliary cell.

    Noise is injected with ``torch.normal``:
    - ``absolute_variation`` truthy: noise std is the absolute ``var``;
    - otherwise: std is ``var_percentage`` times the magnitude being written
      (plain normal if ``normal_dist``, log-normal in ``no_comp`` otherwise).
    """

    def __init__(self,
                 absolute_variation=0,
                 normal_dist=0,
                 var=5,
                 var_percentage=7.2,
                 scale=10,
                 sd=5,
                 sd_percentage=0,
                 bias=5,
                 k_bits=4):
        # 1 -> noise std is absolute (``var``); 0 -> proportional to |value|.
        self.absolute_variation = absolute_variation
        # In the relative regime of ``no_comp``: 1 -> additive normal noise,
        # 0 -> multiplicative log-normal noise.
        self.normal_dist = normal_dist
        self.var_percentage = var_percentage
        # Gain of the compensation cell: the error is programmed scaled up by
        # ``scale`` and divided back down when added to the weight.
        self.scale = scale
        self.sd = sd
        self.sd_percentage = sd_percentage
        self.k_bits = k_bits
        # Largest representable quantized magnitude for ``k_bits`` bits.
        self.max = 2 ** self.k_bits - 1
        # NOTE: intentionally overwrites the ``sd`` constructor argument —
        # the effective shift used by ``dynamic`` is ``sd_percentage * max``
        # (preserved original behavior).
        self.sd = self.sd_percentage * self.max
        self.var = var
        self.bias = bias * self.max

    def no_comp(self, w):
        """Return ``w`` with programming variation only (no compensation)."""
        if self.absolute_variation:
            # Absolute noise, scaled by the mean magnitude of the tensor.
            mean_mag = torch.mean(torch.abs(w))
            std = self.var * mean_mag.item()
            real = torch.normal(w, std)
        else:
            if self.normal_dist:
                real = torch.normal(w, self.var_percentage * abs(w))
            else:
                # Multiplicative log-normal noise.
                real = w * torch.exp(torch.normal(0, self.var_percentage, size=w.shape, device=w.device))
        return real

    def simple(self, w):
        """One-shot compensation of the observed write error.

        The correction is skipped wherever a positive target was overshot
        (``real > w`` and ``w > 0``), matching the original scheme.
        """
        if self.absolute_variation:
            real = torch.normal(w, self.var)
        else:
            real = torch.normal(w, self.var_percentage * abs(w))
        skip = (w > torch.zeros_like(w)) * (real > w)
        pre_diff = w - real
        # BUG FIX: the original tested the bare name ``absolute_variation``,
        # which is not defined at module level (NameError on this path);
        # it must be the instance attribute.
        if self.absolute_variation:
            comp = torch.normal(pre_diff * self.scale, self.var)
        else:
            comp = torch.normal(pre_diff * self.scale, self.var_percentage * abs(pre_diff * self.scale))
        corrected = real + comp / self.scale
        return torch.where(skip, real, corrected)

    def dynamic(self, w):
        """Compensation with a deliberate pre-shift of ``sd`` toward zero.

        Positive targets are programmed ``sd`` low and negative targets ``sd``
        high, so the subsequent correction is mostly one-sided.
        """
        if self.absolute_variation:
            pos_real = torch.normal(w - self.sd, self.var)
            neg_real = torch.normal(w + self.sd, self.var)
        else:
            pos_real = torch.normal(w - self.sd, self.var_percentage * abs(w - self.sd))
            neg_real = torch.normal(w + self.sd, self.var_percentage * abs(w + self.sd))
        real = torch.where(w > 0, pos_real, neg_real)
        skip = (w > torch.zeros_like(w)) * (real > w)
        pre_diff = w - real
        if self.absolute_variation:
            comp = torch.normal(pre_diff * self.scale, self.var)
        else:
            comp = torch.normal(pre_diff * self.scale, self.var_percentage * abs(pre_diff * self.scale))
        corrected = real + comp / self.scale
        return torch.where(skip, real, corrected)

    def bias_comp(self, w):
        """Compensation with a constant offset ``bias`` added to the error so
        the correction cell only ever stores non-negative values."""
        if self.absolute_variation:
            real = torch.normal(w, self.var)
        else:
            real = torch.normal(w, self.var_percentage * abs(w))
        # Errors below -bias cannot be represented after the offset; clamp them.
        pre_diff = torch.clamp(w - real, min=-self.bias)
        if self.absolute_variation:
            comp = torch.normal((pre_diff + self.bias) * self.scale, self.var)
        else:
            comp = torch.normal((pre_diff + self.bias) * self.scale,
                                self.var_percentage * abs((pre_diff + self.bias) * self.scale))
        # Remove the offset again after the (noisy) compensation write.
        return real + comp / self.scale - self.bias

    def double_sim(self, w):
        """Unconditional compensation; the correction value is clamped to the
        representable range ``[-max, max]``."""
        if self.absolute_variation:
            real = torch.normal(w, self.var)
        else:
            real = torch.normal(w, self.var_percentage * abs(w))
        diff = w - real
        if self.absolute_variation:
            comp = torch.normal(diff * self.scale, self.var)
        else:
            comp = torch.normal(diff * self.scale, self.var_percentage * abs(diff * self.scale))
        comp = torch.clamp(comp, -self.max, self.max)
        return real + comp / self.scale
def multi_cell(w, k_bits, n_cells, compensation, modification=None, method="normal"):
    """Quantize ``w`` to ``k_bits``, split each value across ``n_cells`` RRAM
    cells, apply ``modification`` (a variation/compensation callable) per cell,
    then merge and dequantize.

    Depends on the module-level flag ``symmetric`` and the quantization
    helpers defined in this module.

    NOTE(review): when ``modification`` is given, this function divides
    ``compensation.var``/``var_percentage``/``sd``/``bias`` in place — repeated
    calls with the same ``compensation`` object compound the scaling. Presumably
    intended to be called once per Compensation instance; verify against callers.
    """
    assert k_bits % n_cells == 0, "k_bits must be divisible by n_cells!"
    # quantization
    if symmetric:
        num_intervals, scale = sym_quant_param(w, k_bits)
        w_q = sym_quant(w, num_intervals, scale).to(torch.int64)
        # Store magnitudes only; the sign is re-applied after modification.
        neg_mask = torch.where(w_q < 0, -1, 1)
        w_q = w_q * neg_mask
    else:
        num_intervals, scale, zero = asym_quant_param(w, k_bits)
        w_q = asym_quant(w, num_intervals, scale, zero).to(torch.int64)
    if n_cells > 1:
        # convert to binary digits
        mask = 2 ** torch.arange(k_bits - 1, -1, -1, device=w.device)
        w_binary = w_q.unsqueeze(-1).bitwise_and(mask).ne(0).int()
        # split into multi cells
        bits_per_cell = int(k_bits / n_cells)
        w_binary_mc = torch.chunk(w_binary, n_cells, -1)
        # get the decimal of each cell
        mask_mc = 2 ** torch.arange(bits_per_cell - 1, -1, -1, device=w.device)
        mask_mc = [chunk * torch.ones(w.shape, device=w.device) for chunk in mask_mc]
        mask_mc = torch.stack(mask_mc, -1)
        w_mc = [chunk.mul(mask_mc).sum(-1) for chunk in w_binary_mc]
        # apply variation and compensation if any
        if modification is not None:
            # Rescale variation parameters to per-cell magnitude (in place —
            # see NOTE in the docstring).
            compensation.var = compensation.var / (2 ** (k_bits - bits_per_cell))
            compensation.var_percentage = compensation.var_percentage / (2 ** (k_bits - bits_per_cell))
            compensation.sd = compensation.sd / (2 ** (k_bits - bits_per_cell))
            compensation.bias = compensation.bias / (2 ** (k_bits - bits_per_cell))
            # variation-aware programming
            if method == "variation_aware_programming":
                # Once a higher-significance cell under-programs, saturate all
                # lower-significance cells at their maximum value.
                w_mc_m = []
                vap_flag = torch.zeros_like(w_mc[0]).type(torch.bool)
                for i, chunk in enumerate(w_mc):
                    cell_max = modification((2 ** bits_per_cell - 1) * torch.ones_like(chunk))
                    w_mc_m.append(torch.where(vap_flag, cell_max, modification(chunk)))
                    msb_diff = abs(w_mc[i] - w_mc_m[i])
                    vap_flag = torch.logical_or(vap_flag, torch.where(msb_diff > 0.5, True, False))
            elif method == "msb_only":
                # Only the most significant cell is compensated; lower cells
                # receive uncompensated variation.
                msb = [modification(w_mc[0])]
                lsb = [compensation.no_comp(chunk) for chunk in w_mc[1:]]
                w_mc_m = msb + lsb
            else:
                w_mc_m = [modification(chunk) for chunk in w_mc]
            w_mc = w_mc_m
        # merge multi cells into one weight
        magnitude = (2 ** bits_per_cell) ** torch.arange(n_cells - 1, -1, -1, device=w.device)
        mag_w = list(zip(w_mc, magnitude))
        w_mc_mag = [chunk[0] * chunk[1] for chunk in mag_w]
        w_q_m = torch.stack(w_mc_mag, -1).sum(-1)
    else:
        if modification is None:
            w_q_m = w_q
        else:
            w_q_m = modification(w_q.to(torch.float32))
    # dequantize
    if symmetric:
        # Restore the signs stripped before modification.
        w_q_m = w_q_m * neg_mask
        w_q_m_dq = sym_de_quant(w_q_m, scale)
    else:
        w_q_m_dq = asym_de_quant(w_q_m, scale, zero)
    return w_q_m_dq
def modify_weight(weight_dict, layer_list, k_bits, n_cells, compensation, modification=None):
    """Apply quantization/variation modification in place to the layers of
    ``weight_dict`` whose names appear in ``layer_list``.

    Controlled by the module-level flags ``quantized`` (route through
    ``multi_cell``) and ``visualize`` (dump before/after histograms).
    """
    for layer in weight_dict.keys():
        if layer in layer_list:
            og_weight = weight_dict[layer].detach().clone()
            if quantized:
                weight_dict[layer] = multi_cell(weight_dict[layer], k_bits, n_cells, compensation, modification)
            else:
                if modification is None:
                    # Nothing to do; NOTE: mirrors original behavior of
                    # aborting the whole loop, not just skipping this layer.
                    return
                else:
                    weight_dict[layer] = modification(weight_dict[layer])
            dq_weight = weight_dict[layer].detach().clone()
            if visualize:
                plt.hist(og_weight.cpu().numpy().flatten(), 200, alpha=0.4, color='red', label='weight')
                plt.hist(dq_weight.cpu().numpy().flatten(), 200, alpha=0.4, color='blue', label='de_quant_weight')
                plt.legend()
                plt.savefig("/home/huangyc/jingyuhe/Pytorch_CIFAR10/dy_comp_weight/" + layer + "_weight.png")
                plt.close()
                # BUG FIX: the original referenced ``pre_diff``/``post_diff``
                # here, which are never defined in this scope (the assignment
                # was commented out), causing a NameError whenever
                # visualize=True. Plot the overall modification error instead.
                diff = (og_weight - dq_weight).cpu().numpy().flatten()
                plt.hist(diff, 200, alpha=0.4, color='yellow', label='errors')
                plt.legend()
                plt.savefig("/home/huangyc/jingyuhe/Pytorch_CIFAR10/dy_comp_weight/" + layer + "_diff.png")
                plt.close()
'''
tmp = torch.normal(0, 0.2, size=(3, 3), device="cuda:0")
compensation = Compensation()
multi_cell(tmp, 6, 3, compensation, compensation.dynamic, method="variation_aware_programming")
''' | 39.041522 | 117 | 0.572011 | import numpy as np
import matplotlib.pyplot as plt
import torch
quantized = True
visualize = False
symmetric = True
def asym_quant_param(w, k):
num_intervals = 2 ** k - 1
scale = (torch.max(w) - torch.min(w)) / num_intervals
zero = int(-torch.min(w) / scale)
return num_intervals, scale, zero
def sym_quant_param(w, k):
num_intervals = 2 ** k - 1
scale = max(torch.max(w), -torch.min(w)) / num_intervals
return num_intervals, scale
def asym_quant(w, num_intervals, scale, zero):
w_q = torch.round(w / scale) + zero
w_q = torch.clamp(w_q, 0, num_intervals)
return w_q
def sym_quant(w, num_intervals, scale):
w_q = torch.round(w / scale)
w_q = torch.clamp(w_q, -num_intervals, num_intervals - 1)
return w_q
def asym_de_quant(w_q, scale, zero):
w_dq = (w_q - zero) * scale
return w_dq
def sym_de_quant(w_q, scale):
w_dq = w_q * scale
return w_dq
class Compensation:
def __init__(self,
absolute_variation=0,
normal_dist=0,
var=5,
var_percentage=7.2,
scale=10,
sd=5,
sd_percentage=0,
bias=5,
k_bits=4):
self.absolute_variation = absolute_variation
self.normal_dist = normal_dist
self.var_percentage = var_percentage
self.scale = scale
self.sd = sd
self.sd_percentage = sd_percentage
self.k_bits = k_bits
self.max = 2 ** self.k_bits - 1
self.sd = self.sd_percentage * self.max
self.var = var
self.bias = bias * self.max
def no_comp(self, w):
if self.absolute_variation:
pos = torch.abs(w)
maximum = torch.mean(pos)
std = self.var * maximum.item()
real = torch.normal(w, std)
else:
if self.normal_dist:
real = torch.normal(w, self.var_percentage * abs(w))
else:
real = w * torch.exp(torch.normal(0, self.var_percentage, size=w.shape, device=w.device))
return real
def simple(self, w):
if self.absolute_variation:
real = torch.normal(w, self.var)
else:
real = torch.normal(w, self.var_percentage * abs(w))
gtz = w > torch.zeros_like(w)
gtw = real > w
condition = gtz * gtw
pre_diff = w - real
if absolute_variation:
comp = torch.normal(pre_diff * self.scale, self.var)
else:
comp = torch.normal(pre_diff * self.scale, self.var_percentage * abs(pre_diff * self.scale))
new = real + comp / self.scale
new = torch.where(condition, real, new)
post_diff = w - new
return new
def dynamic(self, w):
if self.absolute_variation:
pos_real = torch.normal(w - self.sd, self.var)
neg_real = torch.normal(w + self.sd, self.var)
real = torch.where(w > 0, pos_real, neg_real)
else:
pos_real = torch.normal(w - self.sd, self.var_percentage * abs(w - self.sd))
neg_real = torch.normal(w + self.sd, self.var_percentage * abs(w + self.sd))
real = torch.where(w > 0, pos_real, neg_real)
gtz = w > torch.zeros_like(w)
gtw = real > w
condition = gtz * gtw
pre_diff = w - real
if self.absolute_variation:
comp = torch.normal(pre_diff * self.scale, self.var)
else:
comp = torch.normal(pre_diff * self.scale, self.var_percentage * abs(pre_diff * self.scale))
new = real + comp / self.scale
new = torch.where(condition, real, new)
post_diff = w - new
return new
def bias_comp(self, w):
if self.absolute_variation:
real = torch.normal(w, self.var)
else:
real = torch.normal(w, self.var_percentage * abs(w))
pre_diff = w - real
pre_diff = torch.clamp(pre_diff, min=-self.bias)
if self.absolute_variation:
comp = torch.normal((pre_diff + self.bias) * self.scale, self.var)
else:
comp = torch.normal((pre_diff + self.bias) * self.scale,
self.var_percentage * abs((pre_diff + self.bias) * self.scale))
new = real + comp / self.scale - self.bias
post_diff = w - new
return new
def double_sim(self, w):
if self.absolute_variation:
real = torch.normal(w, self.var)
else:
real = torch.normal(w, self.var_percentage * abs(w))
diff = w - real
if self.absolute_variation:
comp = torch.normal(diff * self.scale, self.var)
else:
comp = torch.normal(diff * self.scale, self.var_percentage * abs(diff * self.scale))
comp = torch.clamp(comp, -self.max, self.max)
return real + comp / self.scale
def multi_cell(w, k_bits, n_cells, compensation, modification=None, method="normal"):
assert k_bits % n_cells == 0, "k_bits must be divisible by n_cells!"
if symmetric:
num_intervals, scale = sym_quant_param(w, k_bits)
w_q = sym_quant(w, num_intervals, scale).to(torch.int64)
neg_mask = torch.where(w_q < 0, -1, 1)
w_q = w_q * neg_mask
else:
num_intervals, scale, zero = asym_quant_param(w, k_bits)
w_q = asym_quant(w, num_intervals, scale, zero).to(torch.int64)
if n_cells > 1:
mask = 2 ** torch.arange(k_bits - 1, -1, -1, device=w.device)
w_binary = w_q.unsqueeze(-1).bitwise_and(mask).ne(0).int()
bits_per_cell = int(k_bits / n_cells)
w_binary_mc = torch.chunk(w_binary, n_cells, -1)
mask_mc = 2 ** torch.arange(bits_per_cell - 1, -1, -1, device=w.device)
mask_mc = [chunk * torch.ones(w.shape, device=w.device) for chunk in mask_mc]
mask_mc = torch.stack(mask_mc, -1)
w_mc = [chunk.mul(mask_mc).sum(-1) for chunk in w_binary_mc]
if modification is not None:
compensation.var = compensation.var / (2 ** (k_bits - bits_per_cell))
compensation.var_percentage = compensation.var_percentage / (2 ** (k_bits - bits_per_cell))
compensation.sd = compensation.sd / (2 ** (k_bits - bits_per_cell))
compensation.bias = compensation.bias / (2 ** (k_bits - bits_per_cell))
if method == "variation_aware_programming":
w_mc_m = []
vap_flag = torch.zeros_like(w_mc[0]).type(torch.bool)
for i, chunk in enumerate(w_mc):
cell_max = modification((2 ** bits_per_cell - 1) * torch.ones_like(chunk))
w_mc_m.append(torch.where(vap_flag, cell_max, modification(chunk)))
msb_diff = abs(w_mc[i] - w_mc_m[i])
vap_flag = torch.logical_or(vap_flag, torch.where(msb_diff > 0.5, True, False))
elif method == "msb_only":
msb = [modification(w_mc[0])]
lsb = [compensation.no_comp(chunk) for chunk in w_mc[1:]]
w_mc_m = msb + lsb
else:
w_mc_m = [modification(chunk) for chunk in w_mc]
w_mc = w_mc_m
magnitude = (2 ** bits_per_cell) ** torch.arange(n_cells - 1, -1, -1, device=w.device)
mag_w = list(zip(w_mc, magnitude))
w_mc_mag = [chunk[0] * chunk[1] for chunk in mag_w]
w_q_m = torch.stack(w_mc_mag, -1).sum(-1)
else:
if modification is None:
w_q_m = w_q
else:
w_q_m = modification(w_q.to(torch.float32))
if symmetric:
w_q_m = w_q_m * neg_mask
w_q_m_dq = sym_de_quant(w_q_m, scale)
else:
w_q_m_dq = asym_de_quant(w_q_m, scale, zero)
return w_q_m_dq
def modify_weight(weight_dict, layer_list, k_bits, n_cells, compensation, modification=None):
for layer in weight_dict.keys():
if layer in layer_list:
og_weight = weight_dict[layer].detach().clone()
if quantized:
weight_dict[layer] = multi_cell(weight_dict[layer], k_bits, n_cells, compensation, modification)
else:
if modification is None:
return
else:
weight_dict[layer] = modification(weight_dict[layer])
dq_weight = weight_dict[layer].detach().clone()
if visualize:
plt.hist(og_weight.cpu().numpy().flatten(), 200, alpha=0.4, color='red', label='weight')
plt.hist(dq_weight.cpu().numpy().flatten(), 200, alpha=0.4, color='blue', label='de_quant_weight')
plt.legend()
plt.savefig("/home/huangyc/jingyuhe/Pytorch_CIFAR10/dy_comp_weight/" + layer + "_weight.png")
plt.close()
plt.hist(pre_diff.cpu().numpy().flatten(), 200, alpha=0.4, color='purple', label='pre_comp errors')
plt.hist(post_diff.cpu().numpy().flatten(), 200, alpha=0.4, color='yellow', label='post_comp errors')
plt.legend()
plt.savefig("/home/huangyc/jingyuhe/Pytorch_CIFAR10/dy_comp_weight/" + layer + "_diff.png")
plt.close()
| true | true |
1c2d19186929b1032fb2c4eb224c5f81db2d2d09 | 312 | py | Python | src/vox/linters/python/clonedigger.py | Peilonrayz/vox | 026a82bb3c0d47988cd20d18639bcb0e249ee211 | [
"MIT"
] | null | null | null | src/vox/linters/python/clonedigger.py | Peilonrayz/vox | 026a82bb3c0d47988cd20d18639bcb0e249ee211 | [
"MIT"
] | null | null | null | src/vox/linters/python/clonedigger.py | Peilonrayz/vox | 026a82bb3c0d47988cd20d18639bcb0e249ee211 | [
"MIT"
] | null | null | null | import vox
from vox import flaggy, linty
from ..base_linter import BaseLinter
class Clonedigger(BaseLinter):
    """Linter adapter for the ``clonedigger`` duplicate-code detector."""
    # Command-line invocation template built with the vox flag DSL.
    COMMAND = vox.FlagsBuilder().sugar(program="clonedigger")
    # Packages that must be installed for this linter to run.
    DEPENDENCIES = ["clonedigger"]
    # No structured output format is parsed for this tool.
    FORMAT = None
    NAME = "clonedigger"
    # Interpreter version required by the tool (clonedigger is Python-2 only).
    PYTHON = "2.7"
    # Pass the raw tool output straight through as the error report.
    extract_errors = linty.from_str.echo
| 22.285714 | 61 | 0.711538 | import vox
from vox import flaggy, linty
from ..base_linter import BaseLinter
class Clonedigger(BaseLinter):
COMMAND = vox.FlagsBuilder().sugar(program="clonedigger")
DEPENDENCIES = ["clonedigger"]
FORMAT = None
NAME = "clonedigger"
PYTHON = "2.7"
extract_errors = linty.from_str.echo
| true | true |
1c2d19569551341805cf3d50cc5ffb640693bf50 | 4,652 | py | Python | FemMed Microservice/ecommerce/settings.py | HimanshuBarak/FemPower | d3b7ed0d7f2f6eece9d9a1149fae083d88c1bb06 | [
"MIT"
] | null | null | null | FemMed Microservice/ecommerce/settings.py | HimanshuBarak/FemPower | d3b7ed0d7f2f6eece9d9a1149fae083d88c1bb06 | [
"MIT"
] | null | null | null | FemMed Microservice/ecommerce/settings.py | HimanshuBarak/FemPower | d3b7ed0d7f2f6eece9d9a1149fae083d88c1bb06 | [
"MIT"
] | null | null | null | """
Django settings for ecommerce project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# import fasttext
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is hardcoded and committed; it should be read
# from an environment variable before any production deployment.
SECRET_KEY = 'lzj2axnee5!=1s5y17#ud4l^_n)krr*9wmn=^t6+o!^ry%&v_*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'wishlists.apps.WishlistsConfig',
    'orders.apps.OrdersConfig',
    # 'users.apps.UsersConfig',
    'cart.apps.CartConfig',
    'payments.apps.PaymentsConfig',
    'products.apps.ProductsConfig',
    'categories.apps.CategoriesConfig',
    'pages.apps.PagesConfig',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'grappelli',
    'django.contrib.admin',
    'django_cleanup',
    'users',
    'crispy_forms',
    'app',
    'processdata',
    'django.contrib.humanize',
    'polls',
    'personal',
    'mathfilters',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ecommerce.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR , 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'ecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'groc4all',
#         'USER':'postgres',
#         'PASSWORD':'admin',
#         'HOST':'localhost',
#         'PORT':'5432',
#     }
# }
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Calcutta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Use the project's custom user model instead of django.contrib.auth.User.
AUTH_USER_MODEL = 'users.User'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
GRAPPELLI_ADMIN_TITLE = 'EcomSite'
# Session key under which the shopping cart is stored.
CART_SESSION_ID ='cart'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR , 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS=[(os.path.join(BASE_DIR,'ecommerce/static'))]
# model1 = fasttext.load_model("amazon.ftz")
# Placeholder for the fasttext model (real loading is commented out above).
model1 = 'amazon'
# Media Folder Settings
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
# NOTE(review): SMTP and Stripe credentials below are hardcoded in source
# control; they should be moved to environment variables / a secrets store.
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_USE_TLS = True
EMAIL_HOST = "smtp.gmail.com"
EMAIL_PORT = 587
EMAIL_HOST_USER = "groc4all@gmail.com"
EMAIL_HOST_PASSWORD = "troubleshooters"
SENDER_EMAIL = "groc4all@gmail.com"
STRIPE_SECRET_KEY = 'sk_test_taIXxge0gFAph97YgS4hugoz00mm5q8OKv'
STRIPE_PUBLISHABLE_KEY = 'pk_test_3CEtRqmlnYvIOtQmsEslfEO500DRlJ7IyR'
| 25.844444 | 91 | 0.693465 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'lzj2axnee5!=1s5y17#ud4l^_n)krr*9wmn=^t6+o!^ry%&v_*'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'wishlists.apps.WishlistsConfig',
'orders.apps.OrdersConfig',
# 'users.apps.UsersConfig',
'cart.apps.CartConfig',
'payments.apps.PaymentsConfig',
'products.apps.ProductsConfig',
'categories.apps.CategoriesConfig',
'pages.apps.PagesConfig',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'grappelli',
'django.contrib.admin',
'django_cleanup',
'users',
'crispy_forms',
'app',
'processdata',
'django.contrib.humanize',
'polls',
'personal',
'mathfilters',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR , 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'groc4all',
# 'USER':'postgres',
# 'PASSWORD':'admin',
# 'HOST':'localhost',
# 'PORT':'5432',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Calcutta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTH_USER_MODEL = 'users.User'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
GRAPPELLI_ADMIN_TITLE = 'EcomSite'
CART_SESSION_ID ='cart'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR , 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS=[(os.path.join(BASE_DIR,'ecommerce/static'))]
# model1 = fasttext.load_model("amazon.ftz")
model1 = 'amazon'
# Media Folder Settings
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_USE_TLS = True
EMAIL_HOST = "smtp.gmail.com"
EMAIL_PORT = 587
EMAIL_HOST_USER = "groc4all@gmail.com"
EMAIL_HOST_PASSWORD = "troubleshooters"
SENDER_EMAIL = "groc4all@gmail.com"
STRIPE_SECRET_KEY = 'sk_test_taIXxge0gFAph97YgS4hugoz00mm5q8OKv'
STRIPE_PUBLISHABLE_KEY = 'pk_test_3CEtRqmlnYvIOtQmsEslfEO500DRlJ7IyR'
| true | true |
1c2d19be1cf2c7370f09d63ee85ffc190a87325c | 13,747 | py | Python | src/sage/modular/modform_hecketriangle/element.py | defeo/sage | d8822036a9843bd4d75845024072515ede56bcb9 | [
"BSL-1.0"
] | 2 | 2018-06-30T01:37:35.000Z | 2018-06-30T01:37:39.000Z | src/sage/modular/modform_hecketriangle/element.py | boothby/sage | 1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f | [
"BSL-1.0"
] | null | null | null | src/sage/modular/modform_hecketriangle/element.py | boothby/sage | 1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f | [
"BSL-1.0"
] | null | null | null | r"""
Elements of Hecke modular forms spaces
AUTHORS:
- Jonas Jermann (2013): initial version
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2013-2014 Jonas Jermann <jjermann2@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from .graded_ring_element import FormsRingElement
class FormsElement(FormsRingElement):
"""
(Hecke) modular forms.
"""
    def __init__(self, parent, rat):
        r"""
        An element of a space of (Hecke) modular forms.

        INPUT:

        - ``parent`` -- a modular form space

        - ``rat`` -- a rational function which corresponds to a
          modular form in the modular form space

        OUTPUT:

        A (Hecke) modular form element corresponding to the given rational function
        with the given parent space.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.space import ModularForms
            sage: (x,y,z,d)=var("x,y,z,d")
            sage: MF = ModularForms(n=5, k=20/3, ep=1)
            sage: MF.default_prec(3)
            sage: el = MF(x^5*d-y^2*d)
            sage: el
            q - 9/(200*d)*q^2 + O(q^3)
            sage: el.rat()
            x^5*d - y^2*d
            sage: el.parent()
            ModularForms(n=5, k=20/3, ep=1) over Integer Ring
            sage: el.rat().parent()
            Fraction Field of Multivariate Polynomial Ring in x, y, z, d over Integer Ring
            sage: subspace = MF.subspace([MF.gen(1)])
            sage: ss_el = subspace(x^5*d-y^2*d)
            sage: ss_el == el
            True
            sage: ss_el.parent()
            Subspace of dimension 1 of ModularForms(n=5, k=20/3, ep=1) over Integer Ring
        """
        super(FormsElement, self).__init__(parent, rat)
        # Quasi forms are accepted as-is; otherwise the element must be
        # homogeneous with exactly the weight and multiplier of the parent.
        if self.AT(["quasi"])>=self._analytic_type:
            pass
        elif not (\
            self.is_homogeneous() and\
            self._weight == parent.weight() and\
            self._ep == parent.ep() ):
            raise ValueError("{} does not correspond to an element of {}.".format(rat, parent))
        # For subspaces with an attached module, membership is verified by
        # attempting to compute the coordinate vector (TypeError -> not inside).
        from .subspace import SubSpaceForms
        if isinstance(parent, SubSpaceForms) and (parent._module is not None):
            try:
                self.coordinate_vector()
            except TypeError:
                raise ValueError("{} does not correspond to an element of {}.".format(rat, parent))
    def _repr_(self):
        """
        Return the string representation of self.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms
            sage: (x,y,z,d)=var("x,y,z,d")
            sage: QuasiModularForms(n=5, k=10, ep=-1)(x^3*z^3-y^3)
            21/(20*d)*q - 4977/(16000*d^2)*q^2 + 297829/(12800000*d^3)*q^3 + 27209679/(20480000000*d^4)*q^4 + O(q^5)
            sage: QuasiModularForms(n=infinity, k=8, ep=1)(x*(x-y^2))
            64*q + 512*q^2 + 768*q^3 - 4096*q^4 + O(q^5)
        """
        # Forms display as their Fourier (q-)expansion, not as the internal
        # rational function.
        return self._qexp_repr()
    # This function is just listed here to emphasize the choice used
    # for the latex representation of ``self``
    def _latex_(self):
        r"""
        Return the LaTeX representation of ``self``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms
            sage: (x,y,z,d)=var("x,y,z,d")
            sage: latex(QuasiModularForms(n=5, k=10, ep=-1)(x^3*z^3-y^3))
            f_{\rho}^{3} E_{2}^{3} - f_{i}^{3}
            sage: latex(QuasiModularForms(n=infinity, k=8, ep=1)(x*(x-y^2)))
            - E_{4} f_{i}^{2} + E_{4}^{2}
        """
        # Unlike _repr_, LaTeX output uses the generator notation inherited
        # from FormsRingElement.
        return super(FormsElement, self)._latex_()
def coordinate_vector(self):
r"""
Return the coordinate vector of ``self`` with
respect to ``self.parent().gens()``.
.. NOTE:
This uses the corresponding function of the
parent. If the parent has not defined a coordinate
vector function or a module for coordinate vectors
then an exception is raised by the parent
(default implementation).
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.space import ModularForms
sage: MF = ModularForms(n=4, k=24, ep=-1)
sage: MF.gen(0).coordinate_vector().parent()
Vector space of dimension 3 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
sage: MF.gen(0).coordinate_vector()
(1, 0, 0)
sage: subspace = MF.subspace([MF.gen(0), MF.gen(2)])
sage: subspace.gen(0).coordinate_vector().parent()
Vector space of dimension 2 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
sage: subspace.gen(0).coordinate_vector()
(1, 0)
sage: subspace.gen(0).coordinate_vector() == subspace.coordinate_vector(subspace.gen(0))
True
"""
return self.parent().coordinate_vector(self)
def ambient_coordinate_vector(self):
r"""
Return the coordinate vector of ``self`` with
respect to ``self.parent().ambient_space().gens()``.
The returned coordinate vector is an element
of ``self.parent().module()``.
Mote: This uses the corresponding function of the
parent. If the parent has not defined a coordinate
vector function or an ambient module for
coordinate vectors then an exception is raised
by the parent (default implementation).
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.space import ModularForms
sage: MF = ModularForms(n=4, k=24, ep=-1)
sage: MF.gen(0).ambient_coordinate_vector().parent()
Vector space of dimension 3 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
sage: MF.gen(0).ambient_coordinate_vector()
(1, 0, 0)
sage: subspace = MF.subspace([MF.gen(0), MF.gen(2)])
sage: subspace.gen(0).ambient_coordinate_vector().parent()
Vector space of degree 3 and dimension 2 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
Basis matrix:
[1 0 0]
[0 0 1]
sage: subspace.gen(0).ambient_coordinate_vector()
(1, 0, 0)
sage: subspace.gen(0).ambient_coordinate_vector() == subspace.ambient_coordinate_vector(subspace.gen(0))
True
"""
return self.parent().ambient_coordinate_vector(self)
def lseries(self, num_prec=None, max_imaginary_part=0, max_asymp_coeffs=40):
r"""
Return the L-series of ``self`` if ``self`` is modular and holomorphic.
Note: This relies on the (pari) based function ``Dokchitser``.
INPUT:
- ``num_prec`` -- An integer denoting the to-be-used numerical precision.
If integer ``num_prec=None`` (default) the default
numerical precision of the parent of ``self`` is used.
- ``max_imaginary_part`` -- A real number (default: 0), indicating up to which
imaginary part the L-series is going to be studied.
- ``max_asymp_coeffs`` -- An integer (default: 40).
OUTPUT:
An interface to Tim Dokchitser's program for computing L-series, namely
the series given by the Fourier coefficients of ``self``.
EXAMPLES::
sage: from sage.modular.modform.eis_series import eisenstein_series_lseries
sage: from sage.modular.modform_hecketriangle.space import ModularForms
sage: f = ModularForms(n=3, k=4).E4()/240
sage: L = f.lseries()
sage: L
L-series associated to the modular form 1/240 + q + 9*q^2 + 28*q^3 + 73*q^4 + O(q^5)
sage: L.conductor
1
sage: L(1).prec()
53
sage: L.check_functional_equation() < 2^(-50)
True
sage: L(1)
-0.0304484570583...
sage: abs(L(1) - eisenstein_series_lseries(4)(1)) < 2^(-53)
True
sage: L.derivative(1, 1)
-0.0504570844798...
sage: L.derivative(1, 2)/2
-0.0350657360354...
sage: L.taylor_series(1, 3)
-0.0304484570583... - 0.0504570844798...*z - 0.0350657360354...*z^2 + O(z^3)
sage: coeffs = f.q_expansion_vector(min_exp=0, max_exp=20, fix_d=True)
sage: sum([coeffs[k]*k^(-10) for k in range(1,len(coeffs))]).n(53)
1.00935215408...
sage: L(10)
1.00935215649...
sage: f = ModularForms(n=6, k=4).E4()
sage: L = f.lseries(num_prec=200)
sage: L.conductor
3
sage: L.check_functional_equation() < 2^(-180)
True
sage: L(1)
-2.92305187760575399490414692523085855811204642031749788...
sage: L(1).prec()
200
sage: coeffs = f.q_expansion_vector(min_exp=0, max_exp=20, fix_d=True)
sage: sum([coeffs[k]*k^(-10) for k in range(1,len(coeffs))]).n(53)
24.2281438789...
sage: L(10).n(53)
24.2281439447...
sage: f = ModularForms(n=8, k=6, ep=-1).E6()
sage: L = f.lseries()
sage: L.check_functional_equation() < 2^(-45)
True
sage: L.taylor_series(3, 3)
0.000000000000... + 0.867197036668...*z + 0.261129628199...*z^2 + O(z^3)
sage: coeffs = f.q_expansion_vector(min_exp=0, max_exp=20, fix_d=True)
sage: sum([coeffs[k]*k^(-10) for k in range(1,len(coeffs))]).n(53)
-13.0290002560...
sage: L(10).n(53)
-13.0290184579...
sage: f = (ModularForms(n=17, k=24).Delta()^2) # long time
sage: L = f.lseries() # long time
sage: L.check_functional_equation() < 2^(-50) # long time
True
sage: L.taylor_series(12, 3) # long time
0.000683924755280... - 0.000875942285963...*z + 0.000647618966023...*z^2 + O(z^3)
sage: coeffs = f.q_expansion_vector(min_exp=0, max_exp=20, fix_d=True) # long time
sage: sum([coeffs[k]*k^(-30) for k in range(1,len(coeffs))]).n(53) # long time
9.31562890589...e-10
sage: L(30).n(53) # long time
9.31562890589...e-10
sage: f = ModularForms(n=infinity, k=2, ep=-1).f_i()
sage: L = f.lseries()
sage: L.check_functional_equation() < 2^(-50)
True
sage: L.taylor_series(1, 3)
0.000000000000... + 5.76543616701...*z + 9.92776715593...*z^2 + O(z^3)
sage: coeffs = f.q_expansion_vector(min_exp=0, max_exp=20, fix_d=True)
sage: sum([coeffs[k]*k^(-10) for k in range(1,len(coeffs))]).n(53)
-23.9781792831...
sage: L(10).n(53)
-23.9781792831...
"""
from sage.rings.all import ZZ
from sage.symbolic.all import pi
from sage.functions.other import sqrt
from sage.lfunctions.dokchitser import Dokchitser
if (not (self.is_modular() and self.is_holomorphic()) or self.weight() == 0):
raise NotImplementedError("L-series are only implemented for non-trivial holomorphic modular forms.")
if (num_prec is None):
num_prec = self.parent().default_num_prec()
conductor = self.group().lam()**2
if (self.group().is_arithmetic()):
conductor = ZZ(conductor)
else:
conductor = conductor.n(num_prec)
gammaV = [0, 1]
weight = self.weight()
eps = self.ep()
# L^*(s) = cor_factor * (2*pi)^(-s)gamma(s)*L(f,s),
cor_factor = (2*sqrt(pi)).n(num_prec)
if (self.is_cuspidal()):
poles = []
residues = []
else:
poles = [ weight ]
val_inf = self.q_expansion_fixed_d(prec=1, d_num_prec=num_prec)[0]
residue = eps * val_inf * cor_factor
# (pari) BUG?
# The residue of the above L^*(s) differs by a factor -1 from
# the residue pari expects (?!?).
residue *= -1
residues = [ residue ]
L = Dokchitser(conductor = conductor,
gammaV = gammaV,
weight = weight,
eps = eps,
poles = poles,
residues = residues,
prec = num_prec)
# TODO for later: Figure out the correct coefficient growth and do L.set_coeff_growth(...)
# num_coeffs = L.num_coeffs()
num_coeffs = L.num_coeffs(1.2)
coeff_vector = [coeff for coeff in self.q_expansion_vector(min_exp=0, max_exp=num_coeffs + 1, fix_d=True)]
pari_precode = "coeff = {};".format(coeff_vector)
L.init_coeffs(v = "coeff[k+1]", pari_precode = pari_precode, max_imaginary_part = max_imaginary_part, max_asymp_coeffs = max_asymp_coeffs)
L.check_functional_equation()
L.rename("L-series associated to the {} form {}".format("cusp" if self.is_cuspidal() else "modular", self))
return L
| 39.277143 | 146 | 0.552339 | from __future__ import absolute_import
from .graded_ring_element import FormsRingElement
class FormsElement(FormsRingElement):
def __init__(self, parent, rat):
super(FormsElement, self).__init__(parent, rat)
if self.AT(["quasi"])>=self._analytic_type:
pass
elif not (\
self.is_homogeneous() and\
self._weight == parent.weight() and\
self._ep == parent.ep() ):
raise ValueError("{} does not correspond to an element of {}.".format(rat, parent))
from .subspace import SubSpaceForms
if isinstance(parent, SubSpaceForms) and (parent._module is not None):
try:
self.coordinate_vector()
except TypeError:
raise ValueError("{} does not correspond to an element of {}.".format(rat, parent))
def _repr_(self):
return self._qexp_repr()
def _latex_(self):
return super(FormsElement, self)._latex_()
def coordinate_vector(self):
return self.parent().coordinate_vector(self)
def ambient_coordinate_vector(self):
return self.parent().ambient_coordinate_vector(self)
def lseries(self, num_prec=None, max_imaginary_part=0, max_asymp_coeffs=40):
from sage.rings.all import ZZ
from sage.symbolic.all import pi
from sage.functions.other import sqrt
from sage.lfunctions.dokchitser import Dokchitser
if (not (self.is_modular() and self.is_holomorphic()) or self.weight() == 0):
raise NotImplementedError("L-series are only implemented for non-trivial holomorphic modular forms.")
if (num_prec is None):
num_prec = self.parent().default_num_prec()
conductor = self.group().lam()**2
if (self.group().is_arithmetic()):
conductor = ZZ(conductor)
else:
conductor = conductor.n(num_prec)
gammaV = [0, 1]
weight = self.weight()
eps = self.ep()
cor_factor = (2*sqrt(pi)).n(num_prec)
if (self.is_cuspidal()):
poles = []
residues = []
else:
poles = [ weight ]
val_inf = self.q_expansion_fixed_d(prec=1, d_num_prec=num_prec)[0]
residue = eps * val_inf * cor_factor
residue *= -1
residues = [ residue ]
L = Dokchitser(conductor = conductor,
gammaV = gammaV,
weight = weight,
eps = eps,
poles = poles,
residues = residues,
prec = num_prec)
num_coeffs = L.num_coeffs(1.2)
coeff_vector = [coeff for coeff in self.q_expansion_vector(min_exp=0, max_exp=num_coeffs + 1, fix_d=True)]
pari_precode = "coeff = {};".format(coeff_vector)
L.init_coeffs(v = "coeff[k+1]", pari_precode = pari_precode, max_imaginary_part = max_imaginary_part, max_asymp_coeffs = max_asymp_coeffs)
L.check_functional_equation()
L.rename("L-series associated to the {} form {}".format("cusp" if self.is_cuspidal() else "modular", self))
return L
| true | true |
1c2d1b0956967dc60e6e8d41c3f9a916abc2aa28 | 3,292 | py | Python | config/settings.py | laactech/mastering-django-orm | 2e6e2806311250147023133a0812c1b0e109db11 | [
"BSD-3-Clause"
] | null | null | null | config/settings.py | laactech/mastering-django-orm | 2e6e2806311250147023133a0812c1b0e109db11 | [
"BSD-3-Clause"
] | null | null | null | config/settings.py | laactech/mastering-django-orm | 2e6e2806311250147023133a0812c1b0e109db11 | [
"BSD-3-Clause"
] | null | null | null | """
Django settings for mastering_django_orm project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "django-insecure-4h--2s#hczboh8*30j0@ui9r__-6&mtd=)$##u@y)fd(y-+q_c"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS: list[str] = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "mastering_django_orm.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "mastering_django_orm.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = "/static/"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
| 26.126984 | 91 | 0.703524 |
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = "django-insecure-4h--2s#hczboh8*30j0@ui9r__-6&mtd=)$##u@y)fd(y-+q_c"
DEBUG = True
ALLOWED_HOSTS: list[str] = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "mastering_django_orm.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "mastering_django_orm.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = "/static/"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
| true | true |
1c2d1b0d281c0e2d6a7fb417e8c2a8b60e46b793 | 12,148 | py | Python | Commands/Counter/counter_StreamlabsSystem.py | RzR32/_streamlabs-chatbot-scripts | 8bedc18f112bf50c6795fc34993ed5fd4bdf6f00 | [
"MIT"
] | 1 | 2020-07-27T20:04:42.000Z | 2020-07-27T20:04:42.000Z | Commands/Counter/counter_StreamlabsSystem.py | RzR32/_streamlabs-chatbot-scripts | 8bedc18f112bf50c6795fc34993ed5fd4bdf6f00 | [
"MIT"
] | null | null | null | Commands/Counter/counter_StreamlabsSystem.py | RzR32/_streamlabs-chatbot-scripts | 8bedc18f112bf50c6795fc34993ed5fd4bdf6f00 | [
"MIT"
] | null | null | null | # coding=utf-8
# ---------------------------
# Import Libraries
# ---------------------------
import codecs
import os
import json
import clr
clr.AddReference("IronPython.SQLite.dll")
clr.AddReference("IronPython.Modules.dll")
# ---------------------------
# [Required] Script Information
# ---------------------------
ScriptName = "Twitch Commands - Counter"
Website = "https://twitch.tv/RzR32"
Description = "Commands - Counter for the Stream - RzR32"
Creator = "RzR32"
Version = "0.1"
# ---------------------------
# Define Global Variables
# ---------------------------
settingsFile = os.path.join(os.path.dirname(__file__), "settings.json")
# ---------------------------------------
# Classes
# ---------------------------------------
class Settings:
"""" Loads settings from file if file is found if not uses default values"""
# The 'default' variable names need to match UI_Config
def __init__(self, settingsFile=None):
if settingsFile and os.path.isfile(settingsFile):
with codecs.open(settingsFile, encoding='utf-8-sig', mode='r') as f:
self.__dict__ = json.load(f, encoding='utf-8-sig')
else: # set variables if no settings file is found
# int
self.int_Trigger = "!int"
self.int_Cooldown = 10
self.int_Permission = "Everyone"
self.int_Target = "TargetUser"
self.int_SpecialUser = "RzR32"
self.int_Output = "ist zum <X> mal die Mitte runtergerannt!"
# cannon
self.cannon_Trigger = "!cannon"
self.cannon_Cooldown = 10
self.cannon_Permission = "Everyone"
self.cannon_Target = "TargetUser"
self.cannon_SpecialUser = "RzR32"
self.cannon_Output = "hat schon <X> mal den Cannon angetoucht!"
# ult
self.ult_Trigger = "!ult"
self.ult_Cooldown = 10
self.ult_Permission = "Everyone"
self.ult_Target = "TargetUser"
self.ult_SpecialUser = "RzR32"
self.ult_Output = "hat schon <X> mal kein Zielwasser getrunken!"
# flash
self.flash_Trigger = "!flash"
self.flash_Cooldown = 10
self.flash_Permission = "Everyone"
self.flash_Target = "TargetUser"
self.flash_SpecialUser = "RzR32"
self.flash_Output = "hat schon <X> mal sein Flash vergessen!"
# ignite
self.ignite_Trigger = "!ignite"
self.ignite_Cooldown = 10
self.ignite_Permission = "Everyone"
self.ignite_Target = "TargetUser"
self.ignite_SpecialUser = "RzR32"
self.ignite_Output = "hat schon <X> mal nicht genug Feufeu gehabt!"
# Reload settings on save through UI
def Reload(self, data):
"""Reload settings on save through UI"""
self.__dict__ = json.loads(data, encoding='utf-8-sig')
def Save(self, settingsfile):
""" Save settings contained within the .json and .js settings files. """
try:
with codecs.open(settingsfile, encoding="utf-8-sig", mode="w+") as f:
json.dump(self.__dict__, f, encoding="utf-8", ensure_ascii=False)
with codecs.open(settingsfile.replace("json", "js"), encoding="utf-8-sig", mode="w+") as f:
f.write("var settings = {0};".format(json.dumps(self.__dict__, encoding='utf-8', ensure_ascii=False)))
except ValueError:
Parent.Log(ScriptName, "Failed to save settings to file.")
# ---------------------------
# [Required] Initialize Data (Only called on load)
# ---------------------------
def Init():
# Load settings
global ScriptSettings
ScriptSettings = Settings(settingsFile)
# if needed, create *int* folder
directory = os.path.join(os.path.dirname(__file__), "int")
if not os.path.exists(directory):
os.makedirs(directory)
# if needed, create *cannon* folder
directory = os.path.join(os.path.dirname(__file__), "cannon")
if not os.path.exists(directory):
os.makedirs(directory)
# if needed, create *ult* folder
directory = os.path.join(os.path.dirname(__file__), "ult")
if not os.path.exists(directory):
os.makedirs(directory)
# if needed, create *flash* folder
directory = os.path.join(os.path.dirname(__file__), "flash")
if not os.path.exists(directory):
os.makedirs(directory)
# if needed, create *ignite* folder
directory = os.path.join(os.path.dirname(__file__), "ignite")
if not os.path.exists(directory):
os.makedirs(directory)
return
# ---------------------------
# [Required] Execute Data / Process messages
# ---------------------------
def Execute(data):
#
# int
# if the cmd is on cool down
if data.IsChatMessage() and data.GetParam(0).lower() == ScriptSettings.int_Trigger and Parent.IsOnUserCooldown(
ScriptName, ScriptSettings.int_Trigger, data.User):
Parent.SendStreamMessage("Time Remaining " + str(Parent.GetUserCooldownDuration(
ScriptName, ScriptSettings.int_Trigger, data.User)))
# make the cmd, if not on cool down
if data.IsChatMessage() and data.GetParam(0).lower() == ScriptSettings.int_Trigger and not Parent.IsOnUserCooldown(
ScriptName, ScriptSettings.int_Trigger, data.User) and Parent.HasPermission(
data.User, ScriptSettings.int_Permission, data.User):
if data.IsFromTwitch():
counter(data, "int", ScriptSettings.int_Trigger, ScriptSettings.int_Cooldown,
ScriptSettings.int_Target, ScriptSettings.int_SpecialUser, ScriptSettings.int_Output)
#
# cannon
# if the cmd is on cool down
if data.IsChatMessage() and data.GetParam(0).lower() == ScriptSettings.cannon_Trigger and Parent.IsOnUserCooldown(
ScriptName, ScriptSettings.cannon_Trigger, data.User):
Parent.SendStreamMessage("Time Remaining " + str(Parent.GetUserCooldownDuration(
ScriptName, ScriptSettings.cannon_Trigger, data.User)))
# make the cmd, if not on cool down
if data.IsChatMessage() and data.GetParam(0).lower() == ScriptSettings.cannon_Trigger and not Parent.IsOnUserCooldown(
ScriptName, ScriptSettings.cannon_Trigger, data.User) and Parent.HasPermission(
data.User, ScriptSettings.cannon_Permission, data.User):
if data.IsFromTwitch():
counter(data, "cannon", ScriptSettings.cannon_Trigger, ScriptSettings.cannon_Cooldown,
ScriptSettings.cannon_Target, ScriptSettings.cannon_SpecialUser, ScriptSettings.cannon_Output)
#
# ult
# if the cmd is on cool down
if data.IsChatMessage() and data.GetParam(0).lower() == ScriptSettings.ult_Trigger and Parent.IsOnUserCooldown(
ScriptName, ScriptSettings.ult_Trigger, data.User):
Parent.SendStreamMessage("Time Remaining " + str(Parent.GetUserCooldownDuration(
ScriptName, ScriptSettings.ult_Trigger, data.User)))
# make the cmd, if not on cool down
if data.IsChatMessage() and data.GetParam(0).lower() == ScriptSettings.ult_Trigger and not Parent.IsOnUserCooldown(
ScriptName, ScriptSettings.ult_Trigger, data.User) and Parent.HasPermission(
data.User, ScriptSettings.ult_Permission, data.User):
if data.IsFromTwitch():
counter(data, "ult", ScriptSettings.ult_Trigger, ScriptSettings.ult_Cooldown,
ScriptSettings.ult_Target, ScriptSettings.ult_SpecialUser, ScriptSettings.ult_Output)
#
# flash
# if the cmd is on cool down
if data.IsChatMessage() and data.GetParam(0).lower() == ScriptSettings.flash_Trigger and Parent.IsOnUserCooldown(
ScriptName, ScriptSettings.flash_Trigger, data.User):
Parent.SendStreamMessage("Time Remaining " + str(Parent.GetUserCooldownDuration(
ScriptName, ScriptSettings.flash_Trigger, data.User)))
# make the cmd, if not on cool down
if data.IsChatMessage() and data.GetParam(0).lower() == ScriptSettings.flash_Trigger and not Parent.IsOnUserCooldown(
ScriptName, ScriptSettings.flash_Trigger, data.User) and Parent.HasPermission(
data.User, ScriptSettings.flash_Permission, data.User):
if data.IsFromTwitch():
counter(data, "flash", ScriptSettings.flash_Trigger, ScriptSettings.flash_Cooldown,
ScriptSettings.flash_Target, ScriptSettings.flash_SpecialUser, ScriptSettings.flash_Output)
# ignite
# if the cmd is on cool down
if data.IsChatMessage() and data.GetParam(0).lower() == ScriptSettings.ignite_Trigger and Parent.IsOnUserCooldown(
ScriptName, ScriptSettings.ignite_Trigger, data.User):
Parent.SendStreamMessage("Time Remaining " + str(Parent.GetUserCooldownDuration(
ScriptName, ScriptSettings.ignite_Trigger, data.User)))
# make the cmd, if not on cool down
if data.IsChatMessage() and data.GetParam(0).lower() == ScriptSettings.ignite_Trigger and not Parent.IsOnUserCooldown(
ScriptName, ScriptSettings.ignite_Trigger, data.User) and Parent.HasPermission(
data.User, ScriptSettings.ignite_Permission, data.User):
if data.IsFromTwitch():
counter(data, "ignite", ScriptSettings.ignite_Trigger, ScriptSettings.ignite_Cooldown,
ScriptSettings.ignite_Target, ScriptSettings.ignite_SpecialUser, ScriptSettings.ignite_Output)
return
# ---------------------------------------
# counter - all - functions
# ---------------------------------------
def counter(data, folder, Trigger, Cooldown, Target, SpecialUser, Output):
username = data.GetParam(1)
name = ""
# get target - name
if Target.__eq__("TargetUser"):
if not username.__eq__(""):
if not username.startswith("@"):
Parent.SendStreamMessage("Bitte gib ein Twitch Namen an! (mit @)")
return
else:
name = username
else:
Parent.SendStreamMessage("Bitte gib ein Twitch Namen an!")
return
elif Target.__eq__("StreamUser"):
name = "@" + Parent.GetChannelName()
elif Target.__eq__("SpecialUser"):
name = "@" + SpecialUser
# get current ignite from file
file_path___counter = "Services/Scripts/Counter/" + folder + "/" + name + ".txt"
# check if file exists - if not create
if not os.path.exists(file_path___counter):
with open(file_path___counter, 'w') as my_file:
my_file.write("0")
pass
# read line from the file
file_counter = open(file_path___counter, "r")
i_counter = file_counter.readline()
file_counter.close()
# add one
i_counter = int(i_counter) + 1
# write line to the file
file_counter = open(file_path___counter, "w")
file_counter.write(i_counter.__str__())
file_counter.close()
# get the output, replace the var with the new ignite
text = Output
text = text.replace("<X>", i_counter.__str__())
text = name + " " + text
# send the message
Parent.SendStreamMessage(text)
# set the cool down
Parent.AddUserCooldown(ScriptName, Trigger, data.User, Cooldown)
return
# ---------------------------
# [Required] Tick method (Gets called during every iteration even when there is no incoming data)
# ---------------------------
def Tick():
return
# ---------------------------
# [Optional] Reload Settings (Called when a user clicks the Save Settings button in the Chatbot UI)
# ---------------------------
def ReloadSettings(jsonData):
"""Reload settings on Save"""
global ScriptSettings
ScriptSettings.Reload(jsonData)
# ---------------------------
# [Optional] Unload (Called when a user reloads their scripts or closes the bot / cleanup stuff)
# ---------------------------
def Unload():
return
# ---------------------------
# [Optional] ScriptToggled (Notifies you when a user disables your script or enables it)
# ---------------------------
def ScriptToggled(state):
return
| 41.745704 | 122 | 0.627428 |
import codecs
import os
import json
import clr
clr.AddReference("IronPython.SQLite.dll")
clr.AddReference("IronPython.Modules.dll")
ScriptName = "Twitch Commands - Counter"
Website = "https://twitch.tv/RzR32"
Description = "Commands - Counter for the Stream - RzR32"
Creator = "RzR32"
Version = "0.1"
settingsFile = os.path.join(os.path.dirname(__file__), "settings.json")
class Settings:
def __init__(self, settingsFile=None):
if settingsFile and os.path.isfile(settingsFile):
with codecs.open(settingsFile, encoding='utf-8-sig', mode='r') as f:
self.__dict__ = json.load(f, encoding='utf-8-sig')
else:
self.int_Trigger = "!int"
self.int_Cooldown = 10
self.int_Permission = "Everyone"
self.int_Target = "TargetUser"
self.int_SpecialUser = "RzR32"
self.int_Output = "ist zum <X> mal die Mitte runtergerannt!"
self.cannon_Trigger = "!cannon"
self.cannon_Cooldown = 10
self.cannon_Permission = "Everyone"
self.cannon_Target = "TargetUser"
self.cannon_SpecialUser = "RzR32"
self.cannon_Output = "hat schon <X> mal den Cannon angetoucht!"
self.ult_Trigger = "!ult"
self.ult_Cooldown = 10
self.ult_Permission = "Everyone"
self.ult_Target = "TargetUser"
self.ult_SpecialUser = "RzR32"
self.ult_Output = "hat schon <X> mal kein Zielwasser getrunken!"
self.flash_Trigger = "!flash"
self.flash_Cooldown = 10
self.flash_Permission = "Everyone"
self.flash_Target = "TargetUser"
self.flash_SpecialUser = "RzR32"
self.flash_Output = "hat schon <X> mal sein Flash vergessen!"
self.ignite_Trigger = "!ignite"
self.ignite_Cooldown = 10
self.ignite_Permission = "Everyone"
self.ignite_Target = "TargetUser"
self.ignite_SpecialUser = "RzR32"
self.ignite_Output = "hat schon <X> mal nicht genug Feufeu gehabt!"
def Reload(self, data):
self.__dict__ = json.loads(data, encoding='utf-8-sig')
def Save(self, settingsfile):
try:
with codecs.open(settingsfile, encoding="utf-8-sig", mode="w+") as f:
json.dump(self.__dict__, f, encoding="utf-8", ensure_ascii=False)
with codecs.open(settingsfile.replace("json", "js"), encoding="utf-8-sig", mode="w+") as f:
f.write("var settings = {0};".format(json.dumps(self.__dict__, encoding='utf-8', ensure_ascii=False)))
except ValueError:
Parent.Log(ScriptName, "Failed to save settings to file.")
def Init():
global ScriptSettings
ScriptSettings = Settings(settingsFile)
directory = os.path.join(os.path.dirname(__file__), "int")
if not os.path.exists(directory):
os.makedirs(directory)
directory = os.path.join(os.path.dirname(__file__), "cannon")
if not os.path.exists(directory):
os.makedirs(directory)
directory = os.path.join(os.path.dirname(__file__), "ult")
if not os.path.exists(directory):
os.makedirs(directory)
directory = os.path.join(os.path.dirname(__file__), "flash")
if not os.path.exists(directory):
os.makedirs(directory)
directory = os.path.join(os.path.dirname(__file__), "ignite")
if not os.path.exists(directory):
os.makedirs(directory)
return
def Execute(data):
    """Chat dispatcher called by the bot host for every incoming event.

    For each configured counter command whose trigger matches the first
    word of a chat message:
      * if the sender is on cooldown, report the remaining time;
      * otherwise, if they have permission and the message came from
        Twitch, bump the counter via :func:`counter`.

    The five original copy-pasted command blocks differed only in the
    settings-attribute prefix, so they are driven by one loop here.
    """
    if not data.IsChatMessage():
        return
    first_word = data.GetParam(0).lower()
    for folder in ("int", "cannon", "ult", "flash", "ignite"):
        trigger = getattr(ScriptSettings, folder + "_Trigger")
        if first_word != trigger:
            continue
        if Parent.IsOnUserCooldown(ScriptName, trigger, data.User):
            Parent.SendStreamMessage("Time Remaining " + str(
                Parent.GetUserCooldownDuration(ScriptName, trigger, data.User)))
        elif Parent.HasPermission(data.User,
                                  getattr(ScriptSettings, folder + "_Permission"),
                                  data.User):
            if data.IsFromTwitch():
                counter(data, folder, trigger,
                        getattr(ScriptSettings, folder + "_Cooldown"),
                        getattr(ScriptSettings, folder + "_Target"),
                        getattr(ScriptSettings, folder + "_SpecialUser"),
                        getattr(ScriptSettings, folder + "_Output"))
    return
def counter(data, folder, Trigger, Cooldown, Target, SpecialUser, Output):
    """Increment and announce a per-user counter stored on disk.

    data        -- chat message event from the bot host
    folder      -- sub-directory holding one counter file per target name
    Trigger     -- command word, also used as the cooldown key
    Cooldown    -- per-user cooldown in seconds
    Target      -- "TargetUser" (use the @name from the message),
                   "StreamUser" (the channel owner) or
                   "SpecialUser" (the configured SpecialUser name)
    SpecialUser -- name used when Target == "SpecialUser"
    Output      -- message template; "<X>" is replaced by the new count
    """
    username = data.GetParam(1)
    name = ""
    if Target == "TargetUser":
        if username == "":
            Parent.SendStreamMessage("Bitte gib ein Twitch Namen an!")
            return
        if not username.startswith("@"):
            Parent.SendStreamMessage("Bitte gib ein Twitch Namen an! (mit @)")
            return
        name = username
    elif Target == "StreamUser":
        name = "@" + Parent.GetChannelName()
    elif Target == "SpecialUser":
        name = "@" + SpecialUser
    counter_path = "Services/Scripts/Counter/" + folder + "/" + name + ".txt"
    # Seed the counter file on first use so the read below always succeeds.
    if not os.path.exists(counter_path):
        with open(counter_path, 'w') as seed:
            seed.write("0")
    # `with` guarantees the handle is closed even if int() raises on a
    # corrupted file (the original open/close pairs leaked in that case).
    with open(counter_path, "r") as fh:
        count = int(fh.readline()) + 1
    with open(counter_path, "w") as fh:
        fh.write(str(count))
    Parent.SendStreamMessage(name + " " + Output.replace("<X>", str(count)))
    Parent.AddUserCooldown(ScriptName, Trigger, data.User, Cooldown)
    return
def Tick():
    """Periodic callback from the bot host; this script needs no ticking."""
    pass
def ReloadSettings(jsonData):
    """Callback from the bot host after the user saves the settings UI.

    Re-parses *jsonData* into the live ScriptSettings object so changes
    take effect without a script restart.
    """
    global ScriptSettings
    ScriptSettings.Reload(jsonData)
def Unload():
    """Called when the script is unloaded; no resources to release."""
    pass
def ScriptToggled(state):
    """Called when the user enables or disables the script; nothing to do."""
    pass
| true | true |
1c2d1b3c4b4a86f83826a190d19ee704cbcf08cd | 17,569 | py | Python | influxdb_client/__init__.py | tomklapka/influxdb-client-python | f8d1407a6190533faed4d645f6ba973b4d2bc8b2 | [
"MIT"
] | null | null | null | influxdb_client/__init__.py | tomklapka/influxdb-client-python | f8d1407a6190533faed4d645f6ba973b4d2bc8b2 | [
"MIT"
] | null | null | null | influxdb_client/__init__.py | tomklapka/influxdb-client-python | f8d1407a6190533faed4d645f6ba973b4d2bc8b2 | [
"MIT"
] | null | null | null | # coding: utf-8
# flake8: noqa
"""
Influx API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import apis into sdk package
from influxdb_client.service.authorizations_service import AuthorizationsService
from influxdb_client.service.buckets_service import BucketsService
from influxdb_client.service.cells_service import CellsService
from influxdb_client.service.checks_service import ChecksService
from influxdb_client.service.dbr_ps_service import DBRPsService
from influxdb_client.service.dashboards_service import DashboardsService
from influxdb_client.service.health_service import HealthService
from influxdb_client.service.labels_service import LabelsService
from influxdb_client.service.notification_endpoints_service import NotificationEndpointsService
from influxdb_client.service.notification_rules_service import NotificationRulesService
from influxdb_client.service.organizations_service import OrganizationsService
from influxdb_client.service.query_service import QueryService
from influxdb_client.service.ready_service import ReadyService
from influxdb_client.service.rules_service import RulesService
from influxdb_client.service.scraper_targets_service import ScraperTargetsService
from influxdb_client.service.secrets_service import SecretsService
from influxdb_client.service.setup_service import SetupService
from influxdb_client.service.sources_service import SourcesService
from influxdb_client.service.tasks_service import TasksService
from influxdb_client.service.telegrafs_service import TelegrafsService
from influxdb_client.service.templates_service import TemplatesService
from influxdb_client.service.users_service import UsersService
from influxdb_client.service.variables_service import VariablesService
from influxdb_client.service.views_service import ViewsService
from influxdb_client.service.write_service import WriteService
from influxdb_client.service.default_service import DefaultService
# import ApiClient
from influxdb_client.api_client import ApiClient
from influxdb_client.configuration import Configuration
# import models into sdk package
from influxdb_client.domain.ast_response import ASTResponse
from influxdb_client.domain.add_resource_member_request_body import AddResourceMemberRequestBody
from influxdb_client.domain.analyze_query_response import AnalyzeQueryResponse
from influxdb_client.domain.analyze_query_response_errors import AnalyzeQueryResponseErrors
from influxdb_client.domain.array_expression import ArrayExpression
from influxdb_client.domain.authorization import Authorization
from influxdb_client.domain.authorization_update_request import AuthorizationUpdateRequest
from influxdb_client.domain.authorizations import Authorizations
from influxdb_client.domain.axes import Axes
from influxdb_client.domain.axis import Axis
from influxdb_client.domain.axis_scale import AxisScale
from influxdb_client.domain.bad_statement import BadStatement
from influxdb_client.domain.binary_expression import BinaryExpression
from influxdb_client.domain.block import Block
from influxdb_client.domain.boolean_literal import BooleanLiteral
from influxdb_client.domain.bucket import Bucket
from influxdb_client.domain.bucket_links import BucketLinks
from influxdb_client.domain.bucket_retention_rules import BucketRetentionRules
from influxdb_client.domain.buckets import Buckets
from influxdb_client.domain.builder_config import BuilderConfig
from influxdb_client.domain.builder_config_aggregate_window import BuilderConfigAggregateWindow
from influxdb_client.domain.builder_functions_type import BuilderFunctionsType
from influxdb_client.domain.builder_tags_type import BuilderTagsType
from influxdb_client.domain.builtin_statement import BuiltinStatement
from influxdb_client.domain.call_expression import CallExpression
from influxdb_client.domain.cell import Cell
from influxdb_client.domain.cell_links import CellLinks
from influxdb_client.domain.cell_update import CellUpdate
from influxdb_client.domain.check import Check
from influxdb_client.domain.check_base import CheckBase
from influxdb_client.domain.check_base_links import CheckBaseLinks
from influxdb_client.domain.check_base_tags import CheckBaseTags
from influxdb_client.domain.check_discriminator import CheckDiscriminator
from influxdb_client.domain.check_patch import CheckPatch
from influxdb_client.domain.check_status_level import CheckStatusLevel
from influxdb_client.domain.check_view_properties import CheckViewProperties
from influxdb_client.domain.checks import Checks
from influxdb_client.domain.conditional_expression import ConditionalExpression
from influxdb_client.domain.constant_variable_properties import ConstantVariableProperties
from influxdb_client.domain.create_cell import CreateCell
from influxdb_client.domain.create_dashboard_request import CreateDashboardRequest
from influxdb_client.domain.dbrp import DBRP
from influxdb_client.domain.dbrp_update import DBRPUpdate
from influxdb_client.domain.dbr_ps import DBRPs
from influxdb_client.domain.dashboard import Dashboard
from influxdb_client.domain.dashboard_color import DashboardColor
from influxdb_client.domain.dashboard_query import DashboardQuery
from influxdb_client.domain.dashboards import Dashboards
from influxdb_client.domain.date_time_literal import DateTimeLiteral
from influxdb_client.domain.deadman_check import DeadmanCheck
from influxdb_client.domain.decimal_places import DecimalPlaces
from influxdb_client.domain.delete_predicate_request import DeletePredicateRequest
from influxdb_client.domain.dialect import Dialect
from influxdb_client.domain.document import Document
from influxdb_client.domain.document_create import DocumentCreate
from influxdb_client.domain.document_links import DocumentLinks
from influxdb_client.domain.document_list_entry import DocumentListEntry
from influxdb_client.domain.document_meta import DocumentMeta
from influxdb_client.domain.document_update import DocumentUpdate
from influxdb_client.domain.documents import Documents
from influxdb_client.domain.duration import Duration
from influxdb_client.domain.duration_literal import DurationLiteral
from influxdb_client.domain.error import Error
from influxdb_client.domain.expression import Expression
from influxdb_client.domain.expression_statement import ExpressionStatement
from influxdb_client.domain.field import Field
from influxdb_client.domain.file import File
from influxdb_client.domain.float_literal import FloatLiteral
from influxdb_client.domain.flux_response import FluxResponse
from influxdb_client.domain.flux_suggestion import FluxSuggestion
from influxdb_client.domain.flux_suggestions import FluxSuggestions
from influxdb_client.domain.function_expression import FunctionExpression
from influxdb_client.domain.gauge_view_properties import GaugeViewProperties
from influxdb_client.domain.greater_threshold import GreaterThreshold
from influxdb_client.domain.http_notification_endpoint import HTTPNotificationEndpoint
from influxdb_client.domain.http_notification_rule import HTTPNotificationRule
from influxdb_client.domain.http_notification_rule_base import HTTPNotificationRuleBase
from influxdb_client.domain.health_check import HealthCheck
from influxdb_client.domain.heatmap_view_properties import HeatmapViewProperties
from influxdb_client.domain.histogram_view_properties import HistogramViewProperties
from influxdb_client.domain.identifier import Identifier
from influxdb_client.domain.import_declaration import ImportDeclaration
from influxdb_client.domain.index_expression import IndexExpression
from influxdb_client.domain.integer_literal import IntegerLiteral
from influxdb_client.domain.is_onboarding import IsOnboarding
from influxdb_client.domain.label import Label
from influxdb_client.domain.label_create_request import LabelCreateRequest
from influxdb_client.domain.label_mapping import LabelMapping
from influxdb_client.domain.label_response import LabelResponse
from influxdb_client.domain.label_update import LabelUpdate
from influxdb_client.domain.labels_response import LabelsResponse
from influxdb_client.domain.language_request import LanguageRequest
from influxdb_client.domain.legend import Legend
from influxdb_client.domain.lesser_threshold import LesserThreshold
from influxdb_client.domain.line_plus_single_stat_properties import LinePlusSingleStatProperties
from influxdb_client.domain.line_protocol_error import LineProtocolError
from influxdb_client.domain.line_protocol_length_error import LineProtocolLengthError
from influxdb_client.domain.links import Links
from influxdb_client.domain.log_event import LogEvent
from influxdb_client.domain.logical_expression import LogicalExpression
from influxdb_client.domain.logs import Logs
from influxdb_client.domain.map_variable_properties import MapVariableProperties
from influxdb_client.domain.markdown_view_properties import MarkdownViewProperties
from influxdb_client.domain.member_assignment import MemberAssignment
from influxdb_client.domain.member_expression import MemberExpression
from influxdb_client.domain.model_property import ModelProperty
from influxdb_client.domain.node import Node
from influxdb_client.domain.notification_endpoint import NotificationEndpoint
from influxdb_client.domain.notification_endpoint_base import NotificationEndpointBase
from influxdb_client.domain.notification_endpoint_base_links import NotificationEndpointBaseLinks
from influxdb_client.domain.notification_endpoint_discriminator import NotificationEndpointDiscriminator
from influxdb_client.domain.notification_endpoint_type import NotificationEndpointType
from influxdb_client.domain.notification_endpoint_update import NotificationEndpointUpdate
from influxdb_client.domain.notification_endpoints import NotificationEndpoints
from influxdb_client.domain.notification_rule import NotificationRule
from influxdb_client.domain.notification_rule_base import NotificationRuleBase
from influxdb_client.domain.notification_rule_base_links import NotificationRuleBaseLinks
from influxdb_client.domain.notification_rule_discriminator import NotificationRuleDiscriminator
from influxdb_client.domain.notification_rule_update import NotificationRuleUpdate
from influxdb_client.domain.notification_rules import NotificationRules
from influxdb_client.domain.object_expression import ObjectExpression
from influxdb_client.domain.onboarding_request import OnboardingRequest
from influxdb_client.domain.onboarding_response import OnboardingResponse
from influxdb_client.domain.option_statement import OptionStatement
from influxdb_client.domain.organization import Organization
from influxdb_client.domain.organization_links import OrganizationLinks
from influxdb_client.domain.organizations import Organizations
from influxdb_client.domain.package import Package
from influxdb_client.domain.package_clause import PackageClause
from influxdb_client.domain.pager_duty_notification_endpoint import PagerDutyNotificationEndpoint
from influxdb_client.domain.pager_duty_notification_rule import PagerDutyNotificationRule
from influxdb_client.domain.pager_duty_notification_rule_base import PagerDutyNotificationRuleBase
from influxdb_client.domain.paren_expression import ParenExpression
from influxdb_client.domain.password_reset_body import PasswordResetBody
from influxdb_client.domain.permission import Permission
from influxdb_client.domain.permission_resource import PermissionResource
from influxdb_client.domain.pipe_expression import PipeExpression
from influxdb_client.domain.pipe_literal import PipeLiteral
from influxdb_client.domain.post_bucket_request import PostBucketRequest
from influxdb_client.domain.post_check import PostCheck
from influxdb_client.domain.post_notification_endpoint import PostNotificationEndpoint
from influxdb_client.domain.post_notification_rule import PostNotificationRule
from influxdb_client.domain.property_key import PropertyKey
from influxdb_client.domain.query import Query
from influxdb_client.domain.query_edit_mode import QueryEditMode
from influxdb_client.domain.query_variable_properties import QueryVariableProperties
from influxdb_client.domain.query_variable_properties_values import QueryVariablePropertiesValues
from influxdb_client.domain.range_threshold import RangeThreshold
from influxdb_client.domain.ready import Ready
from influxdb_client.domain.regexp_literal import RegexpLiteral
from influxdb_client.domain.renamable_field import RenamableField
from influxdb_client.domain.resource_member import ResourceMember
from influxdb_client.domain.resource_members import ResourceMembers
from influxdb_client.domain.resource_owner import ResourceOwner
from influxdb_client.domain.resource_owners import ResourceOwners
from influxdb_client.domain.return_statement import ReturnStatement
from influxdb_client.domain.routes import Routes
from influxdb_client.domain.routes_external import RoutesExternal
from influxdb_client.domain.routes_query import RoutesQuery
from influxdb_client.domain.routes_system import RoutesSystem
from influxdb_client.domain.rule_status_level import RuleStatusLevel
from influxdb_client.domain.run import Run
from influxdb_client.domain.run_links import RunLinks
from influxdb_client.domain.run_log import RunLog
from influxdb_client.domain.run_manually import RunManually
from influxdb_client.domain.runs import Runs
from influxdb_client.domain.smtp_notification_rule import SMTPNotificationRule
from influxdb_client.domain.smtp_notification_rule_base import SMTPNotificationRuleBase
from influxdb_client.domain.scatter_view_properties import ScatterViewProperties
from influxdb_client.domain.scraper_target_request import ScraperTargetRequest
from influxdb_client.domain.scraper_target_response import ScraperTargetResponse
from influxdb_client.domain.scraper_target_responses import ScraperTargetResponses
from influxdb_client.domain.secret_keys import SecretKeys
from influxdb_client.domain.secret_keys_response import SecretKeysResponse
from influxdb_client.domain.single_stat_view_properties import SingleStatViewProperties
from influxdb_client.domain.slack_notification_endpoint import SlackNotificationEndpoint
from influxdb_client.domain.slack_notification_rule import SlackNotificationRule
from influxdb_client.domain.slack_notification_rule_base import SlackNotificationRuleBase
from influxdb_client.domain.source import Source
from influxdb_client.domain.source_links import SourceLinks
from influxdb_client.domain.sources import Sources
from influxdb_client.domain.statement import Statement
from influxdb_client.domain.status_rule import StatusRule
from influxdb_client.domain.string_literal import StringLiteral
from influxdb_client.domain.table_view_properties import TableViewProperties
from influxdb_client.domain.tag_rule import TagRule
from influxdb_client.domain.task import Task
from influxdb_client.domain.task_create_request import TaskCreateRequest
from influxdb_client.domain.task_links import TaskLinks
from influxdb_client.domain.task_status_type import TaskStatusType
from influxdb_client.domain.task_update_request import TaskUpdateRequest
from influxdb_client.domain.tasks import Tasks
from influxdb_client.domain.telegraf import Telegraf
from influxdb_client.domain.telegraf_plugin import TelegrafPlugin
from influxdb_client.domain.telegraf_request import TelegrafRequest
from influxdb_client.domain.telegraf_request_metadata import TelegrafRequestMetadata
from influxdb_client.domain.telegrafs import Telegrafs
from influxdb_client.domain.test_statement import TestStatement
from influxdb_client.domain.threshold import Threshold
from influxdb_client.domain.threshold_base import ThresholdBase
from influxdb_client.domain.threshold_check import ThresholdCheck
from influxdb_client.domain.unary_expression import UnaryExpression
from influxdb_client.domain.unsigned_integer_literal import UnsignedIntegerLiteral
from influxdb_client.domain.user import User
from influxdb_client.domain.user_links import UserLinks
from influxdb_client.domain.users import Users
from influxdb_client.domain.users_links import UsersLinks
from influxdb_client.domain.variable import Variable
from influxdb_client.domain.variable_assignment import VariableAssignment
from influxdb_client.domain.variable_links import VariableLinks
from influxdb_client.domain.variable_properties import VariableProperties
from influxdb_client.domain.variables import Variables
from influxdb_client.domain.view import View
from influxdb_client.domain.view_links import ViewLinks
from influxdb_client.domain.view_properties import ViewProperties
from influxdb_client.domain.views import Views
from influxdb_client.domain.write_precision import WritePrecision
from influxdb_client.domain.xy_geom import XYGeom
from influxdb_client.domain.xy_view_properties import XYViewProperties
from influxdb_client.client.authorizations_api import AuthorizationsApi
from influxdb_client.client.bucket_api import BucketsApi
from influxdb_client.client.labels_api import LabelsApi
from influxdb_client.client.organizations_api import OrganizationsApi
from influxdb_client.client.query_api import QueryApi
from influxdb_client.client.tasks_api import TasksApi
from influxdb_client.client.users_api import UsersApi
from influxdb_client.client.write_api import WriteApi, WriteOptions
from influxdb_client.client.influxdb_client import InfluxDBClient
from influxdb_client.client.write.point import Point
__version__ = '1.11.0dev'
| 62.523132 | 120 | 0.906369 |
from __future__ import absolute_import
from influxdb_client.service.authorizations_service import AuthorizationsService
from influxdb_client.service.buckets_service import BucketsService
from influxdb_client.service.cells_service import CellsService
from influxdb_client.service.checks_service import ChecksService
from influxdb_client.service.dbr_ps_service import DBRPsService
from influxdb_client.service.dashboards_service import DashboardsService
from influxdb_client.service.health_service import HealthService
from influxdb_client.service.labels_service import LabelsService
from influxdb_client.service.notification_endpoints_service import NotificationEndpointsService
from influxdb_client.service.notification_rules_service import NotificationRulesService
from influxdb_client.service.organizations_service import OrganizationsService
from influxdb_client.service.query_service import QueryService
from influxdb_client.service.ready_service import ReadyService
from influxdb_client.service.rules_service import RulesService
from influxdb_client.service.scraper_targets_service import ScraperTargetsService
from influxdb_client.service.secrets_service import SecretsService
from influxdb_client.service.setup_service import SetupService
from influxdb_client.service.sources_service import SourcesService
from influxdb_client.service.tasks_service import TasksService
from influxdb_client.service.telegrafs_service import TelegrafsService
from influxdb_client.service.templates_service import TemplatesService
from influxdb_client.service.users_service import UsersService
from influxdb_client.service.variables_service import VariablesService
from influxdb_client.service.views_service import ViewsService
from influxdb_client.service.write_service import WriteService
from influxdb_client.service.default_service import DefaultService
from influxdb_client.api_client import ApiClient
from influxdb_client.configuration import Configuration
from influxdb_client.domain.ast_response import ASTResponse
from influxdb_client.domain.add_resource_member_request_body import AddResourceMemberRequestBody
from influxdb_client.domain.analyze_query_response import AnalyzeQueryResponse
from influxdb_client.domain.analyze_query_response_errors import AnalyzeQueryResponseErrors
from influxdb_client.domain.array_expression import ArrayExpression
from influxdb_client.domain.authorization import Authorization
from influxdb_client.domain.authorization_update_request import AuthorizationUpdateRequest
from influxdb_client.domain.authorizations import Authorizations
from influxdb_client.domain.axes import Axes
from influxdb_client.domain.axis import Axis
from influxdb_client.domain.axis_scale import AxisScale
from influxdb_client.domain.bad_statement import BadStatement
from influxdb_client.domain.binary_expression import BinaryExpression
from influxdb_client.domain.block import Block
from influxdb_client.domain.boolean_literal import BooleanLiteral
from influxdb_client.domain.bucket import Bucket
from influxdb_client.domain.bucket_links import BucketLinks
from influxdb_client.domain.bucket_retention_rules import BucketRetentionRules
from influxdb_client.domain.buckets import Buckets
from influxdb_client.domain.builder_config import BuilderConfig
from influxdb_client.domain.builder_config_aggregate_window import BuilderConfigAggregateWindow
from influxdb_client.domain.builder_functions_type import BuilderFunctionsType
from influxdb_client.domain.builder_tags_type import BuilderTagsType
from influxdb_client.domain.builtin_statement import BuiltinStatement
from influxdb_client.domain.call_expression import CallExpression
from influxdb_client.domain.cell import Cell
from influxdb_client.domain.cell_links import CellLinks
from influxdb_client.domain.cell_update import CellUpdate
from influxdb_client.domain.check import Check
from influxdb_client.domain.check_base import CheckBase
from influxdb_client.domain.check_base_links import CheckBaseLinks
from influxdb_client.domain.check_base_tags import CheckBaseTags
from influxdb_client.domain.check_discriminator import CheckDiscriminator
from influxdb_client.domain.check_patch import CheckPatch
from influxdb_client.domain.check_status_level import CheckStatusLevel
from influxdb_client.domain.check_view_properties import CheckViewProperties
from influxdb_client.domain.checks import Checks
from influxdb_client.domain.conditional_expression import ConditionalExpression
from influxdb_client.domain.constant_variable_properties import ConstantVariableProperties
from influxdb_client.domain.create_cell import CreateCell
from influxdb_client.domain.create_dashboard_request import CreateDashboardRequest
from influxdb_client.domain.dbrp import DBRP
from influxdb_client.domain.dbrp_update import DBRPUpdate
from influxdb_client.domain.dbr_ps import DBRPs
from influxdb_client.domain.dashboard import Dashboard
from influxdb_client.domain.dashboard_color import DashboardColor
from influxdb_client.domain.dashboard_query import DashboardQuery
from influxdb_client.domain.dashboards import Dashboards
from influxdb_client.domain.date_time_literal import DateTimeLiteral
from influxdb_client.domain.deadman_check import DeadmanCheck
from influxdb_client.domain.decimal_places import DecimalPlaces
from influxdb_client.domain.delete_predicate_request import DeletePredicateRequest
from influxdb_client.domain.dialect import Dialect
from influxdb_client.domain.document import Document
from influxdb_client.domain.document_create import DocumentCreate
from influxdb_client.domain.document_links import DocumentLinks
from influxdb_client.domain.document_list_entry import DocumentListEntry
from influxdb_client.domain.document_meta import DocumentMeta
from influxdb_client.domain.document_update import DocumentUpdate
from influxdb_client.domain.documents import Documents
from influxdb_client.domain.duration import Duration
from influxdb_client.domain.duration_literal import DurationLiteral
from influxdb_client.domain.error import Error
from influxdb_client.domain.expression import Expression
from influxdb_client.domain.expression_statement import ExpressionStatement
from influxdb_client.domain.field import Field
from influxdb_client.domain.file import File
from influxdb_client.domain.float_literal import FloatLiteral
from influxdb_client.domain.flux_response import FluxResponse
from influxdb_client.domain.flux_suggestion import FluxSuggestion
from influxdb_client.domain.flux_suggestions import FluxSuggestions
from influxdb_client.domain.function_expression import FunctionExpression
from influxdb_client.domain.gauge_view_properties import GaugeViewProperties
from influxdb_client.domain.greater_threshold import GreaterThreshold
from influxdb_client.domain.http_notification_endpoint import HTTPNotificationEndpoint
from influxdb_client.domain.http_notification_rule import HTTPNotificationRule
from influxdb_client.domain.http_notification_rule_base import HTTPNotificationRuleBase
from influxdb_client.domain.health_check import HealthCheck
from influxdb_client.domain.heatmap_view_properties import HeatmapViewProperties
from influxdb_client.domain.histogram_view_properties import HistogramViewProperties
from influxdb_client.domain.identifier import Identifier
from influxdb_client.domain.import_declaration import ImportDeclaration
from influxdb_client.domain.index_expression import IndexExpression
from influxdb_client.domain.integer_literal import IntegerLiteral
from influxdb_client.domain.is_onboarding import IsOnboarding
from influxdb_client.domain.label import Label
from influxdb_client.domain.label_create_request import LabelCreateRequest
from influxdb_client.domain.label_mapping import LabelMapping
from influxdb_client.domain.label_response import LabelResponse
from influxdb_client.domain.label_update import LabelUpdate
from influxdb_client.domain.labels_response import LabelsResponse
from influxdb_client.domain.language_request import LanguageRequest
from influxdb_client.domain.legend import Legend
from influxdb_client.domain.lesser_threshold import LesserThreshold
from influxdb_client.domain.line_plus_single_stat_properties import LinePlusSingleStatProperties
from influxdb_client.domain.line_protocol_error import LineProtocolError
from influxdb_client.domain.line_protocol_length_error import LineProtocolLengthError
from influxdb_client.domain.links import Links
from influxdb_client.domain.log_event import LogEvent
from influxdb_client.domain.logical_expression import LogicalExpression
from influxdb_client.domain.logs import Logs
from influxdb_client.domain.map_variable_properties import MapVariableProperties
from influxdb_client.domain.markdown_view_properties import MarkdownViewProperties
from influxdb_client.domain.member_assignment import MemberAssignment
from influxdb_client.domain.member_expression import MemberExpression
from influxdb_client.domain.model_property import ModelProperty
from influxdb_client.domain.node import Node
from influxdb_client.domain.notification_endpoint import NotificationEndpoint
from influxdb_client.domain.notification_endpoint_base import NotificationEndpointBase
from influxdb_client.domain.notification_endpoint_base_links import NotificationEndpointBaseLinks
from influxdb_client.domain.notification_endpoint_discriminator import NotificationEndpointDiscriminator
from influxdb_client.domain.notification_endpoint_type import NotificationEndpointType
from influxdb_client.domain.notification_endpoint_update import NotificationEndpointUpdate
from influxdb_client.domain.notification_endpoints import NotificationEndpoints
from influxdb_client.domain.notification_rule import NotificationRule
from influxdb_client.domain.notification_rule_base import NotificationRuleBase
from influxdb_client.domain.notification_rule_base_links import NotificationRuleBaseLinks
from influxdb_client.domain.notification_rule_discriminator import NotificationRuleDiscriminator
from influxdb_client.domain.notification_rule_update import NotificationRuleUpdate
from influxdb_client.domain.notification_rules import NotificationRules
from influxdb_client.domain.object_expression import ObjectExpression
from influxdb_client.domain.onboarding_request import OnboardingRequest
from influxdb_client.domain.onboarding_response import OnboardingResponse
from influxdb_client.domain.option_statement import OptionStatement
from influxdb_client.domain.organization import Organization
from influxdb_client.domain.organization_links import OrganizationLinks
from influxdb_client.domain.organizations import Organizations
from influxdb_client.domain.package import Package
from influxdb_client.domain.package_clause import PackageClause
from influxdb_client.domain.pager_duty_notification_endpoint import PagerDutyNotificationEndpoint
from influxdb_client.domain.pager_duty_notification_rule import PagerDutyNotificationRule
from influxdb_client.domain.pager_duty_notification_rule_base import PagerDutyNotificationRuleBase
from influxdb_client.domain.paren_expression import ParenExpression
from influxdb_client.domain.password_reset_body import PasswordResetBody
from influxdb_client.domain.permission import Permission
from influxdb_client.domain.permission_resource import PermissionResource
from influxdb_client.domain.pipe_expression import PipeExpression
from influxdb_client.domain.pipe_literal import PipeLiteral
from influxdb_client.domain.post_bucket_request import PostBucketRequest
from influxdb_client.domain.post_check import PostCheck
from influxdb_client.domain.post_notification_endpoint import PostNotificationEndpoint
from influxdb_client.domain.post_notification_rule import PostNotificationRule
from influxdb_client.domain.property_key import PropertyKey
from influxdb_client.domain.query import Query
from influxdb_client.domain.query_edit_mode import QueryEditMode
from influxdb_client.domain.query_variable_properties import QueryVariableProperties
from influxdb_client.domain.query_variable_properties_values import QueryVariablePropertiesValues
from influxdb_client.domain.range_threshold import RangeThreshold
from influxdb_client.domain.ready import Ready
from influxdb_client.domain.regexp_literal import RegexpLiteral
from influxdb_client.domain.renamable_field import RenamableField
from influxdb_client.domain.resource_member import ResourceMember
from influxdb_client.domain.resource_members import ResourceMembers
from influxdb_client.domain.resource_owner import ResourceOwner
from influxdb_client.domain.resource_owners import ResourceOwners
from influxdb_client.domain.return_statement import ReturnStatement
from influxdb_client.domain.routes import Routes
from influxdb_client.domain.routes_external import RoutesExternal
from influxdb_client.domain.routes_query import RoutesQuery
from influxdb_client.domain.routes_system import RoutesSystem
from influxdb_client.domain.rule_status_level import RuleStatusLevel
from influxdb_client.domain.run import Run
from influxdb_client.domain.run_links import RunLinks
from influxdb_client.domain.run_log import RunLog
from influxdb_client.domain.run_manually import RunManually
from influxdb_client.domain.runs import Runs
from influxdb_client.domain.smtp_notification_rule import SMTPNotificationRule
from influxdb_client.domain.smtp_notification_rule_base import SMTPNotificationRuleBase
from influxdb_client.domain.scatter_view_properties import ScatterViewProperties
from influxdb_client.domain.scraper_target_request import ScraperTargetRequest
from influxdb_client.domain.scraper_target_response import ScraperTargetResponse
from influxdb_client.domain.scraper_target_responses import ScraperTargetResponses
from influxdb_client.domain.secret_keys import SecretKeys
from influxdb_client.domain.secret_keys_response import SecretKeysResponse
from influxdb_client.domain.single_stat_view_properties import SingleStatViewProperties
from influxdb_client.domain.slack_notification_endpoint import SlackNotificationEndpoint
from influxdb_client.domain.slack_notification_rule import SlackNotificationRule
from influxdb_client.domain.slack_notification_rule_base import SlackNotificationRuleBase
from influxdb_client.domain.source import Source
from influxdb_client.domain.source_links import SourceLinks
from influxdb_client.domain.sources import Sources
from influxdb_client.domain.statement import Statement
from influxdb_client.domain.status_rule import StatusRule
from influxdb_client.domain.string_literal import StringLiteral
from influxdb_client.domain.table_view_properties import TableViewProperties
from influxdb_client.domain.tag_rule import TagRule
from influxdb_client.domain.task import Task
from influxdb_client.domain.task_create_request import TaskCreateRequest
from influxdb_client.domain.task_links import TaskLinks
from influxdb_client.domain.task_status_type import TaskStatusType
from influxdb_client.domain.task_update_request import TaskUpdateRequest
from influxdb_client.domain.tasks import Tasks
from influxdb_client.domain.telegraf import Telegraf
from influxdb_client.domain.telegraf_plugin import TelegrafPlugin
from influxdb_client.domain.telegraf_request import TelegrafRequest
from influxdb_client.domain.telegraf_request_metadata import TelegrafRequestMetadata
from influxdb_client.domain.telegrafs import Telegrafs
from influxdb_client.domain.test_statement import TestStatement
from influxdb_client.domain.threshold import Threshold
from influxdb_client.domain.threshold_base import ThresholdBase
from influxdb_client.domain.threshold_check import ThresholdCheck
from influxdb_client.domain.unary_expression import UnaryExpression
from influxdb_client.domain.unsigned_integer_literal import UnsignedIntegerLiteral
from influxdb_client.domain.user import User
from influxdb_client.domain.user_links import UserLinks
from influxdb_client.domain.users import Users
from influxdb_client.domain.users_links import UsersLinks
from influxdb_client.domain.variable import Variable
from influxdb_client.domain.variable_assignment import VariableAssignment
from influxdb_client.domain.variable_links import VariableLinks
from influxdb_client.domain.variable_properties import VariableProperties
from influxdb_client.domain.variables import Variables
from influxdb_client.domain.view import View
from influxdb_client.domain.view_links import ViewLinks
from influxdb_client.domain.view_properties import ViewProperties
from influxdb_client.domain.views import Views
from influxdb_client.domain.write_precision import WritePrecision
from influxdb_client.domain.xy_geom import XYGeom
from influxdb_client.domain.xy_view_properties import XYViewProperties
from influxdb_client.client.authorizations_api import AuthorizationsApi
from influxdb_client.client.bucket_api import BucketsApi
from influxdb_client.client.labels_api import LabelsApi
from influxdb_client.client.organizations_api import OrganizationsApi
from influxdb_client.client.query_api import QueryApi
from influxdb_client.client.tasks_api import TasksApi
from influxdb_client.client.users_api import UsersApi
from influxdb_client.client.write_api import WriteApi, WriteOptions
from influxdb_client.client.influxdb_client import InfluxDBClient
from influxdb_client.client.write.point import Point
__version__ = '1.11.0dev'
| true | true |
1c2d1c95cee30dd87c9a443915be8fd3956f0364 | 3,695 | py | Python | critiquebrainz/frontend/external/mbspotify.py | code-master5/critiquebrainz | a231ef27923f54f8c3abb0c368e871215423546e | [
"Apache-2.0"
] | null | null | null | critiquebrainz/frontend/external/mbspotify.py | code-master5/critiquebrainz | a231ef27923f54f8c3abb0c368e871215423546e | [
"Apache-2.0"
] | null | null | null | critiquebrainz/frontend/external/mbspotify.py | code-master5/critiquebrainz | a231ef27923f54f8c3abb0c368e871215423546e | [
"Apache-2.0"
] | null | null | null | """
This module provides interface to Spotify ID mapper - mbspotify.
Source code of mbspotify is available at https://github.com/metabrainz/mbspotify.
"""
import json
import requests
from requests.exceptions import RequestException
from requests.adapters import HTTPAdapter
from flask_babel import lazy_gettext
from brainzutils import cache
from critiquebrainz.frontend import flash
_base_url = ""  # mbspotify server base URL; populated by init()
_key = ""  # access key for submission endpoints; populated by init()
_CACHE_NAMESPACE = "mbspotify_mappings"  # cache namespace for mapping lookups
_UNAVAILABLE_MSG = "Spotify mapping server is unavailable. You will not see an embedded player."
def init(base_url, access_key):
    """Configure the module with the mbspotify server URL and access key.

    Args:
        base_url: Base URL of the mbspotify server.
        access_key: Access key used to authorize submissions.
    """
    global _base_url, _key
    _key = access_key
    _base_url = base_url
def mappings(mbid=None):
    """Get Spotify URI mappings for a specified MusicBrainz ID.

    Successful lookups are cached (namespace ``_CACHE_NAMESPACE``, keyed
    by MBID) to avoid hitting the mapping server on every request.

    Args:
        mbid: MusicBrainz ID to look up.

    Returns:
        List containing Spotify URIs that are mapped to specified MBID.
        Empty list if the mapping server is unavailable or not configured.
    """
    # NOTE(review): _base_url is initialized to "" (not None), so this guard
    # only fires when init() was explicitly called with None — confirm intent.
    if _base_url is None:
        flash.warn(lazy_gettext(_UNAVAILABLE_MSG))
        return []
    data = cache.get(mbid, _CACHE_NAMESPACE)
    if not data:
        try:
            session = requests.Session()
            # Retry transient failures up to 2 times before giving up.
            session.mount(_base_url, HTTPAdapter(max_retries=2))
            resp = session.post(
                url=_base_url + 'mapping',
                headers={'Content-Type': 'application/json'},
                data=json.dumps({'mbid': mbid}),
            )
            resp.raise_for_status()
            data = resp.json().get('mappings')
        except RequestException:
            # Consistency fix: reuse the shared _UNAVAILABLE_MSG constant
            # instead of duplicating the message literal here.
            flash.warn(lazy_gettext(_UNAVAILABLE_MSG))
            return []
        cache.set(key=mbid, namespace=_CACHE_NAMESPACE, val=data)
    return data
def add_mapping(mbid, spotify_uri, user_id):
    """Submit new Spotify mapping.

    Invalidates the cached mapping list for the MBID after submission.

    Returns:
        Two values. The first is a boolean indicating whether the submission
        succeeded. The second is an exception if errors occurred, else None.
    """
    try:
        if _base_url is None or _key is None:
            raise ValueError("Missing MBSPOTIFY_BASE_URI or MBSPOTIFY_ACCESS_KEY.")
        payload = {
            'mbid': str(mbid),
            'spotify_uri': str(spotify_uri),
            'user': str(user_id),
        }
        http = requests.Session()
        http.mount(_base_url, HTTPAdapter(max_retries=2))
        response = http.post(
            _base_url + 'mapping/add',
            params={'key': _key},
            headers={'Content-Type': 'application/json'},
            data=json.dumps(payload),
        )
        # Drop the cached mapping list so the new mapping becomes visible.
        cache.delete(mbid, _CACHE_NAMESPACE)
        return response.status_code == 200, None
    except (RequestException, ValueError) as e:
        return False, e
def vote(mbid, spotify_uri, user_id):
    """Submit report about incorrect Spotify mapping.

    Invalidates the cached mapping list for the MBID after submission.

    Returns:
        Two values. The first is a boolean indicating whether the submission
        succeeded. The second is an exception if errors occurred, else None.
    """
    try:
        if _base_url is None or _key is None:
            raise ValueError("Missing MBSPOTIFY_BASE_URI or MBSPOTIFY_ACCESS_KEY.")
        payload = {
            'mbid': str(mbid),
            'spotify_uri': str(spotify_uri),
            'user': str(user_id),
        }
        http = requests.Session()
        http.mount(_base_url, HTTPAdapter(max_retries=2))
        response = http.post(
            _base_url + 'mapping/vote',
            params={'key': _key},
            headers={'Content-Type': 'application/json'},
            data=json.dumps(payload),
        )
        cache.delete(mbid, _CACHE_NAMESPACE)
        return response.status_code == 200, None
    except (RequestException, ValueError) as e:
        return False, e
| 37.704082 | 120 | 0.646279 | import json
import requests
from requests.exceptions import RequestException
from requests.adapters import HTTPAdapter
from flask_babel import lazy_gettext
from brainzutils import cache
from critiquebrainz.frontend import flash
_base_url = ""  # mbspotify server base URL; populated by init()
_key = ""  # access key for submission endpoints; populated by init()
_CACHE_NAMESPACE = "mbspotify_mappings"  # cache namespace for mapping lookups
_UNAVAILABLE_MSG = "Spotify mapping server is unavailable. You will not see an embedded player."
def init(base_url, access_key):
    """Configure the module-level mbspotify server URL and access key."""
    global _base_url, _key
    _base_url = base_url
    _key = access_key
def mappings(mbid=None):
    """Get Spotify URI mappings for a specified MusicBrainz ID.

    Successful lookups are cached (namespace _CACHE_NAMESPACE, keyed by MBID).

    Returns:
        List of Spotify URIs mapped to the MBID; empty list if the mapping
        server is unavailable or not configured.
    """
    # NOTE(review): _base_url is initialized to "" (not None), so this guard
    # only fires when init() was explicitly called with None — confirm intent.
    if _base_url is None:
        flash.warn(lazy_gettext(_UNAVAILABLE_MSG))
        return []
    data = cache.get(mbid, _CACHE_NAMESPACE)
    if not data:
        try:
            session = requests.Session()
            # Retry transient failures up to 2 times before giving up.
            session.mount(_base_url, HTTPAdapter(max_retries=2))
            resp = session.post(
                url=_base_url + 'mapping',
                headers={'Content-Type': 'application/json'},
                data=json.dumps({'mbid': mbid}),
            )
            resp.raise_for_status()
            data = resp.json().get('mappings')
        except RequestException:
            flash.warn(lazy_gettext("Spotify mapping server is unavailable. You will not see an embedded player."))
            return []
        cache.set(key=mbid, namespace=_CACHE_NAMESPACE, val=data)
    return data
def add_mapping(mbid, spotify_uri, user_id):
    """Submit new Spotify mapping to the mbspotify server.

    Returns:
        Two values. The first is a boolean indicating whether the submission
        succeeded. The second is an exception if errors occurred, else None.
    """
    try:
        if _base_url is None or _key is None:
            raise ValueError("Missing MBSPOTIFY_BASE_URI or MBSPOTIFY_ACCESS_KEY.")
        session = requests.Session()
        session.mount(_base_url, HTTPAdapter(max_retries=2))  # retry transient failures
        resp = session.post(_base_url + 'mapping/add',
                            params={'key': _key},
                            headers={'Content-Type': 'application/json'},
                            data=json.dumps({'mbid': str(mbid), 'spotify_uri': str(spotify_uri), 'user': str(user_id)}))
        # Drop the cached mapping list so the new mapping becomes visible.
        cache.delete(mbid, _CACHE_NAMESPACE)
        return resp.status_code == 200, None
    except (RequestException, ValueError) as e:
        return False, e
def vote(mbid, spotify_uri, user_id):
    """Submit report about an incorrect Spotify mapping.

    Returns:
        Two values. The first is a boolean indicating whether the submission
        succeeded. The second is an exception if errors occurred, else None.
    """
    try:
        if _base_url is None or _key is None:
            raise ValueError("Missing MBSPOTIFY_BASE_URI or MBSPOTIFY_ACCESS_KEY.")
        session = requests.Session()
        session.mount(_base_url, HTTPAdapter(max_retries=2))  # retry transient failures
        resp = session.post(_base_url + 'mapping/vote',
                            params={'key': _key},
                            headers={'Content-Type': 'application/json'},
                            data=json.dumps({'mbid': str(mbid), 'spotify_uri': str(spotify_uri), 'user': str(user_id)}))
        # Drop the cached mapping list so a re-fetch reflects the vote.
        cache.delete(mbid, _CACHE_NAMESPACE)
        return resp.status_code == 200, None
    except (RequestException, ValueError) as e:
        return False, e
| true | true |
1c2d1dd07514cd2508c30d802fca461fd7afaa77 | 21,327 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/lsn/lsnappsprofile.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/lsn/lsnappsprofile.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/lsn/lsnappsprofile.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lsnappsprofile(base_resource) :
""" Configuration for LSN Application Profile resource. """
	def __init__(self) :
		r"""Initialize every configurable attribute of the profile to None.

		Each field is exposed through a same-named property below; None
		means "not set".
		"""
		self._appsprofilename = None	# object identifier (profile name)
		self._transportprotocol = None	# TCP, UDP or ICMP
		self._ippooling = None	# PAIRED or RANDOM NAT IP allocation
		self._mapping = None	# LSN mapping reuse policy
		self._filtering = None	# inbound packet filtering policy
		self._tcpproxy = None	# ENABLED/DISABLED TCP proxy
		self._td = None	# traffic domain ID
		self._l2info = None	# ENABLED/DISABLED L2CONN/MBF support
		self.___count = None	# populated by count queries
@property
def appsprofilename(self) :
r"""Name for the LSN application profile. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the LSN application profile is created. The following requirement applies only to the Citrix ADC CLI: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "lsn application profile1" or 'lsn application profile1').<br/>Minimum length = 1<br/>Maximum length = 127.
"""
try :
return self._appsprofilename
except Exception as e:
raise e
@appsprofilename.setter
def appsprofilename(self, appsprofilename) :
r"""Name for the LSN application profile. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the LSN application profile is created. The following requirement applies only to the Citrix ADC CLI: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "lsn application profile1" or 'lsn application profile1').<br/>Minimum length = 1<br/>Maximum length = 127
"""
try :
self._appsprofilename = appsprofilename
except Exception as e:
raise e
@property
def transportprotocol(self) :
r"""Name of the protocol for which the parameters of this LSN application profile applies.<br/>Possible values = TCP, UDP, ICMP.
"""
try :
return self._transportprotocol
except Exception as e:
raise e
@transportprotocol.setter
def transportprotocol(self, transportprotocol) :
r"""Name of the protocol for which the parameters of this LSN application profile applies.<br/>Possible values = TCP, UDP, ICMP
"""
try :
self._transportprotocol = transportprotocol
except Exception as e:
raise e
@property
def ippooling(self) :
r"""NAT IP address allocation options for sessions associated with the same subscriber.
Available options function as follows:
* Paired - The Citrix ADC allocates the same NAT IP address for all sessions associated with the same subscriber. When all the ports of a NAT IP address are used in LSN sessions (for same or multiple subscribers), the Citrix ADC ADC drops any new connection from the subscriber.
* Random - The Citrix ADC allocates random NAT IP addresses, from the pool, for different sessions associated with the same subscriber.
This parameter is applicable to dynamic NAT allocation only.<br/>Default value: RANDOM<br/>Possible values = PAIRED, RANDOM.
"""
try :
return self._ippooling
except Exception as e:
raise e
@ippooling.setter
def ippooling(self, ippooling) :
r"""NAT IP address allocation options for sessions associated with the same subscriber.
Available options function as follows:
* Paired - The Citrix ADC allocates the same NAT IP address for all sessions associated with the same subscriber. When all the ports of a NAT IP address are used in LSN sessions (for same or multiple subscribers), the Citrix ADC ADC drops any new connection from the subscriber.
* Random - The Citrix ADC allocates random NAT IP addresses, from the pool, for different sessions associated with the same subscriber.
This parameter is applicable to dynamic NAT allocation only.<br/>Default value: RANDOM<br/>Possible values = PAIRED, RANDOM
"""
try :
self._ippooling = ippooling
except Exception as e:
raise e
@property
def mapping(self) :
r"""Type of LSN mapping to apply to subsequent packets originating from the same subscriber IP address and port.
Consider an example of an LSN mapping that includes the mapping of the subscriber IP:port (X:x), NAT IP:port (N:n), and external host IP:port (Y:y).
Available options function as follows:
* ENDPOINT-INDEPENDENT - Reuse the LSN mapping for subsequent packets sent from the same subscriber IP address and port (X:x) to any external IP address and port.
* ADDRESS-DEPENDENT - Reuse the LSN mapping for subsequent packets sent from the same subscriber IP address and port (X:x) to the same external IP address (Y), regardless of the external port.
* ADDRESS-PORT-DEPENDENT - Reuse the LSN mapping for subsequent packets sent from the same internal IP address and port (X:x) to the same external IP address and port (Y:y) while the mapping is still active.<br/>Default value: ADDRESS-PORT-DEPENDENT<br/>Possible values = ENDPOINT-INDEPENDENT, ADDRESS-DEPENDENT, ADDRESS-PORT-DEPENDENT.
"""
try :
return self._mapping
except Exception as e:
raise e
@mapping.setter
def mapping(self, mapping) :
r"""Type of LSN mapping to apply to subsequent packets originating from the same subscriber IP address and port.
Consider an example of an LSN mapping that includes the mapping of the subscriber IP:port (X:x), NAT IP:port (N:n), and external host IP:port (Y:y).
Available options function as follows:
* ENDPOINT-INDEPENDENT - Reuse the LSN mapping for subsequent packets sent from the same subscriber IP address and port (X:x) to any external IP address and port.
* ADDRESS-DEPENDENT - Reuse the LSN mapping for subsequent packets sent from the same subscriber IP address and port (X:x) to the same external IP address (Y), regardless of the external port.
* ADDRESS-PORT-DEPENDENT - Reuse the LSN mapping for subsequent packets sent from the same internal IP address and port (X:x) to the same external IP address and port (Y:y) while the mapping is still active.<br/>Default value: ADDRESS-PORT-DEPENDENT<br/>Possible values = ENDPOINT-INDEPENDENT, ADDRESS-DEPENDENT, ADDRESS-PORT-DEPENDENT
"""
try :
self._mapping = mapping
except Exception as e:
raise e
@property
def filtering(self) :
r"""Type of filter to apply to packets originating from external hosts.
Consider an example of an LSN mapping that includes the mapping of subscriber IP:port (X:x), NAT IP:port (N:n), and external host IP:port (Y:y).
Available options function as follows:
* ENDPOINT INDEPENDENT - Filters out only packets not destined to the subscriber IP address and port X:x, regardless of the external host IP address and port source (Z:z). The Citrix ADC forwards any packets destined to X:x. In other words, sending packets from the subscriber to any external IP address is sufficient to allow packets from any external hosts to the subscriber.
* ADDRESS DEPENDENT - Filters out packets not destined to subscriber IP address and port X:x. In addition, the ADC filters out packets from Y:y destined for the subscriber (X:x) if the client has not previously sent packets to Y:anyport (external port independent). In other words, receiving packets from a specific external host requires that the subscriber first send packets to that specific external host's IP address.
* ADDRESS PORT DEPENDENT (the default) - Filters out packets not destined to subscriber IP address and port (X:x). In addition, the Citrix ADC filters out packets from Y:y destined for the subscriber (X:x) if the subscriber has not previously sent packets to Y:y. In other words, receiving packets from a specific external host requires that the subscriber first send packets first to that external IP address and port.<br/>Default value: ADDRESS-PORT-DEPENDENT<br/>Possible values = ENDPOINT-INDEPENDENT, ADDRESS-DEPENDENT, ADDRESS-PORT-DEPENDENT.
"""
try :
return self._filtering
except Exception as e:
raise e
@filtering.setter
def filtering(self, filtering) :
r"""Type of filter to apply to packets originating from external hosts.
Consider an example of an LSN mapping that includes the mapping of subscriber IP:port (X:x), NAT IP:port (N:n), and external host IP:port (Y:y).
Available options function as follows:
* ENDPOINT INDEPENDENT - Filters out only packets not destined to the subscriber IP address and port X:x, regardless of the external host IP address and port source (Z:z). The Citrix ADC forwards any packets destined to X:x. In other words, sending packets from the subscriber to any external IP address is sufficient to allow packets from any external hosts to the subscriber.
* ADDRESS DEPENDENT - Filters out packets not destined to subscriber IP address and port X:x. In addition, the ADC filters out packets from Y:y destined for the subscriber (X:x) if the client has not previously sent packets to Y:anyport (external port independent). In other words, receiving packets from a specific external host requires that the subscriber first send packets to that specific external host's IP address.
* ADDRESS PORT DEPENDENT (the default) - Filters out packets not destined to subscriber IP address and port (X:x). In addition, the Citrix ADC filters out packets from Y:y destined for the subscriber (X:x) if the subscriber has not previously sent packets to Y:y. In other words, receiving packets from a specific external host requires that the subscriber first send packets first to that external IP address and port.<br/>Default value: ADDRESS-PORT-DEPENDENT<br/>Possible values = ENDPOINT-INDEPENDENT, ADDRESS-DEPENDENT, ADDRESS-PORT-DEPENDENT
"""
try :
self._filtering = filtering
except Exception as e:
raise e
@property
def tcpproxy(self) :
r"""Enable TCP proxy, which enables the Citrix ADC to optimize the TCP traffic by using Layer 4 features.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._tcpproxy
except Exception as e:
raise e
@tcpproxy.setter
def tcpproxy(self, tcpproxy) :
r"""Enable TCP proxy, which enables the Citrix ADC to optimize the TCP traffic by using Layer 4 features.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._tcpproxy = tcpproxy
except Exception as e:
raise e
@property
def td(self) :
r"""ID of the traffic domain through which the Citrix ADC sends the outbound traffic after performing LSN.
If you do not specify an ID, the ADC sends the outbound traffic through the default traffic domain, which has an ID of 0.<br/>Default value: 4095.
"""
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
r"""ID of the traffic domain through which the Citrix ADC sends the outbound traffic after performing LSN.
If you do not specify an ID, the ADC sends the outbound traffic through the default traffic domain, which has an ID of 0.<br/>Default value: 4095
"""
try :
self._td = td
except Exception as e:
raise e
@property
def l2info(self) :
r"""Enable l2info by creating natpcbs for LSN, which enables the Citrix ADC to use L2CONN/MBF with LSN.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._l2info
except Exception as e:
raise e
@l2info.setter
def l2info(self, l2info) :
r"""Enable l2info by creating natpcbs for LSN, which enables the Citrix ADC to use L2CONN/MBF with LSN.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._l2info = l2info
except Exception as e:
raise e
	def _get_nitro_response(self, service, response) :
		r""" converts nitro response into object and returns the object array in case of get request.

		Deserializes the raw nitro payload into a lsnappsprofile_response
		wrapper, raises nitro_exception for error responses, and returns the
		contained lsnappsprofile object(s).
		"""
		try :
			# Deserialize the raw payload into the generated response wrapper.
			result = service.payload_formatter.string_to_resource(lsnappsprofile_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# NOTE(review): errorcode 444 appears to denote an expired or
				# invalid session, hence the session reset — confirm against
				# the nitro API documentation.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					# A severity is reported: only "ERROR" is treated as fatal.
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					# No severity reported: any non-zero errorcode is fatal.
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.lsnappsprofile
		except Exception as e :
			raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.appsprofilename is not None :
return str(self.appsprofilename)
return None
except Exception as e :
raise e
@classmethod
def filter_add_parameters(cls, resource) :
r""" Use this function to create a resource with only add operation specific parameters.
"""
addresource = lsnappsprofile()
addresource.appsprofilename = resource.appsprofilename
addresource.transportprotocol = resource.transportprotocol
addresource.ippooling = resource.ippooling
addresource.mapping = resource.mapping
addresource.filtering = resource.filtering
addresource.tcpproxy = resource.tcpproxy
addresource.td = resource.td
addresource.l2info = resource.l2info
return addresource
@classmethod
def add(cls, client, resource) :
r""" Use this API to add lsnappsprofile.
"""
try :
if type(resource) is not list :
addresource = cls.filter_add_parameters(resource)
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ lsnappsprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i] = cls.filter_add_parameters(resource[i])
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def filter_delete_parameters(cls, resource) :
r""" Use this function to create a resource with only delete operation specific parameters.
"""
deleteresource = lsnappsprofile()
deleteresource.appsprofilename = resource.appsprofilename
return deleteresource
@classmethod
def delete(cls, client, resource) :
r""" Use this API to delete lsnappsprofile.
"""
try :
if type(resource) is not list :
deleteresource = lsnappsprofile()
if type(resource) != type(deleteresource):
deleteresource.appsprofilename = resource
else :
deleteresource = cls.filter_delete_parameters(resource)
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ lsnappsprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].appsprofilename = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ lsnappsprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i] = cls.filter_delete_parameters(resource)
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def filter_update_parameters(cls, resource) :
r""" Use this function to create a resource with only update operation specific parameters.
"""
updateresource = lsnappsprofile()
updateresource.appsprofilename = resource.appsprofilename
updateresource.ippooling = resource.ippooling
updateresource.mapping = resource.mapping
updateresource.filtering = resource.filtering
updateresource.tcpproxy = resource.tcpproxy
updateresource.td = resource.td
updateresource.l2info = resource.l2info
return updateresource
@classmethod
def update(cls, client, resource) :
r""" Use this API to update lsnappsprofile.
"""
try :
if type(resource) is not list :
updateresource = cls.filter_update_parameters(resource)
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ lsnappsprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i] = cls.filter_update_parameters(resource[i])
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
	@classmethod
	def unset(cls, client, resource, args) :
		r""" Use this API to unset the properties of lsnappsprofile resource.
		Properties that need to be unset are specified in args array.

		`resource` may be a profile name, a lsnappsprofile instance, or a
		list of either.
		"""
		try :
			if type(resource) is not list :
				unsetresource = lsnappsprofile()
				if type(resource) != type(unsetresource):
					# Caller passed the profile name directly.
					unsetresource.appsprofilename = resource
				else :
					unsetresource.appsprofilename = resource.appsprofilename
				return unsetresource.unset_resource(client, args)
			else :
				if type(resource[0]) != cls :
					# List of profile names.
					if (resource and len(resource) > 0) :
						unsetresources = [ lsnappsprofile() for _ in range(len(resource))]
						for i in range(len(resource)) :
							unsetresources[i].appsprofilename = resource[i]
				else :
					# List of lsnappsprofile objects.
					if (resource and len(resource) > 0) :
						unsetresources = [ lsnappsprofile() for _ in range(len(resource))]
						for i in range(len(resource)) :
							unsetresources[i].appsprofilename = resource[i].appsprofilename
				result = cls.unset_bulk_request(client, unsetresources, args)
				return result
		except Exception as e :
			raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the lsnappsprofile resources that are configured on netscaler.
"""
try :
if not name :
obj = lsnappsprofile()
response = obj.get_resources(client, option_)
else :
if type(name) is not list :
if type(name) == cls :
raise Exception('Invalid parameter name:{0}'.format(type(name)))
obj = lsnappsprofile()
obj.appsprofilename = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
if type(name[0]) == cls :
raise Exception('Invalid parameter name:{0}'.format(type(name[0])))
response = [lsnappsprofile() for _ in range(len(name))]
obj = [lsnappsprofile() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = lsnappsprofile()
obj[i].appsprofilename = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of lsnappsprofile resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = lsnappsprofile()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the lsnappsprofile resources configured on NetScaler.
"""
try :
obj = lsnappsprofile()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of lsnappsprofile resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = lsnappsprofile()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Mapping:
ENDPOINT_INDEPENDENT = "ENDPOINT-INDEPENDENT"
ADDRESS_DEPENDENT = "ADDRESS-DEPENDENT"
ADDRESS_PORT_DEPENDENT = "ADDRESS-PORT-DEPENDENT"
class L2info:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Filtering:
ENDPOINT_INDEPENDENT = "ENDPOINT-INDEPENDENT"
ADDRESS_DEPENDENT = "ADDRESS-DEPENDENT"
ADDRESS_PORT_DEPENDENT = "ADDRESS-PORT-DEPENDENT"
class Ippooling:
PAIRED = "PAIRED"
RANDOM = "RANDOM"
class Tcpproxy:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Transportprotocol:
TCP = "TCP"
UDP = "UDP"
ICMP = "ICMP"
class lsnappsprofile_response(base_response) :
def __init__(self, length=1) :
self.lsnappsprofile = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lsnappsprofile = [lsnappsprofile() for _ in range(length)]
| 44.06405 | 595 | 0.736625 |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lsnappsprofile(base_resource) :
def __init__(self) :
self._appsprofilename = None
self._transportprotocol = None
self._ippooling = None
self._mapping = None
self._filtering = None
self._tcpproxy = None
self._td = None
self._l2info = None
self.___count = None
@property
def appsprofilename(self) :
try :
return self._appsprofilename
except Exception as e:
raise e
@appsprofilename.setter
def appsprofilename(self, appsprofilename) :
try :
self._appsprofilename = appsprofilename
except Exception as e:
raise e
@property
def transportprotocol(self) :
try :
return self._transportprotocol
except Exception as e:
raise e
@transportprotocol.setter
def transportprotocol(self, transportprotocol) :
try :
self._transportprotocol = transportprotocol
except Exception as e:
raise e
@property
def ippooling(self) :
try :
return self._ippooling
except Exception as e:
raise e
@ippooling.setter
def ippooling(self, ippooling) :
try :
self._ippooling = ippooling
except Exception as e:
raise e
@property
def mapping(self) :
try :
return self._mapping
except Exception as e:
raise e
@mapping.setter
def mapping(self, mapping) :
try :
self._mapping = mapping
except Exception as e:
raise e
@property
def filtering(self) :
try :
return self._filtering
except Exception as e:
raise e
@filtering.setter
def filtering(self, filtering) :
try :
self._filtering = filtering
except Exception as e:
raise e
@property
def tcpproxy(self) :
try :
return self._tcpproxy
except Exception as e:
raise e
@tcpproxy.setter
def tcpproxy(self, tcpproxy) :
try :
self._tcpproxy = tcpproxy
except Exception as e:
raise e
@property
def td(self) :
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
try :
self._td = td
except Exception as e:
raise e
@property
def l2info(self) :
try :
return self._l2info
except Exception as e:
raise e
@l2info.setter
def l2info(self, l2info) :
try :
self._l2info = l2info
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
try :
result = service.payload_formatter.string_to_resource(lsnappsprofile_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lsnappsprofile
except Exception as e :
raise e
def _get_object_name(self) :
try :
if self.appsprofilename is not None :
return str(self.appsprofilename)
return None
except Exception as e :
raise e
@classmethod
def filter_add_parameters(cls, resource) :
addresource = lsnappsprofile()
addresource.appsprofilename = resource.appsprofilename
addresource.transportprotocol = resource.transportprotocol
addresource.ippooling = resource.ippooling
addresource.mapping = resource.mapping
addresource.filtering = resource.filtering
addresource.tcpproxy = resource.tcpproxy
addresource.td = resource.td
addresource.l2info = resource.l2info
return addresource
@classmethod
def add(cls, client, resource) :
try :
if type(resource) is not list :
addresource = cls.filter_add_parameters(resource)
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ lsnappsprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i] = cls.filter_add_parameters(resource[i])
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def filter_delete_parameters(cls, resource) :
deleteresource = lsnappsprofile()
deleteresource.appsprofilename = resource.appsprofilename
return deleteresource
@classmethod
def delete(cls, client, resource) :
try :
if type(resource) is not list :
deleteresource = lsnappsprofile()
if type(resource) != type(deleteresource):
deleteresource.appsprofilename = resource
else :
deleteresource = cls.filter_delete_parameters(resource)
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ lsnappsprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].appsprofilename = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ lsnappsprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i] = cls.filter_delete_parameters(resource)
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def filter_update_parameters(cls, resource) :
updateresource = lsnappsprofile()
updateresource.appsprofilename = resource.appsprofilename
updateresource.ippooling = resource.ippooling
updateresource.mapping = resource.mapping
updateresource.filtering = resource.filtering
updateresource.tcpproxy = resource.tcpproxy
updateresource.td = resource.td
updateresource.l2info = resource.l2info
return updateresource
@classmethod
def update(cls, client, resource) :
try :
if type(resource) is not list :
updateresource = cls.filter_update_parameters(resource)
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ lsnappsprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i] = cls.filter_update_parameters(resource[i])
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
try :
if type(resource) is not list :
unsetresource = lsnappsprofile()
if type(resource) != type(unsetresource):
unsetresource.appsprofilename = resource
else :
unsetresource.appsprofilename = resource.appsprofilename
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ lsnappsprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].appsprofilename = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ lsnappsprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].appsprofilename = resource[i].appsprofilename
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
try :
if not name :
obj = lsnappsprofile()
response = obj.get_resources(client, option_)
else :
if type(name) is not list :
if type(name) == cls :
raise Exception('Invalid parameter name:{0}'.format(type(name)))
obj = lsnappsprofile()
obj.appsprofilename = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
if type(name[0]) == cls :
raise Exception('Invalid parameter name:{0}'.format(type(name[0])))
response = [lsnappsprofile() for _ in range(len(name))]
obj = [lsnappsprofile() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = lsnappsprofile()
obj[i].appsprofilename = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
try :
obj = lsnappsprofile()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
try :
obj = lsnappsprofile()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
try :
obj = lsnappsprofile()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Mapping:
ENDPOINT_INDEPENDENT = "ENDPOINT-INDEPENDENT"
ADDRESS_DEPENDENT = "ADDRESS-DEPENDENT"
ADDRESS_PORT_DEPENDENT = "ADDRESS-PORT-DEPENDENT"
class L2info:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Filtering:
ENDPOINT_INDEPENDENT = "ENDPOINT-INDEPENDENT"
ADDRESS_DEPENDENT = "ADDRESS-DEPENDENT"
ADDRESS_PORT_DEPENDENT = "ADDRESS-PORT-DEPENDENT"
class Ippooling:
PAIRED = "PAIRED"
RANDOM = "RANDOM"
class Tcpproxy:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Transportprotocol:
TCP = "TCP"
UDP = "UDP"
ICMP = "ICMP"
class lsnappsprofile_response(base_response) :
def __init__(self, length=1) :
self.lsnappsprofile = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lsnappsprofile = [lsnappsprofile() for _ in range(length)]
| true | true |
1c2d1f49886d1c91a4c8d57b0a630de28838ef98 | 9,508 | py | Python | test/test_testcasecontainer.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | [
"Apache-2.0"
] | 6 | 2018-08-10T17:11:10.000Z | 2020-04-29T07:05:36.000Z | test/test_testcasecontainer.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | [
"Apache-2.0"
] | 58 | 2018-08-13T08:36:08.000Z | 2021-07-07T08:32:52.000Z | test/test_testcasecontainer.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | [
"Apache-2.0"
] | 7 | 2018-08-10T12:53:18.000Z | 2021-11-08T05:15:42.000Z | # pylint: disable=missing-docstring,no-self-use,redefined-builtin,too-many-arguments
# pylint: disable=protected-access,unused-variable
"""
Copyright 2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
import unittest
import argparse
import mock
from icetea_lib.TestSuite.TestcaseContainer import TestcaseContainer, TestStatus
from icetea_lib.Result import Result
from icetea_lib.bench import Bench
def mock_load_tc(*args, **kwargs): # pylint: disable=unused-argument
return None
def mock_raise_type(*args, **kwargs): # pylint: disable=unused-argument
raise TypeError()
def mock_raise_import(*args, **kwargs): # pylint: disable=unused-argument
raise ImportError()
class MockInstance(object):
def __init__(self, name, version, type=None, skip_val=True, skip_info=None):
self.info = skip_info if skip_info else {"only_type": "process"}
self.config = {
"compatible": {
"framework": {
"name": name, "version": version}
},
"requirements": {
"duts": {
"*": {
"type": type
}
}
}
}
self.skip_val = skip_val
def get_result(self):
return Result()
def get_test_name(self):
return "Icetea"
def skip(self):
return self.skip_val
def skip_info(self):
return self.info
def skip_reason(self):
return "test"
class TCContainerTestcase(unittest.TestCase):
def setUp(self):
with open(os.path.join("./icetea_lib", 'tc_schema.json')) as data_file:
self.tc_meta_schema = json.load(data_file)
self.args_tc = argparse.Namespace(
available=False, version=False, bin=None, binary=False, channel=None,
clean=False, cloud=False, component=False, device='*', gdb=None,
gdbs=None, gdbs_port=2345, group=False, iface=None, kill_putty=False, list=False,
listsuites=False, log='./log', my_duts=None, nobuf=None,
pause_when_external_dut=False, putty=False, reset=False, silent=True,
skip_case=False, skip_rampdown=False, skip_rampup=False,
status=False, suite=None, tc="test_cmdline", tc_cfg=None, tcdir="examples",
testtype=False, type="process", subtype=None, use_sniffer=False,
valgrind=False, valgrind_tool=None, verbose=False, repeat=0, feature=None,
suitedir="./test/suites", forceflash_once=True, forceflash=True,
stop_on_failure=False, ignore_invalid_params=False)
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.load_class")
def test_load_testcase_fails(self, mock_loadclass):
testcase = TestcaseContainer.find_testcases("examples.test_cmdline", "./examples",
self.tc_meta_schema)[0]
with self.assertRaises(TypeError):
testcase._load_testcase(1)
mock_loadclass.side_effect = [ValueError, None]
with self.assertRaises(ImportError):
testcase._load_testcase("test_case")
with self.assertRaises(ImportError):
testcase._load_testcase("test_case")
def test_check_major_version(self):
testcase = TestcaseContainer.find_testcases("examples.test_cmdline", "examples",
self.tc_meta_schema)[0]
self.assertFalse(testcase._check_major_version("1.0.0", "0.9.1"))
self.assertFalse(testcase._check_major_version("1.0.0", ">0.0.2"))
self.assertFalse(testcase._check_major_version("1.0.0", ">=0.0.3"))
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.get_fw_version")
def test_version_checker(self, mock_fwver):
mock_fwver.return_value = "0.9.0"
testcase = TestcaseContainer.find_testcases("examples.test_cmdline", "examples",
self.tc_meta_schema)[0]
self.assertIsNone(testcase._check_version(MockInstance("Icetea", "0.9.0")))
res = testcase._check_version(MockInstance("Icetea", "0.2.2"))
self.assertEqual(res.get_verdict(), "skip")
mock_fwver.return_value = "0.2.2"
self.assertIsNone(testcase._check_version(MockInstance("Icetea", "<0.9.0")))
res = testcase._check_version(MockInstance("Icetea", ">0.9.0"))
self.assertEqual(res.get_verdict(), "skip")
mock_fwver.return_value = "0.9.0"
self.assertIsNone(testcase._check_version(MockInstance("Icetea", ">=0.9.0")))
mock_fwver.return_value = "0.9.1"
self.assertIsNone(testcase._check_version(MockInstance("Icetea", ">=0.9.0")))
def test_check_skip(self):
testcase = TestcaseContainer.find_testcases("examples.test_cmdline", "./examples",
self.tc_meta_schema)[0]
res = testcase._check_skip(MockInstance("test", "0.9.0", "process"))
self.assertFalse(res)
self.assertFalse(testcase._check_skip(MockInstance("test", "0.9.0", "hardware",
skip_val=False)))
res = testcase._check_skip(MockInstance("test", "0.9.0", "process", True, {"test": "test"}))
self.assertEqual(res.get_verdict(), "skip")
def test_find_testcases(self):
lst = TestcaseContainer.find_testcases("test.testbase.dummy_multiples", "./test/testbase",
self.tc_meta_schema)
self.assertEqual(len(lst), 2)
lst = TestcaseContainer.find_testcases("test.testbase.dummy", "./test/testbase",
self.tc_meta_schema)
self.assertEqual(len(lst), 1)
with self.assertRaises(TypeError):
TestcaseContainer.find_testcases(1, "./test/testbase", self.tc_meta_schema)
with self.assertRaises(ValueError):
TestcaseContainer.find_testcases("", "./test/testbase", self.tc_meta_schema)
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.import_module")
def test_find_testcases_error(self, mocked_import):
mocked_import.side_effect = [ImportError]
with self.assertRaises(ImportError):
lst = TestcaseContainer.find_testcases("test.testbase.dummy_multiples",
"./test/testbase",
self.tc_meta_schema)
def test_create_new_bench_instance(self):
lst = TestcaseContainer.find_testcases("test.testbase.dummy", "./test/testbase",
self.tc_meta_schema)
inst = lst[0]._create_new_bench_instance("test.testbase.dummy")
self.assertTrue(isinstance(inst, Bench))
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.TestcaseContainer.get_instance")
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.TestcaseContainer._check_version")
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.TestcaseContainer._check_skip")
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.get_tc_arguments")
def test_run(self, mock_parser, mock_skip, mock_version, mock_instance):
testcase = TestcaseContainer.find_testcases("examples.test_cmdline", "./examples",
self.tc_meta_schema)[0]
# Initialize mocks
parser = mock.MagicMock()
instance = mock.MagicMock()
instance.run = mock.MagicMock()
instance.run.return_value = 0
instance.get_result = mock.MagicMock()
instance.get_result.return_value = Result()
mock_instance.return_value = instance
mock_skip.return_value = None
mock_version.return_value = None
parser.parse_known_args = mock.MagicMock()
parser.parse_known_args.return_value = (mock.MagicMock(), [])
mock_parser.return_value = parser
# Mocked a succesful run
testcase.run()
# Skip returns 1, tc should be skipped
mock_skip.return_value = 1
mock_version.return_value = None
self.assertEqual(testcase.status, TestStatus.FINISHED)
testcase.run()
# Version mismatch
mock_skip.return_value = None
mock_version.return_value = 1
self.assertEqual(testcase.status, TestStatus.FINISHED)
testcase.run()
# Unknown arguments
mock_version.return_value = None
parser.parse_known_args.return_value = (self.args_tc, [1])
res = testcase.run()
self.assertEqual(testcase.status, TestStatus.FINISHED)
self.assertEqual(res.get_verdict(), 'inconclusive')
result = Result()
result.retcode = 1012
instance.get_result.return_value = result
instance.run.return_value = 1012
parser.parse_known_args.return_value = (mock.MagicMock(), [])
testcase.run()
if __name__ == '__main__':
unittest.main()
| 42.636771 | 100 | 0.640618 |
import json
import os
import unittest
import argparse
import mock
from icetea_lib.TestSuite.TestcaseContainer import TestcaseContainer, TestStatus
from icetea_lib.Result import Result
from icetea_lib.bench import Bench
def mock_load_tc(*args, **kwargs):
return None
def mock_raise_type(*args, **kwargs):
raise TypeError()
def mock_raise_import(*args, **kwargs):
raise ImportError()
class MockInstance(object):
def __init__(self, name, version, type=None, skip_val=True, skip_info=None):
self.info = skip_info if skip_info else {"only_type": "process"}
self.config = {
"compatible": {
"framework": {
"name": name, "version": version}
},
"requirements": {
"duts": {
"*": {
"type": type
}
}
}
}
self.skip_val = skip_val
def get_result(self):
return Result()
def get_test_name(self):
return "Icetea"
def skip(self):
return self.skip_val
def skip_info(self):
return self.info
def skip_reason(self):
return "test"
class TCContainerTestcase(unittest.TestCase):
def setUp(self):
with open(os.path.join("./icetea_lib", 'tc_schema.json')) as data_file:
self.tc_meta_schema = json.load(data_file)
self.args_tc = argparse.Namespace(
available=False, version=False, bin=None, binary=False, channel=None,
clean=False, cloud=False, component=False, device='*', gdb=None,
gdbs=None, gdbs_port=2345, group=False, iface=None, kill_putty=False, list=False,
listsuites=False, log='./log', my_duts=None, nobuf=None,
pause_when_external_dut=False, putty=False, reset=False, silent=True,
skip_case=False, skip_rampdown=False, skip_rampup=False,
status=False, suite=None, tc="test_cmdline", tc_cfg=None, tcdir="examples",
testtype=False, type="process", subtype=None, use_sniffer=False,
valgrind=False, valgrind_tool=None, verbose=False, repeat=0, feature=None,
suitedir="./test/suites", forceflash_once=True, forceflash=True,
stop_on_failure=False, ignore_invalid_params=False)
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.load_class")
def test_load_testcase_fails(self, mock_loadclass):
testcase = TestcaseContainer.find_testcases("examples.test_cmdline", "./examples",
self.tc_meta_schema)[0]
with self.assertRaises(TypeError):
testcase._load_testcase(1)
mock_loadclass.side_effect = [ValueError, None]
with self.assertRaises(ImportError):
testcase._load_testcase("test_case")
with self.assertRaises(ImportError):
testcase._load_testcase("test_case")
def test_check_major_version(self):
testcase = TestcaseContainer.find_testcases("examples.test_cmdline", "examples",
self.tc_meta_schema)[0]
self.assertFalse(testcase._check_major_version("1.0.0", "0.9.1"))
self.assertFalse(testcase._check_major_version("1.0.0", ">0.0.2"))
self.assertFalse(testcase._check_major_version("1.0.0", ">=0.0.3"))
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.get_fw_version")
def test_version_checker(self, mock_fwver):
mock_fwver.return_value = "0.9.0"
testcase = TestcaseContainer.find_testcases("examples.test_cmdline", "examples",
self.tc_meta_schema)[0]
self.assertIsNone(testcase._check_version(MockInstance("Icetea", "0.9.0")))
res = testcase._check_version(MockInstance("Icetea", "0.2.2"))
self.assertEqual(res.get_verdict(), "skip")
mock_fwver.return_value = "0.2.2"
self.assertIsNone(testcase._check_version(MockInstance("Icetea", "<0.9.0")))
res = testcase._check_version(MockInstance("Icetea", ">0.9.0"))
self.assertEqual(res.get_verdict(), "skip")
mock_fwver.return_value = "0.9.0"
self.assertIsNone(testcase._check_version(MockInstance("Icetea", ">=0.9.0")))
mock_fwver.return_value = "0.9.1"
self.assertIsNone(testcase._check_version(MockInstance("Icetea", ">=0.9.0")))
def test_check_skip(self):
testcase = TestcaseContainer.find_testcases("examples.test_cmdline", "./examples",
self.tc_meta_schema)[0]
res = testcase._check_skip(MockInstance("test", "0.9.0", "process"))
self.assertFalse(res)
self.assertFalse(testcase._check_skip(MockInstance("test", "0.9.0", "hardware",
skip_val=False)))
res = testcase._check_skip(MockInstance("test", "0.9.0", "process", True, {"test": "test"}))
self.assertEqual(res.get_verdict(), "skip")
def test_find_testcases(self):
lst = TestcaseContainer.find_testcases("test.testbase.dummy_multiples", "./test/testbase",
self.tc_meta_schema)
self.assertEqual(len(lst), 2)
lst = TestcaseContainer.find_testcases("test.testbase.dummy", "./test/testbase",
self.tc_meta_schema)
self.assertEqual(len(lst), 1)
with self.assertRaises(TypeError):
TestcaseContainer.find_testcases(1, "./test/testbase", self.tc_meta_schema)
with self.assertRaises(ValueError):
TestcaseContainer.find_testcases("", "./test/testbase", self.tc_meta_schema)
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.import_module")
def test_find_testcases_error(self, mocked_import):
mocked_import.side_effect = [ImportError]
with self.assertRaises(ImportError):
lst = TestcaseContainer.find_testcases("test.testbase.dummy_multiples",
"./test/testbase",
self.tc_meta_schema)
def test_create_new_bench_instance(self):
lst = TestcaseContainer.find_testcases("test.testbase.dummy", "./test/testbase",
self.tc_meta_schema)
inst = lst[0]._create_new_bench_instance("test.testbase.dummy")
self.assertTrue(isinstance(inst, Bench))
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.TestcaseContainer.get_instance")
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.TestcaseContainer._check_version")
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.TestcaseContainer._check_skip")
@mock.patch("icetea_lib.TestSuite.TestcaseContainer.get_tc_arguments")
def test_run(self, mock_parser, mock_skip, mock_version, mock_instance):
testcase = TestcaseContainer.find_testcases("examples.test_cmdline", "./examples",
self.tc_meta_schema)[0]
parser = mock.MagicMock()
instance = mock.MagicMock()
instance.run = mock.MagicMock()
instance.run.return_value = 0
instance.get_result = mock.MagicMock()
instance.get_result.return_value = Result()
mock_instance.return_value = instance
mock_skip.return_value = None
mock_version.return_value = None
parser.parse_known_args = mock.MagicMock()
parser.parse_known_args.return_value = (mock.MagicMock(), [])
mock_parser.return_value = parser
testcase.run()
mock_skip.return_value = 1
mock_version.return_value = None
self.assertEqual(testcase.status, TestStatus.FINISHED)
testcase.run()
mock_skip.return_value = None
mock_version.return_value = 1
self.assertEqual(testcase.status, TestStatus.FINISHED)
testcase.run()
mock_version.return_value = None
parser.parse_known_args.return_value = (self.args_tc, [1])
res = testcase.run()
self.assertEqual(testcase.status, TestStatus.FINISHED)
self.assertEqual(res.get_verdict(), 'inconclusive')
result = Result()
result.retcode = 1012
instance.get_result.return_value = result
instance.run.return_value = 1012
parser.parse_known_args.return_value = (mock.MagicMock(), [])
testcase.run()
if __name__ == '__main__':
unittest.main()
| true | true |
1c2d1fc8a2b5942f018d1861c44ee51d2467d88e | 1,886 | py | Python | src/extended_webdrivers/frame.py | dillonm197/extended-webdrivers | 9cb4cdb75f37c66ee1ac7fa13b947ae3bcb17863 | [
"MIT"
] | null | null | null | src/extended_webdrivers/frame.py | dillonm197/extended-webdrivers | 9cb4cdb75f37c66ee1ac7fa13b947ae3bcb17863 | [
"MIT"
] | null | null | null | src/extended_webdrivers/frame.py | dillonm197/extended-webdrivers | 9cb4cdb75f37c66ee1ac7fa13b947ae3bcb17863 | [
"MIT"
] | 1 | 2019-08-07T01:48:36.000Z | 2019-08-07T01:48:36.000Z | class Frame:
""" Base class for handling switching to and from iframes using context managers. """
def __init__(self, child_frame):
self.driver = child_frame.parent
assert child_frame.tag_name.lower() == 'iframe'
self.child_frame = child_frame
def _switch_to(self):
""" Switches to the specified frame. """
# Store the parent window and frame to access when we leave the child frame.
self.parent_window = self.driver.current_window_handle
self.parent_frame = self.driver.frame
# Switch to the child frame.
self.driver.switch_to.frame(self.child_frame)
self.driver.angular = self.driver._test_angular()
self.driver.jquery = self.driver._test_jquery()
if self.driver.sync_angular:
self.driver.wait_for_angular()
if self.driver.sync_jquery:
self.driver.wait_for_jquery(self.driver._script_timeout)
def __enter__(self):
self._switch_to()
return self
def _switch_from(self):
""" Switches to the previous frame. """
# Switch to the default window and frame.
self.driver.switch_to.default_content()
# Switch to the parent window.
if self.driver.current_window_handle != self.parent_window:
self.driver.switch_to.window(self.parent_window)
# Switch to parent frame if it exists.
if self.parent_frame is not None:
self.driver.switch_to.frame(self.parent_frame)
self.driver.angular = self.driver._test_angular()
self.driver.jquery = self.driver._test_jquery()
if self.driver.sync_angular:
self.driver.wait_for_angular()
if self.driver.sync_jquery:
self.driver.wait_for_jquery(self.driver._script_timeout)
def __exit__(self, exc_type, exc_val, exc_tb):
self._switch_from()
| 35.584906 | 89 | 0.662248 | class Frame:
def __init__(self, child_frame):
self.driver = child_frame.parent
assert child_frame.tag_name.lower() == 'iframe'
self.child_frame = child_frame
def _switch_to(self):
self.parent_window = self.driver.current_window_handle
self.parent_frame = self.driver.frame
self.driver.switch_to.frame(self.child_frame)
self.driver.angular = self.driver._test_angular()
self.driver.jquery = self.driver._test_jquery()
if self.driver.sync_angular:
self.driver.wait_for_angular()
if self.driver.sync_jquery:
self.driver.wait_for_jquery(self.driver._script_timeout)
def __enter__(self):
self._switch_to()
return self
def _switch_from(self):
self.driver.switch_to.default_content()
if self.driver.current_window_handle != self.parent_window:
self.driver.switch_to.window(self.parent_window)
if self.parent_frame is not None:
self.driver.switch_to.frame(self.parent_frame)
self.driver.angular = self.driver._test_angular()
self.driver.jquery = self.driver._test_jquery()
if self.driver.sync_angular:
self.driver.wait_for_angular()
if self.driver.sync_jquery:
self.driver.wait_for_jquery(self.driver._script_timeout)
def __exit__(self, exc_type, exc_val, exc_tb):
self._switch_from()
| true | true |
1c2d204ef61bc14c86ad018669c95549458a72ce | 16,764 | py | Python | cogs/queuecog.py | stealthlego/discord-queue-bot | 4f15a6e5aad4de333a685dfd32854c2898d4c6e6 | [
"MIT"
] | 3 | 2020-10-08T04:48:46.000Z | 2020-11-17T12:55:56.000Z | cogs/queuecog.py | stealthlego/discord-queue-bot | 4f15a6e5aad4de333a685dfd32854c2898d4c6e6 | [
"MIT"
] | 6 | 2020-10-07T19:05:53.000Z | 2021-05-09T12:55:33.000Z | cogs/queuecog.py | stealthlego/discord-queue-bot | 4f15a6e5aad4de333a685dfd32854c2898d4c6e6 | [
"MIT"
] | 1 | 2020-11-24T18:10:46.000Z | 2020-11-24T18:10:46.000Z | from asyncio.tasks import current_task
import os
import random
import time
import datetime
from attr import dataclass
import discord
from discord.ext import commands, tasks
import asyncio
# Per-server queue registry; presumably keyed by guild/server id and holding
# the active queue state for that server — populated elsewhere in the cog
# (TODO confirm against the cog's command handlers).
server_handler = {}
# Reaction emoji the bot listens for on the queue embed. Kept index-aligned
# with `instructions` below: reactions[i] is described by instructions[i].
reactions = ["\U000027A1", "\U00002705", "\U0000274C", "\U0001F504", "\U0001F500", "\U0001F6D1"]
instructions = ["Next player", "Add yourself to the queue", "Remove yourself from the queue", "Force update the queue", "Shuffle queue", "End queue"]
class PlayerQueue():
    """Per-voice-channel queue state: ordered players plus idle bookkeeping.

    Every accessor refreshes ``last_event`` so the pruning task in QueueCog
    can detect and tear down stale queues.
    """

    def __init__(self, voice, text, users):
        super().__init__()
        # Ordered list of discord.Member objects; index 0 is currently "up".
        self.user_list = users
        self.last_event = datetime.datetime.now()
        self.voice_channel = voice
        self.text_channel = text
        # Embed message handle so the cog can edit in place instead of reposting.
        self.embed_exists = False
        self.embed = None
        self.embed_msg = None

    ## Internal Functions ##
    async def append_user(self, user):
        '''adds user to the back of the queue'''
        self.user_list.append(user)
        self.last_event = datetime.datetime.now()

    async def remove_user(self, user):
        '''removes user from the queue (raises ValueError if absent)'''
        self.user_list.remove(user)
        self.last_event = datetime.datetime.now()

    async def next_user(self):
        '''rotates the queue one step and returns the new leader's mention'''
        user = self.user_list.pop(0)
        self.user_list.append(user)
        self.last_event = datetime.datetime.now()
        return self.user_list[0].mention

    async def shuffle_queue(self):
        '''randomly reorders the queue'''
        random.shuffle(self.user_list)
        self.last_event = datetime.datetime.now()

    async def whos_up(self):
        '''returns the member currently at the front of the queue'''
        self.last_event = datetime.datetime.now()
        return self.user_list[0]

    async def current_queue(self):
        '''returns the live queue list - should not be modified by callers'''
        self.last_event = datetime.datetime.now()
        return self.user_list

    async def generate_embed(self):
        '''builds self.embed showing the queue order plus the reaction legend'''
        user_list = await self.current_queue()
        queue_string = ''
        for i, user in enumerate(user_list):
            if i == 0:
                queue_string = queue_string + f'{i+1}: {user.mention} is currently up!\n'
            elif i == 1:
                queue_string = queue_string + f'{i+1}: {user.mention} is on deck\n'
            else:
                queue_string = queue_string + f'{i+1}: {user.display_name}\n'
        # add command prompts for reactions: one line per emoji/instruction pair
        commands_string = ''
        for emoji, instruction in zip(reactions, instructions):
            commands_string = commands_string + f'{emoji} = {instruction}\n'
        self.embed = discord.Embed(
            title=f'{self.voice_channel.name} Queue',
            color=discord.Color.blue()
        )
        self.embed.add_field(name='Queue', value=queue_string)
        self.embed.add_field(name='Commands', value=commands_string)

    async def update_queue(self):
        '''reconciles the queue with the voice channel's current membership

        Returns the list of bot messages sent (empty when nothing changed)
        so the caller can schedule their cleanup.
        '''
        msgs = []
        user_list = await self.current_queue()
        # gets voice channel and creates base sets for the comparison
        current_members = self.voice_channel.members
        user_set = set(user_list)
        current_set = set(current_members)
        # check to see if the lists have the same contents
        if user_set == current_set:
            # if they have the same contents pass
            pass
        else:
            # NOTE(review): the "-1" offsets assume exactly one bot (this bot)
            # sits in the voice channel but is never queued — confirm.
            if len(user_list) > len(current_members) - 1:
                # removes members who are no longer part of the voice chat
                to_prune = user_set.difference(current_set)
                for user in to_prune:
                    await self.remove_user(user)
            elif len(user_list) == len(current_members) - 1:
                # same number in voice chat, but members are different
                to_prune = user_set.difference(current_set)
                for user in to_prune:
                    await self.remove_user(user)
                to_add = current_set.difference(user_set)
                for user in to_add:
                    if not user.bot:
                        await self.append_user(user)
            else:
                # more members in voice than queued, so add the new ones.
                # Fixed: previously computed user_set.difference(current_members),
                # which is always empty here; joiners are members of the channel
                # that are not yet in the queue.
                to_add = current_set.difference(user_set)
                for user in to_add:
                    if not user.bot:
                        await self.append_user(user)
            # announce and rebuild the embed contents.
            # Fixed: self.print_queue() does not exist; regenerate the embed instead.
            msgs.append(await self.text_channel.send(f'Queue updated!'))
            await self.generate_embed()
        self.last_event = datetime.datetime.now()
        return msgs
class QueueCog(commands.Cog):
    """Cog wiring Discord commands and embed reactions to PlayerQueues.

    Queues live in the module-level ``server_handler`` dict keyed by voice
    channel id; a background task prunes entries idle for over an hour.
    """

    def __init__(self, bot):
        self.bot = bot
        # start the stale-queue pruning loop immediately
        self.queue_prune.start()

    ### helper functions ###
    async def get_user_list(self, ctx):
        '''returns the PlayerQueue for the author's voice channel, or None
        (after posting a hint) when no queue exists for that channel'''
        if server_handler.get(ctx.message.author.voice.channel.id) is None:
            msgs = []
            msgs.append(await ctx.send('Queue for this channel does not exist, try creating one first'))
            # clean up
            await self.msg_cleanup(msgs, 5)
        else:
            return server_handler.get(ctx.message.author.voice.channel.id)

    async def msg_cleanup(self, msgs, delay):
        '''deletes the given messages after the given delay (seconds)'''
        await asyncio.sleep(delay)
        for msg in msgs:
            await msg.delete()

    async def update_embed(self, queue):
        '''regenerates the queue embed and posts it, or edits it in place'''
        await queue.generate_embed()
        # paste embed
        if queue.embed_exists == False:
            queue.embed_msg = await queue.text_channel.send(embed=queue.embed)
            queue.embed_exists = True
        else:
            await queue.embed_msg.edit(embed=queue.embed)
        for emoji in reactions:
            await queue.embed_msg.add_reaction(emoji)

    @tasks.loop(seconds=360)
    async def queue_prune(self):
        '''prunes queues idle for more than an hour'''
        key_holder = []
        if len(server_handler) != 0:
            print('Checking for stale queues...')
            for key in server_handler:
                time_d = datetime.datetime.now() - server_handler[key].last_event
                if time_d.total_seconds() > 3600:  # 1 hour
                    msgs = []
                    text_channel = server_handler[key].text_channel
                    msgs.append(await text_channel.send('Queue timed out, ended queue and disconnected. See you next time!'))
                    key_holder.append(key)
                    # clean up
                    await self.msg_cleanup(msgs, 5)
            # delete after iterating to avoid mutating the dict mid-loop
            for key in key_holder:
                del server_handler[key]
            await self.bot.change_presence(activity=discord.Game(f"{'{'}help | {len(server_handler)} queues"))

    ### commands ###
    @commands.command(name='create', help='Creates initial queue of users')
    async def create_queue(self, ctx):
        '''Creates initial queue of users'''
        msgs = []
        msgs.append(ctx.message)
        # checks if user is in voice chat
        try:
            voice_obj = ctx.message.author.voice.channel.id
            if voice_obj is not None:
                pass
        except Exception as e:
            print(str(e))
            # remind user they are not in a voice chat room
            msgs.append(await ctx.send('You are not in a voice channel. Please join and try again'))
            # clean up
            await self.msg_cleanup(msgs, 5)
            return
        # check if there is a queue for this voice chat
        if ctx.message.author.voice.channel.id in server_handler.keys():
            msgs.append(await ctx.send('There is already a queue for this voice channel'))
        else:
            # create user list (bots are never queued)
            voice = ctx.message.author.voice.channel
            text = ctx.message.channel
            user_queue = []
            for user in ctx.message.author.voice.channel.members:
                if not user.bot:
                    user_queue.append(user)
            if len(user_queue) == 0:
                msgs.append(await ctx.send('An error has occurred!. Please re-join your voice channel and try again'))
            else:
                # add to server handler object for storage
                current_queue = PlayerQueue(voice, text, user_queue)
                server_handler[voice.id] = current_queue
                await server_handler[voice.id].shuffle_queue()
                await self.update_embed(server_handler[voice.id])
                await self.bot.change_presence(activity=discord.Game(f"{'{'}help | {len(server_handler)} queues"))
        # clean up
        await self.msg_cleanup(msgs, 5)

    #@commands.command(name='next', help='Moves to next person in the queue')
    async def next_up(self, ctx):
        '''moves to the next person in the queue'''
        msgs = []
        msgs.append(ctx.message)
        user_queue = await self.get_user_list(ctx)
        # gets new leader and mentions them
        name_string = await user_queue.next_user()
        msgs.append(await user_queue.text_channel.send(f'{name_string} is up!'))
        await self.update_embed(user_queue)
        user_queue.last_event = datetime.datetime.now()
        # clean up
        await self.msg_cleanup(msgs, 5)

    #@commands.command(name='add', help='Adds a person to the queue')
    async def add(self, ctx):
        '''adds the mentioned people to the queue'''
        msgs = []
        msgs.append(ctx.message)
        user_queue = await self.get_user_list(ctx)
        # gets mentions
        mentions = ctx.message.mentions
        # containers
        added_string = 'Added '
        if len(mentions) == 0:
            # if no mentions cannot add, post error to chat
            msgs.append(await user_queue.text_channel.send('Nobody added! Make sure to use the @ symbol when adding or removing'))
        elif len(mentions) == 1 and mentions[0].bot:
            # if the only mention is a bot throw an error
            msgs.append(await user_queue.text_channel.send(f'Cannot add {mentions[0].display_name} because they are a bot'))
        else:
            # add each mentioned user to the queue
            for i, user in enumerate(mentions):
                if user.bot:
                    msgs.append(await user_queue.text_channel.send(f'Cannot add {user.display_name} because they are a bot'))
                else:
                    await user_queue.append_user(user)
                    added_string = added_string + f'{user.display_name}'
                    if i + 1 < len(mentions):
                        added_string = added_string + ', '
            msgs.append(await user_queue.text_channel.send(f'{added_string}'))
            await self.update_embed(user_queue)
            user_queue.last_event = datetime.datetime.now()
        # clean up bot messages
        if len(msgs) != 0:
            await self.msg_cleanup(msgs, 5)

    #@commands.command(name='remove', help='Removes a specific person from the queue')
    async def remove(self, ctx, person):
        '''removes the mentioned people from the queue'''
        msgs = []
        msgs.append(ctx.message)
        user_queue = await self.get_user_list(ctx)
        # get people to remove
        mentions = ctx.message.mentions
        # containers
        removed_list = ''
        if len(mentions) == 0:
            # if no mentions, cannot remove
            msgs.append(await user_queue.text_channel.send('Nobody removed! Make sure to use the @ symbol when adding or removing'))
        else:
            # remove listed mentions that are actually queued
            for user in mentions:
                if user in await user_queue.current_queue():
                    await user_queue.remove_user(user)
                    removed_list += user.display_name
            msgs.append(await user_queue.text_channel.send(f'Removed {removed_list}'))
            # prints current queue
            await self.update_embed(user_queue)
            user_queue.last_event = datetime.datetime.now()
        # clean up bot messages
        if len(msgs) != 0:
            await self.msg_cleanup(msgs, 5)

    #@commands.command(name='queue', help='Displays current queue')
    async def queue(self, ctx):
        '''Displays a message with the current queue'''
        user_queue = await self.get_user_list(ctx)
        await self.update_embed(user_queue)
        user_queue.last_event = datetime.datetime.now()

    #@commands.command(name='update', help='Updates queue with new users automatically')
    async def force_update(self, ctx):
        '''refreshes the queue embed'''
        user_queue = await self.get_user_list(ctx)
        await self.update_embed(user_queue)

    #@commands.command(name='shuffle', help='Reshuffles current queue')
    async def shuffle(self, ctx):
        '''reshuffles current queue list'''
        msgs = []
        msgs.append(ctx.message)
        user_queue = await self.get_user_list(ctx)
        await user_queue.shuffle_queue()
        msgs.append(await user_queue.text_channel.send(f'Queue Shuffled!'))
        await self.update_embed(user_queue)
        user_queue.last_event = datetime.datetime.now()
        await self.msg_cleanup(msgs, 5)

    @commands.command(name='end', help='Force ends current queue')
    async def end(self, ctx):
        '''ends queue and disconnects'''
        msgs = []
        msgs.append(ctx.message)
        # Fixed: guard against the author not being in a voice channel or the
        # channel having no active queue (previously raised AttributeError /
        # KeyError).
        voice_state = ctx.message.author.voice
        queue = server_handler.pop(voice_state.channel.id, None) if voice_state else None
        if queue is None:
            msgs.append(await ctx.send('Queue for this channel does not exist, try creating one first'))
        else:
            msgs.append(queue.embed_msg)
            msgs.append(await ctx.send('Ended queue, see you next time!'))
            await self.bot.change_presence(activity=discord.Game(f"{'{'}help | {len(server_handler)} queues"))
        # clean up
        await self.msg_cleanup(msgs, 5)

    @commands.Cog.listener()
    async def on_reaction_add(self, reaction, user):
        '''dispatches the embed reaction controls (see module-level reactions)'''
        if user.bot:
            return
        # Fixed: ignore reactions from users who are not in a voice channel
        # with an active queue (previously raised AttributeError on
        # user.voice.channel or crashed on a None queue).
        if user.voice is None:
            return
        user_queue = server_handler.get(user.voice.channel.id)
        if user_queue is None:
            return
        emoji = reaction.emoji
        msgs = []
        if emoji == reactions[0]:
            # gets new leader and mentions them
            name_string = await user_queue.next_user()
            msgs.append(await user_queue.text_channel.send(f'{name_string} is up!'))
            await self.update_embed(user_queue)
            user_queue.last_event = datetime.datetime.now()
        elif emoji == reactions[1]:
            # adds the reacting user to the queue
            await user_queue.append_user(user)
            added_string = f'Added {user.display_name}'
            msgs.append(await user_queue.text_channel.send(f'{added_string}'))
            await self.update_embed(user_queue)
            user_queue.last_event = datetime.datetime.now()
        elif emoji == reactions[2]:
            # removes the reacting user from the queue (only if queued)
            if user in await user_queue.current_queue():
                await user_queue.remove_user(user)
                # Fixed: message previously said 'Added' on removal.
                removed_string = f'Removed {user.display_name}'
                msgs.append(await user_queue.text_channel.send(f'{removed_string}'))
                await self.update_embed(user_queue)
                user_queue.last_event = datetime.datetime.now()
        elif emoji == reactions[3]:
            # forces the embed to refresh
            await self.update_embed(user_queue)
        elif emoji == reactions[4]:
            # shuffles queue
            await user_queue.shuffle_queue()
            msgs.append(await user_queue.text_channel.send(f'Queue Shuffled!'))
            await self.update_embed(user_queue)
            user_queue.last_event = datetime.datetime.now()
        elif emoji == reactions[5]:
            # deletes queue
            del server_handler[user.voice.channel.id]
            msgs.append(await reaction.message.channel.send('Ended queue, see you next time!'))
            await self.bot.change_presence(activity=discord.Game(f"{'{'}help | {len(server_handler)} queues"))
            msgs.append(user_queue.embed_msg)
        else:
            pass
        # clean up
        await reaction.remove(user)
        if len(msgs) > 0:
            await self.msg_cleanup(msgs, 5)
def setup(bot):
    """Entry point used by discord.py to register this cog on the bot."""
    bot.add_cog(QueueCog(bot))
import os
import random
import time
import datetime
from attr import dataclass
import discord
from discord.ext import commands, tasks
import asyncio
server_handler = {}
reactions = ["\U000027A1", "\U00002705", "\U0000274C", "\U0001F504", "\U0001F500", "\U0001F6D1"]
instructions = ["Next player", "Add yourself to the queue", "Remove yourself from the queue", "Force update the queue", "Shuffle queue", "End queue"]
class PlayerQueue():
def __init__(self, voice, text, users):
super().__init__()
self.user_list = users
self.last_event = datetime.datetime.now()
self.voice_channel = voice
self.text_channel = text
self.embed_exists = False
self.embed = None
self.embed_msg = None
r(self, user):
self.user_list.append(user)
self.last_event = datetime.datetime.now()
async def remove_user(self, user):
self.user_list.remove(user)
self.last_event = datetime.datetime.now()
async def next_user(self):
user = self.user_list.pop(0)
self.user_list.append(user)
self.last_event = datetime.datetime.now()
return self.user_list[0].mention
async def shuffle_queue(self):
random.shuffle(self.user_list)
self.last_event = datetime.datetime.now()
async def whos_up(self):
self.last_event = datetime.datetime.now()
return self.user_list[0]
async def current_queue(self):
self.last_event = datetime.datetime.now()
return self.user_list
async def generate_embed(self):
user_list = await self.current_queue()
queue_string = ''
for i, user in enumerate(user_list):
if i == 0:
queue_string = queue_string + f'{i+1}: {user.mention} is currently up!\n'
elif i == 1:
queue_string = queue_string + f'{i+1}: {user.mention} is on deck\n'
else:
queue_string = queue_string + f'{i+1}: {user.display_name}\n'
commands_string = ''
react_qty = len(reactions)
for i in range(react_qty):
commands_string = commands_string + f'{reactions[i]} = {instructions[i]}\n'
self.embed = discord.Embed(
title = f'{self.voice_channel.name} Queue',
color = discord.Color.blue()
)
self.embed.add_field(name='Queue', value=queue_string)
self.embed.add_field(name='Commands', value=commands_string)
async def update_queue(self):
msgs = []
user_list = await self.current_queue()
current_members = self.voice_channel.members
user_set = set(user_list)
current_set = set(current_members)
if set(user_list) == set(current_members):
pass
else:
if len(user_list) > len(current_members)-1:
to_prune = user_set.difference(current_members)
for user in to_prune:
await self.remove_user(user)
elif len(user_list) == len(current_members)-1:
to_add = current_set.difference(user_list)
to_prune = user_set.difference(current_members)
for user in to_prune:
await self.remove_user(user)
for user in to_add:
if user.bot:
pass
else:
await self.append_user(user)
else:
to_add = user_set.difference(current_members)
for user in to_add:
if user.bot:
pass
else:
await self.append_user(user)
msgs.append(await self.text_channel.send(f'Queue updated!'))
await self.print_queue()
self.last_event = datetime.datetime.now()
return msgs
class QueueCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.queue_prune.start()
server_handler.get(ctx.message.author.voice.channel.id) == None:
msgs = []
msgs.append(await ctx.send('Queue for this channel does not exist, try creating one first'))
await self.msg_cleanup(msgs, 5)
else:
return server_handler.get(ctx.message.author.voice.channel.id)
async def msg_cleanup(self, msgs, delay):
await asyncio.sleep(delay)
for msg in msgs:
await msg.delete()
async def update_embed(self, queue):
await queue.generate_embed()
if queue.embed_exists == False:
queue.embed_msg = await queue.text_channel.send(embed=queue.embed)
queue.embed_exists = True
else:
await queue.embed_msg.edit(embed=queue.embed)
for emoji in reactions:
await queue.embed_msg.add_reaction(emoji)
@tasks.loop(seconds=360)
async def queue_prune(self):
key_holder = []
if len(server_handler) != 0:
print('Checking for stale queues...')
for key in server_handler:
time_d = datetime.datetime.now() - server_handler[key].last_event
if time_d.total_seconds() > 3600:
msgs = []
text_channel = server_handler[key].text_channel
msgs.append(await text_channel.send('Queue timed out, ended queue and disconnected. See you next time!'))
key_holder.append(key)
await self.msg_cleanup(msgs, 5)
for key in key_holder:
del server_handler[key]
await self.bot.change_presence(activity=discord.Game(f"{'{'}help | {len(server_handler)} queues"))
', help='Creates initial queue of users')
async def create_queue(self, ctx):
msgs = []
msgs.append(ctx.message)
try:
voice_obj = ctx.message.author.voice.channel.id
if voice_obj != None:
pass
except Exception as e:
print(str(e))
msgs.append(await ctx.send('You are not in a voice channel. Please join and try again'))
await self.msg_cleanup(msgs, 5)
return
if ctx.message.author.voice.channel.id in server_handler.keys():
msgs.append(await ctx.send('There is already a queue for this voice channel'))
else:
voice = ctx.message.author.voice.channel
text = ctx.message.channel
user_queue = []
for user in ctx.message.author.voice.channel.members:
if user.bot:
pass
else:
user_queue.append(user)
if len(user_queue) == 0:
msgs.append(await ctx.send('An error has occurred!. Please re-join your voice channel and try again'))
else:
current_queue = PlayerQueue(voice, text, user_queue)
server_handler[voice.id] = current_queue
await server_handler[voice.id].shuffle_queue()
await self.update_embed(server_handler[voice.id])
await self.bot.change_presence(activity=discord.Game(f"{'{'}help | {len(server_handler)} queues"))
await self.msg_cleanup(msgs, 5)
async def next_up(self, ctx):
msgs = []
msgs.append(ctx.message)
user_queue = await self.get_user_list(ctx)
name_string = await user_queue.next_user()
msgs.append(await user_queue.text_channel.send(f'{name_string} is up!'))
await self.update_embed(user_queue)
user_queue.last_event = datetime.datetime.now()
await self.msg_cleanup(msgs, 5)
async def add(self, ctx):
msgs = []
msgs.append(ctx.message)
user_queue = await self.get_user_list(ctx)
mentions = ctx.message.mentions
added_string = 'Added '
bot_msg = None
if len(mentions) == 0:
msgs.append(await user_queue.text_channel.send('Nobody added! Make sure to use the @ symbol when adding or removing'))
elif len(mentions) == 1 and mentions[0].bot:
msgs.append(await user_queue.text_channel.send(f'Cannot add {mentions[0].display_name} because they are a bot'))
else:
for i, user in enumerate(mentions):
if user.bot:
msgs.append(await user_queue.text_channel.send(f'Cannot add {user.display_name} because they are a bot'))
else:
await user_queue.append_user(user)
added_string = added_string + f'{user.display_name}'
if i+1 < len(mentions):
added_string = added_string + ', '
msgs.append(await user_queue.text_channel.send(f'{added_string}'))
await self.update_embed(user_queue)
user_queue.last_event = datetime.datetime.now()
if len(msgs) != 0:
await self.msg_cleanup(msgs, 5)
async def remove(self, ctx, person):
msgs = []
msgs.append(ctx.message)
user_queue = await self.get_user_list(ctx)
mentions = ctx.message.mentions
removed_list = ''
if len(mentions) == 0:
msgs.append(await user_queue.text_channel.send('Nobody removed! Make sure to use the @ symbol when adding or removing'))
else:
for user in mentions:
if user in await user_queue.current_queue():
await user_queue.remove_user(user)
removed_list += user.display_name
msgs.append(await user_queue.text_channel.send(f'Removed {removed_list}'))
await self.update_embed(user_queue)
user_queue.last_event = datetime.datetime.now()
if len(msgs) != 0:
await self.msg_cleanup(msgs, 5)
async def queue(self, ctx):
user_queue = await self.get_user_list(ctx)
await self.update_embed(user_queue)
user_queue.last_event = datetime.datetime.now()
async def force_update(self, ctx):
user_queue = await self.get_user_list(ctx)
await self.update_embed(user_queue)
async def shuffle(self, ctx):
msgs = []
msgs.append(ctx.message)
user_queue = await self.get_user_list(ctx)
await user_queue.shuffle_queue()
msgs.append(await user_queue.text_channel.send(f'Queue Shuffled!'))
await self.update_embed(user_queue)
user_queue.last_event = datetime.datetime.now()
await self.msg_cleanup(msgs, 5)
@commands.command(name='end', help='Force ends current queue')
async def end(self, ctx):
msgs = []
msgs.append(ctx.message)
msgs.append(server_handler[ctx.message.author.voice.channel.id].embed_msg)
del server_handler[ctx.message.author.voice.channel.id]
msgs.append(await ctx.send('Ended queue, see you next time!'))
await self.bot.change_presence(activity=discord.Game(f"{'{'}help | {len(server_handler)} queues"))
await self.msg_cleanup(msgs, 5)
@commands.Cog.listener()
async def on_reaction_add(self, reaction, user):
if user.bot:
return
emoji = reaction.emoji
msgs = []
user_queue = server_handler.get(user.voice.channel.id)
if emoji == reactions[0]:
name_string = await user_queue.next_user()
msgs.append(await user_queue.text_channel.send(f'{name_string} is up!'))
await self.update_embed(user_queue)
user_queue.last_event = datetime.datetime.now()
elif emoji == reactions[1]:
await user_queue.append_user(user)
added_string = f'Added {user.display_name}'
msgs.append(await user_queue.text_channel.send(f'{added_string}'))
await self.update_embed(user_queue)
user_queue.last_event = datetime.datetime.now()
elif emoji == reactions[2]:
await user_queue.remove_user(user)
removed_string = f'Added {user.display_name}'
msgs.append(await user_queue.text_channel.send(f'{removed_string}'))
await self.update_embed(user_queue)
user_queue.last_event = datetime.datetime.now()
elif emoji == reactions[3]:
await self.update_embed(user_queue)
elif emoji == reactions[4]:
await user_queue.shuffle_queue()
msgs.append(await user_queue.text_channel.send(f'Queue Shuffled!'))
await self.update_embed(user_queue)
user_queue.last_event = datetime.datetime.now()
elif emoji == reactions[5]:
del server_handler[user.voice.channel.id]
msgs.append(await reaction.message.channel.send('Ended queue, see you next time!'))
await self.bot.change_presence(activity=discord.Game(f"{'{'}help | {len(server_handler)} queues"))
msgs.append(user_queue.embed_msg)
else:
pass
await reaction.remove(user)
if len(msgs) > 0:
await self.msg_cleanup(msgs, 5)
def setup(bot):
cog = QueueCog(bot)
bot.add_cog(cog) | true | true |
1c2d2137de95de3a8e372e8383217ce2f7000d2c | 3,288 | py | Python | app/settings.py | khaledzaki2017/Recipe-app-api | c525b2764866fc3b8501baf9d8e9c7cc0374080b | [
"MIT"
] | null | null | null | app/settings.py | khaledzaki2017/Recipe-app-api | c525b2764866fc3b8501baf9d8e9c7cc0374080b | [
"MIT"
] | null | null | null | app/settings.py | khaledzaki2017/Recipe-app-api | c525b2764866fc3b8501baf9d8e9c7cc0374080b | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded here; load from an environment variable before
# deploying anywhere public.
SECRET_KEY = 'z+-!#mb%g3ridp=b(nyb-2e3g5y-lf@2agwp9p6d#v^*a^uff9'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list means only localhost is accepted (development default).
ALLOWED_HOSTS = []

# Application definition
# Django built-ins, Django REST Framework (+ token auth), and the project's
# own apps (core, user, recipe).
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'core',
    'user',
    'recipe',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'app.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'app.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# SQLite file stored alongside the project (development default).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'

# NOTE(review): /vol/web looks like a mounted container volume — confirm it
# exists in the deployment environment.
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'

# Use the project's custom user model (defined in the core app) instead of
# django.contrib.auth's default User.
AUTH_USER_MODEL = 'core.User'
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'z+-!#mb%g3ridp=b(nyb-2e3g5y-lf@2agwp9p6d#v^*a^uff9'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
AUTH_USER_MODEL = 'core.User'
| true | true |
1c2d2151cd5b2fc48c97bf31c39a0ca0e80eb1a1 | 18,214 | py | Python | rex_gym/envs/gym/walk_env.py | Dzhange/rex-gym | 3ffb674ade13ceac3352c3329f1804eb21a08403 | [
"Apache-2.0"
] | null | null | null | rex_gym/envs/gym/walk_env.py | Dzhange/rex-gym | 3ffb674ade13ceac3352c3329f1804eb21a08403 | [
"Apache-2.0"
] | null | null | null | rex_gym/envs/gym/walk_env.py | Dzhange/rex-gym | 3ffb674ade13ceac3352c3329f1804eb21a08403 | [
"Apache-2.0"
] | null | null | null | """This file implements the gym environment of rex alternating legs.
"""
import math
import random
from gym import spaces
import numpy as np
from .. import rex_gym_env
from ...model import rex_constants
from ...model.gait_planner import GaitPlanner
from ...model.kinematics import Kinematics
NUM_LEGS = 4
class RexWalkEnv(rex_gym_env.RexGymEnv):
"""The gym environment for the rex.
It simulates the locomotion of a rex, a quadruped robot. The state space
include the angles, velocities and torques for all the motors and the action
space is the desired motor angle for each motor. The reward function is based
on how far the rex walks in 2000 steps and penalizes the energy
expenditure or how near rex is to the target position.
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 66}
load_ui = True
is_terminating = False
    def __init__(self,
                 debug=False,
                 urdf_version=None,
                 control_time_step=0.005,
                 action_repeat=5,
                 control_latency=0,
                 pd_latency=0,
                 on_rack=False,
                 motor_kp=1.0,
                 motor_kd=0.02,
                 render=False,
                 num_steps_to_log=2000,
                 env_randomizer=None,
                 log_path=None,
                 target_position=None,
                 backwards=None,
                 signal_type="ik",
                 terrain_type="plane",
                 terrain_id=None,
                 mark='base'):
        """Initialize the rex alternating legs gym environment.

        Args:
          debug: Whether to enable debug printing/UI hooks.
          urdf_version: [DEFAULT_URDF_VERSION, DERPY_V0_URDF_VERSION] are allowable
            versions. If None, DEFAULT_URDF_VERSION is used. Refer to
            rex_gym_env for more details.
          control_time_step: The time step between two successive control signals.
          action_repeat: The number of simulation steps that an action is repeated.
          control_latency: The latency between get_observation() and the actual
            observation. See minituar.py for more details.
          pd_latency: The latency used to get motor angles/velocities used to
            compute PD controllers. See rex.py for more details.
          on_rack: Whether to place the rex on rack. This is only used to debug
            the walk gait. In this mode, the rex's base is hung midair so
            that its walk gait is clearer to visualize.
          motor_kp: The P gain of the motor.
          motor_kd: The D gain of the motor.
          remove_default_joint_damping: Whether to remove the default joint damping.
          render: Whether to render the simulation.
          num_steps_to_log: The max number of control steps in one episode. If the
            number of steps is over num_steps_to_log, the environment will still
            be running, but only first num_steps_to_log will be recorded in logging.
          env_randomizer: An instance (or a list) of EnvRanzomier(s) that can
            randomize the environment during when env.reset() is called and add
            perturbations when env.step() is called.
          log_path: The path to write out logs. For the details of logging, refer to
            rex_logging.proto.
          target_position: Fixed walk target on the x axis; randomized per
            episode when None.
          backwards: Force walking direction; randomized per episode when None.
          signal_type: 'ik' (inverse kinematics) or 'ol' (open loop); selects
            the action space size and feedback range below.
          terrain_type: Terrain to load (e.g. "plane").
          terrain_id: Identifier of the terrain variant, if any.
          mark: Robot model variant passed to the base environment.
        """
        super(RexWalkEnv,
              self).__init__(urdf_version=urdf_version,
                             accurate_motor_model_enabled=True,
                             motor_overheat_protection=True,
                             hard_reset=False,
                             motor_kp=motor_kp,
                             motor_kd=motor_kd,
                             remove_default_joint_damping=False,
                             control_latency=control_latency,
                             pd_latency=pd_latency,
                             on_rack=on_rack,
                             render=render,
                             num_steps_to_log=num_steps_to_log,
                             env_randomizer=env_randomizer,
                             log_path=log_path,
                             control_time_step=control_time_step,
                             action_repeat=action_repeat,
                             target_position=target_position,
                             signal_type=signal_type,
                             backwards=backwards,
                             debug=debug,
                             terrain_id=terrain_id,
                             terrain_type=terrain_type,
                             mark=mark)
        # (eventually) allow different feedback ranges/action spaces for different signals
        action_max = {
            'ik': 0.4,
            'ol': 0.01
        }
        action_dim_map = {
            'ik': 2,
            'ol': 8
        }
        # NOTE(review): self._signal_type is read here before the local
        # assignment below — this relies on the parent __init__ having set it
        # from the signal_type argument; confirm in rex_gym_env.
        action_dim = action_dim_map[self._signal_type]
        action_high = np.array([action_max[self._signal_type]] * action_dim)
        self.action_space = spaces.Box(-action_high, action_high)
        # Default camera placement for rendering.
        self._cam_dist = 1.0
        self._cam_yaw = 0.0
        self._cam_pitch = -20
        # Re-assignment of the signal type (presumably already set by the
        # parent; kept for clarity).
        self._signal_type = signal_type
        self._gait_planner = GaitPlanner("walk")
        self._kinematics = Kinematics()
        # Episode flags reset in reset().
        self.goal_reached = False
        self._stay_still = False
        self.is_terminating = False
def reset(self):
self.init_pose = rex_constants.INIT_POSES["stand"]
if self._signal_type == 'ol':
self.init_pose = rex_constants.INIT_POSES["stand_ol"]
super(RexWalkEnv, self).reset(initial_motor_angles=self.init_pose, reset_duration=0.5)
self.goal_reached = False
self.is_terminating = False
self._stay_still = False
if self._backwards is None:
self.backwards = random.choice([True, False])
else:
self.backwards = self._backwards
step = 0.6
period = 0.65
base_x = self._base_x
if self.backwards:
step = -.3
period = .5
base_x = .0
if not self._target_position or self._random_pos_target:
bound = -3 if self.backwards else 3
self._target_position = random.uniform(bound//2, bound)
self._random_pos_target = True
if 1 or (self._is_render and self._signal_type == 'ik'):
if self.load_ui:
self.setup_ui(base_x, step, period)
self.load_ui = False
if self._is_debug:
print(f"Target Position x={self._target_position}, Random assignment: {self._random_pos_target}, Backwards: {self.backwards}")
return self._get_observation()
def setup_ui(self, base_x, step, period):
self.base_x_ui = self._pybullet_client.addUserDebugParameter("base_x",
self._ranges["base_x"][0],
self._ranges["base_x"][1],
base_x)
self.base_y_ui = self._pybullet_client.addUserDebugParameter("base_y",
self._ranges["base_y"][0],
self._ranges["base_y"][1],
self._ranges["base_y"][2])
self.base_z_ui = self._pybullet_client.addUserDebugParameter("base_z",
self._ranges["base_z"][0],
self._ranges["base_z"][1],
self._ranges["base_z"][2])
self.roll_ui = self._pybullet_client.addUserDebugParameter("roll",
self._ranges["roll"][0],
self._ranges["roll"][1],
self._ranges["roll"][2])
self.pitch_ui = self._pybullet_client.addUserDebugParameter("pitch",
self._ranges["pitch"][0],
self._ranges["pitch"][1],
self._ranges["pitch"][2])
self.yaw_ui = self._pybullet_client.addUserDebugParameter("yaw",
self._ranges["yaw"][0],
self._ranges["yaw"][1],
self._ranges["yaw"][2])
self.step_length_ui = self._pybullet_client.addUserDebugParameter("step_length", -0.7, 0.7, step)
self.step_rotation_ui = self._pybullet_client.addUserDebugParameter("step_rotation", -1.5, 1.5, 0.)
self.step_angle_ui = self._pybullet_client.addUserDebugParameter("step_angle", -180., 180., 0.)
self.step_period_ui = self._pybullet_client.addUserDebugParameter("step_period", 0.2, 0.9, period)
def _read_inputs(self, base_pos_coeff, gait_stage_coeff):
position = np.array(
[
self._pybullet_client.readUserDebugParameter(self.base_x_ui),
self._pybullet_client.readUserDebugParameter(self.base_y_ui) * base_pos_coeff,
self._pybullet_client.readUserDebugParameter(self.base_z_ui) * base_pos_coeff
]
)
orientation = np.array(
[
self._pybullet_client.readUserDebugParameter(self.roll_ui) * base_pos_coeff,
self._pybullet_client.readUserDebugParameter(self.pitch_ui) * base_pos_coeff,
self._pybullet_client.readUserDebugParameter(self.yaw_ui) * base_pos_coeff
]
)
step_length = self._pybullet_client.readUserDebugParameter(self.step_length_ui) * gait_stage_coeff
step_rotation = self._pybullet_client.readUserDebugParameter(self.step_rotation_ui)
step_angle = self._pybullet_client.readUserDebugParameter(self.step_angle_ui)
step_period = self._pybullet_client.readUserDebugParameter(self.step_period_ui)
return position, orientation, step_length, step_rotation, step_angle, step_period
    def _check_target_position(self, t):
        """Flag goal completion once the base has travelled far enough.

        Args:
            t: Time since reset; recorded as ``self.end_time`` the first time
                the goal is reached so braking can be phased in from there.
        """
        if self._target_position:
            current_x = abs(self.rex.GetBasePosition()[0])
            # give 0.15 stop space
            if current_x >= abs(self._target_position) - 0.15:
                self.goal_reached = True
                if not self.is_terminating:
                    self.end_time = t
                    self.is_terminating = True
    @staticmethod
    def _evaluate_base_stage_coeff(current_t, end_t=0.0, width=0.001):
        # Piecewise-quadratic smooth-step ("sigmoid") ramp from 0 to 1 over a
        # window of length ``width`` ending at ``width + end_t``. Outside the
        # window (both branch conditions false) the coefficient is 1.
        # sigmoid function
        beta = p = width
        if p - beta + end_t <= current_t <= p - (beta / 2) + end_t:
            return (2 / beta ** 2) * (current_t - p + beta) ** 2
        elif p - (beta/2) + end_t <= current_t <= p + end_t:
            return 1 - (2 / beta ** 2) * (current_t - p) ** 2
        else:
            return 1
    @staticmethod
    def _evaluate_gait_stage_coeff(current_t, action, end_t=0.0):
        # Start-up ramp: while inside the window of length ``0.8 + action[0]``
        # the coefficient equals the elapsed time itself, afterwards it is 1.0.
        # ramp function
        p = 0.8 + action[0]
        if end_t <= current_t <= p + end_t:
            return current_t
        else:
            return 1.0
    @staticmethod
    def _evaluate_brakes_stage_coeff(current_t, action, end_t=0.0, end_value=0.0):
        # Braking ramp: decays linearly from 1 toward 0 during the window of
        # length ``0.8 + action[1]`` starting at ``end_t``; afterwards holds
        # ``end_value``.
        # ramp function
        p = 0.8 + action[1]
        if end_t <= current_t <= p + end_t:
            return 1 - (current_t - end_t)
        else:
            return end_value
def _signal(self, t, action):
if self._signal_type == 'ik':
return self._IK_signal(t, action)
if self._signal_type == 'ol':
return self._open_loop_signal(t, action)
def _IK_signal(self, t, action):
base_pos_coeff = self._evaluate_base_stage_coeff(t, width=1.5)
gait_stage_coeff = self._evaluate_gait_stage_coeff(t, action)
step = 0.6
period = 0.65
base_x = self._base_x
if self.backwards:
step = -.3
period = .5
base_x = .0
if (self._is_render and self._is_debug):
position, orientation, step_length, step_rotation, step_angle, step_period = \
self._read_inputs(base_pos_coeff, gait_stage_coeff)
# elif 0 and self._is_debug:
# # print("here")
# position = np.array([base_x,
# self._base_y * base_pos_coeff,
# self._base_z * base_pos_coeff])
# orientation = np.array([self._base_roll * base_pos_coeff,
# self._base_pitch * base_pos_coeff,
# self._base_yaw * base_pos_coeff])
# step_length = -0.3
# step_rotation = 0
# step_angle = 0
# step_period = 0.5
else:
position = np.array([base_x,
self._base_y * base_pos_coeff,
self._base_z * base_pos_coeff])
orientation = np.array([self._base_roll * base_pos_coeff,
self._base_pitch * base_pos_coeff,
self._base_yaw * base_pos_coeff])
step_length = (self.step_length if self.step_length is not None else step) * gait_stage_coeff
step_rotation = (self.step_rotation if self.step_rotation is not None else 0.0)
step_angle = self.step_angle if self.step_angle is not None else 0.0
step_period = (self.step_period if self.step_period is not None else period)
# print(position, orientation, step_length, step_rotation, step_angle, step_period)
if self.goal_reached:
brakes_coeff = self._evaluate_brakes_stage_coeff(t, action, self.end_time)
step_length *= brakes_coeff
if brakes_coeff == 0.0:
self._stay_still = True
direction = -1.0 if step_length < 0 else 1.0
frames = self._gait_planner.loop(step_length, step_angle, step_rotation, step_period, direction)
fr_angles, fl_angles, rr_angles, rl_angles, _ = self._kinematics.solve(orientation, position, frames)
signal = [
fl_angles[0], fl_angles[1], fl_angles[2],
fr_angles[0], fr_angles[1], fr_angles[2],
rl_angles[0], rl_angles[1], rl_angles[2],
rr_angles[0], rr_angles[1], rr_angles[2]
]
return signal
def _open_loop_signal(self, t, action):
period = 1.0 / 8
l_a = 0.1
f_a = l_a * 2
if self.goal_reached:
coeff = self._evaluate_brakes_stage_coeff(t, [0., 0.], end_t=self.end_time, end_value=0.0)
l_a *= coeff
f_a *= coeff
if coeff is 0.0:
self._stay_still = True
start_coeff = self._evaluate_gait_stage_coeff(t, [0.0])
l_a *= start_coeff
f_a *= start_coeff
l_extension = l_a * math.cos(2 * math.pi / period * t)
f_extension = f_a * math.cos(2 * math.pi / period * t)
initial_pose = self.init_pose
l_swing = -l_extension
swing = -f_extension
pose = np.array([0.0, l_extension + action[0], f_extension + action[1],
0.0, l_swing + action[2], swing + action[3],
0.0, l_swing + action[4], swing + action[5],
0.0, l_extension + action[6], f_extension + action[7]])
signal = initial_pose + pose
return signal
    def _transform_action_to_motor_command(self, action):
        """Convert a policy action into motor commands for the current time.

        Holds the initial pose once braking has completed (``_stay_still``);
        otherwise generates the gait signal and delegates the final
        conversion to the base environment.
        """
        if self._stay_still:
            return self.init_pose
        t = self.rex.GetTimeSinceReset()
        self._check_target_position(t)
        action = self._signal(t, action)
        action = super(RexWalkEnv, self)._transform_action_to_motor_command(action)
        return action
def is_fallen(self):
"""Decide whether the rex has fallen.
If the up directions between the base and the world is large (the dot
product is smaller than 0.85), the rex is considered fallen.
Returns:
Boolean value that indicates whether the rex has fallen.
"""
orientation = self.rex.GetBaseOrientation()
rot_mat = self._pybullet_client.getMatrixFromQuaternion(orientation)
local_up = rot_mat[6:]
return np.dot(np.asarray([0, 0, 1]), np.asarray(local_up)) < 0.85
def _get_true_observation(self):
"""Get the true observations of this environment.
It includes the roll, the error between current pitch and desired pitch,
roll dot and pitch dot of the base.
Returns:
The observation list.
"""
observation = []
roll, pitch, _ = self.rex.GetTrueBaseRollPitchYaw()
roll_rate, pitch_rate, _ = self.rex.GetTrueBaseRollPitchYawRate()
observation.extend([roll, pitch, roll_rate, pitch_rate])
self._true_observation = np.array(observation)
return self._true_observation
def _get_observation(self):
observation = []
roll, pitch, _ = self.rex.GetBaseRollPitchYaw()
roll_rate, pitch_rate, _ = self.rex.GetBaseRollPitchYawRate()
observation.extend([roll, pitch, roll_rate, pitch_rate])
self._observation = np.array(observation)
return self._observation
    def _get_observation_upper_bound(self):
        """Get the upper bound of the observation.

        Returns:
          The upper bound of an observation. See GetObservation() for the details
            of each element of an observation.
        """
        upper_bound = np.zeros(self._get_observation_dimension())
        upper_bound[0:2] = 2 * math.pi  # Roll, pitch of the base.
        upper_bound[2:4] = 2 * math.pi / self._time_step  # Roll, pitch rate.
        return upper_bound
def _get_observation_lower_bound(self):
lower_bound = -self._get_observation_upper_bound()
return lower_bound
| 46.464286 | 138 | 0.566981 | import math
import random
from gym import spaces
import numpy as np
from .. import rex_gym_env
from ...model import rex_constants
from ...model.gait_planner import GaitPlanner
from ...model.kinematics import Kinematics
NUM_LEGS = 4
class RexWalkEnv(rex_gym_env.RexGymEnv):
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 66}
load_ui = True
is_terminating = False
def __init__(self,
debug=False,
urdf_version=None,
control_time_step=0.005,
action_repeat=5,
control_latency=0,
pd_latency=0,
on_rack=False,
motor_kp=1.0,
motor_kd=0.02,
render=False,
num_steps_to_log=2000,
env_randomizer=None,
log_path=None,
target_position=None,
backwards=None,
signal_type="ik",
terrain_type="plane",
terrain_id=None,
mark='base'):
super(RexWalkEnv,
self).__init__(urdf_version=urdf_version,
accurate_motor_model_enabled=True,
motor_overheat_protection=True,
hard_reset=False,
motor_kp=motor_kp,
motor_kd=motor_kd,
remove_default_joint_damping=False,
control_latency=control_latency,
pd_latency=pd_latency,
on_rack=on_rack,
render=render,
num_steps_to_log=num_steps_to_log,
env_randomizer=env_randomizer,
log_path=log_path,
control_time_step=control_time_step,
action_repeat=action_repeat,
target_position=target_position,
signal_type=signal_type,
backwards=backwards,
debug=debug,
terrain_id=terrain_id,
terrain_type=terrain_type,
mark=mark)
action_max = {
'ik': 0.4,
'ol': 0.01
}
action_dim_map = {
'ik': 2,
'ol': 8
}
action_dim = action_dim_map[self._signal_type]
action_high = np.array([action_max[self._signal_type]] * action_dim)
self.action_space = spaces.Box(-action_high, action_high)
self._cam_dist = 1.0
self._cam_yaw = 0.0
self._cam_pitch = -20
self._signal_type = signal_type
self._gait_planner = GaitPlanner("walk")
self._kinematics = Kinematics()
self.goal_reached = False
self._stay_still = False
self.is_terminating = False
def reset(self):
self.init_pose = rex_constants.INIT_POSES["stand"]
if self._signal_type == 'ol':
self.init_pose = rex_constants.INIT_POSES["stand_ol"]
super(RexWalkEnv, self).reset(initial_motor_angles=self.init_pose, reset_duration=0.5)
self.goal_reached = False
self.is_terminating = False
self._stay_still = False
if self._backwards is None:
self.backwards = random.choice([True, False])
else:
self.backwards = self._backwards
step = 0.6
period = 0.65
base_x = self._base_x
if self.backwards:
step = -.3
period = .5
base_x = .0
if not self._target_position or self._random_pos_target:
bound = -3 if self.backwards else 3
self._target_position = random.uniform(bound//2, bound)
self._random_pos_target = True
if 1 or (self._is_render and self._signal_type == 'ik'):
if self.load_ui:
self.setup_ui(base_x, step, period)
self.load_ui = False
if self._is_debug:
print(f"Target Position x={self._target_position}, Random assignment: {self._random_pos_target}, Backwards: {self.backwards}")
return self._get_observation()
def setup_ui(self, base_x, step, period):
self.base_x_ui = self._pybullet_client.addUserDebugParameter("base_x",
self._ranges["base_x"][0],
self._ranges["base_x"][1],
base_x)
self.base_y_ui = self._pybullet_client.addUserDebugParameter("base_y",
self._ranges["base_y"][0],
self._ranges["base_y"][1],
self._ranges["base_y"][2])
self.base_z_ui = self._pybullet_client.addUserDebugParameter("base_z",
self._ranges["base_z"][0],
self._ranges["base_z"][1],
self._ranges["base_z"][2])
self.roll_ui = self._pybullet_client.addUserDebugParameter("roll",
self._ranges["roll"][0],
self._ranges["roll"][1],
self._ranges["roll"][2])
self.pitch_ui = self._pybullet_client.addUserDebugParameter("pitch",
self._ranges["pitch"][0],
self._ranges["pitch"][1],
self._ranges["pitch"][2])
self.yaw_ui = self._pybullet_client.addUserDebugParameter("yaw",
self._ranges["yaw"][0],
self._ranges["yaw"][1],
self._ranges["yaw"][2])
self.step_length_ui = self._pybullet_client.addUserDebugParameter("step_length", -0.7, 0.7, step)
self.step_rotation_ui = self._pybullet_client.addUserDebugParameter("step_rotation", -1.5, 1.5, 0.)
self.step_angle_ui = self._pybullet_client.addUserDebugParameter("step_angle", -180., 180., 0.)
self.step_period_ui = self._pybullet_client.addUserDebugParameter("step_period", 0.2, 0.9, period)
def _read_inputs(self, base_pos_coeff, gait_stage_coeff):
position = np.array(
[
self._pybullet_client.readUserDebugParameter(self.base_x_ui),
self._pybullet_client.readUserDebugParameter(self.base_y_ui) * base_pos_coeff,
self._pybullet_client.readUserDebugParameter(self.base_z_ui) * base_pos_coeff
]
)
orientation = np.array(
[
self._pybullet_client.readUserDebugParameter(self.roll_ui) * base_pos_coeff,
self._pybullet_client.readUserDebugParameter(self.pitch_ui) * base_pos_coeff,
self._pybullet_client.readUserDebugParameter(self.yaw_ui) * base_pos_coeff
]
)
step_length = self._pybullet_client.readUserDebugParameter(self.step_length_ui) * gait_stage_coeff
step_rotation = self._pybullet_client.readUserDebugParameter(self.step_rotation_ui)
step_angle = self._pybullet_client.readUserDebugParameter(self.step_angle_ui)
step_period = self._pybullet_client.readUserDebugParameter(self.step_period_ui)
return position, orientation, step_length, step_rotation, step_angle, step_period
def _check_target_position(self, t):
if self._target_position:
current_x = abs(self.rex.GetBasePosition()[0])
if current_x >= abs(self._target_position) - 0.15:
self.goal_reached = True
if not self.is_terminating:
self.end_time = t
self.is_terminating = True
@staticmethod
def _evaluate_base_stage_coeff(current_t, end_t=0.0, width=0.001):
beta = p = width
if p - beta + end_t <= current_t <= p - (beta / 2) + end_t:
return (2 / beta ** 2) * (current_t - p + beta) ** 2
elif p - (beta/2) + end_t <= current_t <= p + end_t:
return 1 - (2 / beta ** 2) * (current_t - p) ** 2
else:
return 1
@staticmethod
def _evaluate_gait_stage_coeff(current_t, action, end_t=0.0):
p = 0.8 + action[0]
if end_t <= current_t <= p + end_t:
return current_t
else:
return 1.0
@staticmethod
def _evaluate_brakes_stage_coeff(current_t, action, end_t=0.0, end_value=0.0):
p = 0.8 + action[1]
if end_t <= current_t <= p + end_t:
return 1 - (current_t - end_t)
else:
return end_value
def _signal(self, t, action):
if self._signal_type == 'ik':
return self._IK_signal(t, action)
if self._signal_type == 'ol':
return self._open_loop_signal(t, action)
def _IK_signal(self, t, action):
base_pos_coeff = self._evaluate_base_stage_coeff(t, width=1.5)
gait_stage_coeff = self._evaluate_gait_stage_coeff(t, action)
step = 0.6
period = 0.65
base_x = self._base_x
if self.backwards:
step = -.3
period = .5
base_x = .0
if (self._is_render and self._is_debug):
position, orientation, step_length, step_rotation, step_angle, step_period = \
self._read_inputs(base_pos_coeff, gait_stage_coeff)
else:
position = np.array([base_x,
self._base_y * base_pos_coeff,
self._base_z * base_pos_coeff])
orientation = np.array([self._base_roll * base_pos_coeff,
self._base_pitch * base_pos_coeff,
self._base_yaw * base_pos_coeff])
step_length = (self.step_length if self.step_length is not None else step) * gait_stage_coeff
step_rotation = (self.step_rotation if self.step_rotation is not None else 0.0)
step_angle = self.step_angle if self.step_angle is not None else 0.0
step_period = (self.step_period if self.step_period is not None else period)
if self.goal_reached:
brakes_coeff = self._evaluate_brakes_stage_coeff(t, action, self.end_time)
step_length *= brakes_coeff
if brakes_coeff == 0.0:
self._stay_still = True
direction = -1.0 if step_length < 0 else 1.0
frames = self._gait_planner.loop(step_length, step_angle, step_rotation, step_period, direction)
fr_angles, fl_angles, rr_angles, rl_angles, _ = self._kinematics.solve(orientation, position, frames)
signal = [
fl_angles[0], fl_angles[1], fl_angles[2],
fr_angles[0], fr_angles[1], fr_angles[2],
rl_angles[0], rl_angles[1], rl_angles[2],
rr_angles[0], rr_angles[1], rr_angles[2]
]
return signal
def _open_loop_signal(self, t, action):
period = 1.0 / 8
l_a = 0.1
f_a = l_a * 2
if self.goal_reached:
coeff = self._evaluate_brakes_stage_coeff(t, [0., 0.], end_t=self.end_time, end_value=0.0)
l_a *= coeff
f_a *= coeff
if coeff is 0.0:
self._stay_still = True
start_coeff = self._evaluate_gait_stage_coeff(t, [0.0])
l_a *= start_coeff
f_a *= start_coeff
l_extension = l_a * math.cos(2 * math.pi / period * t)
f_extension = f_a * math.cos(2 * math.pi / period * t)
initial_pose = self.init_pose
l_swing = -l_extension
swing = -f_extension
pose = np.array([0.0, l_extension + action[0], f_extension + action[1],
0.0, l_swing + action[2], swing + action[3],
0.0, l_swing + action[4], swing + action[5],
0.0, l_extension + action[6], f_extension + action[7]])
signal = initial_pose + pose
return signal
def _transform_action_to_motor_command(self, action):
if self._stay_still:
return self.init_pose
t = self.rex.GetTimeSinceReset()
self._check_target_position(t)
action = self._signal(t, action)
action = super(RexWalkEnv, self)._transform_action_to_motor_command(action)
return action
def is_fallen(self):
orientation = self.rex.GetBaseOrientation()
rot_mat = self._pybullet_client.getMatrixFromQuaternion(orientation)
local_up = rot_mat[6:]
return np.dot(np.asarray([0, 0, 1]), np.asarray(local_up)) < 0.85
def _get_true_observation(self):
observation = []
roll, pitch, _ = self.rex.GetTrueBaseRollPitchYaw()
roll_rate, pitch_rate, _ = self.rex.GetTrueBaseRollPitchYawRate()
observation.extend([roll, pitch, roll_rate, pitch_rate])
self._true_observation = np.array(observation)
return self._true_observation
def _get_observation(self):
observation = []
roll, pitch, _ = self.rex.GetBaseRollPitchYaw()
roll_rate, pitch_rate, _ = self.rex.GetBaseRollPitchYawRate()
observation.extend([roll, pitch, roll_rate, pitch_rate])
self._observation = np.array(observation)
return self._observation
def _get_observation_upper_bound(self):
upper_bound = np.zeros(self._get_observation_dimension())
upper_bound[0:2] = 2 * math.pi
upper_bound[2:4] = 2 * math.pi / self._time_step
return upper_bound
def _get_observation_lower_bound(self):
lower_bound = -self._get_observation_upper_bound()
return lower_bound
| true | true |
1c2d23a538a46a1a35586ddeca8f545e09db4f19 | 1,587 | py | Python | lib/galaxy/tool_util/deps/mulled/mulled_build_tool.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/tool_util/deps/mulled/mulled_build_tool.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | 6 | 2021-11-11T20:57:49.000Z | 2021-12-10T15:30:33.000Z | lib/galaxy/tool_util/deps/mulled/mulled_build_tool.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
"""Build a mulled images for a tool source (Galaxy or CWL tool).
Examples:
Build mulled images for requirements defined in a tool:
mulled-build-tool build path/to/tool_file.xml
"""
from galaxy.tool_util.parser import get_tool_source
from ._cli import arg_parser
from .mulled_build import (
add_build_arguments,
add_single_image_arguments,
args_to_mull_targets_kwds,
mull_targets,
)
from .util import build_target
def main(argv=None):
    """Main entry-point for the CLI tool."""
    parser = arg_parser(argv, globals())
    add_build_arguments(parser)
    add_single_image_arguments(parser)
    parser.add_argument("command", metavar="COMMAND", help="Command (build-and-test, build, all)")
    parser.add_argument("tool", metavar="TOOL", default=None, help="Path to tool to build mulled image for.")
    args = parser.parse_args()
    # Resolve the tool's package requirements into mulled build targets.
    source = get_tool_source(args.tool)
    requirements, _ = source.parse_requirements_and_containers()
    mull_targets(requirements_to_mulled_targets(requirements),
                 **args_to_mull_targets_kwds(args))
def requirements_to_mulled_targets(requirements):
    """Convert Galaxy's representation of requirements into mulled Target objects.

    Only package requirements are retained.
    """
    return [
        build_target(requirement.name, requirement.version)
        for requirement in requirements
        if requirement.type == "package"
    ]
__all__ = ("main", "requirements_to_mulled_targets")
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| 29.943396 | 109 | 0.741021 |
from galaxy.tool_util.parser import get_tool_source
from ._cli import arg_parser
from .mulled_build import (
add_build_arguments,
add_single_image_arguments,
args_to_mull_targets_kwds,
mull_targets,
)
from .util import build_target
def main(argv=None):
parser = arg_parser(argv, globals())
add_build_arguments(parser)
add_single_image_arguments(parser)
parser.add_argument("command", metavar="COMMAND", help="Command (build-and-test, build, all)")
parser.add_argument("tool", metavar="TOOL", default=None, help="Path to tool to build mulled image for.")
args = parser.parse_args()
tool_source = get_tool_source(args.tool)
requirements, _ = tool_source.parse_requirements_and_containers()
targets = requirements_to_mulled_targets(requirements)
kwds = args_to_mull_targets_kwds(args)
mull_targets(targets, **kwds)
def requirements_to_mulled_targets(requirements):
package_requirements = [r for r in requirements if r.type == "package"]
targets = [build_target(r.name, r.version) for r in package_requirements]
return targets
__all__ = ("main", "requirements_to_mulled_targets")
if __name__ == "__main__":
main()
| true | true |
1c2d2413a1ef1a75bc362e732da7ec8c9048a08e | 3,848 | py | Python | everbean/tasks.py | messense/everbean | 2d93cff0f2f707aff25025b4e07ab1f6104b4854 | [
"MIT"
] | 2 | 2015-07-17T00:34:53.000Z | 2019-07-08T09:35:51.000Z | everbean/tasks.py | messense/everbean | 2d93cff0f2f707aff25025b4e07ab1f6104b4854 | [
"MIT"
] | 4 | 2015-05-21T13:36:39.000Z | 2017-08-21T12:40:35.000Z | everbean/tasks.py | messense/everbean | 2d93cff0f2f707aff25025b4e07ab1f6104b4854 | [
"MIT"
] | null | null | null | # coding=utf-8
from __future__ import (
with_statement,
absolute_import,
unicode_literals
)
from datetime import datetime
from flask import current_app as app
from flask.ext.mail import Message
from .core import mail, db, celery
from .account.models import User
from .note.models import Note
from .ext.douban import (
get_douban_client,
import_annotations,
import_books
)
from .ext.evernote import (
get_evernote_client,
get_notebook,
find_note,
make_note,
create_or_update_note
)
@celery.task
def send_mail(messages):
    """Send one or more messages through the configured mail server.

    Bails out (returning ``False``) when the SMTP credentials are not all
    configured. A single ``Message`` is accepted and wrapped in a list.
    """
    config = app.config
    if not all(config[key] for key in ('MAIL_SERVER', 'MAIL_USERNAME', 'MAIL_PASSWORD')):
        return False
    if isinstance(messages, Message):
        messages = [messages]
    with mail.connect() as conn:
        for message in messages:
            conn.send(message)
@celery.task
def refresh_douban_access_token(user_id):
    """Refresh a user's douban OAuth token and cached profile fields.

    Silently returns when the user no longer exists; logs an error when
    douban does not return a profile (i.e. the refresh failed).
    """
    user = User.query.get(user_id)
    if not user:
        return
    client = get_douban_client()
    client.refresh_token(user.douban_refresh_token)
    me = client.user.me
    if 'id' in me:
        # Update access token and cached profile information.
        user.douban_access_token = client.token_code
        user.douban_refresh_token = client.refresh_token_code
        user.douban_expires_at = client.access_token.expires_at
        user.douban_name = me['name']
        user.avatar = me['avatar']
        # Douban serves large avatars under icon/ul instead of icon/u.
        user.large_avatar = me['avatar'].replace('icon/u', 'icon/ul')
        user.signature = me['signature']
        user.desc = me['desc']
        db.session.add(user)
        db.session.commit()
    else:
        app.logger.error('Refresh token for user %s error.', user.douban_uid)
@celery.task
def sync_books(user_id):
    """Import the douban book list for the given user, if the user exists."""
    user = User.query.get(user_id)
    if user is None:
        return
    import_books(user)
@celery.task
def import_douban_annotations(user_id):
    """Import douban annotations for the given user, if the user exists."""
    user = User.query.get(user_id)
    if user is None:
        return
    import_annotations(user)
@celery.task
def sync_book_notes(user_id, book, notes=None):
    """Render a user's notes for ``book`` into an evernote note and sync it.

    Args:
        user_id: Primary key of the ``User`` to sync for.
        book: The book whose notes should be synced.
        notes: Optional pre-fetched list of ``Note`` rows; fetched oldest
            first when omitted.
    """
    user = User.query.get(user_id)
    if not user or not user.evernote_access_token:
        return
    if notes is None:
        notes = Note.query.filter_by(
            user_id=user.id,
            book_id=book.id
        ).order_by(Note.created.asc()).all()
    if not notes:
        return
    # generate evernote format note
    token = user.evernote_access_token
    en = get_evernote_client(user.is_i18n, token)
    note_store = en.get_note_store()
    notebook = get_notebook(
        note_store,
        user.evernote_notebook,
        app.config['EVERNOTE_NOTEBOOK_NAME']
    )
    # Remember the notebook guid the first time one is created/located.
    if not user.evernote_notebook:
        user.evernote_notebook = notebook.guid
        db.session.add(user)
        db.session.commit()
    note = None
    the_book = user.user_books.filter_by(book_id=book.id).first()
    if not the_book:
        return
    if the_book.evernote_guid:
        # The note was synced before; fetch it so it is updated in place.
        note = find_note(note_store, the_book.evernote_guid)
    note = make_note(
        book,
        notes,
        note,
        notebook,
        template=user.template
    )
    # sync to evernote
    note = create_or_update_note(note_store, note)
    # sync guid to database
    if note and hasattr(note, 'guid'):
        the_book.evernote_guid = note.guid
        the_book.updated = datetime.now()
        db.session.add(the_book)
        db.session.add(user)
        db.session.commit()
def sync_notes(user):
    """Queue an evernote sync task for every book of ``user`` that has notes.

    No-op when the user has disabled syncing.
    """
    if not user.enable_sync:
        return
    # BUGFIX: this previously consumed the books via ``map(...)`` purely for
    # its side effects; under Python 3 ``map`` is lazy, so no task was ever
    # queued. Use an explicit loop.
    for book in user.books:
        notes = Note.query.filter_by(
            user_id=user.id,
            book_id=book.id
        ).order_by(Note.created.asc()).all()
        if notes:
            sync_book_notes.delay(user.id, book, notes)
| 24.825806 | 77 | 0.650728 |
from __future__ import (
with_statement,
absolute_import,
unicode_literals
)
from datetime import datetime
from flask import current_app as app
from flask.ext.mail import Message
from .core import mail, db, celery
from .account.models import User
from .note.models import Note
from .ext.douban import (
get_douban_client,
import_annotations,
import_books
)
from .ext.evernote import (
get_evernote_client,
get_notebook,
find_note,
make_note,
create_or_update_note
)
@celery.task
def send_mail(messages):
if not (app.config['MAIL_SERVER'] and
app.config['MAIL_USERNAME'] and
app.config['MAIL_PASSWORD']):
return False
if isinstance(messages, Message):
messages = [messages, ]
with mail.connect() as conn:
for msg in messages:
conn.send(msg)
@celery.task
def refresh_douban_access_token(user_id):
user = User.query.get(user_id)
if not user:
return
client = get_douban_client()
client.refresh_token(user.douban_refresh_token)
me = client.user.me
if 'id' in me:
user.douban_access_token = client.token_code
user.douban_refresh_token = client.refresh_token_code
user.douban_expires_at = client.access_token.expires_at
user.douban_name = me['name']
user.avatar = me['avatar']
user.large_avatar = me['avatar'].replace('icon/u', 'icon/ul')
user.signature = me['signature']
user.desc = me['desc']
db.session.add(user)
db.session.commit()
else:
app.logger.error('Refresh token for user %s error.', user.douban_uid)
@celery.task
def sync_books(user_id):
user = User.query.get(user_id)
if not user:
return
import_books(user)
@celery.task
def import_douban_annotations(user_id):
user = User.query.get(user_id)
if not user:
return
import_annotations(user)
@celery.task
def sync_book_notes(user_id, book, notes=None):
user = User.query.get(user_id)
if not user or not user.evernote_access_token:
return
if notes is None:
notes = Note.query.filter_by(
user_id=user.id,
book_id=book.id
).order_by(Note.created.asc()).all()
if not notes:
return
token = user.evernote_access_token
en = get_evernote_client(user.is_i18n, token)
note_store = en.get_note_store()
notebook = get_notebook(
note_store,
user.evernote_notebook,
app.config['EVERNOTE_NOTEBOOK_NAME']
)
if not user.evernote_notebook:
user.evernote_notebook = notebook.guid
db.session.add(user)
db.session.commit()
note = None
the_book = user.user_books.filter_by(book_id=book.id).first()
if not the_book:
return
if the_book.evernote_guid:
note = find_note(note_store, the_book.evernote_guid)
note = make_note(
book,
notes,
note,
notebook,
template=user.template
)
note = create_or_update_note(note_store, note)
if note and hasattr(note, 'guid'):
the_book.evernote_guid = note.guid
the_book.updated = datetime.now()
db.session.add(the_book)
db.session.add(user)
db.session.commit()
def sync_notes(user):
    """Queue an evernote sync task for every book of ``user`` that has notes.

    No-op when the user has disabled syncing.
    """
    if not user.enable_sync:
        return
    # BUGFIX: previously used lazy ``map(...)`` for side effects; under
    # Python 3 nothing was ever queued. Use an explicit loop.
    for book in user.books:
        notes = Note.query.filter_by(
            user_id=user.id,
            book_id=book.id
        ).order_by(Note.created.asc()).all()
        if notes:
            sync_book_notes.delay(user.id, book, notes)
| true | true |
1c2d28a71efc509d66a44e44cc0bbc4d58484dd8 | 6,328 | py | Python | src/radixlib/parsers/no_parser.py | 0xOmarA/RadixLib | 85d75a47d4c4df4c1a319b74857ae2c513933623 | [
"MIT"
] | 32 | 2022-01-12T16:52:28.000Z | 2022-03-24T18:05:47.000Z | src/radixlib/parsers/no_parser.py | 0xOmarA/RadixLib | 85d75a47d4c4df4c1a319b74857ae2c513933623 | [
"MIT"
] | 3 | 2022-01-12T17:01:55.000Z | 2022-02-12T15:14:16.000Z | src/radixlib/parsers/no_parser.py | 0xOmarA/RadixLib | 85d75a47d4c4df4c1a319b74857ae2c513933623 | [
"MIT"
] | 1 | 2022-01-21T04:28:07.000Z | 2022-01-21T04:28:07.000Z | from typing import Callable, Optional, Any
from radixlib.parsers.base_parser import ParserBase
class NoParser(ParserBase):
""" Defines a parser which performs no parsing of the data whatsoever.
This class defines a parser which does not perform parsing over any of the data at all. While
this class might seem somewhat redundant, it acually makes the implementation of partial parsers
a lot more simple. To create a partial parser all tha you would need to do is to inherit from
this class and then define the parsing functions that you would like.
"""
@classmethod
def parse(
cls,
data: Any,
data_type: str
) -> Any:
""" Routes the parsing of the data to the appropriate parsing function from within the class
This function acts as a router which tires to find the appropriate parsing function within
the class to parse the data. If no parser is implemented for this data type, then the
original data is returned without any parsing.
Args:
data (Any): Data of any type to pass to the parser function
data_type (str): Type of the data or the origin of the data
Returns:
Any: The parsed data
"""
# Getting the parsing function for this data type from the attributes of the class
function_name: str = f'parse_{data_type}'
parsing_function: Optional[Callable[..., Any]] = getattr(cls, function_name, None)
# We try calling the parsing function with the data that we have. If the parsing function
# works, then we return the parsed data. However, if a TypeError or NotImplementedError is
# raised, then we return the original data
try:
parsed_data: Any = parsing_function(data) # type: ignore
return parsed_data if parsed_data is not None else data
except (TypeError, NotImplementedError):
return data
    @classmethod
    def parse_get_gateway_info(cls, data: Any) -> Any:
        """ A function used for the parsing of the get_gateway_info API calls.

        Raises:
            NotImplementedError: Always; ``parse`` treats this as "no parsing".
        """
        raise NotImplementedError("No implementation for get_gateway_info")
    @classmethod
    def parse_derive_account_identifier(cls, data: Any) -> Any:
        """ A function used for the parsing of the derive_account_identifier API calls.

        Raises:
            NotImplementedError: Always; ``parse`` treats this as "no parsing".
        """
        raise NotImplementedError("No implementation for derive_account_identifier")
@classmethod
def parse_get_account_balances(cls, data: Any) -> Any:
""" A function used for the parsing of the get_account_balances API calls. """
raise NotImplementedError("No implementation for get_account_balances")
@classmethod
def parse_get_stake_positions(cls, data: Any) -> Any:
""" A function used for the parsing of the get_stake_positions API calls. """
raise NotImplementedError("No implementation for get_stake_positions")
@classmethod
def parse_get_unstake_positions(cls, data: Any) -> Any:
""" A function used for the parsing of the get_unstake_positions API calls. """
raise NotImplementedError("No implementation for get_unstake_positions")
@classmethod
def parse_get_account_transactions(cls, data: Any) -> Any:
""" A function used for the parsing of the get_account_transactions API calls. """
raise NotImplementedError("No implementation for get_account_transactions")
@classmethod
def parse_get_native_token_info(cls, data: Any) -> Any:
""" A function used for the parsing of the get_native_token_info API calls. """
raise NotImplementedError("No implementation for get_native_token_info")
@classmethod
def parse_get_token_info(cls, data: Any) -> Any:
""" A function used for the parsing of the get_token_info API calls. """
raise NotImplementedError("No implementation for get_token_info")
@classmethod
def parse_derive_token_identifier(cls, data: Any) -> Any:
""" A function used for the parsing of the derive_token_identifier API calls. """
raise NotImplementedError("No implementation for derive_token_identifier")
@classmethod
def parse_get_validator(cls, data: Any) -> Any:
""" A function used for the parsing of the get_validator API calls. """
raise NotImplementedError("No implementation for get_validator")
@classmethod
def parse_get_validator_identifier(cls, data: Any) -> Any:
""" A function used for the parsing of the get_validator_identifier API calls. """
raise NotImplementedError("No implementation for get_validator_identifier")
@classmethod
def parse_get_validators(cls, data: Any) -> Any:
""" A function used for the parsing of the get_validators API calls. """
raise NotImplementedError("No implementation for get_validators")
@classmethod
def parse_get_validator_stakes(cls, data: Any) -> Any:
""" A function used for the parsing of the get_validator_stakes API calls. """
raise NotImplementedError("No implementation for get_validator_stakes")
@classmethod
def parse_get_transaction_rules(cls, data: Any) -> Any:
""" A function used for the parsing of the get_transaction_rules API calls. """
raise NotImplementedError("No implementation for get_transaction_rules")
@classmethod
def parse_build_transaction(cls, data: Any) -> Any:
""" A function used for the parsing of the build_transaction API calls. """
raise NotImplementedError("No implementation for build_transaction")
@classmethod
def parse_finalize_transaction(cls, data: Any) -> Any:
""" A function used for the parsing of the finalize_transaction API calls. """
raise NotImplementedError("No implementation for finalize_transaction")
@classmethod
def parse_submit_transaction(cls, data: Any) -> Any:
""" A function used for the parsing of the submit_transaction API calls. """
raise NotImplementedError("No implementation for submit_transaction")
@classmethod
def parse_transaction_status(cls, data: Any) -> Any:
""" A function used for the parsing of the transaction_status API calls. """
raise NotImplementedError("No implementation for transaction_status")
| 46.874074 | 101 | 0.70354 | from typing import Callable, Optional, Any
from radixlib.parsers.base_parser import ParserBase
class NoParser(ParserBase):
    """A pass-through parser: every per-call handler is a stub, so ``parse``
    always hands back the raw API response unchanged. Subclasses override
    individual handlers to build partial parsers."""
    @classmethod
    def parse(
        cls,
        data: Any,
        data_type: str
    ) -> Any:
        """Dispatch ``data`` to ``parse_<data_type>``; fall back to the raw ``data``."""
        handler: Optional[Callable[..., Any]] = getattr(cls, 'parse_' + data_type, None)
        try:
            parsed: Any = handler(data)  # type: ignore
        except (TypeError, NotImplementedError):
            # missing handler (None call) or a stub handler: return raw data
            return data
        return data if parsed is None else parsed
    @classmethod
    def parse_get_gateway_info(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for get_gateway_info")
    @classmethod
    def parse_derive_account_identifier(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for derive_account_identifier")
    @classmethod
    def parse_get_account_balances(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for get_account_balances")
    @classmethod
    def parse_get_stake_positions(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for get_stake_positions")
    @classmethod
    def parse_get_unstake_positions(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for get_unstake_positions")
    @classmethod
    def parse_get_account_transactions(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for get_account_transactions")
    @classmethod
    def parse_get_native_token_info(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for get_native_token_info")
    @classmethod
    def parse_get_token_info(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for get_token_info")
    @classmethod
    def parse_derive_token_identifier(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for derive_token_identifier")
    @classmethod
    def parse_get_validator(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for get_validator")
    @classmethod
    def parse_get_validator_identifier(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for get_validator_identifier")
    @classmethod
    def parse_get_validators(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for get_validators")
    @classmethod
    def parse_get_validator_stakes(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for get_validator_stakes")
    @classmethod
    def parse_get_transaction_rules(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for get_transaction_rules")
    @classmethod
    def parse_build_transaction(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for build_transaction")
    @classmethod
    def parse_finalize_transaction(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for finalize_transaction")
    @classmethod
    def parse_submit_transaction(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for submit_transaction")
    @classmethod
    def parse_transaction_status(cls, data: Any) -> Any:
        raise NotImplementedError("No implementation for transaction_status")
1c2d2a773462018896ad739e576823a0d2b1692c | 1,559 | py | Python | symphony/cli/gql/tests/test_transport.py | idoshveki/magma | 8022267bd8b8d94913fbb9a0836880361d785446 | [
"BSD-3-Clause"
] | 2 | 2020-11-05T18:58:26.000Z | 2021-02-09T06:42:49.000Z | symphony/cli/gql/tests/test_transport.py | idoshveki/magma | 8022267bd8b8d94913fbb9a0836880361d785446 | [
"BSD-3-Clause"
] | 10 | 2021-03-31T20:19:00.000Z | 2022-02-19T07:09:57.000Z | symphony/cli/gql/tests/test_transport.py | idoshveki/magma | 8022267bd8b8d94913fbb9a0836880361d785446 | [
"BSD-3-Clause"
] | 3 | 2020-08-20T18:45:34.000Z | 2020-08-20T20:18:42.000Z | #!/usr/bin/env python3
import pytest
import requests
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
@pytest.fixture
def client():
    """Build a gql Client against the SWAPI demo GraphQL endpoint.

    Performs one plain GET to obtain the ``csrftoken`` cookie, then wires the
    token into both the transport's cookie jar and its ``x-csrftoken`` header.
    """
    # requests.get returns a Response; name it accordingly (was `request`).
    response = requests.get(
        "http://swapi.graphene-python.org/graphql",
        headers={"Host": "swapi.graphene-python.org", "Accept": "text/html"},
        # fail fast instead of hanging the whole test session on a dead host
        timeout=30,
    )
    response.raise_for_status()
    csrf = response.cookies["csrftoken"]
    return Client(
        transport=RequestsHTTPTransport(
            url="http://swapi.graphene-python.org/graphql",
            cookies={"csrftoken": csrf},
            headers={"x-csrftoken": csrf},
        ),
        fetch_schema_from_transport=True,
    )
def test_hero_name_query(client):
    """Executing the film query returns the expected Return of the Jedi payload."""
    film_query = gql(
        """
{
  myFavoriteFilm: film(id:"RmlsbToz") {
    id
    title
    episodeId
    characters(first:5) {
      edges {
        node {
          name
        }
      }
    }
  }
}
"""
    )
    hero_names = ["Luke Skywalker", "C-3PO", "R2-D2", "Darth Vader", "Leia Organa"]
    expected_payload = {
        "myFavoriteFilm": {
            "id": "RmlsbToz",
            "title": "Return of the Jedi",
            "episodeId": 6,
            "characters": {"edges": [{"node": {"name": n}} for n in hero_names]},
        }
    }
    assert client.execute(film_query) == expected_payload
| 23.984615 | 77 | 0.479153 |
import pytest
import requests
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
@pytest.fixture
def client():
    """Build a gql Client against the SWAPI demo GraphQL endpoint.

    Performs one plain GET to obtain the ``csrftoken`` cookie, then wires the
    token into both the transport's cookie jar and its ``x-csrftoken`` header.
    """
    # requests.get returns a Response; name it accordingly (was `request`).
    response = requests.get(
        "http://swapi.graphene-python.org/graphql",
        headers={"Host": "swapi.graphene-python.org", "Accept": "text/html"},
        # fail fast instead of hanging the whole test session on a dead host
        timeout=30,
    )
    response.raise_for_status()
    csrf = response.cookies["csrftoken"]
    return Client(
        transport=RequestsHTTPTransport(
            url="http://swapi.graphene-python.org/graphql",
            cookies={"csrftoken": csrf},
            headers={"x-csrftoken": csrf},
        ),
        fetch_schema_from_transport=True,
    )
def test_hero_name_query(client):
    """Executing the film query returns the expected Return of the Jedi payload."""
    film_query = gql(
        """
{
  myFavoriteFilm: film(id:"RmlsbToz") {
    id
    title
    episodeId
    characters(first:5) {
      edges {
        node {
          name
        }
      }
    }
  }
}
"""
    )
    hero_names = ["Luke Skywalker", "C-3PO", "R2-D2", "Darth Vader", "Leia Organa"]
    expected_payload = {
        "myFavoriteFilm": {
            "id": "RmlsbToz",
            "title": "Return of the Jedi",
            "episodeId": 6,
            "characters": {"edges": [{"node": {"name": n}} for n in hero_names]},
        }
    }
    assert client.execute(film_query) == expected_payload
| true | true |
1c2d2aeb9f93a8945d0da4537d89d203cc18a3cb | 27,956 | py | Python | r6sapi/players.py | dasBunny/RainbowSixSiege-Python-API | dade9efd41b2a8855e65347ea1c70b85c3a36bbf | [
"MIT"
] | null | null | null | r6sapi/players.py | dasBunny/RainbowSixSiege-Python-API | dade9efd41b2a8855e65347ea1c70b85c3a36bbf | [
"MIT"
] | null | null | null | r6sapi/players.py | dasBunny/RainbowSixSiege-Python-API | dade9efd41b2a8855e65347ea1c70b85c3a36bbf | [
"MIT"
] | null | null | null | """
Copyright (c) 2016-2019 billyoyo
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import asyncio
import inspect
from .exceptions import InvalidRequest
from .platforms import PlatformURLNames
from .weapons import *
from .gamemodes import *
from .gamequeues import *
from .operators import *
from .ranks import *
class PlayerUrlTemplates:
    """Private holder for the Ubi public-API URL templates.

    Each template is filled via %-formatting with (spaceid, platform_url,
    player_ids, ...); see :class:`PlayerUrlBuilder`."""
    FETCH_STATISTIC = "https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s"
    LOAD_LEVEL = "https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s"
    # BUG FIX: the query string previously contained a mojibake "registered
    # trademark" character where "&reg" of "&region_id" had been HTML-decoded.
    LOAD_RANK = "https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s&region_id=%s&season_id=%s"
    LOAD_OPERATOR = "https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s"
    LOAD_WEAPON = "https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit"
class PlayerUrlBuilder:
    """Private helper that fills :class:`PlayerUrlTemplates` for one or more players.

    Parameters
    ----------
    spaceid : str
        the Ubisoft space id for the platform
    platform_url : str
        the sandbox/platform URL fragment
    player_ids : str or list[str] or tuple[str]
        a single profile id, or a collection of ids batched into one request"""
    def __init__(self, spaceid, platform_url, player_ids):
        self.spaceid = spaceid
        self.platform_url = platform_url
        # BUG FIX: the ids must be joined *before* being stored; previously the
        # joined string was computed after self.player_ids had already been
        # assigned and was then discarded, so list/tuple input produced
        # malformed URLs containing the repr of the list.
        if isinstance(player_ids, (list, tuple)):
            player_ids = ",".join(player_ids)
        self.player_ids = player_ids
    def fetch_statistic_url(self, statistics):
        """URL fetching the given iterable of statistic names for these players."""
        return PlayerUrlTemplates.FETCH_STATISTIC % (self.spaceid, self.platform_url, self.player_ids, ",".join(statistics))
    def load_level_url(self):
        """URL for the players' profile progression (XP / level)."""
        return PlayerUrlTemplates.LOAD_LEVEL % (self.spaceid, self.platform_url, self.player_ids)
    def load_rank_url(self, region, season):
        """URL for the players' ranked-board standing in ``region``/``season``."""
        return PlayerUrlTemplates.LOAD_RANK % (self.spaceid, self.platform_url, self.player_ids, region, season)
    def load_operator_url(self, statistics):
        """URL for operator statistics; ``statistics`` is a pre-joined string."""
        return PlayerUrlTemplates.LOAD_OPERATOR % (self.spaceid, self.platform_url, self.player_ids, statistics)
    def load_weapon_url(self):
        """URL for the fixed set of per-weapon-type PvP statistics."""
        return PlayerUrlTemplates.LOAD_WEAPON % (self.spaceid, self.platform_url, self.player_ids)
class PlayerBatch:
    """ Accumulates requests for multiple players' stats into a single request, saving time.

    Acts as a proxy for any asynchronous method in :class:`Player`. The response of the method will be a dictionary of
    the responses from each player, with the player ids as keys.

    This class is also an iterable, and iterates over the :class:`Player` objects contained in the batch.

    Individual players in the batch can be accessed via their ID using an item accessor (player_batch[player.id])

    NOTE(review): the proxied coroutines use the generator-based
    ``@asyncio.coroutine`` style, which was removed in Python 3.11 — confirm
    the supported Python range before upgrading.

    Parameters
    ----------
    players : dict[str, :class:`Player`]
        mapping of profile id to player (``__init__`` indexes ``players`` by
        id, so a plain list will not work) """
    def __init__(self, players):
        self.players = players
        self.player_ids = [player_id for player_id in players]
        self._player_objs = [players[player_id] for player_id in players]
        if len(players) == 0:
            raise ValueError("batch must contain at least one player")
    def __iter__(self):
        # Iterate the Player objects, not the ids.
        return iter(self._player_objs)
    def __getitem__(self, name):
        return self.players[name]
    def __getattr__(self, name):
        # Only invoked for attributes not found normally, i.e. Player methods.
        # The first player performs the one real (batched) request; every other
        # player then parses the shared response instead of hitting the API.
        root_player = self.players[self.player_ids[0]]
        root_method = getattr(root_player, name)
        @asyncio.coroutine
        def _proxy(*args, **kwargs):
            results = {}
            # temporarily override url builder so we get data for all players
            root_player.url_builder.player_ids = ",".join(self.player_ids)
            root_result = yield from root_method(*args, **kwargs)
            results[root_player.id] = root_result
            # re-use the root player's raw response for the remaining players
            data = root_player._last_data
            kwargs["data"] = data
            for player_id in self.players:
                if player_id != root_player.id:
                    results[player_id] = yield from getattr(self.players[player_id], name)(*args, **kwargs)
            # reset root player url builder to default state
            root_player.url_builder.player_ids = root_player.id
            return results
        return _proxy
class Player:
"""Contains information about a specific player
Attributes
----------
auth : :class:`Auth`
the auth object used to find this player
id : str
the players profile id
userid : str
the players user id
platform : str
the platform this player is on
platform_url : str
the URL name for this platform (used internally)
id_on_platform : str
the players ID on the platform
name : str
the players name on the platform
url : str
a link to the players profile
icon_url : str
a link to the players avatar
xp : int
the amount of xp the player has, must call check_level or load_level first
level : int
the level of the player, must call check_level or load_level first
ranks : dict
dict containing already found ranks ("region_name:season": :class:`Rank`)
operators : dict
dict containing already found operators (operator_name: :class:`Operator`)
gamemodes : dict
dict containing already found gamemodes (gamemode_id: :class:`Gamemode`)
    weapons : list
        list of already found :class:`Weapon` objects (filled by load_weapons/check_weapons)
casual : :class:`GameQueue`
stats for the casual queue, must call load_queues or check_queues first
ranked : :class:`GameQueue`
stats for the ranked queue, must call load_queues or check_queues first
deaths : int
the number of deaths the player has (must call load_general or check_general first)
kills : int
the number of kills the player has (must call load_general or check_general first)
kill_assists : int
the number of kill assists the player has (must call load_general or check_general first)
penetration_kills : int
the number of penetration kills the player has (must call load_general or check_general first)
melee_kills : int
the number of melee kills the player has (must call load_general or check_general first)
revives : int
the number of revives the player has (must call load_general or check_general first)
matches_won : int
the number of matches the player has won (must call load_general or check_general first)
matches_lost : int
the number of matches the player has lost (must call load_general or check_general first)
matches_played : int
the number of matches the player has played (must call load_general or check_general first)
time_played : int
the amount of time in seconds the player has played for (must call load_general or check_general first)
bullets_fired : int
the amount of bullets the player has fired (must call load_general or check_general first)
bullets_hit : int
the amount of bullets the player has hit (must call load_general or check_general first)
headshots : int
the amount of headshots the player has hit (must call load_general or check_general first)
terrorist_hunt : :class:`GameQueue`
contains all of the above state (from deaths to headshots) inside a gamequeue object.
"""
    def __init__(self, auth, data):
        """Build a player from a single profile dict returned by the Ubi API.

        Parameters
        ----------
        auth : :class:`Auth`
            the auth object used to make further requests
        data : dict
            one profile entry (keys ``profileId``, ``userId``,
            ``platformType``, ``idOnPlatform``, ``nameOnPlatform``)"""
        self.auth = auth
        self.id = data.get("profileId")
        self.userid = data.get("userId")
        self.platform = data.get("platformType")
        self.platform_url = PlatformURLNames[self.platform]
        self.id_on_platform = data.get("idOnPlatform")
        self.name = data.get("nameOnPlatform")
        # must run after auth/platform are set: `spaceid` is a property over both
        self.url_builder = PlayerUrlBuilder(self.spaceid, self.platform_url, self.id)
        self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
        self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
        # caches filled lazily by the load_*/check_*/get_* coroutines
        self.ranks = {}
        self.operators = {}
        self.gamemodes = {}
        self.weapons = []  # list of Weapon objects, populated by load_weapons
        self.casual = None
        self.ranked = None
        self.terrorist_hunt = None
        self._last_data = None  # last raw API response (re-used by PlayerBatch)
    @property
    def spaceid(self):
        # The Ubisoft "space" id matching this player's platform.
        return self.auth.spaceids[self.platform]
@asyncio.coroutine
def _fetch_statistics(self, *statistics, data=None):
if data is None:
data = yield from self.auth.get(self.url_builder.fetch_statistic_url(statistics))
self._last_data = data
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
stats = {}
for x in data:
statistic = x.split(":")[0]
if statistic in statistics:
stats[statistic] = data[x]
return stats
    @asyncio.coroutine
    def load_level(self, data=None):
        """|coro|

        Load the players XP and level from the progression endpoint.
        Sets :attr:`xp` and :attr:`level` (0 when the response omits them).

        Raises
        ------
        InvalidRequest
            if the response contains no ``player_profiles`` entry"""
        if data is None:
            data = yield from self.auth.get(self.url_builder.load_level_url())
        self._last_data = data
        if "player_profiles" in data and len(data["player_profiles"]) > 0:
            self.xp = data["player_profiles"][0].get("xp", 0)
            self.level = data["player_profiles"][0].get("level", 0)
        else:
            raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
    @asyncio.coroutine
    def check_level(self):
        """|coro|

        Check the players XP and level, only loading it if it hasn't been loaded yet."""
        # `level` only exists after a successful load_level call
        if not hasattr(self, "level"):
            yield from self.load_level()
    @asyncio.coroutine
    def load_rank(self, region, season=-1, data=None):
        """|coro|

        Loads the players rank for this region and season, caching it in
        :attr:`ranks` under the key ``"<region>:<season>"``.

        Parameters
        ----------
        region : str
            the name of the region you want to get the rank for
        season : Optional[int]
            the season you want to get the rank for (defaults to -1, latest season)

        Returns
        -------
        :class:`Rank`
            the players rank for this region and season

        Raises
        ------
        InvalidRequest
            if the response contains no ``players`` entry for this id"""
        if data is None:
            data = yield from self.auth.get(self.url_builder.load_rank_url(region, season))
        self._last_data = data
        rank_definitions = yield from self.auth.get_rank_definitions()
        if "players" in data and self.id in data["players"]:
            regionkey = "%s:%s" % (region, season)
            self.ranks[regionkey] = Rank(data["players"][self.id], rank_definitions)
            return self.ranks[regionkey]
        else:
            raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
    @asyncio.coroutine
    def get_rank(self, region, season=-1, data=None):
        """|coro|

        Checks the players rank for this region, only loading it if it hasn't already been found.

        Parameters
        ----------
        region : str
            the name of the region you want to get the rank for
        season : Optional[int]
            the season you want to get the rank for (defaults to -1, latest season)

        Returns
        -------
        :class:`Rank`
            the players rank for this region and season"""
        # same "<region>:<season>" key format that load_rank uses for caching
        cache_key = "%s:%s" % (region, season)
        if cache_key in self.ranks:
            return self.ranks[cache_key]
        result = yield from self.load_rank(region, season, data=data)
        return result
    @asyncio.coroutine
    def load_all_operators(self, data=None):
        """|coro|

        Loads the player stats for all operators in a single request.

        Returns
        -------
        dict[:class:`Operator`]
            the dictionary of all operators found"""
        # base per-operator stat names, plus each operator's unique statistic
        statistics = ",".join(OperatorUrlStatisticNames)
        for operator in OperatorStatisticNames:
            operator_key = yield from self.auth.get_operator_statistic(operator)
            if operator_key:
                statistics += "," + operator_key
        if data is None:
            data = yield from self.auth.get(self.url_builder.load_operator_url(statistics))
        self._last_data = data
        if "results" not in data or not self.id in data["results"]:
            raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
        data = data["results"][self.id]
        for operator in OperatorStatisticNames:
            # `location` identifies this operator's entries among the result keys
            location = yield from self.auth.get_operator_index(operator.lower())
            op_data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
            operator_key = yield from self.auth.get_operator_statistic(operator)
            if operator_key:
                op_data["__statistic_name"] = operator_key.split("_")[1]
            self.operators[operator.lower()] = Operator(operator.lower(), op_data)
        return self.operators
    @asyncio.coroutine
    def get_all_operators(self, data=None):
        """|coro|

        Checks the player stats for all operators, loading them all again if any aren't found.
        This is significantly more efficient than calling get_operator for every operator name.

        Returns
        -------
        dict[:class:`Operator`]
            the dictionary of all operators found"""
        if len(self.operators) >= len(OperatorStatisticNames):
            return self.operators
        result = yield from self.load_all_operators(data=data)
        return result
@asyncio.coroutine
def load_operator(self, operator, data=None):
"""|coro|
Loads the players stats for the operator
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
location = yield from self.auth.get_operator_index(operator)
if location is None:
raise ValueError("invalid operator %s" % operator)
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key is not None:
operator_key = "," + operator_key
else:
operator_key = ""
if data is None:
statistics = ",".join(OperatorUrlStatisticNames) + operator_key
data = yield from self.auth.get(self.url_builder.load_operator_url(statistics))
self._last_data = data
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
if operator_key:
data["__statistic_name"] = operator_key.split("_")[1]
#if len(data) < 5:
# raise InvalidRequest("invalid number of results for operator in JSON object %s" % data)
oper = Operator(operator, data)
self.operators[operator] = oper
return oper
    @asyncio.coroutine
    def get_operator(self, operator, data=None):
        """|coro|

        Checks the players stats for this operator, only loading them if they haven't already been found.

        Parameters
        ----------
        operator : str
            the name of the operator

        Returns
        -------
        :class:`Operator`
            the operator object found"""
        if operator in self.operators:
            return self.operators[operator]
        result = yield from self.load_operator(operator, data=data)
        return result
    @asyncio.coroutine
    def load_weapons(self, data=None):
        """|coro|

        Load the players weapon stats.

        Returns
        -------
        list[:class:`Weapon`]
            list of all the weapon objects found

        Raises
        ------
        InvalidRequest
            if the response contains no results for this player"""
        if data is None:
            data = yield from self.auth.get(self.url_builder.load_weapon_url())
        self._last_data = data
        if not "results" in data or not self.id in data["results"]:
            raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
        data = data["results"][self.id]
        # one Weapon per weapon-type index; 7 types are built here
        self.weapons = [Weapon(i, data) for i in range(7)]
        return self.weapons
    @asyncio.coroutine
    def check_weapons(self, data=None):
        """|coro|

        Check the players weapon stats, only loading them if they haven't already been found.

        Returns
        -------
        list[:class:`Weapon`]
            list of all the weapon objects found"""
        if len(self.weapons) == 0:
            yield from self.load_weapons(data=data)
        return self.weapons
    @asyncio.coroutine
    def load_gamemodes(self, data=None):
        """|coro|

        Loads the players gamemode stats.

        Returns
        -------
        dict
            dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
        stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
                                                  "secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
                                                  "rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
                                                  "plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
                                                  "generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
                                                  "generalpvp_hostagerescue", "generalpvp_hostagedefense", data=data)
        self.gamemodes = {x: Gamemode(x, stats) for x in GamemodeNames}
        return self.gamemodes
    @asyncio.coroutine
    def check_gamemodes(self, data=None):
        """|coro|

        Checks the players gamemode stats, only loading them if they haven't already been found.

        Returns
        -------
        dict
            dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
        if len(self.gamemodes) == 0:
            yield from self.load_gamemodes(data=data)
        return self.gamemodes
@asyncio.coroutine
def load_general(self, data=None):
"""|coro|
Loads the players general stats"""
stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
"generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
"generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
"generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
"generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
"generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
"generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
"generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills", data=data)
statname = "generalpvp_"
self.deaths = stats.get(statname + "death", 0)
self.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.matches_won = stats.get(statname + "matchwon", 0)
self.bullets_hit = stats.get(statname + "bullethit", 0)
self.melee_kills = stats.get(statname + "meleekills", 0)
self.bullets_fired = stats.get(statname + "bulletfired", 0)
self.matches_played = stats.get(statname + "matchplayed", 0)
self.kill_assists = stats.get(statname + "killassists", 0)
self.time_played = stats.get(statname + "timeplayed", 0)
self.revives = stats.get(statname + "revive", 0)
self.kills = stats.get(statname + "kills", 0)
self.headshots = stats.get(statname + "headshot", 0)
self.matches_lost = stats.get(statname + "matchlost", 0)
self.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.suicides = stats.get(statname + "suicide", 0)
self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.total_xp = stats.get(statname + "totalxp", 0)
self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.revives_denied = stats.get(statname + "revivedenied", 0)
self.dbnos = stats.get(statname + "dbno", 0)
self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.blind_kills = stats.get(statname + "blindkills")
    @asyncio.coroutine
    def check_general(self, data=None):
        """|coro|

        Checks the players general stats, only loading them if they haven't already been found."""
        # `kills` is only set once load_general has run
        if not hasattr(self, "kills"):
            yield from self.load_general(data=data)
    @asyncio.coroutine
    def load_queues(self, data=None):
        """|coro|

        Loads the players casual and ranked queue stats into
        :attr:`casual` and :attr:`ranked`."""
        stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
                                                  "casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
                                                  "rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
                                                  "rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death", data=data)
        self.ranked = GameQueue("ranked", stats)
        self.casual = GameQueue("casual", stats)
    @asyncio.coroutine
    def check_queues(self, data=None):
        """|coro|

        Checks the players game queues, only loading them if they haven't already been found."""
        # casual and ranked are always set together by load_queues
        if self.casual is None:
            yield from self.load_queues(data=data)
@asyncio.coroutine
def load_terrohunt(self, data=None):
"""|coro|
Loads the player's general stats for terrorist hunt"""
stats = yield from self._fetch_statistics("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
"generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
"generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
"generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
"generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
"generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
"generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
"generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
"generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
"generalpve_gadgetdestroy", "generalpve_accuracy", data=data)
self.terrorist_hunt = GameQueue("terrohunt")
statname = "generalpve_"
self.terrorist_hunt.deaths = stats.get(statname + "death", 0)
self.terrorist_hunt.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.terrorist_hunt.matches_won = stats.get(statname + "matchwon", 0)
self.terrorist_hunt.bullets_hit = stats.get(statname + "bullethit", 0)
self.terrorist_hunt.melee_kills = stats.get(statname + "meleekills", 0)
self.terrorist_hunt.bullets_fired = stats.get(statname + "bulletfired", 0)
self.terrorist_hunt.matches_played = stats.get(statname + "matchplayed", 0)
self.terrorist_hunt.kill_assists = stats.get(statname + "killassists", 0)
self.terrorist_hunt.time_played = stats.get(statname + "timeplayed", 0)
self.terrorist_hunt.revives = stats.get(statname + "revive", 0)
self.terrorist_hunt.kills = stats.get(statname + "kills", 0)
self.terrorist_hunt.headshots = stats.get(statname + "headshot", 0)
self.terrorist_hunt.matches_lost = stats.get(statname + "matchlost", 0)
self.terrorist_hunt.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.terrorist_hunt.suicides = stats.get(statname + "suicide", 0)
self.terrorist_hunt.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.terrorist_hunt.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.terrorist_hunt.total_xp = stats.get(statname + "totalxp", 0)
self.terrorist_hunt.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.terrorist_hunt.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.terrorist_hunt.revives_denied = stats.get(statname + "revivedenied", 0)
self.terrorist_hunt.dbnos = stats.get(statname + "dbno", 0)
self.terrorist_hunt.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.terrorist_hunt.areas_secured = stats.get(statname + "servershacked", 0)
self.terrorist_hunt.areas_defended = stats.get(statname + "serverdefender", 0)
self.terrorist_hunt.areas_contested = stats.get(statname + "serveraggression", 0)
self.terrorist_hunt.hostages_rescued = stats.get(statname + "hostagerescue", 0)
self.terrorist_hunt.hostages_defended = stats.get(statname + "hostagedefense", 0)
self.terrorist_hunt.blind_kills = stats.get(statname + "blindkills", 0)
return self.terrorist_hunt
@asyncio.coroutine
def check_terrohunt(self, data=None):
"""|coro|
Checks the players general stats for terrorist hunt, only loading them if they haven't been loaded already"""
if self.terrorist_hunt is None:
yield from self.load_terrohunt(data=data)
return self.terrorist_hunt
    @property
    def wins(self):
        # Read-only alias for the ``won`` attribute (assumes the relevant
        # stats loader has already set it -- TODO confirm with caller).
        return self.won
    @property
    def losses(self):
        # Read-only alias for the ``lost`` attribute (assumes the relevant
        # stats loader has already set it -- TODO confirm with caller).
        return self.lost
import asyncio
import inspect
from .exceptions import InvalidRequest
from .platforms import PlatformURLNames
from .weapons import *
from .gamemodes import *
from .gamequeues import *
from .operators import *
from .ranks import *
class PlayerUrlTemplates:
    """%-format templates for the Ubisoft public-API endpoints used to
    fetch player statistics, level, rank, operator and weapon data.

    Each template is filled with (spaceid, platform_url, player_ids, ...)
    by :class:`PlayerUrlBuilder`.
    """

    FETCH_STATISTIC = "https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s"
    LOAD_LEVEL = "https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s"
    # BUGFIX: the query key was mojibake ("\xaeion_id", an HTML "&reg;"
    # rendering of "&region_id"); restored the literal "&region_id" so the
    # region argument is actually sent.
    LOAD_RANK = "https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s&region_id=%s&season_id=%s"
    LOAD_OPERATOR = "https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s"
    LOAD_WEAPON = "https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit"
class PlayerUrlBuilder:
    """Builds the concrete request URLs for one player (or a comma-joined
    batch of player ids) from the :class:`PlayerUrlTemplates` templates."""

    def __init__(self, spaceid, platform_url, player_ids):
        """
        Args:
            spaceid: Ubisoft space id for the player's platform.
            platform_url: sandbox/platform URL segment.
            player_ids: a single profile-id string, or a list/tuple of ids.
        """
        self.spaceid = spaceid
        self.platform_url = platform_url
        # BUGFIX: the joined string was previously assigned only to the
        # local variable *after* self.player_ids had been set, so list or
        # tuple inputs were stored un-joined. Join first, then store.
        if isinstance(player_ids, (list, tuple)):
            player_ids = ",".join(player_ids)
        self.player_ids = player_ids

    def fetch_statistic_url(self, statistics):
        # statistics: iterable of statistic names; joined into the query.
        return PlayerUrlTemplates.FETCH_STATISTIC % (self.spaceid, self.platform_url, self.player_ids, ",".join(statistics))

    def load_level_url(self):
        return PlayerUrlTemplates.LOAD_LEVEL % (self.spaceid, self.platform_url, self.player_ids)

    def load_rank_url(self, region, season):
        return PlayerUrlTemplates.LOAD_RANK % (self.spaceid, self.platform_url, self.player_ids, region, season)

    def load_operator_url(self, statistics):
        # statistics: pre-joined statistics string (see Player.load_operator).
        return PlayerUrlTemplates.LOAD_OPERATOR % (self.spaceid, self.platform_url, self.player_ids, statistics)

    def load_weapon_url(self):
        return PlayerUrlTemplates.LOAD_WEAPON % (self.spaceid, self.platform_url, self.player_ids)
class PlayerBatch:
    # Groups several Player objects so that any stats-loading coroutine can
    # be invoked once on the batch and fanned out to every player, reusing
    # the first HTTP response's payload for the remaining players.

    def __init__(self, players):
        # players: dict mapping profile id -> Player object.
        self.players = players
        self.player_ids = [player_id for player_id in players]
        self._player_objs = [players[player_id] for player_id in players]
        if len(players) == 0:
            raise ValueError("batch must contain at least one player")

    def __iter__(self):
        # Iterate over the Player objects (not the ids).
        return iter(self._player_objs)

    def __getitem__(self, name):
        # Look up a player by profile id.
        return self.players[name]

    def __getattr__(self, name):
        # Any attribute not found on the batch is assumed to be a Player
        # coroutine; return a proxy coroutine that runs it for every player.
        root_player = self.players[self.player_ids[0]]
        root_method = getattr(root_player, name)

        @asyncio.coroutine
        def _proxy(*args, **kwargs):
            results = {}
            # Temporarily widen the root player's URL builder so one request
            # covers every id in the batch.
            root_player.url_builder.player_ids = ",".join(self.player_ids)
            root_result = yield from root_method(*args, **kwargs)
            results[root_player.id] = root_result
            # Reuse the JSON payload of the first call for the other players
            # instead of issuing additional HTTP requests.
            data = root_player._last_data
            kwargs["data"] = data
            for player_id in self.players:
                if player_id != root_player.id:
                    results[player_id] = yield from getattr(self.players[player_id], name)(*args, **kwargs)
            # Restore the single-id builder state.
            root_player.url_builder.player_ids = root_player.id
            return results
        return _proxy
class Player:
    """One player's profile plus lazily-loaded Rainbow Six: Siege stats.

    All ``load_*`` methods are generator-based coroutines
    (``@asyncio.coroutine`` / ``yield from``); the matching ``check_*`` /
    ``get_*`` helpers return cached values and only hit the API once.
    ``data=None`` parameters let callers (e.g. PlayerBatch) inject an
    already-fetched JSON payload instead of issuing a new request.
    """

    def __init__(self, auth, data):
        # auth: authenticated API session object; data: profile JSON dict.
        self.auth = auth
        self.id = data.get("profileId")
        self.userid = data.get("userId")
        self.platform = data.get("platformType")
        self.platform_url = PlatformURLNames[self.platform]
        self.id_on_platform = data.get("idOnPlatform")
        self.name = data.get("nameOnPlatform")
        self.url_builder = PlayerUrlBuilder(self.spaceid, self.platform_url, self.id)
        self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
        self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
        # Caches populated by the load_* coroutines below.
        self.ranks = {}
        self.operators = {}
        self.gamemodes = {}
        self.weapons = []
        self.casual = None
        self.ranked = None
        self.terrorist_hunt = None
        self._last_data = None

    @property
    def spaceid(self):
        """The Ubisoft space id matching this player's platform."""
        return self.auth.spaceids[self.platform]

    @asyncio.coroutine
    def _fetch_statistics(self, *statistics, data=None):
        """|coro| Fetches the named statistics, reusing `data` if provided."""
        if data is None:
            data = yield from self.auth.get(self.url_builder.fetch_statistic_url(statistics))
        self._last_data = data
        if not "results" in data or not self.id in data["results"]:
            raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
        data = data["results"][self.id]
        stats = {}
        for x in data:
            # Response keys look like "<statistic>:<qualifier>".
            statistic = x.split(":")[0]
            if statistic in statistics:
                stats[statistic] = data[x]
        return stats

    @asyncio.coroutine
    def load_level(self, data=None):
        """|coro| Loads the player's XP and level."""
        if data is None:
            data = yield from self.auth.get(self.url_builder.load_level_url())
        self._last_data = data
        if "player_profiles" in data and len(data["player_profiles"]) > 0:
            self.xp = data["player_profiles"][0].get("xp", 0)
            self.level = data["player_profiles"][0].get("level", 0)
        else:
            raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))

    @asyncio.coroutine
    def check_level(self):
        """|coro| Loads level/XP only if not already loaded."""
        if not hasattr(self, "level"):
            yield from self.load_level()

    @asyncio.coroutine
    def load_rank(self, region, season=-1, data=None):
        """|coro| Loads the player's rank for a region/season (-1 = current)."""
        if data is None:
            data = yield from self.auth.get(self.url_builder.load_rank_url(region, season))
        self._last_data = data
        rank_definitions = yield from self.auth.get_rank_definitions()
        if "players" in data and self.id in data["players"]:
            regionkey = "%s:%s" % (region, season)
            self.ranks[regionkey] = Rank(data["players"][self.id], rank_definitions)
            return self.ranks[regionkey]
        else:
            raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))

    @asyncio.coroutine
    def get_rank(self, region, season=-1, data=None):
        """|coro| Cached variant of load_rank."""
        cache_key = "%s:%s" % (region, season)
        if cache_key in self.ranks:
            return self.ranks[cache_key]
        result = yield from self.load_rank(region, season, data=data)
        return result

    @asyncio.coroutine
    def load_all_operators(self, data=None):
        """|coro| Loads statistics for every known operator."""
        statistics = ",".join(OperatorUrlStatisticNames)
        for operator in OperatorStatisticNames:
            operator_key = yield from self.auth.get_operator_statistic(operator)
            if operator_key:
                statistics += "," + operator_key
        if data is None:
            data = yield from self.auth.get(self.url_builder.load_operator_url(statistics))
        self._last_data = data
        if "results" not in data or not self.id in data["results"]:
            raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
        data = data["results"][self.id]
        for operator in OperatorStatisticNames:
            location = yield from self.auth.get_operator_index(operator.lower())
            op_data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
            operator_key = yield from self.auth.get_operator_statistic(operator)
            if operator_key:
                op_data["__statistic_name"] = operator_key.split("_")[1]
            self.operators[operator.lower()] = Operator(operator.lower(), op_data)
        return self.operators

    @asyncio.coroutine
    def get_all_operators(self, data=None):
        """|coro| Cached variant of load_all_operators."""
        if len(self.operators) >= len(OperatorStatisticNames):
            return self.operators
        result = yield from self.load_all_operators(data=data)
        return result

    @asyncio.coroutine
    def load_operator(self, operator, data=None):
        """|coro| Loads statistics for one operator (by lowercase name)."""
        location = yield from self.auth.get_operator_index(operator)
        if location is None:
            raise ValueError("invalid operator %s" % operator)
        operator_key = yield from self.auth.get_operator_statistic(operator)
        if operator_key is not None:
            operator_key = "," + operator_key
        else:
            operator_key = ""
        if data is None:
            statistics = ",".join(OperatorUrlStatisticNames) + operator_key
            data = yield from self.auth.get(self.url_builder.load_operator_url(statistics))
        self._last_data = data
        if not "results" in data or not self.id in data["results"]:
            raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
        data = data["results"][self.id]
        data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
        if operator_key:
            data["__statistic_name"] = operator_key.split("_")[1]
        oper = Operator(operator, data)
        self.operators[operator] = oper
        return oper

    @asyncio.coroutine
    def get_operator(self, operator, data=None):
        """|coro| Cached variant of load_operator."""
        if operator in self.operators:
            return self.operators[operator]
        result = yield from self.load_operator(operator, data=data)
        return result

    @asyncio.coroutine
    def load_weapons(self, data=None):
        """|coro| Loads statistics for the 7 weapon types."""
        if data is None:
            data = yield from self.auth.get(self.url_builder.load_weapon_url())
        self._last_data = data
        if not "results" in data or not self.id in data["results"]:
            raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
        data = data["results"][self.id]
        self.weapons = [Weapon(i, data) for i in range(7)]
        return self.weapons

    @asyncio.coroutine
    def check_weapons(self, data=None):
        """|coro| Loads weapon stats only if not already loaded."""
        if len(self.weapons) == 0:
            yield from self.load_weapons(data=data)
        return self.weapons

    @asyncio.coroutine
    def load_gamemodes(self, data=None):
        """|coro| Loads per-gamemode PvP statistics."""
        stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
                                                  "secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
                                                  "rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
                                                  "plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
                                                  "generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
                                                  "generalpvp_hostagerescue", "generalpvp_hostagedefense", data=data)
        self.gamemodes = {x: Gamemode(x, stats) for x in GamemodeNames}
        return self.gamemodes

    @asyncio.coroutine
    def check_gamemodes(self, data=None):
        """|coro| Loads gamemode stats only if not already loaded."""
        if len(self.gamemodes) == 0:
            yield from self.load_gamemodes(data=data)
        return self.gamemodes

    @asyncio.coroutine
    def load_general(self, data=None):
        """|coro| Loads the player's general PvP statistics onto self."""
        stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
                                                  "generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
                                                  "generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
                                                  "generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
                                                  "generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
                                                  "generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
                                                  "generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
                                                  "generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills", data=data)
        statname = "generalpvp_"
        self.deaths = stats.get(statname + "death", 0)
        self.penetration_kills = stats.get(statname + "penetrationkills", 0)
        self.matches_won = stats.get(statname + "matchwon", 0)
        self.bullets_hit = stats.get(statname + "bullethit", 0)
        self.melee_kills = stats.get(statname + "meleekills", 0)
        self.bullets_fired = stats.get(statname + "bulletfired", 0)
        self.matches_played = stats.get(statname + "matchplayed", 0)
        self.kill_assists = stats.get(statname + "killassists", 0)
        self.time_played = stats.get(statname + "timeplayed", 0)
        self.revives = stats.get(statname + "revive", 0)
        self.kills = stats.get(statname + "kills", 0)
        self.headshots = stats.get(statname + "headshot", 0)
        self.matches_lost = stats.get(statname + "matchlost", 0)
        self.dbno_assists = stats.get(statname + "dbnoassists", 0)
        self.suicides = stats.get(statname + "suicide", 0)
        self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
        self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
        self.total_xp = stats.get(statname + "totalxp", 0)
        self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
        self.distance_travelled = stats.get(statname + "distancetravelled", 0)
        self.revives_denied = stats.get(statname + "revivedenied", 0)
        self.dbnos = stats.get(statname + "dbno", 0)
        self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
        # BUGFIX: previously missing the 0 default, so a player with no
        # blindkills stat got None here (inconsistent with every other
        # attribute, including terrorist-hunt blind_kills).
        self.blind_kills = stats.get(statname + "blindkills", 0)

    @asyncio.coroutine
    def check_general(self, data=None):
        """|coro| Loads general stats only if not already loaded."""
        if not hasattr(self, "kills"):
            yield from self.load_general(data=data)

    @asyncio.coroutine
    def load_queues(self, data=None):
        """|coro| Loads the casual and ranked queue statistics."""
        stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
                                                  "casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
                                                  "rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
                                                  "rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death", data=data)
        self.ranked = GameQueue("ranked", stats)
        self.casual = GameQueue("casual", stats)

    @asyncio.coroutine
    def check_queues(self, data=None):
        """|coro| Loads queue stats only if not already loaded."""
        if self.casual is None:
            yield from self.load_queues(data=data)

    @asyncio.coroutine
    def load_terrohunt(self, data=None):
        """|coro| Loads the player's general terrorist-hunt (PvE) stats."""
        stats = yield from self._fetch_statistics("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
                                                  "generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
                                                  "generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
                                                  "generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
                                                  "generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
                                                  "generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
                                                  "generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
                                                  "generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
                                                  "generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
                                                  "generalpve_gadgetdestroy", "generalpve_accuracy", data=data)
        self.terrorist_hunt = GameQueue("terrohunt")
        statname = "generalpve_"
        self.terrorist_hunt.deaths = stats.get(statname + "death", 0)
        self.terrorist_hunt.penetration_kills = stats.get(statname + "penetrationkills", 0)
        self.terrorist_hunt.matches_won = stats.get(statname + "matchwon", 0)
        self.terrorist_hunt.bullets_hit = stats.get(statname + "bullethit", 0)
        self.terrorist_hunt.melee_kills = stats.get(statname + "meleekills", 0)
        self.terrorist_hunt.bullets_fired = stats.get(statname + "bulletfired", 0)
        self.terrorist_hunt.matches_played = stats.get(statname + "matchplayed", 0)
        self.terrorist_hunt.kill_assists = stats.get(statname + "killassists", 0)
        self.terrorist_hunt.time_played = stats.get(statname + "timeplayed", 0)
        self.terrorist_hunt.revives = stats.get(statname + "revive", 0)
        self.terrorist_hunt.kills = stats.get(statname + "kills", 0)
        self.terrorist_hunt.headshots = stats.get(statname + "headshot", 0)
        self.terrorist_hunt.matches_lost = stats.get(statname + "matchlost", 0)
        self.terrorist_hunt.dbno_assists = stats.get(statname + "dbnoassists", 0)
        self.terrorist_hunt.suicides = stats.get(statname + "suicide", 0)
        self.terrorist_hunt.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
        self.terrorist_hunt.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
        self.terrorist_hunt.total_xp = stats.get(statname + "totalxp", 0)
        self.terrorist_hunt.rappel_breaches = stats.get(statname + "rappelbreach", 0)
        self.terrorist_hunt.distance_travelled = stats.get(statname + "distancetravelled", 0)
        self.terrorist_hunt.revives_denied = stats.get(statname + "revivedenied", 0)
        self.terrorist_hunt.dbnos = stats.get(statname + "dbno", 0)
        self.terrorist_hunt.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
        self.terrorist_hunt.areas_secured = stats.get(statname + "servershacked", 0)
        self.terrorist_hunt.areas_defended = stats.get(statname + "serverdefender", 0)
        self.terrorist_hunt.areas_contested = stats.get(statname + "serveraggression", 0)
        self.terrorist_hunt.hostages_rescued = stats.get(statname + "hostagerescue", 0)
        self.terrorist_hunt.hostages_defended = stats.get(statname + "hostagedefense", 0)
        self.terrorist_hunt.blind_kills = stats.get(statname + "blindkills", 0)
        return self.terrorist_hunt

    @asyncio.coroutine
    def check_terrohunt(self, data=None):
        """|coro| Loads terrorist-hunt stats only if not already loaded."""
        if self.terrorist_hunt is None:
            yield from self.load_terrohunt(data=data)
        return self.terrorist_hunt

    @property
    def wins(self):
        # Alias for the ``won`` attribute.
        return self.won

    @property
    def losses(self):
        # Alias for the ``lost`` attribute.
        return self.lost
1c2d2c23a3bcd443b95c043f6fb27c7092470d17 | 142 | py | Python | zcrmsdk/src/com/zoho/crm/api/bulk_write/response_wrapper.py | zoho/zohocrm-python-sdk-2.0 | 3a93eb3b57fed4e08f26bd5b311e101cb2995411 | [
"Apache-2.0"
] | null | null | null | zcrmsdk/src/com/zoho/crm/api/bulk_write/response_wrapper.py | zoho/zohocrm-python-sdk-2.0 | 3a93eb3b57fed4e08f26bd5b311e101cb2995411 | [
"Apache-2.0"
] | null | null | null | zcrmsdk/src/com/zoho/crm/api/bulk_write/response_wrapper.py | zoho/zohocrm-python-sdk-2.0 | 3a93eb3b57fed4e08f26bd5b311e101cb2995411 | [
"Apache-2.0"
] | null | null | null | from abc import ABC, abstractmethod
class ResponseWrapper(ABC):
    # Abstract marker base class; concrete response wrappers subclass it.
    def __init__(self):
        """Creates an instance of ResponseWrapper"""
        pass
| 15.777778 | 46 | 0.746479 | from abc import ABC, abstractmethod
class ResponseWrapper(ABC):
    # Abstract marker base class; concrete response wrappers subclass it.
    def __init__(self):
        """Creates an instance of ResponseWrapper"""
        pass
| true | true |
1c2d2c2a9b337d93ae73da357bf12fb17a8477bd | 52,929 | py | Python | py_entitymatching/catalog/catalog_manager.py | kvpradap/py_entitymatching | 4ff803df1a03cf4d77ef935357355e6de5dd9438 | [
"BSD-3-Clause"
] | 165 | 2016-08-28T14:30:01.000Z | 2022-03-29T17:24:03.000Z | py_entitymatching/catalog/catalog_manager.py | mvahit/py_entitymatching | 6724081d7d95c547e5a51625b4a8207c6c1737f8 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 70 | 2016-11-22T00:35:22.000Z | 2022-03-11T22:26:26.000Z | py_entitymatching/catalog/catalog_manager.py | mvahit/py_entitymatching | 6724081d7d95c547e5a51625b4a8207c6c1737f8 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 53 | 2016-09-22T02:07:34.000Z | 2022-03-19T18:57:06.000Z | # coding=utf-8
"""
This module contains wrapper functions for the catalog.
"""
import logging
import pandas as pd
import six
import py_entitymatching.utils.catalog_helper as ch
from py_entitymatching.catalog.catalog import Catalog
from py_entitymatching.utils.validation_helper import validate_object_type
logger = logging.getLogger(__name__)
def get_property(data_frame, property_name):
    """
    Gets the value of the named property for a pandas DataFrame from the
    catalog.

    Args:
        data_frame (DataFrame): The DataFrame whose property is looked up.
        property_name (string): The name of the property to retrieve.

    Returns:
        A Python object (typically a string or a pandas DataFrame,
        depending on the property name).

    Raises:
        AssertionError: If `data_frame` is not of type pandas DataFrame.
        AssertionError: If `property_name` is not of type string.
        KeyError: If `data_frame` information is not present in the catalog.
        KeyError: If the requested property is not present for `data_frame`
            in the catalog.

    Examples:
        >>> import py_entitymatching as em
        >>> import pandas as pd
        >>> A = pd.DataFrame({'id' : [1, 2], 'colA':['a', 'b'], 'colB' : [10, 20]})
        >>> em.set_key(A, 'id')
        >>> em.get_property(A, 'key')
        # id
    """
    # Validate both inputs up front.
    validate_object_type(data_frame, pd.DataFrame)
    validate_object_type(property_name, six.string_types, error_prefix='Property name')

    catalog = Catalog.Instance()

    # The DataFrame must already have an entry in the catalog.
    if not catalog.is_df_info_present_in_catalog(data_frame):
        message = 'DataFrame information is not present in the catalog'
        logger.error(message)
        raise KeyError(message)

    # The requested property must exist for that entry.
    if not catalog.is_property_present_for_df(data_frame, property_name):
        message = ('Requested metadata ( %s ) for the given DataFrame is not '
                   'present in the catalog' % property_name)
        logger.error(message)
        raise KeyError(message)

    return catalog.get_property(data_frame, property_name)
def set_property(data_frame, property_name, property_value):
    """
    Sets the value of the named property for a pandas DataFrame in the
    catalog.

    Args:
        data_frame (DataFrame): The DataFrame whose property is set.
        property_name (string): The name of the property to set.
        property_value (object): The value to store; typically a string
            (such as a key) or a pandas DataFrame (such as ltable, rtable).

    Returns:
        A Boolean value of True if the update was successful.

    Raises:
        AssertionError: If `data_frame` is not of type pandas DataFrame.
        AssertionError: If `property_name` is not of type string.

    Examples:
        >>> import py_entitymatching as em
        >>> import pandas as pd
        >>> A = pd.DataFrame({'id' : [1, 2], 'colA':['a', 'b'], 'colB' : [10, 20]})
        >>> em.set_property(A, 'key', 'id')
        >>> em.get_key(A)
        # id

    Note:
        If the DataFrame is not yet present in the catalog, an entry is
        created for it before the property is set.
    """
    validate_object_type(data_frame, pd.DataFrame)
    validate_object_type(property_name, six.string_types, error_prefix='Property name')

    catalog = Catalog.Instance()

    # First-time DataFrames get a fresh (empty) catalog entry.
    if not catalog.is_df_info_present_in_catalog(data_frame):
        catalog.init_properties(data_frame)

    # Delegate; True indicates a successful update.
    return catalog.set_property(data_frame, property_name, property_value)
def init_properties(data_frame):
    """
    Initializes an entry for a pandas DataFrame in the catalog, with an
    empty set of properties.

    Args:
        data_frame (DataFrame): The DataFrame whose properties must be
            initialized.

    Returns:
        A Boolean value of True if the initialization was successful.
    """
    validate_object_type(data_frame, pd.DataFrame)
    # Delegate to the catalog singleton; True indicates success.
    return Catalog.Instance().init_properties(data_frame)
def get_all_properties(data_frame):
    """
    Gets every property stored for a pandas DataFrame from the catalog.

    Args:
        data_frame (DataFrame): The DataFrame whose properties are
            retrieved.

    Returns:
        A dictionary of the DataFrame's properties.

    Raises:
        AttributeError: If the input object is not of type pandas DataFrame.
        KeyError: If the DataFrame information is not present in the
            catalog.
    """
    validate_object_type(data_frame, pd.DataFrame)

    catalog = Catalog.Instance()

    # The DataFrame must already have an entry in the catalog.
    if not catalog.is_df_info_present_in_catalog(data_frame):
        message = 'DataFrame information is not present in the catalog'
        logger.error(message)
        raise KeyError(message)

    return catalog.get_all_properties(data_frame)
def del_property(data_frame, property_name):
    """
    Deletes the named property of a pandas DataFrame from the catalog.

    Args:
        data_frame (DataFrame): The DataFrame whose property is deleted.
        property_name (string): The name of the property to delete.

    Returns:
        A Boolean value of True if the deletion was successful.

    Raises:
        AssertionError: If `data_frame` is not of type pandas DataFrame.
        AssertionError: If `property_name` is not of type string.
        KeyError: If `data_frame` information is not present in the catalog.
        KeyError: If the requested property is not present for the
            DataFrame in the catalog.

    Examples:
        >>> import py_entitymatching as em
        >>> import pandas as pd
        >>> A = pd.DataFrame({'id' : [1, 2], 'colA':['a', 'b'], 'colB' : [10, 20]})
        >>> em.set_property(A, 'key', 'id')
        >>> em.del_property(A, 'key')
        >>> em.is_property_present_for_df(A, 'key')
        # False
    """
    validate_object_type(data_frame, pd.DataFrame)
    validate_object_type(property_name, six.string_types, error_prefix='Property name')

    catalog = Catalog.Instance()

    # The DataFrame must already have an entry in the catalog.
    if not catalog.is_df_info_present_in_catalog(data_frame):
        message = 'DataFrame information is not present in the catalog'
        logger.error(message)
        raise KeyError(message)

    # The property to delete must exist for that entry.
    if not catalog.is_property_present_for_df(data_frame, property_name):
        message = ('Requested metadata ( %s ) for the given DataFrame is '
                   'not present in the catalog' % property_name)
        logger.error(message)
        raise KeyError(message)

    # Delegate; True indicates a successful deletion.
    return catalog.del_property(data_frame, property_name)
def del_all_properties(data_frame):
    """
    Deletes all properties for a DataFrame from the catalog.

    Args:
        data_frame (DataFrame): Input DataFrame for which all the properties
            must be deleted from the catalog.

    Returns:
        A boolean of True is returned if the deletion was successful
        from the catalog.

    Raises:
        AssertionError: If the `data_frame` is not of type pandas DataFrame.
        KeyError: If the DataFrame information is not present in the catalog.

    Note:
        This method's functionality is not the same as init_properties. Here
        the DataFrame's entry is removed from the catalog, whereas
        init_properties adds (if absent) the DataFrame and resets its
        properties to an empty Python dictionary.
    """
    # Consistency fix: every sibling function validates through the shared
    # helper; the previous hand-rolled isinstance/AssertionError check is
    # replaced with the same validate_object_type call.
    validate_object_type(data_frame, pd.DataFrame)

    # Get the catalog instance
    catalog = Catalog.Instance()

    # The DataFrame must already have an entry in the catalog.
    if not catalog.is_df_info_present_in_catalog(data_frame):
        logger.error('DataFrame information is not present in the catalog')
        raise KeyError('DataFrame information is not present in the catalog')

    # Delegate the deletion and relay its return value.
    return catalog.del_all_properties(data_frame)
def get_catalog():
    """
    Gets the catalog information for the current session.

    Returns:
        A Python dictionary keyed by the Python identifier of each
        DataFrame (``id(DataFrame object)``) with its properties as the
        value.

    Examples:
        >>> import py_entitymatching as em
        >>> catalog = em.get_catalog()
    """
    # Delegate straight to the catalog singleton.
    return Catalog.Instance().get_catalog()
def del_catalog():
    """
    Deletes the catalog for the current session.

    Returns:
        A Boolean value of True if the deletion was successful.

    Examples:
        >>> import py_entitymatching as em
        >>> em.del_catalog()
    """
    # Delegate straight to the catalog singleton.
    return Catalog.Instance().del_catalog()
def is_catalog_empty():
    """
    Checks whether the catalog contains no entries.

    Returns:
        True if the catalog is empty, False otherwise.

    Examples:
        >>> import py_entitymatching as em
        >>> import pandas as pd
        >>> A = pd.DataFrame({'id' : [1, 2], 'colA':['a', 'b'], 'colB' : [10, 20]})
        >>> em.set_key(A, 'id')
        >>> em.is_catalog_empty()
        # False
    """
    # Delegate straight to the catalog singleton.
    return Catalog.Instance().is_catalog_empty()
def is_dfinfo_present(data_frame):
    """
    Checks whether information for the given DataFrame is present in the
    catalog.

    Args:
        data_frame (DataFrame): The DataFrame to look up.

    Returns:
        True if the DataFrame is present in the catalog, False otherwise.

    Raises:
        AssertionError: If `data_frame` is not of type pandas DataFrame.

    Examples:
        >>> import py_entitymatching as em
        >>> import pandas as pd
        >>> A = pd.DataFrame({'id' : [1, 2], 'colA':['a', 'b'], 'colB' : [10, 20]})
        >>> em.set_key(A, 'id')
        >>> em.is_dfinfo_present(A)
        # True
    """
    validate_object_type(data_frame, pd.DataFrame)
    # Delegate to the catalog singleton and relay its answer.
    return Catalog.Instance().is_df_info_present_in_catalog(data_frame)
def is_property_present_for_df(data_frame, property_name):
    """
    Checks if the given property is present for the given DataFrame in the
    catalog.

    Args:
        data_frame (DataFrame): The DataFrame for which the property must be
            checked for.
        property_name (string): The name of the property that should be
            checked for its presence for the DataFrame, in the catalog.

    Returns:
        A Boolean value of True is returned if the property is present for
        the given DataFrame.

    Raises:
        AssertionError: If `data_frame` is not of type pandas DataFrame.
        AssertionError: If `property_name` is not of type string.
        KeyError: If `data_frame` is not present in the catalog.

    Examples:
        >>> import py_entitymatching as em
        >>> import pandas as pd
        >>> A = pd.DataFrame({'id' : [1, 2], 'colA':['a', 'b'], 'colB' : [10, 20]})
        >>> em.set_key(A, 'id')
        >>> em.is_property_present_for_df(A, 'id')
        # True
        >>> em.is_property_present_for_df(A, 'fk_ltable')
        # False
    """
    # Input validations
    validate_object_type(data_frame, pd.DataFrame)
    validate_object_type(property_name, six.string_types, error_prefix='Property name')

    # Get the catalog instance
    catalog = Catalog.Instance()

    # Idiom fix: test the boolean directly instead of comparing the result
    # with `is False`.
    if not catalog.is_df_info_present_in_catalog(data_frame):
        logger.error('DataFrame information is not present in the catalog')
        raise KeyError('DataFrame information is not present in the catalog')

    # Delegate the presence check and relay its return value.
    return catalog.is_property_present_for_df(data_frame, property_name)
def get_catalog_len():
    """
    Gets the number of entries currently stored in the catalog.

    Returns:
        The number of catalog entries as an integer.

    Examples:
        >>> import py_entitymatching as em
        >>> len = em.get_catalog_len()
    """
    # Delegate straight to the catalog singleton.
    return Catalog.Instance().get_catalog_len()
def set_properties(data_frame, properties, replace=True):
    """
    Sets the properties for a DataFrame in the catalog.

    Args:
        data_frame (DataFrame): DataFrame for which the properties must be set.
        properties (dict): A Python dictionary with keys as property names and
            values as Python objects (typically strings or DataFrames).
        replace (Optional[bool]): Flag to indicate whether properties already
            recorded in the catalog for this DataFrame may be replaced. If
            the DataFrame is already known to the catalog and this flag is
            False, the existing properties are left untouched and False is
            returned; if the flag is True, the existing properties are first
            cleared and then replaced with the given ones. The default value
            for the flag is True.

    Returns:
        A Boolean value of True is returned if the properties were set for
        the given DataFrame, else returns False.

    Raises:
        AssertionError: If the input data_frame object is not of type pandas
            DataFrame.
        AssertionError: If the input properties object is not of type Python
            dictionary.
    """
    # Basic type checks on both inputs.
    validate_object_type(data_frame, pd.DataFrame)
    validate_object_type(properties, dict, error_prefix='The properties')

    catalog = Catalog.Instance()

    # Refuse to clobber existing properties unless the caller opted in.
    if catalog.is_df_info_present_in_catalog(data_frame) and not replace:
        logger.warning(
            'Properties already exists for df ( %s ). Not replacing it'
            % str(id(data_frame)))
        return False

    # Start from a clean property dictionary for this DataFrame. This covers
    # both the "not yet registered" and the "registered, replace=True" cases.
    catalog.init_properties(data_frame)

    # Store every supplied property. Note: the values themselves are not
    # checked for validity (e.g. a 'key' value is not verified to be a
    # true key).
    for name, value in six.iteritems(properties):
        catalog.set_property(data_frame, name, value)
    return True
def copy_properties(source_data_frame, target_data_frame, replace=True):
    """
    Copies properties from a source DataFrame to target DataFrame in the
    catalog.

    Args:
        source_data_frame (DataFrame): The DataFrame from which the properties
            are to be copied, in the catalog.
        target_data_frame (DataFrame): The DataFrame to which the properties
            are to be copied, in the catalog.
        replace (boolean): A flag to indicate whether the target DataFrame's
            existing properties (if any) may be replaced by the source
            DataFrame's properties. The default value for the flag is True.
            If the target DataFrame is already present in the catalog and the
            flag is False, the function returns without modifying anything.

    Returns:
        A Boolean value of True is returned if the copying was successful.

    Raises:
        AssertionError: If `source_data_frame` is not of
            type pandas DataFrame.
        AssertionError: If `target_data_frame` is not of
            type pandas DataFrame.
        KeyError: If the source DataFrame is not present in the
            catalog.

    Examples:
        >>> import py_entitymatching as em
        >>> import pandas as pd
        >>> A = pd.DataFrame({'id' : [1, 2], 'colA':['a', 'b'], 'colB' : [10, 20]})
        >>> em.set_key(A, 'id')
        >>> B = pd.DataFrame({'id' : [1, 2], 'colA':['c', 'd'], 'colB' : [30, 40]})
        >>> em.copy_properties(A, B)
        >>> em.get_key(B)
        # 'id'
    """
    # Both endpoints must be pandas DataFrames.
    validate_object_type(source_data_frame, pd.DataFrame,
                         error_prefix='Input object (source_data_frame)')
    validate_object_type(target_data_frame, pd.DataFrame,
                         error_prefix='Input object (target_data_frame)')

    catalog = Catalog.Instance()

    # The source must already be registered in the catalog, otherwise there
    # is nothing to copy.
    if not catalog.is_df_info_present_in_catalog(source_data_frame):
        logger.error(
            'DataFrame information (source_data_frame) is not present in the '
            'catalog')
        raise KeyError(
            'DataFrame information (source_data_frame) is not present in the '
            'catalog')

    # Pull every property of the source and store the whole dictionary on
    # the target; set_properties also registers the target in the catalog.
    # The input re-validation inside set_properties is redundant but this
    # function is not expected to be called often enough for that to matter.
    metadata = catalog.get_all_properties(source_data_frame)
    return set_properties(target_data_frame, metadata, replace)
# key related methods
def get_key(data_frame):
    """
    Gets the value of 'key' property for a DataFrame from the catalog.

    Args:
        data_frame (DataFrame): The DataFrame for which the key must be
            retrieved from the catalog.

    Returns:
        A string value containing the key column name is returned (if present).

    Examples:
        >>> import py_entitymatching as em
        >>> import pandas as pd
        >>> A = pd.DataFrame({'id' : [1, 2], 'colA':['a', 'b'], 'colB' : [10, 20]})
        >>> em.set_key(A, 'id')
        >>> em.get_key(A)
        # 'id'

    See Also:
        :meth:`~py_entitymatching.get_property`
    """
    # Sugar for looking up the 'key' property of the DataFrame.
    return get_property(data_frame, 'key')
def set_key(data_frame, key_attribute):
    """
    Sets the value of 'key' property for a DataFrame in the catalog with the
    given attribute (i.e column name).

    The attribute is accepted as the key only if it satisfies two
    conditions: its values are unique, and it contains no missing values
    (where a missing value is represented as np.NaN).

    Args:
        data_frame (DataFrame): The DataFrame for which the key must be set in
            the catalog.
        key_attribute (string): The key attribute (column name) in the
            DataFrame.

    Returns:
        A Boolean value of True is returned, if the given attribute
        satisfies the conditions for a key and the update was successful;
        False if the attribute does not qualify to be a key.

    Raises:
        AssertionError: If `data_frame` is not of type
            pandas DataFrame.
        AssertionError: If `key_attribute` is not of type string.
        KeyError: If given `key_attribute` is not in the DataFrame columns.

    Examples:
        >>> import py_entitymatching as em
        >>> import pandas as pd
        >>> A = pd.DataFrame({'id' : [1, 2], 'colA':['a', 'b'], 'colB' : [10, 20]})
        >>> em.set_key(A, 'id')
        >>> em.get_key(A)
        # 'id'

    See Also:
        :meth:`~py_entitymatching.set_property`
    """
    # Type checks on both inputs.
    validate_object_type(data_frame, pd.DataFrame)
    validate_object_type(key_attribute, six.string_types,
                         error_prefix='Input key attribute')

    # The candidate key column has to exist in the DataFrame.
    if not ch.check_attrs_present(data_frame, key_attribute):
        logger.error('Input key ( %s ) not in the DataFrame' % key_attribute)
        raise KeyError('Input key ( %s ) not in the DataFrame' % key_attribute)

    # A non-qualifying attribute is reported with a warning and False rather
    # than an exception, so that computation-intensive commands that call
    # this helper do not lose all their work over a bad key choice.
    if not ch.is_key_attribute(data_frame, key_attribute):
        logger.warning('Attribute (%s ) does not qualify to be a key; Not '
                       'setting/replacing the key' % key_attribute)
        return False

    # Record the key in the catalog.
    return set_property(data_frame, 'key', key_attribute)
def get_fk_ltable(data_frame):
    """
    Gets the foreign key to left table for a DataFrame from the
    catalog.

    Sugar function that retrieves the foreign key to the left table via the
    underlying :meth:`~py_entitymatching.get_property` function. Typically
    called on a DataFrame carrying metadata such as fk_ltable, fk_rtable,
    ltable, rtable.

    Args:
        data_frame (DataFrame): The input DataFrame for which the foreign key
            ltable property must be retrieved.

    Returns:
        A Python object, typically a string is returned.

    Examples:
        >>> import py_entitymatching as em
        >>> C = pd.DataFrame({'id':[1, 2], 'ltable_id':[1, 2], 'rtable_id':[2, 1]})
        >>> em.set_key(C, 'id')
        >>> em.set_fk_ltable(C, 'ltable_id')
        >>> em.get_fk_ltable(C)
        # 'ltable_id'

    See Also:
        :meth:`~py_entitymatching.get_property`
    """
    # Sugar for looking up the 'fk_ltable' property.
    return get_property(data_frame, 'fk_ltable')
def get_fk_rtable(data_frame):
    """
    Gets the foreign key to right table for a DataFrame from the catalog.

    Sugar function that retrieves the foreign key to the right table via the
    underlying :meth:`py_entitymatching.get_property` function. Typically
    called on a DataFrame carrying metadata such as fk_ltable, fk_rtable,
    ltable, rtable.

    Args:
        data_frame (DataFrame): The input DataFrame for which the foreign key
            rtable property must be retrieved.

    Returns:
        A Python object, (typically a string) is returned.

    Examples:
        >>> import py_entitymatching as em
        >>> C = pd.DataFrame({'id':[1, 2], 'ltable_id':[1, 2], 'rtable_id':[2, 1]})
        >>> em.set_key(C, 'id')
        >>> em.set_fk_rtable(C, 'rtable_id')
        >>> em.get_fk_rtable(C)
        # 'rtable_id'

    See Also:
        :meth:`~py_entitymatching.get_property`
    """
    # Sugar for looking up the 'fk_rtable' property.
    return get_property(data_frame, 'fk_rtable')
def set_fk_ltable(data_frame, fk_ltable):
    """
    Sets the foreign key to ltable for a DataFrame in the catalog.

    Sugar function that records the foreign key to the left table via
    :meth:`py_entitymatching.set_property`. Typically called on a DataFrame
    carrying metadata such as fk_ltable, fk_rtable, ltable, rtable.

    Args:
        data_frame (DataFrame): The input DataFrame for which the foreign key
            ltable property must be set.
        fk_ltable (string): The attribute that must be set as the foreign key
            to the ltable in the catalog.

    Returns:
        A Boolean value of True is returned if the foreign key to ltable was
        set successfully.

    Raises:
        AssertionError: If `data_frame` is not of type
            pandas DataFrame.
        AssertionError: If `fk_ltable` is not of type
            string.
        KeyError: If `fk_ltable` is not in the input
            DataFrame.

    Examples:
        >>> import py_entitymatching as em
        >>> C = pd.DataFrame({'id':[1, 2], 'ltable_id':[1, 2], 'rtable_id':[2, 1]})
        >>> em.set_key(C, 'id')
        >>> em.set_fk_ltable(C, 'ltable_id')
        >>> em.get_fk_ltable(C)
        # 'ltable_id'

    See Also:
        :meth:`~py_entitymatching.set_property`
    """
    # Type checks on both inputs.
    validate_object_type(data_frame, pd.DataFrame)
    validate_object_type(fk_ltable, six.string_types,
                         error_prefix='The input (fk_ltable)')

    # The foreign key column must exist in the DataFrame itself.
    if not ch.check_attrs_present(data_frame, fk_ltable):
        logger.error('Input attr. ( %s ) not in the DataFrame' % fk_ltable)
        raise KeyError('Input attr. ( %s ) not in the DataFrame' % fk_ltable)

    # Record the property and relay the result.
    return set_property(data_frame, 'fk_ltable', fk_ltable)
def validate_and_set_fk_ltable(foreign_data_frame, foreign_key_ltable, ltable,
                               ltable_key):
    """
    Validates and set the foreign key ltable for a DataFrame in the catalog.

    Given a DataFrame and a foreign key attribute, two conditions are
    checked before the property is stored: the foreign key ltable attribute
    must not have any missing values, and the referenced foreign key values
    must map to unique values in the primary (base) table.

    Args:
        foreign_data_frame (DataFrame): DataFrame containing the foreign key
            (typically a candidate set, for example output from blocking two
            tables).
        foreign_key_ltable (string): An attribute in the foreign DataFrame.
        ltable (DataFrame): Base DataFrame, in which the foreign key
            attribute would form the primary key.
        ltable_key (string): An attribute in the base table
            (typically a primary key attribute).

    Returns:
        A Boolean value of True will be returned if the validation was
        successful and the update was successful in the catalog; False if
        the foreign key constraint does not hold.

    Raises:
        AssertionError: If the input foreign DataFrame (foreign_data_frame)
            is not of type pandas DataFrame.
        AssertionError: If the foreign key ltable (foreign_key_ltable) is not
            of type string.
        AssertionError: If the input ltable (ltable) is not of type pandas
            DataFrame.
        AssertionError: If the ltable key (ltable_key) is not of type string.
    """
    # check_fk_constraint performs all the basic input validation as well as
    # the two semantic checks described above.
    if ch.check_fk_constraint(foreign_data_frame, foreign_key_ltable,
                              ltable, ltable_key):
        # Constraint holds: record the property and relay the result.
        return set_property(foreign_data_frame, 'fk_ltable',
                            foreign_key_ltable)

    # Constraint violated: warn and decline to set the property.
    logger.warning(
        'FK constraint for fk_ltable is not satisfied; '
        'Not setting the fk_ltable')
    return False
def validate_and_set_fk_rtable(foreign_data_frame, foreign_key_rtable,
                               rtable, rtable_key):
    """
    Validates and set the foreign key rtable for a DataFrame in the catalog.

    Given a DataFrame and a foreign key attribute, two conditions are
    checked before the property is stored: the foreign key rtable attribute
    must not have any missing values, and the referenced foreign key values
    must map to unique values in the primary (base) table.

    Args:
        foreign_data_frame (DataFrame): DataFrame containing the foreign key
            (typically a candidate set, for example output from blocking two
            tables).
        foreign_key_rtable (string): An attribute in the foreign DataFrame.
        rtable (DataFrame): Base DataFrame, in which the foreign key
            attribute would form the primary key.
        rtable_key (string): An attribute in the base table
            (typically a primary key attribute).

    Returns:
        A Boolean value of True will be returned if the validation was
        successful and the update was successful in the catalog; False if
        the foreign key constraint does not hold.

    Raises:
        AssertionError: If the input foreign DataFrame (foreign_data_frame)
            is not of type pandas DataFrame.
        AssertionError: If the foreign key rtable (foreign_key_rtable) is not
            of type string.
        AssertionError: If the input rtable (rtable) is not of type pandas
            DataFrame.
        AssertionError: If the rtable key (rtable_key) is not of type string.
    """
    # check_fk_constraint performs all the basic input validation as well as
    # the two semantic checks described above.
    if ch.check_fk_constraint(foreign_data_frame, foreign_key_rtable,
                              rtable, rtable_key):
        # Constraint holds: record the property and relay the result.
        return set_property(foreign_data_frame, 'fk_rtable',
                            foreign_key_rtable)

    # Constraint violated: warn and decline to set the property.
    logger.warning(
        'FK constraint for fk_rtable is not satisfied; Not '
        'setting the fk_rtable and rtable')
    return False
def set_fk_rtable(data_frame, foreign_key_rtable):
    """
    Sets the foreign key to rtable for a DataFrame in the catalog.

    Sugar function that records the foreign key to the right table via the
    set_property function. Typically called on a DataFrame carrying metadata
    such as fk_ltable, fk_rtable, ltable, rtable.

    Args:
        data_frame (DataFrame): The input DataFrame for which the foreign key
            rtable property must be set.
        foreign_key_rtable (string): The attribute that must be set as
            foreign key to rtable in the catalog.

    Returns:
        A Boolean value of True is returned if the foreign key to rtable was
        set successfully.

    Raises:
        AssertionError: If `data_frame` is not of type
            pandas DataFrame.
        AssertionError: If `foreign_key_rtable` is not of
            type string.
        KeyError: If `foreign_key_rtable` is not in the input
            DataFrame.

    Examples:
        >>> import py_entitymatching as em
        >>> C = pd.DataFrame({'id':[1, 2], 'ltable_id':[1, 2], 'rtable_id':[2, 1]})
        >>> em.set_key(C, 'id')
        >>> em.set_fk_rtable(C, 'rtable_id')
        >>> em.get_fk_rtable(C)
        # 'rtable_id'

    See Also:
        :meth:`~py_entitymatching.set_property`
    """
    # Validate the input parameters
    # # The input object is expected to be of type pandas DataFrame
    validate_object_type(data_frame, pd.DataFrame)
    # Bug fix: the error prefix previously said 'foreign key ltable', which
    # produced a misleading validation message for a bad rtable foreign key.
    validate_object_type(foreign_key_rtable, six.string_types,
                         error_prefix='Input (foreign key rtable)')
    # Check if the given attribute is present in the DataFrame
    if not ch.check_attrs_present(data_frame, foreign_key_rtable):
        logger.error('Input attr. ( %s ) not in the DataFrame'
                     % foreign_key_rtable)
        raise KeyError('Input attr. ( %s ) not in the DataFrame'
                       % foreign_key_rtable)
    # Finally set the property and relay the result
    return set_property(data_frame, 'fk_rtable', foreign_key_rtable)
def show_properties(data_frame):
    """
    Prints the properties for a DataFrame that is present in the catalog.

    Args:
        data_frame (DataFrame): The input pandas DataFrame for which the
            properties must be displayed.

    Examples:
        >>> A = pd.DataFrame({'key_attr' : [1, 2], 'colA':['a', 'b'], 'colB' : [10, 20]})
        >>> em.set_key(A, 'key_attr')
        >>> em.show_properties(A)
        # id: 4572922488 # This will change dynamically
        # key: key_attr
    """
    # If the DataFrame is not registered in the catalog there is nothing to
    # show; report the problem and return without raising.
    if not is_dfinfo_present(data_frame):
        logger.error('DataFrame information is not present in the catalog')
        return
    # Delegate the actual printing to the id-based variant.
    # (A stale commented-out copy of that printing logic previously lived
    # here and has been removed.)
    show_properties_for_id(id(data_frame))
def show_properties_for_id(object_id):
    """
    Shows the properties for an object id present in the catalog.

    Given an object id (typically obtained by executing id(<object>), where
    the object could be a DataFrame), this function displays the properties
    stored for that object id in the catalog.

    Args:
        object_id (int): The Python identifier of an object (typically a
            pandas DataFrame).

    Examples:
        >>> A = pd.DataFrame({'key_attr' : [1, 2], 'colA':['a', 'b'], 'colB' : [10, 20]})
        >>> em.set_key(A, 'key_attr')
        >>> em.show_properties_for_id(id(A))
        # id: 4572922488 # This will change dynamically
        # key: key_attr
    """
    # Look up all properties recorded under this object id.
    metadata = Catalog.Instance().get_all_properties_for_id(object_id)
    # Header line: the object's id.
    print('id: ' + str(object_id))
    # One line per property; string values are printed verbatim, anything
    # else (e.g. a DataFrame) is represented by its object id.
    for prop_name, prop_value in six.iteritems(metadata):
        if isinstance(prop_value, six.string_types):
            print(prop_name + ": " + prop_value)
        else:
            print(prop_name + "(obj.id): " + str(id(prop_value)))
def set_candset_properties(candset, key, foreign_key_ltable,
                           foreign_key_rtable, ltable, rtable):
    """
    Sets candidate set properties.

    Sugar function that records, in one call, all the metadata a candidate
    set carries: key, foreign key ltable, foreign key rtable, ltable and
    rtable. No integrity checks are performed on the inputs.

    Args:
        candset (DataFrame): Input DataFrame for which the properties must be
            set.
        key (string): Key attribute that must be set for the DataFrame in the
            catalog.
        foreign_key_ltable (string): Foreign key ltable attribute that must be
            set for the DataFrame in the catalog.
        foreign_key_rtable (string): Foreign key rtable attribute that must be
            set for the DataFrame in the catalog.
        ltable (DataFrame): DataFrame that must be set as ltable.
        rtable (DataFrame): DataFrame that must be set as rtable.

    Returns:
        A Boolean value of True is returned if the updates were successful.
    """
    # Key first, then the two foreign keys, then the two base tables.
    set_property(candset, 'key', key)
    set_fk_ltable(candset, foreign_key_ltable)
    set_fk_rtable(candset, foreign_key_rtable)
    set_property(candset, 'ltable', ltable)
    set_property(candset, 'rtable', rtable)
    return True
def _validate_metadata_for_table(table, key, output_string, lgr, verbose):
    """
    Validates metadata for table (DataFrame).

    Checks that `table` is a DataFrame, that `key` is a string column
    present in it, and that the column actually qualifies as a key.
    """
    # The table itself must be a pandas DataFrame.
    validate_object_type(table, pd.DataFrame)

    # The key column has to exist in the table.
    if not ch.check_attrs_present(table, key):
        raise KeyError('Input key ( %s ) not in the DataFrame' % key)

    ch.log_info(lgr, 'Validating ' + output_string + ' key: ' + str(key),
                verbose)
    # The key must be a string (column name)...
    validate_object_type(key, six.string_types, error_prefix='Key attribute')
    # ...and must actually qualify to be a key for this table.
    if not ch.is_key_attribute(table, key, verbose):
        raise AssertionError('Attribute %s in the %s table does not '
                             'qualify to be the key' % (
                                 str(key), output_string))
    ch.log_info(lgr, '..... Done', verbose)
    return True
def _validate_metadata_for_candset(candset, key, foreign_key_ltable,
                                   foreign_key_rtable,
                                   ltable, rtable,
                                   ltable_key, rtable_key,
                                   lgr, verbose):
    """
    Validates metadata for a candidate set.

    Checks that the candidate set is a DataFrame carrying the key and both
    foreign key columns, that the base tables are DataFrames carrying their
    keys, that the candidate set key is valid, and that both foreign key
    constraints hold.
    """
    # Validate input parameters
    # # We expect candset to be of type pandas DataFrame
    validate_object_type(candset, pd.DataFrame, error_prefix='Input candset')
    # Check if the key column is present in the candset
    if not ch.check_attrs_present(candset, key):
        raise KeyError('Input key ( %s ) not in the DataFrame' % key)
    # Check if the foreign key ltable column is present in the candset
    if not ch.check_attrs_present(candset, foreign_key_ltable):
        raise KeyError(
            'Input foreign_key_ltable ( %s ) not in the DataFrame'
            % foreign_key_ltable)
    # Check if the foreign key rtable column is present in the candset
    if not ch.check_attrs_present(candset, foreign_key_rtable):
        raise KeyError(
            'Input fk_rtable ( %s ) not in the DataFrame' % foreign_key_rtable)
    # We expect the ltable to be of type pandas DataFrame
    validate_object_type(ltable, pd.DataFrame, error_prefix='Input ltable')
    # We expect the rtable to be of type pandas DataFrame
    validate_object_type(rtable, pd.DataFrame, error_prefix='Input rtable')
    # We expect the ltable key to be present in the ltable
    if not ch.check_attrs_present(ltable, ltable_key):
        raise KeyError('ltable key ( %s ) not in ltable' % ltable_key)
    # We expect the rtable key to be present in the rtable
    if not ch.check_attrs_present(rtable, rtable_key):
        raise KeyError('rtable key ( %s ) not in rtable' % rtable_key)
    # First validate metadata for the candidate set (as a table)
    _validate_metadata_for_table(candset, key, 'candset', lgr, verbose)
    # Fix: the log messages now bracket the check they describe. Previously
    # the 'right table' message (and its Done) was emitted only after BOTH
    # constraints had already been validated, which made the log misleading.
    ch.log_info(lgr, 'Validating foreign key constraint for left table',
                verbose)
    if not ch.check_fk_constraint(candset, foreign_key_ltable,
                                  ltable, ltable_key):
        raise AssertionError(
            'Candset does not satisfy foreign key constraint with '
            'the left table')
    ch.log_info(lgr, '..... Done', verbose)
    ch.log_info(lgr, 'Validating foreign key constraint for right table',
                verbose)
    if not ch.check_fk_constraint(candset, foreign_key_rtable,
                                  rtable, rtable_key):
        raise AssertionError(
            'Candset does not satisfy foreign key constraint with '
            'the right table')
    ch.log_info(lgr, '..... Done', verbose)
    return True
# noinspection PyIncorrectDocstring
def get_keys_for_ltable_rtable(ltable, rtable, lgr, verbose):
    """
    Gets keys for the ltable and rtable.

    Looks up the 'key' property of both base tables in the catalog and
    returns them as a (ltable_key, rtable_key) pair.
    """
    # Consistency fix: use the shared validate_object_type helper (as the
    # rest of this module does) instead of hand-rolled isinstance checks;
    # it logs and raises an AssertionError just like the previous code.
    validate_object_type(ltable, pd.DataFrame, error_prefix='Input ltable')
    validate_object_type(rtable, pd.DataFrame, error_prefix='Input rtable')
    ch.log_info(lgr, 'Required metadata: ltable key, rtable key', verbose)
    ch.log_info(lgr, 'Getting metadata from the catalog', verbose)
    # Get the ltable key and rtable key from the catalog
    ltable_key = get_key(ltable)
    rtable_key = get_key(rtable)
    ch.log_info(lgr, '..... Done', verbose)
    # return the ltable and rtable keys
    return ltable_key, rtable_key
# noinspection PyIncorrectDocstring
def get_metadata_for_candset(candset, lgr, verbose):
    """
    Gets metadata for the candset.

    Retrieves, from the catalog, the candidate set's key, both foreign
    keys, both base tables and their keys.
    """
    # The candidate set must be a pandas DataFrame.
    validate_object_type(candset, pd.DataFrame, error_prefix='Input candset')
    ch.log_info(lgr, 'Getting metadata from the catalog', verbose)
    # Gather, in order: the candset key, its two foreign keys, the two base
    # tables, and finally the keys of the base tables.
    key = get_key(candset)
    fk_ltable = get_fk_ltable(candset)
    fk_rtable = get_fk_rtable(candset)
    ltable = get_ltable(candset)
    rtable = get_rtable(candset)
    l_key = get_key(ltable)
    r_key = get_key(rtable)
    ch.log_info(lgr, '..... Done', verbose)
    # Hand everything back as one tuple.
    return key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key
# noinspection PyIncorrectDocstring
def get_ltable(candset):
    """
    Gets the ltable for a DataFrame from the catalog.

    Args:
        candset (DataFrame): The input table for which the ltable must be
            returned.

    Returns:
        A pandas DataFrame that is pointed by 'ltable' property of the input
        table.

    Examples:
        >>> import py_entitymatching as em
        >>> C = pd.DataFrame({'id':[1, 2], 'ltable_id':[1, 2], 'rtable_id':[2, 1]})
        >>> em.set_key(C, 'id')
        >>> em.set_ltable(C, A)
        >>> id(em.get_ltable(A) == id(A)
        # True

    See Also:
        :meth:`~py_entitymatching.get_property`
    """
    # Sugar for looking up the 'ltable' property of a candidate set.
    return get_property(candset, 'ltable')
# noinspection PyIncorrectDocstring
def get_rtable(candset):
    """
    Gets the rtable for a DataFrame from the catalog.

    Args:
        candset (DataFrame): Input table for which the rtable must be returned.

    Returns:
        A pandas DataFrame that is pointed by 'rtable' property of the input
        table.

    Examples:
        >>> import py_entitymatching as em
        >>> C = pd.DataFrame({'id':[1, 2], 'ltable_id':[1, 2], 'rtable_id':[2, 1]})
        >>> em.set_key(C, 'id')
        >>> em.set_rtable(C, B)
        >>> id(em.get_rtable(B) == id(B)
        # True

    See Also:
        :meth:`~py_entitymatching.get_property`
    """
    # Sugar for looking up the 'rtable' property of a candidate set.
    return get_property(candset, 'rtable')
def set_ltable(candset, table):
    """
    Sets the ltable for a DataFrame in the catalog.

    Args:
        candset (DataFrame): The input table for which the ltable must be set.
        table (DataFrame): The table (typically a pandas DataFrame) that must
            be set as ltable for the input DataFrame.

    Returns:
        A Boolean value of True is returned, if the update was successful.

    Examples:
        >>> import py_entitymatching as em
        >>> C = pd.DataFrame({'id':[1, 2], 'ltable_id':[1, 2], 'rtable_id':[2, 1]})
        >>> em.set_key(C, 'id')
        >>> em.set_ltable(C, A)
        >>> id(em.get_ltable(A) == id(A)
        # True

    See Also:
        :meth:`~py_entitymatching.set_property`
    """
    # Sugar for recording the 'ltable' property of a candidate set.
    return set_property(candset, 'ltable', table)
# noinspection PyIncorrectDocstring
def set_rtable(candset, table):
    """
    Sets the rtable for a DataFrame in the catalog.

    Args:
        candset (DataFrame): The input table for which the rtable must be set.
        table (DataFrame): The table that must be set as rtable for the input
            DataFrame.

    Returns:
        A Boolean value of True is returned, if the update was successful.

    Examples:
        >>> import py_entitymatching as em
        >>> C = pd.DataFrame({'id':[1, 2], 'ltable_id':[1, 2], 'rtable_id':[2, 1]})
        >>> em.set_key(C, 'id')
        >>> em.set_rtable(C, B)
        >>> id(em.get_rtable(B) == id(B)
        # True

    See Also:
        :meth:`~py_entitymatching.set_property`
    """
    # Sugar for recording the 'rtable' property of a candidate set.
    return set_property(candset, 'rtable', table)
| 36.832985 | 106 | 0.652006 |
import logging
import pandas as pd
import six
import py_entitymatching.utils.catalog_helper as ch
from py_entitymatching.catalog.catalog import Catalog
from py_entitymatching.utils.validation_helper import validate_object_type
logger = logging.getLogger(__name__)
def get_property(data_frame, property_name):
.string_types, error_prefix='Property name')
catalog = Catalog.Instance()
if not catalog.is_df_info_present_in_catalog(data_frame):
logger.error('DataFrame information is not present in the catalog')
raise KeyError('DataFrame information is not present in the catalog')
if not catalog.is_property_present_for_df(data_frame, property_name):
logger.error(
'Requested metadata ( %s ) for the given DataFrame is not '
'present in the catalog' % property_name)
raise KeyError(
'Requested metadata ( %s ) for the given DataFrame is not '
'present in the catalog' % property_name)
return catalog.get_property(data_frame, property_name)
def set_property(data_frame, property_name, property_value):
.string_types, error_prefix='Property name')
catalog = Catalog.Instance()
if not catalog.is_df_info_present_in_catalog(data_frame):
catalog.init_properties(data_frame)
# True if the update was successful.
return catalog.set_property(data_frame, property_name, property_value)
def init_properties(data_frame):
# Validate input parameters
# # The input object should be of type pandas DataFrame
validate_object_type(data_frame, pd.DataFrame)
# Get the catalog instance
catalog = Catalog.Instance()
# Initialize the property in the catalog.
# Relay the return value from the underlying catalog object's function.
return catalog.init_properties(data_frame)
def get_all_properties(data_frame):
if not catalog.is_df_info_present_in_catalog(data_frame):
logger.error('DataFrame information is not present in the catalog')
raise KeyError('DataFrame information is not present in the catalog')
return catalog.get_all_properties(data_frame)
def del_property(data_frame, property_name):
.string_types, error_prefix='Property name')
catalog = Catalog.Instance()
if not catalog.is_df_info_present_in_catalog(data_frame):
logger.error('DataFrame information is not present in the catalog')
raise KeyError('DataFrame information is not present in the catalog')
if not catalog.is_property_present_for_df(data_frame, property_name):
logger.error('Requested metadata ( %s ) for the given DataFrame is '
'not present in the catalog' %property_name)
raise KeyError('Requested metadata ( %s ) for the given DataFrame is '
'not present in the catalog' %property_name)
return catalog.del_property(data_frame, property_name)
def del_all_properties(data_frame):
ger.error('Input object is not of type pandas data frame')
raise AssertionError('Input object is not of type pandas data frame')
catalog = Catalog.Instance()
if not catalog.is_df_info_present_in_catalog(data_frame):
logger.error('DataFrame information is not present in the catalog')
raise KeyError('DataFrame information is not present in the catalog')
# and relay its return value
return catalog.del_all_properties(data_frame)
def get_catalog():
# Get the catalog instance
catalog = Catalog.Instance()
# Call the underlying catalog object's function to get the catalog. Relay
return catalog.get_catalog()
def del_catalog():
catalog = Catalog.Instance()
# dict). Relay the return value from the delegated function.
return catalog.del_catalog()
def is_catalog_empty():
# Get the catalog instance
catalog = Catalog.Instance()
# Call the underlying catalog object's function to check if the catalog
return catalog.is_catalog_empty()
def is_dfinfo_present(data_frame):
validate_object_type(data_frame, pd.DataFrame)
catalog = Catalog.Instance()
# DataFrame information is present in the catalog.
# Relay the return value from the delegated function.
return catalog.is_df_info_present_in_catalog(data_frame)
def is_property_present_for_df(data_frame, property_name):
# Input validations
# # The input object should be of type pandas DataFrame
validate_object_type(data_frame, pd.DataFrame)
# # The property name should be of type string
validate_object_type(property_name, six.string_types, error_prefix='Property name')
# Get the catalog instance
catalog = Catalog.Instance()
# Check if the given DataFrame information is present in the catalog. If
# not, raise an error.
if catalog.is_df_info_present_in_catalog(data_frame) is False:
logger.error('DataFrame information is not present in the catalog')
raise KeyError('DataFrame information is not present in the catalog')
# Call the underlying catalog object's function to check if the property
return catalog.is_property_present_for_df(data_frame, property_name)
def get_catalog_len():
catalog = Catalog.Instance()
# Relay the return value from that function.
return catalog.get_catalog_len()
def set_properties(data_frame, properties, replace=True):
# Validate input parameters
# # Input object is expected to be a pandas DataFrame
validate_object_type(data_frame, pd.DataFrame)
# # Input properties is expected to be of type Python dictionary
validate_object_type(properties, dict, error_prefix='The properties')
# Get the catalog instance
catalog = Catalog.Instance()
# Check if the the DataFrame information is present in the catalog. If
# present, we expect the replace flag to be True. If the flag was set to
# False, then warn the user and return False.
if catalog.is_df_info_present_in_catalog(data_frame):
if not replace:
logger.warning(
'Properties already exists for df ( %s ). Not replacing it'
%str(id(data_frame)))
return False
else:
# DataFrame information is present and replace flag is True. We
# now reset the properties dictionary for this DataFrame.
catalog.init_properties(data_frame)
else:
# The DataFrame information is not present in the catalog. so
# initialize the properties
catalog.init_properties(data_frame)
# Now iterate through the given properties and set for the DataFrame.
# Note: Here we don't check the correctness of the input properties (i.e
for property_name, property_value in six.iteritems(properties):
catalog.set_property(data_frame, property_name, property_value)
return True
def copy_properties(source_data_frame, target_data_frame, replace=True):
refix='Input object (source_data_frame)')
refix='Input object (target_data_frame)')
catalog = Catalog.Instance()
if catalog.is_df_info_present_in_catalog(source_data_frame) is False:
logger.error(
'DataFrame information (source_data_frame) is not present in the '
'catalog')
raise KeyError(
'DataFrame information (source_data_frame) is not present in the '
'catalog')
metadata = catalog.get_all_properties(source_data_frame)
# function gets called so often.
return set_properties(target_data_frame, metadata,
replace) # this initializes tar in the catalog.
# key related methods
def get_key(data_frame):
# This function is just a sugar to get the 'key' property for a DataFrame
return get_property(data_frame, 'key')
def set_key(data_frame, key_attribute):
# Validate input parameters
# # We expect the input object (data_frame) to be of type pandas DataFrame
validate_object_type(data_frame, pd.DataFrame)
# # We expect input key attribute to be of type string
validate_object_type(key_attribute, six.string_types, error_prefix='Input key attribute')
# Check if the key attribute is present as one of the columns in the
# DataFrame
if not ch.check_attrs_present(data_frame, key_attribute):
logger.error('Input key ( %s ) not in the DataFrame' % key_attribute)
raise KeyError('Input key ( %s ) not in the DataFrame' % key_attribute)
# Check if the key attribute satisfies the conditions to be a key. If
# not, just return False.
# Note: Currently it is not clear, whether we should return False from
# here or raise an exception. As of now resorting to just returning
# False, because this function is used by other computation
# intensive commands in py_entitymatching and raising an exception might make all
# the work done in those commands go in vain (or those commands should
# catch the exception correctly, which may be complicated and require
# changes to the current code). We need to revisit this
# later.
if ch.is_key_attribute(data_frame, key_attribute) is False:
logger.warning('Attribute (%s ) does not qualify to be a key; Not '
'setting/replacing the key' % key_attribute)
return False
else:
# Set the key property for the input DataFrame
return set_property(data_frame, 'key', key_attribute)
def get_fk_ltable(data_frame):
# Call the get_property function and relay the result.
return get_property(data_frame, 'fk_ltable')
def get_fk_rtable(data_frame):
# Call the get_property function and relay the result.
return get_property(data_frame, 'fk_rtable')
def set_fk_ltable(data_frame, fk_ltable):
# Validate the input parameters
# # We expect the input object to be of type pandas DataFrame
validate_object_type(data_frame, pd.DataFrame)
# # We expect the input fk_ltable to be of type string
validate_object_type(fk_ltable, six.string_types, error_prefix='The input (fk_ltable)')
# # The fk_ltable attribute should be one of the columns in the input
# DataFrame
if not ch.check_attrs_present(data_frame, fk_ltable):
logger.error('Input attr. ( %s ) not in the DataFrame' % fk_ltable)
raise KeyError('Input attr. ( %s ) not in the DataFrame' % fk_ltable)
# Call the set_property function and relay the result.
return set_property(data_frame, 'fk_ltable', fk_ltable)
def validate_and_set_fk_ltable(foreign_data_frame, foreign_key_ltable, ltable,
ltable_key):
# check the foreign key constraint
# # Note all the validations are done inside the function
# check_fk_constraint
status = ch.check_fk_constraint(foreign_data_frame, foreign_key_ltable,
ltable, ltable_key)
# If the validation is successful then set the property
if status:
return set_property(foreign_data_frame, 'fk_ltable', foreign_key_ltable)
else:
# else report the error and just return False.
logger.warning(
'FK constraint for fk_ltable is not satisfied; '
'Not setting the fk_ltable')
return False
def validate_and_set_fk_rtable(foreign_data_frame, foreign_key_rtable,
rtable, rtable_key):
# Validate the foreign key constraint
# Note: All the basic input validations are done inside the
# check_fk_constraint function.
status = ch.check_fk_constraint(foreign_data_frame, foreign_key_rtable,
rtable, rtable_key)
# If the validation was successful, then set the property
if status:
return set_property(foreign_data_frame, 'fk_rtable', foreign_key_rtable)
# else just warn and return False
else:
logger.warning(
'FK constraint for fk_rtable is not satisfied; Not '
'setting the fk_rtable and rtable')
return False
def set_fk_rtable(data_frame, foreign_key_rtable):
# Validate the input parameters
# # The input object is expected to be of type pandas DataFrame
validate_object_type(data_frame, pd.DataFrame)
validate_object_type(foreign_key_rtable, six.string_types, error_prefix='Input (foreign key ltable)')
# Check if the given attribute is present in the DataFrame
if not ch.check_attrs_present(data_frame, foreign_key_rtable):
logger.error('Input attr. ( %s ) not in the DataFrame'
% foreign_key_rtable)
raise KeyError('Input attr. ( %s ) not in the DataFrame'
% foreign_key_rtable)
# Finally set the property and relay the result
return set_property(data_frame, 'fk_rtable', foreign_key_rtable)
def show_properties(data_frame):
# Check if the DataFrame information is present in the catalog. If not
# return
if not is_dfinfo_present(data_frame):
logger.error('DataFrame information is not present in the catalog')
return
# Delegate it to show properties for the id if an object in the catalog
show_properties_for_id(id(data_frame))
# # Get the properties for the DataFrame from the catalog
# metadata = get_all_properties(data_frame)
#
# # First print the id for the DataFrame
# print('id: ' + str(id(data_frame)))
# # For each property name anf value, print the contents to the user
# for property_name, property_value in six.iteritems(metadata):
# # If the property value is string print it out
# if isinstance(property_value, six.string_types):
# print(property_name + ": " + property_value)
# # else, print just the id.
# else:
# print(property_name + "(obj.id): " + str(id(property_value)))
def show_properties_for_id(object_id):
    """Print the catalog properties recorded for the object with the given
    id (typically ``id(DataFrame)``), one per line on stdout.
    """
    catalog = Catalog.Instance()
    metadata = catalog.get_all_properties_for_id(object_id)
    # First print the id for the DataFrame
    print('id: ' + str(object_id))
    # For each property name and value, print the contents to the user
    for property_name, property_value in six.iteritems(metadata):
        # If the property value is a string, print it out verbatim
        if isinstance(property_value, six.string_types):
            print(property_name + ": " + property_value)
        # else, print just the id: the value is an object (e.g. a
        # DataFrame) whose full repr would be too noisy here.
        else:
            print(property_name + "(obj.id): " + str(id(property_value)))
def set_candset_properties(candset, key, foreign_key_ltable,
                           foreign_key_rtable, ltable, rtable):
    """Record the five metadata properties that define a candidate set.

    Stores the key, the two foreign-key column names, and references to
    the left/right base tables in the catalog for *candset*.  Returns
    True; the individual setters raise on invalid input.
    """
    # set the key
    set_property(candset, 'key', key)
    # set the foreign key attributes
    set_fk_ltable(candset, foreign_key_ltable)
    set_fk_rtable(candset, foreign_key_rtable)
    # set the ltable and rtables
    set_property(candset, 'ltable', ltable)
    set_property(candset, 'rtable', rtable)
    return True
def _validate_metadata_for_table(table, key, output_string, lgr, verbose):
# Validate input parameters
# # We expect the input table to be of type pandas DataFrame
validate_object_type(table, pd.DataFrame)
# Check the key column is present in the table
if not ch.check_attrs_present(table, key):
raise KeyError('Input key ( %s ) not in the DataFrame' % key)
# Validate the key
ch.log_info(lgr, 'Validating ' + output_string + ' key: ' + str(key),
verbose)
# We expect the key to be of type string
validate_object_type(key, six.string_types, error_prefix='Key attribute')
if not ch.is_key_attribute(table, key, verbose):
raise AssertionError('Attribute %s in the %s table does not '
'qualify to be the key' % (
str(key), output_string))
ch.log_info(lgr, '..... Done', verbose)
return True
def _validate_metadata_for_candset(candset, key, foreign_key_ltable,
foreign_key_rtable,
ltable, rtable,
ltable_key, rtable_key,
lgr, verbose):
# Validate input parameters
# # We expect candset to be of type pandas DataFrame
validate_object_type(candset, pd.DataFrame, error_prefix='Input candset')
# Check if the key column is present in the candset
if not ch.check_attrs_present(candset, key):
raise KeyError('Input key ( %s ) not in the DataFrame' % key)
# Check if the foreign key ltable column is present in the candset
if not ch.check_attrs_present(candset, foreign_key_ltable):
raise KeyError(
'Input foreign_key_ltable ( %s ) not in the DataFrame'
% foreign_key_ltable)
# Check if the foreign key rtable column is present in the candset
if not ch.check_attrs_present(candset, foreign_key_rtable):
raise KeyError(
'Input fk_rtable ( %s ) not in the DataFrame' % foreign_key_rtable)
# We expect the ltable to be of type pandas DataFrame
validate_object_type(ltable, pd.DataFrame, error_prefix='Input ltable')
# We expect the rtable to be of type pandas DataFrame
validate_object_type(rtable, pd.DataFrame, error_prefix='Input rtable')
# We expect the ltable key to be present in the ltable
if not ch.check_attrs_present(ltable, ltable_key):
raise KeyError('ltable key ( %s ) not in ltable' % ltable_key)
# We expect the rtable key to be present in the rtable
if not ch.check_attrs_present(rtable, rtable_key):
raise KeyError('rtable key ( %s ) not in rtable' % rtable_key)
# First validate metadata for the candidate set (as a table)
_validate_metadata_for_table(candset, key, 'candset', lgr, verbose)
ch.log_info(lgr, 'Validating foreign key constraint for left table',
verbose)
# Second check foreign key constraints
if not ch.check_fk_constraint(candset, foreign_key_ltable,
ltable, ltable_key):
raise AssertionError(
'Candset does not satisfy foreign key constraint with '
'the left table')
if not ch.check_fk_constraint(candset, foreign_key_rtable,
rtable, rtable_key):
raise AssertionError(
'Candset does not satisfy foreign key constraint with '
'the right table')
ch.log_info(lgr, '..... Done', verbose)
ch.log_info(lgr, 'Validating foreign key constraint for right table',
verbose)
ch.log_info(lgr, '..... Done', verbose)
return True
# noinspection PyIncorrectDocstring
def get_keys_for_ltable_rtable(ltable, rtable, lgr, verbose):
# We expect the ltable to be of type pandas DataFrame
if not isinstance(ltable, pd.DataFrame):
logger.error('Input ltable is not of type pandas data frame')
raise AssertionError('Input ltable is not of type pandas data frame')
# We expect the rtable to be of type pandas DataFrame
if not isinstance(rtable, pd.DataFrame):
logger.error('Input rtable is not of type pandas data frame')
raise AssertionError('Input rtable is not of type pandas data frame')
ch.log_info(lgr, 'Required metadata: ltable key, rtable key', verbose)
ch.log_info(lgr, 'Getting metadata from the catalog', verbose)
# Get the ltable key and rtable key from the catalog
ltable_key = get_key(ltable)
rtable_key = get_key(rtable)
ch.log_info(lgr, '..... Done', verbose)
# return the ltable and rtable keys
return ltable_key, rtable_key
# noinspection PyIncorrectDocstring
def get_metadata_for_candset(candset, lgr, verbose):
# Validate input parameters
validate_object_type(candset, pd.DataFrame, error_prefix='Input candset')
ch.log_info(lgr, 'Getting metadata from the catalog', verbose)
# Get the key, foreign keys, ltable, rtable and their keys
# # Get key
key = get_key(candset)
# # Get the foreign keys
fk_ltable = get_fk_ltable(candset)
fk_rtable = get_fk_rtable(candset)
# # Get the base tables
ltable = get_ltable(candset)
rtable = get_rtable(candset)
# Get the base table keys
l_key = get_key(ltable)
r_key = get_key(rtable)
ch.log_info(lgr, '..... Done', verbose)
# Return the metadata
return key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key
# noinspection PyIncorrectDocstring
def get_ltable(candset):
# Return the ltable for a candidate set. This function is just a sugar
return get_property(candset, 'ltable')
# noinspection PyIncorrectDocstring
def get_rtable(candset):
# Return the rtable for a candidate set. This function is just a sugar
return get_property(candset, 'rtable')
def set_ltable(candset, table):
# Return the ltable for a candidate set. This function is just a sugar
return set_property(candset, 'ltable', table)
# noinspection PyIncorrectDocstring
def set_rtable(candset, table):
# Return the rtable for a candidate set. This function is just a sugar
return set_property(candset, 'rtable', table)
| true | true |
1c2d2c9056f3f7435f1e80d6ff4b5cde68b9076d | 10,970 | py | Python | cvxpy/reductions/solvers/conic_solvers/conic_solver.py | adshieh/cvxpy | 73b696b71dbb2ceb66a805798c922461e33afc6b | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-12-21T03:11:12.000Z | 2022-03-02T16:56:24.000Z | cvxpy/reductions/solvers/conic_solvers/conic_solver.py | adshieh/cvxpy | 73b696b71dbb2ceb66a805798c922461e33afc6b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cvxpy/reductions/solvers/conic_solvers/conic_solver.py | adshieh/cvxpy | 73b696b71dbb2ceb66a805798c922461e33afc6b | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-11-17T15:45:54.000Z | 2020-11-17T15:45:54.000Z | """
Copyright 2017 Robin Verschueren, 2017 Akshay Agrawal
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy.settings as s
from cvxpy.constraints import SOC, ExpCone, PSD, Zero, NonNeg
from cvxpy.reductions.cvx_attr2constr import convex_attributes
from cvxpy.reductions.dcp2cone.cone_matrix_stuffing import ParamConeProg
from cvxpy.reductions.solution import Solution, failure_solution
from cvxpy.reductions.solvers.solver import Solver
from cvxpy.reductions.solvers import utilities
import numpy as np
import scipy.sparse as sp
# NOTE(akshayka): Small changes to this file can lead to drastic
# performance regressions. If you are making a change to this file,
# make sure to run cvxpy/tests/test_benchmarks.py to ensure that you have
# not introduced a regression.
class LinearOperator(object):
    """Uniform callable interface for linear maps.

    Wraps either a SciPy sparse matrix or an arbitrary callable that
    implements the matrix product; both are applied via ``__call__``.
    """

    def __init__(self, linear_op, shape):
        self.shape = shape
        if not sp.issparse(linear_op):
            # Already a callable implementing the product.
            self._matmul = linear_op
        else:
            # Close over the sparse matrix and defer to its @ operator.
            self._matmul = lambda X: linear_op @ X

    def __call__(self, X):
        # Apply the wrapped linear map to X.
        return self._matmul(X)
def as_linear_operator(linear_op):
    """Coerce *linear_op* to a LinearOperator.

    Args:
        linear_op: a LinearOperator (returned unchanged) or a SciPy
            sparse matrix (wrapped, preserving its shape).

    Returns:
        LinearOperator: a callable linear-map wrapper.

    Raises:
        TypeError: if *linear_op* is neither of the supported types.
            (Previously this case silently returned None, which only
            surfaced later as an opaque AttributeError at call sites.)
    """
    if isinstance(linear_op, LinearOperator):
        return linear_op
    elif sp.issparse(linear_op):
        return LinearOperator(linear_op, linear_op.shape)
    raise TypeError(
        "Expected a LinearOperator or SciPy sparse matrix, got %s" %
        type(linear_op).__name__)
def as_block_diag_linear_operator(matrices):
    """Combine linear operators into one block-diagonal LinearOperator.

    Analogous to ``scipy.sparse.block_diag``, except the blocks may be
    arbitrary LinearOperators rather than only sparse matrices.  The
    result applies block ``i`` to the corresponding row-slice of its
    input and stacks the outputs vertically.
    """
    ops = [as_linear_operator(m) for m in matrices]
    total_rows = sum(op.shape[0] for op in ops)
    total_cols = sum(op.shape[1] for op in ops)
    # Start offset of each block's slice within the stacked input.
    offsets = np.append(0, np.cumsum([op.shape[1] for op in ops]))

    def matmul(X):
        pieces = [op(X[offsets[i]:offsets[i + 1]])
                  for i, op in enumerate(ops)]
        return sp.vstack(pieces)

    return LinearOperator(matmul, (total_rows, total_cols))
class ConicSolver(Solver):
    """Conic solver class with reduction semantics.

    Base class for solver back ends that consume problems in conic
    standard form (a ParamConeProg).  Subclasses declare which cone
    types they support and how exponential-cone arguments are ordered.
    """
    # The key that maps to ConeDims in the data returned by apply().
    DIMS = "dims"

    # Every conic solver must support Zero and NonNeg constraints.
    SUPPORTED_CONSTRAINTS = [Zero, NonNeg]

    # Some solvers cannot solve problems that do not have constraints.
    # For such solvers, REQUIRES_CONSTR should be set to True.
    REQUIRES_CONSTR = False

    # Ordering of the exponential-cone arguments expected by the back
    # end; subclasses override (e.g. [0, 1, 2] for the CVXPY standard).
    EXP_CONE_ORDER = None

    def accepts(self, problem):
        """Return True iff this solver can handle *problem*.

        The problem must be a ParamConeProg, be continuous unless the
        solver is MIP-capable, have no special variable attributes,
        satisfy the solver's constraint-count requirement, and use only
        supported constraint (cone) types.
        """
        return (isinstance(problem, ParamConeProg)
                and (self.MIP_CAPABLE or not problem.is_mixed_integer())
                and not convex_attributes([problem.x])
                and (len(problem.constraints) > 0 or not self.REQUIRES_CONSTR)
                and all(type(c) in self.SUPPORTED_CONSTRAINTS for c in
                        problem.constraints))

    @staticmethod
    def get_spacing_matrix(shape, spacing, streak, num_blocks, offset):
        """Returns a sparse matrix that spaces out an expression.

        The result is a 0/1 selection matrix that copies its input rows
        into `num_blocks` runs of `streak` consecutive rows, separated
        by `spacing` zero rows and shifted down by `offset` rows.

        Parameters
        ----------
        shape : tuple
            (rows in matrix, columns in matrix)
        spacing : int
            The number of rows between the start of each non-zero block.
        streak: int
            The number of elements in each block.
        num_blocks : int
            The number of non-zero blocks.
        offset : int
            The number of zero rows at the beginning of the matrix.

        Returns
        -------
        SciPy CSC matrix
            A sparse matrix
        """
        num_values = num_blocks * streak
        val_arr = np.ones(num_values, dtype=np.float64)
        streak_plus_spacing = streak + spacing
        # Row index of each 1-entry: take the first `streak` rows of each
        # (streak + spacing)-row period, then shift everything by `offset`.
        row_arr = np.arange(0, num_blocks * streak_plus_spacing).reshape(
            num_blocks, streak_plus_spacing)[:, :streak].flatten() + offset
        # Columns are consumed in order, one per 1-entry.
        col_arr = np.arange(num_values)
        return sp.csc_matrix((val_arr, (row_arr, col_arr)), shape)

    def psd_format_mat(self, constr):
        """Return a matrix to multiply by PSD constraint coefficients.

        Subclasses override this when the back end expects a scaled or
        compacted (e.g. lower-triangular) PSD representation.
        """
        # Default is identity.
        return sp.eye(constr.size, format='csc')

    def format_constraints(self, problem, exp_cone_order):
        """
        Returns a ParamConeProg whose problem data tensors will yield the
        coefficient "A" and offset "b" for the constraint in the following
        formats:
            Linear equations: (A, b) such that A * x + b == 0,
            Linear inequalities: (A, b) such that A * x + b >= 0,
            Second order cone: (A, b) such that A * x + b in SOC,
            Exponential cone: (A, b) such that A * x + b in EXP,
            Semidefinite cone: (A, b) such that A * x + b in PSD,

        The CVXPY standard for the exponential cone is:
            K_e = closure{(x,y,z) | z >= y * exp(x/y), y>0}.
        Whenever a solver uses this convention, EXP_CONE_ORDER should be
        [0, 1, 2].

        The CVXPY standard for the second order cone is:
            SOC(n) = { x : x[0] >= norm(x[1:n], 2) }.
        All currently supported solvers use this convention.

        Args:
            problem : ParamConeProg
                The problem that is the provenance of the constraint.
            exp_cone_order: list
                A list indicating how the exponential cone arguments are
                ordered.

        Returns:
            ParamConeProg with structured A.
        """
        # Create a matrix to reshape constraints, then replicate for each
        # variable entry.
        restruct_mat = []  # Form a block diagonal matrix.
        for constr in problem.constraints:
            total_height = sum([arg.size for arg in constr.args])
            if type(constr) == Zero:
                # Negate so the solver-side convention A*x + b == 0 holds.
                restruct_mat.append(-sp.eye(constr.size, format='csr'))
            elif type(constr) == NonNeg:
                restruct_mat.append(sp.eye(constr.size, format='csr'))
            elif type(constr) == SOC:
                # Group each t row with appropriate X rows.
                assert constr.axis == 0, 'SOC must be lowered to axis == 0'
                # Interleave the rows of coeffs[0] and coeffs[1]:
                #   coeffs[0][0, :]
                #   coeffs[1][0:gap-1, :]
                #   coeffs[0][1, :]
                #   coeffs[1][gap-1:2*(gap-1), :]
                t_spacer = ConicSolver.get_spacing_matrix(
                    shape=(total_height, constr.args[0].size),
                    spacing=constr.args[1].shape[0],
                    streak=1,
                    num_blocks=constr.args[0].size,
                    offset=0,
                )
                X_spacer = ConicSolver.get_spacing_matrix(
                    shape=(total_height, constr.args[1].size),
                    spacing=1,
                    streak=constr.args[1].shape[0],
                    num_blocks=constr.args[0].size,
                    offset=1,
                )
                restruct_mat.append(sp.hstack([t_spacer, X_spacer]))
            elif type(constr) == ExpCone:
                # Interleave (x, y, z) rows according to exp_cone_order.
                arg_mats = []
                for i, arg in enumerate(constr.args):
                    space_mat = ConicSolver.get_spacing_matrix(
                        shape=(total_height, arg.size),
                        spacing=len(exp_cone_order) - 1,
                        streak=1,
                        num_blocks=arg.size,
                        offset=exp_cone_order[i],
                    )
                    arg_mats.append(space_mat)
                restruct_mat.append(sp.hstack(arg_mats))
            elif type(constr) == PSD:
                restruct_mat.append(self.psd_format_mat(constr))
            else:
                raise ValueError("Unsupported constraint type.")

        # Form new ParamConeProg
        if restruct_mat:
            # TODO(akshayka): profile to see whether using linear operators
            # or bmat is faster
            restruct_mat = as_block_diag_linear_operator(restruct_mat)
            # this is equivalent to but _much_ faster than:
            #   restruct_mat_rep = sp.block_diag([restruct_mat]*(problem.x.size + 1))
            #   restruct_A = restruct_mat_rep * problem.A
            # `unspecified` is the implied second dimension when viewing
            # problem.A with restruct_mat.shape[1] rows.
            unspecified, remainder = divmod(problem.A.shape[0] *
                                            problem.A.shape[1],
                                            restruct_mat.shape[1])
            reshaped_A = problem.A.reshape(restruct_mat.shape[1],
                                           unspecified, order='F').tocsr()
            restructured_A = restruct_mat(reshaped_A).tocoo()
            # Because of a bug in scipy versions < 1.20, `reshape`
            # can overflow if indices are int32s.
            restructured_A.row = restructured_A.row.astype(np.int64)
            restructured_A.col = restructured_A.col.astype(np.int64)
            restructured_A = restructured_A.reshape(
                restruct_mat.shape[0] * (problem.x.size + 1),
                problem.A.shape[1], order='F')
        else:
            restructured_A = problem.A
        new_param_cone_prog = ParamConeProg(problem.c,
                                            problem.x,
                                            restructured_A,
                                            problem.variables,
                                            problem.var_id_to_col,
                                            problem.constraints,
                                            problem.parameters,
                                            problem.param_id_to_col,
                                            formatted=True)
        return new_param_cone_prog

    def invert(self, solution, inverse_data):
        """Returns the solution to the original problem given the inverse_data.
        """
        status = solution['status']
        if status in s.SOLUTION_PRESENT:
            opt_val = solution['value']
            primal_vars = {inverse_data[self.VAR_ID]: solution['primal']}
            # Recover dual variables for equality and inequality
            # constraints from the raw solver duals.
            eq_dual = utilities.get_dual_values(
                solution['eq_dual'],
                utilities.extract_dual_value,
                inverse_data[Solver.EQ_CONSTR])
            leq_dual = utilities.get_dual_values(
                solution['ineq_dual'],
                utilities.extract_dual_value,
                inverse_data[Solver.NEQ_CONSTR])
            # Merge both dual maps into a single constraint-id -> value map.
            eq_dual.update(leq_dual)
            dual_vars = eq_dual
            return Solution(status, opt_val, primal_vars, dual_vars, {})
        else:
            # Infeasible/unbounded/error: no primal or dual values.
            return failure_solution(status)
| 41.711027 | 86 | 0.583227 |
import cvxpy.settings as s
from cvxpy.constraints import SOC, ExpCone, PSD, Zero, NonNeg
from cvxpy.reductions.cvx_attr2constr import convex_attributes
from cvxpy.reductions.dcp2cone.cone_matrix_stuffing import ParamConeProg
from cvxpy.reductions.solution import Solution, failure_solution
from cvxpy.reductions.solvers.solver import Solver
from cvxpy.reductions.solvers import utilities
import numpy as np
import scipy.sparse as sp
class LinearOperator(object):
def __init__(self, linear_op, shape):
if sp.issparse(linear_op):
self._matmul = lambda X: linear_op @ X
else:
self._matmul = linear_op
self.shape = shape
def __call__(self, X):
return self._matmul(X)
def as_linear_operator(linear_op):
if isinstance(linear_op, LinearOperator):
return linear_op
elif sp.issparse(linear_op):
return LinearOperator(linear_op, linear_op.shape)
def as_block_diag_linear_operator(matrices):
linear_operators = [as_linear_operator(op) for op in matrices]
nrows = [op.shape[0] for op in linear_operators]
ncols = [op.shape[1] for op in linear_operators]
m, n = sum(nrows), sum(ncols)
col_indices = np.append(0, np.cumsum(ncols))
def matmul(X):
outputs = []
for i, op in enumerate(linear_operators):
Xi = X[col_indices[i]:col_indices[i + 1]]
outputs.append(op(Xi))
return sp.vstack(outputs)
return LinearOperator(matmul, (m, n))
class ConicSolver(Solver):
DIMS = "dims"
SUPPORTED_CONSTRAINTS = [Zero, NonNeg]
REQUIRES_CONSTR = False
EXP_CONE_ORDER = None
def accepts(self, problem):
return (isinstance(problem, ParamConeProg)
and (self.MIP_CAPABLE or not problem.is_mixed_integer())
and not convex_attributes([problem.x])
and (len(problem.constraints) > 0 or not self.REQUIRES_CONSTR)
and all(type(c) in self.SUPPORTED_CONSTRAINTS for c in
problem.constraints))
@staticmethod
def get_spacing_matrix(shape, spacing, streak, num_blocks, offset):
num_values = num_blocks * streak
val_arr = np.ones(num_values, dtype=np.float64)
streak_plus_spacing = streak + spacing
row_arr = np.arange(0, num_blocks * streak_plus_spacing).reshape(
num_blocks, streak_plus_spacing)[:, :streak].flatten() + offset
col_arr = np.arange(num_values)
return sp.csc_matrix((val_arr, (row_arr, col_arr)), shape)
def psd_format_mat(self, constr):
return sp.eye(constr.size, format='csc')
def format_constraints(self, problem, exp_cone_order):
restruct_mat = []
for constr in problem.constraints:
total_height = sum([arg.size for arg in constr.args])
if type(constr) == Zero:
restruct_mat.append(-sp.eye(constr.size, format='csr'))
elif type(constr) == NonNeg:
restruct_mat.append(sp.eye(constr.size, format='csr'))
elif type(constr) == SOC:
assert constr.axis == 0, 'SOC must be lowered to axis == 0'
t_spacer = ConicSolver.get_spacing_matrix(
shape=(total_height, constr.args[0].size),
spacing=constr.args[1].shape[0],
streak=1,
num_blocks=constr.args[0].size,
offset=0,
)
X_spacer = ConicSolver.get_spacing_matrix(
shape=(total_height, constr.args[1].size),
spacing=1,
streak=constr.args[1].shape[0],
num_blocks=constr.args[0].size,
offset=1,
)
restruct_mat.append(sp.hstack([t_spacer, X_spacer]))
elif type(constr) == ExpCone:
arg_mats = []
for i, arg in enumerate(constr.args):
space_mat = ConicSolver.get_spacing_matrix(
shape=(total_height, arg.size),
spacing=len(exp_cone_order) - 1,
streak=1,
num_blocks=arg.size,
offset=exp_cone_order[i],
)
arg_mats.append(space_mat)
restruct_mat.append(sp.hstack(arg_mats))
elif type(constr) == PSD:
restruct_mat.append(self.psd_format_mat(constr))
else:
raise ValueError("Unsupported constraint type.")
if restruct_mat:
restruct_mat = as_block_diag_linear_operator(restruct_mat)
unspecified, remainder = divmod(problem.A.shape[0] *
problem.A.shape[1],
restruct_mat.shape[1])
reshaped_A = problem.A.reshape(restruct_mat.shape[1],
unspecified, order='F').tocsr()
restructured_A = restruct_mat(reshaped_A).tocoo()
restructured_A.row = restructured_A.row.astype(np.int64)
restructured_A.col = restructured_A.col.astype(np.int64)
restructured_A = restructured_A.reshape(
restruct_mat.shape[0] * (problem.x.size + 1),
problem.A.shape[1], order='F')
else:
restructured_A = problem.A
new_param_cone_prog = ParamConeProg(problem.c,
problem.x,
restructured_A,
problem.variables,
problem.var_id_to_col,
problem.constraints,
problem.parameters,
problem.param_id_to_col,
formatted=True)
return new_param_cone_prog
def invert(self, solution, inverse_data):
status = solution['status']
if status in s.SOLUTION_PRESENT:
opt_val = solution['value']
primal_vars = {inverse_data[self.VAR_ID]: solution['primal']}
eq_dual = utilities.get_dual_values(
solution['eq_dual'],
utilities.extract_dual_value,
inverse_data[Solver.EQ_CONSTR])
leq_dual = utilities.get_dual_values(
solution['ineq_dual'],
utilities.extract_dual_value,
inverse_data[Solver.NEQ_CONSTR])
eq_dual.update(leq_dual)
dual_vars = eq_dual
return Solution(status, opt_val, primal_vars, dual_vars, {})
else:
return failure_solution(status)
| true | true |
1c2d2cce86d70b42f0fd8a31f5f38480d589a8a6 | 2,914 | py | Python | f5_cccl/resource/ltm/test/test_irule.py | f5yacobucci/f5-cccl | 64e7fa0a6d4ead9b5209b5b46bf4ed1b6cef036a | [
"Apache-2.0"
] | null | null | null | f5_cccl/resource/ltm/test/test_irule.py | f5yacobucci/f5-cccl | 64e7fa0a6d4ead9b5209b5b46bf4ed1b6cef036a | [
"Apache-2.0"
] | null | null | null | f5_cccl/resource/ltm/test/test_irule.py | f5yacobucci/f5-cccl | 64e7fa0a6d4ead9b5209b5b46bf4ed1b6cef036a | [
"Apache-2.0"
] | 1 | 2019-11-02T05:22:48.000Z | 2019-11-02T05:22:48.000Z | #!/usr/bin/env python
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from copy import copy
from f5_cccl.resource.ltm.irule import IRule
from mock import Mock
import pytest
ssl_redirect_irule_1 = """
when HTTP_REQUEST {
HTTP::redirect https://[getfield [HTTP::host] \":\" 1][HTTP::uri]
}
"""
cfg_test = {
'name': 'ssl_redirect',
'partition': 'my_partition',
'apiAnonymous': ssl_redirect_irule_1
}
class FakeObj: pass
@pytest.fixture
def bigip():
    """Provide a mocked BIG-IP connection object for the tests."""
    return Mock()
def test_create_irule():
    """An IRule built from cfg_test exposes every configured item."""
    irule = IRule(**cfg_test)
    assert irule
    # Each key/value pair from the config must round-trip into .data.
    for key, value in cfg_test.items():
        assert irule.data[key] == value
def test_hash():
    """Hashes agree for identical configs; differ when name or partition change."""
    irule1 = IRule(**cfg_test)
    irule2 = IRule(**cfg_test)
    # Same config but a different rule name.
    renamed_cfg = copy(cfg_test)
    renamed_cfg['name'] = 'test'
    irule3 = IRule(**renamed_cfg)
    # Same config but a different partition.
    moved_cfg = copy(cfg_test)
    moved_cfg['partition'] = 'other'
    irule4 = IRule(**moved_cfg)
    assert irule1
    assert irule2
    assert irule3
    assert irule4
    assert hash(irule1) == hash(irule2)
    assert hash(irule1) != hash(irule3)
    assert hash(irule1) != hash(irule4)
def test_eq():
    """Equality tracks name, partition and rule body; aliasing implies equality."""
    irule1 = IRule(**cfg_test)
    irule2 = IRule(**cfg_test)
    assert irule1
    assert irule2
    assert irule1 == irule2
    # Differ only by name.
    changed = copy(cfg_test)
    changed['name'] = 'ssl_redirect_2'
    irule2 = IRule(**changed)
    assert irule1 != irule2
    # Differ only by partition.
    changed = copy(cfg_test)
    changed['partition'] = 'test'
    irule2 = IRule(**changed)
    assert irule1 != irule2
    # Differ only by the rule source itself.
    changed = copy(cfg_test)
    changed['apiAnonymous'] = None
    irule2 = IRule(**changed)
    assert irule1 != irule2
    # Comparison against an unrelated type is never equal.
    assert irule1 != FakeObj
    # Aliasing the same object is trivially equal.
    irule2 = irule1
    assert irule1 == irule2
def test_uri_path(bigip):
    """The iRule resource maps onto the BIG-IP ltm rules endpoint."""
    irule = IRule(**cfg_test)
    assert irule
    assert irule._uri_path(bigip) == bigip.tm.ltm.rules.rule
| 21.746269 | 74 | 0.640014 |
from copy import copy
from f5_cccl.resource.ltm.irule import IRule
from mock import Mock
import pytest
ssl_redirect_irule_1 = """
when HTTP_REQUEST {
HTTP::redirect https://[getfield [HTTP::host] \":\" 1][HTTP::uri]
}
"""
cfg_test = {
'name': 'ssl_redirect',
'partition': 'my_partition',
'apiAnonymous': ssl_redirect_irule_1
}
class FakeObj: pass
@pytest.fixture
def bigip():
bigip = Mock()
return bigip
def test_create_irule():
irule = IRule(
**cfg_test
)
assert irule
for k,v in cfg_test.items():
assert irule.data[k] == v
def test_hash():
irule1 = IRule(
**cfg_test
)
irule2 = IRule(
**cfg_test
)
cfg_changed = copy(cfg_test)
cfg_changed['name'] = 'test'
irule3 = IRule(
**cfg_changed
)
cfg_changed = copy(cfg_test)
cfg_changed['partition'] = 'other'
irule4 = IRule(
**cfg_changed
)
assert irule1
assert irule2
assert irule3
assert irule4
assert hash(irule1) == hash(irule2)
assert hash(irule1) != hash(irule3)
assert hash(irule1) != hash(irule4)
def test_eq():
partition = 'Common'
name = 'irule_1'
irule1 = IRule(
**cfg_test
)
irule2 = IRule(
**cfg_test
)
assert irule1
assert irule2
assert irule1 == irule2
cfg_changed = copy(cfg_test)
cfg_changed['name'] = 'ssl_redirect_2'
irule2 = IRule(**cfg_changed)
assert irule1 != irule2
cfg_changed = copy(cfg_test)
cfg_changed['partition'] = 'test'
irule2 = IRule(**cfg_changed)
assert irule1 != irule2
cfg_changed = copy(cfg_test)
cfg_changed['apiAnonymous'] = None
irule2 = IRule(**cfg_changed)
assert irule1 != irule2
fake = FakeObj
assert irule1 != fake
irule2 = irule1
assert irule1 == irule2
def test_uri_path(bigip):
irule = IRule(
**cfg_test
)
assert irule
assert irule._uri_path(bigip) == bigip.tm.ltm.rules.rule
| true | true |
1c2d2d3875cb0e774551d5ff9f6a71a5260ab0f5 | 109 | py | Python | notes/admin.py | mbassale/noteprg | 31042224c396441ba6954de4ed5d0b4f3e1efa71 | [
"MIT"
] | null | null | null | notes/admin.py | mbassale/noteprg | 31042224c396441ba6954de4ed5d0b4f3e1efa71 | [
"MIT"
] | 4 | 2021-03-30T12:57:11.000Z | 2021-06-10T18:45:28.000Z | notes/admin.py | mbassale/noteprg | 31042224c396441ba6954de4ed5d0b4f3e1efa71 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Note
# Register the Note model with the default admin site.
# NOTE: a bare ``admin.register(Note)`` is a decorator factory — it only
# returns a wrapper and registers nothing; ``admin.site.register`` is the
# call that actually registers the model.
admin.site.register(Note)
| 18.166667 | 32 | 0.798165 | from django.contrib import admin
from .models import Note
admin.site.register(Note)
| true | true |
1c2d2d3cf5b1d5868ecf0db1f97ce9e33cb66b8c | 2,844 | py | Python | tests/common/test_run/truncatemod_run.py | laekov/akg | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | [
"Apache-2.0"
] | 1 | 2020-08-31T02:43:43.000Z | 2020-08-31T02:43:43.000Z | tests/common/test_run/truncatemod_run.py | laekov/akg | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | [
"Apache-2.0"
] | null | null | null | tests/common/test_run/truncatemod_run.py | laekov/akg | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from akg.utils import kernel_exec as utils
from tensorio import compare_tensor
import numpy as np
from gen_random import random_gaussian
from test_op import truncatemod
from base import get_rtol_atol
def truncatemod_run(shape1, shape2, dtype, attrs):
    """Build, launch and validate the AKG truncatemod operator.

    In tuning mode only the compiled module is returned (plus the
    generated test data when a tuning round is requested); otherwise the
    kernel is executed on device and its output compared against the
    numpy reference produced by ``gen_data``.
    """
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(truncatemod.truncatemod, [shape1, shape2], [dtype, dtype], kernel_name=kernel_name,
                                  attrs=attrs, dump_code=True, tuning=t)
        if t:
            expect, input1, input2, output = gen_data(dtype, shape1, shape2)
            return mod, expect, (input1, input2, output)
        else:
            return mod
    else:
        expect, input1, input2, output = gen_data(dtype, shape1, shape2)
        mod = utils.op_build_test(truncatemod.truncatemod, [shape1, shape2], [dtype, dtype], kernel_name="truncatemod",
                                  attrs=attrs, dump_code=True)
        output = utils.mod_launch(mod, (input1, input2, output), expect=expect)
        # Comparison tolerances are dtype-dependent (see get_rtol_atol).
        rtol, atol = get_rtol_atol("truncatemod", dtype)
        res = compare_tensor(output, expect, rtol=rtol, atol=atol, equal_nan=True)
        return (input1, input2), output, expect, res
def truncatemod_compute(x, y):
    """Numpy reference for truncatemod: ``x - y * trunc(x / y)``.

    The arithmetic is carried out in float32 and the result is cast back
    to the original dtype of ``x``.
    """
    src_dtype = x.dtype
    if src_dtype != "float32":
        x = x.astype("float32")
        y = y.astype("float32")
    result = x - y * np.trunc(x / y)
    if result.dtype != src_dtype:
        result = result.astype(src_dtype)
    return result
def gen_data(dtype, shape1, shape2):
    """Generate random inputs, the expected result and an output buffer.

    Returns ``(expect, input1, input2, output)`` where ``output`` is a
    NaN-filled buffer for the kernel to write into.
    """
    input1 = random_gaussian(shape1).astype(dtype)
    input2 = random_gaussian(shape2).astype(dtype)
    # mod 0 is undefined: replace exact zeros in the divisor with ones.
    # np.where is the correct element-wise form here; the previous
    # np.select(bool_array, ...) iterated the array as a *list* of
    # separate conditions, which is not the intended per-element select.
    input2 = np.where(input2 == 0, np.ones_like(input2), input2)
    if utils.product_is_mini():
        # Clamp |input2| away from zero so input1/input2 cannot overflow
        # on mini (reduced-precision) products.
        lower_bound = 1e-3
        input2 = np.select([input2 >= 0, input2 < 0], [np.maximum(input2, lower_bound),
                                                       np.minimum(input2, -lower_bound)])
    expect = truncatemod_compute(input1, input2)
    output = np.full(expect.shape, np.nan, dtype)
    return expect, input1, input2, output
| 40.056338 | 119 | 0.662799 |
from akg.utils import kernel_exec as utils
from tensorio import compare_tensor
import numpy as np
from gen_random import random_gaussian
from test_op import truncatemod
from base import get_rtol_atol
def truncatemod_run(shape1, shape2, dtype, attrs):
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(truncatemod.truncatemod, [shape1, shape2], [dtype, dtype], kernel_name=kernel_name,
attrs=attrs, dump_code=True, tuning=t)
if t:
expect, input1, input2, output = gen_data(dtype, shape1, shape2)
return mod, expect, (input1, input2, output)
else:
return mod
else:
expect, input1, input2, output = gen_data(dtype, shape1, shape2)
mod = utils.op_build_test(truncatemod.truncatemod, [shape1, shape2], [dtype, dtype], kernel_name="truncatemod",
attrs=attrs, dump_code=True)
output = utils.mod_launch(mod, (input1, input2, output), expect=expect)
rtol, atol = get_rtol_atol("truncatemod", dtype)
res = compare_tensor(output, expect, rtol=rtol, atol=atol, equal_nan=True)
return (input1, input2), output, expect, res
def truncatemod_compute(x, y):
dtype = x.dtype
if dtype != "float32":
x = x.astype("float32")
y = y.astype("float32")
expect = (x - y*np.trunc(x/y))
if expect.dtype != dtype:
expect = expect.astype(dtype)
return expect
def gen_data(dtype, shape1, shape2):
    input1 = random_gaussian(shape1).astype(dtype)
    input2 = random_gaussian(shape2).astype(dtype)
    # np.where (not np.select on a bare bool array) is the correct
    # element-wise replacement of zero divisors.
    input2 = np.where(input2 == 0, np.ones_like(input2), input2)
    if utils.product_is_mini():
        lower_bound = 1e-3
        input2 = np.select([input2 >= 0, input2 < 0], [np.maximum(input2, lower_bound),
                                                       np.minimum(input2, -lower_bound)])
    expect = truncatemod_compute(input1, input2)
    output = np.full(expect.shape, np.nan, dtype)
    return expect, input1, input2, output
| true | true |
1c2d2defd3c46424bb893cb4ea7bdee520efc1cb | 821 | py | Python | lab5.py | GregoryLazarev/SP_LAB5_6 | 39b1914e2782d2f20fc412da82b485b187de6dda | [
"MIT"
] | null | null | null | lab5.py | GregoryLazarev/SP_LAB5_6 | 39b1914e2782d2f20fc412da82b485b187de6dda | [
"MIT"
] | null | null | null | lab5.py | GregoryLazarev/SP_LAB5_6 | 39b1914e2782d2f20fc412da82b485b187de6dda | [
"MIT"
] | null | null | null | import threading
tr_dict = dict()
mutex = threading.Lock()
prohibited = (',','.','?','!','-','+','\'','@')
def Count_trigrams(in_str):
    """Count sliding-window trigrams in ``in_str`` and merge them globally.

    A trigram is any run of three consecutive characters not containing a
    character from ``prohibited``; a prohibited character resets the
    window.  The local counts are merged into the shared dictionary via
    ``Add_to_global``.
    """
    local_counts = dict()
    window = ""
    for ch in in_str:
        window = window + ch if ch not in prohibited else ""
        if len(window) == 3:
            local_counts[window] = local_counts.get(window, 0) + 1
            # Slide the window forward by one character.
            window = window[1:]
    Add_to_global(local_counts)
def Add_to_global(trgrms):
    """Merge a thread-local trigram count into the shared tr_dict.

    The global mutex is acquired per key so concurrent threads can
    interleave safely while merging their partial counts.
    """
    for i in trgrms:
        mutex.acquire()
        if i in tr_dict:
            tr_dict[i] += trgrms[i]
        else:
            tr_dict[i] = trgrms[i]
        mutex.release()
# Read one line from the user and count trigrams of each
# whitespace-separated word in its own thread.
in_str = input("input your string here:\n")
strs = in_str.split()
threads = [
    threading.Thread(target = Count_trigrams, args = (s,))
    for s in strs
]
for t in threads:
    t.start()
# Wait for every worker to finish before the merged result is used.
for t in threads:
    t.join()
print(tr_dict) | 18.244444 | 56 | 0.580999 | import threading
tr_dict = dict()
mutex = threading.Lock()
prohibited = (',','.','?','!','-','+','\'','@')
def Count_trigrams(in_str):
trgrms = dict()
trgrm = ""
for i in in_str:
if i not in prohibited:
trgrm += i
else:
trgrm = ""
if len(trgrm) == 3:
if trgrm in trgrms:
trgrms[trgrm] += 1
else:
trgrms[trgrm] = 1
trgrm = trgrm[1:]
Add_to_global(trgrms)
def Add_to_global(trgrms):
for i in trgrms:
mutex.acquire()
if i in tr_dict:
tr_dict[i] += trgrms[i]
else:
tr_dict[i] = trgrms[i]
mutex.release()
in_str = input("input your string here:\n")
strs = in_str.split()
threads = [
threading.Thread(target = Count_trigrams, args = (s,))
for s in strs
]
for t in threads:
t.start()
for t in threads:
t.join()
print(tr_dict) | true | true |
1c2d2df0d25244136dde5a5320b73448f8857bc0 | 22,649 | py | Python | certbot/tests/compat/filesystem_test.py | venim/certbot | fe01390f925eb72d13709e4cd065e727c9eeaa75 | [
"Apache-2.0"
] | null | null | null | certbot/tests/compat/filesystem_test.py | venim/certbot | fe01390f925eb72d13709e4cd065e727c9eeaa75 | [
"Apache-2.0"
] | null | null | null | certbot/tests/compat/filesystem_test.py | venim/certbot | fe01390f925eb72d13709e4cd065e727c9eeaa75 | [
"Apache-2.0"
] | null | null | null | """Tests for certbot.compat.filesystem"""
import contextlib
import errno
import unittest
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock
from certbot import util
from certbot._internal import lock
from certbot.compat import filesystem
from certbot.compat import os
import certbot.tests.util as test_util
from certbot.tests.util import TempDirTestCase
try:
import win32api
import win32security
import ntsecuritycon
POSIX_MODE = False
except ImportError:
POSIX_MODE = True
EVERYBODY_SID = 'S-1-1-0'
SYSTEM_SID = 'S-1-5-18'
ADMINS_SID = 'S-1-5-32-544'
@unittest.skipIf(POSIX_MODE, reason='Tests specific to Windows security')
class WindowsChmodTests(TempDirTestCase):
"""Unit tests for Windows chmod function in filesystem module"""
def setUp(self):
super(WindowsChmodTests, self).setUp()
self.probe_path = _create_probe(self.tempdir)
def test_symlink_resolution(self):
link_path = os.path.join(self.tempdir, 'link')
os.symlink(self.probe_path, link_path)
ref_dacl_probe = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
ref_dacl_link = _get_security_dacl(link_path).GetSecurityDescriptorDacl()
filesystem.chmod(link_path, 0o700)
# Assert the real file is impacted, not the link.
cur_dacl_probe = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
cur_dacl_link = _get_security_dacl(link_path).GetSecurityDescriptorDacl()
self.assertFalse(filesystem._compare_dacls(ref_dacl_probe, cur_dacl_probe)) # pylint: disable=protected-access
self.assertTrue(filesystem._compare_dacls(ref_dacl_link, cur_dacl_link)) # pylint: disable=protected-access
def test_world_permission(self):
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
filesystem.chmod(self.probe_path, 0o700)
dacl = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
self.assertFalse([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
filesystem.chmod(self.probe_path, 0o704)
dacl = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
self.assertTrue([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
def test_group_permissions_noop(self):
filesystem.chmod(self.probe_path, 0o700)
ref_dacl_probe = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
filesystem.chmod(self.probe_path, 0o740)
cur_dacl_probe = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
self.assertTrue(filesystem._compare_dacls(ref_dacl_probe, cur_dacl_probe)) # pylint: disable=protected-access
def test_admin_permissions(self):
system = win32security.ConvertStringSidToSid(SYSTEM_SID)
admins = win32security.ConvertStringSidToSid(ADMINS_SID)
filesystem.chmod(self.probe_path, 0o400)
dacl = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
system_aces = [dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == system]
admin_aces = [dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == admins]
self.assertEqual(len(system_aces), 1)
self.assertEqual(len(admin_aces), 1)
self.assertEqual(system_aces[0][1], ntsecuritycon.FILE_ALL_ACCESS)
self.assertEqual(admin_aces[0][1], ntsecuritycon.FILE_ALL_ACCESS)
def test_read_flag(self):
self._test_flag(4, ntsecuritycon.FILE_GENERIC_READ)
def test_execute_flag(self):
self._test_flag(1, ntsecuritycon.FILE_GENERIC_EXECUTE)
def test_write_flag(self):
self._test_flag(2, (ntsecuritycon.FILE_ALL_ACCESS
^ ntsecuritycon.FILE_GENERIC_READ
^ ntsecuritycon.FILE_GENERIC_EXECUTE))
def test_full_flag(self):
self._test_flag(7, ntsecuritycon.FILE_ALL_ACCESS)
def _test_flag(self, everyone_mode, windows_flag):
# Note that flag is tested against `everyone`, not `user`, because practically these unit
# tests are executed with admin privilege, so current user is effectively the admins group,
# and so will always have all rights.
filesystem.chmod(self.probe_path, 0o700 | everyone_mode)
dacl = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
acls_everybody = [dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody]
self.assertEqual(len(acls_everybody), 1)
acls_everybody = acls_everybody[0]
self.assertEqual(acls_everybody[1], windows_flag)
def test_user_admin_dacl_consistency(self):
# Set ownership of target to authenticated user
authenticated_user, _, _ = win32security.LookupAccountName("", win32api.GetUserName())
security_owner = _get_security_owner(self.probe_path)
_set_owner(self.probe_path, security_owner, authenticated_user)
filesystem.chmod(self.probe_path, 0o700)
security_dacl = _get_security_dacl(self.probe_path)
# We expect three ACE: one for admins, one for system, and one for the user
self.assertEqual(security_dacl.GetSecurityDescriptorDacl().GetAceCount(), 3)
# Set ownership of target to Administrators user group
admin_user = win32security.ConvertStringSidToSid(ADMINS_SID)
security_owner = _get_security_owner(self.probe_path)
_set_owner(self.probe_path, security_owner, admin_user)
filesystem.chmod(self.probe_path, 0o700)
security_dacl = _get_security_dacl(self.probe_path)
# We expect only two ACE: one for admins, one for system,
# since the user is also the admins group
self.assertEqual(security_dacl.GetSecurityDescriptorDacl().GetAceCount(), 2)
class ComputePrivateKeyModeTest(TempDirTestCase):
    """Tests for filesystem.compute_private_key_mode."""
    def setUp(self):
        super(ComputePrivateKeyModeTest, self).setUp()
        self.probe_path = _create_probe(self.tempdir)
    def test_compute_private_key_mode(self):
        filesystem.chmod(self.probe_path, 0o777)
        new_mode = filesystem.compute_private_key_mode(self.probe_path, 0o600)
        if POSIX_MODE:
            # On Linux, RWX permissions for group and R permission for world
            # are persisted from the existing mode.
            self.assertEqual(new_mode, 0o674)
        else:
            # On Windows no existing permission is persisted.
            self.assertEqual(new_mode, 0o600)
@unittest.skipIf(POSIX_MODE, reason='Tests specific to Windows security')
class WindowsOpenTest(TempDirTestCase):
def test_new_file_correct_permissions(self):
path = os.path.join(self.tempdir, 'file')
desc = filesystem.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o700)
os.close(desc)
dacl = _get_security_dacl(path).GetSecurityDescriptorDacl()
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
self.assertFalse([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
def test_existing_file_correct_permissions(self):
path = os.path.join(self.tempdir, 'file')
open(path, 'w').close()
desc = filesystem.open(path, os.O_EXCL | os.O_RDWR, 0o700)
os.close(desc)
dacl = _get_security_dacl(path).GetSecurityDescriptorDacl()
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
self.assertFalse([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
def test_create_file_on_open(self):
# os.O_CREAT | os.O_EXCL + file not exists = OK
self._test_one_creation(1, file_exist=False, flags=(os.O_CREAT | os.O_EXCL))
# os.O_CREAT | os.O_EXCL + file exists = EEXIST OS exception
with self.assertRaises(OSError) as raised:
self._test_one_creation(2, file_exist=True, flags=(os.O_CREAT | os.O_EXCL))
self.assertEqual(raised.exception.errno, errno.EEXIST)
# os.O_CREAT + file not exists = OK
self._test_one_creation(3, file_exist=False, flags=os.O_CREAT)
# os.O_CREAT + file exists = OK
self._test_one_creation(4, file_exist=True, flags=os.O_CREAT)
# os.O_CREAT + file exists (locked) = EACCES OS exception
path = os.path.join(self.tempdir, '5')
open(path, 'w').close()
filelock = lock.LockFile(path)
try:
with self.assertRaises(OSError) as raised:
self._test_one_creation(5, file_exist=True, flags=os.O_CREAT)
self.assertEqual(raised.exception.errno, errno.EACCES)
finally:
filelock.release()
# os.O_CREAT not set + file not exists = OS exception
with self.assertRaises(OSError):
self._test_one_creation(6, file_exist=False, flags=os.O_RDONLY)
def _test_one_creation(self, num, file_exist, flags):
one_file = os.path.join(self.tempdir, str(num))
if file_exist and not os.path.exists(one_file):
with open(one_file, 'w'):
pass
handler = None
try:
handler = filesystem.open(one_file, flags)
finally:
if handler:
os.close(handler)
@unittest.skipIf(POSIX_MODE, reason='Test specific to Windows security')
class WindowsMkdirTests(test_util.TempDirTestCase):
"""Unit tests for Windows mkdir + makedirs functions in filesystem module"""
def test_mkdir_correct_permissions(self):
path = os.path.join(self.tempdir, 'dir')
filesystem.mkdir(path, 0o700)
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
dacl = _get_security_dacl(path).GetSecurityDescriptorDacl()
self.assertFalse([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
def test_makedirs_correct_permissions(self):
path = os.path.join(self.tempdir, 'dir')
subpath = os.path.join(path, 'subpath')
filesystem.makedirs(subpath, 0o700)
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
dacl = _get_security_dacl(subpath).GetSecurityDescriptorDacl()
self.assertFalse([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
def test_makedirs_switch_os_mkdir(self):
path = os.path.join(self.tempdir, 'dir')
import os as std_os # pylint: disable=os-module-forbidden
original_mkdir = std_os.mkdir
filesystem.makedirs(path)
self.assertEqual(original_mkdir, std_os.mkdir)
try:
filesystem.makedirs(path) # Will fail because path already exists
except OSError:
pass
self.assertEqual(original_mkdir, std_os.mkdir)
class OwnershipTest(test_util.TempDirTestCase):
"""Tests about copy_ownership_and_apply_mode and has_same_ownership"""
def setUp(self):
super(OwnershipTest, self).setUp()
self.probe_path = _create_probe(self.tempdir)
@unittest.skipIf(POSIX_MODE, reason='Test specific to Windows security')
def test_copy_ownership_windows(self):
system = win32security.ConvertStringSidToSid(SYSTEM_SID)
security = win32security.SECURITY_ATTRIBUTES().SECURITY_DESCRIPTOR
security.SetSecurityDescriptorOwner(system, False)
with mock.patch('win32security.GetFileSecurity') as mock_get:
with mock.patch('win32security.SetFileSecurity') as mock_set:
mock_get.return_value = security
filesystem.copy_ownership_and_apply_mode(
'dummy', self.probe_path, 0o700, copy_user=True, copy_group=False)
self.assertEqual(mock_set.call_count, 2)
first_call = mock_set.call_args_list[0]
security = first_call[0][2]
self.assertEqual(system, security.GetSecurityDescriptorOwner())
second_call = mock_set.call_args_list[1]
security = second_call[0][2]
dacl = security.GetSecurityDescriptorDacl()
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
self.assertTrue(dacl.GetAceCount())
self.assertFalse([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
@unittest.skipUnless(POSIX_MODE, reason='Test specific to Linux security')
def test_copy_ownership_linux(self):
with mock.patch('os.chown') as mock_chown:
with mock.patch('os.chmod') as mock_chmod:
with mock.patch('os.stat') as mock_stat:
mock_stat.return_value.st_uid = 50
mock_stat.return_value.st_gid = 51
filesystem.copy_ownership_and_apply_mode(
'dummy', self.probe_path, 0o700, copy_user=True, copy_group=True)
mock_chown.assert_called_once_with(self.probe_path, 50, 51)
mock_chmod.assert_called_once_with(self.probe_path, 0o700)
def test_has_same_ownership(self):
path1 = os.path.join(self.tempdir, 'test1')
path2 = os.path.join(self.tempdir, 'test2')
util.safe_open(path1, 'w').close()
util.safe_open(path2, 'w').close()
self.assertTrue(filesystem.has_same_ownership(path1, path2))
class CheckPermissionsTest(test_util.TempDirTestCase):
"""Tests relative to functions that check modes."""
def setUp(self):
super(CheckPermissionsTest, self).setUp()
self.probe_path = _create_probe(self.tempdir)
def test_check_mode(self):
self.assertTrue(filesystem.check_mode(self.probe_path, 0o744))
filesystem.chmod(self.probe_path, 0o700)
self.assertFalse(filesystem.check_mode(self.probe_path, 0o744))
@unittest.skipIf(POSIX_MODE, reason='Test specific to Windows security')
def test_check_owner_windows(self):
self.assertTrue(filesystem.check_owner(self.probe_path))
system = win32security.ConvertStringSidToSid(SYSTEM_SID)
security = win32security.SECURITY_ATTRIBUTES().SECURITY_DESCRIPTOR
security.SetSecurityDescriptorOwner(system, False)
with mock.patch('win32security.GetFileSecurity') as mock_get:
mock_get.return_value = security
self.assertFalse(filesystem.check_owner(self.probe_path))
@unittest.skipUnless(POSIX_MODE, reason='Test specific to Linux security')
def test_check_owner_linux(self):
self.assertTrue(filesystem.check_owner(self.probe_path))
import os as std_os # pylint: disable=os-module-forbidden
# See related inline comment in certbot.compat.filesystem.check_owner method
# that explains why MyPy/PyLint check disable is needed here.
uid = std_os.getuid()
with mock.patch('os.getuid') as mock_uid:
mock_uid.return_value = uid + 1
self.assertFalse(filesystem.check_owner(self.probe_path))
def test_check_permissions(self):
self.assertTrue(filesystem.check_permissions(self.probe_path, 0o744))
with mock.patch('certbot.compat.filesystem.check_mode') as mock_mode:
mock_mode.return_value = False
self.assertFalse(filesystem.check_permissions(self.probe_path, 0o744))
with mock.patch('certbot.compat.filesystem.check_owner') as mock_owner:
mock_owner.return_value = False
self.assertFalse(filesystem.check_permissions(self.probe_path, 0o744))
def test_check_min_permissions(self):
filesystem.chmod(self.probe_path, 0o744)
self.assertTrue(filesystem.has_min_permissions(self.probe_path, 0o744))
filesystem.chmod(self.probe_path, 0o700)
self.assertFalse(filesystem.has_min_permissions(self.probe_path, 0o744))
filesystem.chmod(self.probe_path, 0o741)
self.assertFalse(filesystem.has_min_permissions(self.probe_path, 0o744))
def test_is_world_reachable(self):
filesystem.chmod(self.probe_path, 0o744)
self.assertTrue(filesystem.has_world_permissions(self.probe_path))
filesystem.chmod(self.probe_path, 0o700)
self.assertFalse(filesystem.has_world_permissions(self.probe_path))
class OsReplaceTest(test_util.TempDirTestCase):
"""Test to ensure consistent behavior of rename method"""
def test_os_replace_to_existing_file(self):
"""Ensure that replace will effectively rename src into dst for all platforms."""
src = os.path.join(self.tempdir, 'src')
dst = os.path.join(self.tempdir, 'dst')
open(src, 'w').close()
open(dst, 'w').close()
# On Windows, a direct call to os.rename would fail because dst already exists.
filesystem.replace(src, dst)
self.assertFalse(os.path.exists(src))
self.assertTrue(os.path.exists(dst))
class RealpathTest(test_util.TempDirTestCase):
"""Tests for realpath method"""
def setUp(self):
super(RealpathTest, self).setUp()
self.probe_path = _create_probe(self.tempdir)
def test_symlink_resolution(self):
# Remove any symlinks already in probe_path
self.probe_path = filesystem.realpath(self.probe_path)
# Absolute resolution
link_path = os.path.join(self.tempdir, 'link_abs')
os.symlink(self.probe_path, link_path)
self.assertEqual(self.probe_path, filesystem.realpath(self.probe_path))
self.assertEqual(self.probe_path, filesystem.realpath(link_path))
# Relative resolution
curdir = os.getcwd()
link_path = os.path.join(self.tempdir, 'link_rel')
probe_name = os.path.basename(self.probe_path)
try:
os.chdir(os.path.dirname(self.probe_path))
os.symlink(probe_name, link_path)
self.assertEqual(self.probe_path, filesystem.realpath(probe_name))
self.assertEqual(self.probe_path, filesystem.realpath(link_path))
finally:
os.chdir(curdir)
def test_symlink_loop_mitigation(self):
link1_path = os.path.join(self.tempdir, 'link1')
link2_path = os.path.join(self.tempdir, 'link2')
link3_path = os.path.join(self.tempdir, 'link3')
os.symlink(link1_path, link2_path)
os.symlink(link2_path, link3_path)
os.symlink(link3_path, link1_path)
with self.assertRaises(RuntimeError) as error:
filesystem.realpath(link1_path)
self.assertTrue('link1 is a loop!' in str(error.exception))
class IsExecutableTest(test_util.TempDirTestCase):
"""Tests for is_executable method"""
def test_not_executable(self):
file_path = os.path.join(self.tempdir, "foo")
# On Windows a file created within Certbot will always have all permissions to the
# Administrators group set. Since the unit tests are typically executed under elevated
# privileges, it means that current user will always have effective execute rights on the
# hook script, and so the test will fail. To prevent that and represent a file created
# outside Certbot as typically a hook file is, we mock the _generate_dacl function in
# certbot.compat.filesystem to give rights only to the current user. This implies removing
# all ACEs except the first one from the DACL created by original _generate_dacl function.
from certbot.compat.filesystem import _generate_dacl
def _execute_mock(user_sid, mode):
dacl = _generate_dacl(user_sid, mode)
for _ in range(1, dacl.GetAceCount()):
dacl.DeleteAce(1) # DeleteAce dynamically updates the internal index mapping.
return dacl
# create a non-executable file
with mock.patch("certbot.compat.filesystem._generate_dacl", side_effect=_execute_mock):
os.close(filesystem.open(file_path, os.O_CREAT | os.O_WRONLY, 0o666))
self.assertFalse(filesystem.is_executable(file_path))
@mock.patch("certbot.compat.filesystem.os.path.isfile")
@mock.patch("certbot.compat.filesystem.os.access")
def test_full_path(self, mock_access, mock_isfile):
with _fix_windows_runtime():
mock_access.return_value = True
mock_isfile.return_value = True
self.assertTrue(filesystem.is_executable("/path/to/exe"))
@mock.patch("certbot.compat.filesystem.os.path.isfile")
@mock.patch("certbot.compat.filesystem.os.access")
def test_rel_path(self, mock_access, mock_isfile):
with _fix_windows_runtime():
mock_access.return_value = True
mock_isfile.return_value = True
self.assertTrue(filesystem.is_executable("exe"))
@mock.patch("certbot.compat.filesystem.os.path.isfile")
@mock.patch("certbot.compat.filesystem.os.access")
def test_not_found(self, mock_access, mock_isfile):
with _fix_windows_runtime():
mock_access.return_value = True
mock_isfile.return_value = False
self.assertFalse(filesystem.is_executable("exe"))
@contextlib.contextmanager
def _fix_windows_runtime():
    """No-op on POSIX; on Windows, mock file security so the effective
    rights from the DACL always report execute permission."""
    if os.name != 'nt':
        yield
    else:
        with mock.patch('win32security.GetFileSecurity') as mock_get:
            dacl_mock = mock_get.return_value.GetSecurityDescriptorDacl
            mode_mock = dacl_mock.return_value.GetEffectiveRightsFromAcl
            mode_mock.return_value = ntsecuritycon.FILE_GENERIC_EXECUTE
            yield
def _get_security_dacl(target):
    """Return the security descriptor of ``target`` with DACL information."""
    return win32security.GetFileSecurity(target, win32security.DACL_SECURITY_INFORMATION)
def _get_security_owner(target):
    """Return the security descriptor of ``target`` with owner information."""
    return win32security.GetFileSecurity(target, win32security.OWNER_SECURITY_INFORMATION)
def _set_owner(target, security_owner, user):
    """Set ``user`` as owner in the given security descriptor and apply it to ``target``."""
    security_owner.SetSecurityDescriptorOwner(user, False)
    win32security.SetFileSecurity(
        target, win32security.OWNER_SECURITY_INFORMATION, security_owner)
def _create_probe(tempdir):
    """Create a 0o744 probe file inside ``tempdir`` and return its path."""
    filesystem.chmod(tempdir, 0o744)
    probe_path = os.path.join(tempdir, 'probe')
    util.safe_open(probe_path, 'w', chmod=0o744).close()
    return probe_path
if __name__ == "__main__":
unittest.main() # pragma: no cover
| 41.255009 | 119 | 0.692525 | import contextlib
import errno
import unittest
try:
import mock
except ImportError:
from unittest import mock
from certbot import util
from certbot._internal import lock
from certbot.compat import filesystem
from certbot.compat import os
import certbot.tests.util as test_util
from certbot.tests.util import TempDirTestCase
try:
import win32api
import win32security
import ntsecuritycon
POSIX_MODE = False
except ImportError:
POSIX_MODE = True
EVERYBODY_SID = 'S-1-1-0'
SYSTEM_SID = 'S-1-5-18'
ADMINS_SID = 'S-1-5-32-544'
@unittest.skipIf(POSIX_MODE, reason='Tests specific to Windows security')
class WindowsChmodTests(TempDirTestCase):
def setUp(self):
super(WindowsChmodTests, self).setUp()
self.probe_path = _create_probe(self.tempdir)
def test_symlink_resolution(self):
link_path = os.path.join(self.tempdir, 'link')
os.symlink(self.probe_path, link_path)
ref_dacl_probe = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
ref_dacl_link = _get_security_dacl(link_path).GetSecurityDescriptorDacl()
filesystem.chmod(link_path, 0o700)
cur_dacl_probe = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
cur_dacl_link = _get_security_dacl(link_path).GetSecurityDescriptorDacl()
self.assertFalse(filesystem._compare_dacls(ref_dacl_probe, cur_dacl_probe))
self.assertTrue(filesystem._compare_dacls(ref_dacl_link, cur_dacl_link))
def test_world_permission(self):
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
filesystem.chmod(self.probe_path, 0o700)
dacl = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
self.assertFalse([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
filesystem.chmod(self.probe_path, 0o704)
dacl = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
self.assertTrue([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
def test_group_permissions_noop(self):
filesystem.chmod(self.probe_path, 0o700)
ref_dacl_probe = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
filesystem.chmod(self.probe_path, 0o740)
cur_dacl_probe = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
self.assertTrue(filesystem._compare_dacls(ref_dacl_probe, cur_dacl_probe))
def test_admin_permissions(self):
system = win32security.ConvertStringSidToSid(SYSTEM_SID)
admins = win32security.ConvertStringSidToSid(ADMINS_SID)
filesystem.chmod(self.probe_path, 0o400)
dacl = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
system_aces = [dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == system]
admin_aces = [dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == admins]
self.assertEqual(len(system_aces), 1)
self.assertEqual(len(admin_aces), 1)
self.assertEqual(system_aces[0][1], ntsecuritycon.FILE_ALL_ACCESS)
self.assertEqual(admin_aces[0][1], ntsecuritycon.FILE_ALL_ACCESS)
def test_read_flag(self):
self._test_flag(4, ntsecuritycon.FILE_GENERIC_READ)
def test_execute_flag(self):
self._test_flag(1, ntsecuritycon.FILE_GENERIC_EXECUTE)
def test_write_flag(self):
self._test_flag(2, (ntsecuritycon.FILE_ALL_ACCESS
^ ntsecuritycon.FILE_GENERIC_READ
^ ntsecuritycon.FILE_GENERIC_EXECUTE))
def test_full_flag(self):
self._test_flag(7, ntsecuritycon.FILE_ALL_ACCESS)
def _test_flag(self, everyone_mode, windows_flag):
filesystem.chmod(self.probe_path, 0o700 | everyone_mode)
dacl = _get_security_dacl(self.probe_path).GetSecurityDescriptorDacl()
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
acls_everybody = [dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody]
self.assertEqual(len(acls_everybody), 1)
acls_everybody = acls_everybody[0]
self.assertEqual(acls_everybody[1], windows_flag)
def test_user_admin_dacl_consistency(self):
authenticated_user, _, _ = win32security.LookupAccountName("", win32api.GetUserName())
security_owner = _get_security_owner(self.probe_path)
_set_owner(self.probe_path, security_owner, authenticated_user)
filesystem.chmod(self.probe_path, 0o700)
security_dacl = _get_security_dacl(self.probe_path)
self.assertEqual(security_dacl.GetSecurityDescriptorDacl().GetAceCount(), 3)
admin_user = win32security.ConvertStringSidToSid(ADMINS_SID)
security_owner = _get_security_owner(self.probe_path)
_set_owner(self.probe_path, security_owner, admin_user)
filesystem.chmod(self.probe_path, 0o700)
security_dacl = _get_security_dacl(self.probe_path)
self.assertEqual(security_dacl.GetSecurityDescriptorDacl().GetAceCount(), 2)
class ComputePrivateKeyModeTest(TempDirTestCase):
def setUp(self):
super(ComputePrivateKeyModeTest, self).setUp()
self.probe_path = _create_probe(self.tempdir)
def test_compute_private_key_mode(self):
filesystem.chmod(self.probe_path, 0o777)
new_mode = filesystem.compute_private_key_mode(self.probe_path, 0o600)
if POSIX_MODE:
self.assertEqual(new_mode, 0o674)
else:
self.assertEqual(new_mode, 0o600)
@unittest.skipIf(POSIX_MODE, reason='Tests specific to Windows security')
class WindowsOpenTest(TempDirTestCase):
def test_new_file_correct_permissions(self):
path = os.path.join(self.tempdir, 'file')
desc = filesystem.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o700)
os.close(desc)
dacl = _get_security_dacl(path).GetSecurityDescriptorDacl()
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
self.assertFalse([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
def test_existing_file_correct_permissions(self):
path = os.path.join(self.tempdir, 'file')
open(path, 'w').close()
desc = filesystem.open(path, os.O_EXCL | os.O_RDWR, 0o700)
os.close(desc)
dacl = _get_security_dacl(path).GetSecurityDescriptorDacl()
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
self.assertFalse([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
def test_create_file_on_open(self):
self._test_one_creation(1, file_exist=False, flags=(os.O_CREAT | os.O_EXCL))
with self.assertRaises(OSError) as raised:
self._test_one_creation(2, file_exist=True, flags=(os.O_CREAT | os.O_EXCL))
self.assertEqual(raised.exception.errno, errno.EEXIST)
self._test_one_creation(3, file_exist=False, flags=os.O_CREAT)
self._test_one_creation(4, file_exist=True, flags=os.O_CREAT)
path = os.path.join(self.tempdir, '5')
open(path, 'w').close()
filelock = lock.LockFile(path)
try:
with self.assertRaises(OSError) as raised:
self._test_one_creation(5, file_exist=True, flags=os.O_CREAT)
self.assertEqual(raised.exception.errno, errno.EACCES)
finally:
filelock.release()
with self.assertRaises(OSError):
self._test_one_creation(6, file_exist=False, flags=os.O_RDONLY)
def _test_one_creation(self, num, file_exist, flags):
one_file = os.path.join(self.tempdir, str(num))
if file_exist and not os.path.exists(one_file):
with open(one_file, 'w'):
pass
handler = None
try:
handler = filesystem.open(one_file, flags)
finally:
if handler:
os.close(handler)
@unittest.skipIf(POSIX_MODE, reason='Test specific to Windows security')
class WindowsMkdirTests(test_util.TempDirTestCase):
def test_mkdir_correct_permissions(self):
path = os.path.join(self.tempdir, 'dir')
filesystem.mkdir(path, 0o700)
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
dacl = _get_security_dacl(path).GetSecurityDescriptorDacl()
self.assertFalse([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
def test_makedirs_correct_permissions(self):
path = os.path.join(self.tempdir, 'dir')
subpath = os.path.join(path, 'subpath')
filesystem.makedirs(subpath, 0o700)
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
dacl = _get_security_dacl(subpath).GetSecurityDescriptorDacl()
self.assertFalse([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
def test_makedirs_switch_os_mkdir(self):
path = os.path.join(self.tempdir, 'dir')
import os as std_os
original_mkdir = std_os.mkdir
filesystem.makedirs(path)
self.assertEqual(original_mkdir, std_os.mkdir)
try:
filesystem.makedirs(path)
except OSError:
pass
self.assertEqual(original_mkdir, std_os.mkdir)
class OwnershipTest(test_util.TempDirTestCase):
def setUp(self):
super(OwnershipTest, self).setUp()
self.probe_path = _create_probe(self.tempdir)
@unittest.skipIf(POSIX_MODE, reason='Test specific to Windows security')
def test_copy_ownership_windows(self):
system = win32security.ConvertStringSidToSid(SYSTEM_SID)
security = win32security.SECURITY_ATTRIBUTES().SECURITY_DESCRIPTOR
security.SetSecurityDescriptorOwner(system, False)
with mock.patch('win32security.GetFileSecurity') as mock_get:
with mock.patch('win32security.SetFileSecurity') as mock_set:
mock_get.return_value = security
filesystem.copy_ownership_and_apply_mode(
'dummy', self.probe_path, 0o700, copy_user=True, copy_group=False)
self.assertEqual(mock_set.call_count, 2)
first_call = mock_set.call_args_list[0]
security = first_call[0][2]
self.assertEqual(system, security.GetSecurityDescriptorOwner())
second_call = mock_set.call_args_list[1]
security = second_call[0][2]
dacl = security.GetSecurityDescriptorDacl()
everybody = win32security.ConvertStringSidToSid(EVERYBODY_SID)
self.assertTrue(dacl.GetAceCount())
self.assertFalse([dacl.GetAce(index) for index in range(0, dacl.GetAceCount())
if dacl.GetAce(index)[2] == everybody])
@unittest.skipUnless(POSIX_MODE, reason='Test specific to Linux security')
def test_copy_ownership_linux(self):
with mock.patch('os.chown') as mock_chown:
with mock.patch('os.chmod') as mock_chmod:
with mock.patch('os.stat') as mock_stat:
mock_stat.return_value.st_uid = 50
mock_stat.return_value.st_gid = 51
filesystem.copy_ownership_and_apply_mode(
'dummy', self.probe_path, 0o700, copy_user=True, copy_group=True)
mock_chown.assert_called_once_with(self.probe_path, 50, 51)
mock_chmod.assert_called_once_with(self.probe_path, 0o700)
def test_has_same_ownership(self):
path1 = os.path.join(self.tempdir, 'test1')
path2 = os.path.join(self.tempdir, 'test2')
util.safe_open(path1, 'w').close()
util.safe_open(path2, 'w').close()
self.assertTrue(filesystem.has_same_ownership(path1, path2))
class CheckPermissionsTest(test_util.TempDirTestCase):
def setUp(self):
super(CheckPermissionsTest, self).setUp()
self.probe_path = _create_probe(self.tempdir)
def test_check_mode(self):
self.assertTrue(filesystem.check_mode(self.probe_path, 0o744))
filesystem.chmod(self.probe_path, 0o700)
self.assertFalse(filesystem.check_mode(self.probe_path, 0o744))
@unittest.skipIf(POSIX_MODE, reason='Test specific to Windows security')
def test_check_owner_windows(self):
self.assertTrue(filesystem.check_owner(self.probe_path))
system = win32security.ConvertStringSidToSid(SYSTEM_SID)
security = win32security.SECURITY_ATTRIBUTES().SECURITY_DESCRIPTOR
security.SetSecurityDescriptorOwner(system, False)
with mock.patch('win32security.GetFileSecurity') as mock_get:
mock_get.return_value = security
self.assertFalse(filesystem.check_owner(self.probe_path))
@unittest.skipUnless(POSIX_MODE, reason='Test specific to Linux security')
def test_check_owner_linux(self):
self.assertTrue(filesystem.check_owner(self.probe_path))
import os as std_os
uid = std_os.getuid()
with mock.patch('os.getuid') as mock_uid:
mock_uid.return_value = uid + 1
self.assertFalse(filesystem.check_owner(self.probe_path))
def test_check_permissions(self):
self.assertTrue(filesystem.check_permissions(self.probe_path, 0o744))
with mock.patch('certbot.compat.filesystem.check_mode') as mock_mode:
mock_mode.return_value = False
self.assertFalse(filesystem.check_permissions(self.probe_path, 0o744))
with mock.patch('certbot.compat.filesystem.check_owner') as mock_owner:
mock_owner.return_value = False
self.assertFalse(filesystem.check_permissions(self.probe_path, 0o744))
def test_check_min_permissions(self):
filesystem.chmod(self.probe_path, 0o744)
self.assertTrue(filesystem.has_min_permissions(self.probe_path, 0o744))
filesystem.chmod(self.probe_path, 0o700)
self.assertFalse(filesystem.has_min_permissions(self.probe_path, 0o744))
filesystem.chmod(self.probe_path, 0o741)
self.assertFalse(filesystem.has_min_permissions(self.probe_path, 0o744))
def test_is_world_reachable(self):
filesystem.chmod(self.probe_path, 0o744)
self.assertTrue(filesystem.has_world_permissions(self.probe_path))
filesystem.chmod(self.probe_path, 0o700)
self.assertFalse(filesystem.has_world_permissions(self.probe_path))
class OsReplaceTest(test_util.TempDirTestCase):
def test_os_replace_to_existing_file(self):
src = os.path.join(self.tempdir, 'src')
dst = os.path.join(self.tempdir, 'dst')
open(src, 'w').close()
open(dst, 'w').close()
filesystem.replace(src, dst)
self.assertFalse(os.path.exists(src))
self.assertTrue(os.path.exists(dst))
class RealpathTest(test_util.TempDirTestCase):
def setUp(self):
super(RealpathTest, self).setUp()
self.probe_path = _create_probe(self.tempdir)
def test_symlink_resolution(self):
self.probe_path = filesystem.realpath(self.probe_path)
link_path = os.path.join(self.tempdir, 'link_abs')
os.symlink(self.probe_path, link_path)
self.assertEqual(self.probe_path, filesystem.realpath(self.probe_path))
self.assertEqual(self.probe_path, filesystem.realpath(link_path))
curdir = os.getcwd()
link_path = os.path.join(self.tempdir, 'link_rel')
probe_name = os.path.basename(self.probe_path)
try:
os.chdir(os.path.dirname(self.probe_path))
os.symlink(probe_name, link_path)
self.assertEqual(self.probe_path, filesystem.realpath(probe_name))
self.assertEqual(self.probe_path, filesystem.realpath(link_path))
finally:
os.chdir(curdir)
def test_symlink_loop_mitigation(self):
link1_path = os.path.join(self.tempdir, 'link1')
link2_path = os.path.join(self.tempdir, 'link2')
link3_path = os.path.join(self.tempdir, 'link3')
os.symlink(link1_path, link2_path)
os.symlink(link2_path, link3_path)
os.symlink(link3_path, link1_path)
with self.assertRaises(RuntimeError) as error:
filesystem.realpath(link1_path)
self.assertTrue('link1 is a loop!' in str(error.exception))
class IsExecutableTest(test_util.TempDirTestCase):
def test_not_executable(self):
file_path = os.path.join(self.tempdir, "foo")
from certbot.compat.filesystem import _generate_dacl
def _execute_mock(user_sid, mode):
dacl = _generate_dacl(user_sid, mode)
for _ in range(1, dacl.GetAceCount()):
dacl.DeleteAce(1)
return dacl
with mock.patch("certbot.compat.filesystem._generate_dacl", side_effect=_execute_mock):
os.close(filesystem.open(file_path, os.O_CREAT | os.O_WRONLY, 0o666))
self.assertFalse(filesystem.is_executable(file_path))
@mock.patch("certbot.compat.filesystem.os.path.isfile")
@mock.patch("certbot.compat.filesystem.os.access")
def test_full_path(self, mock_access, mock_isfile):
with _fix_windows_runtime():
mock_access.return_value = True
mock_isfile.return_value = True
self.assertTrue(filesystem.is_executable("/path/to/exe"))
@mock.patch("certbot.compat.filesystem.os.path.isfile")
@mock.patch("certbot.compat.filesystem.os.access")
def test_rel_path(self, mock_access, mock_isfile):
with _fix_windows_runtime():
mock_access.return_value = True
mock_isfile.return_value = True
self.assertTrue(filesystem.is_executable("exe"))
@mock.patch("certbot.compat.filesystem.os.path.isfile")
@mock.patch("certbot.compat.filesystem.os.access")
def test_not_found(self, mock_access, mock_isfile):
with _fix_windows_runtime():
mock_access.return_value = True
mock_isfile.return_value = False
self.assertFalse(filesystem.is_executable("exe"))
@contextlib.contextmanager
def _fix_windows_runtime():
if os.name != 'nt':
yield
else:
with mock.patch('win32security.GetFileSecurity') as mock_get:
dacl_mock = mock_get.return_value.GetSecurityDescriptorDacl
mode_mock = dacl_mock.return_value.GetEffectiveRightsFromAcl
mode_mock.return_value = ntsecuritycon.FILE_GENERIC_EXECUTE
yield
def _get_security_dacl(target):
return win32security.GetFileSecurity(target, win32security.DACL_SECURITY_INFORMATION)
def _get_security_owner(target):
return win32security.GetFileSecurity(target, win32security.OWNER_SECURITY_INFORMATION)
def _set_owner(target, security_owner, user):
security_owner.SetSecurityDescriptorOwner(user, False)
win32security.SetFileSecurity(
target, win32security.OWNER_SECURITY_INFORMATION, security_owner)
def _create_probe(tempdir):
filesystem.chmod(tempdir, 0o744)
probe_path = os.path.join(tempdir, 'probe')
util.safe_open(probe_path, 'w', chmod=0o744).close()
return probe_path
if __name__ == "__main__":
unittest.main()
| true | true |
1c2d2eeb5796406a7964c144d0b8b98a0c2da39a | 138 | py | Python | tests/types/test_types_package.py | authlete/authlete-python | 751514c525cd04a930373de78463a1fe71b6da60 | [
"Apache-2.0"
] | 5 | 2019-07-30T01:37:04.000Z | 2021-02-15T05:55:55.000Z | tests/types/test_types_package.py | DestinyCall/authlete-python | 751514c525cd04a930373de78463a1fe71b6da60 | [
"Apache-2.0"
] | null | null | null | tests/types/test_types_package.py | DestinyCall/authlete-python | 751514c525cd04a930373de78463a1fe71b6da60 | [
"Apache-2.0"
] | 1 | 2021-02-15T05:55:56.000Z | 2021-02-15T05:55:56.000Z | import unittest
from authlete.types import *
class TestDtoPackage(unittest.TestCase):
    """Smoke test for the wildcard export of the authlete.types package."""

    def test_1(self):
        # Referencing an enum member proves the symbol was exported
        # by "from authlete.types import *".
        ApplicationType.WEB
| 17.25 | 40 | 0.746377 | import unittest
from authlete.types import *
class TestDtoPackage(unittest.TestCase):
def test_1(self):
ApplicationType.WEB
| true | true |
1c2d2fe1bb015c0395aa7a11e44cbb7e502a54cc | 6,922 | py | Python | src/healthcareapis/azext_healthcareapis/custom.py | dijyotir/azure-cli-extensions | db626a9d53f7a3a683d9629cbd3d86fdcce98118 | [
"MIT"
] | 1 | 2021-09-16T09:13:38.000Z | 2021-09-16T09:13:38.000Z | src/healthcareapis/azext_healthcareapis/custom.py | dijyotir/azure-cli-extensions | db626a9d53f7a3a683d9629cbd3d86fdcce98118 | [
"MIT"
] | null | null | null | src/healthcareapis/azext_healthcareapis/custom.py | dijyotir/azure-cli-extensions | db626a9d53f7a3a683d9629cbd3d86fdcce98118 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=unused-argument
def create_healthcareapis(cmd, client,
                          resource_group,
                          name,
                          kind,
                          location,
                          access_policies_object_id,
                          tags=None,
                          etag=None,
                          cosmos_db_offer_throughput=None,
                          authentication_authority=None,
                          authentication_audience=None,
                          authentication_smart_proxy_enabled=None,
                          cors_origins=None,
                          cors_headers=None,
                          cors_methods=None,
                          cors_max_age=None,
                          cors_allow_credentials=None):
    """Create (or overwrite) an Azure Healthcare APIs service instance.

    Builds a ServiceDescription payload from the CLI arguments and submits it
    through the management *client*.  All service settings live under the
    'properties' key, matching the Azure resource schema.

    :param cmd: CLI command context (unused here, kept for the command loader).
    :param client: services operations client exposing create_or_update().
    :param access_policies_object_id: comma-separated AAD object ids.
    :returns: whatever client.create_or_update returns (an LRO poller).
    """
    def _split(csv):
        # CLI options arrive as comma-separated strings; None means "unset".
        return None if csv is None else csv.split(',')

    service_description = {
        'location': location,
        'kind': kind,
        'properties': {
            # One access-policy entry per AAD object id.
            'access_policies': [{'object_id': policy}
                                for policy in access_policies_object_id.split(',')],
            'cors_configuration': {
                'origins': _split(cors_origins),
                'headers': _split(cors_headers),
                'methods': _split(cors_methods),
                'max_age': cors_max_age,
                'allow_credentials': cors_allow_credentials,
            },
            'cosmos_db_configuration': {
                'offer_throughput': cosmos_db_offer_throughput,
            },
            # Fix: authentication settings belong under 'properties'; the old
            # code attached them to the top level of the resource, where the
            # service model silently ignored them.
            'authentication_configuration': {
                'authority': authentication_authority,
                'audience': authentication_audience,
                'smart_proxy_enabled': authentication_smart_proxy_enabled,
            },
        },
    }
    if tags is not None:
        # Fix: 'tags' was accepted but silently dropped.
        service_description['tags'] = tags
    if etag is not None:
        # Fix: 'etag' was accepted but silently dropped.
        service_description['etag'] = etag
    return client.create_or_update(resource_group_name=resource_group,
                                   resource_name=name,
                                   service_description=service_description)
def update_healthcareapis(cmd, client,
                          resource_group,
                          name,
                          kind=None,
                          location=None,
                          access_policies_object_id=None,
                          tags=None,
                          etag=None,
                          cosmos_db_offer_throughput=None,
                          authentication_authority=None,
                          authentication_audience=None,
                          authentication_smart_proxy_enabled=None,
                          cors_origins=None,
                          cors_headers=None,
                          cors_methods=None,
                          cors_max_age=None,
                          cors_allow_credentials=None):
    """Update an existing Azure Healthcare APIs service.

    Reads the current ServiceDescription, overlays only the arguments the
    caller actually supplied (None means "leave unchanged"), and writes the
    result back via create_or_update.

    :param client: services operations client exposing get() and
        create_or_update().
    :returns: whatever client.create_or_update returns (an LRO poller).
    """
    service_description = client.get(resource_group_name=resource_group, resource_name=name).as_dict()
    properties = service_description['properties']
    if location is not None:
        service_description['location'] = location
    if kind is not None:
        service_description['kind'] = kind
    if tags is not None:
        # Fix: 'tags' was accepted but never applied.
        service_description['tags'] = tags
    if etag is not None:
        service_description['etag'] = etag
    if access_policies_object_id is not None:
        properties['access_policies'] = [{'object_id': policy}
                                         for policy in access_policies_object_id.split(',')]
    # Sub-objects may be absent (or None) in the fetched description.
    if properties.get('cors_configuration') is None:
        properties['cors_configuration'] = {}
    cors = properties['cors_configuration']
    if cors_origins is not None:
        cors['origins'] = cors_origins.split(',')
    if cors_headers is not None:
        cors['headers'] = cors_headers.split(',')
    if cors_methods is not None:
        cors['methods'] = cors_methods.split(',')
    if cors_max_age is not None:
        cors['max_age'] = cors_max_age
    if cors_allow_credentials is not None:
        cors['allow_credentials'] = cors_allow_credentials
    if properties.get('cosmos_db_configuration') is None:
        properties['cosmos_db_configuration'] = {}
    if cosmos_db_offer_throughput is not None:
        properties['cosmos_db_configuration']['offer_throughput'] = cosmos_db_offer_throughput
    # Fix: the old code tested properties['authentication_configuration'] but
    # then wrote the values to the top level of the resource, where the
    # service model ignored them.  Keep check and writes consistent.
    if properties.get('authentication_configuration') is None:
        properties['authentication_configuration'] = {}
    auth = properties['authentication_configuration']
    if authentication_authority is not None:
        auth['authority'] = authentication_authority
    if authentication_audience is not None:
        auth['audience'] = authentication_audience
    if authentication_smart_proxy_enabled is not None:
        auth['smart_proxy_enabled'] = authentication_smart_proxy_enabled
    return client.create_or_update(resource_group_name=resource_group,
                                   resource_name=name,
                                   service_description=service_description)
def list_healthcareapis(cmd, client,
                        resource_group=None):
    """List services across the subscription, or within one resource group."""
    if resource_group is None:
        return client.list()
    return client.list_by_resource_group(resource_group_name=resource_group)
def show_healthcareapis(cmd, client,
                        resource_group,
                        name):
    """Fetch one service by resource group and service name."""
    service = client.get(resource_group_name=resource_group, resource_name=name)
    return service
def delete_healthcareapis(cmd, client,
                          resource_group,
                          name):
    """Delete one service by resource group and service name."""
    result = client.delete(resource_group_name=resource_group, resource_name=name)
    return result
| 56.276423 | 134 | 0.651401 |
def create_healthcareapis(cmd, client,
resource_group,
name,
kind,
location,
access_policies_object_id,
tags=None,
etag=None,
cosmos_db_offer_throughput=None,
authentication_authority=None,
authentication_audience=None,
authentication_smart_proxy_enabled=None,
cors_origins=None,
cors_headers=None,
cors_methods=None,
cors_max_age=None,
cors_allow_credentials=None):
service_description = {}
service_description['location'] = location
service_description['kind'] = kind
service_description['properties'] = {}
service_description['properties']['access_policies'] = []
for policy in access_policies_object_id.split(','):
service_description['properties']['access_policies'].append({'object_id': policy})
service_description['properties']['cors_configuration'] = {}
service_description['properties']['cors_configuration']['origins'] = None if cors_origins is None else cors_origins.split(',')
service_description['properties']['cors_configuration']['headers'] = None if cors_headers is None else cors_headers.split(',')
service_description['properties']['cors_configuration']['methods'] = None if cors_methods is None else cors_methods.split(',')
service_description['properties']['cors_configuration']['max_age'] = cors_max_age
service_description['properties']['cors_configuration']['allow_credentials'] = cors_allow_credentials
service_description['properties']['cosmos_db_configuration'] = {}
service_description['properties']['cosmos_db_configuration']['offer_throughput'] = cosmos_db_offer_throughput
service_description['authentication_configuration'] = {}
service_description['authentication_configuration']['authority'] = authentication_authority
service_description['authentication_configuration']['audience'] = authentication_audience
service_description['authentication_configuration']['smart_proxy_enabled'] = authentication_smart_proxy_enabled
return client.create_or_update(resource_group_name=resource_group, resource_name=name, service_description=service_description)
def update_healthcareapis(cmd, client,
resource_group,
name,
kind=None,
location=None,
access_policies_object_id=None,
tags=None,
etag=None,
cosmos_db_offer_throughput=None,
authentication_authority=None,
authentication_audience=None,
authentication_smart_proxy_enabled=None,
cors_origins=None,
cors_headers=None,
cors_methods=None,
cors_max_age=None,
cors_allow_credentials=None):
service_description = client.get(resource_group_name=resource_group, resource_name=name).as_dict()
if location is not None:
service_description['location'] = location
if kind is not None:
service_description['kind'] = kind
if access_policies_object_id is not None:
service_description['properties']['access_policies'] = []
for policy in access_policies_object_id.split(','):
service_description['properties']['access_policies'].append({'object_id': policy})
if service_description['properties'].get('cors_configuration') is None:
service_description['properties']['cors_configuration'] = {}
if cors_origins is not None:
service_description['properties']['cors_configuration']['origins'] = None if cors_origins is None else cors_origins.split(',')
if cors_headers is not None:
service_description['properties']['cors_configuration']['headers'] = None if cors_headers is None else cors_headers.split(',')
if cors_methods is not None:
service_description['properties']['cors_configuration']['methods'] = None if cors_methods is None else cors_methods.split(',')
if cors_max_age is not None:
service_description['properties']['cors_configuration']['max_age'] = cors_max_age
if cors_allow_credentials is not None:
service_description['properties']['cors_configuration']['allow_credentials'] = cors_allow_credentials
if service_description['properties'].get('cosmos_db_configuration') is None:
service_description['properties']['cosmos_db_configuration'] = {}
if cosmos_db_offer_throughput is not None:
service_description['properties']['cosmos_db_configuration']['offer_throughput'] = cosmos_db_offer_throughput
if service_description['properties'].get('authentication_configuration') is None:
service_description['authentication_configuration'] = {}
if authentication_authority is not None:
service_description['authentication_configuration']['authority'] = authentication_authority
if authentication_audience is not None:
service_description['authentication_configuration']['audience'] = authentication_audience
if authentication_smart_proxy_enabled is not None:
service_description['authentication_configuration']['smart_proxy_enabled'] = authentication_smart_proxy_enabled
return client.create_or_update(resource_group_name=resource_group, resource_name=name, service_description=service_description)
def list_healthcareapis(cmd, client,
resource_group=None):
if resource_group is not None:
return client.list_by_resource_group(resource_group_name=resource_group)
return client.list()
def show_healthcareapis(cmd, client,
resource_group,
name):
return client.get(resource_group_name=resource_group, resource_name=name)
def delete_healthcareapis(cmd, client,
resource_group,
name):
return client.delete(resource_group_name=resource_group, resource_name=name)
| true | true |
1c2d348864a628a2ff495a5fdc2d7ebec058f190 | 122 | py | Python | plenum/test/primary_selection/test_primary_selection_routes.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | plenum/test/primary_selection/test_primary_selection_routes.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | plenum/test/primary_selection/test_primary_selection_routes.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | from plenum.test.conftest import looper
nodeCount = 7
def test_routes(nodeSet, up):
# TODO: Low priority.
pass
| 13.555556 | 39 | 0.704918 | from plenum.test.conftest import looper
nodeCount = 7
def test_routes(nodeSet, up):
pass
| true | true |
1c2d34d62c53dc9486da792127a3f5778a42701f | 3,092 | py | Python | cwharaj/cwharaj/spiders/_mstaml_debug_spider.py | trujunzhang/djzhang-targets | c2e327acde9d51f0455e7243f17d93d74b579501 | [
"MIT"
] | 2 | 2018-12-03T16:30:55.000Z | 2019-04-03T13:29:20.000Z | cwharaj/cwharaj/spiders/_mstaml_debug_spider.py | trujunzhang/djzhang-targets | c2e327acde9d51f0455e7243f17d93d74b579501 | [
"MIT"
] | null | null | null | cwharaj/cwharaj/spiders/_mstaml_debug_spider.py | trujunzhang/djzhang-targets | c2e327acde9d51f0455e7243f17d93d74b579501 | [
"MIT"
] | 1 | 2019-04-03T13:29:25.000Z | 2019-04-03T13:29:25.000Z | # -*- coding: utf-8 -*-
import scrapy
from cwharaj.scraped_websites import WebsiteTypes, websites_allowed_domains, is_pagination
class MstamlDebugWatchSpider(scrapy.Spider):
    """Debug spider for mstaml.com.

    Depending on the module-level ``is_pagination`` flag it crawls either the
    site's pagination page or a hard-coded list of ad-detail URLs, and routes
    every response through :class:`MstamlParse`.
    """
    url_from = WebsiteTypes.mstaml
    name = "{}_debug".format(url_from.value)

    # Sample detail pages used while debugging specific parser features;
    # exactly one is left active at a time.
    details_urls = [
        # Details
        # 'http://www.mstaml.com/2073561/للبيع_جمس_يوكن_1999/'
        # 'http://www.mstaml.com/2078991/للبيع_اكسبلورر_أبيض_2010_وارد_توكيلات_الجزيرة/'
        # 'http://www.mstaml.com/2079607/افضل_عروض_لطابعات_الكروت/'
        # contains emoji unicode
        # 'http://www.mstaml.com/2073595/للبيع_قطط_تركيه/'
        # parse time_added and last_updated_ad
        # 'http://www.mstaml.com/2079892/تفويض_إلكتروني_للمؤسسات/'
        # parsing member(has e-mail)
        'http://www.mstaml.com/2080634/تلفزيون_Haier_29_بوصه_للبيع_بحاله_ممتازه_مستعمل/'
    ]

    def __init__(self, name=None, **kwargs):
        # Choose the start URL set according to the global pagination flag.
        self.allowed_domains = [websites_allowed_domains.get(self.url_from)]
        if is_pagination:
            self.start_urls = [WebsiteTypes.get_pagination_url(self.url_from)]
        else:
            self.start_urls = self.details_urls
        # Imported lazily so the module can be loaded without a DB available.
        from cwharaj.database_factory import DatabaseFactory, CollectionTypes
        database_factory = DatabaseFactory(kwargs['host'], kwargs['port'],
                                           kwargs['user'], kwargs['passwd'],
                                           kwargs['db'], kwargs['collection_name'])
        self._cache_db = database_factory.get_database(CollectionTypes.cache)
        self._history_db = database_factory.get_database(CollectionTypes.history)
        self._item_db = database_factory.get_database(CollectionTypes.item)
        from cwharaj.parser.mstaml_parser import MstamlParse
        self._parser = MstamlParse()
        super(MstamlDebugWatchSpider, self).__init__(name, **kwargs)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        # Forward crawler settings as constructor kwargs.
        # Fix: the old code passed the ``args`` tuple as a single positional
        # argument instead of unpacking it with ``*args``; that only worked by
        # accident because an empty tuple is falsy in Spider.__init__.
        return super(MstamlDebugWatchSpider, cls).from_crawler(crawler,
                                                               *args,
                                                               host=crawler.settings.get('SQL_HOST'),
                                                               port=crawler.settings.get('SQL_PORT'),
                                                               user=crawler.settings.get('SQL_USER'),
                                                               passwd=crawler.settings.get('SQL_PASSWD'),
                                                               db=crawler.settings.get('SQL_DB'),
                                                               collection_name=crawler.settings.get(
                                                                   'SQL_COLLECTION_NAME')
                                                               )

    def parse(self, response):
        """Dispatch a response to the pagination or detail-page parser."""
        if is_pagination:
            self._parser.parse_paginate(response.url, response, self._cache_db, self._history_db)
        else:
            self._parser.parse(response.url, response, self._item_db)
| 48.3125 | 105 | 0.559832 |
import scrapy
from cwharaj.scraped_websites import WebsiteTypes, websites_allowed_domains, is_pagination
class MstamlDebugWatchSpider(scrapy.Spider):
    """Debug spider for mstaml.com.

    Depending on the module-level ``is_pagination`` flag it crawls either the
    site's pagination page or a fixed list of detail URLs, persisting results
    through the cwharaj database layer.
    """

    url_from = WebsiteTypes.mstaml
    name = "{}_debug".format(url_from.value)
    # Fixed ad URL used when debugging the detail-page parser.
    details_urls = [
        'http://www.mstaml.com/2080634/تلفزيون_Haier_29_بوصه_للبيع_بحاله_ممتازه_مستعمل/'
    ]

    def __init__(self, name=None, **kwargs):
        """Set up allowed domains, start URLs, databases and the page parser.

        Expects host/port/user/passwd/db/collection_name in *kwargs*; they are
        supplied by :meth:`from_crawler`.
        """
        self.allowed_domains = [websites_allowed_domains.get(self.url_from)]
        if is_pagination:
            self.start_urls = [WebsiteTypes.get_pagination_url(self.url_from)]
        else:
            self.start_urls = self.details_urls
        # NOTE(review): imports are kept local, presumably to avoid import
        # cycles / start-up cost — confirm before hoisting them.
        from cwharaj.database_factory import DatabaseFactory, CollectionTypes
        database_factory = DatabaseFactory(kwargs['host'], kwargs['port'],
                                           kwargs['user'], kwargs['passwd'],
                                           kwargs['db'], kwargs['collection_name'])
        self._cache_db = database_factory.get_database(CollectionTypes.cache)
        self._history_db = database_factory.get_database(CollectionTypes.history)
        self._item_db = database_factory.get_database(CollectionTypes.item)
        from cwharaj.parser.mstaml_parser import MstamlParse
        self._parser = MstamlParse()
        super(MstamlDebugWatchSpider, self).__init__(name, **kwargs)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider, forwarding the SQL_* crawler settings.

        Bug fix: positional arguments are now unpacked (``*args``) instead of
        being forwarded as a single tuple positional argument.
        """
        return super(MstamlDebugWatchSpider, cls).from_crawler(crawler,
                                                               *args,
                                                               host=crawler.settings.get('SQL_HOST'),
                                                               port=crawler.settings.get('SQL_PORT'),
                                                               user=crawler.settings.get('SQL_USER'),
                                                               passwd=crawler.settings.get('SQL_PASSWD'),
                                                               db=crawler.settings.get('SQL_DB'),
                                                               collection_name=crawler.settings.get(
                                                                   'SQL_COLLECTION_NAME')
                                                               )

    def parse(self, response):
        """Dispatch the response to the pagination or the detail parser."""
        if is_pagination:
            self._parser.parse_paginate(response.url, response, self._cache_db, self._history_db)
        else:
            self._parser.parse(response.url, response, self._item_db)
| true | true |
1c2d353839d357fc3f304e21e3a23bc1936234b5 | 30,587 | py | Python | bg/graphviz.py | sergey-aganezov-jr/bg | 1ec758193441e49e7b34e0da09571480f4c24455 | [
"MIT"
] | 2 | 2020-01-29T14:26:18.000Z | 2020-09-08T05:37:15.000Z | bg/graphviz.py | sergey-aganezov-jr/bg | 1ec758193441e49e7b34e0da09571480f4c24455 | [
"MIT"
] | 3 | 2015-12-25T17:36:50.000Z | 2017-11-23T17:06:09.000Z | bg/graphviz.py | sergey-aganezov-jr/bg | 1ec758193441e49e7b34e0da09571480f4c24455 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from collections import defaultdict
from collections import deque
from enum import Enum
from ete3 import TreeNode
from bg.edge import BGEdge
from bg.genome import BGGenome
from bg.multicolor import Multicolor
from bg.utils import get_from_dict_with_path
from bg.vertices import BGVertex, InfinityVertex, TaggedInfinityVertex
def vertex_as_a_sting(vertex, separator=" "):
    """Return a plain-text label for *vertex*.

    BGVertex instances render as ``<block>t``/``<block>h`` followed by any
    tags in parentheses; every other object falls back to ``str``.
    (The public name keeps the historical "sting" spelling for callers.)
    """
    if not isinstance(vertex, BGVertex):
        return "{string}".format(string=str(vertex))
    label = vertex.block_name + ("t" if vertex.is_tail_vertex else "h")
    if vertex.is_tagged_vertex and len(vertex.tags) > 0:
        wrapped = ("(" + tag + ")" for tag in vertex.get_tags_as_list_of_strings())
        label += separator + separator.join(wrapped)
    return "{string}".format(string=label)
def vertex_as_html(vertex, separator=" "):
    """Return an HTML-like graphviz label (``<...>``) for *vertex*.

    Block vertices get their orientation as a superscript; tagged vertices
    append their tags in parentheses; anything else falls back to ``str``.
    """
    if not isinstance(vertex, BGVertex):
        return "<" + str(vertex) + ">"
    label = ""
    if vertex.is_block_vertex:
        orientation = "t" if vertex.is_tail_vertex else "h"
        label += vertex.block_name + "<SUP>" + orientation + "</SUP>"
    if vertex.is_tagged_vertex and len(vertex.tags) > 0:
        wrapped = ("(" + tag + ")" for tag in vertex.get_tags_as_list_of_strings())
        label += separator + separator.join(wrapped)
    return "<" + label + ">"
class LabelFormat(Enum):
    """Output flavor for graphviz labels: plain text or HTML-like."""
    plain = "plain"
    html = "html"
class Colors(Enum):
    """Fixed palette of graphviz color names consumed by ColorSource."""
    black = "black"
    blue = "blue"
    red = "red"
    green = "green"
    orange = "orange"
    aquamarine = "aquamarine"
    bisque = "bisque"
    cyan = "cyan"
    gold = "gold"
    gray = "gray"
    # 10
    khaki = "khaki"
    magenta = "magenta"
    maroon = "maroon"
    pink = "pink"
    orchid = "orchid"
    sandybrown = "sandybrown"
    cadetblue = "cadetblue"
    dimgrey = "dimgrey"
    plum = "plum"
    wheat = "wheat"
    # 20
def ids_generator(start=1, step=1):
    """Yield an endless arithmetic progression of ids: start, start+step, ..."""
    current = start
    while True:
        yield current
        current += step
class ColorSource(object):
    """Hands out distinct graphviz colors, one per distinct key.

    The first request for an unseen key pops the next color from a fixed
    palette; later requests for the same key return the same color.
    """

    def __init__(self):
        # key -> Colors member already assigned to that key
        self.color_to_dot_color = {}
        # palette of not-yet-assigned colors, consumed front-to-back
        self.unused_colors = deque([
            Colors.black, Colors.blue, Colors.red, Colors.green,
            Colors.orange, Colors.aquamarine, Colors.bisque, Colors.cyan,
            Colors.gold, Colors.gray, Colors.khaki, Colors.magenta,
            Colors.maroon, Colors.pink, Colors.orchid, Colors.sandybrown,
            Colors.cadetblue, Colors.dimgrey, Colors.plum, Colors.wheat,
        ])

    def get_unused_color(self, entry):
        """Return the Colors member for *entry*, assigning the next free one if new."""
        try:
            return self.color_to_dot_color[entry]
        except KeyError:
            color = self.unused_colors.popleft()
            self.color_to_dot_color[entry] = color
            return color

    def get_color_as_string(self, entry):
        """Return the graphviz color name (string) assigned to *entry*."""
        return self.get_unused_color(entry=entry).value
class ShapeProcessor(object):
    """Base producer of graphviz shape attributes (color, style, penwidth).

    Subclasses override the ``get_*`` accessors to specialize per entry.
    """

    def __init__(self, pen_width=1, style="solid", color=Colors.black, color_source=None):
        self.style_attrib_template = "style=\"{style}\""
        self.color_attrib_template = "color=\"{color}\""
        # Shared color source so equal keys map to equal colors across processors.
        self.color_source = color_source if color_source is not None else ColorSource()
        self.pen_width = pen_width
        self.style = style
        self.color = color
        self.pen_width_attrib_template = "penwidth=\"{pen_width}\""

    def get_pen_width(self, entry=None):
        """Pen width for *entry*; the base class ignores the entry."""
        return self.pen_width

    def get_style(self, entry=None):
        """Style for *entry*.

        Bug fix: previously always returned the hard-coded "solid", silently
        ignoring the ``style`` passed to __init__ (every in-file construction
        uses the "solid" default, so rendered output is unchanged).
        """
        return self.style

    def get_color_as_string(self, entry):
        """Graphviz color name assigned to *entry* by the color source."""
        return self.color_source.get_color_as_string(entry=entry)

    def get_attributes_string_list(self, entry):
        """Return [color, style, penwidth] attribute strings, in this order."""
        return [
            self.color_attrib_template.format(color=self.get_color_as_string(entry=entry)),
            self.style_attrib_template.format(style=self.get_style(entry=entry)),
            self.pen_width_attrib_template.format(pen_width=self.get_pen_width(entry=entry))
        ]
class TextProcessor(object):
    """Base producer of graphviz font/label attributes."""

    def __init__(self, color=Colors.black, size=12, font_name="Arial", color_source=None):
        self.color_source = color_source if color_source is not None else ColorSource()
        self.color = color
        self.text_size = size
        self.text_font_name = font_name
        self.color_attrib_template = "fontcolor=\"{color}\""
        self.size_attrib_template = "fontsize=\"{size}\""
        self.font_attrib_template = "fontname=\"{font}\""
        self.label_attrib_template = "label={label}"

    def get_text_font(self, entry=None):
        """Font family name for *entry*."""
        return self.text_font_name

    def get_text_size(self, entry=None):
        """Font size for *entry*."""
        return self.text_size

    def get_text_color(self, entry=None):
        # Enum members render via .value; anything else is stringified.
        if self.color in Colors:
            return self.color.value
        return str(self.color)

    def get_text(self, entry=None, label_format=LabelFormat.plain):
        # Default label is empty: "<>" for html output, quoted "" otherwise.
        if label_format == LabelFormat.html.value or label_format == LabelFormat.html:
            return "<>"
        return "\"\""

    def get_attributes_string_list(self, entry, label_format=LabelFormat.plain):
        """Return [label, fontname, fontsize, fontcolor] attribute strings."""
        attributes = [self.label_attrib_template.format(label=self.get_text(entry=entry, label_format=label_format))]
        attributes.append(self.font_attrib_template.format(font=self.text_font_name))
        attributes.append(self.size_attrib_template.format(size=self.text_size))
        attributes.append(self.color_attrib_template.format(color=self.get_text_color(entry=entry)))
        return attributes
class VertexShapeProcessor(ShapeProcessor):
    """Shape attributes for graph vertices; adds the ``shape`` attribute."""

    def __init__(self, pen_width=1, style="solid", color=Colors.black, shape="oval", color_source=None):
        super(VertexShapeProcessor, self).__init__(pen_width=pen_width, style=style, color=color, color_source=color_source)
        self.shape_attrib_template = "shape=\"{shape}\""
        self.shape = shape

    def get_shape(self, entry=None):
        """Shape name for *entry*; the base class ignores the entry."""
        return self.shape

    def get_attributes_string_list(self, entry):
        """Return [shape, penwidth, style, color] attribute strings."""
        attributes = [self.shape_attrib_template.format(shape=self.get_shape(entry=entry))]
        attributes.append(self.pen_width_attrib_template.format(pen_width=self.get_pen_width(entry=entry)))
        attributes.append(self.style_attrib_template.format(style=self.get_style(entry=entry)))
        attributes.append(self.color_attrib_template.format(color=self.get_color_as_string(entry=entry)))
        return attributes
class BGVertexShapeProcessor(VertexShapeProcessor):
    """Vertex shapes for breakpoint graphs: ovals for regular vertices,
    points for irregular (infinity) ones."""

    def __init__(self, pen_width=1, style="solid", color=Colors.black, color_source=None,
                 regular_vertex_shape="oval", irregular_vertex_shape="point", non_bg_vertex_shape="oval"):
        super(BGVertexShapeProcessor, self).__init__(pen_width=pen_width, style=style, color=color, shape=non_bg_vertex_shape, color_source=color_source)
        self.regular_vertex_shape = regular_vertex_shape
        self.irregular_vertex_shape = irregular_vertex_shape

    def get_shape(self, entry=None):
        """Regular/irregular shape for BGVertex entries, base default otherwise."""
        if not isinstance(entry, BGVertex):
            return super(BGVertexShapeProcessor, self).get_shape(entry=entry)
        return self.regular_vertex_shape if entry.is_regular_vertex else self.irregular_vertex_shape

    def get_attributes_string_list(self, entry):
        # NOTE(review): only shape and penwidth are emitted here (no
        # style/color), and get_pen_width is called without the entry —
        # presumably intentional; confirm before changing.
        return [self.shape_attrib_template.format(shape=self.get_shape(entry=entry)),
                self.pen_width_attrib_template.format(pen_width=self.get_pen_width())]
class BGVertexTextProcessor(TextProcessor):
    """Text/label attributes for breakpoint-graph vertices."""

    def __init__(self, color=Colors.black, size=12, font_name="Arial", color_source=None):
        super(BGVertexTextProcessor, self).__init__(color=color, size=size, font_name=font_name, color_source=color_source)

    def get_text(self, entry=None, label_format=LabelFormat.plain, separator="\n"):
        """Return a quoted plain label or an HTML-like label for *entry*.

        Bug fix: an unknown label format previously fell through and
        implicitly returned None; it now falls back to the base default.
        """
        if entry is None:
            return super(BGVertexTextProcessor, self).get_text(entry=entry, label_format=label_format)
        if label_format == LabelFormat.plain.value or label_format == LabelFormat.plain:
            return "\"" + vertex_as_a_sting(vertex=entry, separator=separator) + "\""
        if label_format == LabelFormat.html.value or label_format == LabelFormat.html:
            return vertex_as_html(vertex=entry, separator=separator)
        return super(BGVertexTextProcessor, self).get_text(entry=entry, label_format=label_format)
class VertexProcessor(object):
    """Exports vertices as graphviz dot statements, assigning stable integer ids."""

    def __init__(self, shape_processor=None, text_processor=None):
        self.vertices_id_generator = ids_generator()
        self.vertices_ids_storage = {}
        self.shape_processor = shape_processor
        self.text_processor = text_processor
        self.template = "\"{v_id}\" [{attributes}];"

    def get_vertex_id(self, vertex):
        """Return the id for *vertex*, allocating the next one if unseen."""
        if vertex not in self.vertices_ids_storage:
            self.vertices_ids_storage[vertex] = next(self.vertices_id_generator)
        return self.vertices_ids_storage[vertex]

    def export_vertex_as_dot(self, vertex, label_format=LabelFormat.plain):
        """Return the dot statement for *vertex*.

        :type label_format: Union[str, LabelFormat]
        """
        vertex_id = self.get_vertex_id(vertex=vertex)
        attributes = list(self.text_processor.get_attributes_string_list(entry=vertex, label_format=label_format))
        attributes.extend(self.shape_processor.get_attributes_string_list(entry=vertex))
        return self.template.format(v_id=vertex_id, attributes=", ".join(attributes))
class BGVertexProcessor(VertexProcessor):
    """Vertex exporter for breakpoint graphs.

    Infinity vertices that share a block-name root collapse onto one dot
    node, and irregular vertices are drawn without a label.
    """

    def __init__(self, shape_processor=None, text_processor=None, color_source=None):
        super(BGVertexProcessor, self).__init__(shape_processor=shape_processor, text_processor=text_processor)
        color_source = ColorSource() if color_source is None else color_source
        if self.shape_processor is None:
            self.shape_processor = BGVertexShapeProcessor(color_source=color_source)
        if self.text_processor is None:
            self.text_processor = BGVertexTextProcessor(color_source=color_source)

    def get_vertex_id(self, vertex):
        """Id for *vertex*; infinity vertices are keyed by their block root."""
        key = BGVertex.get_vertex_name_root(vertex.name) if isinstance(vertex, InfinityVertex) else vertex
        return super(BGVertexProcessor, self).get_vertex_id(vertex=key)

    def export_vertex_as_dot(self, vertex, label_format=LabelFormat.plain):
        """Return the dot statement for *vertex*.

        :type label_format: Union[str, LabelFormat]
        """
        vertex_id = self.get_vertex_id(vertex=vertex)
        attributes = []
        if not isinstance(vertex, InfinityVertex):
            # Infinity vertices get no text attributes (unlabeled points).
            attributes.extend(self.text_processor.get_attributes_string_list(entry=vertex, label_format=label_format))
        attributes.extend(self.shape_processor.get_attributes_string_list(entry=vertex))
        return self.template.format(v_id=vertex_id, attributes=", ".join(attributes))
class BGEdgeShapeProcessor(ShapeProcessor):
    """Edge style/penwidth/color attributes for breakpoint-graph edges.

    Regular edges are solid, irregular (infinity) edges dotted, repeat edges
    dashed; irregular/repeat edges are drawn thinner.
    """

    def __init__(self, pen_width=1, style="solid", color=Colors.black, color_source=None):
        super(BGEdgeShapeProcessor, self).__init__(pen_width=pen_width, style=style, color=color, color_source=color_source)
        self.regular_edge_style = "solid"
        self.irregular_edge_style = "dotted"
        self.repeat_edge_style = "dashed"
        self.regular_edge_pen_width = 1
        # NOTE: "pen_with" spelling is kept — it is a public attribute name.
        self.irregular_edge_pen_with = .7
        self.repeat_edge_pen_width = .7

    def get_style(self, entry=None):
        """Style with repeat > irregular > regular precedence; regular for non-edges."""
        if entry is None or not isinstance(entry, BGEdge):
            return self.regular_edge_style
        if entry.is_repeat_edge:
            return self.repeat_edge_style
        if entry.is_irregular_edge:
            return self.irregular_edge_style
        if entry.is_regular_edge:
            return self.regular_edge_style

    def get_pen_width(self, entry=None):
        """Pen width with the same precedence as :meth:`get_style`."""
        if entry is None or not isinstance(entry, BGEdge):
            return self.regular_edge_pen_width
        if entry.is_repeat_edge:
            return self.repeat_edge_pen_width
        if entry.is_irregular_edge:
            return self.irregular_edge_pen_with
        if entry.is_regular_edge:
            return self.regular_edge_pen_width

    def get_dot_colors(self, multicolor):
        """Map every color occurrence in *multicolor* to a Colors member."""
        return [self.color_source.get_unused_color(entry=color) for color in multicolor.multicolors.elements()]

    def get_attributes_string_list(self, entry):
        """Return [color, style, penwidth] for a single-colored edge.

        :raises ValueError: if *entry* carries more than one color element.
        """
        if len(list(entry.multicolor.multicolors.elements())) != 1:
            # Bug fix: the original message was garbled ("can not be created
            # only for multi-colored edge, but rather an edge with a
            # single-colored edge").
            raise ValueError(
                "Graphviz edge shape attributes can be created only for a single-colored edge, not a multi-colored one")
        color = self.get_dot_colors(multicolor=entry.multicolor)[0].value
        return [
            self.color_attrib_template.format(color=color),
            self.style_attrib_template.format(style=self.get_style(entry=entry)),
            self.pen_width_attrib_template.format(pen_width=self.get_pen_width(entry=entry))]
class BGEdgeTextProcessor(TextProcessor):
    """Builds edge labels from selected edge-data entries and from the tags
    of tagged infinity endpoint vertices."""

    def __init__(self, size=7, font_name="Arial", color=Colors.black, color_source=None):
        super(BGEdgeTextProcessor, self).__init__(size=size, font_name=font_name, color=color, color_source=color_source)

    def get_text(self, entry=None, label_format=LabelFormat.plain,
                 edge_attributes_to_be_displayed=None,
                 tag_key_processor=None, tag_value_processor=None,
                 edge_key_value_separator=":",
                 entries_separator="\n"):
        """Return the (quoted or HTML-like) label text for a BGEdge.

        ``edge_attributes_to_be_displayed`` is a list of (path, key) pairs
        looked up in ``entry.data``; tags of TaggedInfinityVertex endpoints
        are appended after them.

        :type label_format: Union[str, LabelFormat]
        """
        if entry is None or not isinstance(entry, BGEdge):
            return super(BGEdgeTextProcessor, self).get_text(entry=entry, label_format=label_format)
        if tag_key_processor is None:
            tag_key_processor = self._tag_key_processor
        if tag_value_processor is None:
            tag_value_processor = self._tag_value_processor
        if edge_attributes_to_be_displayed is None:
            edge_attributes_to_be_displayed = []
        text = ""
        entries = []
        for path, key in edge_attributes_to_be_displayed:
            value = get_from_dict_with_path(source_dict=entry.data, key=key, path=path)
            if value is None:
                continue
            entries.append(tag_key_processor(key=key, label_format=label_format) +
                           edge_key_value_separator +
                           tag_value_processor(value=value, label_format=label_format))
        text += entries_separator.join(entries)
        for vertex in (entry.vertex1, entry.vertex2):
            if not isinstance(vertex, TaggedInfinityVertex):
                continue
            entries = []
            starting = "" if len(text) == 0 else entries_separator
            for tag, value in vertex.tags:
                # Bug fix: the original read TAG_SEPARATOR from entry.vertex1
                # even while rendering entry.vertex2's tags.
                entries.append(tag_key_processor(tag, label_format=label_format) +
                               vertex.TAG_SEPARATOR +
                               tag_value_processor(value, label_format=label_format))
            text += starting + entries_separator.join(entries)
        if label_format == LabelFormat.plain.value or label_format == LabelFormat.plain:
            return "\"" + text + "\""
        elif label_format == LabelFormat.html.value or label_format == LabelFormat.html:
            return "<" + text + ">"
        return "\"\""

    def _tag_key_processor(self, key, label_format):
        """Default key rendering: "repeat" is abbreviated to "r"."""
        if key == "repeat":
            return "r"
        else:
            return str(key)

    def _tag_value_processor(self, value, label_format):
        """Default value rendering: trailing h/t becomes a superscript in HTML."""
        if str(value).endswith(("h", "t")) and (label_format == LabelFormat.html.value or label_format == LabelFormat.html):
            return str(value)[:-1] + "<SUP>" + str(value)[-1] + "</SUP>"
        return str(value)

    def get_attributes_string_list(self, entry, label_format=LabelFormat.plain, edge_attributes_to_be_displayed=None,
                                   tag_key_processor=None, tag_value_processor=None, edge_key_value_separator=":",
                                   entries_separator="\n"):
        """Return [label, fontname, fontsize, fontcolor] attribute strings."""
        return [self.label_attrib_template.format(label=self.get_text(entry=entry, label_format=label_format,
                                                                      edge_attributes_to_be_displayed=edge_attributes_to_be_displayed,
                                                                      tag_key_processor=tag_key_processor,
                                                                      tag_value_processor=tag_value_processor,
                                                                      edge_key_value_separator=edge_key_value_separator,
                                                                      entries_separator=entries_separator)),
                self.font_attrib_template.format(font=self.text_font_name),
                self.size_attrib_template.format(size=self.text_size),
                self.color_attrib_template.format(color=self.get_text_color(entry=entry))]
class EdgeProcessor(object):
    """Exports plain (vertex-pair) edges as graphviz dot statements."""

    def __init__(self, vertex_processor, edge_shape_processor=None, edge_text_processor=None):
        self.shape_processor = edge_shape_processor
        self.text_processor = edge_text_processor
        self.vertex_processor = vertex_processor
        self.template = "\"{v1_id}\" -- \"{v2_id}\" [{attributes}];"

    def export_edge_as_dot(self, edge, label_format=LabelFormat.plain):
        """Return a one-element list with the dot statement for *edge*.

        :type label_format: Union[str, LabelFormat]
        """
        first_id = self.vertex_processor.get_vertex_id(vertex=self.get_vertex_1(edge))
        second_id = self.vertex_processor.get_vertex_id(vertex=self.get_vertex_2(edge))
        attributes = self.shape_processor.get_attributes_string_list(entry=edge)
        # A label of length <= 2 is just the surrounding quotes, i.e. empty.
        if len(self.text_processor.get_text(entry=edge)) > 2:
            attributes.extend(self.text_processor.get_attributes_string_list(entry=edge, label_format=label_format))
        return [self.template.format(v1_id=first_id, v2_id=second_id, attributes=", ".join(attributes))]

    def get_vertex_1(self, edge):
        """First endpoint of a plain edge pair."""
        return edge[0]

    def get_vertex_2(self, edge):
        """Second endpoint of a plain edge pair."""
        return edge[1]
class BGEdgeProcessor(EdgeProcessor):
    """Edge exporter for breakpoint graphs: a multi-colored BGEdge is
    rendered as one parallel dot edge per color occurrence."""

    def __init__(self, vertex_processor, edge_shape_processor=None, edge_text_processor=None, color_source=None):
        super(BGEdgeProcessor, self).__init__(vertex_processor=vertex_processor, edge_shape_processor=edge_shape_processor,
                                              edge_text_processor=edge_text_processor)
        color_source = ColorSource() if color_source is None else color_source
        if self.shape_processor is None:
            self.shape_processor = BGEdgeShapeProcessor(color_source=color_source)
        if self.text_processor is None:
            self.text_processor = BGEdgeTextProcessor(color_source=color_source)

    def export_edge_as_dot(self, edge, label_format=LabelFormat.plain):
        """Return one dot statement per color element of *edge*.

        :type label_format: Union[str, LabelFormat]
        """
        first_id = self.vertex_processor.get_vertex_id(vertex=self.get_vertex_1(edge))
        second_id = self.vertex_processor.get_vertex_id(vertex=self.get_vertex_2(edge))
        result = []
        for color in edge.multicolor.multicolors.elements():
            # Single-colored stand-in edge for this color occurrence.
            single = BGEdge(vertex1=self.get_vertex_1(edge=edge), vertex2=self.get_vertex_2(edge=edge),
                            multicolor=Multicolor(color), data=edge.data)
            attributes = self.shape_processor.get_attributes_string_list(entry=single)
            if len(self.text_processor.get_text(entry=single)) > 2:
                attributes.extend(self.text_processor.get_attributes_string_list(entry=single, label_format=label_format))
            result.append(self.template.format(v1_id=first_id, v2_id=second_id, attributes=", ".join(attributes)))
        return result

    def get_vertex_1(self, edge):
        """First endpoint of a BGEdge."""
        return edge.vertex1

    def get_vertex_2(self, edge):
        """Second endpoint of a BGEdge."""
        return edge.vertex2
class GraphProcessor(object):
    """Combines a vertex and an edge processor into a whole-graph dot export."""

    def __init__(self, vertex_processor=None, edge_processor=None):
        self.vertex_processor = vertex_processor
        self.edge_processor = edge_processor
        self.template = "graph {{\n{edges}\n{vertices}\n}}"

    def export_vertices_as_dot(self, graph, label_format=LabelFormat.plain):
        """Dot statements for every vertex of *graph*."""
        return [self.vertex_processor.export_vertex_as_dot(vertex=vertex, label_format=label_format)
                for vertex in graph.nodes()]

    def export_edges_as_dot(self, graph, label_format=LabelFormat.plain):
        """Dot statements for every edge of *graph* (possibly several per edge)."""
        entries = []
        for edge in graph.edges():
            entries.extend(self.edge_processor.export_edge_as_dot(edge=edge, label_format=label_format))
        return entries

    def export_graph_as_dot(self, graph, label_format=LabelFormat.plain):
        """Full ``graph { ... }`` document; vertices are exported first so
        vertex ids are allocated in node order."""
        vertices_entries = self.export_vertices_as_dot(graph=graph, label_format=label_format)
        edges_entries = self.export_edges_as_dot(graph=graph, label_format=label_format)
        return self.template.format(edges="\n".join(edges_entries), vertices="\n".join(vertices_entries))
class BreakpointGraphProcessor(GraphProcessor):
    """Whole-breakpoint-graph exporter that renders connected components
    individually and can drop components via configurable filters."""

    def __init__(self, vertex_processor=None, edge_processor=None, color_source=None, cc_filters=None):
        super(BreakpointGraphProcessor, self).__init__(vertex_processor=vertex_processor, edge_processor=edge_processor)
        color_source = ColorSource() if color_source is None else color_source
        if self.vertex_processor is None:
            self.vertex_processor = BGVertexProcessor(color_source=color_source)
        if self.edge_processor is None:
            self.edge_processor = BGEdgeProcessor(vertex_processor=self.vertex_processor, color_source=color_source)
        self.cc_filters = [] if cc_filters is None else cc_filters
        self.cc_filter_template = "{filter_name}: {filtered_cnt}"
        self.cc_filters_template = "\"cc_filters\" [shape=\"square\", penwidth=\"5\"," \
                                   " fontname=\"Arial\", fontsize=\"15\", " \
                                   "label=\"{overall_filters_info}\"];"

    def export_graph_as_dot(self, graph, label_format=LabelFormat.plain):
        """Export *graph* component by component.

        A component rejected by any filter is skipped and counted under the
        first rejecting filter's name; non-zero counts are summarized in an
        extra "cc_filters" square node.
        """
        vertices_entries = []
        edges_entries = []
        filters_results = defaultdict(int)
        for cc in graph.connected_components_subgraphs(copy=False):
            rejecting_filter = None
            for cc_filter in self.cc_filters:
                if not cc_filter.accept_connected_component(cc=cc, breakpoint_graph=graph):
                    rejecting_filter = cc_filter
                    break
            if rejecting_filter is not None:
                filters_results[rejecting_filter.name] += 1
                continue
            vertices_entries.extend(self.export_vertices_as_dot(graph=cc, label_format=label_format))
            edges_entries.extend(self.export_edges_as_dot(graph=cc, label_format=label_format))
        invoked = [(name, cnt) for name, cnt in filters_results.items() if cnt > 0]
        if invoked:
            label = "\n".join(self.cc_filter_template.format(filter_name=name, filtered_cnt=cnt)
                              for name, cnt in invoked)
            vertices_entries.append(self.cc_filters_template.format(overall_filters_info=label))
        return self.template.format(edges="\n".join(edges_entries), vertices="\n".join(vertices_entries))
class BGTreeVertexShapeProcessor(VertexShapeProcessor):
    """Shape attributes for phylogeny-tree nodes: leaves get a thicker
    outline and a per-genome color; internal nodes share one color."""

    def __init__(self, color=Colors.black, style="solid", internal_node_pen_width=1, leaf_node_pen_width=3, shape="oval", color_source=None,
                 vertex_data_wrapper=BGGenome, leaf_wrapper=None):
        super(BGTreeVertexShapeProcessor, self).__init__(color=color, style=style, pen_width=internal_node_pen_width, shape=shape, color_source=color_source)
        self.leaf_node_pen_width = leaf_node_pen_width
        # Bug fix: the original conditional was bound inside the lambda body
        # ("lambda node: X if cond else Y"), so a caller-supplied leaf_wrapper
        # was *returned* by the lambda instead of being used as the wrapper.
        self.__leaf_wrapper = (lambda node: BGGenome(node.name)) if leaf_wrapper is None else leaf_wrapper
        self.internal_node_pen_width = internal_node_pen_width
        self.vertex_data_wrapper = vertex_data_wrapper

    def get_pen_width(self, entry=None):
        """Thicker pen for leaves; base default for non-TreeNode entries."""
        if not isinstance(entry, TreeNode):
            return super(BGTreeVertexShapeProcessor, self).get_pen_width(entry=entry)
        if entry.is_leaf():
            return self.leaf_node_pen_width
        else:
            return self.internal_node_pen_width

    def get_color_as_string(self, entry, leaf_wrapper=None):
        """Color keyed by the wrapped genome for leaves, shared for internals."""
        if leaf_wrapper is None:
            # Bug fix: this was a no-op self-assignment ("self.__leaf_wrapper
            # = self.__leaf_wrapper"); mirrors BGTreeVertexTextProcessor.
            leaf_wrapper = self.__leaf_wrapper
        if not isinstance(entry, TreeNode):
            return super(BGTreeVertexShapeProcessor, self).get_color_as_string(entry=entry)
        if entry.is_leaf():
            # Bug fix: apply the (possibly caller-supplied) wrapper instead of
            # always using the instance default.
            entry = leaf_wrapper(entry)
        else:
            entry = "non_leaf_tree_node"
        return super(BGTreeVertexShapeProcessor, self).get_color_as_string(entry=entry)
class BGTreeVertexTextProcessor(TextProcessor):
    """Text attributes for tree nodes: leaves are labeled with their name
    and colored per wrapped genome; internal nodes get an empty label."""

    def __init__(self, color=Colors.black, size=12, font_name="Arial", color_source=None, leaf_wrapper=None):
        super(BGTreeVertexTextProcessor, self).__init__(color=color, size=size, font_name=font_name, color_source=color_source)
        # Bug fix: the original conditional was bound inside the lambda body,
        # so a caller-supplied leaf_wrapper was *returned* by the lambda
        # instead of being used as the wrapper itself.
        self.__leaf_wrapper = (lambda node: BGGenome(node.name)) if leaf_wrapper is None else leaf_wrapper

    def get_text_color(self, entry=None, leaf_wrapper=None):
        """Label color; leaves are keyed by their wrapped genome."""
        if leaf_wrapper is None:
            leaf_wrapper = self.__leaf_wrapper
        if entry is None or not isinstance(entry, TreeNode):
            return super(BGTreeVertexTextProcessor, self).get_text_color(entry=entry)
        if entry.is_leaf():
            entry = leaf_wrapper(entry)
        else:
            entry = "non_leaf_tree_node"
        return self.color_source.get_color_as_string(entry=entry)

    def get_text(self, entry=None, label_format=LabelFormat.plain):
        """Leaf name as the label text (empty for internal nodes)."""
        if entry is None or not isinstance(entry, TreeNode):
            return super(BGTreeVertexTextProcessor, self).get_text(entry=entry, label_format=label_format)
        text = ""
        if entry.is_leaf():
            text += entry.name
        if label_format == LabelFormat.html or label_format == LabelFormat.html.value:
            return "<" + text + ">"
        return "\"" + text + "\""
class BGTreeVertexProcessor(VertexProcessor):
    """Vertex exporter for phylogenetic trees; leaf nodes are identified by
    the genome they carry rather than by the ete3 node object."""

    def __init__(self, shape_processor=None, text_processor=None, color_source=None):
        super(BGTreeVertexProcessor, self).__init__(shape_processor=shape_processor, text_processor=text_processor)
        color_source = ColorSource() if color_source is None else color_source
        if self.shape_processor is None:
            self.shape_processor = BGTreeVertexShapeProcessor(color_source=color_source)
        if self.text_processor is None:
            self.text_processor = BGTreeVertexTextProcessor(color_source=color_source)

    def get_vertex_id(self, vertex, leaf_wrapper=BGGenome):
        """Id for *vertex*; leaves are keyed by their wrapped genome."""
        if isinstance(vertex, TreeNode) and vertex.is_leaf():
            key = leaf_wrapper(vertex.name)
        else:
            key = vertex
        return super(BGTreeVertexProcessor, self).get_vertex_id(vertex=key)

    def export_vertex_as_dot(self, vertex, label_format=LabelFormat.plain, leaf_wrapper=BGGenome):
        """Return the dot statement for tree node *vertex*."""
        vertex_id = self.get_vertex_id(vertex=vertex, leaf_wrapper=leaf_wrapper)
        attributes = list(self.text_processor.get_attributes_string_list(entry=vertex, label_format=label_format))
        attributes.extend(self.shape_processor.get_attributes_string_list(entry=vertex))
        return self.template.format(v_id=vertex_id, attributes=", ".join(attributes))
class BGTreeEdgeShapeProcessor(ShapeProcessor):
    """Shape attributes for tree branches: leaf branches are drawn thicker
    and colored per leaf genome; internal branches share one color."""

    def __init__(self, non_leaf_pen_width=1, leaf_pen_width=3, color=Colors.black, color_source=None, style="solid"):
        super(BGTreeEdgeShapeProcessor, self).__init__(pen_width=non_leaf_pen_width, color=color, color_source=color_source, style=style)
        self.leaf_branch_pen_width = leaf_pen_width
        self.non_leaf_branch_pen_width = non_leaf_pen_width

    def _is_leaf_branch(self, edge):
        # A branch is a "leaf branch" when at least one endpoint is not a
        # TreeNode (i.e. it was already replaced by genome data).
        return not (isinstance(edge[0], TreeNode) and isinstance(edge[1], TreeNode))

    def get_color_as_string(self, entry):
        """Color keyed by the non-TreeNode endpoint for leaf branches."""
        if not isinstance(entry, tuple):
            # Bug fix: the original mistakenly delegated to
            # get_attributes_string_list here, returning a *list* of
            # attribute strings where a color name was expected.
            return super(BGTreeEdgeShapeProcessor, self).get_color_as_string(entry=entry)
        if not self._is_leaf_branch(edge=entry):
            entry = None
        else:
            non_tree_node_instance = entry[0] if not isinstance(entry[0], TreeNode) else entry[1]
            entry = non_tree_node_instance
        return super(BGTreeEdgeShapeProcessor, self).get_color_as_string(entry=entry)

    def get_pen_width(self, entry=None):
        """Thicker pen for leaf branches."""
        if self._is_leaf_branch(edge=entry):
            return self.leaf_branch_pen_width
        else:
            return self.non_leaf_branch_pen_width
class BGTreeEdgeTextProcessor(TextProcessor):
    """Text attributes for tree branches; only the defaults differ from the
    base TextProcessor (smaller font size)."""
    def __init__(self, font_name="Arial", size=7, color=Colors.black, color_source=None):
        super(BGTreeEdgeTextProcessor, self).__init__(color=color, size=size, font_name=font_name, color_source=color_source)
class BGTreeEdgeProcessor(EdgeProcessor):
    """Edge exporter for phylogenetic trees with tree-specific defaults."""

    def __init__(self, vertex_processor, edge_shape_processor=None, edge_text_processor=None, color_source=None):
        super(BGTreeEdgeProcessor, self).__init__(vertex_processor=vertex_processor,
                                                  edge_shape_processor=edge_shape_processor,
                                                  edge_text_processor=edge_text_processor)
        # NOTE: super().__init__ already stores vertex_processor; the
        # re-assignment is kept for fidelity with the original code.
        self.vertex_processor = vertex_processor
        color_source = ColorSource() if color_source is None else color_source
        if self.shape_processor is None:
            self.shape_processor = BGTreeEdgeShapeProcessor(color_source=color_source)
        if self.text_processor is None:
            self.text_processor = BGTreeEdgeTextProcessor(color_source=color_source)
class BGTreeProcessor(GraphProcessor):
    """Whole-tree dot exporter wired up with tree-specific default processors."""

    def __init__(self, vertex_processor=None, edge_processor=None, color_source=None):
        super(BGTreeProcessor, self).__init__(vertex_processor=vertex_processor, edge_processor=edge_processor)
        color_source = ColorSource() if color_source is None else color_source
        if self.vertex_processor is None:
            self.vertex_processor = BGTreeVertexProcessor(color_source=color_source)
        if self.edge_processor is None:
            self.edge_processor = BGTreeEdgeProcessor(vertex_processor=self.vertex_processor, color_source=color_source)
| 46.984639 | 157 | 0.679635 |
from collections import defaultdict
from collections import deque
from enum import Enum
from ete3 import TreeNode
from bg.edge import BGEdge
from bg.genome import BGGenome
from bg.multicolor import Multicolor
from bg.utils import get_from_dict_with_path
from bg.vertices import BGVertex, InfinityVertex, TaggedInfinityVertex
def vertex_as_a_sting(vertex, separator=" "):
    """Plain-text label for a vertex: BGVertex gets block+orientation (+tags),
    everything else falls back to str."""
    result = ""
    if isinstance(vertex, BGVertex):
        orientation = "t" if vertex.is_tail_vertex else "h"
        result += vertex.block_name + orientation
        if vertex.is_tagged_vertex and len(vertex.tags) > 0:
            result += separator + separator.join(map(lambda entry: "(" + entry + ")", vertex.get_tags_as_list_of_strings()))
    else:
        result = str(vertex)
    return "{string}".format(string=result)
def vertex_as_html(vertex, separator=" "):
    """HTML-like graphviz label (``<...>``) for a vertex; block vertices get
    their orientation as a superscript."""
    result = ""
    if isinstance(vertex, BGVertex):
        if vertex.is_block_vertex:
            orientation = "t" if vertex.is_tail_vertex else "h"
            result += vertex.block_name + "<SUP>" + orientation + "</SUP>"
        if vertex.is_tagged_vertex and len(vertex.tags) > 0:
            result += separator + separator.join(map(lambda entry: "(" + entry + ")", vertex.get_tags_as_list_of_strings()))
    else:
        result = str(vertex)
    return "<" + result + ">"
class LabelFormat(Enum):
    """Output flavor for graphviz labels: plain text or HTML-like."""
    plain = "plain"
    html = "html"
class Colors(Enum):
    """Fixed palette of graphviz color names consumed by ColorSource."""
    black = "black"
    blue = "blue"
    red = "red"
    green = "green"
    orange = "orange"
    aquamarine = "aquamarine"
    bisque = "bisque"
    cyan = "cyan"
    gold = "gold"
    gray = "gray"
    khaki = "khaki"
    magenta = "magenta"
    maroon = "maroon"
    pink = "pink"
    orchid = "orchid"
    sandybrown = "sandybrown"
    cadetblue = "cadetblue"
    dimgrey = "dimgrey"
    plum = "plum"
    wheat = "wheat"
def ids_generator(start=1, step=1):
    """Yield an endless arithmetic progression of ids: start, start+step, ..."""
    current = start
    while True:
        yield current
        current += step
class ColorSource(object):
    """Hands out distinct graphviz colors, one per distinct key; repeated
    requests for the same key return the same color."""
    def __init__(self):
        # key -> Colors member already assigned to that key
        self.color_to_dot_color = {}
        # palette of not-yet-assigned colors, consumed front-to-back
        self.unused_colors = deque([
            Colors.black,
            Colors.blue,
            Colors.red,
            Colors.green,
            Colors.orange,
            Colors.aquamarine,
            Colors.bisque,
            Colors.cyan,
            Colors.gold,
            Colors.gray,
            Colors.khaki,
            Colors.magenta,
            Colors.maroon,
            Colors.pink,
            Colors.orchid,
            Colors.sandybrown,
            Colors.cadetblue,
            Colors.dimgrey,
            Colors.plum,
            Colors.wheat,
        ])
    def get_unused_color(self, entry):
        """Return the Colors member for *entry*, assigning the next free one if new."""
        if entry not in self.color_to_dot_color:
            self.color_to_dot_color[entry] = self.unused_colors.popleft()
        return self.color_to_dot_color[entry]
    def get_color_as_string(self, entry):
        """Return the graphviz color name (string) assigned to *entry*."""
        return self.get_unused_color(entry=entry).value
class ShapeProcessor(object):
    """Base producer of graphviz shape attributes (color, style, penwidth).

    Subclasses override the ``get_*`` accessors to specialize per entry.
    """

    def __init__(self, pen_width=1, style="solid", color=Colors.black, color_source=None):
        self.style_attrib_template = "style=\"{style}\""
        self.color_attrib_template = "color=\"{color}\""
        # Shared color source so equal keys map to equal colors across processors.
        self.color_source = color_source if color_source is not None else ColorSource()
        self.pen_width = pen_width
        self.style = style
        self.color = color
        self.pen_width_attrib_template = "penwidth=\"{pen_width}\""

    def get_pen_width(self, entry=None):
        """Pen width for *entry*; the base class ignores the entry."""
        return self.pen_width

    def get_style(self, entry=None):
        """Style for *entry*.

        Bug fix: previously always returned the hard-coded "solid", silently
        ignoring the ``style`` passed to __init__ (every in-file construction
        uses the "solid" default, so rendered output is unchanged).
        """
        return self.style

    def get_color_as_string(self, entry):
        """Graphviz color name assigned to *entry* by the color source."""
        return self.color_source.get_color_as_string(entry=entry)

    def get_attributes_string_list(self, entry):
        """Return [color, style, penwidth] attribute strings, in this order."""
        return [
            self.color_attrib_template.format(color=self.get_color_as_string(entry=entry)),
            self.style_attrib_template.format(style=self.get_style(entry=entry)),
            self.pen_width_attrib_template.format(pen_width=self.get_pen_width(entry=entry))
        ]
class TextProcessor(object):
    """Base producer of graphviz font/label attributes."""
    def __init__(self, color=Colors.black, size=12, font_name="Arial", color_source=None):
        self.color_source = color_source if color_source is not None else ColorSource()
        self.color = color
        self.text_size = size
        self.text_font_name = font_name
        self.color_attrib_template = "fontcolor=\"{color}\""
        self.size_attrib_template = "fontsize=\"{size}\""
        self.font_attrib_template = "fontname=\"{font}\""
        self.label_attrib_template = "label={label}"
    def get_text_font(self, entry=None):
        """Font family name for *entry*."""
        return self.text_font_name
    def get_text_size(self, entry=None):
        """Font size for *entry*."""
        return self.text_size
    def get_text_color(self, entry=None):
        # Enum members render via .value; anything else is stringified.
        return self.color.value if self.color in Colors else str(self.color)
    def get_text(self, entry=None, label_format=LabelFormat.plain):
        # Default label is empty: "<>" for html output, quoted "" otherwise.
        if label_format == LabelFormat.html.value or label_format == LabelFormat.html:
            return "<>"
        return "\"\""
    def get_attributes_string_list(self, entry, label_format=LabelFormat.plain):
        """Return [label, fontname, fontsize, fontcolor] attribute strings."""
        return [self.label_attrib_template.format(label=self.get_text(entry=entry, label_format=label_format)),
                self.font_attrib_template.format(font=self.text_font_name),
                self.size_attrib_template.format(size=self.text_size),
                self.color_attrib_template.format(color=self.get_text_color(entry=entry))]
class VertexShapeProcessor(ShapeProcessor):
    """Shape attributes for graph vertices; adds the ``shape`` attribute."""
    def __init__(self, pen_width=1, style="solid", color=Colors.black, shape="oval", color_source=None):
        super(VertexShapeProcessor, self).__init__(pen_width=pen_width, style=style, color=color, color_source=color_source)
        self.shape_attrib_template = "shape=\"{shape}\""
        self.shape = shape
    def get_shape(self, entry=None):
        """Shape name for *entry*; the base class ignores the entry."""
        return self.shape
    def get_attributes_string_list(self, entry):
        """Return [shape, penwidth, style, color] attribute strings."""
        return [self.shape_attrib_template.format(shape=self.get_shape(entry=entry)),
                self.pen_width_attrib_template.format(pen_width=self.get_pen_width(entry=entry)),
                self.style_attrib_template.format(style=self.get_style(entry=entry)),
                self.color_attrib_template.format(color=self.get_color_as_string(entry=entry))]
class BGVertexShapeProcessor(VertexShapeProcessor):
    """Vertex shapes for breakpoint graphs: ovals for regular vertices,
    points for irregular (infinity) ones."""
    def __init__(self, pen_width=1, style="solid", color=Colors.black, color_source=None,
                 regular_vertex_shape="oval", irregular_vertex_shape="point", non_bg_vertex_shape="oval"):
        super(BGVertexShapeProcessor, self).__init__(pen_width=pen_width, style=style, color=color, shape=non_bg_vertex_shape, color_source=color_source)
        self.regular_vertex_shape = regular_vertex_shape
        self.irregular_vertex_shape = irregular_vertex_shape
    def get_shape(self, entry=None):
        """Regular/irregular shape for BGVertex entries, base default otherwise."""
        if isinstance(entry, BGVertex):
            return self.regular_vertex_shape if entry.is_regular_vertex else self.irregular_vertex_shape
        return super(BGVertexShapeProcessor, self).get_shape(entry=entry)
    def get_attributes_string_list(self, entry):
        # NOTE(review): only shape and penwidth are emitted (no style/color),
        # and get_pen_width is called without the entry — presumably
        # intentional; confirm before changing.
        return [self.shape_attrib_template.format(shape=self.get_shape(entry=entry)),
                self.pen_width_attrib_template.format(pen_width=self.get_pen_width())]
class BGVertexTextProcessor(TextProcessor):
    """Renders breakpoint-graph vertices as dot labels (plain or HTML)."""
    def __init__(self, color=Colors.black, size=12, font_name="Arial", color_source=None):
        super(BGVertexTextProcessor, self).__init__(color=color, size=size, font_name=font_name, color_source=color_source)
    def get_text(self, entry=None, label_format=LabelFormat.plain, separator="\n"):
        """Return the vertex label text for *entry* in the requested format."""
        if entry is None:
            return super(BGVertexTextProcessor, self).get_text(entry=entry, label_format=label_format)
        if label_format in (LabelFormat.plain, LabelFormat.plain.value):
            return '"' + vertex_as_a_sting(vertex=entry, separator=separator) + '"'
        if label_format in (LabelFormat.html, LabelFormat.html.value):
            return vertex_as_html(vertex=entry, separator=separator)
        # Unknown formats fall through and yield None, matching prior behavior.
class VertexProcessor(object):
    """Exports graph vertices as dot node statements, assigning stable ids."""
    def __init__(self, shape_processor=None, text_processor=None):
        self.vertices_id_generator = ids_generator()
        # Maps each seen vertex to its generated dot id so repeat exports reuse ids.
        self.vertices_ids_storage = {}
        self.shape_processor = shape_processor
        self.text_processor = text_processor
        # dot node statement skeleton: "<id>" [attr, attr, ...];
        self.template = "\"{v_id}\" [{attributes}];"
    def get_vertex_id(self, vertex):
        """Return (allocating on first use) the dot id for *vertex*."""
        if vertex not in self.vertices_ids_storage:
            self.vertices_ids_storage[vertex] = next(self.vertices_id_generator)
        return self.vertices_ids_storage[vertex]
    def export_vertex_as_dot(self, vertex, label_format=LabelFormat.plain):
        """Render *vertex* as a single dot node statement string."""
        vertex_id = self.get_vertex_id(vertex=vertex)
        attributes = []
        attributes.extend(self.text_processor.get_attributes_string_list(entry=vertex, label_format=label_format))
        attributes.extend(self.shape_processor.get_attributes_string_list(entry=vertex))
        return self.template.format(v_id=vertex_id, attributes=", ".join(attributes))
class BGVertexProcessor(VertexProcessor):
    """Vertex exporter for breakpoint graphs; defaults to BG-specific sub-processors."""
    def __init__(self, shape_processor=None, text_processor=None, color_source=None):
        super(BGVertexProcessor, self).__init__(shape_processor=shape_processor, text_processor=text_processor)
        if color_source is None:
            color_source = ColorSource()
        if self.shape_processor is None:
            self.shape_processor = BGVertexShapeProcessor(color_source=color_source)
        if self.text_processor is None:
            self.text_processor = BGVertexTextProcessor(color_source=color_source)
    def get_vertex_id(self, vertex):
        # All infinity vertices sharing a name root collapse onto one dot id.
        if isinstance(vertex, InfinityVertex):
            vertex = BGVertex.get_vertex_name_root(vertex.name)
        return super(BGVertexProcessor, self).get_vertex_id(vertex=vertex)
    def export_vertex_as_dot(self, vertex, label_format=LabelFormat.plain):
        """Like the parent, but infinity vertices get no text attributes (unlabeled)."""
        vertex_id = self.get_vertex_id(vertex=vertex)
        attributes = []
        if not isinstance(vertex, InfinityVertex):
            attributes.extend(self.text_processor.get_attributes_string_list(entry=vertex, label_format=label_format))
        attributes.extend(self.shape_processor.get_attributes_string_list(entry=vertex))
        return self.template.format(v_id=vertex_id, attributes=", ".join(attributes))
class BGEdgeShapeProcessor(ShapeProcessor):
    """Dot shape attributes (style / pen width / color) for breakpoint-graph edges."""
    def __init__(self, pen_width=1, style="solid", color=Colors.black, color_source=None):
        super(BGEdgeShapeProcessor, self).__init__(pen_width=pen_width, style=style, color=color, color_source=color_source)
        # Per-edge-kind styling: regular, irregular (to infinity), repeat.
        self.regular_edge_style = "solid"
        self.irregular_edge_style = "dotted"
        self.repeat_edge_style = "dashed"
        self.regular_edge_pen_width = 1
        # NOTE: attribute name keeps the original typo ("with") for compatibility
        # with any external readers of this attribute.
        self.irregular_edge_pen_with = .7
        self.repeat_edge_pen_width = .7
    def get_style(self, entry=None):
        """Return the dot line style for *entry*; non-edges default to solid."""
        if entry is None or not isinstance(entry, BGEdge):
            return self.regular_edge_style
        # Precedence: repeat > irregular > regular.
        if entry.is_repeat_edge:
            return self.repeat_edge_style
        if entry.is_irregular_edge:
            return self.irregular_edge_style
        if entry.is_regular_edge:
            return self.regular_edge_style
    def get_pen_width(self, entry=None):
        """Return the dot pen width for *entry*; non-edges default to the regular width."""
        if entry is None or not isinstance(entry, BGEdge):
            return self.regular_edge_pen_width
        if entry.is_repeat_edge:
            return self.repeat_edge_pen_width
        if entry.is_irregular_edge:
            return self.irregular_edge_pen_with
        if entry.is_regular_edge:
            return self.regular_edge_pen_width
    def get_dot_colors(self, multicolor):
        """Map every color occurrence in *multicolor* to a dot color via the color source."""
        return [self.color_source.get_unused_color(entry=color) for color in multicolor.multicolors.elements()]
    def get_attributes_string_list(self, entry):
        """Return dot attribute strings for a single-colored edge.

        Raises:
            ValueError: if *entry* carries more or fewer than one color occurrence.
        """
        if len(list(entry.multicolor.multicolors.elements())) != 1:
            # BUGFIX: the previous message was garbled and stated the condition backwards.
            raise ValueError(
                "Graphviz edge shape attributes can only be created for a single-colored edge, "
                "not a multi-colored one")
        color = self.get_dot_colors(multicolor=entry.multicolor)[0].value
        return [
            self.color_attrib_template.format(color=color),
            self.style_attrib_template.format(style=self.get_style(entry=entry)),
            self.pen_width_attrib_template.format(pen_width=self.get_pen_width(entry=entry))]
class BGEdgeTextProcessor(TextProcessor):
    """Builds dot ``label`` (and font) attributes for breakpoint-graph edges.

    The label aggregates selected edge-data attributes plus the tags of any
    tagged infinity vertices at the edge's endpoints.
    """
    def __init__(self, size=7, font_name="Arial", color=Colors.black, color_source=None):
        super(BGEdgeTextProcessor, self).__init__(size=size, font_name=font_name, color=color, color_source=color_source)
    def get_text(self, entry=None, label_format=LabelFormat.plain,
                 edge_attributes_to_be_displayed=None,
                 tag_key_processor=None, tag_value_processor=None,
                 edge_key_value_separator=":",
                 entries_separator="\n"):
        """Return the label text for *entry* (a BGEdge), quoted per *label_format*.

        Non-edge entries fall back to the parent's empty label.
        """
        if entry is None or not isinstance(entry, BGEdge):
            return super(BGEdgeTextProcessor, self).get_text(entry=entry, label_format=label_format)
        if tag_key_processor is None:
            tag_key_processor = self._tag_key_processor
        if tag_value_processor is None:
            tag_value_processor = self._tag_value_processor
        if edge_attributes_to_be_displayed is None:
            edge_attributes_to_be_displayed = []
        # Selected edge-data attributes rendered as "key:value" entries.
        entries = []
        for path, key in edge_attributes_to_be_displayed:
            value = get_from_dict_with_path(source_dict=entry.data, key=key, path=path)
            if value is None:  # attribute absent in this edge's data
                continue
            entries.append(tag_key_processor(key=key, label_format=label_format) +
                           edge_key_value_separator +
                           tag_value_processor(value=value, label_format=label_format))
        text = entries_separator.join(entries)
        # Tags of tagged infinity endpoints.
        # BUGFIX: the second endpoint previously used vertex1's TAG_SEPARATOR
        # (copy-paste error); each vertex now uses its own separator.
        for vertex in (entry.vertex1, entry.vertex2):
            if not isinstance(vertex, TaggedInfinityVertex):
                continue
            tag_entries = [tag_key_processor(tag, label_format=label_format) +
                           vertex.TAG_SEPARATOR +
                           tag_value_processor(value, label_format=label_format)
                           for tag, value in vertex.tags]
            starting = "" if len(text) == 0 else entries_separator
            text += starting + entries_separator.join(tag_entries)
        if label_format == LabelFormat.plain.value or label_format == LabelFormat.plain:
            return "\"" + text + "\""
        elif label_format == LabelFormat.html.value or label_format == LabelFormat.html:
            return "<" + text + ">"
        return "\"\""
    def _tag_key_processor(self, key, label_format):
        # Abbreviate the frequent "repeat" tag to keep labels compact.
        if key == "repeat":
            return "r"
        else:
            return str(key)
    def _tag_value_processor(self, value, label_format):
        # In HTML labels, render a trailing "h"/"t" marker as a superscript.
        if str(value).endswith(("h", "t")) and (label_format == LabelFormat.html.value or label_format == LabelFormat.html):
            return str(value)[:-1] + "<SUP>" + str(value)[-1] + "</SUP>"
        return str(value)
    def get_attributes_string_list(self, entry, label_format=LabelFormat.plain, edge_attributes_to_be_displayed=None,
                                   tag_key_processor=None, tag_value_processor=None, edge_key_value_separator=":",
                                   entries_separator="\n"):
        """Return the dot label/font attribute strings for *entry*."""
        label = self.get_text(entry=entry, label_format=label_format,
                              edge_attributes_to_be_displayed=edge_attributes_to_be_displayed,
                              tag_key_processor=tag_key_processor,
                              tag_value_processor=tag_value_processor,
                              edge_key_value_separator=edge_key_value_separator,
                              entries_separator=entries_separator)
        return [self.label_attrib_template.format(label=label),
                self.font_attrib_template.format(font=self.text_font_name),
                self.size_attrib_template.format(size=self.text_size),
                self.color_attrib_template.format(color=self.get_text_color(entry=entry))]
class EdgeProcessor(object):
    """Exports graph edges as undirected dot edge statements."""
    def __init__(self, vertex_processor, edge_shape_processor=None, edge_text_processor=None):
        self.shape_processor = edge_shape_processor
        self.text_processor = edge_text_processor
        # Shared with the vertex exporter so endpoint ids stay consistent.
        self.vertex_processor = vertex_processor
        self.template = "\"{v1_id}\" -- \"{v2_id}\" [{attributes}];"
    def export_edge_as_dot(self, edge, label_format=LabelFormat.plain):
        """Render *edge* as a one-element list with a dot edge statement."""
        v1_id = self.vertex_processor.get_vertex_id(vertex=self.get_vertex_1(edge))
        v2_id = self.vertex_processor.get_vertex_id(vertex=self.get_vertex_2(edge))
        attributes = self.shape_processor.get_attributes_string_list(entry=edge)
        # Only attach label attributes when the label is more than an empty quoted string.
        if len(self.text_processor.get_text(entry=edge)) > 2:
            attributes.extend(self.text_processor.get_attributes_string_list(entry=edge, label_format=label_format))
        return [self.template.format(v1_id=v1_id, v2_id=v2_id, attributes=", ".join(attributes))]
    def get_vertex_1(self, edge):
        # Edges are (v1, v2) pairs by default; subclasses override for edge objects.
        return edge[0]
    def get_vertex_2(self, edge):
        return edge[1]
class BGEdgeProcessor(EdgeProcessor):
    """Edge exporter for breakpoint graphs: one dot statement per color occurrence."""
    def __init__(self, vertex_processor, edge_shape_processor=None, edge_text_processor=None, color_source=None):
        super(BGEdgeProcessor, self).__init__(vertex_processor=vertex_processor, edge_shape_processor=edge_shape_processor,
                                              edge_text_processor=edge_text_processor)
        if color_source is None:
            color_source = ColorSource()
        if self.shape_processor is None:
            self.shape_processor = BGEdgeShapeProcessor(color_source=color_source)
        if self.text_processor is None:
            self.text_processor = BGEdgeTextProcessor(color_source=color_source)
    def export_edge_as_dot(self, edge, label_format=LabelFormat.plain):
        """Split the multi-colored *edge* into single-colored dot edge statements."""
        v1_id = self.vertex_processor.get_vertex_id(vertex=self.get_vertex_1(edge))
        v2_id = self.vertex_processor.get_vertex_id(vertex=self.get_vertex_2(edge))
        result = []
        for color in edge.multicolor.multicolors.elements():
            # A temporary single-colored edge drives the shape/text processors.
            tmp_edge = BGEdge(vertex1=self.get_vertex_1(edge=edge), vertex2=self.get_vertex_2(edge=edge), multicolor=Multicolor(color),
                              data=edge.data)
            attributes = self.shape_processor.get_attributes_string_list(entry=tmp_edge)
            # Only attach label attributes when the label is non-empty (longer than "").
            if len(self.text_processor.get_text(entry=tmp_edge)) > 2:
                attributes.extend(self.text_processor.get_attributes_string_list(entry=tmp_edge, label_format=label_format))
            result.append(self.template.format(v1_id=v1_id, v2_id=v2_id, attributes=", ".join(attributes)))
        return result
    def get_vertex_1(self, edge):
        return edge.vertex1
    def get_vertex_2(self, edge):
        return edge.vertex2
class GraphProcessor(object):
    """Exports a whole graph object as a dot document string."""
    def __init__(self, vertex_processor=None, edge_processor=None):
        self.vertex_processor = vertex_processor
        self.edge_processor = edge_processor
        # Overall dot document skeleton; edge statements are listed before vertices.
        self.template = "graph {{\n{edges}\n{vertices}\n}}"
    def export_vertices_as_dot(self, graph, label_format=LabelFormat.plain):
        """Return a list of dot node statements, one per graph node."""
        result = []
        for vertex in graph.nodes():
            result.append(self.vertex_processor.export_vertex_as_dot(vertex=vertex, label_format=label_format))
        return result
    def export_edges_as_dot(self, graph, label_format=LabelFormat.plain):
        """Return a list of dot edge statements (one edge may yield several)."""
        result = []
        for edge in graph.edges():
            result.extend(self.edge_processor.export_edge_as_dot(edge=edge, label_format=label_format))
        return result
    def export_graph_as_dot(self, graph, label_format=LabelFormat.plain):
        """Render the complete dot document for *graph*."""
        vertices_entries = self.export_vertices_as_dot(graph=graph, label_format=label_format)
        edges_entries = self.export_edges_as_dot(graph=graph, label_format=label_format)
        return self.template.format(edges="\n".join(edges_entries), vertices="\n".join(vertices_entries))
class BreakpointGraphProcessor(GraphProcessor):
    """Dot exporter for breakpoint graphs with per-connected-component filtering.

    Components rejected by any filter are omitted from the drawing; a summary
    node reports how many components each filter rejected.
    """
    def __init__(self, vertex_processor=None, edge_processor=None, color_source=None, cc_filters=None):
        super(BreakpointGraphProcessor, self).__init__(vertex_processor=vertex_processor, edge_processor=edge_processor)
        if color_source is None:
            color_source = ColorSource()
        if self.vertex_processor is None:
            self.vertex_processor = BGVertexProcessor(color_source=color_source)
        if self.edge_processor is None:
            self.edge_processor = BGEdgeProcessor(vertex_processor=self.vertex_processor, color_source=color_source)
        if cc_filters is None:
            cc_filters = []
        self.cc_filters = cc_filters
        # One line per filter in the summary node's label.
        self.cc_filter_template = "{filter_name}: {filtered_cnt}"
        self.cc_filters_template = "\"cc_filters\" [shape=\"square\", penwidth=\"5\"," \
                                   " fontname=\"Arial\", fontsize=\"15\", " \
                                   "label=\"{overall_filters_info}\"];"
    def export_graph_as_dot(self, graph, label_format=LabelFormat.plain):
        """Render the dot document, skipping filtered-out connected components."""
        vertices_entries = []
        edges_entries = []
        filters_results = defaultdict(int)
        for cc in graph.connected_components_subgraphs(copy=False):
            for cc_filter in self.cc_filters:
                if not cc_filter.accept_connected_component(cc=cc, breakpoint_graph=graph):
                    # First rejecting filter gets the credit; stop checking the rest.
                    filters_results[cc_filter.name] += 1
                    break
            else:
                # for/else: runs only when no filter rejected the component.
                vertices_entries.extend(self.export_vertices_as_dot(graph=cc, label_format=label_format))
                edges_entries.extend(self.export_edges_as_dot(graph=cc, label_format=label_format))
        invoked_filters = {key: value for key, value in filters_results.items() if value > 0}
        if len(invoked_filters) > 0:
            # Append a summary node describing how many components each filter removed.
            entries = []
            for key, value in invoked_filters.items():
                entries.append(self.cc_filter_template.format(filter_name=key, filtered_cnt=value))
            label = "\n".join(entries)
            vertices_entries.append(self.cc_filters_template.format(overall_filters_info=label))
        return self.template.format(edges="\n".join(edges_entries), vertices="\n".join(vertices_entries))
class BGTreeVertexShapeProcessor(VertexShapeProcessor):
    """Shape attributes for phylogenetic-tree nodes; leaves get a heavier outline."""
    def __init__(self, color=Colors.black, style="solid", internal_node_pen_width=1, leaf_node_pen_width=3, shape="oval", color_source=None,
                 vertex_data_wrapper=BGGenome, leaf_wrapper=None):
        super(BGTreeVertexShapeProcessor, self).__init__(color=color, style=style, pen_width=internal_node_pen_width, shape=shape, color_source=color_source)
        self.leaf_node_pen_width = leaf_node_pen_width
        # BUGFIX: the original conditional bound inside the lambda body
        # ("lambda node: X if cond else Y"), so a caller-supplied leaf_wrapper
        # was *returned* by the lambda instead of being used as the wrapper.
        if leaf_wrapper is None:
            leaf_wrapper = lambda node: BGGenome(node.name)
        self.__leaf_wrapper = leaf_wrapper
        self.internal_node_pen_width = internal_node_pen_width
        self.vertex_data_wrapper = vertex_data_wrapper
    def get_pen_width(self, entry=None):
        """Leaves use the heavier pen width; internal nodes the lighter one."""
        if not isinstance(entry, TreeNode):
            return super(BGTreeVertexShapeProcessor, self).get_pen_width(entry=entry)
        if entry.is_leaf():
            return self.leaf_node_pen_width
        else:
            return self.internal_node_pen_width
    def get_color_as_string(self, entry, leaf_wrapper=None):
        """Color keyed by the wrapped leaf genome, or a shared key for internal nodes."""
        # BUGFIX: the default assignment was previously a self-assignment no-op
        # and the explicit leaf_wrapper argument was ignored entirely.
        if leaf_wrapper is None:
            leaf_wrapper = self.__leaf_wrapper
        if not isinstance(entry, TreeNode):
            return super(BGTreeVertexShapeProcessor, self).get_color_as_string(entry=entry)
        if entry.is_leaf():
            entry = leaf_wrapper(entry)
        else:
            entry = "non_leaf_tree_node"
        return super(BGTreeVertexShapeProcessor, self).get_color_as_string(entry=entry)
class BGTreeVertexTextProcessor(TextProcessor):
    """Text/label attributes for phylogenetic-tree nodes; leaves are labeled by name."""
    def __init__(self, color=Colors.black, size=12, font_name="Arial", color_source=None, leaf_wrapper=None):
        super(BGTreeVertexTextProcessor, self).__init__(color=color, size=size, font_name=font_name, color_source=color_source)
        # BUGFIX: the original conditional bound inside the lambda body
        # ("lambda node: X if cond else Y"), so a caller-supplied leaf_wrapper
        # was *returned* by the lambda instead of being used as the wrapper.
        if leaf_wrapper is None:
            leaf_wrapper = lambda node: BGGenome(node.name)
        self.__leaf_wrapper = leaf_wrapper
    def get_text_color(self, entry=None, leaf_wrapper=None):
        """Label color keyed by the wrapped leaf genome, or a shared key for internal nodes."""
        if leaf_wrapper is None:
            leaf_wrapper = self.__leaf_wrapper
        if entry is None or not isinstance(entry, TreeNode):
            return super(BGTreeVertexTextProcessor, self).get_text_color(entry=entry)
        if entry.is_leaf():
            entry = leaf_wrapper(entry)
        else:
            entry = "non_leaf_tree_node"
        return self.color_source.get_color_as_string(entry=entry)
    def get_text(self, entry=None, label_format=LabelFormat.plain):
        """Leaf nodes show their name; internal nodes get an empty label."""
        if entry is None or not isinstance(entry, TreeNode):
            return super(BGTreeVertexTextProcessor, self).get_text(entry=entry, label_format=label_format)
        text = ""
        if entry.is_leaf():
            text += entry.name
        if label_format == LabelFormat.html or label_format == LabelFormat.html.value:
            return "<" + text + ">"
        return "\"" + text + "\""
class BGTreeVertexProcessor(VertexProcessor):
    """Vertex exporter for trees: leaves are identified by their wrapped genome."""
    def __init__(self, shape_processor=None, text_processor=None, color_source=None):
        super(BGTreeVertexProcessor, self).__init__(shape_processor=shape_processor, text_processor=text_processor)
        if color_source is None:
            color_source = ColorSource()
        if self.shape_processor is None:
            self.shape_processor = BGTreeVertexShapeProcessor(color_source=color_source)
        if self.text_processor is None:
            self.text_processor = BGTreeVertexTextProcessor(color_source=color_source)
    def get_vertex_id(self, vertex, leaf_wrapper=BGGenome):
        # Leaves are keyed by the wrapped genome so that tree leaves and
        # breakpoint-graph genomes share one id space.
        if isinstance(vertex, TreeNode) and vertex.is_leaf():
            vertex_for_id = leaf_wrapper(vertex.name)
        else:
            vertex_for_id = vertex
        return super(BGTreeVertexProcessor, self).get_vertex_id(vertex=vertex_for_id)
    def export_vertex_as_dot(self, vertex, label_format=LabelFormat.plain, leaf_wrapper=BGGenome):
        """Render *vertex* as a dot node statement, wrapping leaves for id lookup."""
        vertex_id = self.get_vertex_id(vertex=vertex, leaf_wrapper=leaf_wrapper)
        attributes = []
        attributes.extend(self.text_processor.get_attributes_string_list(entry=vertex, label_format=label_format))
        attributes.extend(self.shape_processor.get_attributes_string_list(entry=vertex))
        return self.template.format(v_id=vertex_id, attributes=", ".join(attributes))
class BGTreeEdgeShapeProcessor(ShapeProcessor):
    """Shape attributes for tree branches; leaf branches are drawn heavier."""
    def __init__(self, non_leaf_pen_width=1, leaf_pen_width=3, color=Colors.black, color_source=None, style="solid"):
        super(BGTreeEdgeShapeProcessor, self).__init__(pen_width=non_leaf_pen_width, color=color, color_source=color_source, style=style)
        self.leaf_branch_pen_width = leaf_pen_width
        self.non_leaf_branch_pen_width = non_leaf_pen_width
    def _is_leaf_branch(self, edge):
        # A branch counts as a leaf branch unless both endpoints are TreeNode instances.
        return not (isinstance(edge[0], TreeNode) and isinstance(edge[1], TreeNode))
    def get_color_as_string(self, entry):
        """Color keyed by the branch's non-TreeNode endpoint (None for internal branches)."""
        if not isinstance(entry, tuple):
            # BUGFIX: previously delegated to get_attributes_string_list(), which
            # returns a *list* of attribute strings rather than a color string.
            return super(BGTreeEdgeShapeProcessor, self).get_color_as_string(entry=entry)
        if not self._is_leaf_branch(edge=entry):
            entry = None
        else:
            non_tree_node_instance = entry[0] if not isinstance(entry[0], TreeNode) else entry[1]
            entry = non_tree_node_instance
        return super(BGTreeEdgeShapeProcessor, self).get_color_as_string(entry=entry)
    def get_pen_width(self, entry=None):
        """Leaf branches use the heavier pen width."""
        # NOTE(review): entry=None is not handled here (edge[0] would raise
        # TypeError) -- confirm callers always pass an endpoint pair.
        if self._is_leaf_branch(edge=entry):
            return self.leaf_branch_pen_width
        else:
            return self.non_leaf_branch_pen_width
class BGTreeEdgeTextProcessor(TextProcessor):
    """Text attributes for tree branches; behavior fully inherited from TextProcessor."""
    def __init__(self, font_name="Arial", size=7, color=Colors.black, color_source=None):
        super(BGTreeEdgeTextProcessor, self).__init__(
            color=color, size=size, font_name=font_name, color_source=color_source)
class BGTreeEdgeProcessor(EdgeProcessor):
    """Edge exporter for tree drawings; fills in tree-specific shape/text processors."""
    def __init__(self, vertex_processor, edge_shape_processor=None, edge_text_processor=None, color_source=None):
        super(BGTreeEdgeProcessor, self).__init__(vertex_processor=vertex_processor,
                                                  edge_shape_processor=edge_shape_processor,
                                                  edge_text_processor=edge_text_processor)
        self.vertex_processor = vertex_processor
        source = ColorSource() if color_source is None else color_source
        if self.shape_processor is None:
            self.shape_processor = BGTreeEdgeShapeProcessor(color_source=source)
        if self.text_processor is None:
            self.text_processor = BGTreeEdgeTextProcessor(color_source=source)
class BGTreeProcessor(GraphProcessor):
    """Graph processor specialization for phylogenetic trees."""
    def __init__(self, vertex_processor=None, edge_processor=None, color_source=None):
        super(BGTreeProcessor, self).__init__(vertex_processor=vertex_processor, edge_processor=edge_processor)
        source = ColorSource() if color_source is None else color_source
        if self.vertex_processor is None:
            self.vertex_processor = BGTreeVertexProcessor(color_source=source)
        if self.edge_processor is None:
            self.edge_processor = BGTreeEdgeProcessor(vertex_processor=self.vertex_processor, color_source=source)
| true | true |
1c2d353a656cee00eaf8f5db2afb2488b261dd8c | 685 | py | Python | app/middlewares.py | wasp/waspy-example | 98f25b68808925ef5ae14b68670650eeb61f47eb | [
"Apache-2.0"
] | null | null | null | app/middlewares.py | wasp/waspy-example | 98f25b68808925ef5ae14b68670650eeb61f47eb | [
"Apache-2.0"
] | null | null | null | app/middlewares.py | wasp/waspy-example | 98f25b68808925ef5ae14b68670650eeb61f47eb | [
"Apache-2.0"
] | 1 | 2018-03-19T22:00:12.000Z | 2018-03-19T22:00:12.000Z |
async def attach_object(app, handler):
    """Middleware factory: attaches a fresh object to every request.

    Demonstrates augmenting the request before delegating to *handler*.
    """
    async def wrapped(request):
        request.some_object = object()
        return await handler(request)
    return wrapped
async def add_header(app, handler):
    """Middleware factory: tags JSON responses with an ``x_is_json`` header.

    Runs after *handler* produces a response but before it goes over the wire.
    """
    async def wrapped(request):
        response = await handler(request)
        is_json = response.headers.get('content-type') == 'application/json'
        if is_json:
            response.headers['x_is_json'] = 'true'
        return response
    return wrapped
async def attach_object(app, handler):
async def middleware(request):
request.some_object = object()
return await handler(request)
return middleware
async def add_header(app, handler):
async def middleware(request):
response = await handler(request)
if response.headers.get('content-type') == 'application/json':
response.headers['x_is_json'] = 'true'
return response
return middleware | true | true |
1c2d3572b7617a11b0cd4abb4a1e94698e519aaa | 375 | py | Python | configs/_base_/schedules/schedule_1k.py | AnonSubmission6150/submission6150 | 571633d9a12b4fd7a9546947787fc068966dab04 | [
"Apache-2.0"
] | null | null | null | configs/_base_/schedules/schedule_1k.py | AnonSubmission6150/submission6150 | 571633d9a12b4fd7a9546947787fc068966dab04 | [
"Apache-2.0"
] | null | null | null | configs/_base_/schedules/schedule_1k.py | AnonSubmission6150/submission6150 | 571633d9a12b4fd7a9546947787fc068966dab04 | [
"Apache-2.0"
] | null | null | null | # optimizer
# Training-schedule configuration (mmcv/MMSegmentation-style config dicts).
# optimizer: plain SGD with momentum and weight decay.
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy: polynomial decay down to min_lr, stepped per iteration.
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
# NOTE(review): the file is named schedule_1k but max_iters is 2000 -- confirm intended.
runner = dict(type='IterBasedRunner', max_iters=2000)
# Checkpoint every 200 iterations (iteration-based, not epoch-based).
checkpoint_config = dict(by_epoch=False, interval=200)
evaluation = dict(interval=200, metric='mIoU') | 41.666667 | 72 | 0.757333 |
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
runner = dict(type='IterBasedRunner', max_iters=2000)
checkpoint_config = dict(by_epoch=False, interval=200)
evaluation = dict(interval=200, metric='mIoU') | true | true |
1c2d35ec21d0c5f7564e7e31974aa32de2b16ac0 | 362 | py | Python | review/urls.py | jamilnoyda/login-sign-in-logout-changepassword-in-django | d8cbbe1d7cbab0607e5ca737728a5f4627c49c07 | [
"MIT"
] | null | null | null | review/urls.py | jamilnoyda/login-sign-in-logout-changepassword-in-django | d8cbbe1d7cbab0607e5ca737728a5f4627c49c07 | [
"MIT"
] | null | null | null | review/urls.py | jamilnoyda/login-sign-in-logout-changepassword-in-django | d8cbbe1d7cbab0607e5ca737728a5f4627c49c07 | [
"MIT"
] | null | null | null | from django.conf.urls import url,include
from . import *
from . import views
from django.views.generic.base import TemplateView
app_name='review'
urlpatterns = [
url(r'^signup/', views.SignUp.as_view(), name='signup'),
url(r'^customerlist/', views.CustomerListView.as_view(), name='list'),
#url(r'^home/', views.new, name='home'),
]
| 18.1 | 74 | 0.668508 | from django.conf.urls import url,include
from . import *
from . import views
from django.views.generic.base import TemplateView
app_name='review'
urlpatterns = [
url(r'^signup/', views.SignUp.as_view(), name='signup'),
url(r'^customerlist/', views.CustomerListView.as_view(), name='list'),
]
| true | true |
1c2d360757f9b6fad75aee38c6ffc1f0a4c4c337 | 399 | py | Python | Assignment-3/gcdLcm.py | HembramBeta777/Python-Programming | 827611b0613d9d953d13fb04ea9b5c5ac3c510f2 | [
"BSD-3-Clause"
] | 2 | 2020-09-01T04:58:16.000Z | 2021-01-30T03:45:52.000Z | Assignment-3/gcdLcm.py | HembramBeta777/Python-Programming | 827611b0613d9d953d13fb04ea9b5c5ac3c510f2 | [
"BSD-3-Clause"
] | null | null | null | Assignment-3/gcdLcm.py | HembramBeta777/Python-Programming | 827611b0613d9d953d13fb04ea9b5c5ac3c510f2 | [
"BSD-3-Clause"
] | null | null | null | # To find GCD and LCM of two numbers.
# Compute the GCD and LCM of two integers read from standard input.
import math

num1 = int(input("Enter the first number: "))
num2 = int(input("Enter the second number: "))
# math.gcd (Euclid's algorithm, O(log min(a, b))) replaces the original
# O(min(a, b)) trial-division loop, which also left GCD undefined
# (NameError) when either input was 0.
GCD = math.gcd(num1, num2)
print("The GCD of",num1,"and",num2,"is:",GCD)
# LCM via the identity a*b == gcd(a, b) * lcm(a, b); define lcm(0, 0) as 0.
lcm = (num1 * num2) // GCD if GCD else 0
print("The LCM of",num1,"and",num2,"is:",lcm)
| 19.95 | 47 | 0.516291 |
num1 = int(input("Enter the first number: "))
num2 = int(input("Enter the second number: "))
i=1
while( (i<=num1) and (i<=num2) ):
if( (num1%i==0) and (num2%i==0) ):
GCD = i;
i = i+1
print("The GCD of",num1,"and",num2,"is:",GCD)
lcm = (num1 * num2) // GCD
print("The LCM of",num1,"and",num2,"is:",lcm)
| true | true |
1c2d3746dc514c02495497b0e298410fc17f7ff9 | 1,349 | py | Python | pymager/resources/_path.py | samidalouche/pymager | 86a5f02163def40b7bbf81fd17e4c3e84bc5059c | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2015-01-20T03:24:13.000Z | 2015-01-20T03:24:13.000Z | pymager/resources/_path.py | samidalouche/pymager | 86a5f02163def40b7bbf81fd17e4c3e84bc5059c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | pymager/resources/_path.py | samidalouche/pymager | 86a5f02163def40b7bbf81fd17e4c3e84bc5059c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """
Copyright 2010 Sami Dalouche
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
class Path(object):
    """Immutable path builder: a reference directory plus a list of elements.

    All mutating-style operations (append/appendall/parent_directory) return
    new Path instances and leave the receiver untouched.
    """
    def __init__(self, reference_directory, path_elements=None):
        """Create a path rooted at *reference_directory*.

        BUGFIX: the default used to be a shared mutable list (``[]``); the
        elements are now copied so external mutation cannot leak in.
        """
        self.__reference_directory = reference_directory
        self.__path_elements = list(path_elements) if path_elements else []
    def parent_directory(self):
        """Return a new Path with the last element removed."""
        return Path(self.__reference_directory, self.__path_elements[:-1])
    def absolute(self):
        """Return the reference directory joined with all elements."""
        return os.path.join(self.__reference_directory, *self.__path_elements)
    def relative(self):
        """Return the elements joined as a relative path.

        Returns '' for an empty element list (the original raised TypeError).
        """
        if not self.__path_elements:
            return ""
        return os.path.join(*self.__path_elements)
    def append(self, path_element):
        """Return a new Path with *path_element* appended."""
        return Path(self.__reference_directory, self.__path_elements + [path_element])
    def appendall(self, path_elements):
        """Return a new Path with all *path_elements* appended."""
        return Path(self.__reference_directory, self.__path_elements + list(path_elements))
| 34.589744 | 86 | 0.72424 |
import os
class Path(object):
def __init__(self, reference_directory, path_elements=[]):
self.__reference_directory = reference_directory
self.__path_elements = path_elements
def parent_directory(self):
return Path(self.__reference_directory, self.__path_elements[:-1])
def absolute(self):
return os.path.join(self.__reference_directory, *self.__path_elements)
def relative(self):
return os.path.join(*self.__path_elements)
def append(self, path_element):
return Path(self.__reference_directory, self.__path_elements + [path_element])
def appendall(self, path_elements):
return Path(self.__reference_directory, self.__path_elements + path_elements)
| true | true |
1c2d38ce51164a71d268b0df377e00336ed974d3 | 983 | py | Python | lib/spack/spack/analyzers/install_files.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | lib/spack/spack/analyzers/install_files.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | lib/spack/spack/analyzers/install_files.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""The install files json file (install_manifest.json) already exists in
the package install folder, so this analyzer simply moves it to the user
analyzer folder for further processing."""
import os
import spack.monitor
from .analyzer_base import AnalyzerBase
class InstallFiles(AnalyzerBase):
    """Analyzer that re-exports the package's install_manifest.json.

    The manifest already exists in the package install metadata folder, so
    this analyzer just reads it and returns it under the analyzer's name.
    """
    name = "install_files"
    outfile = "spack-analyzer-install-files.json"
    description = "install file listing read from install_manifest.json"
    def run(self):
        """Load install_manifest.json and return it keyed by the analyzer name.

        The caller writes the result to the analyzers folder.
        """
        manifest_file = os.path.join(self.meta_dir, "install_manifest.json")
        return {self.name: spack.monitor.read_json(manifest_file)}
| 30.71875 | 79 | 0.733469 |
import os
import spack.monitor
from .analyzer_base import AnalyzerBase
class InstallFiles(AnalyzerBase):
name = "install_files"
outfile = "spack-analyzer-install-files.json"
description = "install file listing read from install_manifest.json"
def run(self):
manifest_file = os.path.join(self.meta_dir, "install_manifest.json")
return {self.name: spack.monitor.read_json(manifest_file)}
| true | true |
1c2d3978d6b4d992081478a6033e9332b9d07db0 | 1,073 | py | Python | Luggage-Management-QR-System-master/Website/mongo_api.py | Shaunak04/S.C.A.M.S | 7f10f1c679a0b76269d0dcf05aa17cc26a268e1c | [
"Apache-2.0"
] | null | null | null | Luggage-Management-QR-System-master/Website/mongo_api.py | Shaunak04/S.C.A.M.S | 7f10f1c679a0b76269d0dcf05aa17cc26a268e1c | [
"Apache-2.0"
] | null | null | null | Luggage-Management-QR-System-master/Website/mongo_api.py | Shaunak04/S.C.A.M.S | 7f10f1c679a0b76269d0dcf05aa17cc26a268e1c | [
"Apache-2.0"
] | null | null | null | import pymongo
from bson import ObjectId
import dns
import gridfs
# Module-level handles populated by main() at import time.
# NOTE(review): `connection` and `database` are declared (and listed as globals
# in main) but never assigned -- confirm whether they are still needed.
connection = None
database = None
QR_Gen_Accounts = None
QR_Read_Accounts = None
Flights = None
Luggage = None
Complaint = None
user_db = None
def get_obj_id(id):
    # Convert a hex string into a BSON ObjectId for Mongo queries.
    return ObjectId(id)
def main():
    """Connect to MongoDB and bind the collection handles as module globals."""
    global connection
    global database
    global QR_Gen_Accounts
    global QR_Read_Accounts
    global Flights
    global Luggage
    global Complaint
    global user_db
    global qr_db
    global grid_fs
    global helpdesk_db
    # SECURITY: credentials are hard-coded in source and should be moved to an
    # environment variable / secret store; this password is effectively public.
    connection_String = "mongodb+srv://Nikhil:newpassword@cluster0.2iqos.mongodb.net/Test?retryWrites=true&w=majority"
    client = pymongo.MongoClient(connection_String)
    #client = pymongo.MongoClient(host = 'localhost', port = 27017)
    db = client.Tarp_project
    print("working")
    QR_Read_Accounts = db.QR_Read_Accounts
    QR_Gen_Accounts = db.QR_Gen_Accounts
    Flights = db.Flights
    Luggage = db.Luggage
    Complaint = db.Complaint
    user_db = db.user_db
    qr_db = db.QRs
    helpdesk_db = db.Helpdesk
    # GridFS store for binary payloads (e.g. QR images).
    grid_fs = gridfs.GridFS(db)
main() | 22.829787 | 118 | 0.724138 | import pymongo
from bson import ObjectId
import dns
import gridfs
connection = None
database = None
QR_Gen_Accounts = None
QR_Read_Accounts = None
Flights = None
Luggage = None
Complaint = None
user_db = None
def get_obj_id(id):
return ObjectId(id)
def main():
global connection
global database
global QR_Gen_Accounts
global QR_Read_Accounts
global Flights
global Luggage
global Complaint
global user_db
global qr_db
global grid_fs
global helpdesk_db
connection_String = "mongodb+srv://Nikhil:newpassword@cluster0.2iqos.mongodb.net/Test?retryWrites=true&w=majority"
client = pymongo.MongoClient(connection_String)
db = client.Tarp_project
print("working")
QR_Read_Accounts = db.QR_Read_Accounts
QR_Gen_Accounts = db.QR_Gen_Accounts
Flights = db.Flights
Luggage = db.Luggage
Complaint = db.Complaint
user_db = db.user_db
qr_db = db.QRs
helpdesk_db = db.Helpdesk
grid_fs = gridfs.GridFS(db)
main() | true | true |
1c2d3a2ae09f20773917cb473932130238c7a165 | 649 | py | Python | pil_resize_aspect_ratio/enums/fill_type.py | kkristof200/py_resize_image | 33824c691481b2166ade18e7fa6b5583ceeaa4f6 | [
"MIT"
] | null | null | null | pil_resize_aspect_ratio/enums/fill_type.py | kkristof200/py_resize_image | 33824c691481b2166ade18e7fa6b5583ceeaa4f6 | [
"MIT"
] | null | null | null | pil_resize_aspect_ratio/enums/fill_type.py | kkristof200/py_resize_image | 33824c691481b2166ade18e7fa6b5583ceeaa4f6 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
from enum import Enum
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------- class: FillType ------------------------------------------------------------ #
class FillType(Enum):
    """Resize-mode selector.

    NOTE(review): semantics inferred from the member names only -- presumably
    ``Fill`` scales to cover the target box and ``Fit`` scales to be fully
    contained inside it; confirm against the resize implementation.
    """
    Fill = 0
    Fit = 1
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 40.5625 | 140 | 0.107858 |
from enum import Enum
class FillType(Enum):
    """Enumeration of the two supported resize modes.

    NOTE(review): the exact cover/contain semantics of ``Fill`` vs ``Fit``
    are not visible here; verify against the caller that resizes images.
    """
    Fill = 0
    Fit = 1
| true | true |
1c2d3a915d317a05969c55ff714a638a4a1dca31 | 7,338 | py | Python | Modules/Gekokujo_vanilla_enhanced/Code/Module_system/ID_parties.py | roalyr/gekokujo_vanilla_enhanced | 84d8cc1033be98357ac139fafbc1c10851274019 | [
"MIT"
] | null | null | null | Modules/Gekokujo_vanilla_enhanced/Code/Module_system/ID_parties.py | roalyr/gekokujo_vanilla_enhanced | 84d8cc1033be98357ac139fafbc1c10851274019 | [
"MIT"
] | null | null | null | Modules/Gekokujo_vanilla_enhanced/Code/Module_system/ID_parties.py | roalyr/gekokujo_vanilla_enhanced | 84d8cc1033be98357ac139fafbc1c10851274019 | [
"MIT"
] | null | null | null | p_main_party = 0
p_temp_party = 1
p_camp_bandits = 2
p_temp_party_2 = 3
p_temp_casualties = 4
p_temp_casualties_2 = 5
p_temp_casualties_3 = 6
p_temp_wounded = 7
p_temp_killed = 8
p_main_party_backup = 9
p_encountered_party_backup = 10
p_collective_friends_backup = 11
p_player_casualties = 12
p_enemy_casualties = 13
p_ally_casualties = 14
p_collective_enemy = 15
p_collective_ally = 16
p_collective_friends = 17
p_total_enemy_casualties = 18
p_routed_enemies = 19
p_freelancer_party_backup = 20
p_zendar = 21
p_town_1 = 22
p_town_2 = 23
p_town_3 = 24
p_town_4 = 25
p_town_5 = 26
p_town_6 = 27
p_town_7 = 28
p_town_8 = 29
p_town_9 = 30
p_town_10 = 31
p_town_11 = 32
p_town_12 = 33
p_town_13 = 34
p_town_14 = 35
p_town_15 = 36
p_town_16 = 37
p_town_17 = 38
p_town_18 = 39
p_town_19 = 40
p_town_20 = 41
p_town_21 = 42
p_town_22 = 43
p_town_23 = 44
p_town_24 = 45
p_town_25 = 46
p_town_26 = 47
p_town_27 = 48
p_town_28 = 49
p_town_29 = 50
p_town_30 = 51
p_town_31 = 52
p_town_32 = 53
p_castle_1 = 54
p_castle_2 = 55
p_castle_3 = 56
p_castle_4 = 57
p_castle_5 = 58
p_castle_6 = 59
p_castle_7 = 60
p_castle_8 = 61
p_castle_9 = 62
p_castle_10 = 63
p_castle_11 = 64
p_castle_12 = 65
p_castle_13 = 66
p_castle_14 = 67
p_castle_15 = 68
p_castle_16 = 69
p_castle_17 = 70
p_castle_18 = 71
p_castle_19 = 72
p_castle_20 = 73
p_castle_21 = 74
p_castle_22 = 75
p_castle_23 = 76
p_castle_24 = 77
p_castle_25 = 78
p_castle_26 = 79
p_castle_27 = 80
p_castle_28 = 81
p_castle_29 = 82
p_castle_30 = 83
p_castle_31 = 84
p_castle_32 = 85
p_castle_33 = 86
p_castle_34 = 87
p_castle_35 = 88
p_castle_36 = 89
p_castle_37 = 90
p_castle_38 = 91
p_castle_39 = 92
p_castle_40 = 93
p_castle_41 = 94
p_castle_42 = 95
p_castle_43 = 96
p_castle_44 = 97
p_castle_45 = 98
p_castle_46 = 99
p_castle_47 = 100
p_castle_48 = 101
p_castle_49 = 102
p_castle_50 = 103
p_castle_51 = 104
p_castle_52 = 105
p_castle_53 = 106
p_castle_54 = 107
p_castle_55 = 108
p_castle_56 = 109
p_castle_57 = 110
p_castle_58 = 111
p_castle_59 = 112
p_castle_60 = 113
p_castle_61 = 114
p_castle_62 = 115
p_castle_63 = 116
p_castle_64 = 117
p_castle_65 = 118
p_castle_66 = 119
p_castle_67 = 120
p_castle_68 = 121
p_castle_69 = 122
p_castle_70 = 123
p_castle_71 = 124
p_castle_72 = 125
p_castle_73 = 126
p_castle_74 = 127
p_castle_75 = 128
p_village_1 = 129
p_village_2 = 130
p_village_3 = 131
p_village_4 = 132
p_village_5 = 133
p_village_6 = 134
p_village_7 = 135
p_village_8 = 136
p_village_9 = 137
p_village_10 = 138
p_village_11 = 139
p_village_12 = 140
p_village_13 = 141
p_village_14 = 142
p_village_15 = 143
p_village_16 = 144
p_village_17 = 145
p_village_18 = 146
p_village_19 = 147
p_village_20 = 148
p_village_21 = 149
p_village_22 = 150
p_village_23 = 151
p_village_24 = 152
p_village_25 = 153
p_village_26 = 154
p_village_27 = 155
p_village_28 = 156
p_village_29 = 157
p_village_30 = 158
p_village_31 = 159
p_village_32 = 160
p_village_33 = 161
p_village_34 = 162
p_village_35 = 163
p_village_36 = 164
p_village_37 = 165
p_village_38 = 166
p_village_39 = 167
p_village_40 = 168
p_village_41 = 169
p_village_42 = 170
p_village_43 = 171
p_village_44 = 172
p_village_45 = 173
p_village_46 = 174
p_village_47 = 175
p_village_48 = 176
p_village_49 = 177
p_village_50 = 178
p_village_51 = 179
p_village_52 = 180
p_village_53 = 181
p_village_54 = 182
p_village_55 = 183
p_village_56 = 184
p_village_57 = 185
p_village_58 = 186
p_village_59 = 187
p_village_60 = 188
p_village_61 = 189
p_village_62 = 190
p_village_63 = 191
p_village_64 = 192
p_village_65 = 193
p_village_66 = 194
p_village_67 = 195
p_village_68 = 196
p_village_69 = 197
p_village_70 = 198
p_village_71 = 199
p_village_72 = 200
p_village_73 = 201
p_village_74 = 202
p_village_75 = 203
p_village_76 = 204
p_village_77 = 205
p_village_78 = 206
p_village_79 = 207
p_village_80 = 208
p_village_81 = 209
p_village_82 = 210
p_village_83 = 211
p_village_84 = 212
p_village_85 = 213
p_village_86 = 214
p_village_87 = 215
p_village_88 = 216
p_village_89 = 217
p_village_90 = 218
p_village_91 = 219
p_village_92 = 220
p_village_93 = 221
p_village_94 = 222
p_village_95 = 223
p_village_96 = 224
p_village_97 = 225
p_village_98 = 226
p_village_99 = 227
p_village_100 = 228
p_village_101 = 229
p_village_102 = 230
p_village_103 = 231
p_village_104 = 232
p_village_105 = 233
p_village_106 = 234
p_village_107 = 235
p_village_108 = 236
p_village_109 = 237
p_village_110 = 238
p_village_111 = 239
p_village_112 = 240
p_village_113 = 241
p_village_114 = 242
p_village_115 = 243
p_village_116 = 244
p_village_117 = 245
p_village_118 = 246
p_village_119 = 247
p_village_120 = 248
p_village_121 = 249
p_village_122 = 250
p_village_123 = 251
p_village_124 = 252
p_village_125 = 253
p_village_126 = 254
p_village_127 = 255
p_village_128 = 256
p_village_129 = 257
p_village_130 = 258
p_village_131 = 259
p_village_132 = 260
p_village_133 = 261
p_village_134 = 262
p_village_135 = 263
p_village_136 = 264
p_village_137 = 265
p_village_138 = 266
p_village_139 = 267
p_village_140 = 268
p_village_141 = 269
p_village_142 = 270
p_village_143 = 271
p_village_144 = 272
p_village_145 = 273
p_village_146 = 274
p_village_147 = 275
p_village_148 = 276
p_village_149 = 277
p_village_150 = 278
p_village_151 = 279
p_village_152 = 280
p_village_153 = 281
p_village_154 = 282
p_village_155 = 283
p_village_156 = 284
p_village_157 = 285
p_village_158 = 286
p_village_159 = 287
p_village_160 = 288
p_village_161 = 289
p_village_162 = 290
p_village_163 = 291
p_village_164 = 292
p_fort_1 = 293
p_fort_2 = 294
p_fort_3 = 295
p_fort_4 = 296
p_fort_5 = 297
p_fort_6 = 298
p_salt_mine = 299
p_four_ways_inn = 300
p_test_scene = 301
p_battlefields = 302
p_dhorak_keep = 303
p_training_ground = 304
p_training_ground_1 = 305
p_training_ground_2 = 306
p_training_ground_3 = 307
p_training_ground_4 = 308
p_training_ground_5 = 309
p_bridge_1 = 310
p_bridge_2 = 311
p_bridge_3 = 312
p_bridge_4 = 313
p_bridge_5 = 314
p_bridge_6 = 315
p_bridge_7 = 316
p_bridge_8 = 317
p_bridge_9 = 318
p_bridge_10 = 319
p_bridge_11 = 320
p_bridge_12 = 321
p_bridge_13 = 322
p_bridge_14 = 323
p_bridge_15 = 324
p_bridge_16 = 325
p_bridge_17 = 326
p_bridge_18 = 327
p_bridge_19 = 328
p_bridge_20 = 329
p_bridge_21 = 330
p_bridge_22 = 331
p_bridge_23 = 332
p_bridge_24 = 333
p_bridge_25 = 334
p_bridge_26 = 335
p_bridge_27 = 336
p_bridge_28 = 337
p_bridge_29 = 338
p_bridge_30 = 339
p_bridge_31 = 340
p_bridge_32 = 341
p_bridge_33 = 342
p_bridge_34 = 343
p_bridge_35 = 344
p_bridge_36 = 345
p_bridge_37 = 346
p_bridge_38 = 347
p_bridge_39 = 348
p_bridge_40 = 349
p_bridge_41 = 350
p_bridge_42 = 351
p_bridge_43 = 352
p_bridge_44 = 353
p_bridge_45 = 354
p_looter_spawn_point = 355
p_seto_pirate_spawn_point = 356
p_kanto_rebel_spawn_point = 357
p_shinano_rebel_spawn_point = 358
p_woku_pirate_spawn_point = 359
p_kinai_rebel_spawn_point = 360
p_monk_rebel_spawn_point = 361
p_northern_raider_spawn_point = 362
p_spawn_points_end = 363
p_reserved_1 = 364
p_reserved_2 = 365
p_reserved_3 = 366
p_reserved_4 = 367
p_reserved_5 = 368
| 19.832432 | 36 | 0.748569 | p_main_party = 0
p_temp_party = 1
p_camp_bandits = 2
p_temp_party_2 = 3
p_temp_casualties = 4
p_temp_casualties_2 = 5
p_temp_casualties_3 = 6
p_temp_wounded = 7
p_temp_killed = 8
p_main_party_backup = 9
p_encountered_party_backup = 10
p_collective_friends_backup = 11
p_player_casualties = 12
p_enemy_casualties = 13
p_ally_casualties = 14
p_collective_enemy = 15
p_collective_ally = 16
p_collective_friends = 17
p_total_enemy_casualties = 18
p_routed_enemies = 19
p_freelancer_party_backup = 20
p_zendar = 21
p_town_1 = 22
p_town_2 = 23
p_town_3 = 24
p_town_4 = 25
p_town_5 = 26
p_town_6 = 27
p_town_7 = 28
p_town_8 = 29
p_town_9 = 30
p_town_10 = 31
p_town_11 = 32
p_town_12 = 33
p_town_13 = 34
p_town_14 = 35
p_town_15 = 36
p_town_16 = 37
p_town_17 = 38
p_town_18 = 39
p_town_19 = 40
p_town_20 = 41
p_town_21 = 42
p_town_22 = 43
p_town_23 = 44
p_town_24 = 45
p_town_25 = 46
p_town_26 = 47
p_town_27 = 48
p_town_28 = 49
p_town_29 = 50
p_town_30 = 51
p_town_31 = 52
p_town_32 = 53
p_castle_1 = 54
p_castle_2 = 55
p_castle_3 = 56
p_castle_4 = 57
p_castle_5 = 58
p_castle_6 = 59
p_castle_7 = 60
p_castle_8 = 61
p_castle_9 = 62
p_castle_10 = 63
p_castle_11 = 64
p_castle_12 = 65
p_castle_13 = 66
p_castle_14 = 67
p_castle_15 = 68
p_castle_16 = 69
p_castle_17 = 70
p_castle_18 = 71
p_castle_19 = 72
p_castle_20 = 73
p_castle_21 = 74
p_castle_22 = 75
p_castle_23 = 76
p_castle_24 = 77
p_castle_25 = 78
p_castle_26 = 79
p_castle_27 = 80
p_castle_28 = 81
p_castle_29 = 82
p_castle_30 = 83
p_castle_31 = 84
p_castle_32 = 85
p_castle_33 = 86
p_castle_34 = 87
p_castle_35 = 88
p_castle_36 = 89
p_castle_37 = 90
p_castle_38 = 91
p_castle_39 = 92
p_castle_40 = 93
p_castle_41 = 94
p_castle_42 = 95
p_castle_43 = 96
p_castle_44 = 97
p_castle_45 = 98
p_castle_46 = 99
p_castle_47 = 100
p_castle_48 = 101
p_castle_49 = 102
p_castle_50 = 103
p_castle_51 = 104
p_castle_52 = 105
p_castle_53 = 106
p_castle_54 = 107
p_castle_55 = 108
p_castle_56 = 109
p_castle_57 = 110
p_castle_58 = 111
p_castle_59 = 112
p_castle_60 = 113
p_castle_61 = 114
p_castle_62 = 115
p_castle_63 = 116
p_castle_64 = 117
p_castle_65 = 118
p_castle_66 = 119
p_castle_67 = 120
p_castle_68 = 121
p_castle_69 = 122
p_castle_70 = 123
p_castle_71 = 124
p_castle_72 = 125
p_castle_73 = 126
p_castle_74 = 127
p_castle_75 = 128
p_village_1 = 129
p_village_2 = 130
p_village_3 = 131
p_village_4 = 132
p_village_5 = 133
p_village_6 = 134
p_village_7 = 135
p_village_8 = 136
p_village_9 = 137
p_village_10 = 138
p_village_11 = 139
p_village_12 = 140
p_village_13 = 141
p_village_14 = 142
p_village_15 = 143
p_village_16 = 144
p_village_17 = 145
p_village_18 = 146
p_village_19 = 147
p_village_20 = 148
p_village_21 = 149
p_village_22 = 150
p_village_23 = 151
p_village_24 = 152
p_village_25 = 153
p_village_26 = 154
p_village_27 = 155
p_village_28 = 156
p_village_29 = 157
p_village_30 = 158
p_village_31 = 159
p_village_32 = 160
p_village_33 = 161
p_village_34 = 162
p_village_35 = 163
p_village_36 = 164
p_village_37 = 165
p_village_38 = 166
p_village_39 = 167
p_village_40 = 168
p_village_41 = 169
p_village_42 = 170
p_village_43 = 171
p_village_44 = 172
p_village_45 = 173
p_village_46 = 174
p_village_47 = 175
p_village_48 = 176
p_village_49 = 177
p_village_50 = 178
p_village_51 = 179
p_village_52 = 180
p_village_53 = 181
p_village_54 = 182
p_village_55 = 183
p_village_56 = 184
p_village_57 = 185
p_village_58 = 186
p_village_59 = 187
p_village_60 = 188
p_village_61 = 189
p_village_62 = 190
p_village_63 = 191
p_village_64 = 192
p_village_65 = 193
p_village_66 = 194
p_village_67 = 195
p_village_68 = 196
p_village_69 = 197
p_village_70 = 198
p_village_71 = 199
p_village_72 = 200
p_village_73 = 201
p_village_74 = 202
p_village_75 = 203
p_village_76 = 204
p_village_77 = 205
p_village_78 = 206
p_village_79 = 207
p_village_80 = 208
p_village_81 = 209
p_village_82 = 210
p_village_83 = 211
p_village_84 = 212
p_village_85 = 213
p_village_86 = 214
p_village_87 = 215
p_village_88 = 216
p_village_89 = 217
p_village_90 = 218
p_village_91 = 219
p_village_92 = 220
p_village_93 = 221
p_village_94 = 222
p_village_95 = 223
p_village_96 = 224
p_village_97 = 225
p_village_98 = 226
p_village_99 = 227
p_village_100 = 228
p_village_101 = 229
p_village_102 = 230
p_village_103 = 231
p_village_104 = 232
p_village_105 = 233
p_village_106 = 234
p_village_107 = 235
p_village_108 = 236
p_village_109 = 237
p_village_110 = 238
p_village_111 = 239
p_village_112 = 240
p_village_113 = 241
p_village_114 = 242
p_village_115 = 243
p_village_116 = 244
p_village_117 = 245
p_village_118 = 246
p_village_119 = 247
p_village_120 = 248
p_village_121 = 249
p_village_122 = 250
p_village_123 = 251
p_village_124 = 252
p_village_125 = 253
p_village_126 = 254
p_village_127 = 255
p_village_128 = 256
p_village_129 = 257
p_village_130 = 258
p_village_131 = 259
p_village_132 = 260
p_village_133 = 261
p_village_134 = 262
p_village_135 = 263
p_village_136 = 264
p_village_137 = 265
p_village_138 = 266
p_village_139 = 267
p_village_140 = 268
p_village_141 = 269
p_village_142 = 270
p_village_143 = 271
p_village_144 = 272
p_village_145 = 273
p_village_146 = 274
p_village_147 = 275
p_village_148 = 276
p_village_149 = 277
p_village_150 = 278
p_village_151 = 279
p_village_152 = 280
p_village_153 = 281
p_village_154 = 282
p_village_155 = 283
p_village_156 = 284
p_village_157 = 285
p_village_158 = 286
p_village_159 = 287
p_village_160 = 288
p_village_161 = 289
p_village_162 = 290
p_village_163 = 291
p_village_164 = 292
p_fort_1 = 293
p_fort_2 = 294
p_fort_3 = 295
p_fort_4 = 296
p_fort_5 = 297
p_fort_6 = 298
p_salt_mine = 299
p_four_ways_inn = 300
p_test_scene = 301
p_battlefields = 302
p_dhorak_keep = 303
p_training_ground = 304
p_training_ground_1 = 305
p_training_ground_2 = 306
p_training_ground_3 = 307
p_training_ground_4 = 308
p_training_ground_5 = 309
p_bridge_1 = 310
p_bridge_2 = 311
p_bridge_3 = 312
p_bridge_4 = 313
p_bridge_5 = 314
p_bridge_6 = 315
p_bridge_7 = 316
p_bridge_8 = 317
p_bridge_9 = 318
p_bridge_10 = 319
p_bridge_11 = 320
p_bridge_12 = 321
p_bridge_13 = 322
p_bridge_14 = 323
p_bridge_15 = 324
p_bridge_16 = 325
p_bridge_17 = 326
p_bridge_18 = 327
p_bridge_19 = 328
p_bridge_20 = 329
p_bridge_21 = 330
p_bridge_22 = 331
p_bridge_23 = 332
p_bridge_24 = 333
p_bridge_25 = 334
p_bridge_26 = 335
p_bridge_27 = 336
p_bridge_28 = 337
p_bridge_29 = 338
p_bridge_30 = 339
p_bridge_31 = 340
p_bridge_32 = 341
p_bridge_33 = 342
p_bridge_34 = 343
p_bridge_35 = 344
p_bridge_36 = 345
p_bridge_37 = 346
p_bridge_38 = 347
p_bridge_39 = 348
p_bridge_40 = 349
p_bridge_41 = 350
p_bridge_42 = 351
p_bridge_43 = 352
p_bridge_44 = 353
p_bridge_45 = 354
p_looter_spawn_point = 355
p_seto_pirate_spawn_point = 356
p_kanto_rebel_spawn_point = 357
p_shinano_rebel_spawn_point = 358
p_woku_pirate_spawn_point = 359
p_kinai_rebel_spawn_point = 360
p_monk_rebel_spawn_point = 361
p_northern_raider_spawn_point = 362
p_spawn_points_end = 363
p_reserved_1 = 364
p_reserved_2 = 365
p_reserved_3 = 366
p_reserved_4 = 367
p_reserved_5 = 368
| true | true |
1c2d3aced5ecffbaf0cfc56972803dc0a6b35f06 | 2,585 | py | Python | check_nifi_java_gc.py | adolci/nagios-plugins | 0d8cee0376467922b3315e9b0e08b98454eb9853 | [
"IBM-pibs",
"Apache-1.1"
] | null | null | null | check_nifi_java_gc.py | adolci/nagios-plugins | 0d8cee0376467922b3315e9b0e08b98454eb9853 | [
"IBM-pibs",
"Apache-1.1"
] | null | null | null | check_nifi_java_gc.py | adolci/nagios-plugins | 0d8cee0376467922b3315e9b0e08b98454eb9853 | [
"IBM-pibs",
"Apache-1.1"
] | 3 | 2019-07-25T11:46:32.000Z | 2019-12-17T05:01:03.000Z | #!/usr/bin/env python
# coding=utf-8
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2018-08-15 23:18:55 +0100 (Wed, 15 Aug 2018)
#
# https://github.com/harisekhon/nagios-plugins
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Nagios Plugin to check Nifi Java GC last collection time via its API
Thresholds apply to Java Garbage Collection last collection time in seconds
Tested on Apache Nifi 1.7
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import traceback
# Make the bundled 'pylib' directory importable regardless of the CWD the
# plugin is launched from.
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
    # pylint: disable=wrong-import-position
    from harisekhon.utils import isInt, CriticalError
    from harisekhon import RestNagiosPlugin
except ImportError as _:
    # Cannot run without the harisekhon pylib: print the traceback and exit
    # with a non-zero status.
    print(traceback.format_exc(), end='')
    sys.exit(4)

__author__ = 'Hari Sekhon'
__version__ = '0.2'
class CheckNifiJavaGc(RestNagiosPlugin):
    """Check the last Java GC collection time reported by Nifi via its
    ``/nifi-api/system-diagnostics`` REST endpoint.

    The warning / critical thresholds apply to the last collection time in
    seconds (fractional values allowed).
    """

    def __init__(self):
        # Python 2.x
        super(CheckNifiJavaGc, self).__init__()
        # Python 3.x
        # super().__init__()
        self.name = 'Nifi'
        # REST endpoint serving the JVM garbage-collection statistics
        self.path = '/nifi-api/system-diagnostics'
        self.default_port = 8080
        self.json = True
        self.auth = 'optional'
        self.msg = 'Nifi message not defined'

    def add_options(self):
        """Add standard options plus warning/critical thresholds (seconds)."""
        super(CheckNifiJavaGc, self).add_options()
        self.add_thresholds(default_warning=3, default_critical=10)

    def process_options(self):
        """Validate options; thresholds may be fractional seconds."""
        super(CheckNifiJavaGc, self).process_options()
        self.validate_thresholds(integer=False)

    def parse_json(self, json_data):
        """Take the worst (largest) last collection time across all garbage
        collectors and compare it against the thresholds.

        :param json_data: parsed JSON response from /nifi-api/system-diagnostics
        :raises CriticalError: if no GC stats are present or collectionMillis
                               is not an integer
        """
        gcs = json_data['systemDiagnostics']['aggregateSnapshot']['garbageCollection']
        if not gcs:
            # max() on an empty list would raise ValueError and surface as an
            # unhandled traceback -- raise a clear CRITICAL instead
            raise CriticalError('no garbageCollection stats found in system diagnostics output')
        gc_millis = max([_['collectionMillis'] for _ in gcs])
        if not isInt(gc_millis):
            raise CriticalError('collectionMillis \'{}\' is not an integer!!'.format(gc_millis))
        # compare thresholds numerically; format only for display / perfdata
        gc_secs = int(gc_millis) / 1000.0
        self.ok()
        self.msg = 'Nifi Java GC last collection time = {:.2f} secs'.format(gc_secs)
        self.check_thresholds(gc_secs)
        self.msg += ' | gc_collection={:.2f}s{}'.format(gc_secs, self.get_perf_thresholds())
# Standard CLI entry point: build the plugin and run it.
if __name__ == '__main__':
    CheckNifiJavaGc().main()
| 29.375 | 96 | 0.688975 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import traceback
# Put the sibling 'pylib' directory on sys.path so the harisekhon helper
# library can be imported no matter where the script is invoked from.
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
    from harisekhon.utils import isInt, CriticalError
    from harisekhon import RestNagiosPlugin
except ImportError as _:
    # The plugin framework is required; dump the traceback and exit non-zero.
    print(traceback.format_exc(), end='')
    sys.exit(4)

__author__ = 'Hari Sekhon'
__version__ = '0.2'
class CheckNifiJavaGc(RestNagiosPlugin):
    """Nagios plugin: check Nifi's last Java GC collection time (in seconds)
    via the ``/nifi-api/system-diagnostics`` REST endpoint."""

    def __init__(self):
        super(CheckNifiJavaGc, self).__init__()
        self.name = 'Nifi'
        # endpoint serving the JVM / garbage-collection diagnostics
        self.path = '/nifi-api/system-diagnostics'
        self.default_port = 8080
        self.json = True
        self.auth = 'optional'
        self.msg = 'Nifi message not defined'

    def add_options(self):
        """Register the warning/critical thresholds, expressed in seconds."""
        super(CheckNifiJavaGc, self).add_options()
        self.add_thresholds(default_warning=3, default_critical=10)

    def process_options(self):
        """Validate thresholds; fractional seconds are allowed."""
        super(CheckNifiJavaGc, self).process_options()
        self.validate_thresholds(integer=False)

    def parse_json(self, json_data):
        """Pick the largest collectionMillis across all collectors, convert
        to seconds, and apply the thresholds."""
        gcs = json_data['systemDiagnostics']['aggregateSnapshot']['garbageCollection']
        gc_millis = max([_['collectionMillis'] for _ in gcs])
        if not isInt(gc_millis):
            raise CriticalError('collectionMillis \'{}\' is not an integer!!'.format(gc_millis))
        gc_millis = int(gc_millis)
        # NOTE(review): gc_secs is a formatted *string* here and is passed to
        # check_thresholds() as-is -- presumably the base class coerces it to
        # a number; confirm against RestNagiosPlugin.
        gc_secs = '{:.2f}'.format(gc_millis / 1000)
        self.ok()
        self.msg = 'Nifi Java GC last collection time = {} secs'.format(gc_secs)
        self.check_thresholds(gc_secs)
        self.msg += ' | gc_collection={}s{}'.format(gc_secs, self.get_perf_thresholds())
# CLI entry point.
if __name__ == '__main__':
    CheckNifiJavaGc().main()
| true | true |
1c2d3c06541d8d9d49bb74af0946d1aa1c05a633 | 118 | py | Python | jr_tools/exceptions.py | erickgnavar/jr_tools | 46719b5708446ab19139fd5ac29d5bf51cf896b8 | [
"MIT"
] | 1 | 2017-11-02T01:39:11.000Z | 2017-11-02T01:39:11.000Z | jr_tools/exceptions.py | erickgnavar/jasper-reports-tools | 46719b5708446ab19139fd5ac29d5bf51cf896b8 | [
"MIT"
] | 1 | 2021-11-15T17:46:27.000Z | 2021-11-15T17:46:27.000Z | jr_tools/exceptions.py | erickgnavar/jr_tools | 46719b5708446ab19139fd5ac29d5bf51cf896b8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
class ConnectionError(Exception):
pass
class InvalidOutputFormat(Exception):
pass
| 11.8 | 37 | 0.677966 |
class ConnectionError(Exception):
pass
class InvalidOutputFormat(Exception):
pass
| true | true |
1c2d3fd9537edf88a43260b481f18923fcf5e997 | 13,165 | py | Python | nipyapi/nifi/models/variable_registry_update_request_dto.py | esecules/nipyapi | e8a53b79a5e1a6b29446f43d2b23b6a3e60873f1 | [
"Apache-2.0"
] | null | null | null | nipyapi/nifi/models/variable_registry_update_request_dto.py | esecules/nipyapi | e8a53b79a5e1a6b29446f43d2b23b6a3e60873f1 | [
"Apache-2.0"
] | null | null | null | nipyapi/nifi/models/variable_registry_update_request_dto.py | esecules/nipyapi | e8a53b79a5e1a6b29446f43d2b23b6a3e60873f1 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.11.1-SNAPSHOT
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VariableRegistryUpdateRequestDTO(object):
    """Swagger model for a NiFi variable-registry update request.

    Attribute metadata is declared once: ``swagger_types`` maps attribute
    name -> swagger type string, and ``attribute_map`` maps attribute name ->
    the JSON key used on the wire.  Every attribute is exposed as a plain
    read/write property backed by an underscore-prefixed slot.
    """

    swagger_types = {
        'request_id': 'str',
        'uri': 'str',
        'submission_time': 'datetime',
        'last_updated': 'datetime',
        'complete': 'bool',
        'failure_reason': 'str',
        'percent_completed': 'int',
        'state': 'str',
        'update_steps': 'list[VariableRegistryUpdateStepDTO]',
        'process_group_id': 'str',
        'affected_components': 'list[AffectedComponentEntity]'
    }

    attribute_map = {
        'request_id': 'requestId',
        'uri': 'uri',
        'submission_time': 'submissionTime',
        'last_updated': 'lastUpdated',
        'complete': 'complete',
        'failure_reason': 'failureReason',
        'percent_completed': 'percentCompleted',
        'state': 'state',
        'update_steps': 'updateSteps',
        'process_group_id': 'processGroupId',
        'affected_components': 'affectedComponents'
    }

    def __init__(self, request_id=None, uri=None, submission_time=None,
                 last_updated=None, complete=None, failure_reason=None,
                 percent_completed=None, state=None, update_steps=None,
                 process_group_id=None, affected_components=None):
        """Create the model; only arguments that are not None are stored."""
        supplied = locals()
        # every backing slot starts out as None ...
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        # ... then each explicitly supplied value is routed through its setter
        for attr in self.swagger_types:
            if supplied[attr] is not None:
                setattr(self, attr, supplied[attr])

    def _rw(slot):
        """Class-body helper: build a read/write property for a private slot."""
        def _get(self):
            return getattr(self, slot)

        def _set(self, value):
            setattr(self, slot, value)

        return property(_get, _set)

    request_id = _rw('_request_id')
    uri = _rw('_uri')
    submission_time = _rw('_submission_time')
    last_updated = _rw('_last_updated')
    complete = _rw('_complete')
    failure_reason = _rw('_failure_reason')
    percent_completed = _rw('_percent_completed')
    state = _rw('_state')
    update_steps = _rw('_update_steps')
    process_group_id = _rw('_process_group_id')
    affected_components = _rw('_affected_components')

    del _rw  # the factory is not part of the public API

    def to_dict(self):
        """Return the model's properties as a dict, converting any nested
        model (anything exposing ``to_dict``) inside lists and dicts too."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, 'to_dict') else item
                                for item in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: (val.to_dict() if hasattr(val, 'to_dict') else val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Pretty-printed string form of ``to_dict()``."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff the other object is the same model type with identical
        attribute values."""
        return (isinstance(other, VariableRegistryUpdateRequestDTO)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 32.426108 | 479 | 0.638055 |
from pprint import pformat
from six import iteritems
import re
class VariableRegistryUpdateRequestDTO(object):
swagger_types = {
'request_id': 'str',
'uri': 'str',
'submission_time': 'datetime',
'last_updated': 'datetime',
'complete': 'bool',
'failure_reason': 'str',
'percent_completed': 'int',
'state': 'str',
'update_steps': 'list[VariableRegistryUpdateStepDTO]',
'process_group_id': 'str',
'affected_components': 'list[AffectedComponentEntity]'
}
attribute_map = {
'request_id': 'requestId',
'uri': 'uri',
'submission_time': 'submissionTime',
'last_updated': 'lastUpdated',
'complete': 'complete',
'failure_reason': 'failureReason',
'percent_completed': 'percentCompleted',
'state': 'state',
'update_steps': 'updateSteps',
'process_group_id': 'processGroupId',
'affected_components': 'affectedComponents'
}
def __init__(self, request_id=None, uri=None, submission_time=None, last_updated=None, complete=None, failure_reason=None, percent_completed=None, state=None, update_steps=None, process_group_id=None, affected_components=None):
self._request_id = None
self._uri = None
self._submission_time = None
self._last_updated = None
self._complete = None
self._failure_reason = None
self._percent_completed = None
self._state = None
self._update_steps = None
self._process_group_id = None
self._affected_components = None
if request_id is not None:
self.request_id = request_id
if uri is not None:
self.uri = uri
if submission_time is not None:
self.submission_time = submission_time
if last_updated is not None:
self.last_updated = last_updated
if complete is not None:
self.complete = complete
if failure_reason is not None:
self.failure_reason = failure_reason
if percent_completed is not None:
self.percent_completed = percent_completed
if state is not None:
self.state = state
if update_steps is not None:
self.update_steps = update_steps
if process_group_id is not None:
self.process_group_id = process_group_id
if affected_components is not None:
self.affected_components = affected_components
@property
def request_id(self):
return self._request_id
@request_id.setter
def request_id(self, request_id):
self._request_id = request_id
@property
def uri(self):
return self._uri
@uri.setter
def uri(self, uri):
self._uri = uri
@property
def submission_time(self):
return self._submission_time
@submission_time.setter
def submission_time(self, submission_time):
self._submission_time = submission_time
@property
def last_updated(self):
return self._last_updated
@last_updated.setter
def last_updated(self, last_updated):
self._last_updated = last_updated
@property
def complete(self):
return self._complete
@complete.setter
def complete(self, complete):
self._complete = complete
@property
def failure_reason(self):
return self._failure_reason
@failure_reason.setter
def failure_reason(self, failure_reason):
self._failure_reason = failure_reason
@property
def percent_completed(self):
return self._percent_completed
@percent_completed.setter
def percent_completed(self, percent_completed):
self._percent_completed = percent_completed
@property
def state(self):
return self._state
@state.setter
def state(self, state):
self._state = state
@property
def update_steps(self):
return self._update_steps
@update_steps.setter
def update_steps(self, update_steps):
self._update_steps = update_steps
@property
def process_group_id(self):
return self._process_group_id
@process_group_id.setter
def process_group_id(self, process_group_id):
self._process_group_id = process_group_id
@property
def affected_components(self):
return self._affected_components
@affected_components.setter
def affected_components(self, affected_components):
self._affected_components = affected_components
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, VariableRegistryUpdateRequestDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c2d409204403961a0d30302df49fce90c8889f3 | 5,683 | py | Python | EquationModels/BurgerEquationRar.py | mroberto166/PinnsSub | 338c5400080ba570fd8f53d7481539225fcf4d17 | [
"MIT"
] | 12 | 2020-12-02T13:04:55.000Z | 2022-01-07T21:48:30.000Z | EquationModels/BurgerEquationRar.py | mroberto166/PinnsSub | 338c5400080ba570fd8f53d7481539225fcf4d17 | [
"MIT"
] | null | null | null | EquationModels/BurgerEquationRar.py | mroberto166/PinnsSub | 338c5400080ba570fd8f53d7481539225fcf4d17 | [
"MIT"
] | 7 | 2020-12-02T13:04:57.000Z | 2021-12-28T14:28:19.000Z | from ImportFile import *
# n_coll = 8192
# n_u = 1024
# n_int = 0
pi = math.pi
v = 0.0 / pi  # viscosity coefficient; 0.0 selects the inviscid Burgers equation
extrema_values = torch.tensor([[0, 0.5],
                               [-1, 1]])  # rows: [t_min, t_max], [x_min, x_max]
def compute_res(network, x_f_train, space_dimensions, solid_object, computing_error):
    """Compute the PDE residual u_t + (u^2/2)_x - v*u_xx of Burgers' equation at the collocation points.

    :param network: PINN mapping (t, x) rows to the scalar solution u.
    :param x_f_train: (N, 2) tensor of collocation points; column 0 = t, column 1 = x.
    :param space_dimensions: unused here; kept for the shared equation-model interface.
    :param solid_object: unused here; kept for the shared equation-model interface.
    :param computing_error: when True, keep the grad seed on the CPU even if CUDA is available.
    :return: (N,) tensor of residual values (differentiable, for use in the loss).
    """
    x_f_train.requires_grad = True
    u = network(x_f_train).reshape(-1, )
    # Seed vector for the vector-Jacobian products below: one entry per point.
    inputs = torch.ones(x_f_train.shape[0], )
    if not computing_error and torch.cuda.is_available():
        inputs = inputs.cuda()
    u_sq = 0.5 * u * u  # flux of the conservative form: u^2 / 2
    grad_u = torch.autograd.grad(u, x_f_train, grad_outputs=inputs, create_graph=True)[0]
    grad_u_sq = torch.autograd.grad(u_sq, x_f_train, grad_outputs=inputs, create_graph=True)[0]
    grad_u_t = grad_u[:, 0]  # du/dt
    grad_u_x = grad_u[:, 1]  # du/dx
    grad_u_sq_x = grad_u_sq[:, 1]  # d(u^2/2)/dx, the convection term
    grad_grad_u_x = torch.autograd.grad(grad_u_x, x_f_train, grad_outputs=inputs, create_graph=True)[0]
    grad_u_xx = grad_grad_u_x[:, 1]  # d^2u/dx^2, the diffusion term
    if torch.cuda.is_available():
        del inputs
        torch.cuda.empty_cache()
    residual = grad_u_t.reshape(-1, ) + grad_u_sq_x - v * grad_u_xx.reshape(-1, )  # u * grad_u_x #- 0.01 / pi * grad_u_xx.reshape(-1, )
    return residual
def ub0(t):
    """Constant-zero boundary values for each time sample in *t*.

    Returns a (len(t), 1) tensor of zeros together with the BC type tag
    ("func" = boundary given as a function of t) expected by the solver.
    """
    bc_kind = ["func"]
    values = torch.zeros(t.shape[0], 1)
    return values.reshape(-1, 1), bc_kind
def ub1(t):
    """Constant-one boundary values for each time sample in *t*.

    Returns a (len(t), 1) tensor of ones plus the "func" BC type tag.
    """
    bc_kind = ["func"]
    values = torch.ones(t.shape[0], 1)
    return values.reshape(-1, 1), bc_kind
list_of_BC = [[ub0, ub1]]
def u0(x):
    """Initial condition u(0, x): a unit step, 0 for x <= 0 and 1 for x > 0.

    :param x: tensor of spatial sample points, shape (N,) or (N, 1).
    :return: tensor of shape (N, 1) with the step values, in the default dtype.
    """
    # Vectorized replacement of the original per-element Python loop:
    # the boolean mask (x > 0) is exactly the step function.
    return (x.reshape(-1, 1) > 0).to(torch.get_default_dtype())
def exact(inputs):
    """Exact rarefaction-wave solution at the (t, x) rows of *inputs*.

    u = 0 for x < 0, u = x/t inside the fan 0 <= x < t, and u = 1 for x >= t.
    Returns a tensor of shape (n, 1).
    """
    times = inputs[:, 0]
    positions = inputs[:, 1]
    solution = torch.zeros(positions.shape[0], 1)
    for idx in range(solution.shape[0]):
        if positions[idx] >= times[idx]:
            solution[idx, 0] = 1
        elif positions[idx] >= 0:
            # Inside the fan: 0 <= x < t.
            solution[idx, 0] = positions[idx] / times[idx]
    return solution
def convert(vector, extrema_values):
    """Map samples from the unit hypercube onto the domain given by *extrema_values*.

    Each component i of *vector* (expected in [0, 1]) is rescaled linearly
    onto [min_i, max_i], taken from row i of *extrema_values*.
    Returns a torch.FloatTensor.
    """
    samples = np.array(vector)
    bounds = np.array(extrema_values)
    lower = bounds.min(axis=1)
    upper = bounds.max(axis=1)
    rescaled = samples * (upper - lower) + lower
    return torch.from_numpy(rescaled).type(torch.FloatTensor)
def compute_generalization_error(model, extrema, images_path=None):
    """Evaluate *model* against the reference solution and return (L2, relative L2) test errors.

    Loads the reference data from Data/BurgersRar.txt, filters rows for the
    configured viscosity ``v``; for the inviscid case (v == 0) the analytic
    ``exact`` solution is evaluated on the stored grid instead. Optionally
    saves a predicted-vs-exact scatter plot under *images_path*.
    """
    model.eval()
    file_ex = "Data/BurgersRar.txt"
    exact_solution = np.loadtxt(file_ex)
    print(exact_solution)
    # Columns appear to be [t, x, <flag>, viscosity, u]; rows are selected on
    # flag == 0 and matching viscosity -- TODO confirm against the data file layout.
    Exact = exact_solution[np.where((exact_solution[:, 2] == 0.0) & (exact_solution[:, 3] == v)), -1].reshape(-1, 1)
    test_inp = torch.from_numpy(exact_solution[np.where((exact_solution[:, 2] == 0.0) & (exact_solution[:, 3] == v)), :2]).type(torch.FloatTensor)
    print(test_inp.shape)
    if v == 0.:
        # No rows are stored for exactly v == 0: reuse the grid saved for
        # v = 0.01/pi and evaluate the analytic solution on it instead.
        test_inp = torch.from_numpy(exact_solution[np.where((exact_solution[:, 2] == 0.0) & (exact_solution[:, 3] == 0.01 / pi)), :2]).type(torch.FloatTensor)
        test_inp = test_inp.reshape(test_inp.shape[1], 2)
        Exact = exact(test_inp).reshape(-1, 1).detach().numpy()
    else:
        test_inp = test_inp.reshape(Exact.shape[0], 2)
    test_out = model(test_inp).detach().numpy()
    assert (Exact.shape[1] == test_out.shape[1])
    L2_test = np.sqrt(np.mean((Exact - test_out) ** 2))
    print("Error Test:", L2_test)
    rel_L2_test = L2_test / np.sqrt(np.mean(Exact ** 2))
    print("Relative Error Test:", rel_L2_test)
    if images_path is not None:
        # Scatter of exact vs predicted values; a perfect model lies on the diagonal.
        plt.figure()
        plt.grid(True, which="both", ls=":")
        plt.scatter(Exact, test_out)
        plt.xlabel(r'Exact Values')
        plt.ylabel(r'Predicted Values')
        plt.savefig(images_path + "/Score.png", dpi=400)
    return L2_test, rel_L2_test
def plotting(model, images_path, extrema, solid):
    """Plot predicted vs. reference solution profiles at several times and save the figure.

    For each time in ``time_steps`` the reference profile is read from
    Data/BurgersRar.txt (or computed analytically when v == 0) and overlaid
    with the model prediction on a uniform 100-point x grid. The figure is
    written to <images_path>/Rar_Samples.png.
    """
    model.cpu()
    model = model.eval()
    file_ex = "Data/BurgersRar.txt"
    exact_solution = np.loadtxt(file_ex)
    time_steps = [0.0, 0.24, 0.5]
    # Color-lightness scale so the three time steps are visually distinguishable.
    scale_vec = np.linspace(0.65, 1.55, len(time_steps))
    fig = plt.figure()
    plt.grid(True, which="both", ls=":")
    for val, scale in zip(time_steps, scale_vec):
        # Reference rows for this time step and the configured viscosity.
        ex = exact_solution[np.where((exact_solution[:, 0] == val) & (exact_solution[:, 2] == 0.0) & (exact_solution[:, 3] == v)), -1].reshape(-1, 1)
        inputs = torch.from_numpy(exact_solution[np.where((exact_solution[:, 0] == val) & (exact_solution[:, 2] == 0.0) & (exact_solution[:, 3] == v)), :2]).type(torch.FloatTensor)
        if v == 0.:
            # Inviscid case: reuse the v = 0.01/pi grid and evaluate the analytic solution.
            inputs = torch.from_numpy(exact_solution[np.where((exact_solution[:, 0] == val) & (exact_solution[:, 2] == 0.0) & (exact_solution[:, 3] == 0.01 / pi)), :2]).type(
                torch.FloatTensor)
            inputs = inputs.reshape(inputs.shape[1], 2)
            ex = exact(inputs).reshape(-1, 1).detach().numpy()
        else:
            inputs = inputs.reshape(ex.shape[0], 2)
        # Model prediction on a uniform x grid at time `val`.
        x = torch.linspace(-1, 1, 100).reshape(-1, 1)
        t = torch.tensor(()).new_full(size=(x.shape[0], 1), fill_value=val)
        inputs_m = torch.cat([t, x], 1)
        x_plot = inputs[:, 1].reshape(-1, 1)
        plt.plot(x_plot.detach().numpy(), ex, linewidth=2, label=r'Exact, $t=$' + str(val), color=lighten_color('grey', scale), zorder=0)
        plt.scatter(x.detach().numpy(), model(inputs_m).detach().numpy(), label=r'Predicted, $t=$' + str(val), marker="o", s=14, color=lighten_color('C0', scale),
                    zorder=10)
    plt.xlabel(r'$x$')
    plt.ylabel(r'u')
    plt.legend()
    plt.savefig(images_path + "/Rar_Samples.png", dpi=500)
| 35.968354 | 180 | 0.609537 | from ImportFile import *
pi = math.pi
v = 0.0 / pi
extrema_values = torch.tensor([[0, 0.5],
[-1, 1]])
def compute_res(network, x_f_train, space_dimensions, solid_object, computing_error):
x_f_train.requires_grad = True
u = network(x_f_train).reshape(-1, )
inputs = torch.ones(x_f_train.shape[0], )
if not computing_error and torch.cuda.is_available():
inputs = inputs.cuda()
u_sq = 0.5 * u * u
grad_u = torch.autograd.grad(u, x_f_train, grad_outputs=inputs, create_graph=True)[0]
grad_u_sq = torch.autograd.grad(u_sq, x_f_train, grad_outputs=inputs, create_graph=True)[0]
grad_u_t = grad_u[:, 0]
grad_u_x = grad_u[:, 1]
grad_u_sq_x = grad_u_sq[:, 1]
grad_grad_u_x = torch.autograd.grad(grad_u_x, x_f_train, grad_outputs=inputs, create_graph=True)[0]
grad_u_xx = grad_grad_u_x[:, 1]
if torch.cuda.is_available():
del inputs
torch.cuda.empty_cache()
residual = grad_u_t.reshape(-1, ) + grad_u_sq_x - v * grad_u_xx.reshape(-1, )
type_BC = ["func"]
out = torch.tensor(()).new_full(size=(t.shape[0], 1), fill_value=0.0)
return out.reshape(-1, 1), type_BC
def ub1(t):
type_BC = ["func"]
out = torch.tensor(()).new_full(size=(t.shape[0], 1), fill_value=1.0)
return out.reshape(-1, 1), type_BC
list_of_BC = [[ub0, ub1]]
def u0(x):
u_0 = torch.tensor(()).new_full(size=(x.shape[0], 1), fill_value=0.0)
for i in range(u_0.shape[0]):
if x[i] > 0:
u_0[i, 0] = 1
return u_0.reshape(-1, 1)
def exact(inputs):
t = inputs[:, 0]
x = inputs[:, 1]
u = torch.tensor(()).new_full(size=(x.shape[0], 1), fill_value=0.0)
for i in range(u.shape[0]):
if 0 <= x[i] < t[i]:
u[i, 0] = x[i] / t[i]
elif x[i] >= t[i]:
u[i, 0] = 1
return u
def convert(vector, extrema_values):
vector = np.array(vector)
max_val = np.max(np.array(extrema_values), axis=1)
min_val = np.min(np.array(extrema_values), axis=1)
vector = vector * (max_val - min_val) + min_val
return torch.from_numpy(vector).type(torch.FloatTensor)
def compute_generalization_error(model, extrema, images_path=None):
model.eval()
file_ex = "Data/BurgersRar.txt"
exact_solution = np.loadtxt(file_ex)
print(exact_solution)
Exact = exact_solution[np.where((exact_solution[:, 2] == 0.0) & (exact_solution[:, 3] == v)), -1].reshape(-1, 1)
test_inp = torch.from_numpy(exact_solution[np.where((exact_solution[:, 2] == 0.0) & (exact_solution[:, 3] == v)), :2]).type(torch.FloatTensor)
print(test_inp.shape)
if v == 0.:
test_inp = torch.from_numpy(exact_solution[np.where((exact_solution[:, 2] == 0.0) & (exact_solution[:, 3] == 0.01 / pi)), :2]).type(torch.FloatTensor)
test_inp = test_inp.reshape(test_inp.shape[1], 2)
Exact = exact(test_inp).reshape(-1, 1).detach().numpy()
else:
test_inp = test_inp.reshape(Exact.shape[0], 2)
test_out = model(test_inp).detach().numpy()
assert (Exact.shape[1] == test_out.shape[1])
L2_test = np.sqrt(np.mean((Exact - test_out) ** 2))
print("Error Test:", L2_test)
rel_L2_test = L2_test / np.sqrt(np.mean(Exact ** 2))
print("Relative Error Test:", rel_L2_test)
if images_path is not None:
plt.figure()
plt.grid(True, which="both", ls=":")
plt.scatter(Exact, test_out)
plt.xlabel(r'Exact Values')
plt.ylabel(r'Predicted Values')
plt.savefig(images_path + "/Score.png", dpi=400)
return L2_test, rel_L2_test
def plotting(model, images_path, extrema, solid):
model.cpu()
model = model.eval()
file_ex = "Data/BurgersRar.txt"
exact_solution = np.loadtxt(file_ex)
time_steps = [0.0, 0.24, 0.5]
scale_vec = np.linspace(0.65, 1.55, len(time_steps))
fig = plt.figure()
plt.grid(True, which="both", ls=":")
for val, scale in zip(time_steps, scale_vec):
ex = exact_solution[np.where((exact_solution[:, 0] == val) & (exact_solution[:, 2] == 0.0) & (exact_solution[:, 3] == v)), -1].reshape(-1, 1)
inputs = torch.from_numpy(exact_solution[np.where((exact_solution[:, 0] == val) & (exact_solution[:, 2] == 0.0) & (exact_solution[:, 3] == v)), :2]).type(torch.FloatTensor)
if v == 0.:
inputs = torch.from_numpy(exact_solution[np.where((exact_solution[:, 0] == val) & (exact_solution[:, 2] == 0.0) & (exact_solution[:, 3] == 0.01 / pi)), :2]).type(
torch.FloatTensor)
inputs = inputs.reshape(inputs.shape[1], 2)
ex = exact(inputs).reshape(-1, 1).detach().numpy()
else:
inputs = inputs.reshape(ex.shape[0], 2)
x = torch.linspace(-1, 1, 100).reshape(-1, 1)
t = torch.tensor(()).new_full(size=(x.shape[0], 1), fill_value=val)
inputs_m = torch.cat([t, x], 1)
x_plot = inputs[:, 1].reshape(-1, 1)
plt.plot(x_plot.detach().numpy(), ex, linewidth=2, label=r'Exact, $t=$' + str(val), color=lighten_color('grey', scale), zorder=0)
plt.scatter(x.detach().numpy(), model(inputs_m).detach().numpy(), label=r'Predicted, $t=$' + str(val), marker="o", s=14, color=lighten_color('C0', scale),
zorder=10)
plt.xlabel(r'$x$')
plt.ylabel(r'u')
plt.legend()
plt.savefig(images_path + "/Rar_Samples.png", dpi=500)
| true | true |
1c2d40deae9c4867f2393d509669e5420a25eca0 | 410 | py | Python | pip-script.py | prudhvipaluvayi/user-register | 2460096043723d3c7a009319ecc6c756794cc8a4 | [
"BSD-3-Clause"
] | null | null | null | pip-script.py | prudhvipaluvayi/user-register | 2460096043723d3c7a009319ecc6c756794cc8a4 | [
"BSD-3-Clause"
] | null | null | null | pip-script.py | prudhvipaluvayi/user-register | 2460096043723d3c7a009319ecc6c756794cc8a4 | [
"BSD-3-Clause"
] | null | null | null | #!C:\Users\sys\registration\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| 31.538462 | 70 | 0.641463 |
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| true | true |
1c2d41716344b0434003b76094b8ad45ad1d49fb | 3,406 | py | Python | besspin/cwesEvaluation/resourceManagement/customCweScores/helpers.py | CTSRD-CHERI/BESSPIN-Tool-Suite | 26c0e7ce8cf6ffa6e33ea4ff9aa6194892ef6474 | [
"Apache-2.0"
] | null | null | null | besspin/cwesEvaluation/resourceManagement/customCweScores/helpers.py | CTSRD-CHERI/BESSPIN-Tool-Suite | 26c0e7ce8cf6ffa6e33ea4ff9aa6194892ef6474 | [
"Apache-2.0"
] | null | null | null | besspin/cwesEvaluation/resourceManagement/customCweScores/helpers.py | CTSRD-CHERI/BESSPIN-Tool-Suite | 26c0e7ce8cf6ffa6e33ea4ff9aa6194892ef6474 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
""" # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
helpers functions for scoring the CWE tests
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # """
import re
from besspin.base.utils.misc import *
from besspin.cwesEvaluation.scoreTests import SCORES, adjustToCustomScore
def getOsImage (lines,testNum=None):
    """Scan *lines* for an ``<OSIMAGE=name>`` marker and return the OS image name.

    Prints an error and returns the sentinel string ``"NoOsImageFound"``
    when no marker line is present. *testNum* only labels the error message.
    """
    warnText = "" if (testNum is None) else " in test_{0}.log".format(testNum)
    marker = re.compile(r'^<OSIMAGE=(?P<osImage>\w+)>$')
    for candidate in lines:
        match = marker.match(candidate)
        if match:
            return match.group('osImage')
    print ("Error: Could not determine <osImage>{0}.".format(warnText))
    return "NoOsImageFound"
def regPartitionTest (logLines,nParts,testNum=None):
    """Split *logLines* into numbered sections delimited by ``---PartXX:`` markers.

    Returns a dict mapping the 1-based part number to that part's lines.
    The last part has no real end marker, so a sentinel that never matches
    is used and the slice runs to the end of the log.
    """
    parts = {}
    for partNum in range(1, nParts + 1):
        startMarker = f"---Part{partNum:02d}:"
        if partNum < nParts:
            endMarker = f"---Part{partNum + 1:02d}:"
        else:
            endMarker = "NO-NEED-TO-DETECT-AN-ENDING"
        parts[partNum] = partitionLines(logLines, startMarker, endMarker, testNum=testNum, doPrintWarnings=False)
    return parts
def regPartitionTestFreeRTOS (logLines,nParts,testNum=None):
    """Split FreeRTOS *logLines* into numbered ``---PartXX:`` sections.

    The FreeRTOS logs use exactly the same part markers as the regular OS
    logs, so this delegates to :func:`regPartitionTest`, which returns a
    dict mapping the 1-based part number to that part's lines.
    """
    # The previous body was a verbatim copy of regPartitionTest; kept as a
    # separate entry point so FreeRTOS-specific handling can be added later
    # without touching callers.
    return regPartitionTest(logLines, nParts, testNum=testNum)
def partitionLines (lines,start,end,testNum=None,doPrintWarnings=True):
    """Return the inclusive slice of *lines* from the *start* marker through the *end* marker.

    :param lines: list of log lines to scan.
    :param start: substring marking the first line of the slice; if it occurs
        more than once, the slice begins at its last occurrence (a warning is
        printed for each repeat).
    :param end: substring, or iterable of substrings, marking the last line;
        the first matching line at or after *start* (possibly the start line
        itself) ends the slice.
    :param testNum: optional test number used only to label warnings.
    :param doPrintWarnings: whether the "end not found" warning is printed
        (it is always logged).
    :return: the slice including both marker lines; everything from *start*
        to the end of the list when *end* is never found; ``[]`` when *start*
        is never found.
    """
    warnText = "" if (testNum is None) else " in test_{0}.log".format(testNum)
    startFound = False
    iStart = 0
    iEnd = len(lines) - 1
    # Normalize `end` so a single marker and a collection of markers share one code path.
    endMarkers = [end] if isinstance(end, str) else end
    for iLine, line in enumerate(lines):
        if start in line:
            if startFound:
                print("Warning: part start <{0}> found again{1}.".format(start, warnText))
            startFound = True
            iStart = iLine
        # The end marker may sit on the same line as the start marker.
        if startFound and any(marker in line for marker in endMarkers):
            return lines[iStart:iLine + 1]
    if startFound:
        warnAndLog("partitionLines: part end <{0}> not found{1}.".format(end, warnText), doPrint=doPrintWarnings)
        return lines[iStart:iEnd + 1]
    else:
        warnAndLog("partitionLines: part start <{0}> not found{1}.".format(start, warnText))
        return []
def overallScore (listScores, testNum):
    """Collapse the per-part scores into one [test-name, score, details] row.

    The overall score is the minimum (worst) of the part scores; an empty
    score list marks the test as not implemented.
    """
    testName = "TEST-{0}".format(testNum)
    if not listScores:
        return [testName, SCORES.NOT_IMPLEMENTED, "Not Implemented"]
    details = ', '.join(f"p{iPart + 1:02d}:{partScore}" for iPart, partScore in enumerate(listScores))
    return [testName, SCORES.minScore(listScores), details]
def doesKeywordExist (lines, keyword):
    """Return True iff *keyword* occurs as a substring in any of *lines*."""
    return any(keyword in line for line in lines)
| 38.704545 | 112 | 0.586025 |
import re
from besspin.base.utils.misc import *
from besspin.cwesEvaluation.scoreTests import SCORES, adjustToCustomScore
def getOsImage (lines,testNum=None):
warnText = "" if (testNum is None) else " in test_{0}.log".format(testNum)
for line in lines:
lineMatch = re.match(r'^<OSIMAGE=(?P<osImage>\w+)>$',line)
if (lineMatch is not None):
return lineMatch.group('osImage')
print ("Error: Could not determine <osImage>{0}.".format(warnText))
return "NoOsImageFound"
def regPartitionTest (logLines,nParts,testNum=None):
partsLines = {}
for iPart in range(1,nParts+1):
start = f"---Part{iPart:02d}:"
end = f"---Part{iPart+1:02d}:" if (iPart<nParts) else "NO-NEED-TO-DETECT-AN-ENDING"
partsLines[iPart] = partitionLines(logLines,start,end,testNum=testNum,doPrintWarnings=False)
return partsLines
def regPartitionTestFreeRTOS (logLines,nParts,testNum=None):
partsLines = {}
for iPart in range(1,nParts+1):
start = f"---Part{iPart:02d}:"
end = "---Part{:02d}:".format(iPart+1) if (iPart<nParts) else "NO-NEED-TO-DETECT-AN-ENDING"
partsLines[iPart] = partitionLines(logLines,start,end,testNum=testNum,doPrintWarnings=False)
return partsLines
def partitionLines (lines,start,end,testNum=None,doPrintWarnings=True):
warnText = "" if (testNum is None) else " in test_{0}.log".format(testNum)
startFound = False
iStart = 0
isError = False
iEnd = len(lines)-1
for iLine,line in enumerate(lines):
if (start in line):
if (startFound):
print ("Warning: part start <{0}> found again{1}.".format(start,warnText))
startFound = True
iStart = iLine
if (startFound):
if (isinstance(end,str)):
if (end in line):
iEnd = iLine
return lines[iStart:iEnd+1]
else:
for xEnd in end:
if (xEnd in line):
iEnd = iLine
return lines[iStart:iEnd+1]
if (startFound):
warnAndLog ("partitionLines: part end <{0}> not found{1}.".format(end,warnText),doPrint=doPrintWarnings)
return lines[iStart:iEnd+1]
else:
warnAndLog ("partitionLines: part start <{0}> not found{1}.".format(start,warnText))
return []
def overallScore (listScores, testNum):
if (len(listScores)==0):
return ["TEST-{0}".format(testNum), SCORES.NOT_IMPLEMENTED, "Not Implemented"]
ovrScore = SCORES.minScore(listScores)
scoreString = ', '.join([f"p{i+1:02d}:{partScore}" for i,partScore in enumerate(listScores)])
return ["TEST-{0}".format(testNum), ovrScore, scoreString]
def doesKeywordExist (lines, keyword):
for line in lines:
if (keyword in line):
return True
return False
| true | true |
1c2d41c1ae9ef43b0330b91a6179f977d991f8fd | 181 | py | Python | blender_f3b/Logger.py | riccardobl/f3b | 87a79e047e3754b81c3290a5b4849ec21bbfa363 | [
"BSD-3-Clause"
] | 3 | 2020-10-12T03:15:06.000Z | 2020-11-27T16:01:23.000Z | blender_f3b/Logger.py | riccardobl/f3b | 87a79e047e3754b81c3290a5b4849ec21bbfa363 | [
"BSD-3-Clause"
] | null | null | null | blender_f3b/Logger.py | riccardobl/f3b | 87a79e047e3754b81c3290a5b4849ec21bbfa363 | [
"BSD-3-Clause"
] | null | null | null | def info( txt):
print("INFO: " + txt)
def warning(txt):
    """Print *txt* to stdout with a "WARNING: " prefix."""
    print("WARNING: " + txt)
def error(txt):
    """Print *txt* to stdout with an "ERROR: " prefix."""
    print("ERROR: " + txt)
def debug( txt):
print("DEBUG: " + txt) | 15.083333 | 28 | 0.541436 | def info( txt):
print("INFO: " + txt)
def warning( txt):
print("WARNING: " + txt)
def error( txt):
print("ERROR: " + txt)
def debug( txt):
print("DEBUG: " + txt) | true | true |
1c2d423828e8ffc7f33b8112b54d90c1438bfbec | 1,680 | py | Python | tkinter/__canvas__/canvas-bubbles/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | tkinter/__canvas__/canvas-bubbles/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | tkinter/__canvas__/canvas-bubbles/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z | import tkinter as tk
import random
class Bubble():
    """A colored oval on a tk canvas that drifts upward and wraps back to its start."""

    def __init__(self, canvas, x, y, size, color='red', start_offset_y=0):
        """Create the oval on *canvas*; *start_offset_y* staggers the initial height."""
        self.canvas = canvas
        self.x = x
        self.y = y + start_offset_y
        self.start_x = x
        self.start_y = y
        self.size = size
        self.color = color
        bbox = [self.x, self.y, self.x + self.size, self.y + self.size]
        self.circle = canvas.create_oval(bbox, outline=color, fill=color)

    def move(self):
        """Advance one animation step: rise 5 px with random horizontal jitter."""
        dx = random.randint(-5, 5)
        dy = -5
        self.canvas.move(self.circle, dx, dy)
        self.x, self.y = self.canvas.coords(self.circle)[:2]
        if self.y < -self.size:
            # Fully above the visible area: jump back to the starting position.
            self.x = self.start_x
            self.y = self.start_y
            self.canvas.coords(self.circle, self.x, self.y,
                               self.x + self.size, self.y + self.size)
def move():
    """Animation tick: advance every bubble, then reschedule itself in ~33 ms."""
    for bubble in bubbles:
        bubble.move()
    window.after(33, move)
# --- main ---
start_x = 125-20   # rectangle's left edge, horizontally centered in the 250 px window
start_y = 500-40   # rectangle's top edge, near the bottom of the 500 px window
window = tk.Tk()
window.geometry("250x500")
canvas = tk.Canvas(window, width=2500, height=500)  # NOTE(review): width 2500 looks like a typo for 250 -- confirm
canvas.grid(row=0, column=0, sticky='w')
bubbles = []
for i in range(10):
    # Alternate bubble colors between red and green.
    if i % 2 == 0:
        color = 'red'
    else:
        color = 'green'
    offset = i * 50  # stagger each bubble 50 px lower so they rise as a spaced column
    b = Bubble(canvas, start_x+10, start_y, 20, color, offset)
    bubbles.append(b)
coord = [start_x, start_y, start_x+40, start_y+40]
rect = canvas.create_rectangle(coord, outline="Blue", fill="Blue")
move()  # kick off the animation loop before entering the tk event loop
window.mainloop ()
| 23.661972 | 99 | 0.564881 | import tkinter as tk
import random
class Bubble():
def __init__(self, canvas, x, y, size, color='red', start_offset_y=0):
self.canvas = canvas
self.x = x
self.y = y + start_offset_y
self.start_x = x
self.start_y = y
self.size = size
self.color = color
rect = [self.x, self.y, self.x+self.size, self.y+self.size]
self.circle = canvas.create_oval(rect, outline=color, fill=color)
def move(self):
x_vel = random.randint(-5, 5)
y_vel = -5
self.canvas.move(self.circle, x_vel, y_vel)
coordinates = self.canvas.coords(self.circle)
self.x = coordinates[0]
self.y = coordinates[1]
if self.y < -self.size:
self.x = self.start_x
self.y = self.start_y
self.canvas.coords(self.circle, self.x, self.y, self.x + self.size, self.y + self.size)
def move():
for item in bubbles:
item.move()
window.after(33, move)
start_x = 125-20
start_y = 500-40
window = tk.Tk()
window.geometry("250x500")
canvas = tk.Canvas(window, width=2500, height=500)
canvas.grid(row=0, column=0, sticky='w')
bubbles = []
for i in range(10):
if i % 2 == 0:
color = 'red'
else:
color = 'green'
offset = i * 50
b = Bubble(canvas, start_x+10, start_y, 20, color, offset)
bubbles.append(b)
coord = [start_x, start_y, start_x+40, start_y+40]
rect = canvas.create_rectangle(coord, outline="Blue", fill="Blue")
move()
window.mainloop ()
| true | true |
1c2d43cd92bb7e556034be28f68e1600460f258f | 441 | py | Python | data/scripts/templates/object/mobile/skeleton/shared_astromech.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/mobile/skeleton/shared_astromech.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/mobile/skeleton/shared_astromech.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/skeleton/shared_astromech.iff"
result.attribute_template_id = 9
result.stfName("obj_n","unknown_creature")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 25.941176 | 64 | 0.725624 | true | true | |
1c2d43d9981510e3c8607abd268d861aeaaffa26 | 2,810 | py | Python | experiments/experiment_osp.py | Weilando/bachelor_playground | e1455029384f05a48ea5d792f76aa5d232fc1ddc | [
"Apache-2.0"
] | 1 | 2020-11-09T12:00:59.000Z | 2020-11-09T12:00:59.000Z | experiments/experiment_osp.py | Weilando/bachelor_playground | e1455029384f05a48ea5d792f76aa5d232fc1ddc | [
"Apache-2.0"
] | null | null | null | experiments/experiment_osp.py | Weilando/bachelor_playground | e1455029384f05a48ea5d792f76aa5d232fc1ddc | [
"Apache-2.0"
] | null | null | null | import time
from data.plotter_evaluation import format_time
from experiments.experiment_pruning import ExperimentPruning
from training.logger import log_from_medium
def mimic_next_prune_rate(curr_prune_rate, prune_rate_specs):
    """ Mimic the next pruning-rate from the current pruning-rate and the rate from specs. """
    # Prune the specs-rate fraction of the weights that are still unpruned.
    remaining_fraction = 1 - curr_prune_rate
    return curr_prune_rate + remaining_fraction * prune_rate_specs
class ExperimentOSP(ExperimentPruning):
    """
    Experiment with one-shot pruning.
    Train the nets with sparsity 100% once and apply pruning rates which mimic levels of sparsity as generated by IMP.
    Retrain and evaluate nets after each pruning step.
    """
    def __init__(self, specs, result_path='../data/results'):
        super(ExperimentOSP, self).__init__(specs, result_path)
    # noinspection DuplicatedCode
    def execute_experiment(self):
        """ Perform one-shot pruning and save accuracy- and loss-histories after each training.
        Retrain subnetworks to evaluate accuracies. """
        for n in range(self.specs.net_count):
            # Trained dense net; every pruning round restarts from a fresh copy of it.
            curr_original_net = None
            curr_prune_rate_conv, curr_prune_rate_fc = 0, 0
            for p in range(0, self.specs.prune_count + 1):
                tic = time.time()
                if p > 0:
                    # Rounds > 0: grow the one-shot rate so it mimics p iterative (IMP)
                    # pruning steps, then prune a copy of the trained dense net.
                    # reset=True presumably restores the surviving weights -- confirm in prune_net.
                    log_from_medium(self.specs.verbosity, f"Prune network #{n} in round {p}. ", False)
                    curr_prune_rate_conv = mimic_next_prune_rate(curr_prune_rate_conv, self.specs.prune_rate_conv)
                    curr_prune_rate_fc = mimic_next_prune_rate(curr_prune_rate_fc, self.specs.prune_rate_fc)
                    self.nets[n] = curr_original_net.get_new_instance(reset_weight=False)
                    self.nets[n].prune_net(curr_prune_rate_conv, curr_prune_rate_fc, reset=True)
                if n == 0:
                    # All nets share the same sparsity level, so record it once per round.
                    self.hists.sparsity[p] = self.nets[0].sparsity_report()[0]
                log_from_medium(self.specs.verbosity, f"Train network #{n} (sparsity {self.hists.sparsity[p]:6.4f}).")
                (self.nets[n], self.hists.train_loss[n, p], self.hists.val_loss[n, p], self.hists.val_acc[n, p],
                 self.hists.test_acc[n, p], self.stop_hists.histories[n].indices[p],
                 self.stop_hists.histories[n].state_dicts[p]) \
                    = self.trainer.train_net(self.nets[n], self.specs.epoch_count, self.specs.plot_step)
                if p == 0:
                    # Keep the freshly trained dense net as the base for all later rounds.
                    curr_original_net = self.nets[n].get_new_instance(reset_weight=False)
                toc = time.time()
                log_from_medium(self.specs.verbosity,
                                f"Final test-accuracy: {(self.hists.test_acc[n, p, -1]):6.4f} "
                                f"(took {format_time(toc - tic)}).")
            log_from_medium(self.specs.verbosity, "")
| 49.298246 | 118 | 0.640569 | import time
from data.plotter_evaluation import format_time
from experiments.experiment_pruning import ExperimentPruning
from training.logger import log_from_medium
def mimic_next_prune_rate(curr_prune_rate, prune_rate_specs):
return curr_prune_rate + (1 - curr_prune_rate) * prune_rate_specs
class ExperimentOSP(ExperimentPruning):
def __init__(self, specs, result_path='../data/results'):
super(ExperimentOSP, self).__init__(specs, result_path)
def execute_experiment(self):
for n in range(self.specs.net_count):
curr_original_net = None
curr_prune_rate_conv, curr_prune_rate_fc = 0, 0
for p in range(0, self.specs.prune_count + 1):
tic = time.time()
if p > 0:
log_from_medium(self.specs.verbosity, f"Prune network #{n} in round {p}. ", False)
curr_prune_rate_conv = mimic_next_prune_rate(curr_prune_rate_conv, self.specs.prune_rate_conv)
curr_prune_rate_fc = mimic_next_prune_rate(curr_prune_rate_fc, self.specs.prune_rate_fc)
self.nets[n] = curr_original_net.get_new_instance(reset_weight=False)
self.nets[n].prune_net(curr_prune_rate_conv, curr_prune_rate_fc, reset=True)
if n == 0:
self.hists.sparsity[p] = self.nets[0].sparsity_report()[0]
log_from_medium(self.specs.verbosity, f"Train network #{n} (sparsity {self.hists.sparsity[p]:6.4f}).")
(self.nets[n], self.hists.train_loss[n, p], self.hists.val_loss[n, p], self.hists.val_acc[n, p],
self.hists.test_acc[n, p], self.stop_hists.histories[n].indices[p],
self.stop_hists.histories[n].state_dicts[p]) \
= self.trainer.train_net(self.nets[n], self.specs.epoch_count, self.specs.plot_step)
if p == 0:
curr_original_net = self.nets[n].get_new_instance(reset_weight=False)
toc = time.time()
log_from_medium(self.specs.verbosity,
f"Final test-accuracy: {(self.hists.test_acc[n, p, -1]):6.4f} "
f"(took {format_time(toc - tic)}).")
log_from_medium(self.specs.verbosity, "")
| true | true |
1c2d44e00de29baa2689ef37a37ade0f8bcb32b2 | 1,821 | py | Python | target/classes/META-INF/resources/scripts/viewer/X/lib/selenium/selenium/webdriver/support/abstract_event_listener.py | chocobearz/xnat-image-viewer-plugin | 99b0aa2fd04e0390a3df920fa1c8b5569cdeb5ec | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | target/classes/META-INF/resources/scripts/viewer/X/lib/selenium/selenium/webdriver/support/abstract_event_listener.py | chocobearz/xnat-image-viewer-plugin | 99b0aa2fd04e0390a3df920fa1c8b5569cdeb5ec | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | target/classes/META-INF/resources/scripts/viewer/X/lib/selenium/selenium/webdriver/support/abstract_event_listener.py | chocobearz/xnat-image-viewer-plugin | 99b0aa2fd04e0390a3df920fa1c8b5569cdeb5ec | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #!/usr/bin/python
#
# Copyright 2011 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class AbstractEventListener(object):
    """
    Base class for WebDriver event listeners.

    Subclass this and override any subset of the hooks below; every
    default implementation is an intentional no-op, so a partial
    implementation is perfectly fine.
    """

    # --- navigation --------------------------------------------------
    def before_navigate_to(self, url, driver):
        pass

    def after_navigate_to(self, url, driver):
        pass

    def before_navigate_back(self, driver):
        pass

    def after_navigate_back(self, driver):
        pass

    def before_navigate_forward(self, driver):
        pass

    def after_navigate_forward(self, driver):
        pass

    # --- element lookup & interaction --------------------------------
    def before_find(self, by, value, driver):
        pass

    def after_find(self, by, value, driver):
        pass

    def before_click(self, element, driver):
        pass

    def after_click(self, element, driver):
        pass

    def before_change_value_of(self, element, driver):
        pass

    def after_change_value_of(self, element, driver):
        pass

    # --- script execution --------------------------------------------
    def before_execute_script(self, script, driver):
        pass

    def after_execute_script(self, script, driver):
        pass

    # --- session lifecycle -------------------------------------------
    def before_close(self, driver):
        pass

    def after_close(self, driver):
        pass

    def before_quit(self, driver):
        pass

    def after_quit(self, driver):
        pass

    def on_exception(self, exception, driver):
        pass
| 30.35 | 74 | 0.686985 |
class AbstractEventListener(object):
    """Event-listener interface: subclass and override any subset of these no-op hooks."""
    # Navigation hooks.
    def before_navigate_to(self, url, driver): pass
    def after_navigate_to(self, url, driver): pass
    def before_navigate_back(self, driver): pass
    def after_navigate_back(self, driver): pass
    def before_navigate_forward(self, driver): pass
    def after_navigate_forward(self, driver): pass
    # Element lookup / interaction hooks.
    def before_find(self, by, value, driver): pass
    def after_find(self, by, value, driver): pass
    def before_click(self, element, driver): pass
    def after_click(self, element, driver): pass
    def before_change_value_of(self, element, driver): pass
    def after_change_value_of(self, element, driver): pass
    # Script execution hooks.
    def before_execute_script(self, script, driver): pass
    def after_execute_script(self, script, driver): pass
    # Session lifecycle / error hooks.
    def before_close(self, driver): pass
    def after_close(self, driver): pass
    def before_quit(self, driver): pass
    def after_quit(self, driver): pass
    def on_exception(self, exception, driver): pass
| true | true |
1c2d4689af2e5bc73f7c8f72466004ca1d1424f4 | 1,781 | py | Python | tests/test_schematics_converters_discriminated_model.py | iterait/apistrap | e83460fa97f13a95a928971b0d2defe0ac611911 | [
"MIT"
] | 6 | 2018-09-06T18:32:48.000Z | 2021-05-28T01:03:32.000Z | tests/test_schematics_converters_discriminated_model.py | iterait/apistrap | e83460fa97f13a95a928971b0d2defe0ac611911 | [
"MIT"
] | 53 | 2018-09-06T16:16:53.000Z | 2021-05-19T14:36:58.000Z | tests/test_schematics_converters_discriminated_model.py | iterait/apistrap | e83460fa97f13a95a928971b0d2defe0ac611911 | [
"MIT"
] | null | null | null | import pytest
from schematics import Model
from schematics.types import FloatType, StringType, UnionType
from apistrap.flask import FlaskApistrap
from apistrap.schematics_converters import schematics_model_to_schema_object
from apistrap.types import DiscriminatedModelType
@pytest.fixture(scope="function")
def apistrap_extension():
    """Yield a fresh FlaskApistrap per test so registered schemas don't leak."""
    extension = FlaskApistrap()
    yield extension
class VariantA(Model):
    # Chosen when the discriminator field 'type' maps to "a"
    # (see the DiscriminatedModelType mapping in ModelWithDiscriminatedModel).
    type = StringType(required=True)
    string_field = StringType()
class VariantB(Model):
    # Chosen when the discriminator field 'type' maps to "b"; unlike
    # VariantA it carries a required float field.
    type = StringType(required=True)
    number_field = FloatType(required=True)
class ModelWithDiscriminatedModel(Model):
    # Union schema named "TwoVariantModel", discriminated on the 'type'
    # field: "a" -> VariantA, "b" -> VariantB.
    model_field = DiscriminatedModelType("TwoVariantModel", "type", {"a": VariantA, "b": VariantB})
def test_discriminated_model_no_apistrap():
    """A discriminated model cannot be converted without an Apistrap extension."""
    with pytest.raises(ValueError):
        # No extension given: the converter has nowhere to register the union schema.
        schematics_model_to_schema_object(ModelWithDiscriminatedModel)
def test_discriminated_models(apistrap_extension):
    """Conversion emits a $ref and registers the wrapper + anyOf/discriminator schemas."""
    schema_ref = schematics_model_to_schema_object(ModelWithDiscriminatedModel, apistrap_extension)
    assert schema_ref == {"$ref": "#/components/schemas/ModelWithDiscriminatedModel"}

    schemas = apistrap_extension.to_openapi_dict()["components"]["schemas"]

    expected_wrapper = {
        "title": "ModelWithDiscriminatedModel",
        "type": "object",
        "properties": {"model_field": {"$ref": "#/components/schemas/TwoVariantModel"}},
    }
    assert schemas["ModelWithDiscriminatedModel"] == expected_wrapper

    expected_union = {
        "anyOf": [
            {"$ref": "#/components/schemas/VariantA"},
            {"$ref": "#/components/schemas/VariantB"},
        ],
        "discriminator": {
            "propertyName": "type",
            "mapping": {
                "a": "#/components/schemas/VariantA",
                "b": "#/components/schemas/VariantB",
            },
        },
    }
    assert schemas["TwoVariantModel"] == expected_union
| 31.803571 | 105 | 0.705222 | import pytest
from schematics import Model
from schematics.types import FloatType, StringType, UnionType
from apistrap.flask import FlaskApistrap
from apistrap.schematics_converters import schematics_model_to_schema_object
from apistrap.types import DiscriminatedModelType
@pytest.fixture(scope="function")
def apistrap_extension():
    # Function-scoped: a fresh FlaskApistrap per test prevents schema leakage.
    yield FlaskApistrap()
class VariantA(Model):
    # Variant selected when the discriminator 'type' maps to "a".
    type = StringType(required=True)
    string_field = StringType()
class VariantB(Model):
    # Variant selected when the discriminator 'type' maps to "b".
    type = StringType(required=True)
    number_field = FloatType(required=True)
class ModelWithDiscriminatedModel(Model):
    # Discriminated union "TwoVariantModel": 'type' "a" -> VariantA, "b" -> VariantB.
    model_field = DiscriminatedModelType("TwoVariantModel", "type", {"a": VariantA, "b": VariantB})
def test_discriminated_model_no_apistrap():
    """Conversion must raise ValueError when no Apistrap extension is supplied."""
    with pytest.raises(ValueError):
        schematics_model_to_schema_object(ModelWithDiscriminatedModel)
def test_discriminated_models(apistrap_extension):
    """Conversion returns a $ref and registers the wrapper and union component schemas."""
    result = schematics_model_to_schema_object(ModelWithDiscriminatedModel, apistrap_extension)
    assert result == {"$ref": "#/components/schemas/ModelWithDiscriminatedModel"}
    # The referenced schemas must have been registered on the extension.
    definitions = apistrap_extension.to_openapi_dict()["components"]["schemas"]
    assert definitions["ModelWithDiscriminatedModel"] == {
        "title": "ModelWithDiscriminatedModel",
        "type": "object",
        "properties": {"model_field": {"$ref": "#/components/schemas/TwoVariantModel"},},
    }
    # The union schema carries both variants plus the discriminator mapping.
    assert definitions["TwoVariantModel"] == {
        "anyOf": [{"$ref": "#/components/schemas/VariantA"}, {"$ref": "#/components/schemas/VariantB"},],
        "discriminator": {
            "propertyName": "type",
            "mapping": {
                "a": "#/components/schemas/VariantA",
                "b": "#/components/schemas/VariantB"
            }
        },
    }
| true | true |
1c2d46b3128160103dd0892ca055304a1dbd98eb | 11,858 | py | Python | Gui_qt/mainwindow.py | 2rintf/noob-face-comparison | 000413f1a3cd837960196f7069a894bd272aa14b | [
"MIT"
] | null | null | null | Gui_qt/mainwindow.py | 2rintf/noob-face-comparison | 000413f1a3cd837960196f7069a894bd272aa14b | [
"MIT"
] | null | null | null | Gui_qt/mainwindow.py | 2rintf/noob-face-comparison | 000413f1a3cd837960196f7069a894bd272aa14b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated layout for the face-matching main window.

    ``setupUi`` builds the full widget tree on a ``QMainWindow``;
    ``retranslateUi`` (re-)applies every user-visible string.
    NOTE: originally generated from ``mainwindow.ui`` — regenerating that
    file would overwrite manual edits.  The repeated QFont and info-row
    construction has been factored into private helpers; all attribute
    names and widget properties are unchanged.
    """

    @staticmethod
    def _make_font(family=None, point_size=None):
        """Return a QFont with only the requested attributes applied."""
        font = QtGui.QFont()
        if family is not None:
            font.setFamily(family)
        if point_size is not None:
            font.setPointSize(point_size)
        return font

    def _add_info_row(self, layout_name, label_name, edit_name):
        """Append one 'label + read-only line edit' row to self.verticalLayout.

        Stores the layout, label, and edit on ``self`` under the same
        attribute names the generated code used.
        """
        row = QtWidgets.QHBoxLayout()
        row.setSpacing(6)
        row.setObjectName(layout_name)
        label = QtWidgets.QLabel(self.layoutWidget1)
        label.setFont(self._make_font("Noto Sans CJK HK", 12))
        label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
        label.setObjectName(label_name)
        row.addWidget(label)
        edit = QtWidgets.QLineEdit(self.layoutWidget1)
        edit.setReadOnly(True)
        edit.setObjectName(edit_name)
        row.addWidget(edit)
        self.verticalLayout.addLayout(row)
        setattr(self, layout_name, row)
        setattr(self, label_name, label)
        setattr(self, edit_name, edit)

    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1200, 657)
        self.centralWidget = QtWidgets.QWidget(MainWindow)
        self.centralWidget.setObjectName("centralWidget")
        # Left group: selected-picture preview plus select/match buttons.
        self.groupBox = QtWidgets.QGroupBox(self.centralWidget)
        self.groupBox.setGeometry(QtCore.QRect(40, 20, 581, 521))
        self.groupBox.setTitle("")
        self.groupBox.setObjectName("groupBox")
        self.layoutWidget = QtWidgets.QWidget(self.groupBox)
        self.layoutWidget.setGeometry(QtCore.QRect(130, 450, 292, 41))
        self.layoutWidget.setObjectName("layoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget)
        self.horizontalLayout.setContentsMargins(11, 11, 11, 11)
        self.horizontalLayout.setSpacing(6)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.selectBtn = QtWidgets.QPushButton(self.layoutWidget)
        self.selectBtn.setFont(self._make_font("Noto Sans Mono CJK SC", 14))
        self.selectBtn.setObjectName("selectBtn")
        self.horizontalLayout.addWidget(self.selectBtn)
        self.matchBtn = QtWidgets.QPushButton(self.layoutWidget)
        self.matchBtn.setFont(self._make_font("Noto Sans Mono CJK SC", 14))
        self.matchBtn.setObjectName("matchBtn")
        self.horizontalLayout.addWidget(self.matchBtn)
        self.selectedPic = QtWidgets.QLabel(self.groupBox)
        self.selectedPic.setGeometry(QtCore.QRect(40, 60, 480, 320))
        self.selectedPic.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
        self.selectedPic.setFrameShape(QtWidgets.QFrame.WinPanel)
        self.selectedPic.setFrameShadow(QtWidgets.QFrame.Plain)
        self.selectedPic.setText("")
        self.selectedPic.setObjectName("selectedPic")
        self.addBtnTest = QtWidgets.QPushButton(self.centralWidget)
        self.addBtnTest.setGeometry(QtCore.QRect(40, 550, 142, 37))
        self.addBtnTest.setFont(self._make_font("Noto Sans Mono CJK SC", 14))
        self.addBtnTest.setObjectName("addBtnTest")
        # Right group: match results — rank buttons, matched photo, person info.
        self.resultGroupBox = QtWidgets.QGroupBox(self.centralWidget)
        self.resultGroupBox.setGeometry(QtCore.QRect(630, 20, 551, 521))
        self.resultGroupBox.setFlat(False)
        self.resultGroupBox.setObjectName("resultGroupBox")
        self.splitter = QtWidgets.QSplitter(self.resultGroupBox)
        self.splitter.setGeometry(QtCore.QRect(10, 30, 400, 31))
        self.splitter.setOrientation(QtCore.Qt.Horizontal)
        self.splitter.setObjectName("splitter")
        # Five identical rank buttons (No.1 .. No.5) inside the splitter.
        for attr in ("firstBtn", "secondBtn", "thirdBtn", "fourthBtn", "fifthBtn"):
            button = QtWidgets.QPushButton(self.splitter)
            button.setFont(self._make_font(point_size=11))
            button.setObjectName(attr)
            setattr(self, attr, button)
        self.matchedPicShow = QtWidgets.QLabel(self.resultGroupBox)
        self.matchedPicShow.setGeometry(QtCore.QRect(10, 90, 251, 231))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.matchedPicShow.sizePolicy().hasHeightForWidth())
        self.matchedPicShow.setSizePolicy(sizePolicy)
        self.matchedPicShow.setFrameShape(QtWidgets.QFrame.WinPanel)
        self.matchedPicShow.setText("")
        self.matchedPicShow.setObjectName("matchedPicShow")
        self.layoutWidget1 = QtWidgets.QWidget(self.resultGroupBox)
        self.layoutWidget1.setGeometry(QtCore.QRect(271, 91, 241, 231))
        self.layoutWidget1.setObjectName("layoutWidget1")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget1)
        self.verticalLayout.setContentsMargins(11, 11, 11, 11)
        self.verticalLayout.setSpacing(6)
        self.verticalLayout.setObjectName("verticalLayout")
        # name / sex / age / phone / e-mail rows (same layout, different names).
        self._add_info_row("horizontalLayout_2", "label", "nameShow")
        self._add_info_row("horizontalLayout_6", "label_5", "sexShow")
        self._add_info_row("horizontalLayout_3", "label_2", "ageShow")
        self._add_info_row("horizontalLayout_4", "label_3", "phoneShow")
        self._add_info_row("horizontalLayout_5", "label_4", "emailShow")
        MainWindow.setCentralWidget(self.centralWidget)
        # Menu bar, tool bar, status bar, and the two menu actions.
        self.menuBar = QtWidgets.QMenuBar(MainWindow)
        self.menuBar.setGeometry(QtCore.QRect(0, 0, 1200, 23))
        self.menuBar.setObjectName("menuBar")
        self.menuFile = QtWidgets.QMenu(self.menuBar)
        self.menuFile.setObjectName("menuFile")
        MainWindow.setMenuBar(self.menuBar)
        self.mainToolBar = QtWidgets.QToolBar(MainWindow)
        self.mainToolBar.setObjectName("mainToolBar")
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
        self.statusBar = QtWidgets.QStatusBar(MainWindow)
        self.statusBar.setObjectName("statusBar")
        MainWindow.setStatusBar(self.statusBar)
        self.actionSearch_Face = QtWidgets.QAction(MainWindow)
        self.actionSearch_Face.setObjectName("actionSearch_Face")
        self.actionAdd_Face = QtWidgets.QAction(MainWindow)
        self.actionAdd_Face.setObjectName("actionAdd_Face")
        self.menuFile.addAction(self.actionSearch_Face)
        self.menuFile.addAction(self.actionAdd_Face)
        self.menuBar.addAction(self.menuFile.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply (or re-apply) all user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.selectBtn.setText(_translate("MainWindow", "选择图片..."))
        self.matchBtn.setText(_translate("MainWindow", "匹配"))
        self.addBtnTest.setText(_translate("MainWindow", "添加新人脸"))
        self.resultGroupBox.setTitle(_translate("MainWindow", "匹配结果"))
        self.firstBtn.setText(_translate("MainWindow", "No.1"))
        self.secondBtn.setText(_translate("MainWindow", "No.2"))
        self.thirdBtn.setText(_translate("MainWindow", "No.3"))
        self.fourthBtn.setText(_translate("MainWindow", "No.4"))
        self.fifthBtn.setText(_translate("MainWindow", "No.5"))
        self.label.setText(_translate("MainWindow", "姓名"))
        self.label_5.setText(_translate("MainWindow", "性别"))
        self.label_2.setText(_translate("MainWindow", "年龄"))
        self.label_3.setText(_translate("MainWindow", "手机"))
        self.label_4.setText(_translate("MainWindow", "邮箱"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.actionSearch_Face.setText(_translate("MainWindow", "Search Face"))
        self.actionAdd_Face.setText(_translate("MainWindow", "Add Face"))
| 51.112069 | 108 | 0.705262 |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated layout for the face-matching main window.

    ``setupUi`` builds the full widget tree on a ``QMainWindow``;
    ``retranslateUi`` (re-)applies every user-visible string.
    NOTE: originally generated from ``mainwindow.ui`` — regenerating that
    file would overwrite manual edits.  The repeated QFont and info-row
    construction has been factored into private helpers; all attribute
    names and widget properties are unchanged.
    """

    @staticmethod
    def _make_font(family=None, point_size=None):
        """Return a QFont with only the requested attributes applied."""
        font = QtGui.QFont()
        if family is not None:
            font.setFamily(family)
        if point_size is not None:
            font.setPointSize(point_size)
        return font

    def _add_info_row(self, layout_name, label_name, edit_name):
        """Append one 'label + read-only line edit' row to self.verticalLayout.

        Stores the layout, label, and edit on ``self`` under the same
        attribute names the generated code used.
        """
        row = QtWidgets.QHBoxLayout()
        row.setSpacing(6)
        row.setObjectName(layout_name)
        label = QtWidgets.QLabel(self.layoutWidget1)
        label.setFont(self._make_font("Noto Sans CJK HK", 12))
        label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
        label.setObjectName(label_name)
        row.addWidget(label)
        edit = QtWidgets.QLineEdit(self.layoutWidget1)
        edit.setReadOnly(True)
        edit.setObjectName(edit_name)
        row.addWidget(edit)
        self.verticalLayout.addLayout(row)
        setattr(self, layout_name, row)
        setattr(self, label_name, label)
        setattr(self, edit_name, edit)

    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1200, 657)
        self.centralWidget = QtWidgets.QWidget(MainWindow)
        self.centralWidget.setObjectName("centralWidget")
        # Left group: selected-picture preview plus select/match buttons.
        self.groupBox = QtWidgets.QGroupBox(self.centralWidget)
        self.groupBox.setGeometry(QtCore.QRect(40, 20, 581, 521))
        self.groupBox.setTitle("")
        self.groupBox.setObjectName("groupBox")
        self.layoutWidget = QtWidgets.QWidget(self.groupBox)
        self.layoutWidget.setGeometry(QtCore.QRect(130, 450, 292, 41))
        self.layoutWidget.setObjectName("layoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget)
        self.horizontalLayout.setContentsMargins(11, 11, 11, 11)
        self.horizontalLayout.setSpacing(6)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.selectBtn = QtWidgets.QPushButton(self.layoutWidget)
        self.selectBtn.setFont(self._make_font("Noto Sans Mono CJK SC", 14))
        self.selectBtn.setObjectName("selectBtn")
        self.horizontalLayout.addWidget(self.selectBtn)
        self.matchBtn = QtWidgets.QPushButton(self.layoutWidget)
        self.matchBtn.setFont(self._make_font("Noto Sans Mono CJK SC", 14))
        self.matchBtn.setObjectName("matchBtn")
        self.horizontalLayout.addWidget(self.matchBtn)
        self.selectedPic = QtWidgets.QLabel(self.groupBox)
        self.selectedPic.setGeometry(QtCore.QRect(40, 60, 480, 320))
        self.selectedPic.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
        self.selectedPic.setFrameShape(QtWidgets.QFrame.WinPanel)
        self.selectedPic.setFrameShadow(QtWidgets.QFrame.Plain)
        self.selectedPic.setText("")
        self.selectedPic.setObjectName("selectedPic")
        self.addBtnTest = QtWidgets.QPushButton(self.centralWidget)
        self.addBtnTest.setGeometry(QtCore.QRect(40, 550, 142, 37))
        self.addBtnTest.setFont(self._make_font("Noto Sans Mono CJK SC", 14))
        self.addBtnTest.setObjectName("addBtnTest")
        # Right group: match results — rank buttons, matched photo, person info.
        self.resultGroupBox = QtWidgets.QGroupBox(self.centralWidget)
        self.resultGroupBox.setGeometry(QtCore.QRect(630, 20, 551, 521))
        self.resultGroupBox.setFlat(False)
        self.resultGroupBox.setObjectName("resultGroupBox")
        self.splitter = QtWidgets.QSplitter(self.resultGroupBox)
        self.splitter.setGeometry(QtCore.QRect(10, 30, 400, 31))
        self.splitter.setOrientation(QtCore.Qt.Horizontal)
        self.splitter.setObjectName("splitter")
        # Five identical rank buttons (No.1 .. No.5) inside the splitter.
        for attr in ("firstBtn", "secondBtn", "thirdBtn", "fourthBtn", "fifthBtn"):
            button = QtWidgets.QPushButton(self.splitter)
            button.setFont(self._make_font(point_size=11))
            button.setObjectName(attr)
            setattr(self, attr, button)
        self.matchedPicShow = QtWidgets.QLabel(self.resultGroupBox)
        self.matchedPicShow.setGeometry(QtCore.QRect(10, 90, 251, 231))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.matchedPicShow.sizePolicy().hasHeightForWidth())
        self.matchedPicShow.setSizePolicy(sizePolicy)
        self.matchedPicShow.setFrameShape(QtWidgets.QFrame.WinPanel)
        self.matchedPicShow.setText("")
        self.matchedPicShow.setObjectName("matchedPicShow")
        self.layoutWidget1 = QtWidgets.QWidget(self.resultGroupBox)
        self.layoutWidget1.setGeometry(QtCore.QRect(271, 91, 241, 231))
        self.layoutWidget1.setObjectName("layoutWidget1")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget1)
        self.verticalLayout.setContentsMargins(11, 11, 11, 11)
        self.verticalLayout.setSpacing(6)
        self.verticalLayout.setObjectName("verticalLayout")
        # name / sex / age / phone / e-mail rows (same layout, different names).
        self._add_info_row("horizontalLayout_2", "label", "nameShow")
        self._add_info_row("horizontalLayout_6", "label_5", "sexShow")
        self._add_info_row("horizontalLayout_3", "label_2", "ageShow")
        self._add_info_row("horizontalLayout_4", "label_3", "phoneShow")
        self._add_info_row("horizontalLayout_5", "label_4", "emailShow")
        MainWindow.setCentralWidget(self.centralWidget)
        # Menu bar, tool bar, status bar, and the two menu actions.
        self.menuBar = QtWidgets.QMenuBar(MainWindow)
        self.menuBar.setGeometry(QtCore.QRect(0, 0, 1200, 23))
        self.menuBar.setObjectName("menuBar")
        self.menuFile = QtWidgets.QMenu(self.menuBar)
        self.menuFile.setObjectName("menuFile")
        MainWindow.setMenuBar(self.menuBar)
        self.mainToolBar = QtWidgets.QToolBar(MainWindow)
        self.mainToolBar.setObjectName("mainToolBar")
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
        self.statusBar = QtWidgets.QStatusBar(MainWindow)
        self.statusBar.setObjectName("statusBar")
        MainWindow.setStatusBar(self.statusBar)
        self.actionSearch_Face = QtWidgets.QAction(MainWindow)
        self.actionSearch_Face.setObjectName("actionSearch_Face")
        self.actionAdd_Face = QtWidgets.QAction(MainWindow)
        self.actionAdd_Face.setObjectName("actionAdd_Face")
        self.menuFile.addAction(self.actionSearch_Face)
        self.menuFile.addAction(self.actionAdd_Face)
        self.menuBar.addAction(self.menuFile.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply (or re-apply) all user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.selectBtn.setText(_translate("MainWindow", "选择图片..."))
        self.matchBtn.setText(_translate("MainWindow", "匹配"))
        self.addBtnTest.setText(_translate("MainWindow", "添加新人脸"))
        self.resultGroupBox.setTitle(_translate("MainWindow", "匹配结果"))
        self.firstBtn.setText(_translate("MainWindow", "No.1"))
        self.secondBtn.setText(_translate("MainWindow", "No.2"))
        self.thirdBtn.setText(_translate("MainWindow", "No.3"))
        self.fourthBtn.setText(_translate("MainWindow", "No.4"))
        self.fifthBtn.setText(_translate("MainWindow", "No.5"))
        self.label.setText(_translate("MainWindow", "姓名"))
        self.label_5.setText(_translate("MainWindow", "性别"))
        self.label_2.setText(_translate("MainWindow", "年龄"))
        self.label_3.setText(_translate("MainWindow", "手机"))
        self.label_4.setText(_translate("MainWindow", "邮箱"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.actionSearch_Face.setText(_translate("MainWindow", "Search Face"))
        self.actionAdd_Face.setText(_translate("MainWindow", "Add Face"))
| true | true |
1c2d47cf4218a38866687d183e6810ef6e53819e | 14,235 | py | Python | playground/position-sizing/balancer-and-plot/strategies/emacounter/__init__.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 38 | 2021-09-18T15:33:28.000Z | 2022-02-21T17:29:08.000Z | playground/position-sizing/balancer-and-plot/strategies/emacounter/__init__.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 4 | 2022-01-02T14:46:12.000Z | 2022-02-16T18:39:41.000Z | playground/position-sizing/balancer-and-plot/strategies/emacounter/__init__.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 11 | 2021-10-19T06:21:43.000Z | 2022-02-21T17:29:10.000Z | import jesse.helpers as jh
import jesse.indicators as ta
from jesse import utils
from jesse.services.selectors import get_all_trading_routes
from jesse.strategies import Strategy, cached
class emacounter(Strategy):
    def __init__(self):
        """Initialize strategy state, feature toggles, and the per-symbol DNA table."""
        super().__init__()
        # Win/loss streak bookkeeping used by the position-sizing logic.
        self.losecount = 0
        self.wincount = 0
        self.winlimit = 2
        self.lastwasprofitable = False
        self.multiplier = 1
        self.incr = True  # Martingale-like aggressive position sizing.
        # Feature toggles.
        self.donchianfilterenabled = False
        self.skipenabled = False  # If last trade was profitable, skip next trade.
        self.enablelong = True
        self.enableshort = True
        self.settp = False
        # Which entry of self.dnas below is active.
        self.dnaindex = 23
        # Stop / entry tracking.
        self.initialstop = 0
        self.laststop = 0
        self.entryprice = 0
        self.enabletrailingstop = False
        self.dpfilterenabled = True
        self.enablemanualstop = False
        self.atrlen = 12
        self.atrstop = 0.85
        self.firstrun = True
        self.days = 0
        # EMA-cross trend counters (see should_long).
        self.green_count = 4
        self.red_count = 4
        self.long_start = None
        self.short_start = None
        self.trend_long = False
        self.trend_short = False
        self.long_exit_start = 0
        self.short_exit_start = 0
        self.trend_long_exit = False
        self.trend_short_exit = False
        # Tuned parameter sets ("DNAs"), selected via self.dnaindex.
        # 'tpnl' and 'stop' are consumed divided by 1000 (see targetpnl/targetstop).
        self.dnas = {
            1: {'dna': 'vaJpC;g', 'tpnl': 296, 'stop': 87, 'donlen': 183, 'pmpsize': 47, 'fast': 6, 'slow': 44},
            2: {"dna": 'vaJpp;g', "tpnl": 296, 'tpnl2': 296, "stop": 87, "donlen": 183, "pmpsize": 93, "fast": 6,
                "slow": 44},
            21: {'dna': 'BTC2h', 'tpnl': 296, 'tpnl2': 296, 'stop': 24, 'trstop': 48, 'donlen': 183, 'pmpsize': 47,
                 'fast': 6, 'slow': 44},
            22: {'dna': 'BNB30min', 'tpnl': 96, 'tpnl2': 96, 'stop': 18, 'trstop': 68, 'donlen': 183, 'pmpsize': 38,
                 'fast': 6, 'slow': 44},  # BNB 30min
            23: {'dna': 'ETH30m', 'tpnl': 184, 'tpnl2': 184, 'stop': 34, 'trstop': 48, 'donlen': 183, 'pmpsize': 38,
                 'fast': 6, 'slow': 44},  # ETH 30min
            25: {"dna": 'ADA', "tpnl": 130, 'tpnl2': 296, "stop": 51, "donlen": 183, "pmpsize": 32, "fast": 6,
                 "slow": 44},
            255: {"dna": 'Generic', "tpnl": 120, 'tpnl2': 296, "stop": 51, "donlen": 183, "pmpsize": 32, "fast": 6,
                  "slow": 44},
            7: {"dna": 'v^JpF/g', "tpnl": 281, "stop": 87, "donlen": 183, "pmpsize": 50, "fast": 4, "slow": 44},
            16: {"dna": 'vj3?o1l', "tpnl": 338, "stop": 35, "donlen": 64, "pmpsize": 92, "fast": 4, "slow": 46},
            # the ADA period and pmp work well
            18: {"dna": 'vaQpJ;g', "tpnl": 296, "stop": 103, "donlen": 183, "pmpsize": 54, "fast": 6, "slow": 44},
            20: {"dna": 'vahpJ;g', "tpnl": 296, "stop": 156, "donlen": 183, "pmpsize": 54, "fast": 6, "slow": 44},
            3: {"dna": 'vXJp.._', "tpnl": 253, "stop": 87, "donlen": 183, "pmpsize": 26, "fast": 3, "slow": 41},
            4: {"dna": 'vXJp5._', "tpnl": 253, "stop": 87, "donlen": 183, "pmpsize": 33, "fast": 3, "slow": 41},
            5: {"dna": 'sYon51`', "tpnl": 258, "stop": 172, "donlen": 178, "pmpsize": 33, "fast": 4, "slow": 42},
            6: {"dna": 'vdfp5.)', "tpnl": 310, "stop": 151, "donlen": 183, "pmpsize": 33, "fast": 3, "slow": 21},
            61: {"dna": 'TRX', "tpnl": 290, "stop": 28, "donlen": 183, "pmpsize": 33, "fast": 3, "slow": 21},
            8: {"dna": 'vY\\n51`', "tpnl": 258, "stop": 128, "donlen": 178, "pmpsize": 33, "fast": 4, "slow": 42},
            9: {"dna": 'Z^JpF/Y', "tpnl": 281, "stop": 87, "donlen": 183, "pmpsize": 50, "fast": 4, "slow": 39},
            10: {"dna": 'kd9?;1H', "tpnl": 310, "stop": 49, "donlen": 64, "pmpsize": 39, "fast": 4, "slow": 33},
            11: {"dna": 'vdfp5@l', "tpnl": 310, "stop": 151, "donlen": 183, "pmpsize": 33, "fast": 7, "slow": 46},
            # the ADA 2h period might work
            111: {"dna": 'vdfp5@l', "tpnl": 290, "stop": 28, "donlen": 183, "pmpsize": 33, "fast": 7, "slow": 46},
            12: {"dna": 'vdds59l', "tpnl": 310, "stop": 147, "donlen": 190, "pmpsize": 33, "fast": 6, "slow": 46},
            13: {"dna": 'vVJ/2._', "tpnl": 243, "stop": 87, "donlen": 25, "pmpsize": 30, "fast": 3, "slow": 41},
            14: {"dna": 'vN3BO,f', "tpnl": 205, "stop": 35, "donlen": 71, "pmpsize": 59, "fast": 3, "slow": 44},
            15: {"dna": 'vdos5>l', "tpnl": 310, "stop": 172, "donlen": 190, "pmpsize": 33, "fast": 7, "slow": 46},
            17: {"dna": 'vqopR,]', "tpnl": 372, "stop": 172, "donlen": 183, "pmpsize": 63, "fast": 3, "slow": 40},
            19: {"dna": 'v^JpF/U', "tpnl": 281, "stop": 87, "donlen": 183, "pmpsize": 50, "fast": 4, "slow": 38},
            191: {"dna": 'v^JpF/U', "tpnl": 140, "stop": 70, "donlen": 183, "pmpsize": 43, "fast": 7, "slow": 46},
            24: {'dna': 'BTC2h', 'tpnl': 296, 'tpnl2': 296, 'stop': 24, 'trstop': 48, 'donlen': 183, 'pmpsize': 47,
                 'fast': 6, 'slow': 44},
            999: {"dna": 'vdos5>l', "tpnl": 310, "stop": 172, "donlen": 190, "pmpsize": 33, "fast": 7, "slow": 46},
            # 22: {'dna': 'BNB30min', 'tpnl': 96, 'stop': 20, 'trstop': 68, 'donlen': 183, 'pmpsize': 38, 'fast': 6, 'slow': 44}, # BNB 30min
        }
@property
def targetpnl(self):
return self.dnas[self.dnaindex]['tpnl'] / 1000
@property
def targetstop(self):
return self.dnas[self.dnaindex]['stop'] / 1000
@property
def pumpsize(self):
return self.dnas[self.dnaindex]['pmpsize']
@property
def ewofast(self):
return self.dnas[self.dnaindex]['fast']
@property
def ewoslow(self):
return self.dnas[self.dnaindex]['slow']
    @property
    def limit(self):
        # Hard-coded constant; its consumer is not visible in this chunk — TODO confirm usage.
        return 4
    @property
    def carpan(self):
        # 'carpan' (Turkish: multiplier); hard-coded — its consumer is not visible here.
        return 33
@property
def pumplookback(self):
return 3
@property
@cached
def positionsize(self):
if len(get_all_trading_routes()) < 3:
return 10 * len(get_all_trading_routes())
elif self.symbol.startswith('ETH-'):
return 10 * len(get_all_trading_routes())
else:
return 16 * len(get_all_trading_routes())
@property
@cached
def slow_ema(self):
return ta.ema(self.candles[-120:], self.ewoslow, sequential=True)
@property
@cached
def fast_ema(self):
return ta.ema(self.candles[-60:], self.ewofast, sequential=True)
@property
@cached
def ema_exit_hl2(self):
return ta.ema(self.candles[-120:], period=30, source_type='hl2', sequential=False)
@cached
def isdildo(self, index):
open = self.candles[:, 1][index]
close = self.candles[:, 2][index]
return abs(open - close) * 100 / open > self.pumpsize / 10
@property
@cached
def dumpump(self):
open = self.candles[:, 1][-self.pumplookback]
close = self.candles[:, 2][-1]
multibardildo = abs(open - close) * 100 / open > self.pumpsize / 10
return multibardildo or self.isdildo(-1) or self.isdildo(-2) or self.isdildo(-3)
    def should_long(self) -> bool:
        """Jesse hook: go long once the fast EMA has held above the slow EMA
        for at least two candles since the bullish cross."""
        # NOTE(review): `dp` is computed but never used below -- the pump/dump
        # filter looks wired up here but is not actually applied. Confirm intent.
        dp = False
        if self.dpfilterenabled:
            dp = self.dumpump
        # Bullish cross: start a fresh long trend and reset all short-side state.
        if utils.crossed(self.fast_ema, self.slow_ema, direction='above', sequential=False):
            self.long_start = self.candles[:, 0][-1]
            self.trend_long = True
            self.green_count += 1
            self.short_start = None
            self.trend_short = False
            self.red_count = 0
        # Count each later candle that keeps the fast EMA above the slow EMA.
        if self.trend_long and self.candles[:, 0][-1] != self.long_start and self.fast_ema[-1] > self.slow_ema[-1]:
            self.green_count += 1
        if False and self.long_start:  # debug logging, disabled
            print('\nLong start', jh.timestamp_to_time(self.long_start), '-->',
                  jh.timestamp_to_time(self.candles[:, 0][-1]), 'Trend long', self.trend_long, 'Trend short', self.trend_short, 'greens:',
                  self.green_count, 'reds:', self.red_count, ' | L/S', self.is_long, self.is_short)
        # Sanity check: both trend flags set at once indicates a state bug.
        if self.trend_long and self.trend_short:
            print('LONG & SHORT!')
        return self.trend_long and self.green_count > 1
    def should_short(self) -> bool:
        """Jesse hook: go short once the fast EMA has held below the slow EMA
        for at least two candles since the bearish cross."""
        # Bearish cross: start a fresh short trend and reset all long-side state.
        if utils.crossed(self.fast_ema, self.slow_ema, direction='below', sequential=False):
            self.short_start = self.candles[:, 0][-1]
            self.trend_short = True
            self.red_count += 1
            self.long_start = None
            self.trend_long = False
            self.green_count = 0
        # Count each later candle that keeps the fast EMA below the slow EMA.
        if self.trend_short and self.candles[:, 0][-1] != self.short_start and self.fast_ema[-1] < self.slow_ema[-1]:
            self.red_count += 1
        if False and self.short_start:  # debug logging, disabled
            print('\nShort start', jh.timestamp_to_time(self.short_start), '-->',
                  jh.timestamp_to_time(self.candles[:, 0][-1]), 'Trend short:', self.trend_short, ' | Trend Long:',
                  self.trend_long, 'reds:', self.red_count, 'greens:', self.green_count, ' | L/S', self.is_long, self.is_short)
        return self.trend_short and self.red_count > 1
@property
def calcqty(self):
if self.incr and not self.lastwasprofitable and self.losecount <= self.limit:
return (self.capital / self.positionsize) * self.multiplier
return self.capital / self.positionsize
def go_long(self):
self.entryprice = self.price
sl = self.price - (self.price * self.targetstop)
tp = self.price + (self.price * self.targetpnl)
qty = (utils.size_to_qty(self.calcqty, self.price, fee_rate=self.fee_rate) * self.leverage) + 0.005
# print('--->', self.symbol, 'Long position size:', round(self.calcqty, 2), 'USD, Capital:', round(self.capital, 2), 'Qty:', qty)
self.buy = qty, self.price
if not self.enablemanualstop:
self.stop_loss = qty, sl
if self.settp:
self.take_profit = qty, tp
self.initialstop = sl
self.laststop = sl
def go_short(self):
self.entryprice = self.price
sl = self.price + (self.price * self.targetstop)
tp = self.price - (self.price * self.targetpnl)
qty = (utils.size_to_qty(self.calcqty, self.price, fee_rate=self.fee_rate) * self.leverage) + 0.005
# print('--->', self.symbol, 'Short position size:', round(self.calcqty, 2), 'USD, Capital:', round(self.capital, 2), 'Qty:', qty)
self.sell = qty, self.price
if not self.enablemanualstop:
self.stop_loss = qty, sl
if self.settp:
self.take_profit = qty, tp
self.initialstop = sl
self.laststop = sl
    def update_position(self):
        """Jesse hook: manage an open position every candle.

        Exits on: hard profit target, a confirmed opposite EMA cross (two or
        more candles past the cross), or price crossing the hl2 exit EMA.
        """
        # Hard take-profit once unleveraged PnL exceeds the dna target.
        if self.position.pnl_percentage / self.position.leverage > (self.targetpnl * 100):
            self.liquidate()
        # Long exit: a bearish cross starts an exit trend ...
        if self.is_long and utils.crossed(self.fast_ema, self.slow_ema, direction='below', sequential=False) and not self.trend_long_exit:
            self.long_exit_start = self.candles[:, 0][-1]
            self.trend_long_exit = True
            self.red_count += 1
        # ... and each following bearish candle confirms it.
        if self.trend_long_exit and self.candles[:, 0][-1] != self.long_exit_start and self.fast_ema[-1] < self.slow_ema[-1]:
            self.red_count += 1
        if self.trend_long_exit and self.red_count > 1:
            self.liquidate()
        # --------------------
        # Short exit: mirror image of the long-exit logic above.
        if self.is_short and utils.crossed(self.fast_ema, self.slow_ema, direction='above', sequential=False) and not self.trend_short_exit:
            self.short_exit_start = self.candles[:, 0][-1]
            self.trend_short_exit = True
            self.green_count += 1
        if self.trend_short_exit and self.candles[:, 0][-1] != self.short_exit_start and self.fast_ema[-1] > self.slow_ema[-1]:
            self.green_count += 1
        if self.trend_short_exit and self.green_count > 1:
            self.liquidate()
        # NOTE(review): unconditional debug print on every update -- likely leftover.
        print('\n', jh.timestamp_to_time(self.candles[:, 0][-1]), 'Trend long', self.trend_long, 'Trend short', self.trend_short, 'greens:',
              self.green_count, 'reds:', self.red_count, ' | L/S', self.is_long, self.is_short)
        # Trailing exit against the 30-period hl2 EMA.
        if self.is_long and self.close < self.ema_exit_hl2:
            self.liquidate()
        if self.is_short and self.close > self.ema_exit_hl2:
            self.liquidate()
        # c. Emergency exit! Close position at trend reversal (currently disabled).
        if utils.crossed(self.fast_ema, self.slow_ema, sequential=False):
            pass
            # self.liquidate()
def on_stop_loss(self, order):
self.lastwasprofitable = False
self.losecount += 1
self.wincount = 0
self.multiplier = self.multiplier * (1 + (self.carpan / 50))
def on_take_profit(self, order):
self.lastwasprofitable = True
self.wincount += 1
self.losecount = 0
self.multiplier = 1
def before(self):
if self.firstrun:
self.runonce()
def runonce(self):
if self.symbol.startswith('BTC'):
self.dnaindex = 21
if self.symbol.startswith('BNB'):
self.dnaindex = 22
if self.symbol.startswith('ETH'):
self.dnaindex = 23
if self.symbol.startswith('TRX-'):
self.dnaindex = 999 # 6
if self.symbol.startswith('ADA-'):
self.dnaindex = 25
if self.symbol.startswith('LTC-'):
self.dnaindex = 19
if self.symbol.startswith('NEO-'):
self.dnaindex = 8
if self.symbol.startswith('XRP-'):
self.dnaindex = 12
if self.symbol.startswith('QTUM-'):
self.dnaindex = 15
# print('\nFirst run!', self.symbol, 'Dna index: ', self.dnaindex)
self.firstrun = False
    def should_cancel(self) -> bool:
        """Jesse hook -- always opt in to cancelling an unfilled entry order.

        NOTE(review): exactly when the framework consults this hook is not
        visible here; this strategy simply always returns True.
        """
        return True
def on_open_position(self, order):
self.long_start = None
self.trend_long = False
self.green_count = 0
self.short_start = None
self.trend_short = False
self.red_count = 0
self.trend_short_exit = False
self.trend_long_exit = False
self.long_exit_start = 0
self.short_exit_start = 0
| 41.26087 | 141 | 0.559185 | import jesse.helpers as jh
import jesse.indicators as ta
from jesse import utils
from jesse.services.selectors import get_all_trading_routes
from jesse.strategies import Strategy, cached
class emacounter(Strategy):
def __init__(self):
super().__init__()
self.losecount = 0
self.wincount = 0
self.winlimit = 2
self.lastwasprofitable = False
self.multiplier = 1
self.incr = True
self.donchianfilterenabled = False
self.skipenabled = False
self.enablelong = True
self.enableshort = True
self.settp = False
self.dnaindex = 23
self.initialstop = 0
self.laststop = 0
self.entryprice = 0
self.enabletrailingstop = False
self.dpfilterenabled = True
self.enablemanualstop = False
self.atrlen = 12
self.atrstop = 0.85
self.firstrun = True
self.days = 0
self.green_count = 4
self.red_count = 4
self.long_start = None
self.short_start = None
self.trend_long = False
self.trend_short = False
self.long_exit_start = 0
self.short_exit_start = 0
self.trend_long_exit = False
self.trend_short_exit = False
self.dnas = {
1: {'dna': 'vaJpC;g', 'tpnl': 296, 'stop': 87, 'donlen': 183, 'pmpsize': 47, 'fast': 6, 'slow': 44},
2: {"dna": 'vaJpp;g', "tpnl": 296, 'tpnl2': 296, "stop": 87, "donlen": 183, "pmpsize": 93, "fast": 6,
"slow": 44},
21: {'dna': 'BTC2h', 'tpnl': 296, 'tpnl2': 296, 'stop': 24, 'trstop': 48, 'donlen': 183, 'pmpsize': 47,
'fast': 6, 'slow': 44},
22: {'dna': 'BNB30min', 'tpnl': 96, 'tpnl2': 96, 'stop': 18, 'trstop': 68, 'donlen': 183, 'pmpsize': 38,
'fast': 6, 'slow': 44},
23: {'dna': 'ETH30m', 'tpnl': 184, 'tpnl2': 184, 'stop': 34, 'trstop': 48, 'donlen': 183, 'pmpsize': 38,
'fast': 6, 'slow': 44},
25: {"dna": 'ADA', "tpnl": 130, 'tpnl2': 296, "stop": 51, "donlen": 183, "pmpsize": 32, "fast": 6,
"slow": 44},
255: {"dna": 'Generic', "tpnl": 120, 'tpnl2': 296, "stop": 51, "donlen": 183, "pmpsize": 32, "fast": 6,
"slow": 44},
7: {"dna": 'v^JpF/g', "tpnl": 281, "stop": 87, "donlen": 183, "pmpsize": 50, "fast": 4, "slow": 44},
16: {"dna": 'vj3?o1l', "tpnl": 338, "stop": 35, "donlen": 64, "pmpsize": 92, "fast": 4, "slow": 46},
18: {"dna": 'vaQpJ;g', "tpnl": 296, "stop": 103, "donlen": 183, "pmpsize": 54, "fast": 6, "slow": 44},
20: {"dna": 'vahpJ;g', "tpnl": 296, "stop": 156, "donlen": 183, "pmpsize": 54, "fast": 6, "slow": 44},
3: {"dna": 'vXJp.._', "tpnl": 253, "stop": 87, "donlen": 183, "pmpsize": 26, "fast": 3, "slow": 41},
4: {"dna": 'vXJp5._', "tpnl": 253, "stop": 87, "donlen": 183, "pmpsize": 33, "fast": 3, "slow": 41},
5: {"dna": 'sYon51`', "tpnl": 258, "stop": 172, "donlen": 178, "pmpsize": 33, "fast": 4, "slow": 42},
6: {"dna": 'vdfp5.)', "tpnl": 310, "stop": 151, "donlen": 183, "pmpsize": 33, "fast": 3, "slow": 21},
61: {"dna": 'TRX', "tpnl": 290, "stop": 28, "donlen": 183, "pmpsize": 33, "fast": 3, "slow": 21},
8: {"dna": 'vY\\n51`', "tpnl": 258, "stop": 128, "donlen": 178, "pmpsize": 33, "fast": 4, "slow": 42},
9: {"dna": 'Z^JpF/Y', "tpnl": 281, "stop": 87, "donlen": 183, "pmpsize": 50, "fast": 4, "slow": 39},
10: {"dna": 'kd9?;1H', "tpnl": 310, "stop": 49, "donlen": 64, "pmpsize": 39, "fast": 4, "slow": 33},
11: {"dna": 'vdfp5@l', "tpnl": 310, "stop": 151, "donlen": 183, "pmpsize": 33, "fast": 7, "slow": 46},
111: {"dna": 'vdfp5@l', "tpnl": 290, "stop": 28, "donlen": 183, "pmpsize": 33, "fast": 7, "slow": 46},
12: {"dna": 'vdds59l', "tpnl": 310, "stop": 147, "donlen": 190, "pmpsize": 33, "fast": 6, "slow": 46},
13: {"dna": 'vVJ/2._', "tpnl": 243, "stop": 87, "donlen": 25, "pmpsize": 30, "fast": 3, "slow": 41},
14: {"dna": 'vN3BO,f', "tpnl": 205, "stop": 35, "donlen": 71, "pmpsize": 59, "fast": 3, "slow": 44},
15: {"dna": 'vdos5>l', "tpnl": 310, "stop": 172, "donlen": 190, "pmpsize": 33, "fast": 7, "slow": 46},
17: {"dna": 'vqopR,]', "tpnl": 372, "stop": 172, "donlen": 183, "pmpsize": 63, "fast": 3, "slow": 40},
19: {"dna": 'v^JpF/U', "tpnl": 281, "stop": 87, "donlen": 183, "pmpsize": 50, "fast": 4, "slow": 38},
191: {"dna": 'v^JpF/U', "tpnl": 140, "stop": 70, "donlen": 183, "pmpsize": 43, "fast": 7, "slow": 46},
24: {'dna': 'BTC2h', 'tpnl': 296, 'tpnl2': 296, 'stop': 24, 'trstop': 48, 'donlen': 183, 'pmpsize': 47,
'fast': 6, 'slow': 44},
999: {"dna": 'vdos5>l', "tpnl": 310, "stop": 172, "donlen": 190, "pmpsize": 33, "fast": 7, "slow": 46},
@property
def targetpnl(self):
return self.dnas[self.dnaindex]['tpnl'] / 1000
@property
def targetstop(self):
return self.dnas[self.dnaindex]['stop'] / 1000
@property
def pumpsize(self):
return self.dnas[self.dnaindex]['pmpsize']
@property
def ewofast(self):
return self.dnas[self.dnaindex]['fast']
@property
def ewoslow(self):
return self.dnas[self.dnaindex]['slow']
@property
def limit(self):
return 4
@property
def carpan(self):
return 33
@property
def pumplookback(self):
return 3
@property
@cached
def positionsize(self):
if len(get_all_trading_routes()) < 3:
return 10 * len(get_all_trading_routes())
elif self.symbol.startswith('ETH-'):
return 10 * len(get_all_trading_routes())
else:
return 16 * len(get_all_trading_routes())
@property
@cached
def slow_ema(self):
return ta.ema(self.candles[-120:], self.ewoslow, sequential=True)
@property
@cached
def fast_ema(self):
return ta.ema(self.candles[-60:], self.ewofast, sequential=True)
@property
@cached
def ema_exit_hl2(self):
return ta.ema(self.candles[-120:], period=30, source_type='hl2', sequential=False)
@cached
def isdildo(self, index):
open = self.candles[:, 1][index]
close = self.candles[:, 2][index]
return abs(open - close) * 100 / open > self.pumpsize / 10
@property
@cached
def dumpump(self):
open = self.candles[:, 1][-self.pumplookback]
close = self.candles[:, 2][-1]
multibardildo = abs(open - close) * 100 / open > self.pumpsize / 10
return multibardildo or self.isdildo(-1) or self.isdildo(-2) or self.isdildo(-3)
def should_long(self) -> bool:
dp = False
if self.dpfilterenabled:
dp = self.dumpump
if utils.crossed(self.fast_ema, self.slow_ema, direction='above', sequential=False):
self.long_start = self.candles[:, 0][-1]
self.trend_long = True
self.green_count += 1
self.short_start = None
self.trend_short = False
self.red_count = 0
if self.trend_long and self.candles[:, 0][-1] != self.long_start and self.fast_ema[-1] > self.slow_ema[-1]:
self.green_count += 1
if False and self.long_start:
print('\nLong start', jh.timestamp_to_time(self.long_start), '-->',
jh.timestamp_to_time(self.candles[:, 0][-1]), 'Trend long', self.trend_long, 'Trend short', self.trend_short, 'greens:',
self.green_count, 'reds:', self.red_count, ' | L/S', self.is_long, self.is_short)
if self.trend_long and self.trend_short:
print('LONG & SHORT!')
return self.trend_long and self.green_count > 1
def should_short(self) -> bool:
if utils.crossed(self.fast_ema, self.slow_ema, direction='below', sequential=False):
self.short_start = self.candles[:, 0][-1]
self.trend_short = True
self.red_count += 1
self.long_start = None
self.trend_long = False
self.green_count = 0
if self.trend_short and self.candles[:, 0][-1] != self.short_start and self.fast_ema[-1] < self.slow_ema[-1]:
self.red_count += 1
if False and self.short_start:
print('\nShort start', jh.timestamp_to_time(self.short_start), '-->',
jh.timestamp_to_time(self.candles[:, 0][-1]), 'Trend short:', self.trend_short, ' | Trend Long:',
self.trend_long, 'reds:', self.red_count, 'greens:', self.green_count, ' | L/S', self.is_long, self.is_short)
return self.trend_short and self.red_count > 1
@property
def calcqty(self):
if self.incr and not self.lastwasprofitable and self.losecount <= self.limit:
return (self.capital / self.positionsize) * self.multiplier
return self.capital / self.positionsize
def go_long(self):
self.entryprice = self.price
sl = self.price - (self.price * self.targetstop)
tp = self.price + (self.price * self.targetpnl)
qty = (utils.size_to_qty(self.calcqty, self.price, fee_rate=self.fee_rate) * self.leverage) + 0.005
self.buy = qty, self.price
if not self.enablemanualstop:
self.stop_loss = qty, sl
if self.settp:
self.take_profit = qty, tp
self.initialstop = sl
self.laststop = sl
def go_short(self):
self.entryprice = self.price
sl = self.price + (self.price * self.targetstop)
tp = self.price - (self.price * self.targetpnl)
qty = (utils.size_to_qty(self.calcqty, self.price, fee_rate=self.fee_rate) * self.leverage) + 0.005
self.sell = qty, self.price
if not self.enablemanualstop:
self.stop_loss = qty, sl
if self.settp:
self.take_profit = qty, tp
self.initialstop = sl
self.laststop = sl
def update_position(self):
if self.position.pnl_percentage / self.position.leverage > (self.targetpnl * 100):
self.liquidate()
if self.is_long and utils.crossed(self.fast_ema, self.slow_ema, direction='below', sequential=False) and not self.trend_long_exit:
self.long_exit_start = self.candles[:, 0][-1]
self.trend_long_exit = True
self.red_count += 1
if self.trend_long_exit and self.candles[:, 0][-1] != self.long_exit_start and self.fast_ema[-1] < self.slow_ema[-1]:
self.red_count += 1
if self.trend_long_exit and self.red_count > 1:
self.liquidate()
if self.is_short and utils.crossed(self.fast_ema, self.slow_ema, direction='above', sequential=False) and not self.trend_short_exit:
self.short_exit_start = self.candles[:, 0][-1]
self.trend_short_exit = True
self.green_count += 1
if self.trend_short_exit and self.candles[:, 0][-1] != self.short_exit_start and self.fast_ema[-1] > self.slow_ema[-1]:
self.green_count += 1
if self.trend_short_exit and self.green_count > 1:
self.liquidate()
print('\n', jh.timestamp_to_time(self.candles[:, 0][-1]), 'Trend long', self.trend_long, 'Trend short', self.trend_short, 'greens:',
self.green_count, 'reds:', self.red_count, ' | L/S', self.is_long, self.is_short)
if self.is_long and self.close < self.ema_exit_hl2:
self.liquidate()
if self.is_short and self.close > self.ema_exit_hl2:
self.liquidate()
if utils.crossed(self.fast_ema, self.slow_ema, sequential=False):
pass
def on_stop_loss(self, order):
self.lastwasprofitable = False
self.losecount += 1
self.wincount = 0
self.multiplier = self.multiplier * (1 + (self.carpan / 50))
def on_take_profit(self, order):
self.lastwasprofitable = True
self.wincount += 1
self.losecount = 0
self.multiplier = 1
def before(self):
if self.firstrun:
self.runonce()
def runonce(self):
if self.symbol.startswith('BTC'):
self.dnaindex = 21
if self.symbol.startswith('BNB'):
self.dnaindex = 22
if self.symbol.startswith('ETH'):
self.dnaindex = 23
if self.symbol.startswith('TRX-'):
self.dnaindex = 999
if self.symbol.startswith('ADA-'):
self.dnaindex = 25
if self.symbol.startswith('LTC-'):
self.dnaindex = 19
if self.symbol.startswith('NEO-'):
self.dnaindex = 8
if self.symbol.startswith('XRP-'):
self.dnaindex = 12
if self.symbol.startswith('QTUM-'):
self.dnaindex = 15
self.firstrun = False
def should_cancel(self) -> bool:
return True
def on_open_position(self, order):
self.long_start = None
self.trend_long = False
self.green_count = 0
self.short_start = None
self.trend_short = False
self.red_count = 0
self.trend_short_exit = False
self.trend_long_exit = False
self.long_exit_start = 0
self.short_exit_start = 0
| true | true |
1c2d482eb71e3a0e323f3d5430c5cd8c7e0544cb | 5,308 | py | Python | doc/scripts/docgen.py | oplatek/Theano | 09605e7cae876e15c5502c4edaba6a9644c50c11 | [
"BSD-3-Clause"
] | null | null | null | doc/scripts/docgen.py | oplatek/Theano | 09605e7cae876e15c5502c4edaba6a9644c50c11 | [
"BSD-3-Clause"
] | null | null | null | doc/scripts/docgen.py | oplatek/Theano | 09605e7cae876e15c5502c4edaba6a9644c50c11 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import sys
import os
import shutil
import inspect
from epydoc import docintrospecter
from epydoc.apidoc import RoutineDoc
def Op_to_RoutineDoc(op, routine_doc, module_name=None):
    """Introspect a Theano Op instance into an epydoc RoutineDoc.

    Registered with epydoc below so that objects exposing the wrapped
    function via ``__epydoc_asRoutine`` are documented as plain routines.
    Returns the populated ``routine_doc``.
    """
    routine_doc.specialize_to(RoutineDoc)
    #NB: this code is lifted from epydoc/docintrospecter.py
    # op should be an op instance
    assert hasattr(op, 'perform')
    # Record the function's docstring.
    routine_doc.docstring = getattr(op, '__doc__', '')
    # Record the function's signature.
    func = op.__epydoc_asRoutine
    if isinstance(func, type(Op_to_RoutineDoc)):
        # NOTE(review): inspect.getargspec was removed in Python 3.11; this
        # epydoc-era script targets older Pythons, so it is kept as-is here.
        (args, vararg, kwarg, defaults) = inspect.getargspec(func)
        # Add the arguments.
        routine_doc.posargs = args
        routine_doc.vararg = vararg
        routine_doc.kwarg = kwarg
        # Set default values for positional arguments.
        routine_doc.posarg_defaults = [None] * len(args)
        # Set the routine's line number.
        if hasattr(func, '__code__'):
            routine_doc.lineno = func.__code__.co_firstlineno
    else:
        # [XX] I should probably use UNKNOWN here??
        # dvarrazzo: if '...' is to be changed, also check that
        # `docstringparser.process_arg_field()` works correctly.
        # See SF bug #1556024.
        routine_doc.posargs = ['...']
        routine_doc.posarg_defaults = [None]
        routine_doc.kwarg = None
        routine_doc.vararg = None
    return routine_doc
# Teach epydoc to introspect any object exposing __epydoc_asRoutine as a
# routine; low priority so the built-in introspecters are tried first.
docintrospecter.register_introspecter(
    lambda value: getattr(value, '__epydoc_asRoutine', False),
    Op_to_RoutineDoc,
    priority=-1)
import getopt
from collections import defaultdict
if __name__ == '__main__':
    # Documentation build driver: epydoc API docs and/or Sphinx rst docs
    # (HTML plus optionally a LaTeX-built PDF), or doctest runs.
    # Equivalent of sys.path[0]/../..
    throot = os.path.abspath(
        os.path.join(sys.path[0], os.pardir, os.pardir))
    options = defaultdict(bool)
    opts, args = getopt.getopt(
        sys.argv[1:],
        'o:f:',
        ['epydoc', 'rst', 'help', 'nopdf', 'cache', 'test'])
    options.update(dict([x, y or True] for x, y in opts))
    if options['--help']:
        print('Usage: %s [OPTIONS] [files...]' % sys.argv[0])
        print(' -o <dir>: output the html files in the specified dir')
        print(' --cache: use the doctree cache')
        print(' --rst: only compile the doc (requires sphinx)')
        print(' --nopdf: do not produce a PDF file from the doc, only HTML')
        print(' --epydoc: only compile the api documentation', end=' ')
        print('(requires epydoc)')
        print(' --test: run all the code samples in the documentaton')
        print(' --help: this help')
        print('If one or more files are specified after the options then only '
              'those files will be built. Otherwise the whole tree is '
              'processed. Specifying files will implies --cache.')
        sys.exit(0)
    if not (options['--epydoc'] or options['--rst'] or options['--test']):
        # Default is now rst
        options['--rst'] = True
    def mkdir(path):
        # Create `path`, ignoring "already exists" (and any other OSError).
        try:
            os.mkdir(path)
        except OSError:
            pass
    outdir = options['-o'] or (throot + '/html')
    files = None
    if len(args) != 0:
        files = [os.path.abspath(f) for f in args]
    mkdir(outdir)
    os.chdir(outdir)
    # Make sure the appropriate 'theano' directory is in the PYTHONPATH
    pythonpath = os.environ.get('PYTHONPATH', '')
    # NOTE(review): `pythonpath` is computed but never exported; only
    # sys.path below actually takes effect.
    pythonpath = os.pathsep.join([throot, pythonpath])
    sys.path[0:0] = [throot]  # We must not use os.environ.
    # NOTE(review): '--all' is not in the getopt long-option list above, so
    # options['--all'] is always falsy here; only --epydoc/--rst gate these.
    if options['--all'] or options['--epydoc']:
        mkdir("api")
        #Generate HTML doc
        ## This causes problems with the subsequent generation of sphinx doc
        #from epydoc.cli import cli
        #sys.argv[:] = ['', '--config', '%s/doc/api/epydoc.conf' % throot,
        #               '-o', 'api']
        #cli()
        ## So we use this instead
        os.system("epydoc --config %s/doc/api/epydoc.conf -o api" % throot)
        # Generate PDF doc
        # TODO
    def call_sphinx(builder, workdir, extraopts=None):
        # Run a sphinx build of `builder` into `workdir`; without --cache
        # (and when no explicit files were given) force a full rebuild (-E).
        import sphinx
        if extraopts is None:
            extraopts = []
        if not options['--cache'] and files is None:
            extraopts.append('-E')
        docpath = os.path.join(throot, 'doc')
        inopt = [docpath, workdir]
        if files is not None:
            inopt.extend(files)
        sphinx.build_main(['', '-b', builder] + extraopts + inopt)
    if options['--all'] or options['--rst']:
        mkdir("doc")
        sys.path[0:0] = [os.path.join(throot, 'doc')]
        call_sphinx('html', '.')
        if not options['--nopdf']:
            # Generate latex file in a temp directory
            import tempfile
            workdir = tempfile.mkdtemp()
            call_sphinx('latex', workdir)
            # Compile to PDF
            os.chdir(workdir)
            os.system('make')
            try:
                shutil.copy(os.path.join(workdir, 'theano.pdf'), outdir)
                os.chdir(outdir)
                shutil.rmtree(workdir)
            except OSError as e:
                print('OSError:', e)
            except IOError as e:
                print('IOError:', e)
    if options['--test']:
        mkdir("doc")
        sys.path[0:0] = [os.path.join(throot, 'doc')]
        call_sphinx('doctest', '.')
| 32.765432 | 79 | 0.586473 | from __future__ import print_function
import sys
import os
import shutil
import inspect
from epydoc import docintrospecter
from epydoc.apidoc import RoutineDoc
def Op_to_RoutineDoc(op, routine_doc, module_name=None):
routine_doc.specialize_to(RoutineDoc)
assert hasattr(op, 'perform')
routine_doc.docstring = getattr(op, '__doc__', '')
# Record the function's signature.
func = op.__epydoc_asRoutine
if isinstance(func, type(Op_to_RoutineDoc)):
(args, vararg, kwarg, defaults) = inspect.getargspec(func)
routine_doc.posargs = args
routine_doc.vararg = vararg
routine_doc.kwarg = kwarg
routine_doc.posarg_defaults = [None] * len(args)
if hasattr(func, '__code__'):
routine_doc.lineno = func.__code__.co_firstlineno
else:
# [XX] I should probably use UNKNOWN here??
# dvarrazzo: if '...' is to be changed, also check that
# `docstringparser.process_arg_field()` works correctly.
# See SF bug #1556024.
routine_doc.posargs = ['...']
routine_doc.posarg_defaults = [None]
routine_doc.kwarg = None
routine_doc.vararg = None
return routine_doc
docintrospecter.register_introspecter(
lambda value: getattr(value, '__epydoc_asRoutine', False),
Op_to_RoutineDoc,
priority=-1)
import getopt
from collections import defaultdict
if __name__ == '__main__':
# Equivalent of sys.path[0]/../..
throot = os.path.abspath(
os.path.join(sys.path[0], os.pardir, os.pardir))
options = defaultdict(bool)
opts, args = getopt.getopt(
sys.argv[1:],
'o:f:',
['epydoc', 'rst', 'help', 'nopdf', 'cache', 'test'])
options.update(dict([x, y or True] for x, y in opts))
if options['--help']:
print('Usage: %s [OPTIONS] [files...]' % sys.argv[0])
print(' -o <dir>: output the html files in the specified dir')
print(' --cache: use the doctree cache')
print(' --rst: only compile the doc (requires sphinx)')
print(' --nopdf: do not produce a PDF file from the doc, only HTML')
print(' --epydoc: only compile the api documentation', end=' ')
print('(requires epydoc)')
print(' --test: run all the code samples in the documentaton')
print(' --help: this help')
print('If one or more files are specified after the options then only '
'those files will be built. Otherwise the whole tree is '
'processed. Specifying files will implies --cache.')
sys.exit(0)
if not (options['--epydoc'] or options['--rst'] or options['--test']):
# Default is now rst
options['--rst'] = True
def mkdir(path):
try:
os.mkdir(path)
except OSError:
pass
outdir = options['-o'] or (throot + '/html')
files = None
if len(args) != 0:
files = [os.path.abspath(f) for f in args]
mkdir(outdir)
os.chdir(outdir)
# Make sure the appropriate 'theano' directory is in the PYTHONPATH
pythonpath = os.environ.get('PYTHONPATH', '')
pythonpath = os.pathsep.join([throot, pythonpath])
sys.path[0:0] = [throot] # We must not use os.environ.
if options['--all'] or options['--epydoc']:
mkdir("api")
#Generate HTML doc
## This causes problems with the subsequent generation of sphinx doc
#from epydoc.cli import cli
#sys.argv[:] = ['', '--config', '%s/doc/api/epydoc.conf' % throot,
# '-o', 'api']
#cli()
## So we use this instead
os.system("epydoc --config %s/doc/api/epydoc.conf -o api" % throot)
# Generate PDF doc
# TODO
def call_sphinx(builder, workdir, extraopts=None):
import sphinx
if extraopts is None:
extraopts = []
if not options['--cache'] and files is None:
extraopts.append('-E')
docpath = os.path.join(throot, 'doc')
inopt = [docpath, workdir]
if files is not None:
inopt.extend(files)
sphinx.build_main(['', '-b', builder] + extraopts + inopt)
if options['--all'] or options['--rst']:
mkdir("doc")
sys.path[0:0] = [os.path.join(throot, 'doc')]
call_sphinx('html', '.')
if not options['--nopdf']:
# Generate latex file in a temp directory
import tempfile
workdir = tempfile.mkdtemp()
call_sphinx('latex', workdir)
# Compile to PDF
os.chdir(workdir)
os.system('make')
try:
shutil.copy(os.path.join(workdir, 'theano.pdf'), outdir)
os.chdir(outdir)
shutil.rmtree(workdir)
except OSError as e:
print('OSError:', e)
except IOError as e:
print('IOError:', e)
if options['--test']:
mkdir("doc")
sys.path[0:0] = [os.path.join(throot, 'doc')]
call_sphinx('doctest', '.')
| true | true |
1c2d491ba44c6f8c9d5e1f7ca72bfa5aabb3751e | 487 | py | Python | kakunin.py | cabbagerice/word-chain | 5d2978df61a7136f502b1b601213e97454103e1e | [
"BSD-2-Clause"
] | null | null | null | kakunin.py | cabbagerice/word-chain | 5d2978df61a7136f502b1b601213e97454103e1e | [
"BSD-2-Clause"
] | 6 | 2019-07-28T20:34:58.000Z | 2021-05-10T06:53:05.000Z | kakunin.py | cabbagerice/word-chain | 5d2978df61a7136f502b1b601213e97454103e1e | [
"BSD-2-Clause"
] | null | null | null | import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
from numpy import random as unko
import math
#認証ファイルPATH
certificate_json_path='secret.json'
#DBとアプリへの認証
if (not len(firebase_admin._apps)):
cred = credentials.Certificate(certificate_json_path)
default_app = firebase_admin.initialize_app(cred, {
'databaseURL': 'https://wordchain-bfb8b.firebaseio.com/'
})
print(db.reference("/PDD_hum/あ/"+ str(math.floor(unko.rand()*1000))).get());exit() | 30.4375 | 82 | 0.788501 | import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
from numpy import random as unko
import math
certificate_json_path='secret.json'
if (not len(firebase_admin._apps)):
cred = credentials.Certificate(certificate_json_path)
default_app = firebase_admin.initialize_app(cred, {
'databaseURL': 'https://wordchain-bfb8b.firebaseio.com/'
})
print(db.reference("/PDD_hum/あ/"+ str(math.floor(unko.rand()*1000))).get());exit() | true | true |
1c2d4973b6be2f4d7e183113ece566f002b580c9 | 117,653 | py | Python | zerver/tests/test_message_edit.py | mathewstars/zulip | da269302e2f334a71a4b43f0c9e5e504f856382f | [
"Apache-2.0"
] | 1 | 2022-01-23T14:59:32.000Z | 2022-01-23T14:59:32.000Z | zerver/tests/test_message_edit.py | mathewstars/zulip | da269302e2f334a71a4b43f0c9e5e504f856382f | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_message_edit.py | mathewstars/zulip | da269302e2f334a71a4b43f0c9e5e504f856382f | [
"Apache-2.0"
] | null | null | null | import datetime
from operator import itemgetter
from typing import Any, Dict, List, Optional, Tuple, Union
from unittest import mock
import orjson
from django.db import IntegrityError
from django.http import HttpResponse
from django.utils.timezone import now as timezone_now
from zerver.actions.message_edit import (
check_update_message,
do_delete_messages,
do_update_message,
get_user_info_for_message_updates,
)
from zerver.actions.reactions import do_add_reaction
from zerver.actions.realm_settings import do_change_realm_plan_type, do_set_realm_property
from zerver.actions.streams import do_change_stream_post_policy, do_deactivate_stream
from zerver.actions.users import do_change_user_role
from zerver.lib.message import MessageDict, has_message_access, messages_for_ids
from zerver.lib.test_classes import ZulipTestCase, get_topic_messages
from zerver.lib.test_helpers import cache_tries_captured, queries_captured
from zerver.lib.topic import RESOLVED_TOPIC_PREFIX, TOPIC_NAME
from zerver.lib.user_topics import (
get_topic_mutes,
get_users_muting_topic,
set_topic_mutes,
topic_is_muted,
)
from zerver.models import Message, Realm, Stream, UserMessage, UserProfile, get_realm, get_stream
class EditMessageTestCase(ZulipTestCase):
    """Shared helpers for message-edit tests: DB/cache assertions and
    fixtures for topic-move scenarios."""

    def check_topic(self, msg_id: int, topic_name: str) -> None:
        """Assert the stored message has the given topic."""
        msg = Message.objects.get(id=msg_id)
        self.assertEqual(msg.topic_name(), topic_name)
    def check_message(self, msg_id: int, topic_name: str, content: str) -> None:
        """Assert the edited message was saved to the DB *and* that the
        message cache serves the edited copy without hitting zerver_message."""
        # Make sure we saved the message correctly to the DB.
        msg = Message.objects.get(id=msg_id)
        self.assertEqual(msg.topic_name(), topic_name)
        self.assertEqual(msg.content, content)
        """
        We assume our caller just edited a message.
        Next, we will make sure we properly cached the messages. We still have
        to do a query to hydrate recipient info, but we won't need to hit the
        zerver_message table.
        """
        with queries_captured(keep_cache_warm=True) as queries:
            (fetch_message_dict,) = messages_for_ids(
                message_ids=[msg.id],
                user_message_flags={msg_id: []},
                search_fields={},
                apply_markdown=False,
                client_gravatar=False,
                allow_edit_history=True,
            )
        # Exactly one query, and it must not touch the message table.
        self.assert_length(queries, 1)
        for query in queries:
            self.assertNotIn("message", query["sql"])
        self.assertEqual(
            fetch_message_dict[TOPIC_NAME],
            msg.topic_name(),
        )
        self.assertEqual(
            fetch_message_dict["content"],
            msg.content,
        )
        self.assertEqual(
            fetch_message_dict["sender_id"],
            msg.sender_id,
        )
        if msg.edit_history:
            self.assertEqual(
                fetch_message_dict["edit_history"],
                orjson.loads(msg.edit_history),
            )
    def prepare_move_topics(
        self,
        user_email: str,
        old_stream: str,
        new_stream: str,
        topic: str,
        language: Optional[str] = None,
    ) -> Tuple[UserProfile, Stream, Stream, int, int]:
        """Create two streams, subscribe the user to both, and post three
        messages in `topic` on the old stream.

        Returns (user, old stream, new stream, id of the first message,
        id of the second message).
        """
        user_profile = self.example_user(user_email)
        if language is not None:
            user_profile.default_language = language
            user_profile.save(update_fields=["default_language"])
        self.login(user_email)
        stream = self.make_stream(old_stream)
        # NOTE(review): the `new_stream` parameter (a str) is rebound here
        # to the created Stream object.
        new_stream = self.make_stream(new_stream)
        self.subscribe(user_profile, stream.name)
        self.subscribe(user_profile, new_stream.name)
        msg_id = self.send_stream_message(
            user_profile, stream.name, topic_name=topic, content="First"
        )
        msg_id_lt = self.send_stream_message(
            user_profile, stream.name, topic_name=topic, content="Second"
        )
        self.send_stream_message(user_profile, stream.name, topic_name=topic, content="third")
        return (user_profile, stream, new_stream, msg_id, msg_id_lt)
class EditMessagePayloadTest(EditMessageTestCase):
    """Validation of the request payload for the message-edit endpoint."""

    def test_edit_message_no_changes(self) -> None:
        # An empty PATCH body is rejected outright.
        self.login("hamlet")
        message_id = self.send_stream_message(
            self.example_user("hamlet"), "Denmark", topic_name="editing", content="before edit"
        )
        response = self.client_patch(f"/json/messages/{message_id}", {})
        self.assert_json_error(response, "Nothing to change")

    def test_move_message_cant_move_private_message(self) -> None:
        # Private messages cannot be given a stream_id.
        hamlet = self.example_user("hamlet")
        self.login("hamlet")
        cordelia = self.example_user("cordelia")
        message_id = self.send_personal_message(hamlet, cordelia)
        verona = get_stream("Verona", hamlet.realm)
        response = self.client_patch(
            f"/json/messages/{message_id}",
            {"stream_id": verona.id},
        )
        self.assert_json_error(response, "Private messages cannot be moved to streams.")

    def test_private_message_edit_topic(self) -> None:
        # Private messages cannot be given a topic.
        hamlet = self.example_user("hamlet")
        self.login("hamlet")
        cordelia = self.example_user("cordelia")
        message_id = self.send_personal_message(hamlet, cordelia)
        response = self.client_patch(
            f"/json/messages/{message_id}",
            {"topic": "Should not exist"},
        )
        self.assert_json_error(response, "Private messages cannot have topics.")

    def test_propagate_invalid(self) -> None:
        self.login("hamlet")
        message_id = self.send_stream_message(
            self.example_user("hamlet"), "Denmark", topic_name="topic1"
        )
        # A bogus propagate_mode is rejected; the topic is untouched.
        response = self.client_patch(
            f"/json/messages/{message_id}",
            {"topic": "edited", "propagate_mode": "invalid"},
        )
        self.assert_json_error(response, "Invalid propagate_mode")
        self.check_topic(message_id, topic_name="topic1")
        # propagate_mode beyond a single message requires a topic edit.
        response = self.client_patch(
            f"/json/messages/{message_id}",
            {"content": "edited", "propagate_mode": "change_all"},
        )
        self.assert_json_error(response, "Invalid propagate_mode without topic edit")
        self.check_topic(message_id, topic_name="topic1")

    def test_edit_message_no_topic(self) -> None:
        # A whitespace-only topic is rejected.
        self.login("hamlet")
        message_id = self.send_stream_message(
            self.example_user("hamlet"), "Denmark", topic_name="editing", content="before edit"
        )
        response = self.client_patch(f"/json/messages/{message_id}", {"topic": " "})
        self.assert_json_error(response, "Topic can't be empty!")

    def test_edit_message_invalid_topic(self) -> None:
        # Topics may not contain control characters such as newlines.
        self.login("hamlet")
        message_id = self.send_stream_message(
            self.example_user("hamlet"), "Denmark", topic_name="editing", content="before edit"
        )
        response = self.client_patch(
            f"/json/messages/{message_id}",
            {"topic": "editing\nfun"},
        )
        self.assert_json_error(response, "Invalid character in topic, at position 8!")

    def test_move_message_to_stream_with_content(self) -> None:
        # Combining a stream move with a content edit is rejected, and no
        # message is moved.
        (user_profile, old_stream, new_stream, message_id, _) = self.prepare_move_topics(
            "iago", "test move stream", "new stream", "test"
        )
        response = self.client_patch(
            f"/json/messages/{message_id}",
            {
                "stream_id": new_stream.id,
                "propagate_mode": "change_all",
                "content": "Not allowed",
            },
        )
        self.assert_json_error(response, "Cannot change message content while changing stream")
        self.assert_length(get_topic_messages(user_profile, old_stream, "test"), 3)
        self.assert_length(get_topic_messages(user_profile, new_stream, "test"), 0)

    def test_edit_submessage(self) -> None:
        # Right now, we prevent users from editing widgets.
        self.login("hamlet")
        message_id = self.send_stream_message(
            self.example_user("hamlet"),
            "Denmark",
            topic_name="editing",
            content="/poll Games?\nYES\nNO",
        )
        response = self.client_patch(
            f"/json/messages/{message_id}",
            {"content": "/poll Games?\nYES\nNO\nMaybe"},
        )
        self.assert_json_error(response, "Widgets cannot be edited.")
class EditMessageTest(EditMessageTestCase):
def test_query_count_on_to_dict_uncached(self) -> None:
# `to_dict_uncached` method is used by the mechanisms
# tested in this class. Hence, its performance is tested here.
# Generate 2 messages
user = self.example_user("hamlet")
realm = user.realm
self.login_user(user)
stream_name = "public_stream"
self.subscribe(user, stream_name)
message_ids = []
message_ids.append(self.send_stream_message(user, stream_name, "Message one"))
user_2 = self.example_user("cordelia")
self.subscribe(user_2, stream_name)
message_ids.append(self.send_stream_message(user_2, stream_name, "Message two"))
self.subscribe(self.notification_bot(realm), stream_name)
message_ids.append(
self.send_stream_message(self.notification_bot(realm), stream_name, "Message three")
)
messages = [
Message.objects.select_related().get(id=message_id) for message_id in message_ids
]
# Check number of queries performed
with queries_captured() as queries:
MessageDict.to_dict_uncached(messages)
# 1 query for realm_id per message = 3
# 1 query each for reactions & submessage for all messages = 2
self.assert_length(queries, 5)
realm_id = 2 # Fetched from stream object
# Check number of queries performed with realm_id
with queries_captured() as queries:
MessageDict.to_dict_uncached(messages, realm_id)
# 1 query each for reactions & submessage for all messages = 2
self.assert_length(queries, 2)
def test_save_message(self) -> None:
"""This is also tested by a client test, but here we can verify
the cache against the database"""
self.login("hamlet")
msg_id = self.send_stream_message(
self.example_user("hamlet"), "Denmark", topic_name="editing", content="before edit"
)
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"content": "after edit",
},
)
self.assert_json_success(result)
self.check_message(msg_id, topic_name="editing", content="after edit")
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"topic": "edited",
},
)
self.assert_json_success(result)
self.check_topic(msg_id, topic_name="edited")
def test_fetch_message_from_id(self) -> None:
self.login("hamlet")
msg_id = self.send_personal_message(
from_user=self.example_user("hamlet"),
to_user=self.example_user("cordelia"),
content="Personal message",
)
result = self.client_get("/json/messages/" + str(msg_id))
self.assert_json_success(result)
self.assertEqual(result.json()["raw_content"], "Personal message")
self.assertEqual(result.json()["message"]["id"], msg_id)
self.assertEqual(result.json()["message"]["flags"], [])
# Send message to web-public stream where hamlet is not subscribed.
# This will test case of user having no `UserMessage` but having access
# to message.
web_public_stream = self.make_stream("web-public-stream", is_web_public=True)
self.subscribe(self.example_user("cordelia"), web_public_stream.name)
web_public_stream_msg_id = self.send_stream_message(
self.example_user("cordelia"), web_public_stream.name, content="web-public message"
)
result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
self.assert_json_success(result)
self.assertEqual(result.json()["raw_content"], "web-public message")
self.assertEqual(result.json()["message"]["id"], web_public_stream_msg_id)
self.assertEqual(result.json()["message"]["flags"], ["read", "historical"])
# Spectator should be able to fetch message in web-public stream.
self.logout()
result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
self.assert_json_success(result)
self.assertEqual(result.json()["raw_content"], "web-public message")
self.assertEqual(result.json()["message"]["id"], web_public_stream_msg_id)
# Verify default is apply_markdown=True
self.assertEqual(result.json()["message"]["content"], "<p>web-public message</p>")
# Verify apply_markdown=False works correctly.
result = self.client_get(
"/json/messages/" + str(web_public_stream_msg_id), {"apply_markdown": "false"}
)
self.assert_json_success(result)
self.assertEqual(result.json()["raw_content"], "web-public message")
self.assertEqual(result.json()["message"]["content"], "web-public message")
with self.settings(WEB_PUBLIC_STREAMS_ENABLED=False):
result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
self.assert_json_error(
result, "Not logged in: API authentication or user session required", status_code=401
)
# Test error cases
self.login("hamlet")
result = self.client_get("/json/messages/999999")
self.assert_json_error(result, "Invalid message(s)")
self.login("cordelia")
result = self.client_get(f"/json/messages/{msg_id}")
self.assert_json_success(result)
self.login("othello")
result = self.client_get(f"/json/messages/{msg_id}")
self.assert_json_error(result, "Invalid message(s)")
    def test_fetch_raw_message_spectator(self) -> None:
        """Spectator (logged-out) access to a single message: allowed only
        for web-public streams, and gated on server setting, realm setting,
        and realm plan type. The order of the checks below matters, since
        each step mutates realm state."""
        user_profile = self.example_user("iago")
        self.login("iago")
        web_public_stream = self.make_stream("web-public-stream", is_web_public=True)
        self.subscribe(user_profile, web_public_stream.name)
        web_public_stream_msg_id = self.send_stream_message(
            user_profile, web_public_stream.name, content="web-public message"
        )
        non_web_public_stream = self.make_stream("non-web-public-stream")
        self.subscribe(user_profile, non_web_public_stream.name)
        non_web_public_stream_msg_id = self.send_stream_message(
            user_profile, non_web_public_stream.name, content="non-web-public message"
        )
        # Generate a private message to use in verification.
        private_message_id = self.send_personal_message(user_profile, user_profile)
        invalid_message_id = private_message_id + 1000
        self.logout()
        # Confirm WEB_PUBLIC_STREAMS_ENABLED is enforced.
        with self.settings(WEB_PUBLIC_STREAMS_ENABLED=False):
            result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
            self.assert_json_error(
                result, "Not logged in: API authentication or user session required", 401
            )
        # The realm-level enable_spectator_access setting is also enforced.
        do_set_realm_property(
            user_profile.realm, "enable_spectator_access", False, acting_user=None
        )
        result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
        self.assert_json_error(
            result, "Not logged in: API authentication or user session required", 401
        )
        do_set_realm_property(user_profile.realm, "enable_spectator_access", True, acting_user=None)
        # Verify success with web-public stream and default SELF_HOSTED plan type.
        result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
        self.assert_json_success(result)
        self.assertEqual(result.json()["raw_content"], "web-public message")
        self.assertEqual(result.json()["message"]["flags"], ["read"])
        # Verify LIMITED plan type does not allow web-public access.
        do_change_realm_plan_type(user_profile.realm, Realm.PLAN_TYPE_LIMITED, acting_user=None)
        result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
        self.assert_json_error(
            result, "Not logged in: API authentication or user session required", 401
        )
        # Verify works with STANDARD_FREE plan type too.
        do_change_realm_plan_type(
            user_profile.realm, Realm.PLAN_TYPE_STANDARD_FREE, acting_user=None
        )
        result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
        self.assert_json_success(result)
        self.assertEqual(result.json()["raw_content"], "web-public message")
        # Verify private messages are rejected.
        result = self.client_get("/json/messages/" + str(private_message_id))
        self.assert_json_error(
            result, "Not logged in: API authentication or user session required", 401
        )
        # Verify an actual public stream is required.
        result = self.client_get("/json/messages/" + str(non_web_public_stream_msg_id))
        self.assert_json_error(
            result, "Not logged in: API authentication or user session required", 401
        )
        # Verify invalid message IDs are rejected with the same error message.
        result = self.client_get("/json/messages/" + str(invalid_message_id))
        self.assert_json_error(
            result, "Not logged in: API authentication or user session required", 401
        )
        # Verify deactivated streams are rejected. This may change in the future.
        do_deactivate_stream(web_public_stream, acting_user=None)
        result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
        self.assert_json_error(
            result, "Not logged in: API authentication or user session required", 401
        )
def test_fetch_raw_message_stream_wrong_realm(self) -> None:
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
stream = self.make_stream("public_stream")
self.subscribe(user_profile, stream.name)
msg_id = self.send_stream_message(
user_profile, stream.name, topic_name="test", content="test"
)
result = self.client_get(f"/json/messages/{msg_id}")
self.assert_json_success(result)
mit_user = self.mit_user("sipbtest")
self.login_user(mit_user)
result = self.client_get(f"/json/messages/{msg_id}", subdomain="zephyr")
self.assert_json_error(result, "Invalid message(s)")
def test_fetch_raw_message_private_stream(self) -> None:
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
stream = self.make_stream("private_stream", invite_only=True)
self.subscribe(user_profile, stream.name)
msg_id = self.send_stream_message(
user_profile, stream.name, topic_name="test", content="test"
)
result = self.client_get(f"/json/messages/{msg_id}")
self.assert_json_success(result)
self.login("othello")
result = self.client_get(f"/json/messages/{msg_id}")
self.assert_json_error(result, "Invalid message(s)")
def test_edit_message_no_permission(self) -> None:
self.login("hamlet")
msg_id = self.send_stream_message(
self.example_user("iago"), "Denmark", topic_name="editing", content="before edit"
)
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"content": "content after edit",
},
)
self.assert_json_error(result, "You don't have permission to edit this message")
def test_edit_message_no_content(self) -> None:
self.login("hamlet")
msg_id = self.send_stream_message(
self.example_user("hamlet"), "Denmark", topic_name="editing", content="before edit"
)
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"content": " ",
},
)
self.assert_json_success(result)
content = Message.objects.filter(id=msg_id).values_list("content", flat=True)[0]
self.assertEqual(content, "(deleted)")
def test_edit_message_history_disabled(self) -> None:
user_profile = self.example_user("hamlet")
do_set_realm_property(user_profile.realm, "allow_edit_history", False, acting_user=None)
self.login("hamlet")
# Single-line edit
msg_id_1 = self.send_stream_message(
self.example_user("hamlet"),
"Denmark",
topic_name="editing",
content="content before edit",
)
new_content_1 = "content after edit"
result_1 = self.client_patch(
f"/json/messages/{msg_id_1}",
{
"content": new_content_1,
},
)
self.assert_json_success(result_1)
result = self.client_get(f"/json/messages/{msg_id_1}/history")
self.assert_json_error(result, "Message edit history is disabled in this organization")
# Now verify that if we fetch the message directly, there's no
# edit history data attached.
messages_result = self.client_get(
"/json/messages", {"anchor": msg_id_1, "num_before": 0, "num_after": 10}
)
self.assert_json_success(messages_result)
json_messages = orjson.loads(messages_result.content)
for msg in json_messages["messages"]:
self.assertNotIn("edit_history", msg)
    def test_edit_message_history(self) -> None:
        """Check rendered content, prev_rendered_content, and the HTML
        diff returned by /json/messages/<id>/history for content edits."""
        self.login("hamlet")
        # Single-line edit
        msg_id_1 = self.send_stream_message(
            self.example_user("hamlet"),
            "Denmark",
            topic_name="editing",
            content="content before edit",
        )
        new_content_1 = "content after edit"
        result_1 = self.client_patch(
            f"/json/messages/{msg_id_1}",
            {
                "content": new_content_1,
            },
        )
        self.assert_json_success(result_1)
        message_edit_history_1 = self.client_get(f"/json/messages/{msg_id_1}/history")
        json_response_1 = orjson.loads(message_edit_history_1.content)
        message_history_1 = json_response_1["message_history"]
        # Check content of message after edit.
        self.assertEqual(message_history_1[0]["rendered_content"], "<p>content before edit</p>")
        self.assertEqual(message_history_1[1]["rendered_content"], "<p>content after edit</p>")
        # The HTML diff highlights both the inserted and the deleted word.
        self.assertEqual(
            message_history_1[1]["content_html_diff"],
            (
                "<div><p>content "
                '<span class="highlight_text_inserted">after</span> '
                '<span class="highlight_text_deleted">before</span>'
                " edit</p></div>"
            ),
        )
        # Check content of message before edit.
        self.assertEqual(
            message_history_1[1]["prev_rendered_content"], "<p>content before edit</p>"
        )
        # Edits on new lines
        msg_id_2 = self.send_stream_message(
            self.example_user("hamlet"),
            "Denmark",
            topic_name="editing",
            content="content before edit, line 1\n\ncontent before edit, line 3",
        )
        new_content_2 = (
            "content before edit, line 1\n"
            "content after edit, line 2\n"
            "content before edit, line 3"
        )
        result_2 = self.client_patch(
            f"/json/messages/{msg_id_2}",
            {
                "content": new_content_2,
            },
        )
        self.assert_json_success(result_2)
        message_edit_history_2 = self.client_get(f"/json/messages/{msg_id_2}/history")
        json_response_2 = orjson.loads(message_edit_history_2.content)
        message_history_2 = json_response_2["message_history"]
        self.assertEqual(
            message_history_2[0]["rendered_content"],
            "<p>content before edit, line 1</p>\n<p>content before edit, line 3</p>",
        )
        self.assertEqual(
            message_history_2[1]["rendered_content"],
            (
                "<p>content before edit, line 1<br>\n"
                "content after edit, line 2<br>\n"
                "content before edit, line 3</p>"
            ),
        )
        # The multi-line diff highlights the inserted span across lines.
        self.assertEqual(
            message_history_2[1]["content_html_diff"],
            (
                "<div><p>content before edit, line 1<br> "
                'content <span class="highlight_text_inserted">after edit, line 2<br> '
                "content</span> before edit, line 3</p></div>"
            ),
        )
        self.assertEqual(
            message_history_2[1]["prev_rendered_content"],
            "<p>content before edit, line 1</p>\n<p>content before edit, line 3</p>",
        )
def test_empty_message_edit(self) -> None:
self.login("hamlet")
msg_id = self.send_stream_message(
self.example_user("hamlet"),
"Denmark",
topic_name="editing",
content="We will edit this to render as empty.",
)
# Edit that manually to simulate a rendering bug
message = Message.objects.get(id=msg_id)
message.rendered_content = ""
message.save(update_fields=["rendered_content"])
self.assert_json_success(
self.client_patch(
"/json/messages/" + str(msg_id),
{
"content": "We will edit this to also render as empty.",
},
)
)
# And again tweak to simulate a rendering bug
message = Message.objects.get(id=msg_id)
message.rendered_content = ""
message.save(update_fields=["rendered_content"])
history = self.client_get("/json/messages/" + str(msg_id) + "/history")
message_history = orjson.loads(history.content)["message_history"]
self.assertEqual(message_history[0]["rendered_content"], "")
self.assertEqual(message_history[1]["rendered_content"], "")
self.assertEqual(message_history[1]["content_html_diff"], "<div></div>")
def test_edit_link(self) -> None:
# Link editing
self.login("hamlet")
msg_id_1 = self.send_stream_message(
self.example_user("hamlet"),
"Denmark",
topic_name="editing",
content="Here is a link to [zulip](www.zulip.org).",
)
new_content_1 = "Here is a link to [zulip](www.zulipchat.com)."
result_1 = self.client_patch(
f"/json/messages/{msg_id_1}",
{
"content": new_content_1,
},
)
self.assert_json_success(result_1)
message_edit_history_1 = self.client_get(f"/json/messages/{msg_id_1}/history")
json_response_1 = orjson.loads(message_edit_history_1.content)
message_history_1 = json_response_1["message_history"]
# Check content of message after edit.
self.assertEqual(
message_history_1[0]["rendered_content"],
"<p>Here is a link to " '<a href="http://www.zulip.org">zulip</a>.</p>',
)
self.assertEqual(
message_history_1[1]["rendered_content"],
"<p>Here is a link to " '<a href="http://www.zulipchat.com">zulip</a>.</p>',
)
self.assertEqual(
message_history_1[1]["content_html_diff"],
(
'<div><p>Here is a link to <a href="http://www.zulipchat.com"'
">zulip "
'<span class="highlight_text_inserted"> Link: http://www.zulipchat.com .'
'</span> <span class="highlight_text_deleted"> Link: http://www.zulip.org .'
"</span> </a></p></div>"
),
)
def test_edit_history_unedited(self) -> None:
self.login("hamlet")
msg_id = self.send_stream_message(
self.example_user("hamlet"),
"Denmark",
topic_name="editing",
content="This message has not been edited.",
)
result = self.client_get(f"/json/messages/{msg_id}/history")
self.assert_json_success(result)
message_history = result.json()["message_history"]
self.assert_length(message_history, 1)
def test_user_info_for_updates(self) -> None:
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
self.login_user(hamlet)
self.subscribe(hamlet, "Denmark")
self.subscribe(cordelia, "Denmark")
msg_id = self.send_stream_message(
hamlet, "Denmark", content="@**Cordelia, Lear's daughter**"
)
user_info = get_user_info_for_message_updates(msg_id)
message_user_ids = user_info["message_user_ids"]
self.assertIn(hamlet.id, message_user_ids)
self.assertIn(cordelia.id, message_user_ids)
mention_user_ids = user_info["mention_user_ids"]
self.assertEqual(mention_user_ids, {cordelia.id})
    def test_edit_cases(self) -> None:
        """This test verifies the accuracy of construction of Zulip's edit
        history data structures.

        It performs six edits (content, topic, stream, and combinations)
        and checks both the stored message.edit_history (newest-first) and
        the /history API response for each one.
        """
        self.login("hamlet")
        hamlet = self.example_user("hamlet")
        stream_1 = self.make_stream("stream 1")
        stream_2 = self.make_stream("stream 2")
        stream_3 = self.make_stream("stream 3")
        self.subscribe(hamlet, stream_1.name)
        self.subscribe(hamlet, stream_2.name)
        self.subscribe(hamlet, stream_3.name)
        msg_id = self.send_stream_message(
            self.example_user("hamlet"), "stream 1", topic_name="topic 1", content="content 1"
        )
        # Edit 1 (hamlet): content only.
        result = self.client_patch(
            f"/json/messages/{msg_id}",
            {
                "content": "content 2",
            },
        )
        self.assert_json_success(result)
        history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
        self.assertEqual(history[0]["prev_content"], "content 1")
        self.assertEqual(history[0]["user_id"], hamlet.id)
        self.assertEqual(
            set(history[0].keys()),
            {
                "timestamp",
                "prev_content",
                "user_id",
                "prev_rendered_content",
                "prev_rendered_content_version",
            },
        )
        # Edit 2 (hamlet): topic only.
        result = self.client_patch(
            f"/json/messages/{msg_id}",
            {
                "topic": "topic 2",
            },
        )
        self.assert_json_success(result)
        history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
        self.assertEqual(history[0]["prev_topic"], "topic 1")
        self.assertEqual(history[0]["topic"], "topic 2")
        self.assertEqual(history[0]["user_id"], hamlet.id)
        self.assertEqual(
            set(history[0].keys()),
            {"timestamp", "prev_topic", "topic", "user_id"},
        )
        # Edit 3 (iago): stream move only.
        self.login("iago")
        result = self.client_patch(
            f"/json/messages/{msg_id}",
            {
                "stream_id": stream_2.id,
            },
        )
        self.assert_json_success(result)
        history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
        self.assertEqual(history[0]["prev_stream"], stream_1.id)
        self.assertEqual(history[0]["stream"], stream_2.id)
        self.assertEqual(history[0]["user_id"], self.example_user("iago").id)
        self.assertEqual(set(history[0].keys()), {"timestamp", "prev_stream", "stream", "user_id"})
        # Edit 4 (hamlet): content and topic together.
        self.login("hamlet")
        result = self.client_patch(
            f"/json/messages/{msg_id}",
            {
                "content": "content 3",
                "topic": "topic 3",
            },
        )
        self.assert_json_success(result)
        history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
        self.assertEqual(history[0]["prev_content"], "content 2")
        self.assertEqual(history[0]["prev_topic"], "topic 2")
        self.assertEqual(history[0]["topic"], "topic 3")
        self.assertEqual(history[0]["user_id"], hamlet.id)
        self.assertEqual(
            set(history[0].keys()),
            {
                "timestamp",
                "prev_topic",
                "topic",
                "prev_content",
                "user_id",
                "prev_rendered_content",
                "prev_rendered_content_version",
            },
        )
        # Edit 5 (hamlet): content only.
        result = self.client_patch(
            f"/json/messages/{msg_id}",
            {
                "content": "content 4",
            },
        )
        self.assert_json_success(result)
        history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
        self.assertEqual(history[0]["prev_content"], "content 3")
        self.assertEqual(history[0]["user_id"], hamlet.id)
        # Edit 6 (iago): topic and stream together.
        self.login("iago")
        result = self.client_patch(
            f"/json/messages/{msg_id}",
            {
                "topic": "topic 4",
                "stream_id": stream_3.id,
            },
        )
        self.assert_json_success(result)
        history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
        self.assertEqual(history[0]["prev_topic"], "topic 3")
        self.assertEqual(history[0]["topic"], "topic 4")
        self.assertEqual(history[0]["prev_stream"], stream_2.id)
        self.assertEqual(history[0]["stream"], stream_3.id)
        self.assertEqual(history[0]["user_id"], self.example_user("iago").id)
        self.assertEqual(
            set(history[0].keys()),
            {
                "timestamp",
                "prev_topic",
                "topic",
                "prev_stream",
                "stream",
                "user_id",
            },
        )
        # Now, we verify that all of the edits stored in the message.edit_history
        # have the correct data structure
        # (edit_history is newest-first: index 0 is edit 6, index 5 is edit 1).
        history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
        self.assertEqual(history[0]["prev_topic"], "topic 3")
        self.assertEqual(history[0]["topic"], "topic 4")
        self.assertEqual(history[0]["stream"], stream_3.id)
        self.assertEqual(history[0]["prev_stream"], stream_2.id)
        self.assertEqual(history[1]["prev_content"], "content 3")
        self.assertEqual(history[2]["prev_topic"], "topic 2")
        self.assertEqual(history[2]["topic"], "topic 3")
        self.assertEqual(history[2]["prev_content"], "content 2")
        self.assertEqual(history[3]["stream"], stream_2.id)
        self.assertEqual(history[3]["prev_stream"], stream_1.id)
        self.assertEqual(history[4]["prev_topic"], "topic 1")
        self.assertEqual(history[4]["topic"], "topic 2")
        self.assertEqual(history[5]["prev_content"], "content 1")
        # Now, we verify that the edit history data sent back has the
        # correct filled-out fields
        message_edit_history = self.client_get(f"/json/messages/{msg_id}/history")
        json_response = orjson.loads(message_edit_history.content)
        # We reverse the message history view output so that the IDs line up with the above.
        message_history = list(reversed(json_response["message_history"]))
        i = 0
        for entry in message_history:
            expected_entries = {"content", "rendered_content", "topic", "timestamp", "user_id"}
            if i in {0, 2, 4}:
                expected_entries.add("prev_topic")
                expected_entries.add("topic")
            if i in {1, 2, 5}:
                expected_entries.add("prev_content")
                expected_entries.add("prev_rendered_content")
                expected_entries.add("content_html_diff")
            if i in {0, 3}:
                expected_entries.add("prev_stream")
                expected_entries.add("stream")
            i += 1
            self.assertEqual(expected_entries, set(entry.keys()))
        # Seven entries: six edits plus the original message (index 6).
        self.assert_length(message_history, 7)
        self.assertEqual(message_history[0]["topic"], "topic 4")
        self.assertEqual(message_history[0]["prev_topic"], "topic 3")
        self.assertEqual(message_history[0]["stream"], stream_3.id)
        self.assertEqual(message_history[0]["prev_stream"], stream_2.id)
        self.assertEqual(message_history[0]["content"], "content 4")
        self.assertEqual(message_history[1]["topic"], "topic 3")
        self.assertEqual(message_history[1]["content"], "content 4")
        self.assertEqual(message_history[1]["prev_content"], "content 3")
        self.assertEqual(message_history[2]["topic"], "topic 3")
        self.assertEqual(message_history[2]["prev_topic"], "topic 2")
        self.assertEqual(message_history[2]["content"], "content 3")
        self.assertEqual(message_history[2]["prev_content"], "content 2")
        self.assertEqual(message_history[3]["topic"], "topic 2")
        self.assertEqual(message_history[3]["stream"], stream_2.id)
        self.assertEqual(message_history[3]["prev_stream"], stream_1.id)
        self.assertEqual(message_history[3]["content"], "content 2")
        self.assertEqual(message_history[4]["topic"], "topic 2")
        self.assertEqual(message_history[4]["prev_topic"], "topic 1")
        self.assertEqual(message_history[4]["content"], "content 2")
        self.assertEqual(message_history[5]["topic"], "topic 1")
        self.assertEqual(message_history[5]["content"], "content 2")
        self.assertEqual(message_history[5]["prev_content"], "content 1")
        self.assertEqual(message_history[6]["content"], "content 1")
        self.assertEqual(message_history[6]["topic"], "topic 1")
    def test_edit_message_content_limit(self) -> None:
        """Exercise the interaction of the allow_message_editing and
        message_content_edit_limit_seconds realm settings on a message
        whose send time is backdated by 180 seconds."""
        def set_message_editing_params(
            allow_message_editing: bool,
            message_content_edit_limit_seconds: int,
            edit_topic_policy: int,
        ) -> None:
            # Update the realm's message-editing settings via the API
            # (uses whoever is currently logged in).
            result = self.client_patch(
                "/json/realm",
                {
                    "allow_message_editing": orjson.dumps(allow_message_editing).decode(),
                    "message_content_edit_limit_seconds": message_content_edit_limit_seconds,
                    "edit_topic_policy": edit_topic_policy,
                },
            )
            self.assert_json_success(result)
        def do_edit_message_assert_success(
            id_: int, unique_str: str, topic_only: bool = False
        ) -> None:
            # Edit the topic (and, unless topic_only, the content) and
            # verify the change was saved.
            new_topic = "topic" + unique_str
            new_content = "content" + unique_str
            params_dict = {"topic": new_topic}
            if not topic_only:
                params_dict["content"] = new_content
            result = self.client_patch(f"/json/messages/{id_}", params_dict)
            self.assert_json_success(result)
            if topic_only:
                self.check_topic(id_, topic_name=new_topic)
            else:
                self.check_message(id_, topic_name=new_topic, content=new_content)
        def do_edit_message_assert_error(
            id_: int, unique_str: str, error: str, topic_only: bool = False
        ) -> None:
            # Attempt an edit, expect `error`, and verify that neither the
            # topic nor the content changed.
            message = Message.objects.get(id=id_)
            old_topic = message.topic_name()
            old_content = message.content
            new_topic = "topic" + unique_str
            new_content = "content" + unique_str
            params_dict = {"topic": new_topic}
            if not topic_only:
                params_dict["content"] = new_content
            result = self.client_patch(f"/json/messages/{id_}", params_dict)
            message = Message.objects.get(id=id_)
            self.assert_json_error(result, error)
            msg = Message.objects.get(id=id_)
            self.assertEqual(msg.topic_name(), old_topic)
            self.assertEqual(msg.content, old_content)
        self.login("iago")
        # send a message in the past
        id_ = self.send_stream_message(
            self.example_user("iago"), "Denmark", content="content", topic_name="topic"
        )
        message = Message.objects.get(id=id_)
        message.date_sent = message.date_sent - datetime.timedelta(seconds=180)
        message.save()
        # test the various possible message editing settings
        # high enough time limit, all edits allowed
        set_message_editing_params(True, 240, Realm.POLICY_ADMINS_ONLY)
        do_edit_message_assert_success(id_, "A")
        # out of time, only topic editing allowed
        set_message_editing_params(True, 120, Realm.POLICY_ADMINS_ONLY)
        do_edit_message_assert_success(id_, "B", True)
        do_edit_message_assert_error(id_, "C", "The time limit for editing this message has passed")
        # infinite time, all edits allowed
        set_message_editing_params(True, 0, Realm.POLICY_ADMINS_ONLY)
        do_edit_message_assert_success(id_, "D")
        # without allow_message_editing, nothing is allowed
        set_message_editing_params(False, 240, Realm.POLICY_ADMINS_ONLY)
        do_edit_message_assert_error(
            id_, "E", "Your organization has turned off message editing", True
        )
        set_message_editing_params(False, 120, Realm.POLICY_ADMINS_ONLY)
        do_edit_message_assert_error(
            id_, "F", "Your organization has turned off message editing", True
        )
        set_message_editing_params(False, 0, Realm.POLICY_ADMINS_ONLY)
        do_edit_message_assert_error(
            id_, "G", "Your organization has turned off message editing", True
        )
def test_edit_topic_policy(self) -> None:
    """Verify which roles may edit a message's topic for each value of the
    realm's edit_topic_policy setting, plus interactions with the
    allow_message_editing flag and the 72-hour topic-edit time limit.
    """

    def set_message_editing_params(
        allow_message_editing: bool,
        message_content_edit_limit_seconds: int,
        edit_topic_policy: int,
    ) -> None:
        # Configure the realm's editing settings as Iago (an admin).
        self.login("iago")
        result = self.client_patch(
            "/json/realm",
            {
                "allow_message_editing": orjson.dumps(allow_message_editing).decode(),
                "message_content_edit_limit_seconds": message_content_edit_limit_seconds,
                "edit_topic_policy": edit_topic_policy,
            },
        )
        self.assert_json_success(result)

    def do_edit_message_assert_success(id_: int, unique_str: str, acting_user: str) -> None:
        # acting_user edits the topic of message id_; the edit must succeed.
        self.login(acting_user)
        new_topic = "topic" + unique_str
        params_dict = {"topic": new_topic}
        result = self.client_patch(f"/json/messages/{id_}", params_dict)
        self.assert_json_success(result)
        self.check_topic(id_, topic_name=new_topic)

    def do_edit_message_assert_error(
        id_: int, unique_str: str, error: str, acting_user: str
    ) -> None:
        # acting_user attempts the topic edit; the request must fail with
        # `error` and leave both the topic and content untouched.
        self.login(acting_user)
        message = Message.objects.get(id=id_)
        old_topic = message.topic_name()
        old_content = message.content
        new_topic = "topic" + unique_str
        params_dict = {"topic": new_topic}
        result = self.client_patch(f"/json/messages/{id_}", params_dict)
        message = Message.objects.get(id=id_)
        self.assert_json_error(result, error)
        msg = Message.objects.get(id=id_)
        self.assertEqual(msg.topic_name(), old_topic)
        self.assertEqual(msg.content, old_content)

    # send a message in the past
    id_ = self.send_stream_message(
        self.example_user("hamlet"), "Denmark", content="content", topic_name="topic"
    )
    message = Message.objects.get(id=id_)
    message.date_sent = message.date_sent - datetime.timedelta(seconds=180)
    message.save()

    # Guest user must be subscribed to the stream to access the message.
    polonius = self.example_user("polonius")
    self.subscribe(polonius, "Denmark")

    # any user can edit the topic of a message
    set_message_editing_params(True, 0, Realm.POLICY_EVERYONE)
    do_edit_message_assert_success(id_, "A", "polonius")

    # only members can edit topic of a message
    set_message_editing_params(True, 0, Realm.POLICY_MEMBERS_ONLY)
    do_edit_message_assert_error(
        id_, "B", "You don't have permission to edit this message", "polonius"
    )
    do_edit_message_assert_success(id_, "B", "cordelia")

    # only full members can edit topic of a message
    set_message_editing_params(True, 0, Realm.POLICY_FULL_MEMBERS_ONLY)

    cordelia = self.example_user("cordelia")
    # Cordelia's tenure (9 days) is below the 10-day waiting period, so she
    # is not yet a "full member".
    do_set_realm_property(cordelia.realm, "waiting_period_threshold", 10, acting_user=None)

    cordelia.date_joined = timezone_now() - datetime.timedelta(days=9)
    cordelia.save()
    do_edit_message_assert_error(
        id_, "C", "You don't have permission to edit this message", "cordelia"
    )
    # After 11 days she crosses the threshold and becomes a full member.
    cordelia.date_joined = timezone_now() - datetime.timedelta(days=11)
    cordelia.save()
    do_edit_message_assert_success(id_, "C", "cordelia")

    # only moderators can edit topic of a message
    set_message_editing_params(True, 0, Realm.POLICY_MODERATORS_ONLY)
    do_edit_message_assert_error(
        id_, "D", "You don't have permission to edit this message", "cordelia"
    )
    do_edit_message_assert_success(id_, "D", "shiva")

    # only admins can edit the topics of messages
    set_message_editing_params(True, 0, Realm.POLICY_ADMINS_ONLY)
    do_edit_message_assert_error(
        id_, "E", "You don't have permission to edit this message", "shiva"
    )
    do_edit_message_assert_success(id_, "E", "iago")

    # users cannot edit topics if allow_message_editing is False
    set_message_editing_params(False, 0, Realm.POLICY_EVERYONE)
    do_edit_message_assert_error(
        id_, "D", "Your organization has turned off message editing", "cordelia"
    )

    # non-admin users cannot edit topics sent > 72 hrs ago
    message.date_sent = message.date_sent - datetime.timedelta(seconds=290000)
    message.save()
    set_message_editing_params(True, 0, Realm.POLICY_EVERYONE)
    do_edit_message_assert_success(id_, "E", "iago")
    do_edit_message_assert_success(id_, "F", "shiva")
    do_edit_message_assert_error(
        id_, "G", "The time limit for editing this message's topic has passed", "cordelia"
    )

    # anyone should be able to edit "no topic" indefinitely
    message.set_topic_name("(no topic)")
    message.save()
    do_edit_message_assert_success(id_, "D", "cordelia")
@mock.patch("zerver.actions.message_edit.send_event")
def test_edit_topic_public_history_stream(self, mock_send_event: mock.MagicMock) -> None:
    """Check exactly which users are notified when a topic is edited in a
    stream with public history: subscribers without a UserMessage row,
    unsubscribed users with a UserMessage row, but not long-term-idle users.
    """
    stream_name = "Macbeth"
    hamlet = self.example_user("hamlet")
    cordelia = self.example_user("cordelia")
    self.make_stream(stream_name, history_public_to_subscribers=True)
    self.subscribe(hamlet, stream_name)
    self.login_user(hamlet)
    message_id = self.send_stream_message(hamlet, stream_name, "Where am I?")

    # Cordelia subscribes only after the message was sent, so she has no
    # UserMessage row for it.
    self.login_user(cordelia)
    self.subscribe(cordelia, stream_name)
    message = Message.objects.get(id=message_id)

    def do_update_message_topic_success(
        user_profile: UserProfile,
        message: Message,
        topic_name: str,
        users_to_be_notified: List[Dict[str, Any]],
    ) -> None:
        # Perform the topic edit directly via do_update_message and assert
        # the resulting event targets exactly users_to_be_notified.
        do_update_message(
            user_profile=user_profile,
            target_message=message,
            new_stream=None,
            topic_name=topic_name,
            propagate_mode="change_later",
            send_notification_to_old_thread=False,
            send_notification_to_new_thread=False,
            content=None,
            rendering_result=None,
            prior_mention_user_ids=set(),
            mention_data=None,
        )

        mock_send_event.assert_called_with(mock.ANY, mock.ANY, users_to_be_notified)

    # Returns the users that need to be notified when a message topic is changed
    def notify(user_id: int) -> Dict[str, Any]:
        um = UserMessage.objects.get(message=message_id)
        if um.user_profile_id == user_id:
            return {
                "id": user_id,
                "flags": um.flags_list(),
            }
        else:
            return {
                "id": user_id,
                "flags": ["read"],
            }

    users_to_be_notified = list(map(notify, [hamlet.id, cordelia.id]))
    # Edit topic of a message sent before Cordelia subscribed the stream
    do_update_message_topic_success(
        cordelia, message, "Othello eats apple", users_to_be_notified
    )

    # If Cordelia is long-term idle, she doesn't get a notification.
    cordelia.long_term_idle = True
    cordelia.save()
    users_to_be_notified = list(map(notify, [hamlet.id]))
    do_update_message_topic_success(
        cordelia, message, "Another topic idle", users_to_be_notified
    )
    cordelia.long_term_idle = False
    cordelia.save()

    # Even if Hamlet unsubscribes the stream, he should be notified when the topic is changed
    # because he has a UserMessage row.
    self.unsubscribe(hamlet, stream_name)
    users_to_be_notified = list(map(notify, [hamlet.id, cordelia.id]))
    do_update_message_topic_success(cordelia, message, "Another topic", users_to_be_notified)

    # Hamlet subscribes to the stream again and Cordelia unsubscribes, then Hamlet changes
    # the message topic. Cordelia won't receive any updates when a message on that stream is
    # changed because she is not a subscriber and doesn't have a UserMessage row.
    self.subscribe(hamlet, stream_name)
    self.unsubscribe(cordelia, stream_name)
    self.login_user(hamlet)
    users_to_be_notified = list(map(notify, [hamlet.id]))
    do_update_message_topic_success(hamlet, message, "Change again", users_to_be_notified)
@mock.patch("zerver.actions.message_edit.send_event")
def test_edit_muted_topic(self, mock_send_event: mock.MagicMock) -> None:
    """Check that MutedTopic records follow a topic when it is renamed or
    moved between streams (for change_all/change_later), are dropped for
    users who lose access to the destination stream, and that the query
    counts of the various move code paths stay stable.
    """
    stream_name = "Stream 123"
    stream = self.make_stream(stream_name)
    hamlet = self.example_user("hamlet")
    cordelia = self.example_user("cordelia")
    aaron = self.example_user("aaron")
    self.subscribe(hamlet, stream_name)
    self.login_user(hamlet)
    message_id = self.send_stream_message(
        hamlet, stream_name, topic_name="Topic1", content="Hello World"
    )
    self.subscribe(cordelia, stream_name)
    self.login_user(cordelia)
    self.subscribe(aaron, stream_name)
    self.login_user(aaron)

    # Hamlet and Cordelia mute several topics; Aaron mutes nothing, so his
    # assertions below always expect "not muted".
    already_muted_topic = "Already muted topic"
    muted_topics = [
        [stream_name, "Topic1"],
        [stream_name, "Topic2"],
        [stream_name, already_muted_topic],
    ]
    set_topic_mutes(hamlet, muted_topics)
    set_topic_mutes(cordelia, muted_topics)

    # Returns the users that need to be notified when a message topic is changed
    def notify(user_id: int) -> Dict[str, Any]:
        um = UserMessage.objects.get(message=message_id)
        if um.user_profile_id == user_id:
            return {
                "id": user_id,
                "flags": um.flags_list(),
            }
        else:
            return {
                "id": user_id,
                "flags": ["read"],
            }

    users_to_be_notified = list(map(notify, [hamlet.id, cordelia.id, aaron.id]))
    change_all_topic_name = "Topic 1 edited"
    with queries_captured() as queries:
        check_update_message(
            user_profile=hamlet,
            message_id=message_id,
            stream_id=None,
            topic_name=change_all_topic_name,
            propagate_mode="change_all",
            send_notification_to_old_thread=False,
            send_notification_to_new_thread=False,
            content=None,
        )

    # This code path adds 9 (1 + 4/user with muted topics) to
    # the number of database queries for moving a topic.
    self.assert_length(queries, 18)

    # Augment the expected notification payloads with each muting user's
    # updated muted_topics state.
    for muting_user in get_users_muting_topic(stream.id, change_all_topic_name):
        for user in users_to_be_notified:
            if muting_user.id == user["id"]:
                user["muted_topics"] = get_topic_mutes(muting_user)
                break

    # A change_all rename moves the mute from "Topic1" to the new name for
    # the users who had muted it; unrelated mutes ("Topic2") are untouched.
    self.assertFalse(topic_is_muted(hamlet, stream.id, "Topic1"))
    self.assertFalse(topic_is_muted(cordelia, stream.id, "Topic1"))
    self.assertFalse(topic_is_muted(aaron, stream.id, "Topic1"))
    self.assertTrue(topic_is_muted(hamlet, stream.id, "Topic2"))
    self.assertTrue(topic_is_muted(cordelia, stream.id, "Topic2"))
    self.assertFalse(topic_is_muted(aaron, stream.id, "Topic2"))
    self.assertTrue(topic_is_muted(hamlet, stream.id, change_all_topic_name))
    self.assertTrue(topic_is_muted(cordelia, stream.id, change_all_topic_name))
    self.assertFalse(topic_is_muted(aaron, stream.id, change_all_topic_name))

    # change_later also migrates the mute, since the whole remaining topic moves.
    change_later_topic_name = "Topic 1 edited again"
    check_update_message(
        user_profile=hamlet,
        message_id=message_id,
        stream_id=None,
        topic_name=change_later_topic_name,
        propagate_mode="change_later",
        send_notification_to_old_thread=False,
        send_notification_to_new_thread=False,
        content=None,
    )
    self.assertFalse(topic_is_muted(hamlet, stream.id, change_all_topic_name))
    self.assertTrue(topic_is_muted(hamlet, stream.id, change_later_topic_name))

    # Make sure we safely handle the case of the new topic being already muted.
    check_update_message(
        user_profile=hamlet,
        message_id=message_id,
        stream_id=None,
        topic_name=already_muted_topic,
        propagate_mode="change_all",
        send_notification_to_old_thread=False,
        send_notification_to_new_thread=False,
        content=None,
    )
    self.assertFalse(topic_is_muted(hamlet, stream.id, change_later_topic_name))
    self.assertTrue(topic_is_muted(hamlet, stream.id, already_muted_topic))

    # change_one moves a single message, so the mute follows the new topic name.
    change_one_topic_name = "Topic 1 edited change_one"
    check_update_message(
        user_profile=hamlet,
        message_id=message_id,
        stream_id=None,
        topic_name=change_one_topic_name,
        propagate_mode="change_one",
        send_notification_to_old_thread=False,
        send_notification_to_new_thread=False,
        content=None,
    )
    self.assertTrue(topic_is_muted(hamlet, stream.id, change_one_topic_name))
    self.assertFalse(topic_is_muted(hamlet, stream.id, change_later_topic_name))

    # Move topic between two public streams.
    desdemona = self.example_user("desdemona")
    message_id = self.send_stream_message(
        hamlet, stream_name, topic_name="New topic", content="Hello World"
    )
    new_public_stream = self.make_stream("New public stream")
    self.subscribe(desdemona, new_public_stream.name)
    self.login_user(desdemona)
    muted_topics = [
        [stream_name, "New topic"],
    ]
    set_topic_mutes(desdemona, muted_topics)
    set_topic_mutes(cordelia, muted_topics)

    with queries_captured() as queries:
        check_update_message(
            user_profile=desdemona,
            message_id=message_id,
            stream_id=new_public_stream.id,
            propagate_mode="change_all",
            send_notification_to_old_thread=False,
            send_notification_to_new_thread=False,
            content=None,
        )
    self.assert_length(queries, 31)

    # Both muting users keep their mute, now pointing at the new stream.
    self.assertFalse(topic_is_muted(desdemona, stream.id, "New topic"))
    self.assertFalse(topic_is_muted(cordelia, stream.id, "New topic"))
    self.assertFalse(topic_is_muted(aaron, stream.id, "New topic"))
    self.assertTrue(topic_is_muted(desdemona, new_public_stream.id, "New topic"))
    self.assertTrue(topic_is_muted(cordelia, new_public_stream.id, "New topic"))
    self.assertFalse(topic_is_muted(aaron, new_public_stream.id, "New topic"))

    # Move topic to a private stream.
    message_id = self.send_stream_message(
        hamlet, stream_name, topic_name="New topic", content="Hello World"
    )
    new_private_stream = self.make_stream("New private stream", invite_only=True)
    self.subscribe(desdemona, new_private_stream.name)
    self.login_user(desdemona)
    muted_topics = [
        [stream_name, "New topic"],
    ]
    set_topic_mutes(desdemona, muted_topics)
    set_topic_mutes(cordelia, muted_topics)

    with queries_captured() as queries:
        check_update_message(
            user_profile=desdemona,
            message_id=message_id,
            stream_id=new_private_stream.id,
            propagate_mode="change_all",
            send_notification_to_old_thread=False,
            send_notification_to_new_thread=False,
            content=None,
        )
    self.assert_length(queries, 33)

    # Cordelia is not subscribed to the private stream, so
    # Cordelia should have had the topic unmuted, while Desdemona
    # should have had her muted topic record moved.
    self.assertFalse(topic_is_muted(desdemona, stream.id, "New topic"))
    self.assertFalse(topic_is_muted(cordelia, stream.id, "New topic"))
    self.assertFalse(topic_is_muted(aaron, stream.id, "New topic"))
    self.assertTrue(topic_is_muted(desdemona, new_private_stream.id, "New topic"))
    self.assertFalse(topic_is_muted(cordelia, new_private_stream.id, "New topic"))
    self.assertFalse(topic_is_muted(aaron, new_private_stream.id, "New topic"))

    # Move topic between two public streams with change in topic name.
    desdemona = self.example_user("desdemona")
    message_id = self.send_stream_message(
        hamlet, stream_name, topic_name="New topic 2", content="Hello World"
    )
    self.login_user(desdemona)
    muted_topics = [
        [stream_name, "New topic 2"],
    ]
    set_topic_mutes(desdemona, muted_topics)
    set_topic_mutes(cordelia, muted_topics)

    with queries_captured() as queries:
        check_update_message(
            user_profile=desdemona,
            message_id=message_id,
            stream_id=new_public_stream.id,
            topic_name="changed topic name",
            propagate_mode="change_all",
            send_notification_to_old_thread=False,
            send_notification_to_new_thread=False,
            content=None,
        )
    self.assert_length(queries, 31)

    self.assertFalse(topic_is_muted(desdemona, stream.id, "New topic 2"))
    self.assertFalse(topic_is_muted(cordelia, stream.id, "New topic 2"))
    self.assertFalse(topic_is_muted(aaron, stream.id, "New topic 2"))
    self.assertTrue(topic_is_muted(desdemona, new_public_stream.id, "changed topic name"))
    self.assertTrue(topic_is_muted(cordelia, new_public_stream.id, "changed topic name"))
    self.assertFalse(topic_is_muted(aaron, new_public_stream.id, "changed topic name"))

    # Moving only half the messages doesn't move MutedTopic records.
    second_message_id = self.send_stream_message(
        hamlet, stream_name, topic_name="changed topic name", content="Second message"
    )
    with queries_captured() as queries:
        check_update_message(
            user_profile=desdemona,
            message_id=second_message_id,
            stream_id=new_public_stream.id,
            topic_name="final topic name",
            propagate_mode="change_later",
            send_notification_to_old_thread=False,
            send_notification_to_new_thread=False,
            content=None,
        )
    self.assert_length(queries, 25)

    self.assertTrue(topic_is_muted(desdemona, new_public_stream.id, "changed topic name"))
    self.assertTrue(topic_is_muted(cordelia, new_public_stream.id, "changed topic name"))
    self.assertFalse(topic_is_muted(aaron, new_public_stream.id, "changed topic name"))
    self.assertFalse(topic_is_muted(desdemona, new_public_stream.id, "final topic name"))
    self.assertFalse(topic_is_muted(cordelia, new_public_stream.id, "final topic name"))
    self.assertFalse(topic_is_muted(aaron, new_public_stream.id, "final topic name"))
@mock.patch("zerver.actions.message_edit.send_event")
def test_wildcard_mention(self, mock_send_event: mock.MagicMock) -> None:
    """Editing a message to add @**everyone** should set
    wildcard_mention_user_ids on the update_message event and mark all
    subscribers' flags accordingly.
    """
    stream_name = "Macbeth"
    hamlet = self.example_user("hamlet")
    cordelia = self.example_user("cordelia")
    self.make_stream(stream_name, history_public_to_subscribers=True)
    self.subscribe(hamlet, stream_name)
    self.subscribe(cordelia, stream_name)
    self.login_user(hamlet)
    message_id = self.send_stream_message(hamlet, stream_name, "Hello everyone")

    def notify(user_id: int) -> Dict[str, Any]:
        # Expected per-user notification payload after the edit.
        return {
            "id": user_id,
            "flags": ["wildcard_mentioned"],
        }

    users_to_be_notified = sorted(map(notify, [cordelia.id, hamlet.id]), key=itemgetter("id"))
    result = self.client_patch(
        f"/json/messages/{message_id}",
        {
            "content": "Hello @**everyone**",
        },
    )
    self.assert_json_success(result)

    # Extract the send_event call where event type is 'update_message'.
    # Here we assert wildcard_mention_user_ids has been set properly.
    called = False
    for call_args in mock_send_event.call_args_list:
        (arg_realm, arg_event, arg_notified_users) = call_args[0]
        if arg_event["type"] == "update_message":
            self.assertEqual(arg_event["type"], "update_message")
            self.assertEqual(arg_event["wildcard_mention_user_ids"], [cordelia.id, hamlet.id])
            self.assertEqual(
                sorted(arg_notified_users, key=itemgetter("id")), users_to_be_notified
            )
            called = True
    # Guard against the loop never finding an update_message event.
    self.assertTrue(called)
def test_wildcard_mention_restrictions_when_editing(self) -> None:
    """Editing a message to add a wildcard mention is subject to the realm's
    wildcard_mention_policy, which only applies above a stream-size
    threshold (mocked via num_subscribers_for_stream_id).
    """
    cordelia = self.example_user("cordelia")
    shiva = self.example_user("shiva")
    self.login("cordelia")
    stream_name = "Macbeth"
    self.make_stream(stream_name, history_public_to_subscribers=True)
    self.subscribe(cordelia, stream_name)
    self.subscribe(shiva, stream_name)
    message_id = self.send_stream_message(cordelia, stream_name, "Hello everyone")

    realm = cordelia.realm
    do_set_realm_property(
        realm,
        "wildcard_mention_policy",
        Realm.WILDCARD_MENTION_POLICY_MODERATORS,
        acting_user=None,
    )

    # A plain member cannot add a wildcard mention in a large stream.
    # (URLs use f-strings for consistency with the rest of this file.)
    with mock.patch("zerver.lib.message.num_subscribers_for_stream_id", return_value=17):
        result = self.client_patch(
            f"/json/messages/{message_id}",
            {
                "content": "Hello @**everyone**",
            },
        )
        self.assert_json_error(
            result, "You do not have permission to use wildcard mentions in this stream."
        )

    # Small streams are below the threshold, so the policy does not apply.
    with mock.patch("zerver.lib.message.num_subscribers_for_stream_id", return_value=14):
        result = self.client_patch(
            f"/json/messages/{message_id}",
            {
                "content": "Hello @**everyone**",
            },
        )
        self.assert_json_success(result)

    # Moderators may use wildcard mentions regardless of stream size.
    self.login("shiva")
    message_id = self.send_stream_message(shiva, stream_name, "Hi everyone")
    with mock.patch("zerver.lib.message.num_subscribers_for_stream_id", return_value=17):
        result = self.client_patch(
            f"/json/messages/{message_id}",
            {
                "content": "Hello @**everyone**",
            },
        )
        self.assert_json_success(result)
def test_topic_edit_history_saved_in_all_message(self) -> None:
    """Propagated topic edits (change_later) should append an edit-history
    entry to every affected message and leave unrelated messages with no
    edit history at all.
    """
    self.login("hamlet")
    # id1, id2, id5 share stream+topic and will be moved; id3 (other
    # stream) and id4 (other topic) must remain untouched.
    id1 = self.send_stream_message(self.example_user("hamlet"), "Denmark", topic_name="topic1")
    id2 = self.send_stream_message(self.example_user("iago"), "Denmark", topic_name="topic1")
    id3 = self.send_stream_message(self.example_user("iago"), "Verona", topic_name="topic1")
    id4 = self.send_stream_message(self.example_user("hamlet"), "Denmark", topic_name="topic2")
    id5 = self.send_stream_message(self.example_user("iago"), "Denmark", topic_name="topic1")

    def verify_edit_history(new_topic: str, len_edit_history: int) -> None:
        for msg_id in [id1, id2, id5]:
            msg = Message.objects.get(id=msg_id)

            self.assertEqual(
                new_topic,
                msg.topic_name(),
            )
            # Since edit history is being generated by do_update_message,
            # its contents can vary over time; so, to keep this test
            # future proof, we only verify its length.
            self.assert_length(orjson.loads(msg.edit_history), len_edit_history)

        for msg_id in [id3, id4]:
            msg = Message.objects.get(id=msg_id)
            self.assertEqual(msg.edit_history, None)

    new_topic = "edited"
    result = self.client_patch(
        f"/json/messages/{id1}",
        {
            "topic": new_topic,
            "propagate_mode": "change_later",
        },
    )
    self.assert_json_success(result)
    verify_edit_history(new_topic, 1)

    # A second edit should append, not replace, the history entry.
    new_topic = "edited2"
    result = self.client_patch(
        f"/json/messages/{id1}",
        {
            "topic": new_topic,
            "propagate_mode": "change_later",
        },
    )
    self.assert_json_success(result)
    verify_edit_history(new_topic, 2)
def test_topic_and_content_edit(self) -> None:
    """When topic and content are edited together with change_later, only
    the edited message records a content change (prev_content) in its edit
    history, while the topic change is recorded on all moved messages.
    """
    self.login("hamlet")
    id1 = self.send_stream_message(self.example_user("hamlet"), "Denmark", "message 1", "topic")
    id2 = self.send_stream_message(self.example_user("iago"), "Denmark", "message 2", "topic")
    id3 = self.send_stream_message(self.example_user("hamlet"), "Denmark", "message 3", "topic")

    new_topic = "edited"
    # f-string URL for consistency with the rest of this file.
    result = self.client_patch(
        f"/json/messages/{id1}",
        {
            "topic": new_topic,
            "propagate_mode": "change_later",
            "content": "edited message",
        },
    )
    self.assert_json_success(result)

    # Content change of only id1 should come in edit history
    # and topic change should be present in all the messages.
    msg1 = Message.objects.get(id=id1)
    msg2 = Message.objects.get(id=id2)
    msg3 = Message.objects.get(id=id3)

    msg1_edit_history = orjson.loads(msg1.edit_history)
    # assertIn/assertNotIn give informative failure messages, and dict
    # membership tests don't need `.keys()`.
    self.assertIn("prev_content", msg1_edit_history[0])
    for msg in [msg2, msg3]:
        self.assertNotIn("prev_content", orjson.loads(msg.edit_history)[0])

    for msg in [msg1, msg2, msg3]:
        self.assertEqual(
            new_topic,
            msg.topic_name(),
        )
        self.assert_length(orjson.loads(msg.edit_history), 1)
def test_propagate_topic_forward(self) -> None:
    """With propagate_mode=change_later, the rename applies to the edited
    message and every later message in the same stream/topic, but not to
    other streams or other topics.
    """
    self.login("hamlet")
    hamlet = self.example_user("hamlet")
    iago = self.example_user("iago")

    first_id = self.send_stream_message(hamlet, "Denmark", topic_name="topic1")
    second_id = self.send_stream_message(iago, "Denmark", topic_name="topic1")
    other_stream_id = self.send_stream_message(iago, "Verona", topic_name="topic1")
    other_topic_id = self.send_stream_message(hamlet, "Denmark", topic_name="topic2")
    later_id = self.send_stream_message(iago, "Denmark", topic_name="topic1")

    response = self.client_patch(
        f"/json/messages/{first_id}",
        {
            "topic": "edited",
            "propagate_mode": "change_later",
        },
    )
    self.assert_json_success(response)

    # (message id, topic expected after the edit)
    expectations = [
        (first_id, "edited"),
        (second_id, "edited"),
        (other_stream_id, "topic1"),
        (other_topic_id, "topic2"),
        (later_id, "edited"),
    ]
    for message_id, expected_topic in expectations:
        self.check_topic(message_id, topic_name=expected_topic)
def test_propagate_all_topics(self) -> None:
    """With propagate_mode=change_all, the rename applies to every message
    in the same stream/topic — earlier and later — but not to other
    streams or other topics.
    """
    self.login("hamlet")
    hamlet = self.example_user("hamlet")
    iago = self.example_user("iago")

    earlier_id = self.send_stream_message(hamlet, "Denmark", topic_name="topic1")
    edited_id = self.send_stream_message(hamlet, "Denmark", topic_name="topic1")
    other_stream_id = self.send_stream_message(iago, "Verona", topic_name="topic1")
    other_topic_id = self.send_stream_message(hamlet, "Denmark", topic_name="topic2")
    later_id = self.send_stream_message(iago, "Denmark", topic_name="topic1")
    third_topic_id = self.send_stream_message(iago, "Denmark", topic_name="topic3")

    response = self.client_patch(
        f"/json/messages/{edited_id}",
        {
            "topic": "edited",
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_success(response)

    # (message id, topic expected after the edit)
    expectations = [
        (earlier_id, "edited"),
        (edited_id, "edited"),
        (other_stream_id, "topic1"),
        (other_topic_id, "topic2"),
        (later_id, "edited"),
        (third_topic_id, "topic3"),
    ]
    for message_id, expected_topic in expectations:
        self.check_topic(message_id, topic_name=expected_topic)
def test_propagate_all_topics_with_different_uppercase_letters(self) -> None:
    """Topic matching for change_all is case-insensitive within the same
    stream: "topic1", "Topic1" and "toPic1" in Denmark are all renamed,
    while "topiC1" in Verona (a different stream) is untouched.
    """
    self.login("hamlet")
    hamlet = self.example_user("hamlet")
    iago = self.example_user("iago")

    lower_id = self.send_stream_message(hamlet, "Denmark", topic_name="topic1")
    edited_id = self.send_stream_message(hamlet, "Denmark", topic_name="Topic1")
    verona_id = self.send_stream_message(iago, "Verona", topic_name="topiC1")
    mixed_id = self.send_stream_message(iago, "Denmark", topic_name="toPic1")

    response = self.client_patch(
        f"/json/messages/{edited_id}",
        {
            "topic": "edited",
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_success(response)

    # (message id, topic expected after the edit)
    expectations = [
        (lower_id, "edited"),
        (edited_id, "edited"),
        (verona_id, "topiC1"),
        (mixed_id, "edited"),
    ]
    for message_id, expected_topic in expectations:
        self.check_topic(message_id, topic_name=expected_topic)
def test_move_message_to_stream(self) -> None:
    """Moving a whole topic to another stream leaves a breadcrumb message
    in the old topic and a "moved here" notification in the new one, both
    rendered in the realm's default language.
    """
    (user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
        "iago",
        "test move stream",
        "new stream",
        "test",
        # Set the user's translation language to German to test that
        # it is overridden by the realm's default language.
        "de",
    )

    result = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_all",
        },
        HTTP_ACCEPT_LANGUAGE="de",
    )

    self.assert_json_success(result)

    # Old topic retains only the breadcrumb notification.
    messages = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(messages, 1)
    self.assertEqual(
        messages[0].content,
        f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
    )

    # New topic has the 3 moved messages plus the "moved here" notification.
    messages = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(messages, 4)
    self.assertEqual(
        messages[3].content,
        f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
    )
def test_move_message_realm_admin_cant_move_to_another_realm(self) -> None:
    """Even a realm administrator must not be able to move a message into a
    stream that belongs to a different realm; the cross-realm stream id is
    treated as invalid.
    """
    user_profile = self.example_user("iago")
    self.assertEqual(user_profile.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
    self.login("iago")

    lear_realm = get_realm("lear")
    new_stream = self.make_stream("new", lear_realm)

    msg_id = self.send_stream_message(user_profile, "Verona", topic_name="test123")
    # f-string URL for consistency with the rest of this file.
    result = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_error(result, "Invalid stream id")
def test_move_message_realm_admin_cant_move_to_private_stream_without_subscription(
    self,
) -> None:
    """A realm administrator who is not subscribed to a private stream must
    not be able to move messages into it; the stream id reads as invalid
    to them.
    """
    user_profile = self.example_user("iago")
    self.assertEqual(user_profile.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
    self.login("iago")

    new_stream = self.make_stream("new", invite_only=True)
    msg_id = self.send_stream_message(user_profile, "Verona", topic_name="test123")
    # f-string URL for consistency with the rest of this file.
    result = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_error(result, "Invalid stream id")
def test_move_message_realm_admin_cant_move_from_private_stream_without_subscription(
    self,
) -> None:
    """A realm administrator who unsubscribed from a private stream loses
    access to its messages and therefore cannot move them elsewhere.
    """
    user_profile = self.example_user("iago")
    self.assertEqual(user_profile.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
    self.login("iago")

    self.make_stream("privatestream", invite_only=True)
    self.subscribe(user_profile, "privatestream")
    msg_id = self.send_stream_message(user_profile, "privatestream", topic_name="test123")
    # After unsubscribing, Iago can no longer access the message.
    self.unsubscribe(user_profile, "privatestream")

    verona = get_stream("Verona", user_profile.realm)
    # f-string URL for consistency with the rest of this file.
    result = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": verona.id,
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_error(
        result,
        "You don't have permission to move this message due to missing access to its stream",
    )
def test_move_message_from_private_stream_message_access_checks(
    self,
) -> None:
    """In a private stream without public history, change_all only moves the
    messages the acting user can access: messages sent while they were
    unsubscribed (or before they first subscribed) stay behind.
    """
    hamlet = self.example_user("hamlet")
    user_profile = self.example_user("iago")
    self.assertEqual(user_profile.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
    self.login("iago")

    private_stream = self.make_stream(
        "privatestream", invite_only=True, history_public_to_subscribers=False
    )
    self.subscribe(hamlet, "privatestream")
    # Sent before Iago subscribed — never accessible to him.
    original_msg_id = self.send_stream_message(hamlet, "privatestream", topic_name="test123")
    self.subscribe(user_profile, "privatestream")
    new_msg_id = self.send_stream_message(user_profile, "privatestream", topic_name="test123")

    # Now we unsub and hamlet sends a new message (we won't have access to it even after re-subbing!)
    self.unsubscribe(user_profile, "privatestream")
    new_inaccessible_msg_id = self.send_stream_message(
        hamlet, "privatestream", topic_name="test123"
    )

    # Re-subscribe and send another message:
    self.subscribe(user_profile, "privatestream")
    newest_msg_id = self.send_stream_message(
        user_profile, "privatestream", topic_name="test123"
    )

    verona = get_stream("Verona", user_profile.realm)

    # f-string URL for consistency with the rest of this file.
    result = self.client_patch(
        f"/json/messages/{new_msg_id}",
        {
            "stream_id": verona.id,
            "propagate_mode": "change_all",
        },
    )

    self.assert_json_success(result)
    self.assertEqual(Message.objects.get(id=new_msg_id).recipient_id, verona.recipient_id)
    self.assertEqual(Message.objects.get(id=newest_msg_id).recipient_id, verona.recipient_id)
    # The original message and the new, inaccessible message weren't moved,
    # because user_profile doesn't have access to them.
    self.assertEqual(
        Message.objects.get(id=original_msg_id).recipient_id, private_stream.recipient_id
    )
    self.assertEqual(
        Message.objects.get(id=new_inaccessible_msg_id).recipient_id,
        private_stream.recipient_id,
    )
def test_move_message_to_stream_change_later(self) -> None:
    """change_later on the second message moves it and everything after it
    to the new stream, leaving the first message behind, with breadcrumb
    notifications counting the moved messages on both sides.
    """
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "iago", "test move stream", "new stream", "test"
    )

    response = self.client_patch(
        f"/json/messages/{msg_id_later}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_later",
        },
    )
    self.assert_json_success(response)

    # Old topic: the first message stays, followed by the breadcrumb.
    remaining = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(remaining, 2)
    self.assertEqual(remaining[0].id, msg_id)
    self.assertEqual(
        remaining[1].content,
        f"2 messages were moved from this topic to #**new stream>test** by @_**Iago|{user_profile.id}**.",
    )

    # New topic: the two moved messages plus the "moved here" notification.
    moved = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(moved, 3)
    self.assertEqual(moved[0].id, msg_id_later)
    self.assertEqual(
        moved[2].content,
        f"2 messages were moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
    )
def test_move_message_to_stream_change_later_all_moved(self) -> None:
    """change_later starting from the first message moves the entire topic,
    so the notifications use the whole-topic wording instead of a count.
    """
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "iago", "test move stream", "new stream", "test"
    )

    response = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_later",
        },
    )
    self.assert_json_success(response)

    # Old topic holds only the breadcrumb notification.
    remaining = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(remaining, 1)
    self.assertEqual(
        remaining[0].content,
        f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
    )

    # New topic: all 3 moved messages plus the "moved here" notification.
    moved = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(moved, 4)
    self.assertEqual(moved[0].id, msg_id)
    self.assertEqual(
        moved[3].content,
        f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
    )
def test_move_message_to_stream_change_one(self) -> None:
    """change_one moves only the targeted message to the new stream; both
    topics receive singular "A message was moved" notifications.
    """
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "iago", "test move stream", "new stream", "test"
    )

    # f-string URL for consistency with the rest of this file.
    result = self.client_patch(
        f"/json/messages/{msg_id_later}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_one",
        },
    )

    self.assert_json_success(result)

    # Old topic keeps the other two messages plus the breadcrumb.
    messages = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(messages, 3)
    self.assertEqual(messages[0].id, msg_id)
    self.assertEqual(
        messages[2].content,
        f"A message was moved from this topic to #**new stream>test** by @_**Iago|{user_profile.id}**.",
    )

    # New topic has the single moved message plus the "moved here" note.
    messages = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(messages, 2)
    self.assertEqual(messages[0].id, msg_id_later)
    self.assertEqual(
        messages[1].content,
        f"A message was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
    )
def test_move_message_to_stream_change_all(self) -> None:
    """change_all on any message in the topic moves the entire topic —
    including messages sent before the targeted one — to the new stream.
    """
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "iago", "test move stream", "new stream", "test"
    )

    # f-string URL for consistency with the rest of this file.
    result = self.client_patch(
        f"/json/messages/{msg_id_later}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_all",
        },
    )

    self.assert_json_success(result)

    # Old topic retains only the breadcrumb notification.
    messages = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(messages, 1)
    self.assertEqual(
        messages[0].content,
        f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
    )

    # New topic: all 3 moved messages plus the "moved here" notification.
    messages = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(messages, 4)
    self.assertEqual(messages[0].id, msg_id)
    self.assertEqual(
        messages[3].content,
        f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
    )
    def test_move_message_between_streams_policy_setting(self) -> None:
        """Walk move_messages_between_streams_policy through every policy
        level and verify each role is allowed/denied accordingly."""
        (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
            "othello", "old_stream_1", "new_stream_1", "test"
        )
        def check_move_message_according_to_policy(role: int, expect_fail: bool = False) -> None:
            # Set the user's role, attempt the topic move, and assert the outcome
            # (message counts confirm whether anything actually moved).
            do_change_user_role(user_profile, role, acting_user=None)
            result = self.client_patch(
                "/json/messages/" + str(msg_id),
                {
                    "stream_id": new_stream.id,
                    "propagate_mode": "change_all",
                },
            )
            if expect_fail:
                self.assert_json_error(result, "You don't have permission to move this message")
                messages = get_topic_messages(user_profile, old_stream, "test")
                self.assert_length(messages, 3)
                messages = get_topic_messages(user_profile, new_stream, "test")
                self.assert_length(messages, 0)
            else:
                self.assert_json_success(result)
                messages = get_topic_messages(user_profile, old_stream, "test")
                self.assert_length(messages, 1)
                messages = get_topic_messages(user_profile, new_stream, "test")
                self.assert_length(messages, 4)
        # Check sending messages when policy is Realm.POLICY_ADMINS_ONLY.
        do_set_realm_property(
            user_profile.realm,
            "move_messages_between_streams_policy",
            Realm.POLICY_ADMINS_ONLY,
            acting_user=None,
        )
        check_move_message_according_to_policy(UserProfile.ROLE_MODERATOR, expect_fail=True)
        check_move_message_according_to_policy(UserProfile.ROLE_REALM_ADMINISTRATOR)
        (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
            "othello", "old_stream_2", "new_stream_2", "test"
        )
        # Check sending messages when policy is Realm.POLICY_MODERATORS_ONLY.
        do_set_realm_property(
            user_profile.realm,
            "move_messages_between_streams_policy",
            Realm.POLICY_MODERATORS_ONLY,
            acting_user=None,
        )
        check_move_message_according_to_policy(UserProfile.ROLE_MEMBER, expect_fail=True)
        check_move_message_according_to_policy(UserProfile.ROLE_MODERATOR)
        (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
            "othello", "old_stream_3", "new_stream_3", "test"
        )
        # Check sending messages when policy is Realm.POLICY_FULL_MEMBERS_ONLY.
        do_set_realm_property(
            user_profile.realm,
            "move_messages_between_streams_policy",
            Realm.POLICY_FULL_MEMBERS_ONLY,
            acting_user=None,
        )
        # A huge waiting period makes the member a "new" member (denied);
        # zero makes them a full member (allowed).
        do_set_realm_property(
            user_profile.realm, "waiting_period_threshold", 100000, acting_user=None
        )
        check_move_message_according_to_policy(UserProfile.ROLE_MEMBER, expect_fail=True)
        do_set_realm_property(user_profile.realm, "waiting_period_threshold", 0, acting_user=None)
        check_move_message_according_to_policy(UserProfile.ROLE_MEMBER)
        (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
            "othello", "old_stream_4", "new_stream_4", "test"
        )
        # Check sending messages when policy is Realm.POLICY_MEMBERS_ONLY.
        do_set_realm_property(
            user_profile.realm,
            "move_messages_between_streams_policy",
            Realm.POLICY_MEMBERS_ONLY,
            acting_user=None,
        )
        check_move_message_according_to_policy(UserProfile.ROLE_GUEST, expect_fail=True)
        check_move_message_according_to_policy(UserProfile.ROLE_MEMBER)
    def test_move_message_to_stream_based_on_stream_post_policy(self) -> None:
        """The destination stream's stream_post_policy must also permit the
        mover to post there; test every policy level against roles."""
        (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
            "othello", "old_stream_1", "new_stream_1", "test"
        )
        # Allow all members to move messages so that only stream_post_policy
        # decides the outcome below.
        do_set_realm_property(
            user_profile.realm,
            "move_messages_between_streams_policy",
            Realm.POLICY_MEMBERS_ONLY,
            acting_user=None,
        )
        def check_move_message_to_stream(role: int, error_msg: Optional[str] = None) -> None:
            # Set the user's role, attempt the move, and assert via message
            # counts whether the topic actually moved.
            do_change_user_role(user_profile, role, acting_user=None)
            result = self.client_patch(
                "/json/messages/" + str(msg_id),
                {
                    "stream_id": new_stream.id,
                    "propagate_mode": "change_all",
                },
            )
            if error_msg is not None:
                self.assert_json_error(result, error_msg)
                messages = get_topic_messages(user_profile, old_stream, "test")
                self.assert_length(messages, 3)
                messages = get_topic_messages(user_profile, new_stream, "test")
                self.assert_length(messages, 0)
            else:
                self.assert_json_success(result)
                messages = get_topic_messages(user_profile, old_stream, "test")
                self.assert_length(messages, 1)
                messages = get_topic_messages(user_profile, new_stream, "test")
                self.assert_length(messages, 4)
        # Check when stream_post_policy is STREAM_POST_POLICY_ADMINS.
        do_change_stream_post_policy(
            new_stream, Stream.STREAM_POST_POLICY_ADMINS, acting_user=user_profile
        )
        error_msg = "Only organization administrators can send to this stream."
        check_move_message_to_stream(UserProfile.ROLE_MODERATOR, error_msg)
        check_move_message_to_stream(UserProfile.ROLE_REALM_ADMINISTRATOR)
        (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
            "othello", "old_stream_2", "new_stream_2", "test"
        )
        # Check when stream_post_policy is STREAM_POST_POLICY_MODERATORS.
        do_change_stream_post_policy(
            new_stream, Stream.STREAM_POST_POLICY_MODERATORS, acting_user=user_profile
        )
        error_msg = "Only organization administrators and moderators can send to this stream."
        check_move_message_to_stream(UserProfile.ROLE_MEMBER, error_msg)
        check_move_message_to_stream(UserProfile.ROLE_MODERATOR)
        (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
            "othello", "old_stream_3", "new_stream_3", "test"
        )
        # Check when stream_post_policy is STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS.
        do_change_stream_post_policy(
            new_stream, Stream.STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS, acting_user=user_profile
        )
        error_msg = "New members cannot send to this stream."
        do_set_realm_property(
            user_profile.realm, "waiting_period_threshold", 100000, acting_user=None
        )
        check_move_message_to_stream(UserProfile.ROLE_MEMBER, error_msg)
        do_set_realm_property(user_profile.realm, "waiting_period_threshold", 0, acting_user=None)
        check_move_message_to_stream(UserProfile.ROLE_MEMBER)
        (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
            "othello", "old_stream_4", "new_stream_4", "test"
        )
        # Check when stream_post_policy is STREAM_POST_POLICY_EVERYONE.
        # In this case also, guest is not allowed as we do not allow guest to move
        # messages between streams in any case, so stream_post_policy of new stream does
        # not matter.
        do_change_stream_post_policy(
            new_stream, Stream.STREAM_POST_POLICY_EVERYONE, acting_user=user_profile
        )
        do_set_realm_property(
            user_profile.realm, "waiting_period_threshold", 100000, acting_user=None
        )
        check_move_message_to_stream(
            UserProfile.ROLE_GUEST, "You don't have permission to move this message"
        )
        check_move_message_to_stream(UserProfile.ROLE_MEMBER)
    def test_move_message_to_stream_with_topic_editing_not_allowed(self) -> None:
        """When edit_topic_policy forbids topic edits, a member may still move
        a topic to another stream as long as the topic name is unchanged."""
        (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
            "othello", "old_stream_1", "new_stream_1", "test"
        )
        realm = user_profile.realm
        realm.edit_topic_policy = Realm.POLICY_ADMINS_ONLY
        realm.save()
        self.login("cordelia")
        do_set_realm_property(
            user_profile.realm,
            "move_messages_between_streams_policy",
            Realm.POLICY_MEMBERS_ONLY,
            acting_user=None,
        )
        # Renaming the topic while moving requires topic-edit permission.
        result = self.client_patch(
            "/json/messages/" + str(msg_id),
            {
                "stream_id": new_stream.id,
                "propagate_mode": "change_all",
                "topic": "new topic",
            },
        )
        self.assert_json_error(result, "You don't have permission to edit this message")
        # Moving without renaming the topic is permitted.
        result = self.client_patch(
            f"/json/messages/{msg_id}",
            {
                "stream_id": new_stream.id,
                "propagate_mode": "change_all",
            },
        )
        self.assert_json_success(result)
        messages = get_topic_messages(user_profile, old_stream, "test")
        self.assert_length(messages, 1)
        messages = get_topic_messages(user_profile, new_stream, "test")
        self.assert_length(messages, 4)
    def test_move_message_to_stream_and_topic(self) -> None:
        """Moving a topic to another stream while renaming it works in one
        request; the pinned query/cache counts guard against performance
        regressions in the move path."""
        (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
            "iago", "test move stream", "new stream", "test"
        )
        with queries_captured() as queries, cache_tries_captured() as cache_tries:
            result = self.client_patch(
                f"/json/messages/{msg_id}",
                {
                    "stream_id": new_stream.id,
                    "propagate_mode": "change_all",
                    "topic": "new topic",
                },
            )
        # Exact counts pinned intentionally; update them only for understood changes.
        self.assert_length(queries, 53)
        self.assert_length(cache_tries, 13)
        messages = get_topic_messages(user_profile, old_stream, "test")
        self.assert_length(messages, 1)
        self.assertEqual(
            messages[0].content,
            f"This topic was moved to #**new stream>new topic** by @_**Iago|{user_profile.id}**.",
        )
        messages = get_topic_messages(user_profile, new_stream, "new topic")
        self.assert_length(messages, 4)
        self.assertEqual(
            messages[3].content,
            f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
        )
        self.assert_json_success(result)
def test_inaccessible_msg_after_stream_change(self) -> None:
"""Simulates the case where message is moved to a stream where user is not a subscribed"""
(user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
"iago", "test move stream", "new stream", "test"
)
guest_user = self.example_user("polonius")
non_guest_user = self.example_user("hamlet")
self.subscribe(guest_user, old_stream.name)
self.subscribe(non_guest_user, old_stream.name)
msg_id_to_test_acesss = self.send_stream_message(
user_profile, old_stream.name, topic_name="test", content="fourth"
)
self.assertEqual(
has_message_access(
guest_user, Message.objects.get(id=msg_id_to_test_acesss), has_user_message=False
),
True,
)
self.assertEqual(
has_message_access(
guest_user,
Message.objects.get(id=msg_id_to_test_acesss),
has_user_message=False,
stream=old_stream,
),
True,
)
self.assertEqual(
has_message_access(
non_guest_user,
Message.objects.get(id=msg_id_to_test_acesss),
has_user_message=False,
),
True,
)
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"stream_id": new_stream.id,
"propagate_mode": "change_all",
"topic": "new topic",
},
)
self.assert_json_success(result)
self.assertEqual(
has_message_access(
guest_user,
Message.objects.get(id=msg_id_to_test_acesss),
has_user_message=False,
),
False,
)
self.assertEqual(
has_message_access(
non_guest_user,
Message.objects.get(id=msg_id_to_test_acesss),
has_user_message=False,
),
True,
)
self.assertEqual(
# If the guest user were subscribed to the new stream,
# they'd have access; has_message_access does not validate
# the is_subscribed parameter.
has_message_access(
guest_user,
Message.objects.get(id=msg_id_to_test_acesss),
has_user_message=False,
stream=new_stream,
is_subscribed=True,
),
True,
)
self.assertEqual(
has_message_access(
guest_user,
Message.objects.get(id=msg_id_to_test_acesss),
has_user_message=False,
stream=new_stream,
),
False,
)
with self.assertRaises(AssertionError):
# Raises assertion if you pass an invalid stream.
has_message_access(
guest_user,
Message.objects.get(id=msg_id_to_test_acesss),
has_user_message=False,
stream=old_stream,
)
self.assertEqual(
UserMessage.objects.filter(
user_profile_id=non_guest_user.id,
message_id=msg_id_to_test_acesss,
).count(),
0,
)
self.assertEqual(
has_message_access(
self.example_user("iago"),
Message.objects.get(id=msg_id_to_test_acesss),
has_user_message=False,
),
True,
)
def test_no_notify_move_message_to_stream(self) -> None:
(user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
"iago", "test move stream", "new stream", "test"
)
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"stream_id": new_stream.id,
"propagate_mode": "change_all",
"send_notification_to_old_thread": "false",
"send_notification_to_new_thread": "false",
},
)
self.assert_json_success(result)
messages = get_topic_messages(user_profile, old_stream, "test")
self.assert_length(messages, 0)
messages = get_topic_messages(user_profile, new_stream, "test")
self.assert_length(messages, 3)
    def test_notify_new_thread_move_message_to_stream(self) -> None:
        """With only send_notification_to_new_thread enabled, a breadcrumb is
        posted in the destination topic but not in the source topic."""
        (user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
            "iago", "test move stream", "new stream", "test"
        )
        result = self.client_patch(
            f"/json/messages/{msg_id}",
            {
                "stream_id": new_stream.id,
                "propagate_mode": "change_all",
                "send_notification_to_old_thread": "false",
                "send_notification_to_new_thread": "true",
            },
        )
        self.assert_json_success(result)
        # Source topic is left completely empty.
        messages = get_topic_messages(user_profile, old_stream, "test")
        self.assert_length(messages, 0)
        # Destination has the three moved messages plus the notification.
        messages = get_topic_messages(user_profile, new_stream, "test")
        self.assert_length(messages, 4)
        self.assertEqual(
            messages[3].content,
            f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
        )
    def test_notify_old_thread_move_message_to_stream(self) -> None:
        """With only send_notification_to_old_thread enabled, a breadcrumb is
        posted in the source topic but not in the destination topic."""
        (user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
            "iago", "test move stream", "new stream", "test"
        )
        result = self.client_patch(
            f"/json/messages/{msg_id}",
            {
                "stream_id": new_stream.id,
                "propagate_mode": "change_all",
                "send_notification_to_old_thread": "true",
                "send_notification_to_new_thread": "false",
            },
        )
        self.assert_json_success(result)
        # Source topic holds only the "moved" notification.
        messages = get_topic_messages(user_profile, old_stream, "test")
        self.assert_length(messages, 1)
        self.assertEqual(
            messages[0].content,
            f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
        )
        # Destination holds only the three moved messages, no notification.
        messages = get_topic_messages(user_profile, new_stream, "test")
        self.assert_length(messages, 3)
    def parameterized_test_move_message_involving_private_stream(
        self,
        from_invite_only: bool,
        history_public_to_subscribers: bool,
        user_messages_created: bool,
        to_invite_only: bool = True,
    ) -> None:
        """Shared driver for moving a topic into/out of private streams.

        Verifies that a subscriber left behind on the old stream loses their
        UserMessage rows, and that UserMessage rows are created for new-stream
        subscribers exactly when `user_messages_created` says they should be
        (i.e. when the destination's history is NOT shared).
        """
        admin_user = self.example_user("iago")
        user_losing_access = self.example_user("cordelia")
        user_gaining_access = self.example_user("hamlet")
        self.login("iago")
        old_stream = self.make_stream("test move stream", invite_only=from_invite_only)
        new_stream = self.make_stream(
            "new stream",
            invite_only=to_invite_only,
            history_public_to_subscribers=history_public_to_subscribers,
        )
        self.subscribe(admin_user, old_stream.name)
        self.subscribe(user_losing_access, old_stream.name)
        self.subscribe(admin_user, new_stream.name)
        self.subscribe(user_gaining_access, new_stream.name)
        msg_id = self.send_stream_message(
            admin_user, old_stream.name, topic_name="test", content="First"
        )
        self.send_stream_message(admin_user, old_stream.name, topic_name="test", content="Second")
        # Sanity-check pre-move UserMessage rows.
        self.assertEqual(
            UserMessage.objects.filter(
                user_profile_id=user_losing_access.id,
                message_id=msg_id,
            ).count(),
            1,
        )
        self.assertEqual(
            UserMessage.objects.filter(
                user_profile_id=user_gaining_access.id,
                message_id=msg_id,
            ).count(),
            0,
        )
        result = self.client_patch(
            f"/json/messages/{msg_id}",
            {
                "stream_id": new_stream.id,
                "propagate_mode": "change_all",
            },
        )
        self.assert_json_success(result)
        messages = get_topic_messages(admin_user, old_stream, "test")
        self.assert_length(messages, 1)
        self.assertEqual(
            messages[0].content,
            f"This topic was moved to #**new stream>test** by @_**Iago|{admin_user.id}**.",
        )
        messages = get_topic_messages(admin_user, new_stream, "test")
        self.assert_length(messages, 3)
        # The user not subscribed to the destination loses their UserMessage row.
        self.assertEqual(
            UserMessage.objects.filter(
                user_profile_id=user_losing_access.id,
                message_id=msg_id,
            ).count(),
            0,
        )
        # When the history is shared, UserMessage is not created for the user but the user
        # can see the message.
        self.assertEqual(
            UserMessage.objects.filter(
                user_profile_id=user_gaining_access.id,
                message_id=msg_id,
            ).count(),
            1 if user_messages_created else 0,
        )
    def test_move_message_from_public_to_private_stream_not_shared_history(self) -> None:
        # Public -> private with protected history: UserMessage rows are created.
        self.parameterized_test_move_message_involving_private_stream(
            from_invite_only=False,
            history_public_to_subscribers=False,
            user_messages_created=True,
        )
    def test_move_message_from_public_to_private_stream_shared_history(self) -> None:
        # Public -> private with shared history: no UserMessage rows needed.
        self.parameterized_test_move_message_involving_private_stream(
            from_invite_only=False,
            history_public_to_subscribers=True,
            user_messages_created=False,
        )
    def test_move_message_from_private_to_private_stream_not_shared_history(self) -> None:
        # Private -> private with protected history: UserMessage rows are created.
        self.parameterized_test_move_message_involving_private_stream(
            from_invite_only=True,
            history_public_to_subscribers=False,
            user_messages_created=True,
        )
    def test_move_message_from_private_to_private_stream_shared_history(self) -> None:
        # Private -> private with shared history: no UserMessage rows needed.
        self.parameterized_test_move_message_involving_private_stream(
            from_invite_only=True,
            history_public_to_subscribers=True,
            user_messages_created=False,
        )
    def test_move_message_from_private_to_public(self) -> None:
        # Private -> public destination: no UserMessage rows needed.
        self.parameterized_test_move_message_involving_private_stream(
            from_invite_only=True,
            history_public_to_subscribers=True,
            user_messages_created=False,
            to_invite_only=False,
        )
    def test_can_move_messages_between_streams(self) -> None:
        """Run the shared permission-policy matrix against the
        move_messages_between_streams_policy realm setting."""
        def validation_func(user_profile: UserProfile) -> bool:
            # Refresh so role changes made by the policy checker are visible.
            user_profile.refresh_from_db()
            return user_profile.can_move_messages_between_streams()
        self.check_has_permission_policies("move_messages_between_streams_policy", validation_func)
def test_mark_topic_as_resolved(self) -> None:
self.login("iago")
admin_user = self.example_user("iago")
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
aaron = self.example_user("aaron")
# Set the user's translation language to German to test that
# it is overridden by the realm's default language.
admin_user.default_language = "de"
admin_user.save()
stream = self.make_stream("new")
self.subscribe(admin_user, stream.name)
self.subscribe(hamlet, stream.name)
self.subscribe(cordelia, stream.name)
self.subscribe(aaron, stream.name)
original_topic = "topic 1"
id1 = self.send_stream_message(hamlet, "new", topic_name=original_topic)
id2 = self.send_stream_message(admin_user, "new", topic_name=original_topic)
msg1 = Message.objects.get(id=id1)
do_add_reaction(aaron, msg1, "tada", "1f389", "unicode_emoji")
# Check that we don't incorrectly send "unresolve topic"
# notifications when asking the preserve the current topic.
result = self.client_patch(
"/json/messages/" + str(id1),
{
"topic": original_topic,
"propagate_mode": "change_all",
},
)
self.assert_json_error(result, "Nothing to change")
resolved_topic = RESOLVED_TOPIC_PREFIX + original_topic
result = self.client_patch(
"/json/messages/" + str(id1),
{
"topic": resolved_topic,
"propagate_mode": "change_all",
},
HTTP_ACCEPT_LANGUAGE="de",
)
self.assert_json_success(result)
for msg_id in [id1, id2]:
msg = Message.objects.get(id=msg_id)
self.assertEqual(
resolved_topic,
msg.topic_name(),
)
messages = get_topic_messages(admin_user, stream, resolved_topic)
self.assert_length(messages, 3)
self.assertEqual(
messages[2].content,
f"@_**Iago|{admin_user.id}** has marked this topic as resolved.",
)
# Check topic resolved notification message is only unread for participants.
assert (
UserMessage.objects.filter(
user_profile__in=[admin_user, hamlet, aaron], message__id=messages[2].id
)
.extra(where=[UserMessage.where_unread()])
.count()
== 3
)
assert (
UserMessage.objects.filter(user_profile=cordelia, message__id=messages[2].id)
.extra(where=[UserMessage.where_unread()])
.count()
== 0
)
# Now move to a weird state and confirm no new messages
weird_topic = "✔ ✔✔" + original_topic
result = self.client_patch(
"/json/messages/" + str(id1),
{
"topic": weird_topic,
"propagate_mode": "change_all",
},
)
self.assert_json_success(result)
for msg_id in [id1, id2]:
msg = Message.objects.get(id=msg_id)
self.assertEqual(
weird_topic,
msg.topic_name(),
)
messages = get_topic_messages(admin_user, stream, weird_topic)
self.assert_length(messages, 3)
self.assertEqual(
messages[2].content,
f"@_**Iago|{admin_user.id}** has marked this topic as resolved.",
)
unresolved_topic = original_topic
result = self.client_patch(
"/json/messages/" + str(id1),
{
"topic": unresolved_topic,
"propagate_mode": "change_all",
},
)
self.assert_json_success(result)
for msg_id in [id1, id2]:
msg = Message.objects.get(id=msg_id)
self.assertEqual(
unresolved_topic,
msg.topic_name(),
)
messages = get_topic_messages(admin_user, stream, unresolved_topic)
self.assert_length(messages, 4)
self.assertEqual(
messages[3].content,
f"@_**Iago|{admin_user.id}** has marked this topic as unresolved.",
)
# Check topic unresolved notification message is only unread for participants.
assert (
UserMessage.objects.filter(
user_profile__in=[admin_user, hamlet, aaron], message__id=messages[3].id
)
.extra(where=[UserMessage.where_unread()])
.count()
== 3
)
assert (
UserMessage.objects.filter(user_profile=cordelia, message__id=messages[3].id)
.extra(where=[UserMessage.where_unread()])
.count()
== 0
)
class DeleteMessageTest(ZulipTestCase):
    def test_delete_message_invalid_request_format(self) -> None:
        """The message id must come from the URL path; a mismatching path id
        with a body `message_id` is rejected."""
        self.login("iago")
        hamlet = self.example_user("hamlet")
        msg_id = self.send_stream_message(hamlet, "Denmark")
        # Path id points at a nonexistent message; body id is ignored.
        result = self.client_delete(f"/json/messages/{msg_id + 1}", {"message_id": msg_id})
        self.assert_json_error(result, "Invalid message(s)")
        result = self.client_delete(f"/json/messages/{msg_id}")
        self.assert_json_success(result)
    def test_delete_message_by_user(self) -> None:
        """End-to-end checks of message deletion permissions: policy setting,
        time limits (backdated via date_sent), admin override, and duplicate
        delete requests racing each other."""
        def set_message_deleting_params(
            delete_own_message_policy: int, message_content_delete_limit_seconds: Union[int, str]
        ) -> None:
            # Configure the realm's deletion policy as Iago (admin).
            self.login("iago")
            result = self.client_patch(
                "/json/realm",
                {
                    "delete_own_message_policy": delete_own_message_policy,
                    "message_content_delete_limit_seconds": orjson.dumps(
                        message_content_delete_limit_seconds
                    ).decode(),
                },
            )
            self.assert_json_success(result)
        def test_delete_message_by_admin(msg_id: int) -> HttpResponse:
            # Attempt deletion as an organization administrator.
            self.login("iago")
            result = self.client_delete(f"/json/messages/{msg_id}")
            return result
        def test_delete_message_by_owner(msg_id: int) -> HttpResponse:
            # Attempt deletion as the message sender.
            self.login("hamlet")
            result = self.client_delete(f"/json/messages/{msg_id}")
            return result
        def test_delete_message_by_other_user(msg_id: int) -> HttpResponse:
            # Attempt deletion as an unrelated user.
            self.login("cordelia")
            result = self.client_delete(f"/json/messages/{msg_id}")
            return result
        # Test if message deleting is not allowed(default).
        set_message_deleting_params(Realm.POLICY_ADMINS_ONLY, "unlimited")
        hamlet = self.example_user("hamlet")
        self.login_user(hamlet)
        msg_id = self.send_stream_message(hamlet, "Denmark")
        result = test_delete_message_by_owner(msg_id=msg_id)
        self.assert_json_error(result, "You don't have permission to delete this message")
        result = test_delete_message_by_other_user(msg_id=msg_id)
        self.assert_json_error(result, "You don't have permission to delete this message")
        result = test_delete_message_by_admin(msg_id=msg_id)
        self.assert_json_success(result)
        # Test if message deleting is allowed.
        # Test if time limit is None(no limit).
        set_message_deleting_params(Realm.POLICY_EVERYONE, "unlimited")
        msg_id = self.send_stream_message(hamlet, "Denmark")
        message = Message.objects.get(id=msg_id)
        message.date_sent = message.date_sent - datetime.timedelta(seconds=600)
        message.save()
        result = test_delete_message_by_other_user(msg_id=msg_id)
        self.assert_json_error(result, "You don't have permission to delete this message")
        result = test_delete_message_by_owner(msg_id=msg_id)
        self.assert_json_success(result)
        # Test if time limit is non-zero.
        set_message_deleting_params(Realm.POLICY_EVERYONE, 240)
        msg_id_1 = self.send_stream_message(hamlet, "Denmark")
        message = Message.objects.get(id=msg_id_1)
        message.date_sent = message.date_sent - datetime.timedelta(seconds=120)
        message.save()
        msg_id_2 = self.send_stream_message(hamlet, "Denmark")
        message = Message.objects.get(id=msg_id_2)
        message.date_sent = message.date_sent - datetime.timedelta(seconds=360)
        message.save()
        result = test_delete_message_by_other_user(msg_id=msg_id_1)
        self.assert_json_error(result, "You don't have permission to delete this message")
        result = test_delete_message_by_owner(msg_id=msg_id_1)
        self.assert_json_success(result)
        result = test_delete_message_by_owner(msg_id=msg_id_2)
        self.assert_json_error(result, "The time limit for deleting this message has passed")
        # No limit for admin.
        result = test_delete_message_by_admin(msg_id=msg_id_2)
        self.assert_json_success(result)
        # Test multiple delete requests with no latency issues
        msg_id = self.send_stream_message(hamlet, "Denmark")
        result = test_delete_message_by_owner(msg_id=msg_id)
        self.assert_json_success(result)
        result = test_delete_message_by_owner(msg_id=msg_id)
        self.assert_json_error(result, "Invalid message(s)")
        # Test handling of 500 error caused by multiple delete requests due to latency.
        # see issue #11219.
        with mock.patch("zerver.views.message_edit.do_delete_messages") as m, mock.patch(
            "zerver.views.message_edit.validate_can_delete_message", return_value=None
        ), mock.patch("zerver.views.message_edit.access_message", return_value=(None, None)):
            m.side_effect = IntegrityError()
            result = test_delete_message_by_owner(msg_id=msg_id)
            self.assert_json_error(result, "Message already deleted")
            m.side_effect = Message.DoesNotExist()
            result = test_delete_message_by_owner(msg_id=msg_id)
            self.assert_json_error(result, "Message already deleted")
    def test_delete_message_according_to_delete_own_message_policy(self) -> None:
        """Walk delete_own_message_policy through every policy level and
        verify each sender role can (or cannot) delete their own message."""
        def check_delete_message_by_sender(
            sender_name: str, error_msg: Optional[str] = None
        ) -> None:
            # Send a message as `sender_name`, then try to delete it as them.
            sender = self.example_user(sender_name)
            msg_id = self.send_stream_message(sender, "Verona")
            self.login_user(sender)
            result = self.client_delete(f"/json/messages/{msg_id}")
            if error_msg is None:
                self.assert_json_success(result)
            else:
                self.assert_json_error(result, error_msg)
        realm = get_realm("zulip")
        do_set_realm_property(
            realm, "delete_own_message_policy", Realm.POLICY_ADMINS_ONLY, acting_user=None
        )
        check_delete_message_by_sender("shiva", "You don't have permission to delete this message")
        check_delete_message_by_sender("iago")
        do_set_realm_property(
            realm, "delete_own_message_policy", Realm.POLICY_MODERATORS_ONLY, acting_user=None
        )
        check_delete_message_by_sender(
            "cordelia", "You don't have permission to delete this message"
        )
        check_delete_message_by_sender("shiva")
        do_set_realm_property(
            realm, "delete_own_message_policy", Realm.POLICY_MEMBERS_ONLY, acting_user=None
        )
        check_delete_message_by_sender(
            "polonius", "You don't have permission to delete this message"
        )
        check_delete_message_by_sender("cordelia")
        do_set_realm_property(
            realm, "delete_own_message_policy", Realm.POLICY_FULL_MEMBERS_ONLY, acting_user=None
        )
        # Full-member policy: a member inside the waiting period is denied,
        # one past it is allowed.
        do_set_realm_property(realm, "waiting_period_threshold", 10, acting_user=None)
        cordelia = self.example_user("cordelia")
        cordelia.date_joined = timezone_now() - datetime.timedelta(days=9)
        cordelia.save()
        check_delete_message_by_sender(
            "cordelia", "You don't have permission to delete this message"
        )
        cordelia.date_joined = timezone_now() - datetime.timedelta(days=11)
        cordelia.save()
        check_delete_message_by_sender("cordelia")
        do_set_realm_property(
            realm, "delete_own_message_policy", Realm.POLICY_EVERYONE, acting_user=None
        )
        check_delete_message_by_sender("cordelia")
        check_delete_message_by_sender("polonius")
    def test_delete_event_sent_after_transaction_commits(self) -> None:
        """
        Tests that `send_event` is hooked to `transaction.on_commit`. This is important, because
        we don't want to end up holding locks on message rows for too long if the event queue runs
        into a problem.
        """
        hamlet = self.example_user("hamlet")
        self.send_stream_message(hamlet, "Denmark")
        message = self.get_last_message()
        # In Django tests the transaction never commits, so on_commit hooks
        # never fire; if send_event runs anyway, it was called too early.
        with self.tornado_redirected_to_list([], expected_num_events=1):
            with mock.patch("zerver.actions.message_edit.send_event") as m:
                m.side_effect = AssertionError(
                    "Events should be sent only after the transaction commits."
                )
                do_delete_messages(hamlet.realm, [message])
import datetime
from operator import itemgetter
from typing import Any, Dict, List, Optional, Tuple, Union
from unittest import mock
import orjson
from django.db import IntegrityError
from django.http import HttpResponse
from django.utils.timezone import now as timezone_now
from zerver.actions.message_edit import (
check_update_message,
do_delete_messages,
do_update_message,
get_user_info_for_message_updates,
)
from zerver.actions.reactions import do_add_reaction
from zerver.actions.realm_settings import do_change_realm_plan_type, do_set_realm_property
from zerver.actions.streams import do_change_stream_post_policy, do_deactivate_stream
from zerver.actions.users import do_change_user_role
from zerver.lib.message import MessageDict, has_message_access, messages_for_ids
from zerver.lib.test_classes import ZulipTestCase, get_topic_messages
from zerver.lib.test_helpers import cache_tries_captured, queries_captured
from zerver.lib.topic import RESOLVED_TOPIC_PREFIX, TOPIC_NAME
from zerver.lib.user_topics import (
get_topic_mutes,
get_users_muting_topic,
set_topic_mutes,
topic_is_muted,
)
from zerver.models import Message, Realm, Stream, UserMessage, UserProfile, get_realm, get_stream
class EditMessageTestCase(ZulipTestCase):
    """Shared helpers for the message-edit test classes below."""
    def check_topic(self, msg_id: int, topic_name: str) -> None:
        """Assert that the stored message has the expected topic."""
        msg = Message.objects.get(id=msg_id)
        self.assertEqual(msg.topic_name(), topic_name)
    def check_message(self, msg_id: int, topic_name: str, content: str) -> None:
        """Assert topic/content both on the Message row and via the
        messages_for_ids fetch path (which should hit the cache: exactly one
        query, and not against the message table)."""
        msg = Message.objects.get(id=msg_id)
        self.assertEqual(msg.topic_name(), topic_name)
        self.assertEqual(msg.content, content)
        with queries_captured(keep_cache_warm=True) as queries:
            (fetch_message_dict,) = messages_for_ids(
                message_ids=[msg.id],
                user_message_flags={msg_id: []},
                search_fields={},
                apply_markdown=False,
                client_gravatar=False,
                allow_edit_history=True,
            )
        self.assert_length(queries, 1)
        for query in queries:
            self.assertNotIn("message", query["sql"])
        self.assertEqual(
            fetch_message_dict[TOPIC_NAME],
            msg.topic_name(),
        )
        self.assertEqual(
            fetch_message_dict["content"],
            msg.content,
        )
        self.assertEqual(
            fetch_message_dict["sender_id"],
            msg.sender_id,
        )
        if msg.edit_history:
            self.assertEqual(
                fetch_message_dict["edit_history"],
                orjson.loads(msg.edit_history),
            )
    def prepare_move_topics(
        self,
        user_email: str,
        old_stream: str,
        new_stream: str,
        topic: str,
        language: Optional[str] = None,
    ) -> Tuple[UserProfile, Stream, Stream, int, int]:
        """Create two streams, subscribe the user to both, log them in, and
        send three messages ("First", "Second", "third") into `topic` on the
        old stream. Returns (user, old_stream, new_stream, first_id, second_id)."""
        user_profile = self.example_user(user_email)
        if language is not None:
            user_profile.default_language = language
            user_profile.save(update_fields=["default_language"])
        self.login(user_email)
        stream = self.make_stream(old_stream)
        new_stream = self.make_stream(new_stream)
        self.subscribe(user_profile, stream.name)
        self.subscribe(user_profile, new_stream.name)
        msg_id = self.send_stream_message(
            user_profile, stream.name, topic_name=topic, content="First"
        )
        msg_id_lt = self.send_stream_message(
            user_profile, stream.name, topic_name=topic, content="Second"
        )
        self.send_stream_message(user_profile, stream.name, topic_name=topic, content="third")
        return (user_profile, stream, new_stream, msg_id, msg_id_lt)
class EditMessagePayloadTest(EditMessageTestCase):
    """Validation tests for malformed or disallowed message-edit payloads."""
    def test_edit_message_no_changes(self) -> None:
        # An empty patch body is rejected.
        self.login("hamlet")
        msg_id = self.send_stream_message(
            self.example_user("hamlet"), "Denmark", topic_name="editing", content="before edit"
        )
        result = self.client_patch(
            "/json/messages/" + str(msg_id),
            {},
        )
        self.assert_json_error(result, "Nothing to change")
    def test_move_message_cant_move_private_message(self) -> None:
        # Private messages cannot be given a stream_id.
        hamlet = self.example_user("hamlet")
        self.login("hamlet")
        cordelia = self.example_user("cordelia")
        msg_id = self.send_personal_message(hamlet, cordelia)
        verona = get_stream("Verona", hamlet.realm)
        result = self.client_patch(
            "/json/messages/" + str(msg_id),
            {
                "stream_id": verona.id,
            },
        )
        self.assert_json_error(result, "Private messages cannot be moved to streams.")
    def test_private_message_edit_topic(self) -> None:
        # Private messages cannot be given a topic.
        hamlet = self.example_user("hamlet")
        self.login("hamlet")
        cordelia = self.example_user("cordelia")
        msg_id = self.send_personal_message(hamlet, cordelia)
        result = self.client_patch(
            "/json/messages/" + str(msg_id),
            {
                "topic": "Should not exist",
            },
        )
        self.assert_json_error(result, "Private messages cannot have topics.")
    def test_propagate_invalid(self) -> None:
        # Unknown propagate_mode values, and non-change_one modes for pure
        # content edits, are rejected without modifying the message.
        self.login("hamlet")
        id1 = self.send_stream_message(self.example_user("hamlet"), "Denmark", topic_name="topic1")
        result = self.client_patch(
            "/json/messages/" + str(id1),
            {
                "topic": "edited",
                "propagate_mode": "invalid",
            },
        )
        self.assert_json_error(result, "Invalid propagate_mode")
        self.check_topic(id1, topic_name="topic1")
        result = self.client_patch(
            "/json/messages/" + str(id1),
            {
                "content": "edited",
                "propagate_mode": "change_all",
            },
        )
        self.assert_json_error(result, "Invalid propagate_mode without topic edit")
        self.check_topic(id1, topic_name="topic1")
    def test_edit_message_no_topic(self) -> None:
        # A whitespace-only topic is rejected.
        self.login("hamlet")
        msg_id = self.send_stream_message(
            self.example_user("hamlet"), "Denmark", topic_name="editing", content="before edit"
        )
        result = self.client_patch(
            "/json/messages/" + str(msg_id),
            {
                "topic": " ",
            },
        )
        self.assert_json_error(result, "Topic can't be empty!")
    def test_edit_message_invalid_topic(self) -> None:
        # Topics may not contain forbidden characters (e.g. newline).
        self.login("hamlet")
        msg_id = self.send_stream_message(
            self.example_user("hamlet"), "Denmark", topic_name="editing", content="before edit"
        )
        result = self.client_patch(
            "/json/messages/" + str(msg_id),
            {
                "topic": "editing\nfun",
            },
        )
        self.assert_json_error(result, "Invalid character in topic, at position 8!")
    def test_move_message_to_stream_with_content(self) -> None:
        # Changing content while moving between streams is disallowed;
        # nothing should move on failure.
        (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
            "iago", "test move stream", "new stream", "test"
        )
        result = self.client_patch(
            "/json/messages/" + str(msg_id),
            {
                "stream_id": new_stream.id,
                "propagate_mode": "change_all",
                "content": "Not allowed",
            },
        )
        self.assert_json_error(result, "Cannot change message content while changing stream")
        messages = get_topic_messages(user_profile, old_stream, "test")
        self.assert_length(messages, 3)
        messages = get_topic_messages(user_profile, new_stream, "test")
        self.assert_length(messages, 0)
    # Right now, we prevent users from editing widgets.
    def test_edit_submessage(self) -> None:
        self.login("hamlet")
        msg_id = self.send_stream_message(
            self.example_user("hamlet"),
            "Denmark",
            topic_name="editing",
            content="/poll Games?\nYES\nNO",
        )
        result = self.client_patch(
            "/json/messages/" + str(msg_id),
            {
                "content": "/poll Games?\nYES\nNO\nMaybe",
            },
        )
        self.assert_json_error(result, "Widgets cannot be edited.")
class EditMessageTest(EditMessageTestCase):
    def test_query_count_on_to_dict_uncached(self) -> None:
        """Pin the number of database queries MessageDict.to_dict_uncached
        issues, both with and without a precomputed realm_id.
        """
        # `to_dict_uncached` method is used by the mechanisms
        # tested in this class. Hence, its performance is tested here.
        # Generate 3 messages from three different senders.
        user = self.example_user("hamlet")
        realm = user.realm
        self.login_user(user)
        stream_name = "public_stream"
        self.subscribe(user, stream_name)
        message_ids = []
        message_ids.append(self.send_stream_message(user, stream_name, "Message one"))
        user_2 = self.example_user("cordelia")
        self.subscribe(user_2, stream_name)
        message_ids.append(self.send_stream_message(user_2, stream_name, "Message two"))
        self.subscribe(self.notification_bot(realm), stream_name)
        message_ids.append(
            self.send_stream_message(self.notification_bot(realm), stream_name, "Message three")
        )
        messages = [
            Message.objects.select_related().get(id=message_id) for message_id in message_ids
        ]
        # Check number of queries performed
        with queries_captured() as queries:
            MessageDict.to_dict_uncached(messages)
        # 1 query for realm_id per message = 3
        # 1 query each for reactions & submessage for all messages = 2
        self.assert_length(queries, 5)
        realm_id = 2 # Fetched from stream object
        # Passing realm_id explicitly should skip the per-message realm lookups.
        # Check number of queries performed with realm_id
        with queries_captured() as queries:
            MessageDict.to_dict_uncached(messages, realm_id)
        # 1 query each for reactions & submessage for all messages = 2
        self.assert_length(queries, 2)
def test_save_message(self) -> None:
self.login("hamlet")
msg_id = self.send_stream_message(
self.example_user("hamlet"), "Denmark", topic_name="editing", content="before edit"
)
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"content": "after edit",
},
)
self.assert_json_success(result)
self.check_message(msg_id, topic_name="editing", content="after edit")
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"topic": "edited",
},
)
self.assert_json_success(result)
self.check_topic(msg_id, topic_name="edited")
    def test_fetch_message_from_id(self) -> None:
        """GET /json/messages/<id> returns raw content plus message data,
        honoring access rules for personal messages, web-public streams,
        and logged-out spectators.
        """
        self.login("hamlet")
        msg_id = self.send_personal_message(
            from_user=self.example_user("hamlet"),
            to_user=self.example_user("cordelia"),
            content="Personal message",
        )
        result = self.client_get("/json/messages/" + str(msg_id))
        self.assert_json_success(result)
        self.assertEqual(result.json()["raw_content"], "Personal message")
        self.assertEqual(result.json()["message"]["id"], msg_id)
        self.assertEqual(result.json()["message"]["flags"], [])
        # Send message to web-public stream where hamlet is not subscribed.
        # This will test case of user having no `UserMessage` but having access
        # to message.
        web_public_stream = self.make_stream("web-public-stream", is_web_public=True)
        self.subscribe(self.example_user("cordelia"), web_public_stream.name)
        web_public_stream_msg_id = self.send_stream_message(
            self.example_user("cordelia"), web_public_stream.name, content="web-public message"
        )
        result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
        self.assert_json_success(result)
        self.assertEqual(result.json()["raw_content"], "web-public message")
        self.assertEqual(result.json()["message"]["id"], web_public_stream_msg_id)
        # Access without a UserMessage row is flagged as "historical".
        self.assertEqual(result.json()["message"]["flags"], ["read", "historical"])
        # Spectator should be able to fetch message in web-public stream.
        self.logout()
        result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
        self.assert_json_success(result)
        self.assertEqual(result.json()["raw_content"], "web-public message")
        self.assertEqual(result.json()["message"]["id"], web_public_stream_msg_id)
        # Verify default is apply_markdown=True
        self.assertEqual(result.json()["message"]["content"], "<p>web-public message</p>")
        # Verify apply_markdown=False works correctly.
        result = self.client_get(
            "/json/messages/" + str(web_public_stream_msg_id), {"apply_markdown": "false"}
        )
        self.assert_json_success(result)
        self.assertEqual(result.json()["raw_content"], "web-public message")
        self.assertEqual(result.json()["message"]["content"], "web-public message")
        # With web-public streams disabled server-side, spectators get a 401.
        with self.settings(WEB_PUBLIC_STREAMS_ENABLED=False):
            result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
            self.assert_json_error(
                result, "Not logged in: API authentication or user session required", status_code=401
            )
        # Test error cases
        self.login("hamlet")
        result = self.client_get("/json/messages/999999")
        self.assert_json_error(result, "Invalid message(s)")
        self.login("cordelia")
        result = self.client_get(f"/json/messages/{msg_id}")
        self.assert_json_success(result)
        # Othello was not a recipient of the personal message.
        self.login("othello")
        result = self.client_get(f"/json/messages/{msg_id}")
        self.assert_json_error(result, "Invalid message(s)")
    def test_fetch_raw_message_spectator(self) -> None:
        """Spectator (logged-out) fetching of single messages: allowed only
        for web-public streams, and gated on the WEB_PUBLIC_STREAMS_ENABLED
        server setting, the realm's enable_spectator_access property, and
        the realm plan type.
        """
        user_profile = self.example_user("iago")
        self.login("iago")
        web_public_stream = self.make_stream("web-public-stream", is_web_public=True)
        self.subscribe(user_profile, web_public_stream.name)
        web_public_stream_msg_id = self.send_stream_message(
            user_profile, web_public_stream.name, content="web-public message"
        )
        non_web_public_stream = self.make_stream("non-web-public-stream")
        self.subscribe(user_profile, non_web_public_stream.name)
        non_web_public_stream_msg_id = self.send_stream_message(
            user_profile, non_web_public_stream.name, content="non-web-public message"
        )
        # Generate a private message to use in verification.
        private_message_id = self.send_personal_message(user_profile, user_profile)
        invalid_message_id = private_message_id + 1000
        self.logout()
        # Confirm WEB_PUBLIC_STREAMS_ENABLED is enforced.
        with self.settings(WEB_PUBLIC_STREAMS_ENABLED=False):
            result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
            self.assert_json_error(
                result, "Not logged in: API authentication or user session required", 401
            )
        # The realm-level enable_spectator_access setting is also enforced.
        do_set_realm_property(
            user_profile.realm, "enable_spectator_access", False, acting_user=None
        )
        result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
        self.assert_json_error(
            result, "Not logged in: API authentication or user session required", 401
        )
        do_set_realm_property(user_profile.realm, "enable_spectator_access", True, acting_user=None)
        # Verify success with web-public stream and default SELF_HOSTED plan type.
        result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
        self.assert_json_success(result)
        self.assertEqual(result.json()["raw_content"], "web-public message")
        self.assertEqual(result.json()["message"]["flags"], ["read"])
        # Verify LIMITED plan type does not allow web-public access.
        do_change_realm_plan_type(user_profile.realm, Realm.PLAN_TYPE_LIMITED, acting_user=None)
        result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
        self.assert_json_error(
            result, "Not logged in: API authentication or user session required", 401
        )
        # Verify works with STANDARD_FREE plan type too.
        do_change_realm_plan_type(
            user_profile.realm, Realm.PLAN_TYPE_STANDARD_FREE, acting_user=None
        )
        result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
        self.assert_json_success(result)
        self.assertEqual(result.json()["raw_content"], "web-public message")
        # Verify private messages are rejected.
        result = self.client_get("/json/messages/" + str(private_message_id))
        self.assert_json_error(
            result, "Not logged in: API authentication or user session required", 401
        )
        # Verify an actual public stream is required.
        result = self.client_get("/json/messages/" + str(non_web_public_stream_msg_id))
        self.assert_json_error(
            result, "Not logged in: API authentication or user session required", 401
        )
        # Verify invalid message IDs are rejected with the same error message.
        result = self.client_get("/json/messages/" + str(invalid_message_id))
        self.assert_json_error(
            result, "Not logged in: API authentication or user session required", 401
        )
        # Verify deactivated streams are rejected. This may change in the future.
        do_deactivate_stream(web_public_stream, acting_user=None)
        result = self.client_get("/json/messages/" + str(web_public_stream_msg_id))
        self.assert_json_error(
            result, "Not logged in: API authentication or user session required", 401
        )
def test_fetch_raw_message_stream_wrong_realm(self) -> None:
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
stream = self.make_stream("public_stream")
self.subscribe(user_profile, stream.name)
msg_id = self.send_stream_message(
user_profile, stream.name, topic_name="test", content="test"
)
result = self.client_get(f"/json/messages/{msg_id}")
self.assert_json_success(result)
mit_user = self.mit_user("sipbtest")
self.login_user(mit_user)
result = self.client_get(f"/json/messages/{msg_id}", subdomain="zephyr")
self.assert_json_error(result, "Invalid message(s)")
def test_fetch_raw_message_private_stream(self) -> None:
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
stream = self.make_stream("private_stream", invite_only=True)
self.subscribe(user_profile, stream.name)
msg_id = self.send_stream_message(
user_profile, stream.name, topic_name="test", content="test"
)
result = self.client_get(f"/json/messages/{msg_id}")
self.assert_json_success(result)
self.login("othello")
result = self.client_get(f"/json/messages/{msg_id}")
self.assert_json_error(result, "Invalid message(s)")
def test_edit_message_no_permission(self) -> None:
self.login("hamlet")
msg_id = self.send_stream_message(
self.example_user("iago"), "Denmark", topic_name="editing", content="before edit"
)
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"content": "content after edit",
},
)
self.assert_json_error(result, "You don't have permission to edit this message")
def test_edit_message_no_content(self) -> None:
self.login("hamlet")
msg_id = self.send_stream_message(
self.example_user("hamlet"), "Denmark", topic_name="editing", content="before edit"
)
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"content": " ",
},
)
self.assert_json_success(result)
content = Message.objects.filter(id=msg_id).values_list("content", flat=True)[0]
self.assertEqual(content, "(deleted)")
    def test_edit_message_history_disabled(self) -> None:
        """With allow_edit_history off, the /history endpoint errors and
        normally fetched messages carry no edit_history field.
        """
        user_profile = self.example_user("hamlet")
        do_set_realm_property(user_profile.realm, "allow_edit_history", False, acting_user=None)
        self.login("hamlet")
        # Edit a message once so that it actually has edit history stored.
        msg_id_1 = self.send_stream_message(
            self.example_user("hamlet"),
            "Denmark",
            topic_name="editing",
            content="content before edit",
        )
        new_content_1 = "content after edit"
        result_1 = self.client_patch(
            f"/json/messages/{msg_id_1}",
            {
                "content": new_content_1,
            },
        )
        self.assert_json_success(result_1)
        result = self.client_get(f"/json/messages/{msg_id_1}/history")
        self.assert_json_error(result, "Message edit history is disabled in this organization")
        # Fetching messages normally should still work, but without any
        # edit history data attached.
        messages_result = self.client_get(
            "/json/messages", {"anchor": msg_id_1, "num_before": 0, "num_after": 10}
        )
        self.assert_json_success(messages_result)
        json_messages = orjson.loads(messages_result.content)
        for msg in json_messages["messages"]:
            self.assertNotIn("edit_history", msg)
    def test_edit_message_history(self) -> None:
        """The /history endpoint returns each revision's rendered content,
        the previous rendered content, and an HTML diff between them, for
        both single-line and multi-line edits.
        """
        self.login("hamlet")
        # Single-line edit
        msg_id_1 = self.send_stream_message(
            self.example_user("hamlet"),
            "Denmark",
            topic_name="editing",
            content="content before edit",
        )
        new_content_1 = "content after edit"
        result_1 = self.client_patch(
            f"/json/messages/{msg_id_1}",
            {
                "content": new_content_1,
            },
        )
        self.assert_json_success(result_1)
        message_edit_history_1 = self.client_get(f"/json/messages/{msg_id_1}/history")
        json_response_1 = orjson.loads(message_edit_history_1.content)
        # message_history lists revisions oldest-first: [0] is the original.
        message_history_1 = json_response_1["message_history"]
        # Check content of message after edit.
        self.assertEqual(message_history_1[0]["rendered_content"], "<p>content before edit</p>")
        self.assertEqual(message_history_1[1]["rendered_content"], "<p>content after edit</p>")
        self.assertEqual(
            message_history_1[1]["content_html_diff"],
            (
                "<div><p>content "
                '<span class="highlight_text_inserted">after</span> '
                '<span class="highlight_text_deleted">before</span>'
                " edit</p></div>"
            ),
        )
        # Check content of message before edit.
        self.assertEqual(
            message_history_1[1]["prev_rendered_content"], "<p>content before edit</p>"
        )
        # Edits on new lines
        msg_id_2 = self.send_stream_message(
            self.example_user("hamlet"),
            "Denmark",
            topic_name="editing",
            content="content before edit, line 1\n\ncontent before edit, line 3",
        )
        new_content_2 = (
            "content before edit, line 1\n"
            "content after edit, line 2\n"
            "content before edit, line 3"
        )
        result_2 = self.client_patch(
            f"/json/messages/{msg_id_2}",
            {
                "content": new_content_2,
            },
        )
        self.assert_json_success(result_2)
        message_edit_history_2 = self.client_get(f"/json/messages/{msg_id_2}/history")
        json_response_2 = orjson.loads(message_edit_history_2.content)
        message_history_2 = json_response_2["message_history"]
        self.assertEqual(
            message_history_2[0]["rendered_content"],
            "<p>content before edit, line 1</p>\n<p>content before edit, line 3</p>",
        )
        self.assertEqual(
            message_history_2[1]["rendered_content"],
            (
                "<p>content before edit, line 1<br>\n"
                "content after edit, line 2<br>\n"
                "content before edit, line 3</p>"
            ),
        )
        self.assertEqual(
            message_history_2[1]["content_html_diff"],
            (
                "<div><p>content before edit, line 1<br> "
                'content <span class="highlight_text_inserted">after edit, line 2<br> '
                "content</span> before edit, line 3</p></div>"
            ),
        )
        self.assertEqual(
            message_history_2[1]["prev_rendered_content"],
            "<p>content before edit, line 1</p>\n<p>content before edit, line 3</p>",
        )
    def test_empty_message_edit(self) -> None:
        """Editing a message whose rendered_content is empty (simulating a
        rendering bug) succeeds, and history diffs handle empty revisions.
        """
        self.login("hamlet")
        msg_id = self.send_stream_message(
            self.example_user("hamlet"),
            "Denmark",
            topic_name="editing",
            content="We will edit this to render as empty.",
        )
        # Edit that manually to simulate a rendering bug
        message = Message.objects.get(id=msg_id)
        message.rendered_content = ""
        message.save(update_fields=["rendered_content"])
        self.assert_json_success(
            self.client_patch(
                "/json/messages/" + str(msg_id),
                {
                    "content": "We will edit this to also render as empty.",
                },
            )
        )
        # And again tweak to simulate a rendering bug
        message = Message.objects.get(id=msg_id)
        message.rendered_content = ""
        message.save(update_fields=["rendered_content"])
        history = self.client_get("/json/messages/" + str(msg_id) + "/history")
        message_history = orjson.loads(history.content)["message_history"]
        self.assertEqual(message_history[0]["rendered_content"], "")
        self.assertEqual(message_history[1]["rendered_content"], "")
        # The diff of two empty revisions is an empty div.
        self.assertEqual(message_history[1]["content_html_diff"], "<div></div>")
    def test_edit_link(self) -> None:
        """Editing the URL inside a markdown link renders correctly in both
        revisions, and the history diff shows the old and new link targets.
        """
        # Link editing
        self.login("hamlet")
        msg_id_1 = self.send_stream_message(
            self.example_user("hamlet"),
            "Denmark",
            topic_name="editing",
            content="Here is a link to [zulip](www.zulip.org).",
        )
        new_content_1 = "Here is a link to [zulip](www.zulipchat.com)."
        result_1 = self.client_patch(
            f"/json/messages/{msg_id_1}",
            {
                "content": new_content_1,
            },
        )
        self.assert_json_success(result_1)
        message_edit_history_1 = self.client_get(f"/json/messages/{msg_id_1}/history")
        json_response_1 = orjson.loads(message_edit_history_1.content)
        message_history_1 = json_response_1["message_history"]
        # Check content of message after edit.
        self.assertEqual(
            message_history_1[0]["rendered_content"],
            "<p>Here is a link to " '<a href="http://www.zulip.org">zulip</a>.</p>',
        )
        self.assertEqual(
            message_history_1[1]["rendered_content"],
            "<p>Here is a link to " '<a href="http://www.zulipchat.com">zulip</a>.</p>',
        )
        # The diff spells out both link targets, marking the new one as
        # inserted and the old one as deleted.
        self.assertEqual(
            message_history_1[1]["content_html_diff"],
            (
                '<div><p>Here is a link to <a href="http://www.zulipchat.com"'
                ">zulip "
                '<span class="highlight_text_inserted"> Link: http://www.zulipchat.com .'
                '</span> <span class="highlight_text_deleted"> Link: http://www.zulip.org .'
                "</span> </a></p></div>"
            ),
        )
def test_edit_history_unedited(self) -> None:
self.login("hamlet")
msg_id = self.send_stream_message(
self.example_user("hamlet"),
"Denmark",
topic_name="editing",
content="This message has not been edited.",
)
result = self.client_get(f"/json/messages/{msg_id}/history")
self.assert_json_success(result)
message_history = result.json()["message_history"]
self.assert_length(message_history, 1)
def test_user_info_for_updates(self) -> None:
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
self.login_user(hamlet)
self.subscribe(hamlet, "Denmark")
self.subscribe(cordelia, "Denmark")
msg_id = self.send_stream_message(
hamlet, "Denmark", content="@**Cordelia, Lear's daughter**"
)
user_info = get_user_info_for_message_updates(msg_id)
message_user_ids = user_info["message_user_ids"]
self.assertIn(hamlet.id, message_user_ids)
self.assertIn(cordelia.id, message_user_ids)
mention_user_ids = user_info["mention_user_ids"]
self.assertEqual(mention_user_ids, {cordelia.id})
def test_edit_cases(self) -> None:
self.login("hamlet")
hamlet = self.example_user("hamlet")
stream_1 = self.make_stream("stream 1")
stream_2 = self.make_stream("stream 2")
stream_3 = self.make_stream("stream 3")
self.subscribe(hamlet, stream_1.name)
self.subscribe(hamlet, stream_2.name)
self.subscribe(hamlet, stream_3.name)
msg_id = self.send_stream_message(
self.example_user("hamlet"), "stream 1", topic_name="topic 1", content="content 1"
)
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"content": "content 2",
},
)
self.assert_json_success(result)
history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
self.assertEqual(history[0]["prev_content"], "content 1")
self.assertEqual(history[0]["user_id"], hamlet.id)
self.assertEqual(
set(history[0].keys()),
{
"timestamp",
"prev_content",
"user_id",
"prev_rendered_content",
"prev_rendered_content_version",
},
)
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"topic": "topic 2",
},
)
self.assert_json_success(result)
history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
self.assertEqual(history[0]["prev_topic"], "topic 1")
self.assertEqual(history[0]["topic"], "topic 2")
self.assertEqual(history[0]["user_id"], hamlet.id)
self.assertEqual(
set(history[0].keys()),
{"timestamp", "prev_topic", "topic", "user_id"},
)
self.login("iago")
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"stream_id": stream_2.id,
},
)
self.assert_json_success(result)
history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
self.assertEqual(history[0]["prev_stream"], stream_1.id)
self.assertEqual(history[0]["stream"], stream_2.id)
self.assertEqual(history[0]["user_id"], self.example_user("iago").id)
self.assertEqual(set(history[0].keys()), {"timestamp", "prev_stream", "stream", "user_id"})
self.login("hamlet")
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"content": "content 3",
"topic": "topic 3",
},
)
self.assert_json_success(result)
history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
self.assertEqual(history[0]["prev_content"], "content 2")
self.assertEqual(history[0]["prev_topic"], "topic 2")
self.assertEqual(history[0]["topic"], "topic 3")
self.assertEqual(history[0]["user_id"], hamlet.id)
self.assertEqual(
set(history[0].keys()),
{
"timestamp",
"prev_topic",
"topic",
"prev_content",
"user_id",
"prev_rendered_content",
"prev_rendered_content_version",
},
)
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"content": "content 4",
},
)
self.assert_json_success(result)
history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
self.assertEqual(history[0]["prev_content"], "content 3")
self.assertEqual(history[0]["user_id"], hamlet.id)
self.login("iago")
result = self.client_patch(
f"/json/messages/{msg_id}",
{
"topic": "topic 4",
"stream_id": stream_3.id,
},
)
self.assert_json_success(result)
history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
self.assertEqual(history[0]["prev_topic"], "topic 3")
self.assertEqual(history[0]["topic"], "topic 4")
self.assertEqual(history[0]["prev_stream"], stream_2.id)
self.assertEqual(history[0]["stream"], stream_3.id)
self.assertEqual(history[0]["user_id"], self.example_user("iago").id)
self.assertEqual(
set(history[0].keys()),
{
"timestamp",
"prev_topic",
"topic",
"prev_stream",
"stream",
"user_id",
},
)
history = orjson.loads(Message.objects.get(id=msg_id).edit_history)
self.assertEqual(history[0]["prev_topic"], "topic 3")
self.assertEqual(history[0]["topic"], "topic 4")
self.assertEqual(history[0]["stream"], stream_3.id)
self.assertEqual(history[0]["prev_stream"], stream_2.id)
self.assertEqual(history[1]["prev_content"], "content 3")
self.assertEqual(history[2]["prev_topic"], "topic 2")
self.assertEqual(history[2]["topic"], "topic 3")
self.assertEqual(history[2]["prev_content"], "content 2")
self.assertEqual(history[3]["stream"], stream_2.id)
self.assertEqual(history[3]["prev_stream"], stream_1.id)
self.assertEqual(history[4]["prev_topic"], "topic 1")
self.assertEqual(history[4]["topic"], "topic 2")
self.assertEqual(history[5]["prev_content"], "content 1")
message_edit_history = self.client_get(f"/json/messages/{msg_id}/history")
json_response = orjson.loads(message_edit_history.content)
message_history = list(reversed(json_response["message_history"]))
i = 0
for entry in message_history:
expected_entries = {"content", "rendered_content", "topic", "timestamp", "user_id"}
if i in {0, 2, 4}:
expected_entries.add("prev_topic")
expected_entries.add("topic")
if i in {1, 2, 5}:
expected_entries.add("prev_content")
expected_entries.add("prev_rendered_content")
expected_entries.add("content_html_diff")
if i in {0, 3}:
expected_entries.add("prev_stream")
expected_entries.add("stream")
i += 1
self.assertEqual(expected_entries, set(entry.keys()))
self.assert_length(message_history, 7)
self.assertEqual(message_history[0]["topic"], "topic 4")
self.assertEqual(message_history[0]["prev_topic"], "topic 3")
self.assertEqual(message_history[0]["stream"], stream_3.id)
self.assertEqual(message_history[0]["prev_stream"], stream_2.id)
self.assertEqual(message_history[0]["content"], "content 4")
self.assertEqual(message_history[1]["topic"], "topic 3")
self.assertEqual(message_history[1]["content"], "content 4")
self.assertEqual(message_history[1]["prev_content"], "content 3")
self.assertEqual(message_history[2]["topic"], "topic 3")
self.assertEqual(message_history[2]["prev_topic"], "topic 2")
self.assertEqual(message_history[2]["content"], "content 3")
self.assertEqual(message_history[2]["prev_content"], "content 2")
self.assertEqual(message_history[3]["topic"], "topic 2")
self.assertEqual(message_history[3]["stream"], stream_2.id)
self.assertEqual(message_history[3]["prev_stream"], stream_1.id)
self.assertEqual(message_history[3]["content"], "content 2")
self.assertEqual(message_history[4]["topic"], "topic 2")
self.assertEqual(message_history[4]["prev_topic"], "topic 1")
self.assertEqual(message_history[4]["content"], "content 2")
self.assertEqual(message_history[5]["topic"], "topic 1")
self.assertEqual(message_history[5]["content"], "content 2")
self.assertEqual(message_history[5]["prev_content"], "content 1")
self.assertEqual(message_history[6]["content"], "content 1")
self.assertEqual(message_history[6]["topic"], "topic 1")
    def test_edit_message_content_limit(self) -> None:
        """Exercise message_content_edit_limit_seconds: content edits are
        blocked once the limit passes, topic-only edits are not, a limit of
        0 disables the check, and allow_message_editing=False blocks all
        edits regardless of the limit.
        """
        def set_message_editing_params(
            allow_message_editing: bool,
            message_content_edit_limit_seconds: int,
            edit_topic_policy: int,
        ) -> None:
            """Update the realm's message-editing settings."""
            result = self.client_patch(
                "/json/realm",
                {
                    "allow_message_editing": orjson.dumps(allow_message_editing).decode(),
                    "message_content_edit_limit_seconds": message_content_edit_limit_seconds,
                    "edit_topic_policy": edit_topic_policy,
                },
            )
            self.assert_json_success(result)
        def do_edit_message_assert_success(
            id_: int, unique_str: str, topic_only: bool = False
        ) -> None:
            """Edit the message (topic, plus content unless topic_only) and
            expect success."""
            new_topic = "topic" + unique_str
            new_content = "content" + unique_str
            params_dict = {"topic": new_topic}
            if not topic_only:
                params_dict["content"] = new_content
            result = self.client_patch(f"/json/messages/{id_}", params_dict)
            self.assert_json_success(result)
            if topic_only:
                self.check_topic(id_, topic_name=new_topic)
            else:
                self.check_message(id_, topic_name=new_topic, content=new_content)
        def do_edit_message_assert_error(
            id_: int, unique_str: str, error: str, topic_only: bool = False
        ) -> None:
            """Edit the message, expect the given error, and verify the
            message is unchanged."""
            message = Message.objects.get(id=id_)
            old_topic = message.topic_name()
            old_content = message.content
            new_topic = "topic" + unique_str
            new_content = "content" + unique_str
            params_dict = {"topic": new_topic}
            if not topic_only:
                params_dict["content"] = new_content
            result = self.client_patch(f"/json/messages/{id_}", params_dict)
            message = Message.objects.get(id=id_)
            self.assert_json_error(result, error)
            msg = Message.objects.get(id=id_)
            self.assertEqual(msg.topic_name(), old_topic)
            self.assertEqual(msg.content, old_content)
        self.login("iago")
        # Send a message and backdate it by 180 seconds to test the limits.
        id_ = self.send_stream_message(
            self.example_user("iago"), "Denmark", content="content", topic_name="topic"
        )
        message = Message.objects.get(id=id_)
        message.date_sent = message.date_sent - datetime.timedelta(seconds=180)
        message.save()
        # Message age (180s) is within a 240s limit: full edit succeeds.
        set_message_editing_params(True, 240, Realm.POLICY_ADMINS_ONLY)
        do_edit_message_assert_success(id_, "A")
        # Past a 120s limit: topic-only edit still works, content edit fails.
        set_message_editing_params(True, 120, Realm.POLICY_ADMINS_ONLY)
        do_edit_message_assert_success(id_, "B", True)
        do_edit_message_assert_error(id_, "C", "The time limit for editing this message has passed")
        # A limit of 0 means no time limit at all.
        set_message_editing_params(True, 0, Realm.POLICY_ADMINS_ONLY)
        do_edit_message_assert_success(id_, "D")
        # With message editing disabled, even topic-only edits are rejected,
        # regardless of the configured time limit.
        set_message_editing_params(False, 240, Realm.POLICY_ADMINS_ONLY)
        do_edit_message_assert_error(
            id_, "E", "Your organization has turned off message editing", True
        )
        set_message_editing_params(False, 120, Realm.POLICY_ADMINS_ONLY)
        do_edit_message_assert_error(
            id_, "F", "Your organization has turned off message editing", True
        )
        set_message_editing_params(False, 0, Realm.POLICY_ADMINS_ONLY)
        do_edit_message_assert_error(
            id_, "G", "Your organization has turned off message editing", True
        )
    def test_edit_topic_policy(self) -> None:
        """Exercise each edit_topic_policy value against users of different
        roles, plus the topic-edit time limit and the "(no topic)" special
        case.
        """
        def set_message_editing_params(
            allow_message_editing: bool,
            message_content_edit_limit_seconds: int,
            edit_topic_policy: int,
        ) -> None:
            """Update the realm's message-editing settings as iago."""
            self.login("iago")
            result = self.client_patch(
                "/json/realm",
                {
                    "allow_message_editing": orjson.dumps(allow_message_editing).decode(),
                    "message_content_edit_limit_seconds": message_content_edit_limit_seconds,
                    "edit_topic_policy": edit_topic_policy,
                },
            )
            self.assert_json_success(result)
        def do_edit_message_assert_success(id_: int, unique_str: str, acting_user: str) -> None:
            """Edit the topic as acting_user and expect success."""
            self.login(acting_user)
            new_topic = "topic" + unique_str
            params_dict = {"topic": new_topic}
            result = self.client_patch(f"/json/messages/{id_}", params_dict)
            self.assert_json_success(result)
            self.check_topic(id_, topic_name=new_topic)
        def do_edit_message_assert_error(
            id_: int, unique_str: str, error: str, acting_user: str
        ) -> None:
            """Edit the topic as acting_user, expect the given error, and
            verify the message is unchanged."""
            self.login(acting_user)
            message = Message.objects.get(id=id_)
            old_topic = message.topic_name()
            old_content = message.content
            new_topic = "topic" + unique_str
            params_dict = {"topic": new_topic}
            result = self.client_patch(f"/json/messages/{id_}", params_dict)
            message = Message.objects.get(id=id_)
            self.assert_json_error(result, error)
            msg = Message.objects.get(id=id_)
            self.assertEqual(msg.topic_name(), old_topic)
            self.assertEqual(msg.content, old_content)
        id_ = self.send_stream_message(
            self.example_user("hamlet"), "Denmark", content="content", topic_name="topic"
        )
        message = Message.objects.get(id=id_)
        # Backdate the message by 180 seconds to exercise time limits below.
        message.date_sent = message.date_sent - datetime.timedelta(seconds=180)
        message.save()
        # Under POLICY_EVERYONE, even polonius can edit the topic.
        polonius = self.example_user("polonius")
        self.subscribe(polonius, "Denmark")
        set_message_editing_params(True, 0, Realm.POLICY_EVERYONE)
        do_edit_message_assert_success(id_, "A", "polonius")
        # Under POLICY_MEMBERS_ONLY, polonius is rejected but cordelia succeeds.
        set_message_editing_params(True, 0, Realm.POLICY_MEMBERS_ONLY)
        do_edit_message_assert_error(
            id_, "B", "You don't have permission to edit this message", "polonius"
        )
        do_edit_message_assert_success(id_, "B", "cordelia")
        # only full members can edit topic of a message
        set_message_editing_params(True, 0, Realm.POLICY_FULL_MEMBERS_ONLY)
        cordelia = self.example_user("cordelia")
        # With a 10-day waiting period, cordelia at 9 days is not yet a full
        # member; at 11 days she is.
        do_set_realm_property(cordelia.realm, "waiting_period_threshold", 10, acting_user=None)
        cordelia.date_joined = timezone_now() - datetime.timedelta(days=9)
        cordelia.save()
        do_edit_message_assert_error(
            id_, "C", "You don't have permission to edit this message", "cordelia"
        )
        cordelia.date_joined = timezone_now() - datetime.timedelta(days=11)
        cordelia.save()
        do_edit_message_assert_success(id_, "C", "cordelia")
        # Under POLICY_MODERATORS_ONLY, cordelia is rejected but shiva succeeds.
        set_message_editing_params(True, 0, Realm.POLICY_MODERATORS_ONLY)
        do_edit_message_assert_error(
            id_, "D", "You don't have permission to edit this message", "cordelia"
        )
        do_edit_message_assert_success(id_, "D", "shiva")
        # only admins can edit the topics of messages
        set_message_editing_params(True, 0, Realm.POLICY_ADMINS_ONLY)
        do_edit_message_assert_error(
            id_, "E", "You don't have permission to edit this message", "shiva"
        )
        do_edit_message_assert_success(id_, "E", "iago")
        # With message editing disabled, nobody can edit topics.
        set_message_editing_params(False, 0, Realm.POLICY_EVERYONE)
        do_edit_message_assert_error(
            id_, "D", "Your organization has turned off message editing", "cordelia"
        )
        # Backdate the message much further; iago and shiva can still edit
        # the topic, but cordelia now hits the topic-edit time limit.
        message.date_sent = message.date_sent - datetime.timedelta(seconds=290000)
        message.save()
        set_message_editing_params(True, 0, Realm.POLICY_EVERYONE)
        do_edit_message_assert_success(id_, "E", "iago")
        do_edit_message_assert_success(id_, "F", "shiva")
        do_edit_message_assert_error(
            id_, "G", "The time limit for editing this message's topic has passed", "cordelia"
        )
        # anyone should be able to edit "no topic" indefinitely
        message.set_topic_name("(no topic)")
        message.save()
        do_edit_message_assert_success(id_, "D", "cordelia")
    @mock.patch("zerver.actions.message_edit.send_event")
    def test_edit_topic_public_history_stream(self, mock_send_event: mock.MagicMock) -> None:
        """do_update_message notifies the right set of users when a topic is
        changed in a stream with public history.
        """
        stream_name = "Macbeth"
        hamlet = self.example_user("hamlet")
        cordelia = self.example_user("cordelia")
        self.make_stream(stream_name, history_public_to_subscribers=True)
        self.subscribe(hamlet, stream_name)
        self.login_user(hamlet)
        message_id = self.send_stream_message(hamlet, stream_name, "Where am I?")
        self.login_user(cordelia)
        self.subscribe(cordelia, stream_name)
        message = Message.objects.get(id=message_id)
        def do_update_message_topic_success(
            user_profile: UserProfile,
            message: Message,
            topic_name: str,
            users_to_be_notified: List[Dict[str, Any]],
        ) -> None:
            """Change the topic via do_update_message and assert the update
            event was sent to exactly users_to_be_notified."""
            do_update_message(
                user_profile=user_profile,
                target_message=message,
                new_stream=None,
                topic_name=topic_name,
                propagate_mode="change_later",
                send_notification_to_old_thread=False,
                send_notification_to_new_thread=False,
                content=None,
                rendering_result=None,
                prior_mention_user_ids=set(),
                mention_data=None,
            )
            mock_send_event.assert_called_with(mock.ANY, mock.ANY, users_to_be_notified)
        # Returns the users that need to be notified when a message topic is changed
        def notify(user_id: int) -> Dict[str, Any]:
            um = UserMessage.objects.get(message=message_id)
            if um.user_profile_id == user_id:
                return {
                    "id": user_id,
                    "flags": um.flags_list(),
                }
            else:
                return {
                    "id": user_id,
                    "flags": ["read"],
                }
        users_to_be_notified = list(map(notify, [hamlet.id, cordelia.id]))
        # Edit topic of a message sent before Cordelia subscribed the stream
        do_update_message_topic_success(
            cordelia, message, "Othello eats apple", users_to_be_notified
        )
        # If Cordelia is long-term idle, she doesn't get a notification.
        cordelia.long_term_idle = True
        cordelia.save()
        users_to_be_notified = list(map(notify, [hamlet.id]))
        do_update_message_topic_success(
            cordelia, message, "Another topic idle", users_to_be_notified
        )
        cordelia.long_term_idle = False
        cordelia.save()
        # Hamlet is still notified after unsubscribing — presumably because
        # he has a UserMessage row as the sender; confirm against
        # do_update_message's notification logic.
        self.unsubscribe(hamlet, stream_name)
        users_to_be_notified = list(map(notify, [hamlet.id, cordelia.id]))
        do_update_message_topic_success(cordelia, message, "Another topic", users_to_be_notified)
        # Cordelia is not notified of this topic change — she is no longer
        # a subscriber and doesn't have a UserMessage row.
        self.subscribe(hamlet, stream_name)
        self.unsubscribe(cordelia, stream_name)
        self.login_user(hamlet)
        users_to_be_notified = list(map(notify, [hamlet.id]))
        do_update_message_topic_success(hamlet, message, "Change again", users_to_be_notified)
@mock.patch("zerver.actions.message_edit.send_event")
def test_edit_muted_topic(self, mock_send_event: mock.MagicMock) -> None:
stream_name = "Stream 123"
stream = self.make_stream(stream_name)
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
aaron = self.example_user("aaron")
self.subscribe(hamlet, stream_name)
self.login_user(hamlet)
message_id = self.send_stream_message(
hamlet, stream_name, topic_name="Topic1", content="Hello World"
)
self.subscribe(cordelia, stream_name)
self.login_user(cordelia)
self.subscribe(aaron, stream_name)
self.login_user(aaron)
already_muted_topic = "Already muted topic"
muted_topics = [
[stream_name, "Topic1"],
[stream_name, "Topic2"],
[stream_name, already_muted_topic],
]
set_topic_mutes(hamlet, muted_topics)
set_topic_mutes(cordelia, muted_topics)
def notify(user_id: int) -> Dict[str, Any]:
um = UserMessage.objects.get(message=message_id)
if um.user_profile_id == user_id:
return {
"id": user_id,
"flags": um.flags_list(),
}
else:
return {
"id": user_id,
"flags": ["read"],
}
users_to_be_notified = list(map(notify, [hamlet.id, cordelia.id, aaron.id]))
change_all_topic_name = "Topic 1 edited"
with queries_captured() as queries:
check_update_message(
user_profile=hamlet,
message_id=message_id,
stream_id=None,
topic_name=change_all_topic_name,
propagate_mode="change_all",
send_notification_to_old_thread=False,
send_notification_to_new_thread=False,
content=None,
)
self.assert_length(queries, 18)
for muting_user in get_users_muting_topic(stream.id, change_all_topic_name):
for user in users_to_be_notified:
if muting_user.id == user["id"]:
user["muted_topics"] = get_topic_mutes(muting_user)
break
self.assertFalse(topic_is_muted(hamlet, stream.id, "Topic1"))
self.assertFalse(topic_is_muted(cordelia, stream.id, "Topic1"))
self.assertFalse(topic_is_muted(aaron, stream.id, "Topic1"))
self.assertTrue(topic_is_muted(hamlet, stream.id, "Topic2"))
self.assertTrue(topic_is_muted(cordelia, stream.id, "Topic2"))
self.assertFalse(topic_is_muted(aaron, stream.id, "Topic2"))
self.assertTrue(topic_is_muted(hamlet, stream.id, change_all_topic_name))
self.assertTrue(topic_is_muted(cordelia, stream.id, change_all_topic_name))
self.assertFalse(topic_is_muted(aaron, stream.id, change_all_topic_name))
change_later_topic_name = "Topic 1 edited again"
check_update_message(
user_profile=hamlet,
message_id=message_id,
stream_id=None,
topic_name=change_later_topic_name,
propagate_mode="change_later",
send_notification_to_old_thread=False,
send_notification_to_new_thread=False,
content=None,
)
self.assertFalse(topic_is_muted(hamlet, stream.id, change_all_topic_name))
self.assertTrue(topic_is_muted(hamlet, stream.id, change_later_topic_name))
check_update_message(
user_profile=hamlet,
message_id=message_id,
stream_id=None,
topic_name=already_muted_topic,
propagate_mode="change_all",
send_notification_to_old_thread=False,
send_notification_to_new_thread=False,
content=None,
)
self.assertFalse(topic_is_muted(hamlet, stream.id, change_later_topic_name))
self.assertTrue(topic_is_muted(hamlet, stream.id, already_muted_topic))
change_one_topic_name = "Topic 1 edited change_one"
check_update_message(
user_profile=hamlet,
message_id=message_id,
stream_id=None,
topic_name=change_one_topic_name,
propagate_mode="change_one",
send_notification_to_old_thread=False,
send_notification_to_new_thread=False,
content=None,
)
self.assertTrue(topic_is_muted(hamlet, stream.id, change_one_topic_name))
self.assertFalse(topic_is_muted(hamlet, stream.id, change_later_topic_name))
desdemona = self.example_user("desdemona")
message_id = self.send_stream_message(
hamlet, stream_name, topic_name="New topic", content="Hello World"
)
new_public_stream = self.make_stream("New public stream")
self.subscribe(desdemona, new_public_stream.name)
self.login_user(desdemona)
muted_topics = [
[stream_name, "New topic"],
]
set_topic_mutes(desdemona, muted_topics)
set_topic_mutes(cordelia, muted_topics)
with queries_captured() as queries:
check_update_message(
user_profile=desdemona,
message_id=message_id,
stream_id=new_public_stream.id,
propagate_mode="change_all",
send_notification_to_old_thread=False,
send_notification_to_new_thread=False,
content=None,
)
self.assert_length(queries, 31)
self.assertFalse(topic_is_muted(desdemona, stream.id, "New topic"))
self.assertFalse(topic_is_muted(cordelia, stream.id, "New topic"))
self.assertFalse(topic_is_muted(aaron, stream.id, "New topic"))
self.assertTrue(topic_is_muted(desdemona, new_public_stream.id, "New topic"))
self.assertTrue(topic_is_muted(cordelia, new_public_stream.id, "New topic"))
self.assertFalse(topic_is_muted(aaron, new_public_stream.id, "New topic"))
message_id = self.send_stream_message(
hamlet, stream_name, topic_name="New topic", content="Hello World"
)
new_private_stream = self.make_stream("New private stream", invite_only=True)
self.subscribe(desdemona, new_private_stream.name)
self.login_user(desdemona)
muted_topics = [
[stream_name, "New topic"],
]
set_topic_mutes(desdemona, muted_topics)
set_topic_mutes(cordelia, muted_topics)
with queries_captured() as queries:
check_update_message(
user_profile=desdemona,
message_id=message_id,
stream_id=new_private_stream.id,
propagate_mode="change_all",
send_notification_to_old_thread=False,
send_notification_to_new_thread=False,
content=None,
)
self.assert_length(queries, 33)
self.assertFalse(topic_is_muted(desdemona, stream.id, "New topic"))
self.assertFalse(topic_is_muted(cordelia, stream.id, "New topic"))
self.assertFalse(topic_is_muted(aaron, stream.id, "New topic"))
self.assertTrue(topic_is_muted(desdemona, new_private_stream.id, "New topic"))
self.assertFalse(topic_is_muted(cordelia, new_private_stream.id, "New topic"))
self.assertFalse(topic_is_muted(aaron, new_private_stream.id, "New topic"))
desdemona = self.example_user("desdemona")
message_id = self.send_stream_message(
hamlet, stream_name, topic_name="New topic 2", content="Hello World"
)
self.login_user(desdemona)
muted_topics = [
[stream_name, "New topic 2"],
]
set_topic_mutes(desdemona, muted_topics)
set_topic_mutes(cordelia, muted_topics)
with queries_captured() as queries:
check_update_message(
user_profile=desdemona,
message_id=message_id,
stream_id=new_public_stream.id,
topic_name="changed topic name",
propagate_mode="change_all",
send_notification_to_old_thread=False,
send_notification_to_new_thread=False,
content=None,
)
self.assert_length(queries, 31)
self.assertFalse(topic_is_muted(desdemona, stream.id, "New topic 2"))
self.assertFalse(topic_is_muted(cordelia, stream.id, "New topic 2"))
self.assertFalse(topic_is_muted(aaron, stream.id, "New topic 2"))
self.assertTrue(topic_is_muted(desdemona, new_public_stream.id, "changed topic name"))
self.assertTrue(topic_is_muted(cordelia, new_public_stream.id, "changed topic name"))
self.assertFalse(topic_is_muted(aaron, new_public_stream.id, "changed topic name"))
second_message_id = self.send_stream_message(
hamlet, stream_name, topic_name="changed topic name", content="Second message"
)
with queries_captured() as queries:
check_update_message(
user_profile=desdemona,
message_id=second_message_id,
stream_id=new_public_stream.id,
topic_name="final topic name",
propagate_mode="change_later",
send_notification_to_old_thread=False,
send_notification_to_new_thread=False,
content=None,
)
self.assert_length(queries, 25)
self.assertTrue(topic_is_muted(desdemona, new_public_stream.id, "changed topic name"))
self.assertTrue(topic_is_muted(cordelia, new_public_stream.id, "changed topic name"))
self.assertFalse(topic_is_muted(aaron, new_public_stream.id, "changed topic name"))
self.assertFalse(topic_is_muted(desdemona, new_public_stream.id, "final topic name"))
self.assertFalse(topic_is_muted(cordelia, new_public_stream.id, "final topic name"))
self.assertFalse(topic_is_muted(aaron, new_public_stream.id, "final topic name"))
@mock.patch("zerver.actions.message_edit.send_event")
def test_wildcard_mention(self, mock_send_event: mock.MagicMock) -> None:
stream_name = "Macbeth"
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
self.make_stream(stream_name, history_public_to_subscribers=True)
self.subscribe(hamlet, stream_name)
self.subscribe(cordelia, stream_name)
self.login_user(hamlet)
message_id = self.send_stream_message(hamlet, stream_name, "Hello everyone")
def notify(user_id: int) -> Dict[str, Any]:
return {
"id": user_id,
"flags": ["wildcard_mentioned"],
}
users_to_be_notified = sorted(map(notify, [cordelia.id, hamlet.id]), key=itemgetter("id"))
result = self.client_patch(
f"/json/messages/{message_id}",
{
"content": "Hello @**everyone**",
},
)
self.assert_json_success(result)
# Extract the send_event call where event type is 'update_message'.
# Here we assert wildcard_mention_user_ids has been set properly.
called = False
for call_args in mock_send_event.call_args_list:
(arg_realm, arg_event, arg_notified_users) = call_args[0]
if arg_event["type"] == "update_message":
self.assertEqual(arg_event["type"], "update_message")
self.assertEqual(arg_event["wildcard_mention_user_ids"], [cordelia.id, hamlet.id])
self.assertEqual(
sorted(arg_notified_users, key=itemgetter("id")), users_to_be_notified
)
called = True
self.assertTrue(called)
def test_wildcard_mention_restrictions_when_editing(self) -> None:
    """wildcard_mention_policy is enforced on message *edits* as well:
    with a moderators-only policy, a plain member cannot add @**everyone**
    in a large stream, but can in a small one; a moderator always can.
    """
    cordelia = self.example_user("cordelia")
    shiva = self.example_user("shiva")
    self.login("cordelia")
    stream_name = "Macbeth"
    self.make_stream(stream_name, history_public_to_subscribers=True)
    self.subscribe(cordelia, stream_name)
    self.subscribe(shiva, stream_name)
    message_id = self.send_stream_message(cordelia, stream_name, "Hello everyone")

    realm = cordelia.realm
    do_set_realm_property(
        realm,
        "wildcard_mention_policy",
        Realm.WILDCARD_MENTION_POLICY_MODERATORS,
        acting_user=None,
    )

    # With 17 subscribers (above the small-stream exemption threshold,
    # presumably 15 — confirm against zerver.lib.message), a member's
    # wildcard mention is rejected.
    with mock.patch("zerver.lib.message.num_subscribers_for_stream_id", return_value=17):
        result = self.client_patch(
            "/json/messages/" + str(message_id),
            {
                "content": "Hello @**everyone**",
            },
        )
    self.assert_json_error(
        result, "You do not have permission to use wildcard mentions in this stream."
    )

    # With only 14 subscribers, the same edit succeeds.
    with mock.patch("zerver.lib.message.num_subscribers_for_stream_id", return_value=14):
        result = self.client_patch(
            "/json/messages/" + str(message_id),
            {
                "content": "Hello @**everyone**",
            },
        )
        self.assert_json_success(result)

    # A moderator may use wildcard mentions regardless of stream size.
    self.login("shiva")
    message_id = self.send_stream_message(shiva, stream_name, "Hi everyone")
    with mock.patch("zerver.lib.message.num_subscribers_for_stream_id", return_value=17):
        result = self.client_patch(
            "/json/messages/" + str(message_id),
            {
                "content": "Hello @**everyone**",
            },
        )
        self.assert_json_success(result)
def test_topic_edit_history_saved_in_all_message(self) -> None:
    """A 'change_later' topic edit must record an edit_history entry on
    every message that moved, while untouched messages (other stream or
    other topic) keep edit_history None.
    """
    self.login("hamlet")
    # id1, id2, id5 share stream+topic; id3 is in another stream and id4
    # in another topic, so the propagated edit must not touch them.
    id1 = self.send_stream_message(self.example_user("hamlet"), "Denmark", topic_name="topic1")
    id2 = self.send_stream_message(self.example_user("iago"), "Denmark", topic_name="topic1")
    id3 = self.send_stream_message(self.example_user("iago"), "Verona", topic_name="topic1")
    id4 = self.send_stream_message(self.example_user("hamlet"), "Denmark", topic_name="topic2")
    id5 = self.send_stream_message(self.example_user("iago"), "Denmark", topic_name="topic1")

    def verify_edit_history(new_topic: str, len_edit_history: int) -> None:
        for msg_id in [id1, id2, id5]:
            msg = Message.objects.get(id=msg_id)
            self.assertEqual(
                new_topic,
                msg.topic_name(),
            )
            # Since edit history is being generated by do_update_message,
            # its contents can vary over time; so, to keep this test
            # robust, we only check the number of edit-history entries.
            self.assert_length(orjson.loads(msg.edit_history), len_edit_history)
        for msg_id in [id3, id4]:
            msg = Message.objects.get(id=msg_id)
            self.assertEqual(msg.edit_history, None)

    new_topic = "edited"
    result = self.client_patch(
        f"/json/messages/{id1}",
        {
            "topic": new_topic,
            "propagate_mode": "change_later",
        },
    )
    self.assert_json_success(result)
    verify_edit_history(new_topic, 1)

    # A second rename appends a second edit-history entry.
    new_topic = "edited2"
    result = self.client_patch(
        f"/json/messages/{id1}",
        {
            "topic": new_topic,
            "propagate_mode": "change_later",
        },
    )
    self.assert_json_success(result)
    verify_edit_history(new_topic, 2)
def test_topic_and_content_edit(self) -> None:
    """Editing topic and content together: only the targeted message
    records a content change, while the topic change propagates to the
    later messages in the topic.
    """
    self.login("hamlet")
    hamlet = self.example_user("hamlet")
    iago = self.example_user("iago")
    first_id = self.send_stream_message(hamlet, "Denmark", "message 1", "topic")
    second_id = self.send_stream_message(iago, "Denmark", "message 2", "topic")
    third_id = self.send_stream_message(hamlet, "Denmark", "message 3", "topic")

    edited_topic = "edited"
    response = self.client_patch(
        f"/json/messages/{first_id}",
        {
            "topic": edited_topic,
            "propagate_mode": "change_later",
            "content": "edited message",
        },
    )
    self.assert_json_success(response)

    # Content change of only the first message should come in edit history
    # and topic change should be present in all the messages.
    first_msg = Message.objects.get(id=first_id)
    second_msg = Message.objects.get(id=second_id)
    third_msg = Message.objects.get(id=third_id)

    self.assertTrue("prev_content" in orjson.loads(first_msg.edit_history)[0].keys())
    for other in (second_msg, third_msg):
        self.assertFalse("prev_content" in orjson.loads(other.edit_history)[0].keys())

    for msg in (first_msg, second_msg, third_msg):
        self.assertEqual(
            edited_topic,
            msg.topic_name(),
        )
        self.assert_length(orjson.loads(msg.edit_history), 1)
def test_propagate_topic_forward(self) -> None:
    """'change_later' renames the topic for the targeted message and all
    later messages in the same stream, leaving earlier messages, other
    topics, and other streams alone.
    """
    self.login("hamlet")
    hamlet = self.example_user("hamlet")
    iago = self.example_user("iago")
    sent_ids = [
        self.send_stream_message(hamlet, "Denmark", topic_name="topic1"),
        self.send_stream_message(iago, "Denmark", topic_name="topic1"),
        self.send_stream_message(iago, "Verona", topic_name="topic1"),
        self.send_stream_message(hamlet, "Denmark", topic_name="topic2"),
        self.send_stream_message(iago, "Denmark", topic_name="topic1"),
    ]

    response = self.client_patch(
        f"/json/messages/{sent_ids[0]}",
        {
            "topic": "edited",
            "propagate_mode": "change_later",
        },
    )
    self.assert_json_success(response)

    expected_topics = ["edited", "edited", "topic1", "topic2", "edited"]
    for msg_id, expected in zip(sent_ids, expected_topics):
        self.check_topic(msg_id, topic_name=expected)
def test_propagate_all_topics(self) -> None:
    """'change_all' renames every message of the topic within its stream,
    leaving other topics and other streams untouched.
    """
    self.login("hamlet")
    hamlet = self.example_user("hamlet")
    iago = self.example_user("iago")
    sent_ids = [
        self.send_stream_message(hamlet, "Denmark", topic_name="topic1"),
        self.send_stream_message(hamlet, "Denmark", topic_name="topic1"),
        self.send_stream_message(iago, "Verona", topic_name="topic1"),
        self.send_stream_message(hamlet, "Denmark", topic_name="topic2"),
        self.send_stream_message(iago, "Denmark", topic_name="topic1"),
        self.send_stream_message(iago, "Denmark", topic_name="topic3"),
    ]

    response = self.client_patch(
        f"/json/messages/{sent_ids[1]}",
        {
            "topic": "edited",
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_success(response)

    expected_topics = ["edited", "edited", "topic1", "topic2", "edited", "topic3"]
    for msg_id, expected in zip(sent_ids, expected_topics):
        self.check_topic(msg_id, topic_name=expected)
def test_propagate_all_topics_with_different_uppercase_letters(self) -> None:
    """'change_all' propagation matches topic names case-insensitively
    within the same stream, but never crosses into other streams.
    """
    self.login("hamlet")
    hamlet = self.example_user("hamlet")
    iago = self.example_user("iago")
    sent_ids = [
        self.send_stream_message(hamlet, "Denmark", topic_name="topic1"),
        self.send_stream_message(hamlet, "Denmark", topic_name="Topic1"),
        self.send_stream_message(iago, "Verona", topic_name="topiC1"),
        self.send_stream_message(iago, "Denmark", topic_name="toPic1"),
    ]

    response = self.client_patch(
        f"/json/messages/{sent_ids[1]}",
        {
            "topic": "edited",
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_success(response)

    # The Verona message keeps its topic; all Denmark variants are renamed.
    expected_topics = ["edited", "edited", "topiC1", "edited"]
    for msg_id, expected in zip(sent_ids, expected_topics):
        self.check_topic(msg_id, topic_name=expected)
def test_move_message_to_stream(self) -> None:
    """Moving a whole topic to another stream leaves a breadcrumb
    notification in the old topic and appends one in the new topic.
    """
    (user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
        "iago",
        "test move stream",
        "new stream",
        "test",
        # Set the user's translation language to German to test that
        # the breadcrumb notifications below are still produced in
        # English, not the requesting user's language.
        "de",
    )
    result = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_all",
        },
        HTTP_ACCEPT_LANGUAGE="de",
    )

    self.assert_json_success(result)

    # Only the breadcrumb message remains in the old topic.
    messages = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(messages, 1)
    self.assertEqual(
        messages[0].content,
        f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
    )

    # The three original messages plus a breadcrumb in the new topic.
    messages = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(messages, 4)
    self.assertEqual(
        messages[3].content,
        f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
    )
def test_move_message_realm_admin_cant_move_to_another_realm(self) -> None:
    """Even a realm administrator must not be able to move a message into
    a stream belonging to a different realm.
    """
    admin = self.example_user("iago")
    self.assertEqual(admin.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
    self.login("iago")

    # A stream in the (separate) "lear" realm.
    foreign_stream = self.make_stream("new", get_realm("lear"))
    msg_id = self.send_stream_message(admin, "Verona", topic_name="test123")

    response = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": foreign_stream.id,
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_error(response, "Invalid stream id")
def test_move_message_realm_admin_cant_move_to_private_stream_without_subscription(
    self,
) -> None:
    """A realm administrator must not be able to move a message into a
    private stream they are not subscribed to.
    """
    admin = self.example_user("iago")
    self.assertEqual(admin.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
    self.login("iago")

    private_target = self.make_stream("new", invite_only=True)
    msg_id = self.send_stream_message(admin, "Verona", topic_name="test123")

    response = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": private_target.id,
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_error(response, "Invalid stream id")
def test_move_message_realm_admin_cant_move_from_private_stream_without_subscription(
    self,
) -> None:
    """A realm administrator who has unsubscribed from a private stream
    must not be able to move messages out of it.
    """
    admin = self.example_user("iago")
    self.assertEqual(admin.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
    self.login("iago")

    # Send a message while subscribed, then drop the subscription.
    self.make_stream("privatestream", invite_only=True)
    self.subscribe(admin, "privatestream")
    msg_id = self.send_stream_message(admin, "privatestream", topic_name="test123")
    self.unsubscribe(admin, "privatestream")

    verona = get_stream("Verona", admin.realm)
    response = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": verona.id,
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_error(
        response,
        "You don't have permission to move this message due to missing access to its stream",
    )
def test_move_message_from_private_stream_message_access_checks(
    self,
) -> None:
    """When moving messages out of a private stream with protected
    history, only the messages the acting admin can access are moved;
    inaccessible messages stay behind.
    """
    hamlet = self.example_user("hamlet")
    user_profile = self.example_user("iago")
    self.assertEqual(user_profile.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
    self.login("iago")

    private_stream = self.make_stream(
        "privatestream", invite_only=True, history_public_to_subscribers=False
    )
    self.subscribe(hamlet, "privatestream")
    # Sent before Iago subscribed, so with protected history it is
    # inaccessible to him.
    original_msg_id = self.send_stream_message(hamlet, "privatestream", topic_name="test123")
    self.subscribe(user_profile, "privatestream")
    new_msg_id = self.send_stream_message(user_profile, "privatestream", topic_name="test123")
    self.unsubscribe(user_profile, "privatestream")
    # Sent while Iago was unsubscribed — also inaccessible to him.
    new_inaccessible_msg_id = self.send_stream_message(
        hamlet, "privatestream", topic_name="test123"
    )
    # Re-subscribe and send another message:
    self.subscribe(user_profile, "privatestream")
    newest_msg_id = self.send_stream_message(
        user_profile, "privatestream", topic_name="test123"
    )

    verona = get_stream("Verona", user_profile.realm)
    result = self.client_patch(
        "/json/messages/" + str(new_msg_id),
        {
            "stream_id": verona.id,
            "propagate_mode": "change_all",
        },
    )

    self.assert_json_success(result)
    self.assertEqual(Message.objects.get(id=new_msg_id).recipient_id, verona.recipient_id)
    self.assertEqual(Message.objects.get(id=newest_msg_id).recipient_id, verona.recipient_id)
    # The original message and the new, inaccessible message weren't moved,
    # because Iago has no access to them: both were sent while he wasn't
    # subscribed and the stream's history is not public to subscribers.
    self.assertEqual(
        Message.objects.get(id=original_msg_id).recipient_id, private_stream.recipient_id
    )
    self.assertEqual(
        Message.objects.get(id=new_inaccessible_msg_id).recipient_id,
        private_stream.recipient_id,
    )
def test_move_message_to_stream_change_later(self) -> None:
    """'change_later' moves the targeted message and all later ones to the
    new stream, leaving the earlier message plus a breadcrumb behind.
    """
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "iago", "test move stream", "new stream", "test"
    )

    response = self.client_patch(
        f"/json/messages/{msg_id_later}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_later",
        },
    )
    self.assert_json_success(response)

    # The first message stays, followed by a breadcrumb notification.
    remaining = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(remaining, 2)
    self.assertEqual(remaining[0].id, msg_id)
    self.assertEqual(
        remaining[1].content,
        f"2 messages were moved from this topic to #**new stream>test** by @_**Iago|{user_profile.id}**.",
    )

    # Two moved messages plus a breadcrumb in the destination topic.
    moved = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(moved, 3)
    self.assertEqual(moved[0].id, msg_id_later)
    self.assertEqual(
        moved[2].content,
        f"2 messages were moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
    )
def test_move_message_to_stream_change_later_all_moved(self) -> None:
    """'change_later' on the first message of a topic moves the entire
    topic, leaving only a breadcrumb notification behind.
    """
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "iago", "test move stream", "new stream", "test"
    )

    response = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_later",
        },
    )
    self.assert_json_success(response)

    remaining = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(remaining, 1)
    self.assertEqual(
        remaining[0].content,
        f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
    )

    moved = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(moved, 4)
    self.assertEqual(moved[0].id, msg_id)
    self.assertEqual(
        moved[3].content,
        f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
    )
def test_move_message_to_stream_change_one(self) -> None:
    """'change_one' moves only the targeted message, leaving the rest of
    the topic (plus a breadcrumb) in the original stream.
    """
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "iago", "test move stream", "new stream", "test"
    )

    response = self.client_patch(
        f"/json/messages/{msg_id_later}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_one",
        },
    )
    self.assert_json_success(response)

    remaining = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(remaining, 3)
    self.assertEqual(remaining[0].id, msg_id)
    self.assertEqual(
        remaining[2].content,
        f"A message was moved from this topic to #**new stream>test** by @_**Iago|{user_profile.id}**.",
    )

    moved = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(moved, 2)
    self.assertEqual(moved[0].id, msg_id_later)
    self.assertEqual(
        moved[1].content,
        f"A message was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
    )
def test_move_message_to_stream_change_all(self) -> None:
    """'change_all' from any message of the topic moves the whole topic to
    the new stream, leaving only a breadcrumb in the old one.
    """
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "iago", "test move stream", "new stream", "test"
    )

    response = self.client_patch(
        f"/json/messages/{msg_id_later}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_success(response)

    remaining = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(remaining, 1)
    self.assertEqual(
        remaining[0].content,
        f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
    )

    moved = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(moved, 4)
    self.assertEqual(moved[0].id, msg_id)
    self.assertEqual(
        moved[3].content,
        f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
    )
def test_move_message_between_streams_policy_setting(self) -> None:
    """move_messages_between_streams_policy gates who may move messages
    to another stream; check each policy value against each user role.
    """
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "othello", "old_stream_1", "new_stream_1", "test"
    )

    def check_move_message_according_to_policy(role: int, expect_fail: bool = False) -> None:
        # Give Othello the requested role, attempt the move, and verify
        # whether the messages actually moved (or stayed put on failure).
        do_change_user_role(user_profile, role, acting_user=None)

        result = self.client_patch(
            "/json/messages/" + str(msg_id),
            {
                "stream_id": new_stream.id,
                "propagate_mode": "change_all",
            },
        )

        if expect_fail:
            self.assert_json_error(result, "You don't have permission to move this message")
            messages = get_topic_messages(user_profile, old_stream, "test")
            self.assert_length(messages, 3)
            messages = get_topic_messages(user_profile, new_stream, "test")
            self.assert_length(messages, 0)
        else:
            self.assert_json_success(result)
            messages = get_topic_messages(user_profile, old_stream, "test")
            self.assert_length(messages, 1)
            messages = get_topic_messages(user_profile, new_stream, "test")
            self.assert_length(messages, 4)

    # POLICY_ADMINS_ONLY: moderators may not move, administrators may.
    do_set_realm_property(
        user_profile.realm,
        "move_messages_between_streams_policy",
        Realm.POLICY_ADMINS_ONLY,
        acting_user=None,
    )
    check_move_message_according_to_policy(UserProfile.ROLE_MODERATOR, expect_fail=True)
    check_move_message_according_to_policy(UserProfile.ROLE_REALM_ADMINISTRATOR)

    # POLICY_MODERATORS_ONLY: members may not move, moderators may.
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "othello", "old_stream_2", "new_stream_2", "test"
    )
    do_set_realm_property(
        user_profile.realm,
        "move_messages_between_streams_policy",
        Realm.POLICY_MODERATORS_ONLY,
        acting_user=None,
    )
    check_move_message_according_to_policy(UserProfile.ROLE_MEMBER, expect_fail=True)
    check_move_message_according_to_policy(UserProfile.ROLE_MODERATOR)

    # POLICY_FULL_MEMBERS_ONLY: members below the waiting-period threshold
    # may not move; once the threshold is 0 they may.
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "othello", "old_stream_3", "new_stream_3", "test"
    )
    do_set_realm_property(
        user_profile.realm,
        "move_messages_between_streams_policy",
        Realm.POLICY_FULL_MEMBERS_ONLY,
        acting_user=None,
    )
    do_set_realm_property(
        user_profile.realm, "waiting_period_threshold", 100000, acting_user=None
    )
    check_move_message_according_to_policy(UserProfile.ROLE_MEMBER, expect_fail=True)
    do_set_realm_property(user_profile.realm, "waiting_period_threshold", 0, acting_user=None)
    check_move_message_according_to_policy(UserProfile.ROLE_MEMBER)

    # POLICY_MEMBERS_ONLY: guests may not move, members may.
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "othello", "old_stream_4", "new_stream_4", "test"
    )
    do_set_realm_property(
        user_profile.realm,
        "move_messages_between_streams_policy",
        Realm.POLICY_MEMBERS_ONLY,
        acting_user=None,
    )
    check_move_message_according_to_policy(UserProfile.ROLE_GUEST, expect_fail=True)
    check_move_message_according_to_policy(UserProfile.ROLE_MEMBER)
def test_move_message_to_stream_based_on_stream_post_policy(self) -> None:
    """Moving messages into a stream also requires permission to *post*
    in the destination stream, per its stream_post_policy.
    """
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "othello", "old_stream_1", "new_stream_1", "test"
    )
    # Allow all members to move between streams, so only the stream's
    # post policy is under test below.
    do_set_realm_property(
        user_profile.realm,
        "move_messages_between_streams_policy",
        Realm.POLICY_MEMBERS_ONLY,
        acting_user=None,
    )

    def check_move_message_to_stream(role: int, error_msg: Optional[str] = None) -> None:
        # Give Othello the requested role, attempt the move, and verify
        # success (messages moved) or the expected error (nothing moved).
        do_change_user_role(user_profile, role, acting_user=None)

        result = self.client_patch(
            "/json/messages/" + str(msg_id),
            {
                "stream_id": new_stream.id,
                "propagate_mode": "change_all",
            },
        )

        if error_msg is not None:
            self.assert_json_error(result, error_msg)
            messages = get_topic_messages(user_profile, old_stream, "test")
            self.assert_length(messages, 3)
            messages = get_topic_messages(user_profile, new_stream, "test")
            self.assert_length(messages, 0)
        else:
            self.assert_json_success(result)
            messages = get_topic_messages(user_profile, old_stream, "test")
            self.assert_length(messages, 1)
            messages = get_topic_messages(user_profile, new_stream, "test")
            self.assert_length(messages, 4)

    # STREAM_POST_POLICY_ADMINS: only administrators may move in.
    do_change_stream_post_policy(
        new_stream, Stream.STREAM_POST_POLICY_ADMINS, acting_user=user_profile
    )
    error_msg = "Only organization administrators can send to this stream."
    check_move_message_to_stream(UserProfile.ROLE_MODERATOR, error_msg)
    check_move_message_to_stream(UserProfile.ROLE_REALM_ADMINISTRATOR)

    # STREAM_POST_POLICY_MODERATORS: administrators and moderators only.
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "othello", "old_stream_2", "new_stream_2", "test"
    )
    do_change_stream_post_policy(
        new_stream, Stream.STREAM_POST_POLICY_MODERATORS, acting_user=user_profile
    )
    error_msg = "Only organization administrators and moderators can send to this stream."
    check_move_message_to_stream(UserProfile.ROLE_MEMBER, error_msg)
    check_move_message_to_stream(UserProfile.ROLE_MODERATOR)

    # STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS: members still inside the
    # waiting period may not move in.
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "othello", "old_stream_3", "new_stream_3", "test"
    )
    do_change_stream_post_policy(
        new_stream, Stream.STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS, acting_user=user_profile
    )
    error_msg = "New members cannot send to this stream."
    do_set_realm_property(
        user_profile.realm, "waiting_period_threshold", 100000, acting_user=None
    )
    check_move_message_to_stream(UserProfile.ROLE_MEMBER, error_msg)
    do_set_realm_property(user_profile.realm, "waiting_period_threshold", 0, acting_user=None)
    check_move_message_to_stream(UserProfile.ROLE_MEMBER)

    # STREAM_POST_POLICY_EVERYONE: the stream allows anyone to post, so a
    # guest is blocked only by the realm's move policy, while a member
    # succeeds even with a huge waiting period.
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "othello", "old_stream_4", "new_stream_4", "test"
    )
    do_change_stream_post_policy(
        new_stream, Stream.STREAM_POST_POLICY_EVERYONE, acting_user=user_profile
    )
    do_set_realm_property(
        user_profile.realm, "waiting_period_threshold", 100000, acting_user=None
    )
    check_move_message_to_stream(
        UserProfile.ROLE_GUEST, "You don't have permission to move this message"
    )
    check_move_message_to_stream(UserProfile.ROLE_MEMBER)
def test_move_message_to_stream_with_topic_editing_not_allowed(self) -> None:
    """A member allowed to move messages between streams but not to edit
    topics can move a topic as-is, but cannot rename it in the process.
    """
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "othello", "old_stream_1", "new_stream_1", "test"
    )

    # Restrict topic edits to admins, but allow all members to move
    # messages between streams.
    realm = user_profile.realm
    realm.edit_topic_policy = Realm.POLICY_ADMINS_ONLY
    realm.save()
    self.login("cordelia")
    do_set_realm_property(
        user_profile.realm,
        "move_messages_between_streams_policy",
        Realm.POLICY_MEMBERS_ONLY,
        acting_user=None,
    )

    # Renaming the topic while moving requires topic-edit permission.
    response = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_all",
            "topic": "new topic",
        },
    )
    self.assert_json_error(response, "You don't have permission to edit this message")

    # Moving without renaming the topic is still permitted.
    response = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_success(response)

    old_topic_messages = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(old_topic_messages, 1)
    new_topic_messages = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(new_topic_messages, 4)
def test_move_message_to_stream_and_topic(self) -> None:
    """Move a topic to another stream and rename it in one request,
    pinning the number of SQL queries and cache lookups the move costs
    and checking the breadcrumb notifications on both sides."""
    (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
        "iago", "test move stream", "new stream", "test"
    )
    with queries_captured() as queries, cache_tries_captured() as cache_tries:
        result = self.client_patch(
            f"/json/messages/{msg_id}",
            {
                "stream_id": new_stream.id,
                "propagate_mode": "change_all",
                "topic": "new topic",
            },
        )
    # Guard against performance regressions in the move code path.
    self.assert_length(queries, 53)
    self.assert_length(cache_tries, 13)
    # Old topic retains only the "moved to" notification.
    messages = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(messages, 1)
    self.assertEqual(
        messages[0].content,
        f"This topic was moved to #**new stream>new topic** by @_**Iago|{user_profile.id}**.",
    )
    # New topic has the 3 moved messages plus a "moved here" notification.
    messages = get_topic_messages(user_profile, new_stream, "new topic")
    self.assert_length(messages, 4)
    self.assertEqual(
        messages[3].content,
        f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
    )
    self.assert_json_success(result)
def test_inaccessible_msg_after_stream_change(self) -> None:
    """Check has_message_access() before and after a topic is moved to a
    stream the checking users are not subscribed to: the guest loses
    access, the regular member keeps it."""
    (user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
        "iago", "test move stream", "new stream", "test"
    )
    guest_user = self.example_user("polonius")
    non_guest_user = self.example_user("hamlet")
    self.subscribe(guest_user, old_stream.name)
    self.subscribe(non_guest_user, old_stream.name)
    msg_id_to_test_acesss = self.send_stream_message(
        user_profile, old_stream.name, topic_name="test", content="fourth"
    )
    # Before the move, both users can access the message even without a
    # UserMessage row, with or without the stream passed explicitly.
    self.assertEqual(
        has_message_access(
            guest_user, Message.objects.get(id=msg_id_to_test_acesss), has_user_message=False
        ),
        True,
    )
    self.assertEqual(
        has_message_access(
            guest_user,
            Message.objects.get(id=msg_id_to_test_acesss),
            has_user_message=False,
            stream=old_stream,
        ),
        True,
    )
    self.assertEqual(
        has_message_access(
            non_guest_user,
            Message.objects.get(id=msg_id_to_test_acesss),
            has_user_message=False,
        ),
        True,
    )
    # Move the whole topic into new_stream, which neither polonius nor
    # hamlet is subscribed to.
    result = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_all",
            "topic": "new topic",
        },
    )
    self.assert_json_success(result)
    # The guest loses access; the regular member retains it.
    self.assertEqual(
        has_message_access(
            guest_user,
            Message.objects.get(id=msg_id_to_test_acesss),
            has_user_message=False,
        ),
        False,
    )
    self.assertEqual(
        has_message_access(
            non_guest_user,
            Message.objects.get(id=msg_id_to_test_acesss),
            has_user_message=False,
        ),
        True,
    )
    self.assertEqual(
        # Claiming a subscription to the target stream short-circuits the
        # subscription lookup via the is_subscribed parameter.
        has_message_access(
            guest_user,
            Message.objects.get(id=msg_id_to_test_acesss),
            has_user_message=False,
            stream=new_stream,
            is_subscribed=True,
        ),
        True,
    )
    self.assertEqual(
        has_message_access(
            guest_user,
            Message.objects.get(id=msg_id_to_test_acesss),
            has_user_message=False,
            stream=new_stream,
        ),
        False,
    )
    with self.assertRaises(AssertionError):
        # Raises assertion if you pass a stream that does not match the
        # message's current stream.
        has_message_access(
            guest_user,
            Message.objects.get(id=msg_id_to_test_acesss),
            has_user_message=False,
            stream=old_stream,
        )
    # hamlet has no UserMessage row for this message...
    self.assertEqual(
        UserMessage.objects.filter(
            user_profile_id=non_guest_user.id,
            message_id=msg_id_to_test_acesss,
        ).count(),
        0,
    )
    # ...and iago (who sent the message) can still access it.
    self.assertEqual(
        has_message_access(
            self.example_user("iago"),
            Message.objects.get(id=msg_id_to_test_acesss),
            has_user_message=False,
        ),
        True,
    )
def test_no_notify_move_message_to_stream(self) -> None:
    """With both breadcrumb notifications disabled, a move leaves the old
    topic empty and adds nothing beyond the moved messages."""
    (user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
        "iago", "test move stream", "new stream", "test"
    )
    result = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_all",
            "send_notification_to_old_thread": "false",
            "send_notification_to_new_thread": "false",
        },
    )
    self.assert_json_success(result)
    # No notification left behind; only the 3 moved messages exist.
    messages = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(messages, 0)
    messages = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(messages, 3)
def test_notify_new_thread_move_message_to_stream(self) -> None:
    """Notification only in the destination: the new topic gets a
    "moved here" message, the old topic is left empty."""
    (user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
        "iago", "test move stream", "new stream", "test"
    )
    result = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_all",
            "send_notification_to_old_thread": "false",
            "send_notification_to_new_thread": "true",
        },
    )
    self.assert_json_success(result)
    messages = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(messages, 0)
    # 3 moved messages + the "moved here" notification appended last.
    messages = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(messages, 4)
    self.assertEqual(
        messages[3].content,
        f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
    )
def test_notify_old_thread_move_message_to_stream(self) -> None:
    """Notification only in the source: the old topic keeps a "moved to"
    message, the destination gets just the 3 moved messages."""
    (user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
        "iago", "test move stream", "new stream", "test"
    )
    result = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_all",
            "send_notification_to_old_thread": "true",
            "send_notification_to_new_thread": "false",
        },
    )
    self.assert_json_success(result)
    messages = get_topic_messages(user_profile, old_stream, "test")
    self.assert_length(messages, 1)
    self.assertEqual(
        messages[0].content,
        f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
    )
    messages = get_topic_messages(user_profile, new_stream, "test")
    self.assert_length(messages, 3)
def parameterized_test_move_message_involving_private_stream(
    self,
    from_invite_only: bool,
    history_public_to_subscribers: bool,
    user_messages_created: bool,
    to_invite_only: bool = True,
) -> None:
    """Shared body for the move-involving-a-private-stream tests.

    Moves a topic from old_stream to new_stream as iago and checks that:
    * cordelia (subscribed only to the old stream) loses her UserMessage
      row for the moved message;
    * hamlet (subscribed only to the new stream) gains a UserMessage row
      iff user_messages_created is True.
    """
    admin_user = self.example_user("iago")
    user_losing_access = self.example_user("cordelia")
    user_gaining_access = self.example_user("hamlet")
    self.login("iago")
    old_stream = self.make_stream("test move stream", invite_only=from_invite_only)
    new_stream = self.make_stream(
        "new stream",
        invite_only=to_invite_only,
        history_public_to_subscribers=history_public_to_subscribers,
    )
    # iago is in both streams; the other two are in exactly one each.
    self.subscribe(admin_user, old_stream.name)
    self.subscribe(user_losing_access, old_stream.name)
    self.subscribe(admin_user, new_stream.name)
    self.subscribe(user_gaining_access, new_stream.name)
    msg_id = self.send_stream_message(
        admin_user, old_stream.name, topic_name="test", content="First"
    )
    self.send_stream_message(admin_user, old_stream.name, topic_name="test", content="Second")
    # Sanity-check the starting UserMessage rows.
    self.assertEqual(
        UserMessage.objects.filter(
            user_profile_id=user_losing_access.id,
            message_id=msg_id,
        ).count(),
        1,
    )
    self.assertEqual(
        UserMessage.objects.filter(
            user_profile_id=user_gaining_access.id,
            message_id=msg_id,
        ).count(),
        0,
    )
    result = self.client_patch(
        f"/json/messages/{msg_id}",
        {
            "stream_id": new_stream.id,
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_success(result)
    # Old topic keeps only the "moved to" notification.
    messages = get_topic_messages(admin_user, old_stream, "test")
    self.assert_length(messages, 1)
    self.assertEqual(
        messages[0].content,
        f"This topic was moved to #**new stream>test** by @_**Iago|{admin_user.id}**.",
    )
    messages = get_topic_messages(admin_user, new_stream, "test")
    self.assert_length(messages, 3)
    # cordelia's UserMessage row for the moved message is removed.
    self.assertEqual(
        UserMessage.objects.filter(
            user_profile_id=user_losing_access.id,
            message_id=msg_id,
        ).count(),
        0,
    )
    # When the history is shared, UserMessage is not created for the user
    # but the user can see the message.
    self.assertEqual(
        UserMessage.objects.filter(
            user_profile_id=user_gaining_access.id,
            message_id=msg_id,
        ).count(),
        1 if user_messages_created else 0,
    )
def test_move_message_from_public_to_private_stream_not_shared_history(self) -> None:
    # Public -> private with protected history: the gaining subscriber
    # needs a UserMessage row to see the moved message.
    self.parameterized_test_move_message_involving_private_stream(
        from_invite_only=False,
        history_public_to_subscribers=False,
        user_messages_created=True,
    )
def test_move_message_from_public_to_private_stream_shared_history(self) -> None:
    # Public -> private with shared history: no UserMessage row is needed.
    self.parameterized_test_move_message_involving_private_stream(
        from_invite_only=False,
        history_public_to_subscribers=True,
        user_messages_created=False,
    )
def test_move_message_from_private_to_private_stream_not_shared_history(self) -> None:
    # Private -> private with protected history: UserMessage row created.
    self.parameterized_test_move_message_involving_private_stream(
        from_invite_only=True,
        history_public_to_subscribers=False,
        user_messages_created=True,
    )
def test_move_message_from_private_to_private_stream_shared_history(self) -> None:
    # Private -> private with shared history: no UserMessage row created.
    self.parameterized_test_move_message_involving_private_stream(
        from_invite_only=True,
        history_public_to_subscribers=True,
        user_messages_created=False,
    )
def test_move_message_from_private_to_public(self) -> None:
    # Private -> public: the destination's shared history makes the
    # message visible without a UserMessage row.
    self.parameterized_test_move_message_involving_private_stream(
        from_invite_only=True,
        history_public_to_subscribers=True,
        user_messages_created=False,
        to_invite_only=False,
    )
def test_can_move_messages_between_streams(self) -> None:
    """Exercise every move_messages_between_streams_policy value against
    UserProfile.can_move_messages_between_streams()."""

    def has_move_permission(user_profile: UserProfile) -> bool:
        # Reload first so role changes made elsewhere are visible here
        # (presumably by the shared policy checker -- TODO confirm).
        user_profile.refresh_from_db()
        return user_profile.can_move_messages_between_streams()

    self.check_has_permission_policies(
        "move_messages_between_streams_policy", has_move_permission
    )
def test_mark_topic_as_resolved(self) -> None:
    """Resolving and unresolving a topic via the RESOLVED_TOPIC_PREFIX:
    notifications are posted, written in English regardless of the
    actor's language, and marked unread only for topic participants.
    Topics containing checkmarks elsewhere are treated as plain renames."""
    self.login("iago")
    admin_user = self.example_user("iago")
    hamlet = self.example_user("hamlet")
    cordelia = self.example_user("cordelia")
    aaron = self.example_user("aaron")
    # Set the actor's language to German; the content assertions below
    # verify the notification text is still the English string.
    admin_user.default_language = "de"
    admin_user.save()
    stream = self.make_stream("new")
    self.subscribe(admin_user, stream.name)
    self.subscribe(hamlet, stream.name)
    self.subscribe(cordelia, stream.name)
    self.subscribe(aaron, stream.name)
    original_topic = "topic 1"
    id1 = self.send_stream_message(hamlet, "new", topic_name=original_topic)
    id2 = self.send_stream_message(admin_user, "new", topic_name=original_topic)
    # aaron participates in the topic only via a reaction.
    msg1 = Message.objects.get(id=id1)
    do_add_reaction(aaron, msg1, "tada", "1f389", "unicode_emoji")
    # Check that we don't incorrectly send "unresolve topic"
    # notifications when the topic name is submitted unchanged.
    result = self.client_patch(
        "/json/messages/" + str(id1),
        {
            "topic": original_topic,
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_error(result, "Nothing to change")
    # Resolve the topic by prepending the resolved-topic prefix.
    resolved_topic = RESOLVED_TOPIC_PREFIX + original_topic
    result = self.client_patch(
        "/json/messages/" + str(id1),
        {
            "topic": resolved_topic,
            "propagate_mode": "change_all",
        },
        HTTP_ACCEPT_LANGUAGE="de",
    )
    self.assert_json_success(result)
    for msg_id in [id1, id2]:
        msg = Message.objects.get(id=msg_id)
        self.assertEqual(
            resolved_topic,
            msg.topic_name(),
        )
    # 2 original messages + the "resolved" notification; English despite
    # the de language settings above.
    messages = get_topic_messages(admin_user, stream, resolved_topic)
    self.assert_length(messages, 3)
    self.assertEqual(
        messages[2].content,
        f"@_**Iago|{admin_user.id}** has marked this topic as resolved.",
    )
    # The notification is unread for the actor and the topic participants
    # (hamlet sent in it, aaron reacted), but not for cordelia.
    assert (
        UserMessage.objects.filter(
            user_profile__in=[admin_user, hamlet, aaron], message__id=messages[2].id
        )
        .extra(where=[UserMessage.where_unread()])
        .count()
        == 3
    )
    assert (
        UserMessage.objects.filter(user_profile=cordelia, message__id=messages[2].id)
        .extra(where=[UserMessage.where_unread()])
        .count()
        == 0
    )
    # A topic merely containing checkmarks (not the exact prefix) is a
    # plain rename: no extra resolved/unresolved notification is added.
    weird_topic = "✔ ✔✔" + original_topic
    result = self.client_patch(
        "/json/messages/" + str(id1),
        {
            "topic": weird_topic,
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_success(result)
    for msg_id in [id1, id2]:
        msg = Message.objects.get(id=msg_id)
        self.assertEqual(
            weird_topic,
            msg.topic_name(),
        )
    messages = get_topic_messages(admin_user, stream, weird_topic)
    self.assert_length(messages, 3)
    self.assertEqual(
        messages[2].content,
        f"@_**Iago|{admin_user.id}** has marked this topic as resolved.",
    )
    # Dropping the prefix entirely unresolves the topic and posts an
    # "unresolved" notification.
    unresolved_topic = original_topic
    result = self.client_patch(
        "/json/messages/" + str(id1),
        {
            "topic": unresolved_topic,
            "propagate_mode": "change_all",
        },
    )
    self.assert_json_success(result)
    for msg_id in [id1, id2]:
        msg = Message.objects.get(id=msg_id)
        self.assertEqual(
            unresolved_topic,
            msg.topic_name(),
        )
    messages = get_topic_messages(admin_user, stream, unresolved_topic)
    self.assert_length(messages, 4)
    self.assertEqual(
        messages[3].content,
        f"@_**Iago|{admin_user.id}** has marked this topic as unresolved.",
    )
    # Same unread-audience rule for the "unresolved" notification.
    assert (
        UserMessage.objects.filter(
            user_profile__in=[admin_user, hamlet, aaron], message__id=messages[3].id
        )
        .extra(where=[UserMessage.where_unread()])
        .count()
        == 3
    )
    assert (
        UserMessage.objects.filter(user_profile=cordelia, message__id=messages[3].id)
        .extra(where=[UserMessage.where_unread()])
        .count()
        == 0
    )
class DeleteMessageTest(ZulipTestCase):
    """Tests for the message-deletion endpoint and its realm-level policies."""

    def test_delete_message_invalid_request_format(self) -> None:
        """The target id comes from the URL path; a bad path id fails even
        if a valid id is supplied in the request body."""
        self.login("iago")
        hamlet = self.example_user("hamlet")
        msg_id = self.send_stream_message(hamlet, "Denmark")
        # Wrong id in the URL; the body's message_id must not be used.
        result = self.client_delete(f"/json/messages/{msg_id + 1}", {"message_id": msg_id})
        self.assert_json_error(result, "Invalid message(s)")
        result = self.client_delete(f"/json/messages/{msg_id}")
        self.assert_json_success(result)

    def test_delete_message_by_user(self) -> None:
        """End-to-end checks of delete_own_message_policy and
        message_content_delete_limit_seconds for the message owner, an
        unrelated user, and an administrator."""

        def set_message_deleting_params(
            delete_own_message_policy: int, message_content_delete_limit_seconds: Union[int, str]
        ) -> None:
            # Configure the realm-level deletion settings as iago (admin).
            self.login("iago")
            result = self.client_patch(
                "/json/realm",
                {
                    "delete_own_message_policy": delete_own_message_policy,
                    "message_content_delete_limit_seconds": orjson.dumps(
                        message_content_delete_limit_seconds
                    ).decode(),
                },
            )
            self.assert_json_success(result)

        def test_delete_message_by_admin(msg_id: int) -> HttpResponse:
            # Attempt deletion as an organization administrator.
            self.login("iago")
            result = self.client_delete(f"/json/messages/{msg_id}")
            return result

        def test_delete_message_by_owner(msg_id: int) -> HttpResponse:
            # Attempt deletion as the message sender.
            self.login("hamlet")
            result = self.client_delete(f"/json/messages/{msg_id}")
            return result

        def test_delete_message_by_other_user(msg_id: int) -> HttpResponse:
            # Attempt deletion as an unrelated non-admin user.
            self.login("cordelia")
            result = self.client_delete(f"/json/messages/{msg_id}")
            return result

        # Admins-only policy: neither the owner nor another user may delete.
        set_message_deleting_params(Realm.POLICY_ADMINS_ONLY, "unlimited")
        hamlet = self.example_user("hamlet")
        self.login_user(hamlet)
        msg_id = self.send_stream_message(hamlet, "Denmark")
        result = test_delete_message_by_owner(msg_id=msg_id)
        self.assert_json_error(result, "You don't have permission to delete this message")
        result = test_delete_message_by_other_user(msg_id=msg_id)
        self.assert_json_error(result, "You don't have permission to delete this message")
        result = test_delete_message_by_admin(msg_id=msg_id)
        self.assert_json_success(result)

        # Everyone may delete their own messages with no time limit; other
        # users still cannot.
        set_message_deleting_params(Realm.POLICY_EVERYONE, "unlimited")
        msg_id = self.send_stream_message(hamlet, "Denmark")
        message = Message.objects.get(id=msg_id)
        # Backdate the message to show "unlimited" really means no limit.
        message.date_sent = message.date_sent - datetime.timedelta(seconds=600)
        message.save()
        result = test_delete_message_by_other_user(msg_id=msg_id)
        self.assert_json_error(result, "You don't have permission to delete this message")
        result = test_delete_message_by_owner(msg_id=msg_id)
        self.assert_json_success(result)

        # Test if time limit is non-zero: with a 240s limit, a 120s-old
        # message is deletable by its owner, a 360s-old one is not --
        # though an admin may still delete it.
        set_message_deleting_params(Realm.POLICY_EVERYONE, 240)
        msg_id_1 = self.send_stream_message(hamlet, "Denmark")
        message = Message.objects.get(id=msg_id_1)
        message.date_sent = message.date_sent - datetime.timedelta(seconds=120)
        message.save()
        msg_id_2 = self.send_stream_message(hamlet, "Denmark")
        message = Message.objects.get(id=msg_id_2)
        message.date_sent = message.date_sent - datetime.timedelta(seconds=360)
        message.save()
        result = test_delete_message_by_other_user(msg_id=msg_id_1)
        self.assert_json_error(result, "You don't have permission to delete this message")
        result = test_delete_message_by_owner(msg_id=msg_id_1)
        self.assert_json_success(result)
        result = test_delete_message_by_owner(msg_id=msg_id_2)
        self.assert_json_error(result, "The time limit for deleting this message has passed")
        result = test_delete_message_by_admin(msg_id=msg_id_2)
        self.assert_json_success(result)

        # Deleting the same message twice reports it as invalid.
        msg_id = self.send_stream_message(hamlet, "Denmark")
        result = test_delete_message_by_owner(msg_id=msg_id)
        self.assert_json_success(result)
        result = test_delete_message_by_owner(msg_id=msg_id)
        self.assert_json_error(result, "Invalid message(s)")

        # Race conditions: if the message disappears between validation and
        # deletion, both failure modes surface as "Message already deleted".
        with mock.patch("zerver.views.message_edit.do_delete_messages") as m, mock.patch(
            "zerver.views.message_edit.validate_can_delete_message", return_value=None
        ), mock.patch("zerver.views.message_edit.access_message", return_value=(None, None)):
            m.side_effect = IntegrityError()
            result = test_delete_message_by_owner(msg_id=msg_id)
            self.assert_json_error(result, "Message already deleted")
            m.side_effect = Message.DoesNotExist()
            result = test_delete_message_by_owner(msg_id=msg_id)
            self.assert_json_error(result, "Message already deleted")

    def test_delete_message_according_to_delete_own_message_policy(self) -> None:
        """Each delete_own_message_policy value gates which role may delete
        their own message."""

        def check_delete_message_by_sender(
            sender_name: str, error_msg: Optional[str] = None
        ) -> None:
            # Send as sender_name, then try to delete the message: expect
            # success when error_msg is None, that error otherwise.
            sender = self.example_user(sender_name)
            msg_id = self.send_stream_message(sender, "Verona")
            self.login_user(sender)
            result = self.client_delete(f"/json/messages/{msg_id}")
            if error_msg is None:
                self.assert_json_success(result)
            else:
                self.assert_json_error(result, error_msg)

        realm = get_realm("zulip")
        # Admins only: shiva (moderator) is refused, iago (admin) allowed.
        do_set_realm_property(
            realm, "delete_own_message_policy", Realm.POLICY_ADMINS_ONLY, acting_user=None
        )
        check_delete_message_by_sender("shiva", "You don't have permission to delete this message")
        check_delete_message_by_sender("iago")
        # Moderators only: cordelia (member) refused, shiva allowed.
        do_set_realm_property(
            realm, "delete_own_message_policy", Realm.POLICY_MODERATORS_ONLY, acting_user=None
        )
        check_delete_message_by_sender(
            "cordelia", "You don't have permission to delete this message"
        )
        check_delete_message_by_sender("shiva")
        # Members only: polonius (guest) refused, cordelia allowed.
        do_set_realm_property(
            realm, "delete_own_message_policy", Realm.POLICY_MEMBERS_ONLY, acting_user=None
        )
        check_delete_message_by_sender(
            "polonius", "You don't have permission to delete this message"
        )
        check_delete_message_by_sender("cordelia")
        # Full members only: cordelia qualifies once her account is older
        # than the realm's 10-day waiting_period_threshold.
        do_set_realm_property(
            realm, "delete_own_message_policy", Realm.POLICY_FULL_MEMBERS_ONLY, acting_user=None
        )
        do_set_realm_property(realm, "waiting_period_threshold", 10, acting_user=None)
        cordelia = self.example_user("cordelia")
        cordelia.date_joined = timezone_now() - datetime.timedelta(days=9)
        cordelia.save()
        check_delete_message_by_sender(
            "cordelia", "You don't have permission to delete this message"
        )
        cordelia.date_joined = timezone_now() - datetime.timedelta(days=11)
        cordelia.save()
        check_delete_message_by_sender("cordelia")
        # Everyone: even guests may delete their own messages.
        do_set_realm_property(
            realm, "delete_own_message_policy", Realm.POLICY_EVERYONE, acting_user=None
        )
        check_delete_message_by_sender("cordelia")
        check_delete_message_by_sender("polonius")

    def test_delete_event_sent_after_transaction_commits(self) -> None:
        """The deletion event must be deferred until the transaction
        commits: an eager call to send_event would trip the AssertionError
        installed here, so a passing test proves the deferred path is used."""
        hamlet = self.example_user("hamlet")
        self.send_stream_message(hamlet, "Denmark")
        message = self.get_last_message()
        with self.tornado_redirected_to_list([], expected_num_events=1):
            with mock.patch("zerver.actions.message_edit.send_event") as m:
                m.side_effect = AssertionError(
                    "Events should be sent only after the transaction commits."
                )
                do_delete_messages(hamlet.realm, [message])
| true | true |
1c2d4ad2832851c2f372570d135f6d01dac4482d | 285 | py | Python | Zad_ChainOfResponsibility/Application.py | Paarzivall/Wzorce-Projektowe | aa4136f140ad02c0fc0de45709b5a01ca42b417f | [
"MIT"
] | null | null | null | Zad_ChainOfResponsibility/Application.py | Paarzivall/Wzorce-Projektowe | aa4136f140ad02c0fc0de45709b5a01ca42b417f | [
"MIT"
] | null | null | null | Zad_ChainOfResponsibility/Application.py | Paarzivall/Wzorce-Projektowe | aa4136f140ad02c0fc0de45709b5a01ca42b417f | [
"MIT"
] | null | null | null | from Handler import Handler
class Application(Handler):
def __init__(self, successor=None):
self._successor = successor
def HandleHelp(self):
self.ShowHelp()
def ShowHelp(self):
print("anApplication: Potrafie obsluzyc zadanie, wyswietlam pomoc") | 23.75 | 75 | 0.694737 | from Handler import Handler
class Application(Handler):
def __init__(self, successor=None):
self._successor = successor
def HandleHelp(self):
self.ShowHelp()
def ShowHelp(self):
print("anApplication: Potrafie obsluzyc zadanie, wyswietlam pomoc") | true | true |
1c2d4afe34500cdb857f2d349ee215e5b65d61c3 | 1,870 | py | Python | python/src/nnabla/backward_function/global_average_pooling.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 2,792 | 2017-06-26T13:05:44.000Z | 2022-03-28T07:55:26.000Z | python/src/nnabla/backward_function/global_average_pooling.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 138 | 2017-06-27T07:04:44.000Z | 2022-02-28T01:37:15.000Z | python/src/nnabla/backward_function/global_average_pooling.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 380 | 2017-06-26T13:23:52.000Z | 2022-03-25T16:51:30.000Z | # Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.function as _F
import nnabla.functions as F
from .backward_function import UnaryDataGrad
class GlobalAveragePoolingDataGrad(UnaryDataGrad):
    """Data-gradient function for GlobalAveragePooling.

    UnaryDataGrad supplies the generic machinery; this subclass only plugs
    in the forward GlobalAveragePooling function whose input-gradient is
    being computed.
    """

    def __init__(self, ctx):
        super(GlobalAveragePoolingDataGrad, self).__init__(ctx)
        self._func = _F.GlobalAveragePooling(ctx)
def global_average_pooling_backward(inputs):
    """Gradient of global_average_pooling with respect to its input.

    Args:
        inputs (list of nn.Variable): Incomming grads/inputs to/of the
            forward function: ``[grad_output, x]``.

    Return:
        Variable: Gradient with respect to the forward input ``x``.
    """
    grad_output, forward_input = inputs[0], inputs[1]
    grad_func = GlobalAveragePoolingDataGrad(nn.get_current_context())
    # The data-grad function needs the original input shape to broadcast
    # the pooled gradient back over the spatial dimensions.
    grad_func.xshape = forward_input.shape
    return grad_func(grad_output)
def global_average_pooling_data_grad_backward(inputs):
    """Backward of the data-grad of global_average_pooling (second order).

    Args:
        inputs (list of nn.Variable): Incomming grads/inputs to/of the
            forward function; only ``inputs[0]`` (grad w.r.t. dx) is used.

    Return:
        Variable: Gradient with respect to ``dy``.
    """
    # Average pooling is linear, so this backward is the forward op itself.
    return F.global_average_pooling(inputs[0])
| 31.166667 | 86 | 0.730481 |
import nnabla as nn
import nnabla.function as _F
import nnabla.functions as F
from .backward_function import UnaryDataGrad
class GlobalAveragePoolingDataGrad(UnaryDataGrad):
def __init__(self, ctx):
super(GlobalAveragePoolingDataGrad, self).__init__(ctx)
self._func = _F.GlobalAveragePooling(ctx)
def global_average_pooling_backward(inputs):
dy = inputs[0]
x0 = inputs[1]
ctx = nn.get_current_context()
pool = GlobalAveragePoolingDataGrad(ctx)
pool.xshape = x0.shape
dx0 = pool(dy)
return dx0
def global_average_pooling_data_grad_backward(inputs):
gdx = inputs[0]
gdy = F.global_average_pooling(gdx)
return gdy
| true | true |
1c2d4bb203154168c741a10cbee8e7df483c0846 | 1,315 | py | Python | sdk/python/pulumi_kubernetes/core/v1/PodTemplateList.py | rosskevin/pulumi-kubernetes | e4fa04b13a20929c879aca1bbe58fb5a95d16f7c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/core/v1/PodTemplateList.py | rosskevin/pulumi-kubernetes | e4fa04b13a20929c879aca1bbe58fb5a95d16f7c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/core/v1/PodTemplateList.py | rosskevin/pulumi-kubernetes | e4fa04b13a20929c879aca1bbe58fb5a95d16f7c | [
"Apache-2.0"
] | null | null | null | import pulumi
import pulumi.runtime
from ... import tables
class PodTemplateList(pulumi.CustomResource):
    """
    PodTemplateList is a list of PodTemplates.
    """
    def __init__(self, __name__, __opts__=None, items=None, metadata=None):
        """Declare a PodTemplateList Kubernetes resource.

        :param str __name__: Resource name, used for URN creation.
        :param pulumi.ResourceOptions __opts__: Optional resource options.
        :param items: List of pod templates (required).
        :param metadata: Standard list metadata.
        :raises TypeError: on a missing/non-string name, bad options
            object, or missing ``items``.
        """
        if not __name__:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(__name__, str):
            raise TypeError('Expected resource name to be a string')
        if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        __props__ = dict()

        # apiVersion/kind are fixed by the Kubernetes schema for this type.
        __props__['apiVersion'] = 'v1'
        __props__['kind'] = 'PodTemplateList'
        if items is None:
            raise TypeError('Missing required property items')
        __props__['items'] = items
        __props__['metadata'] = metadata

        super(PodTemplateList, self).__init__(
            "kubernetes:core/v1:PodTemplateList",
            __name__,
            __props__,
            __opts__)

    def translate_output_property(self, prop: str) -> str:
        # Translate engine-side property names using the generated casing
        # table; names missing from the table pass through unchanged.
        return tables._CASING_FORWARD_TABLE.get(prop) or prop

    def translate_input_property(self, prop: str) -> str:
        # Inverse of translate_output_property, via the backward table.
        return tables._CASING_BACKWARD_TABLE.get(prop) or prop
| 34.605263 | 89 | 0.656274 | import pulumi
import pulumi.runtime
from ... import tables
class PodTemplateList(pulumi.CustomResource):
def __init__(self, __name__, __opts__=None, items=None, metadata=None):
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, str):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['apiVersion'] = 'v1'
__props__['kind'] = 'PodTemplateList'
if items is None:
raise TypeError('Missing required property items')
__props__['items'] = items
__props__['metadata'] = metadata
super(PodTemplateList, self).__init__(
"kubernetes:core/v1:PodTemplateList",
__name__,
__props__,
__opts__)
def translate_output_property(self, prop: str) -> str:
return tables._CASING_FORWARD_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return tables._CASING_BACKWARD_TABLE.get(prop) or prop
| true | true |
1c2d4bbb9696658238315a913f60295083292da6 | 2,835 | py | Python | tests/test_serialized_schema.py | TheBigSasha/OpenTimelineIO | 0d857086cbb4fc39c1303947c61318aa6e523ea5 | [
"Apache-2.0"
] | 5 | 2018-07-27T03:52:26.000Z | 2021-04-02T04:10:15.000Z | tests/test_serialized_schema.py | TheBigSasha/OpenTimelineIO | 0d857086cbb4fc39c1303947c61318aa6e523ea5 | [
"Apache-2.0"
] | 1 | 2019-06-20T04:02:54.000Z | 2019-06-20T04:02:54.000Z | tests/test_serialized_schema.py | TheBigSasha/OpenTimelineIO | 0d857086cbb4fc39c1303947c61318aa6e523ea5 | [
"Apache-2.0"
] | 1 | 2019-11-15T21:20:24.000Z | 2019-11-15T21:20:24.000Z | #
# Copyright Contributors to the OpenTimelineIO project
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
import unittest
import os
from opentimelineio.console import (
autogen_serialized_datamodel as asd,
autogen_plugin_documentation as apd,
)
class SerializedSchemaTester(unittest.TestCase):
    def test_serialized_schema(self):
        """Test if the schema has changed since last time the serialized schema
        documentation was generated.
        """
        # The checked-in baseline lives two directories above this test.
        pt = os.path.dirname(os.path.dirname(__file__))
        fp = os.path.join(pt, "docs", "tutorials", "otio-serialized-schema.md")
        with open(fp) as fi:
            baseline_text = fi.read()

        test_text, _ = asd.generate_and_write_documentation()

        # Show the full diff on mismatch instead of truncating it.
        self.maxDiff = None
        self.longMessage = True

        self.assertMultiLineEqual(
            baseline_text,
            test_text,
            "\n The schema has changed and the autogenerated documentation in {}"
            " needs to be updated. run: `make doc-model-update`".format(fp)
        )
class PluginDocumentationTester(unittest.TestCase):
    def test_plugin_documentation(self):
        """Verify that the plugin manifest matches what is checked into the
        documentation.
        """
        # The checked-in baseline lives two directories above this test.
        pt = os.path.dirname(os.path.dirname(__file__))
        fp = os.path.join(pt, "docs", "tutorials", "otio-plugins.md")
        with open(fp) as fi:
            baseline_text = fi.read()

        test_text = apd.generate_and_write_documentation_plugins(True, True)

        # Show the full diff on mismatch instead of truncating it.
        self.maxDiff = None
        self.longMessage = True

        self.assertMultiLineEqual(
            baseline_text,
            test_text,
            "\n The schema has changed and the autogenerated documentation in {}"
            " needs to be updated. run: `make doc-plugins-update`".format(fp)
        )
| 36.346154 | 81 | 0.687478 |
import unittest
import os
from opentimelineio.console import (
autogen_serialized_datamodel as asd,
autogen_plugin_documentation as apd,
)
class SerializedSchemaTester(unittest.TestCase):
def test_serialized_schema(self):
pt = os.path.dirname(os.path.dirname(__file__))
fp = os.path.join(pt, "docs", "tutorials", "otio-serialized-schema.md")
with open(fp) as fi:
baseline_text = fi.read()
test_text, _ = asd.generate_and_write_documentation()
self.maxDiff = None
self.longMessage = True
self.assertMultiLineEqual(
baseline_text,
test_text,
"\n The schema has changed and the autogenerated documentation in {}"
" needs to be updated. run: `make doc-model-update`".format(fp)
)
class PluginDocumentationTester(unittest.TestCase):
def test_plugin_documentation(self):
pt = os.path.dirname(os.path.dirname(__file__))
fp = os.path.join(pt, "docs", "tutorials", "otio-plugins.md")
with open(fp) as fi:
baseline_text = fi.read()
test_text = apd.generate_and_write_documentation_plugins(True, True)
self.maxDiff = None
self.longMessage = True
self.assertMultiLineEqual(
baseline_text,
test_text,
"\n The schema has changed and the autogenerated documentation in {}"
" needs to be updated. run: `make doc-plugins-update`".format(fp)
)
| true | true |
1c2d4be972df48e5af1c18035a305472b31301cf | 1,044 | py | Python | kubernetes/test/test_extensions_v1beta1_scale_spec.py | Prahladk09/python-1 | 2dfb3035535e4be52ba549f1ff47acbe573b73f6 | [
"Apache-2.0"
] | 1 | 2020-04-13T09:54:21.000Z | 2020-04-13T09:54:21.000Z | kubernetes/test/test_extensions_v1beta1_scale_spec.py | Prahladk09/python-1 | 2dfb3035535e4be52ba549f1ff47acbe573b73f6 | [
"Apache-2.0"
] | 1 | 2019-08-15T14:27:17.000Z | 2019-08-15T14:28:07.000Z | kubernetes/test/test_extensions_v1beta1_scale_spec.py | Prahladk09/python-1 | 2dfb3035535e4be52ba549f1ff47acbe573b73f6 | [
"Apache-2.0"
] | 2 | 2020-08-05T03:06:48.000Z | 2020-08-05T16:08:21.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.extensions_v1beta1_scale_spec import ExtensionsV1beta1ScaleSpec
class TestExtensionsV1beta1ScaleSpec(unittest.TestCase):
    """ ExtensionsV1beta1ScaleSpec unit test stubs """

    def setUp(self):
        # Generated stub: no fixtures needed.
        pass

    def tearDown(self):
        pass

    def testExtensionsV1beta1ScaleSpec(self):
        """
        Test ExtensionsV1beta1ScaleSpec
        """
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.extensions_v1beta1_scale_spec.ExtensionsV1beta1ScaleSpec()
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 23.2 | 105 | 0.731801 |
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.extensions_v1beta1_scale_spec import ExtensionsV1beta1ScaleSpec
class TestExtensionsV1beta1ScaleSpec(unittest.TestCase):
    """Unit test stubs for the ExtensionsV1beta1ScaleSpec model (comment-stripped copy)."""
    def setUp(self):
        # No fixtures required for these generated stubs.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testExtensionsV1beta1ScaleSpec(self):
        # TODO: construct ExtensionsV1beta1ScaleSpec with example mandatory attributes.
        pass
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| true | true |
1c2d4c95596e5691e3248bce1aafe05d87bda59a | 646 | py | Python | integration_tests/emukit/bayesian_optimization/test_single_objective_bayesian_optimization.py | alexgessner/emukit | 355e26bb30edd772a81af2a1267c569d7f446d42 | [
"Apache-2.0"
] | 1 | 2019-07-02T15:37:47.000Z | 2019-07-02T15:37:47.000Z | integration_tests/emukit/bayesian_optimization/test_single_objective_bayesian_optimization.py | Tony-Chiong/emukit | a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a | [
"Apache-2.0"
] | null | null | null | integration_tests/emukit/bayesian_optimization/test_single_objective_bayesian_optimization.py | Tony-Chiong/emukit | a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a | [
"Apache-2.0"
] | 1 | 2020-01-12T19:50:44.000Z | 2020-01-12T19:50:44.000Z | import numpy as np
from emukit.core.continuous_parameter import ContinuousParameter
from emukit.examples.gp_bayesian_optimization.single_objective_bayesian_optimization import GPBayesianOptimization
def f(x):
    """Toy objective function: element-wise square of the input."""
    return x * x
def test_loop():
    """Run a short Bayesian-optimization loop and sanity-check its state."""
    num_iters = 5
    initial_x = np.random.rand(5, 1)
    initial_y = np.random.rand(5, 1)

    variable = ContinuousParameter('x', 0, 1)
    optimizer = GPBayesianOptimization(variables_list=[variable],
                                       X=initial_x, Y=initial_y)
    optimizer.run_optimization(f, num_iters)

    # The loop state should hold the 5 seed points plus one point per iteration.
    assert optimizer.loop_state.X.shape[0] == num_iters + 5
    assert optimizer.suggest_new_locations().shape == (1,)
| 28.086957 | 114 | 0.733746 | import numpy as np
from emukit.core.continuous_parameter import ContinuousParameter
from emukit.examples.gp_bayesian_optimization.single_objective_bayesian_optimization import GPBayesianOptimization
def f(x):
    # Simple quadratic objective used as the optimization target.
    return x**2
def test_loop():
    # Run 5 iterations of Bayesian optimization from 5 random seed points
    # over a single continuous variable on [0, 1].
    n_iterations = 5
    x_init = np.random.rand(5, 1)
    y_init = np.random.rand(5, 1)
    x = ContinuousParameter('x', 0, 1)
    bo = GPBayesianOptimization(variables_list=[x], X=x_init, Y=y_init)
    bo.run_optimization(f, n_iterations)
    # Loop state should contain the seed points plus one point per iteration.
    assert bo.loop_state.X.shape[0] == n_iterations + 5
    assert bo.suggest_new_locations().shape == (1,)
| true | true |
1c2d4cdd0cc7aa83fa162d48e7f635a182fda07e | 6,917 | py | Python | yolo2/models/layers.py | weihao94/keras-YOLOv3-model-set | 7b54809d5f5513a41a191289625612d71056e3ec | [
"MIT"
] | null | null | null | yolo2/models/layers.py | weihao94/keras-YOLOv3-model-set | 7b54809d5f5513a41a191289625612d71056e3ec | [
"MIT"
] | null | null | null | yolo2/models/layers.py | weihao94/keras-YOLOv3-model-set | 7b54809d5f5513a41a191289625612d71056e3ec | [
"MIT"
] | 2 | 2020-10-29T19:04:12.000Z | 2021-01-26T10:10:14.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Common layer definition for YOLOv2 models building
"""
from functools import wraps, reduce, partial
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, MaxPooling2D, Concatenate, Lambda
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.regularizers import l2
from common.backbones.layers import CustomBatchNormalization
# Partial wrapper for Convolution2D with static default argument.
# Every Darknet conv layer built below uses 'same' padding unless the
# stride-2 depthwise wrapper overrides it.
_DarknetConv2D = partial(Conv2D, padding='same')
def compose(*funcs):
    """Compose functions left to right: compose(f, g)(x) == g(f(x)).

    Reference: https://mathieularose.com/function-composition-in-python/

    Raises:
        ValueError: if called with no functions.
    """
    if not funcs:
        raise ValueError('Composition of empty sequence not supported.')

    def composed(*args, **kwargs):
        # The first function receives the original arguments; each later
        # function receives the previous result.
        result = funcs[0](*args, **kwargs)
        for fn in funcs[1:]:
            result = fn(result)
        return result

    return composed
@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
    """Conv2D wrapper that applies the Darknet L2 weight regularizer.

    Explicit caller kwargs take precedence over the default regularizer.
    """
    merged_kwargs = dict({'kernel_regularizer': l2(5e-4)}, **kwargs)
    return _DarknetConv2D(*args, **merged_kwargs)
@wraps(DepthwiseConv2D)
def DarknetDepthwiseConv2D(*args, **kwargs):
    """DepthwiseConv2D wrapper with Darknet defaults.

    Adds the standard L2 weight regularizer and selects 'valid' padding
    for stride-2 (downsampling) convolutions, 'same' otherwise.  Explicit
    caller kwargs override both defaults.
    """
    if kwargs.get('strides') == (2, 2):
        default_padding = 'valid'
    else:
        default_padding = 'same'
    merged_kwargs = dict({'kernel_regularizer': l2(5e-4),
                          'padding': default_padding}, **kwargs)
    return DepthwiseConv2D(*args, **merged_kwargs)
def Darknet_Depthwise_Separable_Conv2D_BN_Leaky(filters, kernel_size=(3, 3), block_id_str=None, **kwargs):
    """Darknet depthwise-separable conv block.

    Depthwise conv + BN + LeakyReLU followed by a 1x1 pointwise conv +
    BN + LeakyReLU.  Layer names are derived from block_id_str (or a fresh
    Keras uid when omitted).
    """
    block_id = block_id_str if block_id_str else str(K.get_uid())
    # use_bias defaults to False but an explicit caller kwarg wins.
    dw_kwargs = dict({'use_bias': False}, **kwargs)
    return compose(
        DarknetDepthwiseConv2D(kernel_size, name='conv_dw_' + block_id, **dw_kwargs),
        CustomBatchNormalization(name='conv_dw_%s_bn' % block_id),
        LeakyReLU(alpha=0.1, name='conv_dw_%s_leaky_relu' % block_id),
        Conv2D(filters, (1, 1), padding='same', use_bias=False, strides=(1, 1), name='conv_pw_%s' % block_id),
        CustomBatchNormalization(name='conv_pw_%s_bn' % block_id),
        LeakyReLU(alpha=0.1, name='conv_pw_%s_leaky_relu' % block_id))
def Depthwise_Separable_Conv2D_BN_Leaky(filters, kernel_size=(3, 3), block_id_str=None):
    """Depthwise-separable conv block built from plain Keras layers.

    Depthwise conv + BN + LeakyReLU, then 1x1 pointwise conv + BN +
    LeakyReLU.  Layer names are derived from block_id_str (or a fresh
    Keras uid when omitted).
    """
    block_id = block_id_str if block_id_str else str(K.get_uid())
    return compose(
        DepthwiseConv2D(kernel_size, padding='same', name='conv_dw_' + block_id),
        CustomBatchNormalization(name='conv_dw_%s_bn' % block_id),
        LeakyReLU(alpha=0.1, name='conv_dw_%s_leaky_relu' % block_id),
        Conv2D(filters, (1, 1), padding='same', use_bias=False, strides=(1, 1), name='conv_pw_%s' % block_id),
        CustomBatchNormalization(name='conv_pw_%s_bn' % block_id),
        LeakyReLU(alpha=0.1, name='conv_pw_%s_leaky_relu' % block_id))
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Conv2D followed by batch normalization and LeakyReLU(0.1)."""
    # Bias is redundant before batch norm; caller kwargs may still override.
    conv_kwargs = dict({'use_bias': False}, **kwargs)
    return compose(
        DarknetConv2D(*args, **conv_kwargs),
        CustomBatchNormalization(),
        LeakyReLU(alpha=0.1))
def bottleneck_block(outer_filters, bottleneck_filters):
    """Bottleneck stack: 3x3 (outer), 1x1 (squeeze), 3x3 (outer) convs."""
    wide = DarknetConv2D_BN_Leaky(outer_filters, (3, 3))
    squeeze = DarknetConv2D_BN_Leaky(bottleneck_filters, (1, 1))
    expand = DarknetConv2D_BN_Leaky(outer_filters, (3, 3))
    return compose(wide, squeeze, expand)
def bottleneck_x2_block(outer_filters, bottleneck_filters):
    """Double bottleneck: 3x3, 1x1, 3x3, then an extra 1x1, 3x3 pair."""
    head = bottleneck_block(outer_filters, bottleneck_filters)
    tail = compose(
        DarknetConv2D_BN_Leaky(bottleneck_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(outer_filters, (3, 3)))
    return compose(head, tail)
def space_to_depth_x2(x):
    """Thin wrapper for TensorFlow space_to_depth with block_size=2."""
    # Imported locally: required to make the Lambda layer work.
    # See: https://github.com/fchollet/keras/issues/5088#issuecomment-273851273
    import tensorflow as tf
    return tf.nn.space_to_depth(x, block_size=2)
def space_to_depth_x2_output_shape(input_shape):
    """Output shape of space_to_depth_x2: spatial dims halve, channels x4.

    If the height dimension is unknown (falsy), both spatial dimensions of
    the result are reported as None.  Note: for Lambda with the TensorFlow
    backend, the output shape may not actually be needed.
    """
    batch = input_shape[0]
    channels = 4 * input_shape[3]
    if input_shape[1]:
        return (batch, input_shape[1] // 2, input_shape[2] // 2, channels)
    return (batch, None, None, channels)
def yolo2_predictions(feature_maps, feature_channel_nums, num_anchors, num_classes):
    """Build the YOLOv2 prediction head from two backbone feature maps.

    f1 is the deeper/coarser map and f2 the shallower/finer one.  f2 is
    squeezed and spatially reorganized so it can be concatenated with f1
    before the final prediction convolution.
    """
    f1, f2 = feature_maps
    f1_channel_num, f2_channel_num = feature_channel_nums

    deep_branch = compose(
        DarknetConv2D_BN_Leaky(f1_channel_num, (3, 3)),
        DarknetConv2D_BN_Leaky(f1_channel_num, (3, 3)))(f1)

    # Squeeze f2 to f2_channel_num//8 channels first; space_to_depth then
    # expands it back to f2_channel_num//2 while halving its spatial size.
    fine_branch = DarknetConv2D_BN_Leaky(f2_channel_num // 8, (1, 1))(f2)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    fine_branch = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(fine_branch)

    merged = Concatenate()([fine_branch, deep_branch])
    merged = DarknetConv2D_BN_Leaky(f1_channel_num, (3, 3))(merged)
    # One box prediction (x, y, w, h, obj + class scores) per anchor.
    return DarknetConv2D(num_anchors * (num_classes + 5), (1, 1), name='predict_conv')(merged)
def yolo2lite_predictions(feature_maps, feature_channel_nums, num_anchors, num_classes):
    """YOLOv2-Lite prediction head: same topology as yolo2_predictions but
    with depthwise-separable 3x3 convolutions."""
    f1, f2 = feature_maps
    f1_channel_num, f2_channel_num = feature_channel_nums

    deep_branch = compose(
        Depthwise_Separable_Conv2D_BN_Leaky(filters=f1_channel_num, kernel_size=(3, 3), block_id_str='pred_1'),
        Depthwise_Separable_Conv2D_BN_Leaky(filters=f1_channel_num, kernel_size=(3, 3), block_id_str='pred_2'))(f1)

    # Squeeze f2 to f2_channel_num//8 channels first; space_to_depth then
    # expands it back to f2_channel_num//2 while halving its spatial size.
    fine_branch = DarknetConv2D_BN_Leaky(f2_channel_num // 8, (1, 1))(f2)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    fine_branch = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(fine_branch)

    merged = Concatenate()([fine_branch, deep_branch])
    merged = Depthwise_Separable_Conv2D_BN_Leaky(filters=f1_channel_num, kernel_size=(3, 3), block_id_str='pred_3')(merged)
    # One box prediction (x, y, w, h, obj + class scores) per anchor.
    return DarknetConv2D(num_anchors * (num_classes + 5), (1, 1), name='predict_conv')(merged)
from functools import wraps, reduce, partial
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, MaxPooling2D, Concatenate, Lambda
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.regularizers import l2
from common.backbones.layers import CustomBatchNormalization
# Partial wrapper for Convolution2D: all Darknet convs default to 'same' padding.
_DarknetConv2D = partial(Conv2D, padding='same')
def compose(*funcs):
    """Compose functions left to right: compose(f, g)(x) == g(f(x))."""
    if not funcs:
        raise ValueError('Composition of empty sequence not supported.')

    def composed(*args, **kwargs):
        result = funcs[0](*args, **kwargs)
        for fn in funcs[1:]:
            result = fn(result)
        return result

    return composed
@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
    """Conv2D wrapper applying the Darknet L2 weight regularizer; caller
    kwargs take precedence."""
    merged_kwargs = dict({'kernel_regularizer': l2(5e-4)}, **kwargs)
    return _DarknetConv2D(*args, **merged_kwargs)
@wraps(DepthwiseConv2D)
def DarknetDepthwiseConv2D(*args, **kwargs):
    """DepthwiseConv2D wrapper with Darknet defaults: L2 regularizer and
    'valid' padding for stride-2, 'same' otherwise; caller kwargs win."""
    if kwargs.get('strides') == (2, 2):
        default_padding = 'valid'
    else:
        default_padding = 'same'
    merged_kwargs = dict({'kernel_regularizer': l2(5e-4),
                          'padding': default_padding}, **kwargs)
    return DepthwiseConv2D(*args, **merged_kwargs)
def Darknet_Depthwise_Separable_Conv2D_BN_Leaky(filters, kernel_size=(3, 3), block_id_str=None, **kwargs):
    """Darknet depthwise-separable block: DW conv + BN + LeakyReLU, then
    1x1 pointwise conv + BN + LeakyReLU."""
    block_id = block_id_str if block_id_str else str(K.get_uid())
    dw_kwargs = dict({'use_bias': False}, **kwargs)
    return compose(
        DarknetDepthwiseConv2D(kernel_size, name='conv_dw_' + block_id, **dw_kwargs),
        CustomBatchNormalization(name='conv_dw_%s_bn' % block_id),
        LeakyReLU(alpha=0.1, name='conv_dw_%s_leaky_relu' % block_id),
        Conv2D(filters, (1, 1), padding='same', use_bias=False, strides=(1, 1), name='conv_pw_%s' % block_id),
        CustomBatchNormalization(name='conv_pw_%s_bn' % block_id),
        LeakyReLU(alpha=0.1, name='conv_pw_%s_leaky_relu' % block_id))
def Depthwise_Separable_Conv2D_BN_Leaky(filters, kernel_size=(3, 3), block_id_str=None):
    """Depthwise-separable block (plain Keras layers): DW conv + BN +
    LeakyReLU, then 1x1 pointwise conv + BN + LeakyReLU."""
    block_id = block_id_str if block_id_str else str(K.get_uid())
    return compose(
        DepthwiseConv2D(kernel_size, padding='same', name='conv_dw_' + block_id),
        CustomBatchNormalization(name='conv_dw_%s_bn' % block_id),
        LeakyReLU(alpha=0.1, name='conv_dw_%s_leaky_relu' % block_id),
        Conv2D(filters, (1, 1), padding='same', use_bias=False, strides=(1, 1), name='conv_pw_%s' % block_id),
        CustomBatchNormalization(name='conv_pw_%s_bn' % block_id),
        LeakyReLU(alpha=0.1, name='conv_pw_%s_leaky_relu' % block_id))
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Conv2D followed by batch normalization and LeakyReLU(0.1)."""
    conv_kwargs = dict({'use_bias': False}, **kwargs)
    return compose(
        DarknetConv2D(*args, **conv_kwargs),
        CustomBatchNormalization(),
        LeakyReLU(alpha=0.1))
def bottleneck_block(outer_filters, bottleneck_filters):
    """Bottleneck stack: 3x3 (outer), 1x1 (squeeze), 3x3 (outer) convs."""
    wide = DarknetConv2D_BN_Leaky(outer_filters, (3, 3))
    squeeze = DarknetConv2D_BN_Leaky(bottleneck_filters, (1, 1))
    expand = DarknetConv2D_BN_Leaky(outer_filters, (3, 3))
    return compose(wide, squeeze, expand)
def bottleneck_x2_block(outer_filters, bottleneck_filters):
    """Double bottleneck: 3x3, 1x1, 3x3, then an extra 1x1, 3x3 pair."""
    head = bottleneck_block(outer_filters, bottleneck_filters)
    tail = compose(
        DarknetConv2D_BN_Leaky(bottleneck_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(outer_filters, (3, 3)))
    return compose(head, tail)
def space_to_depth_x2(x):
    """Thin wrapper for TensorFlow space_to_depth with block_size=2."""
    # The comment-stripping that produced this copy mangled the import
    # statement down to the bare "as tf" (a syntax error); restore it.
    # The local import is required to make the Lambda layer work.
    import tensorflow as tf
    return tf.nn.space_to_depth(x, block_size=2)
def space_to_depth_x2_output_shape(input_shape):
    """Output shape of space_to_depth_x2: spatial dims halve, channels x4.

    If the height dimension is unknown (falsy), both spatial dimensions of
    the result are reported as None.
    """
    batch = input_shape[0]
    channels = 4 * input_shape[3]
    if input_shape[1]:
        return (batch, input_shape[1] // 2, input_shape[2] // 2, channels)
    return (batch, None, None, channels)
def yolo2_predictions(feature_maps, feature_channel_nums, num_anchors, num_classes):
    """Build the YOLOv2 prediction head from two backbone feature maps."""
    f1, f2 = feature_maps
    f1_channel_num, f2_channel_num = feature_channel_nums

    deep_branch = compose(
        DarknetConv2D_BN_Leaky(f1_channel_num, (3, 3)),
        DarknetConv2D_BN_Leaky(f1_channel_num, (3, 3)))(f1)

    # Squeeze f2, then reorganize spatially so it matches f1's resolution.
    fine_branch = DarknetConv2D_BN_Leaky(f2_channel_num // 8, (1, 1))(f2)
    fine_branch = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(fine_branch)

    merged = Concatenate()([fine_branch, deep_branch])
    merged = DarknetConv2D_BN_Leaky(f1_channel_num, (3, 3))(merged)
    return DarknetConv2D(num_anchors * (num_classes + 5), (1, 1), name='predict_conv')(merged)
def yolo2lite_predictions(feature_maps, feature_channel_nums, num_anchors, num_classes):
    """YOLOv2-Lite prediction head: like yolo2_predictions but using
    depthwise-separable 3x3 convolutions."""
    f1, f2 = feature_maps
    f1_channel_num, f2_channel_num = feature_channel_nums

    deep_branch = compose(
        Depthwise_Separable_Conv2D_BN_Leaky(filters=f1_channel_num, kernel_size=(3, 3), block_id_str='pred_1'),
        Depthwise_Separable_Conv2D_BN_Leaky(filters=f1_channel_num, kernel_size=(3, 3), block_id_str='pred_2'))(f1)

    # Squeeze f2, then reorganize spatially so it matches f1's resolution.
    fine_branch = DarknetConv2D_BN_Leaky(f2_channel_num // 8, (1, 1))(f2)
    fine_branch = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(fine_branch)

    merged = Concatenate()([fine_branch, deep_branch])
    merged = Depthwise_Separable_Conv2D_BN_Leaky(filters=f1_channel_num, kernel_size=(3, 3), block_id_str='pred_3')(merged)
    return DarknetConv2D(num_anchors * (num_classes + 5), (1, 1), name='predict_conv')(merged)
| true | true |
1c2d4d55b91d987a3493e6c2b7aeb2884a7eaead | 10,559 | py | Python | scripts/get_assessor_data.py | mxndrwgrdnr/evictions | bf59bfe98903c2aa839daf7597951dc36cdecaad | [
"BSD-2-Clause"
] | null | null | null | scripts/get_assessor_data.py | mxndrwgrdnr/evictions | bf59bfe98903c2aa839daf7597951dc36cdecaad | [
"BSD-2-Clause"
] | null | null | null | scripts/get_assessor_data.py | mxndrwgrdnr/evictions | bf59bfe98903c2aa839daf7597951dc36cdecaad | [
"BSD-2-Clause"
] | 1 | 2022-02-25T04:05:36.000Z | 2022-02-25T04:05:36.000Z | #######################################################
# script to compile and standardize raw assessor data #
#######################################################
import pandas as pd
from tqdm import tqdm
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
import numpy as np
import geopandas as gpd
from matplotlib import pyplot as plt
# only use 10 years of history (2007-2016); the 2017-2018 roll is excluded
files = [
    # '../data/2019.8.12__SF_ASR_Secured_Roll_Data_2017-2018_0.xlsx',
    '../data/2019.8.12__SF_ASR_Secured_Roll_Data_2016-2017_0.xlsx',
    '../data/2019.8.12__SF_ASR_Secured_Roll_Data_2015-2016_0.xlsx',
    '../data/2019.8.20__SF_ASR_Secured_Roll_Data_2014-2015.xlsx',
    '../data/2019.8.20__SF_ASR_Secured_Roll_Data_2013-2014.xlsx',
    '../data/2019.8.20__SF_ASR_Secured_Roll_Data_2012-2013.xlsx',
    '../data/2019.8.20__SF_ASR_Secured_Roll_Data_2011-2012.xlsx',
    '../data/2019.8.20__SF_ASR_Secured_Roll_Data_2010-2011.xlsx',
    '../data/2019.8.20__SF_ASR_Secured_Roll_Data_2009-2010.xlsx',
    '../data/2019.8.20__SF_ASR_Secured_Roll_Data_2008-2009.xlsx',
    '../data/2019.8.20__SF_ASR_Secured_Roll_Data_2007-2008.xlsx',
]
years = [
    # 2017,
    2016, 2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008, 2007]

# Read each roll, tag every row with its assessment year, and stack them.
# Collect the frames first and concatenate once: calling pd.concat inside
# the loop re-copies the accumulated frame on every iteration (quadratic).
frames = []
for f, year in tqdm(zip(files, years), total=len(files)):
    tmp = pd.read_excel(f, engine='openpyxl')
    tmp['asr_yr'] = year
    frames.append(tmp)
asr = pd.concat(frames, sort=True)
# Map each assessor class code to its human-readable use code and its
# rent-control-eligibility flag, as defined in the reference CSV.
codes = pd.read_csv('../data/Reference__Assessor-Recorder_Property_Class_Codes.csv')
code_dict = dict(zip(codes['Class Code'], codes['Use Code']))
rc_dict = dict(zip(codes['Class Code'], codes['rc_eligible']))
asr['use_code'] = asr['RP1CLACDE'].map(code_dict)
asr['rc_eligible'] = asr['RP1CLACDE'].map(rc_dict)
# Hand-corrected PROPLOC strings for specific parcels (keyed by RP1PRCLID)
# whose recorded locations are missing or wrong in the raw rolls.
asr.loc[asr['RP1PRCLID'].isin(['1530 035', '1530 036']), 'PROPLOC'] = '0411 0409 14TH AV0000'
asr.loc[asr['RP1PRCLID'].isin(['1432 061', '1432 060']), 'PROPLOC'] = '0355 0353 ARGUELLO BL0000'
asr.loc[asr['RP1PRCLID'].isin(['1744 031', '1744 032']), 'PROPLOC'] = '0506 0504 HUGO ST0000'
asr.loc[asr['RP1PRCLID'].isin(['1254 098', '1254 097']), 'PROPLOC'] = '0083 0081 DOWNEY ST0000'
asr.loc[asr['RP1PRCLID'].isin(['0942 051', '0942 052']), 'PROPLOC'] = '2794 2792 FILBERT ST0000'
asr.loc[asr['RP1PRCLID'] == '1187 016', 'PROPLOC'] = '0000 0048 ASHBURY ST0000'
asr.loc[asr['RP1PRCLID'] == '1187 017', 'PROPLOC'] = '0000 0050 ASHBURY ST0000'
asr.loc[asr['RP1PRCLID'].isin(
    ['3730 188', '3730 189', '3730 190', '3730 191', '3730 192']), 'PROPLOC'] = '0000 0019 RAUSCH ST0000'
asr.loc[asr['RP1PRCLID'] == '3630 031', 'PROPLOC'] = '0175 0175 CHATTANOOGA ST0000'
asr.loc[asr['RP1PRCLID'] == '3630 032', 'PROPLOC'] = '0177 0177 CHATTANOOGA ST0000'
asr.loc[asr['RP1PRCLID'] == '3630 033', 'PROPLOC'] = '0179 0179 CHATTANOOGA ST0000'
asr.loc[asr['RP1PRCLID'] == '3630 030', 'PROPLOC'] = '0173 0173 CHATTANOOGA ST0000'
asr.loc[asr['RP1PRCLID'] == '3731 242', 'PROPLOC'] = '0000 0038 MOSS ST0000'
# Parse PROPLOC's fixed-width layout: chars 0-3 and 5-8 are the low/high
# street numbers, the remainder is street name + type/unit suffix.
asr = asr[asr['PROPLOC'] != '0000 0000 0000']
asr['house_1'] = asr['PROPLOC'].str[0:4].str.lstrip('0')
asr['house_2'] = asr['PROPLOC'].str[5:9].str.lstrip('0')
# Strip non-digits from the house numbers.
# NOTE(review): '\D' is a regex; pandas >= 2.0 defaults str.replace to
# regex=False, so this would become a literal match there -- confirm the
# pinned pandas version or pass regex=True explicitly.
asr['house_1'] = asr['house_1'].str.replace('\D', '')
# NOTE(review): -999 is an int sentinel written into an otherwise string
# column (mixed dtype); downstream comparisons should account for this.
asr.loc[asr['house_1'] == '', 'house_1'] = -999
asr['house_2'] = asr['house_2'].str.replace('\D', '')
asr = asr[asr['house_2'] != '']
asr = asr[~asr['PROPLOC'].str.contains('SITUS TO BE ASSIGNED')]
# Everything after column 10 except the last token is the street name;
# the last token ("street_rest") packs the street type and/or unit number.
asr['street_name'] = asr['PROPLOC'].str[10:].str.strip().str.split(' ').str[:-1].str.join(' ').str.strip().str.lstrip('0')
asr['street_rest'] = asr['PROPLOC'].str[10:].str.strip().str.split(' ').str[-1].str.strip()
asr['street_type'] = None
asr['unit_num'] = None
# A 6-7 char tail is a 2-char street type plus a unit number; a 4-5 char
# tail is the unit number alone.
asr.loc[asr['street_rest'].str.len().isin([6, 7]), 'street_type'] = asr.loc[
    asr['street_rest'].str.len().isin([6, 7]), 'street_rest'].str[0:2]
asr.loc[asr['street_rest'].str.len().isin([6, 7]), 'unit_num'] = asr.loc[
    asr['street_rest'].str.len().isin([6, 7]), 'street_rest'].str[2:]
asr.loc[asr['street_rest'].str.len().isin([4, 5]), 'unit_num'] = asr.loc[
    asr['street_rest'].str.len().isin([4, 5]), 'street_rest']
# "NORTH POINT" contains a space, which breaks the last-token split above;
# set its name and type explicitly.
asr.loc[asr['PROPLOC'].str.contains(
    'NORTH POINT'), 'street_name'] = 'NORTH POINT'
asr.loc[asr['PROPLOC'].str.contains(
    'NORTH POINT'), 'street_type'] = 'ST'
# When the street type (AVE/AVENUE/STREET/ST) ended up inside street_name,
# pull its first two letters out as the type and trim it off the name.
asr.loc[asr['street_name'].str.contains('\sAVE$|\sAVENUE$|\sSTREET$|\sST$'), 'street_type'] = asr.loc[
    asr['street_name'].str.contains('\sAVE$|\sAVENUE$|\sSTREET$|\sST$'),
    'street_name'].str.extract('(\sAVE$|\sAVENUE$|\sSTREET$|\sST$)', expand=False).str.strip().str[0:2]
asr.loc[asr['street_name'].str.contains('\sAVE$|\sAVENUE$|\sSTREET$|\sST$'), 'street_name'] = asr.loc[
    asr['street_name'].str.contains('\sAVE$|\sAVENUE$|\sSTREET$|\sST$'), 'street_name'].str.split(
    '\sAVE$|\sAVENUE$|\sSTREET$|\sST$').str[0]
# Garbled "ST"-like suffixes (STT/STIT/STITE/STNIT) are normalized to the
# placeholder type 'street' (mapped to 'ST' in st_typ_dict below) and
# removed from the name.
asr.loc[(~pd.isnull(asr['street_name'])) & (asr['street_name'].str.contains(
    '\sSTT$|\sSTIT$|\sSTITE$|\sSTNIT$')), 'street_type'] = 'street'
asr.loc[(~pd.isnull(asr['street_name'])) & (asr['street_name'].str.contains('\sSTT$|\sSTIT$|\sSTITE$|\sSTNIT$')), 'street_name'] = asr.loc[
    (~pd.isnull(asr['street_name'])) &
    (asr['street_name'].str.contains('\sSTT$|\sSTIT$|\sSTITE$|\sSTNIT$')), 'street_name'].str.split(
    '\sSTT$|\sSTIT$|\sSTITE$|\sSTNIT$').str[0].str.strip()
# Targeted street-name/type corrections.
asr.loc[asr['street_name'].str.contains('\sNOR$'), 'street_type'] = 'BLVD'
asr.loc[asr['street_name'].str.contains('FARRELL'), 'street_name'] = 'OFARRELL'
# Compute the EDINBURG mask once: the original code set street_type on
# 'EDINBURG' AFTER renaming those rows to 'EDINBURGH', so the street_type
# assignment never matched anything.
edinburg_mask = asr['street_name'] == 'EDINBURG'
asr.loc[edinburg_mask, 'street_name'] = 'EDINBURGH'
asr.loc[edinburg_mask, 'street_type'] = 'ST'
# "SO ... VAN NESS" variants all become SOUTH VAN NESS AVE.
so_van_ness_mask = asr['PROPLOC'].str.contains('.*SO.*VAN NESS.*')
asr.loc[so_van_ness_mask, 'street_name'] = 'SOUTH VAN NESS'
asr.loc[so_van_ness_mask, 'street_type'] = 'AVE'
asr.loc[asr['PROPLOC'].str.contains('BROADWAY'), 'street_type'] = 'ST'
# for pre in ['A', 'B', 'C']:
# for street in ['COLLINGWOOD', 'HAYES', 'MASONIC', 'RODGERS']:
# asr.loc[asr['street_name'] == pre + street, 'street_name'] = street
# for pre in ['A', 'B']:
# for street in [
# # 'CHURCH', 'UPPER', '14TH', 'FOLSOM', 'PINE', 'FREDERICK', 'PROSPECT', 'HARPER', 'PARNASSUS',
# # 'MACONDRAY',
# 'STANYAN']:
# asr.loc[asr['street_name'] == pre + street, 'street_name'] = street
# for pre in ['A']:
# for street in ['DOWNEY', 'CLINTON PARK']:
# asr.loc[asr['street_name'] == pre + street, 'street_name'] = street
# many streets have 4 digit street numbers with trailing zeros not present in the eviction records
# asr.loc[(asr['street_name'] == 'FREDERICK') & (asr['house_2'].astype(str).str.len() == 4), 'house_2'] = \
# asr.loc[(asr['street_name'] == 'FREDERICK') & (asr['house_2'].astype(str).str.len() == 4), 'house_2'].str[0:3]
# asr.loc[(asr['street_name'] == 'FREDERICK') & (asr['house_1'].astype(str).str.len() == 4), 'house_1'] = \
# asr.loc[(asr['street_name'] == 'FREDERICK') & (asr['house_1'].astype(str).str.len() == 4), 'house_1'].str[0:3]
# asr.loc[(asr['street_name'] == 'DOWNEY') & (asr['house_2'].astype(str).str.len() == 4), 'house_2'] = \
# asr.loc[(asr['street_name'] == 'DOWNEY') & (asr['house_2'].astype(str).str.len() == 4), 'house_2'].str[0:3]
# asr.loc[(asr['street_name'] == 'BELVEDERE') & (asr['house_2'].astype(str).str.len() == 4), 'house_2'] = \
# asr.loc[(asr['street_name'] == 'BELVEDERE') & (asr['house_2'].astype(str).str.len() == 4), 'house_2'].str[0:3]
# asr.loc[(asr['street_name']=='SHRADER') & (asr['house_2'] > 2000) & (asr['house_2'].astype(str).str[-1] == '0'), 'house_2'] = \
# asr.loc[(asr['street_name']=='SHRADER') & (asr['house_2'] > 2000) & (asr['house_2'].astype(str).str[-1] == '0'), 'house_2'].str[0:3]
# asr.loc[(asr['street_name']=='SHRADER') & (asr['house_1'] > 2000) & (asr['house_1'].astype(str).str[-1] == '0'), 'house_1'] = \
# asr.loc[(asr['street_name']=='SHRADER') & (asr['house_1'] > 2000) & (asr['house_1'].astype(str).str[-1] == '0'), 'house_1'].str[0:3]
# asr.loc[(asr['street_name']=='WALLER') & (asr['house_2'] > 1000) & (asr['house_2'].astype(str).str[-1] == '0'), 'house_2'] = \
# asr.loc[(asr['street_name']=='WALLER') & (asr['house_2'] > 1000) & (asr['house_2'].astype(str).str[-1] == '0'), 'house_2'].str[0:3]
# asr.loc[(asr['street_name']=='WALLER') & (asr['house_1'] > 1000) & (asr['house_1'].astype(str).str[-1] == '0'), 'house_1'] = \
# asr.loc[(asr['street_name']=='WALLER') & (asr['house_1'] > 1000) & (asr['house_1'].astype(str).str[-1] == '0'), 'house_1'].str[0:3]
asr.loc[asr['street_name'].str.contains('^VALLEY.*F$'), 'street_name'] = 'VALLEY'
# # a bunch of street names have an erroneous letter "V" appended to the beginning
# asr.loc[(~pd.isnull(asr['street_name'])) & (asr['street_name'].str.contains('^V[^AEIOU]')), 'street_name'] = \
# asr.loc[(~pd.isnull(asr['street_name'])) & (asr['street_name'].str.contains('^V[^AEIOU]')), 'street_name'].str[1:]
# other_weird_vnames = [
# 'VELSIE', 'VUNDERWOOD', 'VEDGEHILL', 'VEGBERT', 'VOAKDALE', 'VANDOVER',
# 'VINGERSON', 'VERVINE', 'VEDDY', 'VEVANS', 'VUNION', 'VALEMANY',
# 'VARMSTRONG', 'VELMIRA', 'VIRVING', 'VOCEAN', 'VESMERALDA', 'VELLSWORTH',
# 'VORIZABA', 'VALABAMA', 'VARGUELLO', 'VATHENS', 'VOAK', 'VELLIS',
# 'VORTEGA', 'VALBERTA', 'VUPPER', 'VINGALLS', 'VELIZABETH', 'VARBOR',
# 'VINDIANA', 'VUNIVERSITY', 'VEUCALYPTUS', 'VAPOLLO', 'VULLOA', 'VALADDIN',
# 'VEATON', 'VEDGEWOOD', 'VERIE', 'VAQUAVISTA', 'VALTA', 'VALTON', 'VOTSEGO',
# 'VORD', 'VAPTOS', 'VEXETER', 'VOCTAVIA', 'VURBANO', 'VAGNON', 'VOGDEN',
# 'VASHTON', 'VAUSTIN', 'VASHBURY', 'VABBEY', 'VALDER', 'VARKANSAS',
# 'VOAK GROVE', 'VARCH', 'VEDGAR', 'VILLINOIS', 'VARLETA']
# asr.loc[asr['street_name'].isin(other_weird_vnames), 'street_name'] = \
# asr.loc[asr['street_name'].isin(other_weird_vnames), 'street_name'].str[1:]
# Normalize abbreviated street-type codes to conventional suffixes.
st_typ_dict = {'street': 'ST', 'AV': 'AVE', 'BL': 'BLVD', 'WY': 'WAY',
               'TE': 'TER', 'PK': 'PARK', 'HW': 'HWY', 'LANE': 'LN', 'AL': 'ALY',
               'CR': 'CIR', 'LA': 'LN', 'PZ': 'PLZ', 'TR': 'TER', 'RW': 'ROW', 'BV': 'BLVD',
               'WK': 'WALK'}
asr = asr.replace({'street_type': st_typ_dict})

# Encode use codes as small integers; unmapped codes keep their original
# use_code value (Series.replace, unlike .map, leaves them intact).
bldg_typ_dict = {'SRES': 1, 'GOVT': 2, 'IND': 3, 'COMM': 4,
                 'COMR': 5, 'COMO': 6, 'COMH': 7, 'MISC': 8, 'MRES': 9}
# Replace on the single column instead of replacing across the whole frame
# and selecting the column afterwards: same result, no full-frame copy.
asr['bldg_type'] = asr['use_code'].replace(bldg_typ_dict)

asr.to_csv('../data/assessor_2007-2016.csv', index=False)
asr['house_1'] = asr['house_1'].str.replace('\D', '')
asr.loc[asr['house_1'] == '', 'house_1'] = -999
asr['house_2'] = asr['house_2'].str.replace('\D', '')
asr = asr[asr['house_2'] != '']
asr = asr[~asr['PROPLOC'].str.contains('SITUS TO BE ASSIGNED')]
asr['street_name'] = asr['PROPLOC'].str[10:].str.strip().str.split(' ').str[:-1].str.join(' ').str.strip().str.lstrip('0')
asr['street_rest'] = asr['PROPLOC'].str[10:].str.strip().str.split(' ').str[-1].str.strip()
asr['street_type'] = None
asr['unit_num'] = None
asr.loc[asr['street_rest'].str.len().isin([6, 7]), 'street_type'] = asr.loc[
asr['street_rest'].str.len().isin([6, 7]), 'street_rest'].str[0:2]
asr.loc[asr['street_rest'].str.len().isin([6, 7]), 'unit_num'] = asr.loc[
asr['street_rest'].str.len().isin([6, 7]), 'street_rest'].str[2:]
asr.loc[asr['street_rest'].str.len().isin([4, 5]), 'unit_num'] = asr.loc[
asr['street_rest'].str.len().isin([4, 5]), 'street_rest']
asr.loc[asr['PROPLOC'].str.contains(
'NORTH POINT'), 'street_name'] = 'NORTH POINT'
asr.loc[asr['PROPLOC'].str.contains(
'NORTH POINT'), 'street_type'] = 'ST'
asr.loc[asr['street_name'].str.contains('\sAVE$|\sAVENUE$|\sSTREET$|\sST$'), 'street_type'] = asr.loc[
asr['street_name'].str.contains('\sAVE$|\sAVENUE$|\sSTREET$|\sST$'),
'street_name'].str.extract('(\sAVE$|\sAVENUE$|\sSTREET$|\sST$)', expand=False).str.strip().str[0:2]
asr.loc[asr['street_name'].str.contains('\sAVE$|\sAVENUE$|\sSTREET$|\sST$'), 'street_name'] = asr.loc[
asr['street_name'].str.contains('\sAVE$|\sAVENUE$|\sSTREET$|\sST$'), 'street_name'].str.split(
'\sAVE$|\sAVENUE$|\sSTREET$|\sST$').str[0]
asr.loc[(~pd.isnull(asr['street_name'])) & (asr['street_name'].str.contains(
'\sSTT$|\sSTIT$|\sSTITE$|\sSTNIT$')), 'street_type'] = 'street'
asr.loc[(~pd.isnull(asr['street_name'])) & (asr['street_name'].str.contains('\sSTT$|\sSTIT$|\sSTITE$|\sSTNIT$')), 'street_name'] = asr.loc[
(~pd.isnull(asr['street_name'])) &
(asr['street_name'].str.contains('\sSTT$|\sSTIT$|\sSTITE$|\sSTNIT$')), 'street_name'].str.split(
'\sSTT$|\sSTIT$|\sSTITE$|\sSTNIT$').str[0].str.strip()
asr.loc[asr['street_name'].str.contains('\sNOR$'), 'street_type'] = 'BLVD'
asr.loc[asr['street_name'].str.contains('FARRELL'), 'street_name'] = 'OFARRELL'
asr.loc[asr['street_name'] == 'EDINBURG', 'street_name'] = 'EDINBURGH'
asr.loc[asr['street_name'] == 'EDINBURG', 'street_type'] = 'ST'
asr.loc[asr['PROPLOC'].str.contains('.*SO.*VAN NESS.*'), 'street_name'] = 'SOUTH VAN NESS'
asr.loc[asr['PROPLOC'].str.contains('.*SO.*VAN NESS.*'), 'street_type'] = 'AVE'
asr.loc[asr['PROPLOC'].str.contains('BROADWAY'), 'street_type'] = 'ST'
'WAY',
'TE': 'TER', 'PK': 'PARK', 'HW': 'HWY', 'LANE': 'LN', 'AL': 'ALY',
'CR': 'CIR', 'LA': 'LN', 'PZ': 'PLZ', 'TR': 'TER', 'RW': 'ROW', 'BV': 'BLVD',
'WK': 'WALK'}
asr = asr.replace({'street_type': st_typ_dict})
bldg_typ_dict = {'SRES': 1, 'GOVT': 2, 'IND': 3, 'COMM': 4,
'COMR': 5, 'COMO': 6, 'COMH': 7, 'MISC': 8, 'MRES': 9}
asr['bldg_type'] = asr.replace({'use_code': bldg_typ_dict})['use_code']
asr.to_csv('../data/assessor_2007-2016.csv', index=False) | true | true |
1c2d50214ff393c69f09f4c26e3e39a5615ac21d | 4,425 | py | Python | tests/data/parser_data.py | arniebarni/rhasspy_weather | 6a9df72adad3e5dafa7962c2be37c824dc04137b | [
"MIT"
] | 5 | 2020-03-29T01:00:30.000Z | 2022-02-06T20:00:00.000Z | tests/data/parser_data.py | arniebarni/rhasspy_weather | 6a9df72adad3e5dafa7962c2be37c824dc04137b | [
"MIT"
] | 12 | 2020-04-02T15:09:05.000Z | 2021-10-11T00:44:21.000Z | tests/data/parser_data.py | arniebarni/rhasspy_weather | 6a9df72adad3e5dafa7962c2be37c824dc04137b | [
"MIT"
] | 5 | 2020-03-25T08:33:02.000Z | 2021-05-18T08:47:41.000Z | import argparse
from rhasspyhermes.intent import Intent, Slot
from rhasspyhermes.nlu import NluIntent
# Raw Rhasspy NLU JSON payloads (German weather queries), keyed by test-case
# name.  Each value is the verbatim JSON string a Rhasspy intent handler
# would receive for "wie wird das wetter heute [um 10 uhr | mittag]".
rhasspy_intent = {
    "request_weather_full_day": '{"entities": [{"end": 25, "entity": "when_day", "raw_end": 25, "raw_start": 20, "raw_value": "heute", '
                                '"start": 20, "value": "heute", "value_details": {"kind": "Unknown", "value": "heute"}}], '
                                '"intent": {"confidence": 1, "name": "GetWeatherForecast"}, "raw_text": "wie wird das wetter heute", '
                                '"raw_tokens": ["wie", "wird", "das", "wetter", "heute"], "recognize_seconds": 0.16436054417863488, '
                                '"slots": {"when_day": "heute"}, "speech_confidence": 1, "text": "wie wird das wetter heute", '
                                '"tokens": ["wie", "wird", "das", "wetter", "heute"], "wakeword_id": null}',
    "request_weather_full_time": '{"entities": [{"end": 25, "entity": "when_day", "raw_end": 25, "raw_start": 20, "raw_value": "heute", '
                                 '"start": 20, "value": "heute", "value_details": {"kind": "Unknown", "value": "heute"}}, '
                                 '{"end": 28, "entity": "when_time", "raw_end": 37, "raw_start": 26, "raw_value": "um zehn uhr", "start": 26, '
                                 '"value": 10, "value_details": {"kind": "Number", "value": 10}}], '
                                 '"intent": {"confidence": 1, "name": "GetWeatherForecast"}, '
                                 '"raw_text": "wie wird das wetter heute um 10 uhr", '
                                 '"raw_tokens": ["wie", "wird", "das", "wetter", "heute", "um", "10", "uhr"], '
                                 '"recognize_seconds": 0.17279156856238842, "slots": {"when_day": "heute", "when_time": 10}, '
                                 '"speech_confidence": 1, "text": "wie wird das wetter heute 10", '
                                 '"tokens": ["wie", "wird", "das", "wetter", "heute", "10"], "wakeword_id": null}',
    "request_weather_full_interval": '{"entities": [{"end": 25, "entity": "when_day", "raw_end": 25, "raw_start": 20, "raw_value": "heute", '
                                     '"start": 20, "value": "heute", "value_details": {"kind": "Unknown", "value": "heute"}}, {"end": 32, '
                                     '"entity": "when_time", "raw_end": 32, "raw_start": 26, "raw_value": "mittag", "start": 26, '
                                     '"value": "Mittag", "value_details": {"kind": "Unknown", "value": "Mittag"}}], '
                                     '"intent": {"confidence": 1, "name": "GetWeatherForecast"}, "raw_text": "wie wird das wetter heute mittag", '
                                     '"raw_tokens": ["wie", "wird", "das", "wetter", "heute", "mittag"], "recognize_seconds": 0.11356039298698306, '
                                     '"slots": {"when_day": "heute", "when_time": "Mittag"}, "speech_confidence": 1, '
                                     '"text": "wie wird das wetter heute Mittag", "tokens": ["wie", "wird", "das", "wetter", "heute", "Mittag"], '
                                     '"wakeword_id": null}'
}
# Hermes-protocol fixtures mirroring the rhasspy_intent payloads above,
# built from rhasspyhermes objects instead of raw JSON.
day_slot = Slot(entity="test", slot_name="when_day", value={"value": "heute"}, raw_value="heute")

nlu_intent = {
    "request_weather_full_day": NluIntent("Wie wird das Wetter heute?", Intent("GetWeatherForecastFull", 1), slots=[day_slot]),
    "request_weather_full_time": NluIntent("Wie wird das Wetter heute um 10 Uhr?", Intent("GetWeatherForecastFull", 1), slots=[day_slot, Slot(entity="test", slot_name="when_time", value={"value": 10}, raw_value="zehn")]),
    "request_weather_full_interval": NluIntent("Wie wird das Wetter heute mittag?", Intent("GetWeatherForecastFull", 1), slots=[day_slot, Slot(entity="test", slot_name="when_time", value={"value": "mittag"}, raw_value="mittag")])
}
# NOTE(review): argparse.ArgumentParser's positional parameters are
# (prog, usage, description, epilog, ...), so these calls build parser
# objects whose prog/usage happen to be "-d"/"heute" -- nothing is parsed.
# This looks like it was meant to be parsed-argument fixtures (e.g.
# argparse.Namespace(...) or parser.parse_args([...])); confirm against
# the console-argument parser these are fed into.
console_args = {
    "request_weather_full_day": argparse.ArgumentParser("-d", "heute", "-t", "mittag"),
    "request_weather_full_time": argparse.ArgumentParser("-d", "heute", "-t", "10"),
    "request_weather_full_interval": argparse.ArgumentParser("-d", "heute", "-t", "mittag")
}
# Lookup of all fixture flavors by parser type, used to drive the
# rhasspy_weather parser tests.
intents = {
    "rhasspy_intent": rhasspy_intent,
    "nlu_intent": nlu_intent,
    "console_args": console_args
}
| 86.764706 | 233 | 0.515254 | import argparse
from rhasspyhermes.intent import Intent, Slot
from rhasspyhermes.nlu import NluIntent
rhasspy_intent = {
"request_weather_full_day": '{"entities": [{"end": 25, "entity": "when_day", "raw_end": 25, "raw_start": 20, "raw_value": "heute", '
'"start": 20, "value": "heute", "value_details": {"kind": "Unknown", "value": "heute"}}], '
'"intent": {"confidence": 1, "name": "GetWeatherForecast"}, "raw_text": "wie wird das wetter heute", '
'"raw_tokens": ["wie", "wird", "das", "wetter", "heute"], "recognize_seconds": 0.16436054417863488, '
'"slots": {"when_day": "heute"}, "speech_confidence": 1, "text": "wie wird das wetter heute", '
'"tokens": ["wie", "wird", "das", "wetter", "heute"], "wakeword_id": null}',
"request_weather_full_time": '{"entities": [{"end": 25, "entity": "when_day", "raw_end": 25, "raw_start": 20, "raw_value": "heute", '
'"start": 20, "value": "heute", "value_details": {"kind": "Unknown", "value": "heute"}}, '
'{"end": 28, "entity": "when_time", "raw_end": 37, "raw_start": 26, "raw_value": "um zehn uhr", "start": 26, '
'"value": 10, "value_details": {"kind": "Number", "value": 10}}], '
'"intent": {"confidence": 1, "name": "GetWeatherForecast"}, '
'"raw_text": "wie wird das wetter heute um 10 uhr", '
'"raw_tokens": ["wie", "wird", "das", "wetter", "heute", "um", "10", "uhr"], '
'"recognize_seconds": 0.17279156856238842, "slots": {"when_day": "heute", "when_time": 10}, '
'"speech_confidence": 1, "text": "wie wird das wetter heute 10", '
'"tokens": ["wie", "wird", "das", "wetter", "heute", "10"], "wakeword_id": null}',
"request_weather_full_interval": '{"entities": [{"end": 25, "entity": "when_day", "raw_end": 25, "raw_start": 20, "raw_value": "heute", '
'"start": 20, "value": "heute", "value_details": {"kind": "Unknown", "value": "heute"}}, {"end": 32, '
'"entity": "when_time", "raw_end": 32, "raw_start": 26, "raw_value": "mittag", "start": 26, '
'"value": "Mittag", "value_details": {"kind": "Unknown", "value": "Mittag"}}], '
'"intent": {"confidence": 1, "name": "GetWeatherForecast"}, "raw_text": "wie wird das wetter heute mittag", '
'"raw_tokens": ["wie", "wird", "das", "wetter", "heute", "mittag"], "recognize_seconds": 0.11356039298698306, '
'"slots": {"when_day": "heute", "when_time": "Mittag"}, "speech_confidence": 1, '
'"text": "wie wird das wetter heute Mittag", "tokens": ["wie", "wird", "das", "wetter", "heute", "Mittag"], '
'"wakeword_id": null}'
}
day_slot = Slot(entity="test", slot_name="when_day", value={"value": "heute"}, raw_value="heute")
nlu_intent = {
"request_weather_full_day": NluIntent("Wie wird das Wetter heute?", Intent("GetWeatherForecastFull", 1), slots=[day_slot]),
"request_weather_full_time": NluIntent("Wie wird das Wetter heute um 10 Uhr?", Intent("GetWeatherForecastFull", 1), slots=[day_slot, Slot(entity="test", slot_name="when_time", value={"value": 10}, raw_value="zehn")]),
"request_weather_full_interval": NluIntent("Wie wird das Wetter heute mittag?", Intent("GetWeatherForecastFull", 1), slots=[day_slot, Slot(entity="test", slot_name="when_time", value={"value": "mittag"}, raw_value="mittag")])
}
console_args = {
"request_weather_full_day": argparse.ArgumentParser("-d", "heute", "-t", "mittag"),
"request_weather_full_time": argparse.ArgumentParser("-d", "heute", "-t", "10"),
"request_weather_full_interval": argparse.ArgumentParser("-d", "heute", "-t", "mittag")
}
intents = {
"rhasspy_intent": rhasspy_intent,
"nlu_intent": nlu_intent,
"console_args": console_args
}
| true | true |
1c2d503b6b0ab6dedf0afdc302bf880aaefa0cf3 | 10,017 | py | Python | tests/test_model.py | nicolasramy/optimove-client | d492ddaa7b20493c1077308a2404994730f8c6cc | [
"MIT"
] | 3 | 2016-08-10T14:14:54.000Z | 2016-09-01T05:48:03.000Z | tests/test_model.py | nicolasramy/optimove-client | d492ddaa7b20493c1077308a2404994730f8c6cc | [
"MIT"
] | 6 | 2016-08-23T13:03:29.000Z | 2018-02-08T17:01:59.000Z | tests/test_model.py | nicolasramy/optimove-client | d492ddaa7b20493c1077308a2404994730f8c6cc | [
"MIT"
] | 3 | 2016-09-01T09:58:26.000Z | 2019-10-16T13:27:58.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import unittest
from six.moves.urllib.parse import parse_qs, urlparse
from optimove.client import Client
from optimove.constants import DEFAULT_URL
import responses
from tests.constants import HEADERS
from tests.helpers import login_callback, token_required
"""Callbacks"""
@token_required
def get_customer_attribute_list_callback(request):
    """Mocked GetCustomerAttributeList endpoint: returns three attribute records."""
    attributes = [
        ('Affiliate', 'Acquisition affiliate'),
        ('Age', 'Customer age'),
        ('Country', 'Country of residence'),
    ]
    payload = [{'RealFieldName': name, 'Description': desc} for name, desc in attributes]
    return 200, HEADERS['json'], json.dumps(payload)
@token_required
def get_lifecycle_stage_list_callback(request):
    """Mocked GetLifecycleStageList endpoint: returns four lifecycle stages."""
    stages = [(1, 'New'), (2, 'Active'), (3, 'FromChurn'), (4, 'Churn')]
    payload = [{'StageID': stage_id, 'StageName': stage_name} for stage_id, stage_name in stages]
    return 200, HEADERS['json'], json.dumps(payload)
@token_required
def get_microsegment_list_callback(request):
    """Mocked GetMicrosegmentList endpoint: returns three microsegment records."""
    rows = [
        (1, 'DWag1-Europe-Winner', 1, 870.55, 0.55),
        (2, 'DWag2-US-Loser', 2, 1065.10, 0.52),
        (3, 'DWag3-ROW-Winner', 2, 1213.76, 0.57),
    ]
    payload = [
        {'MicrosegmentID': seg_id, 'MicrosegmentName': seg_name,
         'LifecycleStageID': stage_id, 'FutureValue': future_value, 'ChurnRate': churn_rate}
        for seg_id, seg_name, stage_id, future_value, churn_rate in rows
    ]
    return 200, HEADERS['json'], json.dumps(payload)
@token_required
def get_microsegment_changers_callback(request):
    """Mocked GetMicrosegmentChangers endpoint (variant without customer attributes)."""
    moves = [(4, 12), (3, 67)]
    payload = [
        {'CustomerID': '231342', 'InitialMicrosegmentID': initial, 'FinalMicrosegmentID': final}
        for initial, final in moves
    ]
    return 200, HEADERS['json'], json.dumps(payload)
@token_required
def get_microsegment_changers_with_attributes_callback(request):
    """Mocked GetMicrosegmentChangers endpoint that also validates the query string.

    Responds 200 with attribute-enriched records only for the exact expected
    parameter combination; anything else gets a 404.
    """
    query = parse_qs(urlparse(request.url).query)
    # Direct indexing deliberately raises KeyError when a mandatory parameter
    # is absent (same strictness as the original and-chain, including its
    # short-circuit order).
    matches = (
        query['StartDate'][0] == '2016-01-01'
        and query['EndDate'][0] == '2016-01-31'
        and query['CustomerAttributes'][0] == 'Alias;Country'
        and query['CustomerAttributesDelimiter'][0] == ','
    )
    if not matches:
        return 404, HEADERS['text'], 'Not Found'
    payload = [
        {'CustomerID': '231342', 'InitialMicrosegmentID': 4, 'FinalMicrosegmentID': 12,
         'CustomerAttributes': ['BuddyZZ', 'UK']},
        {'CustomerID': '231342', 'InitialMicrosegmentID': 3, 'FinalMicrosegmentID': 67,
         'CustomerAttributes': ['Player99', 'US']}
    ]
    return 200, HEADERS['json'], json.dumps(payload)
"""Tests"""
class TestModel(unittest.TestCase):
    """Unit tests for ``Client.model`` built on mocked HTTP responses.

    Each test registers a callback for the login endpoint (needed because the
    model callbacks are wrapped by ``token_required``) plus one callback for
    the model endpoint under test, then asserts on the parsed return value.
    """
    @responses.activate
    def test_get_customer_attribute_list(self):
        """Attributes come back as a {RealFieldName: Description} dict."""
        responses.add_callback(
            responses.POST,
            DEFAULT_URL + '/current/general/login',
            callback=login_callback,
            content_type='application/json'
        )
        responses.add_callback(
            responses.GET,
            DEFAULT_URL + '/current/model/GetCustomerAttributeList',
            callback=get_customer_attribute_list_callback,
            content_type='application/json'
        )
        client = Client('username', 'password')
        data = client.model.get_customer_attribute_list()
        self.assertEqual(data, {
            'Affiliate': 'Acquisition affiliate',
            'Age': 'Customer age',
            'Country': 'Country of residence',
        })
    @responses.activate
    def test_get_lifecycle_stage_list(self):
        """Stages come back as a {StageID: StageName} dict."""
        responses.add_callback(
            responses.POST,
            DEFAULT_URL + '/current/general/login',
            callback=login_callback,
            content_type='application/json'
        )
        responses.add_callback(
            responses.GET,
            DEFAULT_URL + '/current/model/GetLifecycleStageList',
            callback=get_lifecycle_stage_list_callback,
            content_type='application/json'
        )
        client = Client('username', 'password')
        data = client.model.get_lifecycle_stage_list()
        self.assertEqual(data, {
            1: 'New',
            2: 'Active',
            3: 'FromChurn',
            4: 'Churn',
        })
    @responses.activate
    def test_get_microsegment_list(self):
        """Microsegments come back keyed by id with name/stage/value/churn fields."""
        responses.add_callback(
            responses.POST,
            DEFAULT_URL + '/current/general/login',
            callback=login_callback,
            content_type='application/json'
        )
        responses.add_callback(
            responses.GET,
            DEFAULT_URL + '/current/model/GetMicrosegmentList',
            callback=get_microsegment_list_callback,
            content_type='application/json'
        )
        client = Client('username', 'password')
        data = client.model.get_microsegment_list()
        self.assertEqual(data, {
            1: {
                'name': 'DWag1-Europe-Winner',
                'stage_id': 1,
                'future_value': 870.55,
                'churn_rate': 0.55
            },
            2: {
                'name': 'DWag2-US-Loser',
                'stage_id': 2,
                'future_value': 1065.10,
                'churn_rate': 0.52
            },
            3: {
                'name': 'DWag3-ROW-Winner',
                'stage_id': 2,
                'future_value': 1213.76,
                'churn_rate': 0.57
            }
        })
    @responses.activate
    def test_get_microsegment_changers(self):
        """Changers come back as a list of initial/final microsegment moves."""
        responses.add_callback(
            responses.POST,
            DEFAULT_URL + '/current/general/login',
            callback=login_callback,
            content_type='application/json'
        )
        responses.add_callback(
            responses.GET,
            DEFAULT_URL + '/current/model/GetMicrosegmentChangers',
            callback=get_microsegment_changers_callback,
            content_type='application/json'
        )
        client = Client('username', 'password')
        data = client.model.get_microsegment_changers('2016-01-01', '2016-01-31')
        self.assertEqual(data, [
            {
                'customer_id': '231342',
                'initial': 4,
                'final': 12
            },
            {
                'customer_id': '231342',
                'initial': 3,
                'final': 67
            },
        ])
    @responses.activate
    def test_get_microsegment_changers_with_attributes(self):
        """Requested customer attributes are zipped into an 'attributes' dict per record."""
        responses.add_callback(
            responses.POST,
            DEFAULT_URL + '/current/general/login',
            callback=login_callback,
            content_type='application/json'
        )
        responses.add_callback(
            responses.GET,
            DEFAULT_URL + '/current/model/GetMicrosegmentChangers',
            callback=get_microsegment_changers_with_attributes_callback,
            content_type='application/json'
        )
        client = Client('username', 'password')
        data = client.model.get_microsegment_changers('2016-01-01', '2016-01-31', ['Alias', 'Country'], ',')
        self.assertEqual(data, [
            {
                'customer_id': '231342',
                'initial': 4,
                'final': 12,
                'attributes': {
                    'Alias': 'BuddyZZ',
                    'Country': 'UK'
                }
            },
            {
                'customer_id': '231342',
                'initial': 3,
                'final': 67,
                'attributes': {
                    'Alias': 'Player99',
                    'Country': 'US'
                }
            },
        ])
    @responses.activate
    def test_get_microsegment_changers_with_wrong_delimiter(self):
        """An unsupported attribute delimiter ('/') makes the client raise."""
        responses.add_callback(
            responses.POST,
            DEFAULT_URL + '/current/general/login',
            callback=login_callback,
            content_type='application/json'
        )
        responses.add_callback(
            responses.GET,
            DEFAULT_URL + '/current/model/GetMicrosegmentChangers',
            callback=get_microsegment_changers_with_attributes_callback,
            content_type='application/json'
        )
        client = Client('username', 'password')
        self.assertRaises(Exception, client.model.get_microsegment_changers,
                          '2016-01-01', '2016-01-31', ['Alias', 'Country'], '/')
    @responses.activate
    def test_get_microsegment_changers_with_empty_dates(self):
        """A missing (None) end date makes the client raise."""
        responses.add_callback(
            responses.POST,
            DEFAULT_URL + '/current/general/login',
            callback=login_callback,
            content_type='application/json'
        )
        responses.add_callback(
            responses.GET,
            DEFAULT_URL + '/current/model/GetMicrosegmentChangers',
            callback=get_microsegment_changers_with_attributes_callback,
            content_type='application/json'
        )
        client = Client('username', 'password')
        self.assertRaises(Exception, client.model.get_microsegment_changers,
                          '2016-01-01', None, ['Alias', 'Country'], ',')
    @responses.activate
    def test_get_microsegment_changers_with_wrong_dates(self):
        """A date range the mock rejects (404) yields a falsy result."""
        responses.add_callback(
            responses.POST,
            DEFAULT_URL + '/current/general/login',
            callback=login_callback,
            content_type='application/json'
        )
        responses.add_callback(
            responses.GET,
            DEFAULT_URL + '/current/model/GetMicrosegmentChangers',
            callback=get_microsegment_changers_with_attributes_callback,
            content_type='application/json'
        )
        client = Client('username', 'password')
        data = client.model.get_microsegment_changers('3016-01-01', '3016-01-31', ['Alias', 'Country'], ',')
        self.assertFalse(data)
| 33.059406 | 108 | 0.578616 |
from __future__ import absolute_import, unicode_literals
import json
import unittest
from six.moves.urllib.parse import parse_qs, urlparse
from optimove.client import Client
from optimove.constants import DEFAULT_URL
import responses
from tests.constants import HEADERS
from tests.helpers import login_callback, token_required
@token_required
def get_customer_attribute_list_callback(request):
resp_body = [
{'RealFieldName': 'Affiliate', 'Description': 'Acquisition affiliate'},
{'RealFieldName': 'Age', 'Description': 'Customer age'},
{'RealFieldName': 'Country', 'Description': 'Country of residence'}
]
return 200, HEADERS['json'], json.dumps(resp_body)
@token_required
def get_lifecycle_stage_list_callback(request):
resp_body = [
{'StageID': 1, 'StageName': 'New'},
{'StageID': 2, 'StageName': 'Active'},
{'StageID': 3, 'StageName': 'FromChurn'},
{'StageID': 4, 'StageName': 'Churn'}
]
return 200, HEADERS['json'], json.dumps(resp_body)
@token_required
def get_microsegment_list_callback(request):
resp_body = [
{'MicrosegmentID': 1, 'MicrosegmentName': 'DWag1-Europe-Winner',
'LifecycleStageID': 1, 'FutureValue': 870.55, 'ChurnRate': 0.55},
{'MicrosegmentID': 2, 'MicrosegmentName': 'DWag2-US-Loser',
'LifecycleStageID': 2, 'FutureValue': 1065.10, 'ChurnRate': 0.52},
{'MicrosegmentID': 3, 'MicrosegmentName': 'DWag3-ROW-Winner',
'LifecycleStageID': 2, 'FutureValue': 1213.76, 'ChurnRate': 0.57}
]
return 200, HEADERS['json'], json.dumps(resp_body)
@token_required
def get_microsegment_changers_callback(request):
resp_body = [
{'CustomerID': '231342', 'InitialMicrosegmentID': 4, 'FinalMicrosegmentID': 12},
{'CustomerID': '231342', 'InitialMicrosegmentID': 3, 'FinalMicrosegmentID': 67}
]
return 200, HEADERS['json'], json.dumps(resp_body)
@token_required
def get_microsegment_changers_with_attributes_callback(request):
params = parse_qs(urlparse(request.url).query)
if params['StartDate'][0] == '2016-01-01' and params['EndDate'][0] == '2016-01-31'\
and params['CustomerAttributes'][0] == 'Alias;Country'\
and params['CustomerAttributesDelimiter'][0] == ',':
resp_body = [
{'CustomerID': '231342', 'InitialMicrosegmentID': 4, 'FinalMicrosegmentID': 12,
'CustomerAttributes': ['BuddyZZ', 'UK']},
{'CustomerID': '231342', 'InitialMicrosegmentID': 3, 'FinalMicrosegmentID': 67,
'CustomerAttributes': ['Player99', 'US']}
]
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
class TestModel(unittest.TestCase):
@responses.activate
def test_get_customer_attribute_list(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/model/GetCustomerAttributeList',
callback=get_customer_attribute_list_callback,
content_type='application/json'
)
client = Client('username', 'password')
data = client.model.get_customer_attribute_list()
self.assertEqual(data, {
'Affiliate': 'Acquisition affiliate',
'Age': 'Customer age',
'Country': 'Country of residence',
})
@responses.activate
def test_get_lifecycle_stage_list(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/model/GetLifecycleStageList',
callback=get_lifecycle_stage_list_callback,
content_type='application/json'
)
client = Client('username', 'password')
data = client.model.get_lifecycle_stage_list()
self.assertEqual(data, {
1: 'New',
2: 'Active',
3: 'FromChurn',
4: 'Churn',
})
@responses.activate
def test_get_microsegment_list(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/model/GetMicrosegmentList',
callback=get_microsegment_list_callback,
content_type='application/json'
)
client = Client('username', 'password')
data = client.model.get_microsegment_list()
self.assertEqual(data, {
1: {
'name': 'DWag1-Europe-Winner',
'stage_id': 1,
'future_value': 870.55,
'churn_rate': 0.55
},
2: {
'name': 'DWag2-US-Loser',
'stage_id': 2,
'future_value': 1065.10,
'churn_rate': 0.52
},
3: {
'name': 'DWag3-ROW-Winner',
'stage_id': 2,
'future_value': 1213.76,
'churn_rate': 0.57
}
})
@responses.activate
def test_get_microsegment_changers(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/model/GetMicrosegmentChangers',
callback=get_microsegment_changers_callback,
content_type='application/json'
)
client = Client('username', 'password')
data = client.model.get_microsegment_changers('2016-01-01', '2016-01-31')
self.assertEqual(data, [
{
'customer_id': '231342',
'initial': 4,
'final': 12
},
{
'customer_id': '231342',
'initial': 3,
'final': 67
},
])
@responses.activate
def test_get_microsegment_changers_with_attributes(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/model/GetMicrosegmentChangers',
callback=get_microsegment_changers_with_attributes_callback,
content_type='application/json'
)
client = Client('username', 'password')
data = client.model.get_microsegment_changers('2016-01-01', '2016-01-31', ['Alias', 'Country'], ',')
self.assertEqual(data, [
{
'customer_id': '231342',
'initial': 4,
'final': 12,
'attributes': {
'Alias': 'BuddyZZ',
'Country': 'UK'
}
},
{
'customer_id': '231342',
'initial': 3,
'final': 67,
'attributes': {
'Alias': 'Player99',
'Country': 'US'
}
},
])
@responses.activate
def test_get_microsegment_changers_with_wrong_delimiter(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/model/GetMicrosegmentChangers',
callback=get_microsegment_changers_with_attributes_callback,
content_type='application/json'
)
client = Client('username', 'password')
self.assertRaises(Exception, client.model.get_microsegment_changers,
'2016-01-01', '2016-01-31', ['Alias', 'Country'], '/')
@responses.activate
def test_get_microsegment_changers_with_empty_dates(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/model/GetMicrosegmentChangers',
callback=get_microsegment_changers_with_attributes_callback,
content_type='application/json'
)
client = Client('username', 'password')
self.assertRaises(Exception, client.model.get_microsegment_changers,
'2016-01-01', None, ['Alias', 'Country'], ',')
@responses.activate
def test_get_microsegment_changers_with_wrong_dates(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/model/GetMicrosegmentChangers',
callback=get_microsegment_changers_with_attributes_callback,
content_type='application/json'
)
client = Client('username', 'password')
data = client.model.get_microsegment_changers('3016-01-01', '3016-01-31', ['Alias', 'Country'], ',')
self.assertFalse(data)
| true | true |
1c2d514493460328193b72d99aa045afae9eace2 | 1,192 | py | Python | deploy/probers/postmark_api_probe.py | d0sadata/studio | ec3b805c0b546fe8884d446152eead90bea4174d | [
"MIT"
] | 60 | 2018-03-29T23:33:29.000Z | 2022-03-19T12:10:56.000Z | deploy/probers/postmark_api_probe.py | d0sadata/studio | ec3b805c0b546fe8884d446152eead90bea4174d | [
"MIT"
] | 2,260 | 2018-03-02T23:14:49.000Z | 2022-03-29T20:57:21.000Z | deploy/probers/postmark_api_probe.py | d0sadata/studio | ec3b805c0b546fe8884d446152eead90bea4174d | [
"MIT"
] | 86 | 2018-03-19T21:26:35.000Z | 2022-03-28T10:09:17.000Z | #!/usr/bin/env python
import requests
from base import BaseProbe
POSTMARK_SERVICE_STATUS_URL = "https://status.postmarkapp.com/api/1.0/services"
# (See here for API details: https://status.postmarkapp.com/api)
ALL_POSSIBLE_STATUSES = ['UP', 'MAINTENANCE', 'DELAY', 'DEGRADED', 'DOWN']
PASSING_POSTMARK_STATUSES = {
'/services/smtp': ['UP', 'MAINTENANCE'],
'/services/api': ALL_POSSIBLE_STATUSES,
'/services/inbound': ALL_POSSIBLE_STATUSES,
'/services/web': ALL_POSSIBLE_STATUSES
}
class PostmarkProbe(BaseProbe):
    """Probe that queries Postmark's public status API and fails when a
    monitored service reports a disallowed status."""
    metric = "postmark_api_latency_msec"
    def do_probe(self):
        """Fetch all service statuses and raise if any is not acceptable.

        Raises:
            Exception: when a service's status is outside its allowed set,
                or when Postmark reports a service URL we have no allowed
                set for.
        """
        r = requests.get(url=POSTMARK_SERVICE_STATUS_URL)
        # Fail with a clear HTTP error instead of a JSON decode error when
        # the status endpoint itself is unavailable.
        r.raise_for_status()
        for service in r.json():
            # .get() with a default avoids `status in None` (TypeError) if
            # Postmark ever adds a service URL missing from our mapping; an
            # unknown service then fails via the explicit exception below.
            allowed_statuses = PASSING_POSTMARK_STATUSES.get(service['url'], [])
            if service['status'] in allowed_statuses:
                continue
            raise Exception("Postmark's `%s` service has status %s, but we require one of the following: %s" % (
                service['name'],
                service['status'],
                allowed_statuses
                )
            )
# Entry point when executed directly as a script.
if __name__ == "__main__":
    PostmarkProbe().run()
| 29.8 | 112 | 0.630872 |
import requests
from base import BaseProbe
POSTMARK_SERVICE_STATUS_URL = "https://status.postmarkapp.com/api/1.0/services"
ALL_POSSIBLE_STATUSES = ['UP', 'MAINTENANCE', 'DELAY', 'DEGRADED', 'DOWN']
PASSING_POSTMARK_STATUSES = {
'/services/smtp': ['UP', 'MAINTENANCE'],
'/services/api': ALL_POSSIBLE_STATUSES,
'/services/inbound': ALL_POSSIBLE_STATUSES,
'/services/web': ALL_POSSIBLE_STATUSES
}
class PostmarkProbe(BaseProbe):
metric = "postmark_api_latency_msec"
def do_probe(self):
r = requests.get(url=POSTMARK_SERVICE_STATUS_URL)
for service in r.json():
allowed_statuses = PASSING_POSTMARK_STATUSES.get(service['url'])
passing = service['status'] in allowed_statuses
if passing:
continue
raise Exception("Postmark's `%s` service has status %s, but we require one of the following: %s" % (
service['name'],
service['status'],
allowed_statuses
)
)
if __name__ == "__main__":
PostmarkProbe().run()
| true | true |
1c2d51e5bab3f453fb62111623bcdc152a930b61 | 4,551 | py | Python | reference plug-in/_rsgTmp069_Form.py | GCaptainNemo/ABAQUS-FML-impact-plugin | 8f18ba231b9f64ee43c0217f1d9e488d0df9963b | [
"MIT"
] | null | null | null | reference plug-in/_rsgTmp069_Form.py | GCaptainNemo/ABAQUS-FML-impact-plugin | 8f18ba231b9f64ee43c0217f1d9e488d0df9963b | [
"MIT"
] | null | null | null | reference plug-in/_rsgTmp069_Form.py | GCaptainNemo/ABAQUS-FML-impact-plugin | 8f18ba231b9f64ee43c0217f1d9e488d0df9963b | [
"MIT"
] | null | null | null | from abaqusGui import *
from abaqusConstants import ALL
import osutils, os
###########################################################################
# Class definition
###########################################################################
class _rsgTmp069_Form(AFXForm):
    """RSG-generated Abaqus GUI form that collects impact-model parameters
    and issues the ``Test`` method of the ``impact_kernel`` module."""
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def __init__(self, owner):
        """Register the kernel command and create one keyword per GUI widget.

        owner: the GUI object (plug-in/module) that owns this form.
        """
        # Construct the base class.
        AFXForm.__init__(self, owner)
        self.radioButtonGroups = {}
        self.cmd = AFXGuiCommand(mode=self, method='Test',
            objectName='impact_kernel', registerQuery=False)
        # Geometry and meshing keywords; the trailing value is the default
        # shown in the dialog.
        self.Long_WholeKw = AFXFloatKeyword(self.cmd, 'Long_Whole', True, 170)
        self.Width_WholeKw = AFXFloatKeyword(self.cmd, 'Width_Whole', True, 90)
        self.Mesh_Size_WholeKw = AFXFloatKeyword(self.cmd, 'Mesh_Size_Whole', True, 1.2)
        self.Long_CenterKw = AFXFloatKeyword(self.cmd, 'Long_Center', True, 40)
        self.Width_CenterKw = AFXFloatKeyword(self.cmd, 'Width_Center', True, 40)
        self.Mesh_Size_CenterKw = AFXFloatKeyword(self.cmd, 'Mesh_Size_Center', True, 0.3)
        # Impactor and analysis keywords.
        self.RadiusKw = AFXFloatKeyword(self.cmd, 'Radius', True, 6.5)
        self.SpeedKw = AFXFloatKeyword(self.cmd, 'Speed', True, 3200)
        self.Mesh_Size_ImpactKw = AFXFloatKeyword(self.cmd, 'Mesh_Size_Impact', True, 1)
        self.Total_TimeKw = AFXFloatKeyword(self.cmd, 'Total_Time', True, 0.01)
        # Radio-button group: kw1 holds the selected widget id, kw2 holds
        # the string actually sent to the kernel, and the trailing dict maps
        # widget ids to command values.
        # NOTE: `key in dict` replaces Python-2-only dict.has_key(); it is
        # equivalent under the Abaqus Python 2 kernel and also valid Python 3.
        if 'GroupBox14' not in self.radioButtonGroups:
            self.GroupBox14Kw1 = AFXIntKeyword(None, 'GroupBox14Dummy', True)
            self.GroupBox14Kw2 = AFXStringKeyword(self.cmd, 'GroupBox14', True)
            self.radioButtonGroups['GroupBox14'] = (self.GroupBox14Kw1, self.GroupBox14Kw2, {})
        self.radioButtonGroups['GroupBox14'][2][118] = '1/4 model'
        # The group was just created above, so this second guard never fires;
        # it is kept because the RSG generator emits one guard per button.
        if 'GroupBox14' not in self.radioButtonGroups:
            self.GroupBox14Kw1 = AFXIntKeyword(None, 'GroupBox14Dummy', True)
            self.GroupBox14Kw2 = AFXStringKeyword(self.cmd, 'GroupBox14', True)
            self.radioButtonGroups['GroupBox14'] = (self.GroupBox14Kw1, self.GroupBox14Kw2, {})
        self.radioButtonGroups['GroupBox14'][2][119] = '1/2 model'
        # Material names and the lay-up (stacking sequence) table.
        self.Metal_nameKw = AFXStringKeyword(self.cmd, 'Metal_name', True)
        self.polymer_nameKw = AFXStringKeyword(self.cmd, 'polymer_name', True)
        self.StackKw = AFXTableKeyword(self.cmd, 'Stack', True)
        self.StackKw.setColumnType(0, AFXTABLE_TYPE_BOOL)
        self.StackKw.setColumnType(1, AFXTABLE_TYPE_BOOL)
        self.StackKw.setColumnType(2, AFXTABLE_TYPE_FLOAT)
        self.StackKw.setColumnType(3, AFXTABLE_TYPE_FLOAT)
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def getFirstDialog(self):
        """Import and return the RSG-generated dialog box for this form."""
        import _rsgTmp069_DB
        return _rsgTmp069_DB._rsgTmp069_DB(self)
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def doCustomChecks(self):
        """Copy each radio group's selected widget id into its command keyword.

        If the user did not select any button (no dict entry for the dummy
        keyword's value), that group is silently left unchanged.
        """
        for kw1, kw2, d in self.radioButtonGroups.values():
            try:
                value = d[kw1.getValue()]
                kw2.setValue(value)
            except Exception:
                # No (valid) selection for this group; leave it as-is.
                pass
        return True
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def deactivate(self):
        """Best-effort removal of the temporary RSG modules for this plug-in.

        Each file is removed independently so one failure does not skip the
        rest; failures (e.g. file already gone) are deliberately ignored.
        """
        # Hard-coded path written by the RSG dialog builder on the author's
        # machine — TODO: derive from the plug-in location instead.
        plugin_dir = 'c:\\Users\\wang1\\abaqus_plugins\\impact_xiugai'
        for base in ('_rsgTmp069_DB', '_rsgTmp069_Form'):
            for ext in ('.py', '.pyc'):
                try:
                    osutils.remove(os.path.join(plugin_dir, base + ext), force=True)
                except Exception:
                    pass
        AFXForm.deactivate(self)
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def getCommandString(self):
        """Prefix the generated command with the kernel-module import."""
        cmds = 'import impact_kernel\n'
        cmds += AFXForm.getCommandString(self)
        return cmds
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def okToCancel(self):
        """Return False: file operations / model changes need not close the dialog."""
        return False
| 45.51 | 127 | 0.564052 | from abaqusGui import *
from abaqusConstants import ALL
import osutils, os
| true | true |
1c2d521f423e63631e5d234cf7221103c532ad42 | 111 | py | Python | utilities/generators/null_generator.py | gioele8/AI-soccer-highlights | 756b6b6f332cedbfbc5a3540d0c6d7aa50219e51 | [
"MIT"
] | null | null | null | utilities/generators/null_generator.py | gioele8/AI-soccer-highlights | 756b6b6f332cedbfbc5a3540d0c6d7aa50219e51 | [
"MIT"
] | null | null | null | utilities/generators/null_generator.py | gioele8/AI-soccer-highlights | 756b6b6f332cedbfbc5a3540d0c6d7aa50219e51 | [
"MIT"
] | null | null | null | class NullGenerator:
def __init__(self):
pass
def generate(self, video):
return video
| 15.857143 | 30 | 0.612613 | class NullGenerator:
def __init__(self):
pass
def generate(self, video):
return video
| true | true |
1c2d5252031ca9f832d73527b31abcd0d8f9fda6 | 28,574 | py | Python | src/dnc/azext_dnc/vendored_sdks/dnc/models/_models.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/dnc/azext_dnc/vendored_sdks/dnc/models/_models.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/dnc/azext_dnc/vendored_sdks/dnc/models/_models.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ControllerDetails(msrest.serialization.Model):
    """controller details.

    :param id: controller arm resource id.
    :type id: str
    """
    # msrest (de)serialization map: model attribute -> wire key and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(ControllerDetails, self).__init__(**kwargs)
        # Optional field: defaults to None when the caller omits it.
        self.id = kwargs.get('id', None)
class ControllerResource(msrest.serialization.Model):
    """Represents an instance of a resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: An identifier that represents the resource.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of resource.
    :vartype type: str
    :param location: Location of the resource.
    :type location: str
    :param tags: A set of tags. The resource tags.
    :type tags: dict[str, str]
    """
    # Fields marked readonly are server-populated and are not serialized on
    # outgoing requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }
    # msrest (de)serialization map: model attribute -> wire key and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(ControllerResource, self).__init__(**kwargs)
        # Server-populated (readonly) fields start out unset.
        self.id = None
        self.name = None
        self.type = None
        # Caller-supplied optional fields: default to None when omitted.
        self.location = kwargs.get('location', None)
        self.tags = kwargs.get('tags', None)
class ControllerResourceUpdateParameters(msrest.serialization.Model):
    """Parameters for updating a resource.

    :param tags: A set of tags. The resource tags.
    :type tags: dict[str, str]
    """
    # msrest (de)serialization map: model attribute -> wire key and type.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(ControllerResourceUpdateParameters, self).__init__(**kwargs)
        # Optional field: defaults to None when the caller omits it.
        self.tags = kwargs.get('tags', None)
class DelegatedController(ControllerResource):
    """Represents an instance of a DNC controller.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: An identifier that represents the resource.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of resource.
    :vartype type: str
    :param location: Location of the resource.
    :type location: str
    :param tags: A set of tags. The resource tags.
    :type tags: dict[str, str]
    :ivar properties: Properties of the provision operation request.
    :vartype properties: ~dnc.models.DelegatedControllerProperties
    """
    # Fields marked readonly are server-populated and are not serialized on
    # outgoing requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'properties': {'readonly': True},
    }
    # msrest (de)serialization map: model attribute -> wire key and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'properties': {'key': 'properties', 'type': 'DelegatedControllerProperties'},
    }
    def __init__(
        self,
        **kwargs
    ):
        # The base class consumes 'location'/'tags' from kwargs.
        super(DelegatedController, self).__init__(**kwargs)
        # Server-populated (readonly) field starts out unset.
        self.properties = None
class DelegatedControllerProperties(msrest.serialization.Model):
    """Properties of Delegated controller resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar resource_guid: Resource guid.
    :vartype resource_guid: str
    :ivar provisioning_state: The current state of dnc controller resource. Possible values
     include: "Deleting", "Succeeded", "Failed", "Provisioning".
    :vartype provisioning_state: str or ~dnc.models.ControllerState
    :ivar dnc_app_id: dnc application id should be used by customer to authenticate with dnc
     gateway.
    :vartype dnc_app_id: str
    :ivar dnc_tenant_id: tenant id of dnc application id.
    :vartype dnc_tenant_id: str
    :ivar dnc_endpoint: dnc endpoint url that customers can use to connect to.
    :vartype dnc_endpoint: str
    """
    # All fields are readonly: server-populated, never serialized on
    # outgoing requests.
    _validation = {
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'dnc_app_id': {'readonly': True},
        'dnc_tenant_id': {'readonly': True},
        'dnc_endpoint': {'readonly': True},
    }
    # msrest (de)serialization map: model attribute -> wire key and type.
    _attribute_map = {
        'resource_guid': {'key': 'resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'dnc_app_id': {'key': 'dncAppId', 'type': 'str'},
        'dnc_tenant_id': {'key': 'dncTenantId', 'type': 'str'},
        'dnc_endpoint': {'key': 'dncEndpoint', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(DelegatedControllerProperties, self).__init__(**kwargs)
        # Server-populated fields start out unset.
        self.resource_guid = None
        self.provisioning_state = None
        self.dnc_app_id = None
        self.dnc_tenant_id = None
        self.dnc_endpoint = None
class DelegatedControllers(msrest.serialization.Model):
"""An array of Delegated controller resources.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. An array of Delegated controller resources.
:type value: list[~dnc.models.DelegatedController]
:ivar next_link: The URL to get the next set of controllers.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DelegatedController]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DelegatedControllers, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = None
class DelegatedSubnetResource(msrest.serialization.Model):
"""Represents an instance of a resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: An identifier that represents the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of resource.
:vartype type: str
:param location: Location of the resource.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(DelegatedSubnetResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
class DelegatedSubnet(DelegatedSubnetResource):
"""Represents an instance of a orchestrator.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: An identifier that represents the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of resource.
:vartype type: str
:param location: Location of the resource.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:ivar resource_guid: Resource guid.
:vartype resource_guid: str
:ivar provisioning_state: The current state of dnc delegated subnet resource. Possible values
include: "Deleting", "Succeeded", "Failed", "Provisioning".
:vartype provisioning_state: str or ~dnc.models.DelegatedSubnetState
:param subnet_details: subnet details.
:type subnet_details: ~dnc.models.SubnetDetails
:param controller_details: Properties of the controller.
:type controller_details: ~dnc.models.ControllerDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource_guid': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'subnet_details': {'key': 'properties.subnetDetails', 'type': 'SubnetDetails'},
'controller_details': {'key': 'properties.controllerDetails', 'type': 'ControllerDetails'},
}
def __init__(
self,
**kwargs
):
super(DelegatedSubnet, self).__init__(**kwargs)
self.resource_guid = None
self.provisioning_state = None
self.subnet_details = kwargs.get('subnet_details', None)
self.controller_details = kwargs.get('controller_details', None)
class DelegatedSubnets(msrest.serialization.Model):
"""An array of DelegatedSubnet resources.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. An array of DelegatedSubnet resources.
:type value: list[~dnc.models.DelegatedSubnet]
:ivar next_link: The URL to get the next set of DelegatedSubnet resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DelegatedSubnet]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DelegatedSubnets, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = None
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: object
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~dnc.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~dnc.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~dnc.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class Operation(msrest.serialization.Model):
"""Details of a REST API operation, returned from the Resource Provider Operations API.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the operation, as per Resource-Based Access Control (RBAC). Examples:
"Microsoft.Compute/virtualMachines/write", "Microsoft.Compute/virtualMachines/capture/action".
:vartype name: str
:ivar is_data_action: Whether the operation applies to data-plane. This is "true" for data-
plane operations and "false" for ARM/control-plane operations.
:vartype is_data_action: bool
:param display: Localized display information for this particular operation.
:type display: ~dnc.models.OperationDisplay
:ivar origin: The intended executor of the operation; as in Resource Based Access Control
(RBAC) and audit logs UX. Default value is "user,system". Possible values include: "user",
"system", "user,system".
:vartype origin: str or ~dnc.models.Origin
:ivar action_type: Enum. Indicates the action type. "Internal" refers to actions that are for
internal only APIs. Possible values include: "Internal".
:vartype action_type: str or ~dnc.models.ActionType
"""
_validation = {
'name': {'readonly': True},
'is_data_action': {'readonly': True},
'origin': {'readonly': True},
'action_type': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'action_type': {'key': 'actionType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = None
self.is_data_action = None
self.display = kwargs.get('display', None)
self.origin = None
self.action_type = None
class OperationDisplay(msrest.serialization.Model):
"""Localized display information for this particular operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: The localized friendly form of the resource provider name, e.g. "Microsoft
Monitoring Insights" or "Microsoft Compute".
:vartype provider: str
:ivar resource: The localized friendly name of the resource type related to this operation.
E.g. "Virtual Machines" or "Job Schedule Collections".
:vartype resource: str
:ivar operation: The concise, localized friendly name for the operation; suitable for
dropdowns. E.g. "Create or Update Virtual Machine", "Restart Virtual Machine".
:vartype operation: str
:ivar description: The short, localized friendly description of the operation; suitable for
tool tips and detailed views.
:vartype description: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
class OperationListResult(msrest.serialization.Model):
"""A list of REST API operations supported by an Azure Resource Provider. It contains an URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of operations supported by the resource provider.
:vartype value: list[~dnc.models.Operation]
:ivar next_link: URL to get the next set of operation list results (if there are any).
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class OrchestratorResource(msrest.serialization.Model):
"""Represents an instance of a resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: An identifier that represents the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of resource.
:vartype type: str
:param location: Location of the resource.
:type location: str
:param kind: Required. The kind of workbook. Choices are user and shared. Possible values
include: "Kubernetes".
:type kind: str or ~dnc.models.OrchestratorKind
:param identity: The identity of the orchestrator.
:type identity: ~dnc.models.OrchestratorIdentity
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'OrchestratorIdentity'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(OrchestratorResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.kind = kwargs['kind']
self.identity = kwargs.get('identity', None)
self.tags = kwargs.get('tags', None)
class Orchestrator(OrchestratorResource):
"""Represents an instance of a orchestrator.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: An identifier that represents the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of resource.
:vartype type: str
:param location: Location of the resource.
:type location: str
:param kind: Required. The kind of workbook. Choices are user and shared. Possible values
include: "Kubernetes".
:type kind: str or ~dnc.models.OrchestratorKind
:param identity: The identity of the orchestrator.
:type identity: ~dnc.models.OrchestratorIdentity
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param properties: Properties of the provision operation request.
:type properties: ~dnc.models.OrchestratorResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'OrchestratorIdentity'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'OrchestratorResourceProperties'},
}
def __init__(
self,
**kwargs
):
super(Orchestrator, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class OrchestratorIdentity(msrest.serialization.Model):
"""OrchestratorIdentity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of the system assigned identity which is used by
orchestrator.
:vartype principal_id: str
:ivar tenant_id: The tenant id of the system assigned identity which is used by orchestrator.
:vartype tenant_id: str
:param type: The type of identity used for orchestrator cluster. Type 'SystemAssigned' will use
an implicitly created identity orchestrator clusters. Possible values include:
"SystemAssigned", "None".
:type type: str or ~dnc.models.ResourceIdentityType
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OrchestratorIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = kwargs.get('type', None)
class OrchestratorResourceProperties(msrest.serialization.Model):
"""Properties of orchestrator.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar resource_guid: Resource guid.
:vartype resource_guid: str
:ivar provisioning_state: The current state of orchestratorInstance resource. Possible values
include: "Deleting", "Succeeded", "Failed", "Provisioning".
:vartype provisioning_state: str or ~dnc.models.OrchestratorInstanceState
:param orchestrator_app_id: AAD ID used with apiserver.
:type orchestrator_app_id: str
:param orchestrator_tenant_id: TenantID of server App ID.
:type orchestrator_tenant_id: str
:param cluster_root_ca: RootCA certificate of kubernetes cluster base64 encoded.
:type cluster_root_ca: str
:param api_server_endpoint: K8s APIServer url. Either one of apiServerEndpoint or
privateLinkResourceId can be specified.
:type api_server_endpoint: str
:param private_link_resource_id: private link arm resource id. Either one of apiServerEndpoint
or privateLinkResourceId can be specified.
:type private_link_resource_id: str
:param controller_details: Required. Properties of the controller.
:type controller_details: ~dnc.models.ControllerDetails
"""
_validation = {
'resource_guid': {'readonly': True},
'provisioning_state': {'readonly': True},
'controller_details': {'required': True},
}
_attribute_map = {
'resource_guid': {'key': 'resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'orchestrator_app_id': {'key': 'orchestratorAppId', 'type': 'str'},
'orchestrator_tenant_id': {'key': 'orchestratorTenantId', 'type': 'str'},
'cluster_root_ca': {'key': 'clusterRootCA', 'type': 'str'},
'api_server_endpoint': {'key': 'apiServerEndpoint', 'type': 'str'},
'private_link_resource_id': {'key': 'privateLinkResourceId', 'type': 'str'},
'controller_details': {'key': 'controllerDetails', 'type': 'ControllerDetails'},
}
def __init__(
self,
**kwargs
):
super(OrchestratorResourceProperties, self).__init__(**kwargs)
self.resource_guid = None
self.provisioning_state = None
self.orchestrator_app_id = kwargs.get('orchestrator_app_id', None)
self.orchestrator_tenant_id = kwargs.get('orchestrator_tenant_id', None)
self.cluster_root_ca = kwargs.get('cluster_root_ca', None)
self.api_server_endpoint = kwargs.get('api_server_endpoint', None)
self.private_link_resource_id = kwargs.get('private_link_resource_id', None)
self.controller_details = kwargs['controller_details']
class OrchestratorResourceUpdateParameters(msrest.serialization.Model):
"""Parameters for updating a resource.
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(OrchestratorResourceUpdateParameters, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
class Orchestrators(msrest.serialization.Model):
"""An array of OrchestratorInstance resources.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. An array of OrchestratorInstance resources.
:type value: list[~dnc.models.Orchestrator]
:ivar next_link: The URL to get the next set of orchestrators.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Orchestrator]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Orchestrators, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = None
class ResourceUpdateParameters(msrest.serialization.Model):
"""Parameters for updating a resource.
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(ResourceUpdateParameters, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
class SubnetDetails(msrest.serialization.Model):
"""Properties of orchestrator.
:param id: subnet arm resource id.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SubnetDetails, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
| 33.380841 | 165 | 0.623189 |
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ControllerDetails(msrest.serialization.Model):
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ControllerDetails, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
class ControllerResource(msrest.serialization.Model):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(ControllerResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
class ControllerResourceUpdateParameters(msrest.serialization.Model):
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(ControllerResourceUpdateParameters, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
class DelegatedController(ControllerResource):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'DelegatedControllerProperties'},
}
def __init__(
self,
**kwargs
):
super(DelegatedController, self).__init__(**kwargs)
self.properties = None
class DelegatedControllerProperties(msrest.serialization.Model):
_validation = {
'resource_guid': {'readonly': True},
'provisioning_state': {'readonly': True},
'dnc_app_id': {'readonly': True},
'dnc_tenant_id': {'readonly': True},
'dnc_endpoint': {'readonly': True},
}
_attribute_map = {
'resource_guid': {'key': 'resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'dnc_app_id': {'key': 'dncAppId', 'type': 'str'},
'dnc_tenant_id': {'key': 'dncTenantId', 'type': 'str'},
'dnc_endpoint': {'key': 'dncEndpoint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DelegatedControllerProperties, self).__init__(**kwargs)
self.resource_guid = None
self.provisioning_state = None
self.dnc_app_id = None
self.dnc_tenant_id = None
self.dnc_endpoint = None
class DelegatedControllers(msrest.serialization.Model):
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DelegatedController]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DelegatedControllers, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = None
class DelegatedSubnetResource(msrest.serialization.Model):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(DelegatedSubnetResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
class DelegatedSubnet(DelegatedSubnetResource):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource_guid': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'subnet_details': {'key': 'properties.subnetDetails', 'type': 'SubnetDetails'},
'controller_details': {'key': 'properties.controllerDetails', 'type': 'ControllerDetails'},
}
def __init__(
self,
**kwargs
):
super(DelegatedSubnet, self).__init__(**kwargs)
self.resource_guid = None
self.provisioning_state = None
self.subnet_details = kwargs.get('subnet_details', None)
self.controller_details = kwargs.get('controller_details', None)
class DelegatedSubnets(msrest.serialization.Model):
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DelegatedSubnet]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DelegatedSubnets, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = None
class ErrorAdditionalInfo(msrest.serialization.Model):
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class Operation(msrest.serialization.Model):
_validation = {
'name': {'readonly': True},
'is_data_action': {'readonly': True},
'origin': {'readonly': True},
'action_type': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'action_type': {'key': 'actionType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = None
self.is_data_action = None
self.display = kwargs.get('display', None)
self.origin = None
self.action_type = None
class OperationDisplay(msrest.serialization.Model):
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
class OperationListResult(msrest.serialization.Model):
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class OrchestratorResource(msrest.serialization.Model):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'OrchestratorIdentity'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(OrchestratorResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.kind = kwargs['kind']
self.identity = kwargs.get('identity', None)
self.tags = kwargs.get('tags', None)
class Orchestrator(OrchestratorResource):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'OrchestratorIdentity'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'OrchestratorResourceProperties'},
}
def __init__(
self,
**kwargs
):
super(Orchestrator, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class OrchestratorIdentity(msrest.serialization.Model):
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OrchestratorIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = kwargs.get('type', None)
class OrchestratorResourceProperties(msrest.serialization.Model):
_validation = {
'resource_guid': {'readonly': True},
'provisioning_state': {'readonly': True},
'controller_details': {'required': True},
}
_attribute_map = {
'resource_guid': {'key': 'resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'orchestrator_app_id': {'key': 'orchestratorAppId', 'type': 'str'},
'orchestrator_tenant_id': {'key': 'orchestratorTenantId', 'type': 'str'},
'cluster_root_ca': {'key': 'clusterRootCA', 'type': 'str'},
'api_server_endpoint': {'key': 'apiServerEndpoint', 'type': 'str'},
'private_link_resource_id': {'key': 'privateLinkResourceId', 'type': 'str'},
'controller_details': {'key': 'controllerDetails', 'type': 'ControllerDetails'},
}
def __init__(
self,
**kwargs
):
super(OrchestratorResourceProperties, self).__init__(**kwargs)
self.resource_guid = None
self.provisioning_state = None
self.orchestrator_app_id = kwargs.get('orchestrator_app_id', None)
self.orchestrator_tenant_id = kwargs.get('orchestrator_tenant_id', None)
self.cluster_root_ca = kwargs.get('cluster_root_ca', None)
self.api_server_endpoint = kwargs.get('api_server_endpoint', None)
self.private_link_resource_id = kwargs.get('private_link_resource_id', None)
self.controller_details = kwargs['controller_details']
class OrchestratorResourceUpdateParameters(msrest.serialization.Model):
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(OrchestratorResourceUpdateParameters, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
class Orchestrators(msrest.serialization.Model):
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Orchestrator]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Orchestrators, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = None
class ResourceUpdateParameters(msrest.serialization.Model):
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(ResourceUpdateParameters, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
class SubnetDetails(msrest.serialization.Model):
    """Reference to a subnet by its resource id.

    :keyword id: The resource id string of the subnet.
    """
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(SubnetDetails, self).__init__(**kwargs)
        # Attribute name mirrors the wire key; shadows builtin `id` only as an attribute.
        self.id = kwargs.get('id')
| true | true |
1c2d528294ca63b0f78f6e625d55e5b6098b4ec2 | 5,605 | py | Python | support/closure-library/closure/bin/build/depstree.py | joe-greenawalt/skulpt | 1db078e2f6d453403287233254b012bf31960ef4 | [
"MIT"
] | 2 | 2021-01-10T16:19:38.000Z | 2021-06-14T22:09:59.000Z | support/closure-library/closure/bin/build/depstree.py | csev/skulpt | 9aa25b7dbf29f23ee8d3140d01a6f4353d12e66f | [
"MIT"
] | null | null | null | support/closure-library/closure/bin/build/depstree.py | csev/skulpt | 9aa25b7dbf29f23ee8d3140d01a6f4353d12e66f | [
"MIT"
] | 1 | 2015-06-28T18:58:22.000Z | 2015-06-28T18:58:22.000Z | # Copyright 2009 The Closure Library Authors. All Rights Reserved.
"""Class to represent a full Closure Library dependency tree.
Offers a queryable tree of dependencies of a given set of sources. The tree
will also do logical validation to prevent duplicate provides and circular
dependencies.
"""
class DepsTree(object):
  """Represents the set of dependencies between source files.

  Offers a queryable tree of dependencies of a given set of sources. The
  tree also does logical validation to prevent duplicate provides and
  circular dependencies.
  """

  def __init__(self, sources):
    """Initializes the tree with a set of sources.

    Args:
      sources: A set of JavaScript sources.

    Raises:
      MultipleProvideError: A namespace is provided by multiple sources.
      NamespaceNotFoundError: A namespace is required but never provided.
    """
    self._sources = sources
    self._provides_map = {}

    # Ensure nothing was provided twice.
    for source in sources:
      for provide in source.provides:
        if provide in self._provides_map:
          raise MultipleProvideError(
              provide, [self._provides_map[provide], source])
        self._provides_map[provide] = source

    # Check that all required namespaces are provided.
    for source in sources:
      for require in source.requires:
        if require not in self._provides_map:
          raise NamespaceNotFoundError(require, source)

  def GetDependencies(self, required_namespaces):
    """Get source dependencies, in order, for the given namespaces.

    Args:
      required_namespaces: A string (for one) or list (for one or more) of
        namespaces.

    Returns:
      A list of source objects that provide those namespaces and all
      requirements, in dependency order.

    Raises:
      NamespaceNotFoundError: A namespace is requested but doesn't exist.
      CircularDependencyError: A cycle is detected in the dependency tree.
    """
    # isinstance (rather than an exact type() check) also accepts str
    # subclasses as a single-namespace argument.
    if isinstance(required_namespaces, str):
      required_namespaces = [required_namespaces]

    deps_sources = []
    for namespace in required_namespaces:
      for source in DepsTree._ResolveDependencies(
          namespace, [], self._provides_map, []):
        if source not in deps_sources:
          deps_sources.append(source)

    return deps_sources

  @staticmethod
  def _ResolveDependencies(required_namespace, deps_list, provides_map,
                           traversal_path):
    """Resolve dependencies for Closure source files.

    Follows the dependency tree down and builds a list of sources in
    dependency order. This function will recursively call itself to fill all
    dependencies below the requested namespaces, and then append its sources
    at the end of the list.

    Args:
      required_namespace: String of required namespace.
      deps_list: List of sources in dependency order. This function will
        append the required source once all of its dependencies are satisfied.
      provides_map: Map from namespace to source that provides it.
      traversal_path: List of namespaces of our path from the root down the
        dependency/recursion tree. Used as a stack to identify cyclical
        dependencies: the current namespace is pushed on entry and popped
        right before returning; seeing it already present means a cycle.

    Returns:
      The given deps_list object filled with sources in dependency order.

    Raises:
      NamespaceNotFoundError: A namespace is requested but doesn't exist.
      CircularDependencyError: A cycle is detected in the dependency tree.
    """
    source = provides_map.get(required_namespace)
    if not source:
      raise NamespaceNotFoundError(required_namespace)

    if required_namespace in traversal_path:
      traversal_path.append(required_namespace)  # do this *after* the test
      # This must be a cycle.
      raise CircularDependencyError(traversal_path)

    traversal_path.append(required_namespace)
    for require in source.requires:
      # Append all other dependencies before we append our own.
      DepsTree._ResolveDependencies(require, deps_list, provides_map,
                                    traversal_path)
    deps_list.append(source)
    traversal_path.pop()

    return deps_list
class BaseDepsTreeError(Exception):
  """Common base class for all DepsTree errors."""

  def __init__(self):
    super(BaseDepsTreeError, self).__init__()
class CircularDependencyError(BaseDepsTreeError):
  """Raised when a dependency cycle is encountered."""

  def __init__(self, dependency_list):
    super(CircularDependencyError, self).__init__()
    self._dependency_list = dependency_list

  def __str__(self):
    # Show the full traversal path so the cycle is visible to the user.
    path_text = '\n'.join(self._dependency_list)
    return 'Encountered circular dependency:\n%s\n' % path_text
class MultipleProvideError(BaseDepsTreeError):
  """Raised when a namespace is provided more than once."""

  def __init__(self, namespace, sources):
    super(MultipleProvideError, self).__init__()
    self._namespace = namespace
    self._sources = sources

  def __str__(self):
    # List every source claiming the namespace, one per line.
    listed = '\n'.join(str(src) for src in self._sources)
    return ('Namespace "%s" provided more than once in sources:\n%s\n'
            % (self._namespace, listed))
class NamespaceNotFoundError(BaseDepsTreeError):
  """Raised when a namespace is requested but not provided."""

  def __init__(self, namespace, source=None):
    super(NamespaceNotFoundError, self).__init__()
    self._namespace = namespace
    self._source = source

  def __str__(self):
    parts = ['Namespace "%s" never provided.' % self._namespace]
    if self._source:
      # Point at the requiring source when it is known.
      parts.append(' Required in %s' % self._source)
    return ''.join(parts)
class DepsTree(object):
  """Represents the set of dependencies between source files.
  Validates at construction time that no namespace is provided twice and
  that every required namespace is provided; then answers dependency-order
  queries via GetDependencies().
  """
  def __init__(self, sources):
    """Initializes the tree from a set of sources.
    Raises:
      MultipleProvideError: A namespace is provided by multiple sources.
      NamespaceNotFoundError: A namespace is required but never provided.
    """
    self._sources = sources
    self._provides_map = dict()
    # Ensure nothing was provided twice.
    for source in sources:
      for provide in source.provides:
        if provide in self._provides_map:
          raise MultipleProvideError(
              provide, [self._provides_map[provide], source])
        self._provides_map[provide] = source
    # Check that all required namespaces are provided.
    for source in sources:
      for require in source.requires:
        if require not in self._provides_map:
          raise NamespaceNotFoundError(require, source)
  def GetDependencies(self, required_namespaces):
    """Returns sources providing the given namespace(s), in dependency order.
    required_namespaces may be a single string or a list of strings.
    """
    if type(required_namespaces) is str:
      required_namespaces = [required_namespaces]
    deps_sources = []
    for namespace in required_namespaces:
      for source in DepsTree._ResolveDependencies(
          namespace, [], self._provides_map, []):
        # Deduplicate while preserving first-seen (dependency) order.
        if source not in deps_sources:
          deps_sources.append(source)
    return deps_sources
  @staticmethod
  def _ResolveDependencies(required_namespace, deps_list, provides_map,
                           traversal_path):
    """Recursively appends the dependencies of required_namespace, then the
    providing source itself, to deps_list.
    traversal_path is used as a stack of the namespaces currently being
    resolved; re-encountering one means the dependency graph has a cycle.
    Raises:
      NamespaceNotFoundError: A namespace is requested but doesn't exist.
      CircularDependencyError: A cycle is detected in the dependency tree.
    """
    source = provides_map.get(required_namespace)
    if not source:
      raise NamespaceNotFoundError(required_namespace)
    if required_namespace in traversal_path:
      traversal_path.append(required_namespace)  # include offender in report
      raise CircularDependencyError(traversal_path)
    traversal_path.append(required_namespace)
    for require in source.requires:
      # Append all other dependencies before we append our own.
      DepsTree._ResolveDependencies(require, deps_list, provides_map,
                                    traversal_path)
    deps_list.append(source)
    traversal_path.pop()
    return deps_list
class BaseDepsTreeError(Exception):
  """Base class for all DepsTree errors."""
  def __init__(self):
    Exception.__init__(self)
class CircularDependencyError(BaseDepsTreeError):
  """Raised when a dependency cycle is encountered."""
  def __init__(self, dependency_list):
    BaseDepsTreeError.__init__(self)
    self._dependency_list = dependency_list
  def __str__(self):
    # Report the full traversal path so the cycle is visible.
    return ('Encountered circular dependency:\n%s\n' %
            '\n'.join(self._dependency_list))
class MultipleProvideError(BaseDepsTreeError):
  """Raised when a namespace is provided more than once."""
  def __init__(self, namespace, sources):
    BaseDepsTreeError.__init__(self)
    self._namespace = namespace
    self._sources = sources
  def __str__(self):
    # One offending source per line.
    source_strs = map(str, self._sources)
    return ('Namespace "%s" provided more than once in sources:\n%s\n' %
            (self._namespace, '\n'.join(source_strs)))
class NamespaceNotFoundError(BaseDepsTreeError):
  """Raised when a namespace is requested but not provided."""
  def __init__(self, namespace, source=None):
    BaseDepsTreeError.__init__(self)
    self._namespace = namespace
    self._source = source
  def __str__(self):
    msg = 'Namespace "%s" never provided.' % self._namespace
    if self._source:
      # Mention the requiring source when it is known.
      msg += ' Required in %s' % self._source
    return msg
1c2d5367feaf6d35c6324d2dcee1a16f4eb96b0e | 57,762 | py | Python | utils/datasets.py | Royzon/YOLOV4_MCMOT | cd4c8b1b60f9cf809579609caa29d408432845ba | [
"MIT"
] | 94 | 2020-08-10T13:37:23.000Z | 2022-03-03T10:08:53.000Z | utils/datasets.py | Royzon/YOLOV4_MCMOT | cd4c8b1b60f9cf809579609caa29d408432845ba | [
"MIT"
] | 18 | 2020-09-30T09:55:10.000Z | 2021-11-22T19:52:20.000Z | utils/datasets.py | Royzon/YOLOV4_MCMOT | cd4c8b1b60f9cf809579609caa29d408432845ba | [
"MIT"
] | 30 | 2020-09-23T02:39:07.000Z | 2021-12-30T09:58:47.000Z | # encoding=utf-8
import glob
import math
import os
import random
import shutil
import time
from collections import defaultdict
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4']
# Find the numeric EXIF tag id whose name is 'Orientation'. The loop variable
# `orientation` is deliberately left bound at module level and is read later
# by exif_size() when correcting image width/height.
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def exif_size(img):
    """Return a PIL image's (width, height), corrected for EXIF orientation.

    :param img: a PIL Image; EXIF data is read via the private ``_getexif()``
        accessor, which may be absent or return None.
    :return: (width, height) tuple; the two values are swapped when the EXIF
        orientation tag indicates a 90- or 270-degree rotation.
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation in (6, 8):  # 6 = rotated 270, 8 = rotated 90: swap w/h
            s = (s[1], s[0])
    except Exception:
        # Best-effort: images without EXIF data (or without an orientation
        # tag) keep their reported size. Narrowed from a bare `except` so
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        pass
    return s
class LoadImages: # for inference
    """Iterator over image files and/or video files for inference.
    Each iteration yields (path, net-input image as RGB CHW ndarray,
    original BGR image, cv2.VideoCapture or None).
    """
    def __init__(self, path, net_w=416, net_h=416):
        """
        :param path: a list of file paths, or a directory path, or a single
            image/video file path
        :param net_w: network input width in pixels
        :param net_h: network input height in pixels
        """
        if type(path) == list:
            # Caller already supplies an explicit list of image paths.
            self.files = path
            nI, nV = len(self.files), 0
            self.nF = nI + nV # number of files
            self.video_flag = [False] * nI + [True] * nV
            # net input height width
            self.net_w = net_w
            self.net_h = net_h
            self.mode = 'images'
            self.cap = None
        else:
            path = str(Path(path)) # os-agnostic
            files = []
            if os.path.isdir(path):
                files = sorted(glob.glob(os.path.join(path, '*.*')))
            elif os.path.isfile(path):
                files = [path]
            else:
                print('[Err]: invalid file list path.')
                exit(-1)
            # Split files into images and videos by extension.
            images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
            videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
            nI, nV = len(images), len(videos)
            self.net_w = net_w
            self.net_h = net_h
            self.files = images + videos
            self.nF = nI + nV # number of files
            self.video_flag = [False] * nI + [True] * nV
            self.mode = 'images'
            if any(videos):
                self.new_video(videos[0]) # new video
            else:
                self.cap = None
        assert self.nF > 0, 'No images or videos found in ' + path
    def __iter__(self):
        # Reset the file cursor at the start of each pass.
        self.count = 0
        return self
    def __next__(self):
        """Returns (path, padded/resized RGB CHW image, original BGR image, cap)."""
        if self.count == self.nF:
            raise StopIteration
        path = self.files[self.count]
        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # Current video exhausted: advance to the next file.
                self.count += 1
                self.cap.release()
                if self.count == self.nF: # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()
            if self.frame % 30 == 0:
                # Progress print, once every 30 frames.
                print('video (%g/%g) %s: ' % (self.frame, self.nframes, path))
            self.frame += 1
        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path) # HWC(BGR)
            assert img0 is not None, 'Image Not Found ' + path
            print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
        # Pad and resize to the fixed network input size (keeps aspect ratio).
        img = pad_resize_ratio(img0, self.net_w, self.net_h)
        # Convert: BGR to RGB and HWC to CHW
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)
        return path, img, img0, self.cap
    def new_video(self, path):
        # Open a new capture and reset the per-video frame counter.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
    def __len__(self):
        return self.nF # number of files
class LoadWebcam: # for inference
    """Iterator over frames from a local webcam or a streaming pipe.
    Each iteration yields ('webcam.jpg', letterboxed RGB CHW frame,
    original BGR frame, None). Press 'q' in an OpenCV window to stop.
    """
    def __init__(self, pipe=0, img_size=416):
        """
        :param pipe: camera index ('0'/0 for the local camera) or a stream URL
        :param img_size: target letterbox size for the network input
        """
        self.img_size = img_size
        if pipe == '0':
            pipe = 0 # local camera
        # Example stream sources (kept for reference):
        # pipe = 'rtsp://192.168.1.64/1' # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
        # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe) # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
    def __iter__(self):
        self.count = -1
        return self
    def __next__(self):
        """Returns ('webcam.jpg', letterboxed RGB CHW frame, original BGR frame, None)."""
        self.count += 1
        if cv2.waitKey(1) == ord('q'): # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration
        # Read frame
        if self.pipe == 0: # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1) # flip left-right (mirror the local camera)
        else: # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0: # skip frames: decode only every 30th grab
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break
        # Print
        assert ret_val, 'Camera Error %s' % self.pipe
        img_path = 'webcam.jpg'
        print('webcam %g: ' % self.count, end='')
        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]
        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)
        return img_path, img, img0, None
    def __len__(self):
        return 0
class LoadStreams: # multiple IP or RTSP cameras
    """Reads several video streams concurrently (one daemon thread each) and
    yields one batched frame stack per iteration:
    (sources, batched RGB NCHW frames, list of original BGR frames, None).
    """
    def __init__(self, sources='streams.txt', img_size=416):
        """
        :param sources: path to a text file with one stream URL per line, or a
            single source string
        :param img_size: target letterbox size for the network input
        """
        self.mode = 'images'
        self.img_size = img_size
        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]
        n = len(sources)
        self.imgs = [None] * n # latest frame per stream, written by the threads
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read() # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('') # newline
        # check for common shapes
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            cap.grab()
            if n == 4: # read every 4th frame to limit decode load
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01) # wait time
    def __iter__(self):
        self.count = -1
        return self
    def __next__(self):
        """Returns (sources, batched RGB NCHW frames, list of BGR frames, None)."""
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'): # q to quit
            cv2.destroyAllWindows()
            raise StopIteration
        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
        # Stack
        img = np.stack(img, 0)
        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)
        return self.sources, img, img0, None
    def __len__(self):
        return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImgsAndLbsWithID(Dataset): # for training/testing
    """Dataset of images whose label rows also carry a per-class track id
    (MOT-style 'labels_with_ids' files).
    Label row layout on disk: [cls_id, track_id, x_center, y_center, w, h]
    with normalized coordinates; track ids start at 1.
    """
    def __init__(self,
                 path,
                 img_size=416,
                 batch_size=16,
                 augment=False,
                 hyp=None,
                 rect=False,
                 image_weights=False,
                 cache_images=False,
                 single_cls=False):
        """
        :param path: text file listing image paths (one per line)
        :param img_size: square network input size used when rect is False
        :param batch_size: training batch size (used to group rect shapes)
        :param augment: enable augmentation (mosaic/affine/HSV/flip)
        :param hyp: dict of augmentation hyper-parameters
        :param rect: rectangular training (per-batch aspect-ratio shapes)
        :param image_weights: sample images by weights (disables rect)
        :param cache_images: preload all images into RAM
        :param single_cls: collapse all classes into class 0
        """
        path = str(Path(path)) # os-agnostic
        assert os.path.isfile(path), 'File not found %s. See %s' % (path, help_url)
        with open(path, 'r') as f:
            self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines() # os-agnostic
                              if os.path.splitext(x)[-1].lower() in img_formats]
        n = len(self.img_files)
        assert n > 0, 'No images found in %s. See %s' % (path, help_url)
        # NOTE: np.int was removed in NumPy 1.24; builtin int is equivalent.
        bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index
        nb = bi[-1] + 1 # number of batches
        self.n = n
        self.batch = bi # batch index of each image
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        # load 4 images at a time into a mosaic (only during training)
        self.mosaic = self.augment and not self.rect
        # Define labels: mirror image paths into 'labels_with_ids' .txt files.
        self.label_files = [x.replace('JPEGImages', 'labels_with_ids').replace(os.path.splitext(x)[-1], '.txt')
                            for x in self.img_files]
        # Rectangular Training https://github.com/ultralytics/yolov3/issues/232
        if self.rect:
            # Read image shapes (wh)
            sp = path.replace('.txt', '.shapes') # shape file path
            try:
                with open(sp, 'r') as f: # read existing shape file
                    s = [x.split() for x in f.read().splitlines()]
                    assert len(s) == n, 'Shape file out of sync'
            except Exception: # narrowed from bare except: rebuild the shape cache
                s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
                np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
            # Sort by aspect ratio
            s = np.array(s, dtype=np.float64)
            ar = s[:, 1] / s[:, 0] # aspect ratio
            rect_idx = ar.argsort() # renamed from `i` to avoid loop-variable shadowing
            self.img_files = [self.img_files[i] for i in rect_idx]
            self.label_files = [self.label_files[i] for i in rect_idx]
            self.shapes = s[rect_idx] # wh
            ar = ar[rect_idx]
            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]
            # Round per-batch shapes up to a multiple of 64 pixels.
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / 64.).astype(int) * 64
        # ----- Cache labels
        # count max track ids for each object class
        self.max_ids_dict = defaultdict(int) # cls_id => max track id
        self.imgs = [None] * n
        self.labels = [np.zeros((0, 6), dtype=np.float32)] * n
        extract_bounding_boxes = False
        create_data_subset = False
        p_bar = tqdm(self.label_files, desc='Caching labels')
        nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
        for i, file in enumerate(p_bar):
            try:
                with open(file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
            except Exception: # narrowed from bare except: label file missing/unreadable
                nm += 1
                continue
            if lb.shape[0]: # objects number in the image
                assert lb.shape[1] == 6, '!= 6 label columns: %s' % file
                assert (lb >= 0).all(), 'negative labels: %s' % file
                assert (lb[:, 2:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
                if np.unique(lb, axis=0).shape[0] < lb.shape[0]: # duplicate rows
                    nd += 1
                if single_cls:
                    lb[:, 0] = 0 # force dataset into single-class mode: turn mc to sc
                self.labels[i] = lb
                # count independent id number for each object class
                for item in lb: # each GT object in the label
                    if item[1] > self.max_ids_dict[int(item[0])]: # item[0]: cls_id, item[1]: track id
                        self.max_ids_dict[int(item[0])] = int(item[1])
                nf += 1 # file found
                # Create sub-dataset (a smaller dataset)
                if create_data_subset and ns < 1E4:
                    if ns == 0:
                        create_folder(path='./datasubset')
                        os.makedirs('./datasubset/images')
                    exclude_classes = 43
                    if exclude_classes not in lb[:, 0]:
                        ns += 1
                        with open('./datasubset/images.txt', 'a') as f:
                            f.write(self.img_files[i] + '\n')
                # Extract object detection boxes for a second stage classifier
                if extract_bounding_boxes:
                    p = Path(self.img_files[i])
                    img = cv2.imread(str(p))
                    h, w = img.shape[:2]
                    for j, x in enumerate(lb):
                        f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
                        if not os.path.exists(Path(f).parent):
                            os.makedirs(Path(f).parent) # make new output folder
                        b = x[1:] * [w, h, w, h] # box
                        b[2:] = b[2:].max() # rectangle to square
                        b[2:] = b[2:] * 1.3 + 30 # pad
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
                        b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
                        b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                        assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
            else:
                ne += 1 # file empty
            p_bar.desc = 'Caching labels (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
                nf, nm, ne, nd, n)
        if nf == 0:
            print('No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url))
            exit(-1)
        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        if cache_images: # if training
            gb = 0 # Gigabytes of cached images
            p_bar = tqdm(range(len(self.img_files)), desc='Caching images')
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            for i in p_bar:
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
                gb += self.imgs[i].nbytes
                p_bar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
        # Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
        detect_corrupted_images = False
        if detect_corrupted_images:
            from skimage import io # conda install -c conda-forge scikit-image
            for file in tqdm(self.img_files, desc='Detecting corrupted images'):
                try:
                    _ = io.imread(file)
                except Exception:
                    print('Corrupted image detected: %s' % file)
    def __len__(self):
        return len(self.img_files)
    def __getitem__(self, idx):
        """Returns (CHW RGB image tensor, labels [nL, 6], image path, shapes,
        track id tensor). Label columns: [batch_idx placeholder, cls, x, y, w, h],
        normalized; track ids are zero-based here.
        """
        if self.image_weights:
            idx = self.indices[idx]
        hyp = self.hyp
        if self.mosaic:
            # Load mosaic (4 images stitched together)
            img, labels, track_ids = load_mosaic_with_ids(self, idx)
            shapes = None
        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, idx)
            # Letterbox
            shape = self.batch_shapes[self.batch[idx]] if self.rect else self.img_size # final letter_boxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
            # Load labels
            labels = []
            x = self.labels[idx][:, [0, 2, 3, 4, 5]] # do not load track id here
            if x.size > 0: # pad[0]: pad width, pad[1]: pad height
                # Normalized xywh to pixel xyxy format: in net input size(e.g. 768×768)
                labels = x.copy() # labels of this image
                labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # x1
                labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # y1
                labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0] # x2
                labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1] # y2
            # Load track ids. BUG FIX: the basic slice returns a *view* of the
            # cached label array; the in-place `-= 1` below previously mutated
            # the cache, corrupting track ids on every epoch. Copy first.
            track_ids = self.labels[idx][:, 1].copy()
            track_ids -= 1 # track id starts from 1(not 0)
        if self.augment:
            # Augment image space
            if not self.mosaic: # after random affine, some GT objects may be out of image range
                img, labels, track_ids = random_affine_with_ids(img,
                                                                labels,
                                                                track_ids,
                                                                degrees=hyp['degrees'],
                                                                translate=hyp['translate'],
                                                                scale=hyp['scale'],
                                                                shear=hyp['shear'])
            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
        nL = len(labels) # number of labels
        if nL:
            # convert xyxy to xywh(center_x, center_y, bbox_w, bbox_h)
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
            # Normalize coordinates 0 - 1
            labels[:, [2, 4]] /= img.shape[0] # height
            labels[:, [1, 3]] /= img.shape[1] # width
        if self.augment: # random flipping
            # random left-right flip
            lr_flip = True
            if lr_flip and random.random() < 0.5:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]
            # random up-down flip
            ud_flip = False
            if ud_flip and random.random() < 0.5:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]
        labels_out = torch.zeros((nL, 6)) # column0 means item_i in the batch
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)
        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)
        track_ids = torch.from_numpy(track_ids).long()
        return torch.from_numpy(img), labels_out, self.img_files[idx], shapes, track_ids
    @staticmethod
    def collate_fn(batch):
        """Stacks a list of samples into batch tensors and writes each sample's
        in-batch index into column 0 of its label rows (used by build_targets).
        """
        img, label, path, shapes, track_ids = zip(*batch) # transposed
        for i, l in enumerate(label):
            l[:, 0] = i # add target image index for build_targets(): index of the sample in the batch
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes, torch.cat(track_ids, 0)
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self,
path,
img_size=416,
batch_size=16,
augment=False,
hyp=None,
rect=False,
image_weights=False,
cache_images=False,
single_cls=False):
"""
:param path:
:param img_size:
:param batch_size:
:param augment:
:param hyp:
:param rect:
:param image_weights:
:param cache_images:
:param single_cls:
"""
path = str(Path(path)) # os-agnostic
assert os.path.isfile(path), 'File not found %s. See %s' % (path, help_url)
with open(path, 'r') as f:
self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines() # os-agnostic
if os.path.splitext(x)[-1].lower() in img_formats]
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n
self.batch = bi # batch index of each image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
# load 4 images at a time into a mosaic (only during training)
self.mosaic = self.augment and not self.rect
# self.mosaic = False
# Define labels
self.label_files = [x.replace('JPEGImages', 'labels').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# print(self.label_files[0])
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Read image shapes (wh)
sp = path.replace('.txt', '.shapes') # shape file path
try:
with open(sp, 'r') as f: # read existing shape file
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shape file out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
# Sort by aspect ratio
s = np.array(s, dtype=np.float64)
ar = s[:, 1] / s[:, 0] # aspect ratio
i = ar.argsort()
self.img_files = [self.img_files[i] for i in i]
self.label_files = [self.label_files[i] for i in i]
self.shapes = s[i] # wh
ar = ar[i]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / 64.).astype(np.int) * 64
# ---------- Cache labels: pure negative image sample(only contain background)
# by caching
self.imgs = [None] * n
self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
extract_bounding_boxes = False
create_data_subset = False
pbar = tqdm(self.label_files, desc='Caching labels')
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
for i, file in enumerate(pbar):
try:
with open(file, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]: # objects number of this label
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode: turn mc to sc
# Filling the label
self.labels[i] = l
nf += 1 # file found
# Create sub dataset (a smaller dataset)
if create_data_subset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Caching labels (%g found, %g missing, %g empty, %g duplicate, for %g images)' \
% (nf, nm, ne, nd, n)
assert nf > 0, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
    def __getitem__(self, index):
        """Return one training sample.

        :param index: dataset index (remapped if image_weights sampling is on)
        :return: (img CHW RGB float tensor source, labels_out (nL, 6), img path, shapes)
                 labels_out rows are [sample_idx placeholder, cls, x, y, w, h] with
                 xywh normalized to 0-1; column 0 is filled in later by collate_fn.
        """
        if self.image_weights:
            # image-weighted sampling: indices precomputed elsewhere on the dataset
            index = self.indices[index]

        hyp = self.hyp
        if self.mosaic:
            # Load mosaic (4-image collage); shapes unused in this branch
            img, labels = load_mosaic(self, index)
            shapes = None
        else:
            # Load image (possibly from RAM cache), resized so max side <= img_size
            img, (h0, w0), (h, w) = load_image(self, index)

            # ---------- Letterbox
            # final letter-boxed shape
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size
            # ----- letter box
            # or pad_resize_ratio(this methods keeps consistency with dark-net)
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling
            # ----------

            # Load labels
            labels = []
            x = self.labels[index]
            if x.size > 0:  # pad[0]: pad width, pad[1]: pad height
                # Normalized xywh to pixel xyxy format: in net input size(e.g. 768×768)
                labels = x.copy()
                labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # x1
                labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # y1
                labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]  # x2
                labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]  # y2

        if self.augment:
            # Augment image space (mosaic already applied its own affine)
            if not self.mosaic:
                img, labels = random_affine(img, labels,
                                            degrees=hyp['degrees'],
                                            translate=hyp['translate'],
                                            scale=hyp['scale'],
                                            shear=hyp['shear'])  # the warp may drop labels pushed outside the image

            # Augment colorspace (in place)
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            # convert xyxy to xywh(center_x, center_y, bbox_w, bbox_h)
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])

            # Normalize coordinates 0 - 1
            labels[:, [2, 4]] /= img.shape[0]  # height
            labels[:, [1, 3]] /= img.shape[1]  # width

        if self.augment:
            # random left-right flip (x center mirrored because coords are normalized)
            lr_flip = True
            if lr_flip and random.random() < 0.5:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

            # random up-down flip (disabled by default)
            ud_flip = False
            if ud_flip and random.random() < 0.5:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

        # extra leading column reserved for the batch-sample index, written by collate_fn
        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets(): index of the sample in the batch
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
def load_image(self, index):
    """Load one dataset image, preferring the in-RAM cache.

    :param self: dataset object providing imgs/img_files/img_size/augment caches
    :param index: dataset index
    :return: (img BGR, (h0, w0) original size, (h, w) resized size)
    """
    cached = self.imgs[index]
    if cached is not None:
        # cache hit: original/resized sizes were stored alongside the image
        return cached, self.img_hw0[index], self.img_hw[index]

    img_path = self.img_files[index]
    img = cv2.imread(img_path)  # BGR
    assert img is not None, 'Image Not Found ' + img_path
    h0, w0 = img.shape[:2]  # orig hw
    ratio = self.img_size / max(h0, w0)  # resize longest side to img_size
    # always shrink; enlarge only while training with augmentation
    if ratio != 1 and (ratio < 1 or self.augment):
        # INTER_AREA gives better downscaling quality outside augmentation
        interp = cv2.INTER_LINEAR if (self.augment or ratio >= 1) else cv2.INTER_AREA
        img = cv2.resize(img, (int(w0 * ratio), int(h0 * ratio)), interpolation=interp)
    return img, (h0, w0), img.shape[:2]
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """Randomly jitter hue/saturation/value of *img* (BGR uint8) in place."""
    gains = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random per-channel gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    # build one 256-entry lookup table per channel
    table = np.arange(0, 256, dtype=np.int16)
    hue_lut = ((table * gains[0]) % 180).astype(dtype)  # OpenCV hue range is [0, 180)
    sat_lut = np.clip(table * gains[1], 0, 255).astype(dtype)
    val_lut = np.clip(table * gains[2], 0, 255).astype(dtype)

    jittered = cv2.merge((cv2.LUT(hue, hue_lut), cv2.LUT(sat, sat_lut), cv2.LUT(val, val_lut))).astype(dtype)
    cv2.cvtColor(jittered, cv2.COLOR_HSV2BGR, dst=img)  # write back into img; no return needed
def load_mosaic_with_ids(self, index):
    """Build a 2s x 2s mosaic (s = self.img_size) from image *index* plus 3 random images.

    Unlike load_mosaic(), the per-object track ids are carried through and
    filtered together with the boxes by random_affine_with_ids().

    :param self: dataset object; self.labels rows are [cls, track_id, x, y, w, h]
    :param index: index of the primary image
    :return: (mosaic image, labels [cls, x1, y1, x2, y2] in pixels, track_ids)
    """
    # loads images in a mosaic
    labels4, label4_orig = [], []
    s = self.img_size
    xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)]  # mosaic center x, y
    indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4: compute the paste region in the big canvas (a)
        # and the matching crop region in the small image (b)
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        pad_w = x1a - x1b
        pad_h = y1a - y1b

        # Labels
        x = self.labels[index][:, [0, 2, 3, 4, 5]]  # do not load track id here.
        y = self.labels[index]
        labels = x.copy()  # labels without ids
        labels_orig = y.copy()  # labels with ids
        if x.size > 0:  # Normalized xywh to pixel xyxy format (shifted into mosaic coords)
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + pad_w
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + pad_h
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + pad_w
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + pad_h
        labels4.append(labels)
        label4_orig.append(labels_orig)

    # Concat/clip labels (labels4 always holds 4 entries, possibly empty arrays)
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        label4_orig = np.concatenate(label4_orig, 0)  # fractional coordinates
        # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:])  # use with center crop
        np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])  # use with random_affine
        track_ids = label4_orig[:, 1]
        track_ids -= 1  # track id starts from 1(not 0)

    # Augment: warp the whole mosaic, cropping back towards s x s via the negative border
    img4, labels4, track_ids = random_affine_with_ids(img4,
                                                      labels4,
                                                      track_ids,
                                                      degrees=self.hyp['degrees'],
                                                      translate=self.hyp['translate'],
                                                      scale=self.hyp['scale'],
                                                      shear=self.hyp['shear'],
                                                      border=-s // 2)  # border to remove
    return img4, labels4, track_ids
def load_mosaic(self, index):
    """Build a 2s x 2s mosaic (s = self.img_size) from image *index* plus 3 random images.

    :param self: dataset object; self.labels rows are [cls, x, y, w, h] normalized
    :param index: index of the primary image
    :return: (mosaic image, labels [cls, x1, y1, x2, y2] in pixels)
    """
    # loads images in a mosaic
    labels4 = []
    s = self.img_size
    xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)]  # mosaic center x, y
    indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4: paste region in the canvas (a) vs crop region in the tile (b)
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        x = self.labels[index]
        labels = x.copy()
        if x.size > 0:  # Normalized xywh to pixel xyxy format (shifted into mosaic coords)
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
        labels4.append(labels)

    # Concat/clip labels
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:])  # use with center crop
        np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])  # use with random_affine

    # Augment: warp the whole mosaic, cropping back towards s x s via the negative border
    # img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)]  # center crop (WARNING, requires box pruning)
    img4, labels4 = random_affine(img4, labels4,
                                  degrees=self.hyp['degrees'],
                                  translate=self.hyp['translate'],
                                  scale=self.hyp['scale'],
                                  shear=self.hyp['shear'],
                                  border=-s // 2)  # border to remove
    return img4, labels4
# keep aspect ratio
def pad_resize_ratio(img, net_w, net_h):
    """Resize *img* to fit (net_w, net_h) keeping aspect ratio, padding with gray (127).

    :param img: input image, H x W x C (anything np.array() accepts)
    :param net_w: target network input width
    :param net_h: target network input height
    :return: net_h x net_w x C image
    """
    img = np.array(img)  # H x W x channels
    H, W, channels = img.shape
    if net_h / net_w < H / W:  # image relatively taller than target: pad width
        new_h = int(net_h)
        new_w = int(net_h / H * W)
        pad = (net_w - new_w) // 2
        left = round(pad - 0.1)
        right = round(pad + 0.1)
        top, bottom = 0, 0
    else:  # image relatively wider than target: pad height
        new_h = int(net_w / W * H)
        new_w = int(net_w)
        pad = (net_h - new_h) // 2
        left, right = 0, 0
        top = round(pad - 0.1)
        bottom = round(pad + 0.1)

    # BUGFIX: interpolation must be passed by keyword -- the third positional
    # argument of cv2.resize is 'dst', so cv2.INTER_LINEAR was silently ignored.
    img_resize = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)

    # add gray border to reach exactly (net_h, net_w)
    img_out = cv2.copyMakeBorder(img_resize, top, bottom, left, right, cv2.BORDER_CONSTANT, value=127)
    return img_out
def pad_resize_img_square(img, square_size):
    """Pad *img* to a square with gray borders, then resize to (square_size, square_size).

    :param img: RGB image, H x W x C
    :param square_size: output side length in pixels
    :return: square image
    """
    img = np.array(img)  # H x W x channels
    H, W, channels = img.shape
    dim_diff = np.abs(H - W)

    # upper(left) and lower(right) padding
    pad_lu = dim_diff // 2  # integer division
    pad_rd = dim_diff - pad_lu

    # determine padding for each axis: H, W, channels
    pad = ((pad_lu, pad_rd), (0, 0), (0, 0)) if H <= W else \
        ((0, 0), (pad_lu, pad_rd), (0, 0))

    # do padding(0.5) and normalize
    img = np.pad(img,
                 pad,
                 'constant',
                 constant_values=127.5)  # / 255.0

    # BUGFIX: interpolation must be passed by keyword -- the third positional
    # argument of cv2.resize is 'dst', so cv2.INTER_LINEAR was silently ignored.
    img = cv2.resize(img,
                     (square_size, square_size),
                     interpolation=cv2.INTER_LINEAR)

    return img
def letterbox(img,
              new_shape=(416, 416),
              color=(114, 114, 114),
              auto=True,
              scaleFill=False,
              scaleup=True):
    """Resize *img* to *new_shape* keeping aspect ratio, padding the remainder with *color*.

    :param img: input BGR image (HWC)
    :param new_shape: target (height, width), or an int for a square target
    :param color: padding color
    :param auto: pad only to the next multiple of 64 (minimum rectangle)
    :param scaleFill: stretch to new_shape exactly, no padding
    :param scaleup: allow upscaling (disable for better test mAP)
    :return: (padded image, (w_ratio, h_ratio), (dw, dh) padding per side)
    """
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = new_shape
        ratio = new_shape[0] / shape[1], new_shape[1] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    # rounding +-0.1 splits odd padding between the two sides
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=0):
    """Apply a random rotation/scale/translation/shear to *img* and its labels.

    :param img: HWC image
    :param targets: label array [cls, x1, y1, x2, y2] in pixels (may be empty)
    :param degrees: max absolute rotation angle (degrees)
    :param translate: max translation as a fraction of the image size
    :param scale: max relative scale jitter
    :param shear: max absolute shear angle (degrees)
    :param border: negative value shrinks the output (used by the mosaic loader)
    :return: (warped image, surviving targets with updated boxes)
    """
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4

    if targets is None:  # targets = [cls, xyxy]
        targets = []
    height = img.shape[0] + border * 2
    width = img.shape[1] + border * 2

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border  # x translation (pixels)
    T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border  # y translation (pixels)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Combined rotation matrix
    M = S @ T @ R  # ORDER IS IMPORTANT HERE!!
    if (border != 0) or (M != np.eye(3)).any():  # image changed
        img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp points: all 4 corners of every box
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = (xy @ M.T)[:, :2].reshape(n, 8)

        # create new boxes: axis-aligned hull of the warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # reject warped points outside of image
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
        w = xy[:, 2] - xy[:, 0]
        h = xy[:, 3] - xy[:, 1]
        area = w * h
        area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))  # aspect ratio
        # survivor mask: big enough, not over-shrunk relative to original, sane aspect ratio
        i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)

        targets = targets[i]
        targets[:, 1:5] = xy[i]

    return img, targets
def random_affine_with_ids(img,
                           targets,
                           track_ids,
                           degrees=10,
                           translate=0.1,
                           scale=0.1,
                           shear=10,
                           border=0):
    """Random rotation/scale/translation/shear, filtering *targets* and *track_ids* together.

    Same transform as random_affine(), but the per-target track ids are kept
    aligned with the boxes via the same survivor mask.

    :param img: HWC image
    :param targets: label array [cls, x1, y1, x2, y2] in pixels
    :param track_ids: 1-D array of track ids, one per target row
    :param degrees: max absolute rotation angle (degrees)
    :param translate: max translation as a fraction of the image size
    :param scale: max relative scale jitter
    :param shear: max absolute shear angle (degrees)
    :param border: negative value shrinks the output (used by the mosaic loader)
    :return: (warped image, surviving targets, surviving track ids)
    """
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4

    if targets is None:  # targets = [cls, xyxy]
        targets = []
    height = img.shape[0] + border * 2
    width = img.shape[1] + border * 2

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border  # x translation (pixels)
    T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border  # y translation (pixels)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Combined rotation matrix
    M = S @ T @ R  # ORDER IS IMPORTANT HERE!!
    if (border != 0) or (M != np.eye(3)).any():  # image changed
        img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp points: all 4 corners of every box
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = (xy @ M.T)[:, :2].reshape(n, 8)

        # create new boxes: axis-aligned hull of the warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # reject warped points outside of image
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
        w = xy[:, 2] - xy[:, 0]
        h = xy[:, 3] - xy[:, 1]
        area = w * h
        area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))  # aspect ratio
        # survivor mask: big enough, not over-shrunk relative to original, sane aspect ratio
        i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)

        targets = targets[i]
        track_ids = track_ids[i]  # keep ids aligned with the surviving boxes
        targets[:, 1:5] = xy[i]

    return img, targets, track_ids
def cutout(image, labels):
    """Apply random-erasing masks to *image* in place and return the labels that
    are not heavily obscured by the masks.

    https://arxiv.org/abs/1708.04552
    https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
    """
    h, w = image.shape[:2]

    def bbox_ioa(box1, box2):
        """Intersection of box1 with each row of box2, normalized by box2 area (x1y1x2y2)."""
        box2 = box2.transpose()
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
        inter_w = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0)
        inter_h = (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
        box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
        return inter_w * inter_h / box2_area

    # mask side lengths as fractions of the image: 1x half, 2x quarter, ...
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
    for frac in scales:
        mask_h = random.randint(1, int(h * frac))
        mask_w = random.randint(1, int(w * frac))

        # mask box, clipped to the image
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        # paint the region with a random mid-range color
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

        # drop labels that the larger masks obscure by more than 60%
        if len(labels) and frac > 0.03:
            mask_box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            labels = labels[bbox_ioa(mask_box, labels[:, 1:5]) < 0.60]

    return labels
def reduce_img_size(path='../data/sm4/images',
                    img_size=1024):  # from evaluate_utils.datasets import *; reduce_img_size()
    """Write downscaled copies (longest side <= img_size) of all images in *path*
    into a sibling '<path>_reduced' folder; originals are left untouched."""
    path_new = path + '_reduced'  # reduced images path
    create_folder(path_new)
    for f in tqdm(glob.glob('%s/*.*' % path)):
        try:
            img = cv2.imread(f)
            h, w = img.shape[:2]
            r = img_size / max(h, w)  # size ratio
            if r < 1.0:
                img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA)  # _LINEAR fastest
            fnew = f.replace(path, path_new)  # .replace(Path(f).suffix, '.jpg')
            cv2.imwrite(fnew, img)
        except Exception:
            # BUGFIX: was a bare 'except:' which also swallowed KeyboardInterrupt/SystemExit
            print('WARNING: image failure %s' % f)
def convert_images2bmp():  # from evaluate_utils.datasets import *; convert_images2bmp()
    """Mirror the sm4 image folders into '<folder>bmp' with .bmp copies and
    rewrite the train/test list files to point at the new files."""
    exts = [e.lower() for e in img_formats] + [e.upper() for e in img_formats]

    # Save images
    for folder in ['../data/sm4/images', '../data/sm4/background']:
        create_folder(folder + 'bmp')
        for ext in exts:
            for src in tqdm(glob.glob('%s/*%s' % (folder, ext)), desc='Converting %s' % ext):
                dst = src.replace(ext.lower(), '.bmp').replace(folder, folder + 'bmp')
                cv2.imwrite(dst, cv2.imread(src))

    # Save labels: patch folder names and extensions in the list files
    for list_file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
        with open(list_file, 'r') as fh:
            text = fh.read()
        text = text.replace('/images', '/imagesbmp').replace('/background', '/backgroundbmp')
        for ext in exts:
            text = text.replace(ext, '.bmp')
        with open(list_file.replace('.txt', 'bmp.txt'), 'w') as fh:
            fh.write(text)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'):  # from evaluate_utils.datasets import *; recursive_dataset2bmp()
    """Recursively convert every image under *dataset* to .bmp (for faster training)
    and patch image extensions inside .txt list files in place."""
    exts = [e.lower() for e in img_formats] + [e.upper() for e in img_formats]
    for root, _, names in os.walk(dataset):
        for name in tqdm(names, desc=root):
            full_path = root + '/' + name
            suffix = Path(name).suffix
            if suffix == '.txt':
                # rewrite the image extensions referenced inside list files
                with open(full_path, 'r') as fh:
                    content = fh.read()
                for ext in exts:
                    content = content.replace(ext, '.bmp')
                with open(full_path, 'w') as fh:
                    fh.write(content)
            elif suffix in exts:
                # re-encode the image as bmp, then remove the original file
                cv2.imwrite(full_path.replace(suffix, '.bmp'), cv2.imread(full_path))
                if suffix != '.bmp':
                    os.system("rm '%s'" % full_path)
def imagelist2folder(path='data/coco_64img.txt'):  # from evaluate_utils.datasets import *; imagelist2folder()
    """Copy every image listed in *path* (one file path per line) into a folder
    named after the list file (list name minus '.txt').

    Uses shutil.copy instead of a shell 'cp' so paths containing spaces or shell
    metacharacters are handled safely and the function works cross-platform.
    """
    create_folder(path[:-4])  # e.g. 'data/coco_64img.txt' -> 'data/coco_64img/'
    with open(path, 'r') as f:
        for line in f.read().splitlines():
            shutil.copy(line, path[:-4])
            print(line)
def create_folder(path='./new_folder'):
    """Ensure *path* is a brand-new empty directory (any existing one is deleted)."""
    target = path
    if os.path.exists(target):
        shutil.rmtree(target)  # drop the old folder and everything inside it
    os.makedirs(target)
| 40.224234 | 238 | 0.511686 |
import glob
import math
import os
import random
import shutil
import time
from collections import defaultdict
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4']
# Find the numeric EXIF tag key whose name is 'Orientation'; exif_size() below
# uses this key to read the rotation flag from image metadata.
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def exif_size(img):
    """Return the PIL image size (width, height), corrected for EXIF rotation.

    :param img: PIL Image (anything with .size and optionally ._getexif())
    :return: (w, h) tuple, swapped when the EXIF orientation is a 90/270 rotation
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation in (6, 8):  # 6 = 270deg, 8 = 90deg: width/height are swapped
            s = (s[1], s[0])
    except Exception:
        # no EXIF data / no orientation tag: keep the reported size
        # (was a bare 'except:', which also swallowed KeyboardInterrupt)
        pass
    return s
class LoadImages:
    """Iterator over image files and video frames for inference.

    Each iteration yields (path, img CHW RGB, img0 original BGR, cap).
    """

    def __init__(self, path, net_w=416, net_h=416):
        """*path* may be an explicit list of image paths, a directory, or a single file."""
        if isinstance(path, list):  # explicit list of image files, no videos
            self.files = path
            nI, nV = len(self.files), 0
            self.nF = nI + nV  # total number of items
            self.video_flag = [False] * nI + [True] * nV
            self.net_w = net_w
            self.net_h = net_h
            self.mode = 'images'
            self.cap = None
        else:
            path = str(Path(path))
            files = []
            if os.path.isdir(path):
                files = sorted(glob.glob(os.path.join(path, '*.*')))
            elif os.path.isfile(path):
                files = [path]
            else:
                print('[Err]: invalid file list path.')
                exit(-1)

            # split by extension into still images and videos
            images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
            videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
            nI, nV = len(images), len(videos)

            self.net_w = net_w
            self.net_h = net_h
            self.files = images + videos
            self.nF = nI + nV  # number of files
            self.video_flag = [False] * nI + [True] * nV
            self.mode = 'images'
            if any(videos):
                self.new_video(videos[0])  # start the first video
            else:
                self.cap = None
            assert self.nF > 0, 'No images or videos found in ' + path

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        """Return the next (path, resized CHW RGB img, original img, cap)."""
        if self.count == self.nF:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read next video frame
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:  # this video is exhausted: advance to the next file
                self.count += 1
                self.cap.release()
                if self.count == self.nF:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            if self.frame % 30 == 0:
                print('video (%g/%g) %s: ' % (self.frame, self.nframes, path))
            self.frame += 1
        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print('image %g/%g %s: ' % (self.count, self.nF, path), end='')

        # Padded resize to the network input size (keeps aspect ratio).
        # NOTE(review): this line was corrupted in the source ('e_ratio(img0, ...)');
        # reconstructed as a pad_resize_ratio call -- confirm against repo history.
        img = pad_resize_ratio(img0, self.net_w, self.net_h)

        # Convert BGR HWC -> RGB CHW, contiguous for torch.from_numpy
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)

        # NOTE(review): the original return statement was truncated; restored to
        # the conventional (path, img, img0, cap) tuple.
        return path, img, img0, self.cap

    def new_video(self, path):
        """Open *path* with OpenCV and reset the frame counters."""
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nF  # number of files
class LoadWebcam:
    """Iterator over frames from a local webcam or an IP-camera stream.

    Each iteration yields (img_path, img CHW RGB, img0 original BGR, None).
    """

    def __init__(self, pipe=0, img_size=416):
        self.img_size = img_size

        if pipe == '0':
            pipe = 0  # local camera index
        # NOTE(review): the next line was a truncated URL left as bare code in the
        # source ('swers.opencv.org/...'), which is a syntax error; restored as a comment.
        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help
        # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right (mirror)
        else:  # IP camera: grab continuously, decode every 30th frame
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, 'Camera Error %s' % self.pipe
        img_path = 'webcam.jpg'
        print('webcam %g: ' % self.count, end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)
        return img_path, img, img0, None

    def __len__(self):
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    """Threaded reader for several camera/RTSP streams.

    One daemon thread per stream keeps self.imgs[i] refreshed; each iteration
    yields (sources, batched CHW RGB imgs, list of original frames, None).
    """

    def __init__(self, sources='streams.txt', img_size=416):
        self.mode = 'images'
        self.img_size = img_size

        # *sources* is either a text file with one stream URL per line, or a single URL
        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n  # latest frame per stream, refreshed by daemon threads
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()  # snapshot of the latest frames
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImgsAndLbsWithID(Dataset): # for training/testing
def __init__(self,
path,
img_size=416,
batch_size=16,
augment=False,
hyp=None,
rect=False,
image_weights=False,
cache_images=False,
single_cls=False):
path = str(Path(path)) # os-agnostic
assert os.path.isfile(path), 'File not found %s. See %s' % (path, help_url)
with open(path, 'r') as f:
self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines() # os-agnostic
if os.path.splitext(x)[-1].lower() in img_formats]
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n
self.batch = bi # batch index of each image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
# load 4 images at a time into a mosaic (only during training)
self.mosaic = self.augment and not self.rect
# self.mosaic = False
# Define labels
self.label_files = [x.replace('JPEGImages', 'labels_with_ids').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Read image shapes (wh)
sp = path.replace('.txt', '.shapes') # shape file path
try:
with open(sp, 'r') as f: # read existing shape file
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shape file out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
# Sort by aspect ratio
s = np.array(s, dtype=np.float64)
ar = s[:, 1] / s[:, 0] # aspect ratio
i = ar.argsort()
self.img_files = [self.img_files[i] for i in i]
self.label_files = [self.label_files[i] for i in i]
self.shapes = s[i] # wh
ar = ar[i]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / 64.).astype(np.int) * 64
# ----- Cache labels
# count max track ids for each object class
self.max_ids_dict = defaultdict(int) # cls_id => max track id
self.imgs = [None] * n
self.labels = [np.zeros((0, 6), dtype=np.float32)] * n
extract_bounding_boxes = False
create_data_subset = False
p_bar = tqdm(self.label_files, desc='Caching labels')
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
for i, file in enumerate(p_bar):
try:
with open(file, 'r') as f:
lb = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if lb.shape[0]: # objects number in the image
assert lb.shape[1] == 6, '!= 6 label columns: %s' % file
assert (lb >= 0).all(), 'negative labels: %s' % file
assert (lb[:, 2:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(lb, axis=0).shape[0] < lb.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
lb[:, 0] = 0 # force dataset into single-class mode: turn mc to sc
self.labels[i] = lb
# count independant id number for each object class
for item in lb: # each GT object in the label
if item[1] > self.max_ids_dict[int(item[0])]: # item[0]: cls_id, item[1]: track id
self.max_ids_dict[int(item[0])] = int(item[1])
nf += 1 # file found
# Create sub-dataset (a smaller dataset)
if create_data_subset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in lb[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(lb):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
p_bar.desc = 'Caching labels (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
nf, nm, ne, nd, n)
# assert nf > 0, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
if nf == 0:
print('No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url))
exit(-1)
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
p_bar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in p_bar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
p_bar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
    def __len__(self):
        """Number of samples in the dataset (one per image file)."""
        return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, idx):
if self.image_weights:
idx = self.indices[idx]
hyp = self.hyp
if self.mosaic:
# Load mosaic
# img, labels = load_mosaic(self, idx)
img, labels, track_ids = load_mosaic_with_ids(self, idx)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, idx)
# Letterbox
shape = self.batch_shapes[self.batch[idx]] if self.rect else self.img_size # final letter_boxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[idx][:, [0, 2, 3, 4, 5]] # do not load track id here
if x.size > 0: # pad[0]: pad width, pad[1]: pad height
# Normalized xywh to pixel xyxy format: in net input size(e.g. 768×768)
labels = x.copy() # labels of this image
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # x1
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # y1
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0] # x2
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1] # y2
# Load track ids
track_ids = self.labels[idx][:, 1]
track_ids -= 1 # track id starts from 1(not 0)
if self.augment:
# Augment image space
if not self.mosaic: # after random affine, some GT objects may be out of iamge range
# img, labels = random_affine(img,
# labels,
# degrees=hyp['degrees'],
# translate=hyp['translate'],
# scale=hyp['scale'],
# shear=hyp['shear'])
img, labels, track_ids = random_affine_with_ids(img,
labels,
track_ids,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh(center_x, center_y, bbox_w, bbox_h)
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment: # random flipping
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6)) # column0 means item_i in the batch
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
#
track_ids = torch.from_numpy(track_ids).long()
return torch.from_numpy(img), labels_out, self.img_files[idx], shapes, track_ids
@staticmethod
def collate_fn(batch):
img, label, path, shapes, track_ids = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets(): index of the sample in the batch
return torch.stack(img, 0), torch.cat(label, 0), path, shapes, torch.cat(track_ids, 0)
class LoadImagesAndLabels(Dataset):  # for training/testing
    """Image + YOLO-txt-label dataset.

    ``path`` is a text file listing one image path per line; each image's
    label file is resolved by replacing ``JPEGImages`` with ``labels`` and
    the image suffix with ``.txt``.  Supports rectangular batching, label
    pre-caching, optional in-RAM image caching and mosaic augmentation.
    """

    def __init__(self,
                 path,
                 img_size=416,
                 batch_size=16,
                 augment=False,
                 hyp=None,
                 rect=False,
                 image_weights=False,
                 cache_images=False,
                 single_cls=False):
        """Read the image list, resolve label paths and cache all labels.

        path: txt file with one image path per line.
        img_size: long-side size of the network input.
        rect: use per-batch rectangular shapes instead of squares.
        cache_images: preload every image into RAM (large datasets may
            exceed system memory).
        single_cls: collapse every class id to 0.
        """
        path = str(Path(path))  # os-agnostic
        assert os.path.isfile(path), 'File not found %s. See %s' % (path, help_url)
        with open(path, 'r') as f:
            self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines()  # os-agnostic
                              if os.path.splitext(x)[-1].lower() in img_formats]

        n = len(self.img_files)
        assert n > 0, 'No images found in %s. See %s' % (path, help_url)
        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
        nb = bi[-1] + 1  # number of batches

        self.n = n
        self.batch = bi  # batch index of each image
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect

        # load 4 images at a time into a mosaic (only during training)
        self.mosaic = self.augment and not self.rect
        # self.mosaic = False

        # Define labels
        self.label_files = [x.replace('JPEGImages', 'labels').replace(os.path.splitext(x)[-1], '.txt')
                            for x in self.img_files]
        # print(self.label_files[0])

        # Rectangular Training https://github.com/ultralytics/yolov3/issues/232
        if self.rect:
            # Read image shapes (wh)
            sp = path.replace('.txt', '.shapes')  # shape file path
            try:
                with open(sp, 'r') as f:  # read existing shape file
                    s = [x.split() for x in f.read().splitlines()]
                    assert len(s) == n, 'Shape file out of sync'
            except:
                s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
                np.savetxt(sp, s, fmt='%g')  # overwrites existing (if any)

            # Sort by aspect ratio
            s = np.array(s, dtype=np.float64)
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            i = ar.argsort()
            # reorder files/shapes by ascending aspect ratio (i is the index array)
            self.img_files = [self.img_files[i] for i in i]
            self.label_files = [self.label_files[i] for i in i]
            self.shapes = s[i]  # wh
            ar = ar[i]

            # Set training image shapes (one shape per batch)
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / 64.).astype(np.int) * 64

        # ---------- Cache labels: pure negative image sample(only contain background)
        # by caching
        self.imgs = [None] * n
        self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
        extract_bounding_boxes = False
        create_data_subset = False
        pbar = tqdm(self.label_files, desc='Caching labels')
        nm, nf, ne, ns, nd = 0, 0, 0, 0, 0  # number missing, found, empty, datasubset, duplicate
        for i, file in enumerate(pbar):
            try:
                with open(file, 'r') as f:
                    l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
            except:
                nm += 1  # print('missing labels for image %s' % self.img_files[i])  # file missing
                continue

            if l.shape[0]:  # objects number of this label
                assert l.shape[1] == 5, '> 5 label columns: %s' % file
                assert (l >= 0).all(), 'negative labels: %s' % file
                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
                if np.unique(l, axis=0).shape[0] < l.shape[0]:  # duplicate rows
                    nd += 1  # print('WARNING: duplicate rows in %s' % self.label_files[i])  # duplicate rows
                if single_cls:
                    l[:, 0] = 0  # force dataset into single-class mode: turn mc to sc

                # Filling the label
                self.labels[i] = l
                nf += 1  # file found

                # Create sub dataset (a smaller dataset)
                if create_data_subset and ns < 1E4:
                    if ns == 0:
                        create_folder(path='./datasubset')
                        os.makedirs('./datasubset/images')
                    exclude_classes = 43
                    if exclude_classes not in l[:, 0]:
                        ns += 1
                        # shutil.copy(src=self.img_files[i], dst='./datasubset/images/')  # copy image
                        with open('./datasubset/images.txt', 'a') as f:
                            f.write(self.img_files[i] + '\n')

                # Extract object detection boxes for a second stage classifier
                if extract_bounding_boxes:
                    p = Path(self.img_files[i])
                    img = cv2.imread(str(p))
                    h, w = img.shape[:2]
                    for j, x in enumerate(l):
                        f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
                        if not os.path.exists(Path(f).parent):
                            os.makedirs(Path(f).parent)  # make new output folder
                        b = x[1:] * [w, h, w, h]  # box
                        b[2:] = b[2:].max()  # rectangle to square
                        b[2:] = b[2:] * 1.3 + 30  # pad
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
                        b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                        b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                        assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
            else:
                ne += 1  # print('empty labels for image %s' % self.img_files[i])  # file empty
                # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i]))  # remove

            pbar.desc = 'Caching labels (%g found, %g missing, %g empty, %g duplicate, for %g images)' \
                        % (nf, nm, ne, nd, n)
        assert nf > 0, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        if cache_images:  # if training
            gb = 0  # Gigabytes of cached images
            pbar = tqdm(range(len(self.img_files)), desc='Caching images')
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            for i in pbar:  # max 10k images
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i)  # img, hw_original, hw_resized
                gb += self.imgs[i].nbytes
                pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)

        # Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
        detect_corrupted_images = False
        if detect_corrupted_images:
            from skimage import io  # conda install -c conda-forge scikit-image
            for file in tqdm(self.img_files, desc='Detecting corrupted images'):
                try:
                    _ = io.imread(file)
                except:
                    print('Corrupted image detected: %s' % file)

    def __len__(self):
        """Number of samples in the dataset (one per image file)."""
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        """Return (image CHW RGB tensor, labels (n, 6), image path, shapes).

        Label rows are [batch_index placeholder, cls, cx, cy, w, h] with
        coordinates normalized to 0-1; ``shapes`` is None for mosaic samples.
        """
        if self.image_weights:
            index = self.indices[index]

        hyp = self.hyp
        if self.mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None
        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # ---------- Letterbox
            # final letter-boxed shape
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size

            # ----- letter box
            # or pad_resize_ratio(this methods keeps consistency with dark-net)
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)

            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling
            # ----------

            # Load labels
            labels = []
            x = self.labels[index]
            if x.size > 0:  # pad[0]: pad width, pad[1]: pad height
                # Normalized xywh to pixel xyxy format: in net input size(e.g. 768×768)
                labels = x.copy()
                labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # x1
                labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # y1
                labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]  # x2
                labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]  # y2

        if self.augment:
            # Augment image space
            if not self.mosaic:
                img, labels = random_affine(img, labels,
                                            degrees=hyp['degrees'],
                                            translate=hyp['translate'],
                                            scale=hyp['scale'],
                                            shear=hyp['shear'])  # labels pushed outside the image may be dropped here

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            # convert xyxy to xywh(center_x, center_y, bbox_w, bbox_h)
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])

            # Normalize coordinates 0 - 1
            labels[:, [2, 4]] /= img.shape[0]  # height
            labels[:, [1, 3]] /= img.shape[1]  # width

        if self.augment:
            # random left-right flip
            lr_flip = True
            if lr_flip and random.random() < 0.5:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

            # random up-down flip
            ud_flip = False
            if ud_flip and random.random() < 0.5:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

        labels_out = torch.zeros((nL, 6))  # column 0 is reserved for the sample's batch index (set by collate_fn)
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        """Stack images and concatenate labels, filling label column 0 with the sample index."""
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets(): index of the sample in the batch
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes
def load_image(self, index):
    """Return (image, original (h, w), resized (h, w)) for dataset item *index*.

    Serves the cached copy when present; otherwise reads from disk and
    rescales so the longer side equals ``self.img_size`` (upscaling only
    when augmenting).
    """
    cached = self.imgs[index]
    if cached is not None:
        # Cached path: image plus its recorded original/resized sizes.
        return cached, self.img_hw0[index], self.img_hw[index]

    path = self.img_files[index]
    im = cv2.imread(path)  # BGR
    assert im is not None, 'Image Not Found ' + path
    h0, w0 = im.shape[:2]  # original hw
    ratio = self.img_size / max(h0, w0)
    # always resize down; only resize up when training with augmentation
    if ratio < 1 or (self.augment and ratio != 1):
        interp = cv2.INTER_AREA if ratio < 1 and not self.augment else cv2.INTER_LINEAR
        im = cv2.resize(im, (int(w0 * ratio), int(h0 * ratio)), interpolation=interp)
    return im, (h0, w0), im.shape[:2]  # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """Randomly jitter hue/saturation/value of a BGR uint8 image, in place.

    Each channel gain is drawn uniformly from [1 - gain, 1 + gain] and
    applied via a 256-entry lookup table; the result is written back into
    *img* (no return value).
    """
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)  # OpenCV hue range is 0-179
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed

    # Histogram equalization
    # if random.random() < 0.2:
    #     for i in range(3):
    #         img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic_with_ids(self, index):
    """Assemble a 2s×2s mosaic from *index* plus 3 random images, keeping track ids.

    Returns (mosaic image, labels as [cls, x1, y1, x2, y2] pixels, 0-based
    track ids) after a random affine that crops the canvas back down.
    """
    # loads images in a mosaic
    labels4, label4_orig = [], []
    s = self.img_size
    xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)]  # mosaic center x, y
    indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4 (a/b coordinates: large canvas / source tile)
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        pad_w = x1a - x1b
        pad_h = y1a - y1b

        # Labels
        x = self.labels[index][:, [0, 2, 3, 4, 5]]  # do not load track id here.
        y = self.labels[index]
        labels = x.copy()  # labels without ids
        labels_orig = y.copy()  # labels with ids
        if x.size > 0:  # Normalized xywh to pixel xyxy format
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + pad_w
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + pad_h
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + pad_w
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + pad_h

        labels4.append(labels)
        label4_orig.append(labels_orig)

    # Concat/clip labels
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        label4_orig = np.concatenate(label4_orig, 0)  # fractional coordinates
        # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:])  # use with center crop
        np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])  # use with random_affine

    # label4_orig is a local copy, so the in-place shift below is safe
    track_ids = label4_orig[:, 1]
    track_ids -= 1  # track id starts from 1(not 0)

    # Augment
    # img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)]  # center crop (WARNING, requires box pruning)
    img4, labels4, track_ids = random_affine_with_ids(img4,
                                                      labels4,
                                                      track_ids,
                                                      degrees=self.hyp['degrees'],
                                                      translate=self.hyp['translate'],
                                                      scale=self.hyp['scale'],
                                                      shear=self.hyp['shear'],
                                                      border=-s // 2)  # border to remove

    return img4, labels4, track_ids
def load_mosaic(self, index):
    """Assemble a 2s×2s mosaic from *index* plus 3 random images.

    Returns (mosaic image, labels as [cls, x1, y1, x2, y2] pixels) after a
    random affine that crops the canvas back down.
    """
    # loads images in a mosaic
    labels4 = []
    s = self.img_size
    xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)]  # mosaic center x, y
    indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4 (a/b coordinates: large canvas / source tile)
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        x = self.labels[index]
        labels = x.copy()
        if x.size > 0:  # Normalized xywh to pixel xyxy format
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
        labels4.append(labels)

    # Concat/clip labels
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:])  # use with center crop
        np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])  # use with random_affine

    # Augment
    # img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)]  # center crop (WARNING, requires box pruning)
    img4, labels4 = random_affine(img4, labels4,
                                  degrees=self.hyp['degrees'],
                                  translate=self.hyp['translate'],
                                  scale=self.hyp['scale'],
                                  shear=self.hyp['shear'],
                                  border=-s // 2)  # border to remove

    return img4, labels4
# keep aspect ratio
def pad_resize_ratio(img, net_w, net_h):
    """Resize *img* onto a (net_h, net_w) canvas, keeping its aspect ratio.

    The image is scaled so one dimension fits exactly; the other dimension
    is centered with constant gray (127) borders.  Returns the padded
    image as a numpy array of shape (net_h, net_w, channels).
    """
    img = np.array(img)  # H x W x channels
    H, W, channels = img.shape
    if net_h / net_w < H / W:  # image relatively taller than canvas -> pad width
        new_h = int(net_h)
        new_w = int(net_h / H * W)
        pad = (net_w - new_w) // 2
        left = round(pad - 0.1)
        right = round(pad + 0.1)
        top, bottom = 0, 0
    else:  # image relatively wider than canvas -> pad height
        new_h = int(net_w / W * H)
        new_w = int(net_w)
        pad = (net_h - new_h) // 2
        left, right = 0, 0
        top = round(pad - 0.1)
        bottom = round(pad + 0.1)

    # BUGFIX: cv2.resize's third positional argument is `dst`, not the
    # interpolation flag, so INTER_LINEAR must be passed by keyword.
    img_resize = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)

    # add border
    img_out = cv2.copyMakeBorder(img_resize, top, bottom, left, right, cv2.BORDER_CONSTANT, value=127)
    return img_out
def pad_resize_img_square(img, square_size):
    """Pad *img* to a square with gray borders, then resize to (square_size, square_size).

    The shorter dimension is padded symmetrically with a constant mid-gray
    (127.5, which truncates to 127 for uint8 inputs) so the aspect ratio is
    preserved before the final resize.
    """
    img = np.array(img)  # H x W x channels
    H, W, channels = img.shape
    dim_diff = np.abs(H - W)

    # upper(left) and lower(right) padding
    pad_lu = dim_diff // 2  # integer division
    pad_rd = dim_diff - pad_lu

    # determine padding for each axis: H, W, channels
    pad = ((pad_lu, pad_rd), (0, 0), (0, 0)) if H <= W else \
        ((0, 0), (pad_lu, pad_rd), (0, 0))

    # do padding(0.5) and normalize
    img = np.pad(img,
                 pad,
                 'constant',
                 constant_values=127.5)  # / 255.0

    # BUGFIX: cv2.resize's third positional argument is `dst`, not the
    # interpolation flag, so INTER_LINEAR must be passed by keyword.
    img = cv2.resize(img,
                     (square_size, square_size),
                     interpolation=cv2.INTER_LINEAR)

    # img.tofile('/mnt/diskb/even/img.bin')
    return img
def letterbox(img,
              new_shape=(416, 416),
              color=(114, 114, 114),
              auto=True,
              scaleFill=False,
              scaleup=True):
    """Resize *img* to fit *new_shape* keeping aspect ratio, padding with *color*.

    auto: pad only up to the nearest 64-pixel multiple (minimum rectangle).
    scaleFill: stretch to fill with no padding.
    scaleup: allow enlarging (disable for better test mAP).
    Returns (image, (w_ratio, h_ratio), (dw, dh)) where dw/dh are the
    per-side padding in pixels.
    """
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = new_shape
        ratio = new_shape[0] / shape[1], new_shape[1] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=0):
    """Apply a random rotation/scale/translation/shear to *img* and its boxes.

    targets rows are [cls, x1, y1, x2, y2] in pixels.  Boxes that end up
    too small, mostly clipped away, or too elongated are dropped.
    Returns (warped image, surviving targets).
    """
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4

    if targets is None:  # targets = [cls, xyxy]
        targets = []
    height = img.shape[0] + border * 2
    width = img.shape[1] + border * 2

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border  # x translation (pixels)
    T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border  # y translation (pixels)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Combined rotation matrix
    M = S @ T @ R  # ORDER IS IMPORTANT HERE!!
    if (border != 0) or (M != np.eye(3)).any():  # image changed
        img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp all four corners of every box
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = (xy @ M.T)[:, :2].reshape(n, 8)

        # create new axis-aligned boxes from the warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # reject warped points outside of image
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
        w = xy[:, 2] - xy[:, 0]
        h = xy[:, 3] - xy[:, 1]
        area = w * h
        area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))  # aspect ratio
        # keep boxes that stay reasonably large, visible and not degenerate
        i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)

        targets = targets[i]
        targets[:, 1:5] = xy[i]

    return img, targets
def random_affine_with_ids(img,
                           targets,
                           track_ids,
                           degrees=10,
                           translate=0.1,
                           scale=0.1,
                           shear=10,
                           border=0):
    """Like :func:`random_affine`, but also filters *track_ids* alongside boxes.

    targets rows are [cls, x1, y1, x2, y2] in pixels; track_ids is the
    parallel id array.  Returns (warped image, surviving targets,
    surviving track ids).
    """
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4

    if targets is None:  # targets = [cls, xyxy]
        targets = []
    height = img.shape[0] + border * 2
    width = img.shape[1] + border * 2

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border  # x translation (pixels)
    T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border  # y translation (pixels)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Combined rotation matrix
    M = S @ T @ R  # ORDER IS IMPORTANT HERE!!
    if (border != 0) or (M != np.eye(3)).any():  # image changed
        img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp all four corners of every box
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = (xy @ M.T)[:, :2].reshape(n, 8)

        # create new axis-aligned boxes from the warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # reject warped points outside of image
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
        w = xy[:, 2] - xy[:, 0]
        h = xy[:, 3] - xy[:, 1]
        area = w * h
        area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))  # aspect ratio
        # keep boxes that stay reasonably large, visible and not degenerate
        i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)

        targets = targets[i]
        track_ids = track_ids[i]  # drop ids of rejected boxes in lockstep
        targets[:, 1:5] = xy[i]

    return img, targets, track_ids
def cutout(image, labels):
    """Apply random 'cutout' masks to *image* in place and prune hidden labels.

    https://arxiv.org/abs/1708.04552
    https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
    https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509

    labels rows are [cls, x1, y1, x2, y2] in pixels.  Returns the subset of
    labels that remain less than 60% obscured by the larger masks.
    """
    h, w = image.shape[:2]

    def _intersection_over_area(box, boxes):
        # IoA of `box` (4,) over each row of `boxes` (n, 4); boxes are x1y1x2y2.
        cols = boxes.transpose()
        a_x1, a_y1, a_x2, a_y2 = box[0], box[1], box[2], box[3]
        b_x1, b_y1, b_x2, b_y2 = cols[0], cols[1], cols[2], cols[3]

        inter = (np.minimum(a_x2, b_x2) - np.maximum(a_x1, b_x1)).clip(0) * \
                (np.minimum(a_y2, b_y2) - np.maximum(a_y1, b_y1)).clip(0)
        areas = (b_x2 - b_x1) * (b_y2 - b_y1) + 1e-16
        return inter / areas

    # Mask sizes as image-size fractions: 1 half-size, 2 quarter-size, ...
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
    for frac in scales:
        mask_h = random.randint(1, int(h * frac))
        mask_w = random.randint(1, int(w * frac))

        # Random mask rectangle, clipped to the image bounds.
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        # Paint the region with a random mid-range color.
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

        # Keep only labels that are not largely covered by this mask.
        if len(labels) and frac > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = _intersection_over_area(box, labels[:, 1:5])
            labels = labels[ioa < 0.60]  # remove >60% obscured labels

    return labels
def reduce_img_size(path='../data/sm4/images',
                    img_size=1024):  # from evaluate_utils.datasets import *; reduce_img_size()
    """Write downscaled copies (long side <= *img_size*) of every image in
    *path* into a sibling '<path>_reduced' folder; unreadable files are
    skipped with a warning."""
    # creates a new ./images_reduced folder with reduced size images of maximum size img_size
    path_new = path + '_reduced'  # reduced images path
    create_folder(path_new)
    for f in tqdm(glob.glob('%s/*.*' % path)):
        try:
            img = cv2.imread(f)
            h, w = img.shape[:2]
            r = img_size / max(h, w)  # size ratio
            if r < 1.0:
                img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA)  # _LINEAR fastest
            fnew = f.replace(path, path_new)  # .replace(Path(f).suffix, '.jpg')
            cv2.imwrite(fnew, img)
        except:
            print('WARNING: image failure %s' % f)
def convert_images2bmp():  # from evaluate_utils.datasets import *; convert_images2bmp()
    """Convert the hard-coded sm4 dataset images to .bmp copies and rewrite
    the image-list txt files to reference the new paths."""
    # Save images
    formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
    # for path in ['../coco/images/val2014', '../coco/images/train2014']:
    for path in ['../data/sm4/images', '../data/sm4/background']:
        create_folder(path + 'bmp')
        for ext in formats:  # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
            for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
                cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))

    # Save labels
    # for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
    for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
        with open(file, 'r') as f:
            lines = f.read()
            # lines = f.read().replace('2014/', '2014bmp/')  # coco
            lines = lines.replace('/images', '/imagesbmp')
            lines = lines.replace('/background', '/backgroundbmp')
            for ext in formats:
                lines = lines.replace(ext, '.bmp')
        with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
            f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'):  # from evaluate_utils.datasets import *; recursive_dataset2bmp()
    """Walk *dataset*, converting every image file to .bmp (deleting the
    original) and rewriting any .txt files to reference the .bmp names."""
    # Converts dataset to bmp (for faster training)
    formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
    for a, b, files in os.walk(dataset):
        for file in tqdm(files, desc=a):
            p = a + '/' + file
            s = Path(file).suffix
            if s == '.txt':  # replace text
                with open(p, 'r') as f:
                    lines = f.read()
                # NOTE: `f` is reused as the loop variable below; the file
                # handle is already closed at this point so this is safe.
                for f in formats:
                    lines = lines.replace(f, '.bmp')
                with open(p, 'w') as f:
                    f.write(lines)
            elif s in formats:  # replace image
                cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
                if s != '.bmp':
                    os.system("rm '%s'" % p)
def imagelist2folder(path='data/coco_64img.txt'):  # from evaluate_utils.datasets import *; imagelist2folder()
    """Copy every image listed in *path* (one per line) into a fresh folder
    named after the list file without its '.txt' suffix."""
    # Copies all the images in a text file (list of images) into a folder
    create_folder(path[:-4])
    with open(path, 'r') as f:
        for line in f.read().splitlines():
            os.system('cp "%s" %s' % (line, path[:-4]))
            print(line)
def create_folder(path='./new_folder'):
    """Create a fresh, empty directory at *path*; any existing one is wiped."""
    already_there = os.path.exists(path)
    if already_there:
        shutil.rmtree(path)  # drop the stale output folder and its contents
    os.makedirs(path)  # recreate empty
| true | true |
1c2d536e0514934471c9643923e3132b4e8f0682 | 1,322 | py | Python | app/core/tests/test_admin.py | muminfarooq190/recipe-api | 270d89df9d39e5903245734b18acaaee505d94d0 | [
"MIT"
] | 1 | 2021-05-17T06:58:33.000Z | 2021-05-17T06:58:33.000Z | app/core/tests/test_admin.py | muminfarooq190/recipe-api | 270d89df9d39e5903245734b18acaaee505d94d0 | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | muminfarooq190/recipe-api | 270d89df9d39e5903245734b18acaaee505d94d0 | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Smoke tests for the custom user model's pages in the Django admin."""

    def setUp(self):
        """Create a logged-in superuser client plus a regular user fixture."""
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='muminfarooq586@gmail.com',
            password='testpass123'
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email='test1234@gmail.com',
            password='test123',
            name='test full name'
        )

    def test_users_listed(self):
        """Test that users are listed on the user page"""
        url = reverse('admin:core_myuser_changelist')
        res = self.client.get(url)

        self.assertContains(res, self.user.name)
        self.assertContains(res, self.user.email)

    def test_user_page_change(self):
        """Test that the user edit page works"""
        url = reverse('admin:core_myuser_change', args=[self.user.id])
        res = self.client.get(url)

        self.assertEqual(res.status_code, 200)

    def test_user_page_add(self):
        """Test that new user can be added"""
        url = reverse('admin:core_myuser_add')
        res = self.client.get(url)

        self.assertEqual(res.status_code, 200)
| 32.243902 | 70 | 0.641452 | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='muminfarooq586@gmail.com',
password='testpass123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test1234@gmail.com',
password='test123',
name='test full name'
)
def test_users_listed(self):
url = reverse('admin:core_myuser_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_page_change(self):
url = reverse('admin:core_myuser_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_user_page_add(self):
url = reverse('admin:core_myuser_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| true | true |
1c2d53f021fcc0f127524587c762f40d3b18bb0c | 336 | py | Python | ABC_D/ABC137_D.py | ryosuke0825/atcoder_python | 185cdbe7db44ecca1aaf357858d16d31ce515ddb | [
"MIT"
] | null | null | null | ABC_D/ABC137_D.py | ryosuke0825/atcoder_python | 185cdbe7db44ecca1aaf357858d16d31ce515ddb | [
"MIT"
] | null | null | null | ABC_D/ABC137_D.py | ryosuke0825/atcoder_python | 185cdbe7db44ecca1aaf357858d16d31ce515ddb | [
"MIT"
] | null | null | null | import heapq
N, M = map(int, input().split())
AB = [[] for i in range(10**5+1)]
for i in range(N):
A, B = map(int, input().split())
AB[A].append(-B)
hq = []
heapq.heapify(hq)
ans = 0
for i in range(1, M+1):
for b in AB[i]:
heapq.heappush(hq, b)
if len(hq) > 0:
ans += -heapq.heappop(hq)
print(ans)
| 15.272727 | 36 | 0.529762 | import heapq
N, M = map(int, input().split())
AB = [[] for i in range(10**5+1)]
for i in range(N):
A, B = map(int, input().split())
AB[A].append(-B)
hq = []
heapq.heapify(hq)
ans = 0
for i in range(1, M+1):
for b in AB[i]:
heapq.heappush(hq, b)
if len(hq) > 0:
ans += -heapq.heappop(hq)
print(ans)
| true | true |
1c2d544a237d9edbef6ba35f669d795612a30701 | 6,647 | py | Python | cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchBase.py | mkinsner/llvm | 589d48844edb12cd357b3024248b93d64b6760bf | [
"Apache-2.0"
] | 2,338 | 2018-06-19T17:34:51.000Z | 2022-03-31T11:00:37.000Z | cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchBase.py | mkinsner/llvm | 589d48844edb12cd357b3024248b93d64b6760bf | [
"Apache-2.0"
] | 3,740 | 2019-01-23T15:36:48.000Z | 2022-03-31T22:01:13.000Z | cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexExpectWatchBase.py | mkinsner/llvm | 589d48844edb12cd357b3024248b93d64b6760bf | [
"Apache-2.0"
] | 500 | 2019-01-23T07:49:22.000Z | 2022-03-30T02:59:37.000Z | # DExTer : Debugging Experience Tester
# ~~~~~~ ~ ~~ ~ ~~
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""DexExpectWatch base class, holds logic for how to build and process expected
watch commands.
"""
import abc
import difflib
import os
from collections import namedtuple
from dex.command.CommandBase import CommandBase, StepExpectInfo
from dex.command.StepValueInfo import StepValueInfo
class DexExpectWatchBase(CommandBase):
    """Base class for DexExpectWatch* commands: records how an expression's
    observed values compare against the expected values over a line range.
    """

    def __init__(self, *args, **kwargs):
        # args[0] is the watched expression; the remainder are expected values.
        if len(args) < 2:
            raise TypeError('expected at least two args')
        self.expression = args[0]
        self.values = [str(arg) for arg in args[1:]]
        try:
            # `on_line` pins the watch to a single line; otherwise use the
            # `from_line`/`to_line` range (defaulting to "anywhere").
            on_line = kwargs.pop('on_line')
            self._from_line = on_line
            self._to_line = on_line
        except KeyError:
            self._from_line = kwargs.pop('from_line', 1)
            self._to_line = kwargs.pop('to_line', 999999)
        self._require_in_order = kwargs.pop('require_in_order', True)
        if kwargs:
            raise TypeError('unexpected named args: {}'.format(
                ', '.join(kwargs)))
        # Number of times that this watch has been encountered.
        self.times_encountered = 0
        # We'll pop from this set as we encounter values so anything left at
        # the end can be considered as not having been seen.
        self._missing_values = set(self.values)
        self.misordered_watches = []
        # List of StepValueInfos for any watch that is encountered as invalid.
        self.invalid_watches = []
        # List of StepValueInfos for any watch where we couldn't retrieve its
        # data.
        self.irretrievable_watches = []
        # List of StepValueInfos for any watch that is encountered as having
        # been optimized out.
        self.optimized_out_watches = []
        # List of StepValueInfos for any watch that is encountered that has an
        # expected value.
        self.expected_watches = []
        # List of StepValueInfos for any watch that is encountered that has an
        # unexpected value.
        self.unexpected_watches = []
        super(DexExpectWatchBase, self).__init__()

    def get_watches(self):
        """Return the single StepExpectInfo describing this watch."""
        return [StepExpectInfo(self.expression, self.path, 0, range(self._from_line, self._to_line + 1))]

    @property
    def line_range(self):
        # Inclusive list of source lines on which the watch is active.
        return list(range(self._from_line, self._to_line + 1))

    @property
    def missing_values(self):
        # Expected values that were never observed, sorted for stable output.
        return sorted(list(self._missing_values))

    @property
    def encountered_values(self):
        # Expected values that were observed at least once.
        return sorted(list(set(self.values) - self._missing_values))

    @abc.abstractmethod
    def _get_expected_field(self, watch):
        """Return a field from watch that this ExpectWatch command is checking.
        """

    def _handle_watch(self, step_info):
        """Classify one observed watch into exactly one result bucket."""
        self.times_encountered += 1
        if not step_info.watch_info.could_evaluate:
            self.invalid_watches.append(step_info)
            return
        if step_info.watch_info.is_optimized_away:
            self.optimized_out_watches.append(step_info)
            return
        if step_info.watch_info.is_irretrievable:
            self.irretrievable_watches.append(step_info)
            return
        if step_info.expected_value not in self.values:
            self.unexpected_watches.append(step_info)
            return
        self.expected_watches.append(step_info)
        try:
            self._missing_values.remove(step_info.expected_value)
        except KeyError:
            # Value seen more than once; it was already removed the first time.
            pass

    def _check_watch_order(self, actual_watches, expected_values):
        """Use difflib to figure out whether the values are in the expected order
        or not.
        """
        differences = []
        actual_values = [w.expected_value for w in actual_watches]
        value_differences = list(difflib.Differ().compare(actual_values,
                                                          expected_values))
        missing_value = False
        index = 0
        for vd in value_differences:
            kind = vd[0]
            if kind == '+':
                # A value that is encountered in the expected list but not in the
                # actual list. We'll keep a note that something is wrong and flag
                # the next value that matches as misordered.
                missing_value = True
            elif kind == ' ':
                # This value is as expected. It might still be wrong if we've
                # previously encountered a value that is in the expected list but
                # not the actual list.
                if missing_value:
                    missing_value = False
                    differences.append(actual_watches[index])
                index += 1
            elif kind == '-':
                # A value that is encountered in the actual list but not the
                # expected list.
                differences.append(actual_watches[index])
                index += 1
            else:
                assert False, 'unexpected diff:{}'.format(vd)
        return differences

    def eval(self, step_collection):
        """Walk every recorded step, classify matching watches, then (optionally)
        check that distinct observed values appeared in the expected order."""
        assert os.path.exists(self.path)
        for step in step_collection.steps:
            loc = step.current_location
            # Only consider steps located in this file and inside the watch's
            # active line range.
            if (loc.path and os.path.exists(loc.path) and
                    os.path.samefile(loc.path, self.path) and
                    loc.lineno in self.line_range):
                try:
                    watch = step.program_state.frames[0].watches[self.expression]
                except KeyError:
                    # Expression was not watched at this step.
                    pass
                else:
                    expected_field = self._get_expected_field(watch)
                    step_info = StepValueInfo(step.step_index, watch,
                                              expected_field)
                    self._handle_watch(step_info)
        if self._require_in_order:
            # A list of all watches where the value has changed.
            value_change_watches = []
            prev_value = None
            for watch in self.expected_watches:
                if watch.expected_value != prev_value:
                    value_change_watches.append(watch)
                    prev_value = watch.expected_value
            self.misordered_watches = self._check_watch_order(
                value_change_watches, [
                    v for v in self.values if v in
                    [w.expected_value for w in self.expected_watches]
                ])
| 35.92973 | 105 | 0.597563 |
import abc
import difflib
import os
from collections import namedtuple
from dex.command.CommandBase import CommandBase, StepExpectInfo
from dex.command.StepValueInfo import StepValueInfo
class DexExpectWatchBase(CommandBase):
def __init__(self, *args, **kwargs):
if len(args) < 2:
raise TypeError('expected at least two args')
self.expression = args[0]
self.values = [str(arg) for arg in args[1:]]
try:
on_line = kwargs.pop('on_line')
self._from_line = on_line
self._to_line = on_line
except KeyError:
self._from_line = kwargs.pop('from_line', 1)
self._to_line = kwargs.pop('to_line', 999999)
self._require_in_order = kwargs.pop('require_in_order', True)
if kwargs:
raise TypeError('unexpected named args: {}'.format(
', '.join(kwargs)))
self.times_encountered = 0
# the end can be considered as not having been seen.
self._missing_values = set(self.values)
self.misordered_watches = []
# List of StepValueInfos for any watch that is encountered as invalid.
self.invalid_watches = []
# List of StepValueInfo any any watch where we couldn't retrieve its
self.irretrievable_watches = []
self.optimized_out_watches = []
self.expected_watches = []
self.unexpected_watches = []
super(DexExpectWatchBase, self).__init__()
def get_watches(self):
return [StepExpectInfo(self.expression, self.path, 0, range(self._from_line, self._to_line + 1))]
@property
def line_range(self):
return list(range(self._from_line, self._to_line + 1))
@property
def missing_values(self):
return sorted(list(self._missing_values))
@property
def encountered_values(self):
return sorted(list(set(self.values) - self._missing_values))
@abc.abstractmethod
def _get_expected_field(self, watch):
def _handle_watch(self, step_info):
self.times_encountered += 1
if not step_info.watch_info.could_evaluate:
self.invalid_watches.append(step_info)
return
if step_info.watch_info.is_optimized_away:
self.optimized_out_watches.append(step_info)
return
if step_info.watch_info.is_irretrievable:
self.irretrievable_watches.append(step_info)
return
if step_info.expected_value not in self.values:
self.unexpected_watches.append(step_info)
return
self.expected_watches.append(step_info)
try:
self._missing_values.remove(step_info.expected_value)
except KeyError:
pass
def _check_watch_order(self, actual_watches, expected_values):
differences = []
actual_values = [w.expected_value for w in actual_watches]
value_differences = list(difflib.Differ().compare(actual_values,
expected_values))
missing_value = False
index = 0
for vd in value_differences:
kind = vd[0]
if kind == '+':
# the next value that matches as misordered.
missing_value = True
elif kind == ' ':
# This value is as expected. It might still be wrong if we've
if missing_value:
missing_value = False
differences.append(actual_watches[index])
index += 1
elif kind == '-':
differences.append(actual_watches[index])
index += 1
else:
assert False, 'unexpected diff:{}'.format(vd)
return differences
def eval(self, step_collection):
assert os.path.exists(self.path)
for step in step_collection.steps:
loc = step.current_location
if (loc.path and os.path.exists(loc.path) and
os.path.samefile(loc.path, self.path) and
loc.lineno in self.line_range):
try:
watch = step.program_state.frames[0].watches[self.expression]
except KeyError:
pass
else:
expected_field = self._get_expected_field(watch)
step_info = StepValueInfo(step.step_index, watch,
expected_field)
self._handle_watch(step_info)
if self._require_in_order:
value_change_watches = []
prev_value = None
for watch in self.expected_watches:
if watch.expected_value != prev_value:
value_change_watches.append(watch)
prev_value = watch.expected_value
self.misordered_watches = self._check_watch_order(
value_change_watches, [
v for v in self.values if v in
[w.expected_value for w in self.expected_watches]
])
| true | true |
1c2d549baa7b01f366b099fa732274bb92dcb368 | 1,265 | py | Python | examples/basic_experiment.py | drammock/expyfun | b92bf5291318ee4cb1692e7bcb9757a422f48304 | [
"BSD-3-Clause"
] | 7 | 2015-09-27T23:54:07.000Z | 2022-01-17T01:12:12.000Z | examples/basic_experiment.py | drammock/expyfun | b92bf5291318ee4cb1692e7bcb9757a422f48304 | [
"BSD-3-Clause"
] | 218 | 2015-02-17T20:29:31.000Z | 2022-02-28T20:55:24.000Z | examples/basic_experiment.py | drammock/expyfun | b92bf5291318ee4cb1692e7bcb9757a422f48304 | [
"BSD-3-Clause"
] | 19 | 2015-02-19T18:43:43.000Z | 2021-11-12T23:13:12.000Z | """
===========================
Run a very basic experiment
===========================
This example demonstrates an (almost) minimum working example of the
ExperimentController class.
"""
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from expyfun import ExperimentController, analyze, building_doc
from expyfun.visual import FixationDot
print(__doc__)
# set configuration
fs = 24414. # default for ExperimentController
dur = 1.0
tone = np.sin(2 * np.pi * 1000 * np.arange(int(fs * dur)) / float(fs))
tone *= 0.01 * np.sqrt(2) # Set RMS to 0.01
max_wait = 1. if not building_doc else 0.
with ExperimentController('testExp', participant='foo', session='001',
output_dir=None, version='dev') as ec:
ec.screen_prompt('Press a button when you hear the tone',
max_wait=max_wait)
dot = FixationDot(ec)
ec.load_buffer(tone)
dot.draw()
screenshot = ec.screenshot() # only because we want to show it in the docs
ec.identify_trial(ec_id='tone', ttl_id=[0, 0])
ec.start_stimulus()
presses = ec.wait_for_presses(dur if not building_doc else 0.)
ec.trial_ok()
print('Presses:\n{}'.format(presses))
analyze.plot_screen(screenshot)
| 28.75 | 79 | 0.657708 |
import numpy as np
from expyfun import ExperimentController, analyze, building_doc
from expyfun.visual import FixationDot
print(__doc__)
fs = 24414.
dur = 1.0
tone = np.sin(2 * np.pi * 1000 * np.arange(int(fs * dur)) / float(fs))
tone *= 0.01 * np.sqrt(2)
max_wait = 1. if not building_doc else 0.
with ExperimentController('testExp', participant='foo', session='001',
output_dir=None, version='dev') as ec:
ec.screen_prompt('Press a button when you hear the tone',
max_wait=max_wait)
dot = FixationDot(ec)
ec.load_buffer(tone)
dot.draw()
screenshot = ec.screenshot()
ec.identify_trial(ec_id='tone', ttl_id=[0, 0])
ec.start_stimulus()
presses = ec.wait_for_presses(dur if not building_doc else 0.)
ec.trial_ok()
print('Presses:\n{}'.format(presses))
analyze.plot_screen(screenshot)
| true | true |
1c2d5502a9ba19cd4fb107246ec66b3d8905832b | 26,575 | py | Python | pytorch_lightning/trainer/data_loading.py | MaximumEntropy/pytorch-lightning | d8c501b22bc2c02a4bedb8b686daa5f904d6cf23 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/trainer/data_loading.py | MaximumEntropy/pytorch-lightning | d8c501b22bc2c02a4bedb8b686daa5f904d6cf23 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/trainer/data_loading.py | MaximumEntropy/pytorch-lightning | d8c501b22bc2c02a4bedb8b686daa5f904d6cf23 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import multiprocessing
import os
from abc import ABC
from copy import deepcopy
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from torch.utils.data import BatchSampler, DataLoader, RandomSampler, Sampler, SequentialSampler
from torch.utils.data.dataset import IterableDataset
from torch.utils.data.distributed import DistributedSampler
import pytorch_lightning as pl
from pytorch_lightning.accelerators import Accelerator
from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper, UnrepeatedDistributedSampler
from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.trainer.supporters import CombinedLoader
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.auto_restart import (
_capture_metadata_collate,
CaptureIterableDataset,
CaptureMapDataset,
FastForwardSampler,
)
from pytorch_lightning.utilities.data import has_iterable_dataset, has_len
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _fault_tolerant_training
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.seed import pl_worker_init_function
class TrainerDataLoadingMixin(ABC):
    """Mixin providing the ``Trainer`` with dataloader setup logic: sampler
    injection, worker checks, batch limiting and dataloader resets."""

    # this is just a summary on variables used in this abstract class,
    # the proper values/initialisation should be done in child class
    val_check_interval: float
    tpu_local_core_rank: int
    train_dataloader: DataLoader
    num_training_batches: Union[int, float]
    val_check_batch: float
    val_dataloaders: Optional[List[DataLoader]]
    num_val_batches: List[Union[int, float]]
    test_dataloaders: Optional[List[DataLoader]]
    num_test_batches: List[Union[int, float]]
    limit_train_batches: Union[int, float]
    log_every_n_steps: int
    overfit_batches: Union[int, float]
    distributed_sampler_kwargs: dict
    accelerator: Accelerator
    accelerator_connector: AcceleratorConnector
    call_hook: Callable
def _worker_check(self, dataloader: DataLoader, name: str) -> None:
if not isinstance(dataloader, DataLoader):
return
using_spawn = self.accelerator_connector.distributed_backend == "ddp_spawn"
num_cpus = multiprocessing.cpu_count()
# ddp_spawn + num_workers > 0 don't mix! tell the user
if dataloader.num_workers > 0 and using_spawn:
# checks for the attr persistent_workers available in pytorch >= 1.7
if hasattr(dataloader, "persistent_workers"):
if not dataloader.persistent_workers:
rank_zero_warn(
"num_workers>0, persistent_workers=False, and accelerator=ddp_spawn"
" may result in data loading bottlenecks."
" Consider setting persistent_workers=True"
" (this is a limitation of Python .spawn() and PyTorch)"
)
else:
rank_zero_warn(
"num_workers>0 and accelerator=ddp_spawn do not mix well"
" and may result in data loading bottlenecks."
" Consider setting accelerator=ddp to use num_workers>0"
" (this is a limitation of Python .spawn() and PyTorch)"
)
elif dataloader.num_workers == 0 and using_spawn:
# checks for the attr persistent_workers available in pytorch >= 1.7
if hasattr(dataloader, "persistent_workers"):
if not dataloader.persistent_workers:
rank_zero_warn(
"accelerator=ddp_spawn and num_workers=0 may result in data loading bottlenecks."
" Consider setting num_workers>0 and persistent_workers=True"
)
else:
rank_zero_warn(
"accelerator=ddp_spawn and num_workers=0 may result in data loading bottlenecks."
" Consider setting accelerator=ddp and set num_workers>0"
)
elif dataloader.num_workers <= 2 < num_cpus and not using_spawn:
rank_zero_warn(
f"The dataloader, {name}, does not have many workers which may be a bottleneck."
" Consider increasing the value of the `num_workers` argument`"
f" (try {num_cpus} which is the number of cpus on this machine)"
" in the `DataLoader` init to improve performance."
)
def auto_add_worker_init_fn(self, dataloader: DataLoader) -> None:
if int(os.environ.get("PL_SEED_WORKERS", 0)) and dataloader.worker_init_fn is None:
dataloader.worker_init_fn = partial(pl_worker_init_function, rank=self.global_rank)
    def auto_add_sampler(self, dataloader: Any, shuffle: bool, mode: Optional[RunningStage] = None) -> Any:
        """Inject a ``DistributedSampler`` into ``dataloader`` when distributed
        training requires one; non-``DataLoader`` inputs pass through unchanged
        and ``CombinedLoader``s are processed recursively.
        """
        if isinstance(dataloader, CombinedLoader):
            # apply `auto_add_sampler` on all the collection of loaders
            dataloader.loaders = apply_to_collection(
                dataloader.loaders, DataLoader, self.auto_add_sampler, shuffle, mode=mode
            )
            return dataloader
        # don't do anything if it's not a dataloader
        if not isinstance(dataloader, DataLoader):
            return dataloader
        # Replace the sampler only when all conditions hold: the feature is on,
        # we are actually distributed, the loader is not already distributed,
        # and the dataset is map-style (iterable datasets manage themselves).
        if (
            self.accelerator_connector.replace_sampler_ddp
            and self.accelerator_connector.is_distributed
            and not isinstance(dataloader.sampler, DistributedSampler)
            and not has_iterable_dataset(dataloader)
        ):
            # A custom (non-default) sampler cannot be silently replaced.
            if not isinstance(dataloader.sampler, (SequentialSampler, RandomSampler)):
                raise MisconfigurationException(
                    "You seem to have configured a sampler in your DataLoader. This will be replaced "
                    " by `DistributedSampler` since `replace_sampler_ddp` is True and you are using"
                    " distributed training. Either remove the sampler from your DataLoader or set"
                    " `replace_sampler_ddp`=False if you want to use your custom sampler."
                )
            sampler = self._get_distributed_sampler(dataloader, shuffle, mode=mode)
            dataloader = self.replace_sampler(dataloader, sampler, mode=mode)
        else:
            # use current sampler
            sampler = dataloader.sampler
        return dataloader
    @staticmethod
    def _resolve_batch_sampler(
        dataloader: DataLoader, sampler: Optional[Sampler], mode: Optional[RunningStage] = None
    ) -> Dict[str, Any]:
        """Return the sampler-related keyword arguments needed to rebuild
        ``dataloader`` around ``sampler``, re-wrapping any custom batch sampler
        (and fault-tolerance wrappers) as required.
        """
        batch_sampler = getattr(dataloader, "batch_sampler")
        is_predicting = mode == RunningStage.PREDICTING
        # checking the batch sampler type is different than PyTorch default.
        if (batch_sampler is not None and type(batch_sampler) is not BatchSampler) or is_predicting:
            # Rebuild the (custom) batch sampler around the new sampler; during
            # prediction never drop the last incomplete batch.
            batch_sampler = type(batch_sampler)(
                sampler,
                batch_size=batch_sampler.batch_size,
                drop_last=(False if is_predicting else batch_sampler.drop_last),
            )
            if is_predicting:
                batch_sampler = IndexBatchSamplerWrapper(batch_sampler)
            if _fault_tolerant_training():
                fast_forward_sampler = batch_sampler = FastForwardSampler(batch_sampler)
                fast_forward_sampler.setup(dataloader_batch_size=1)
            # With an explicit batch_sampler, DataLoader requires the default
            # values for sampler/shuffle/batch_size/drop_last.
            return {
                "sampler": None,
                "shuffle": False,
                "batch_sampler": batch_sampler,
                "batch_size": 1,
                "drop_last": False,
            }
        if _fault_tolerant_training():
            fast_forward_sampler = sampler = FastForwardSampler(sampler)
            fast_forward_sampler.setup(dataloader_batch_size=dataloader.batch_size)
        return {"sampler": sampler, "shuffle": False, "batch_sampler": None}
    @staticmethod
    def _get_dataloader_init_kwargs(
        dataloader: DataLoader, sampler: Optional[Sampler], mode: Optional[RunningStage] = None
    ) -> Dict[str, Any]:
        """Reconstruct, via introspection, the keyword arguments needed to
        re-instantiate ``dataloader``'s class with ``sampler`` injected.

        Raises ``MisconfigurationException`` when the dataloader subclass does
        not expose enough of its ``__init__`` arguments as attributes.
        """
        if not isinstance(dataloader, DataLoader):
            raise ValueError(f"The dataloader {dataloader} needs to subclass `torch.utils.data.DataLoader`")
        # get the dataloader instance attributes
        attrs = {k: v for k, v in vars(dataloader).items() if not k.startswith("_")}
        # not part of `vars`
        attrs["multiprocessing_context"] = dataloader.multiprocessing_context
        # get the dataloader instance `__init__` parameters
        params = dict(inspect.signature(dataloader.__init__).parameters)
        has_variadic_kwargs = any(p.kind is p.VAR_KEYWORD for p in params.values())
        if has_variadic_kwargs:
            # if the signature takes **kwargs, assume they will be passed down with `super().__init__(**kwargs)`
            params.update(inspect.signature(DataLoader.__init__).parameters)
            del params["self"]
        # keep only the params whose default is different to the current attr value
        non_defaults = {name for name, p in params.items() if name in attrs and p.default != attrs[name]}
        # add `dataset` as it might have been replaced with `*args`
        non_defaults.add("dataset")
        # kwargs to re-construct the dataloader
        dl_kwargs = {k: v for k, v in attrs.items() if k in non_defaults}
        dl_kwargs.update(TrainerDataLoadingMixin._resolve_batch_sampler(dataloader, sampler, mode=mode))
        required_args = {
            p.name
            for p in params.values()
            if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
            and p.default is p.empty
            and p.name not in dl_kwargs
        }
        # the dataloader has required args which we could not extract from the existing attributes
        if required_args:
            required_args = sorted(required_args)
            dataloader_cls_name = dataloader.__class__.__name__
            raise MisconfigurationException(
                f"Trying to inject `DistributedSampler` into the `{dataloader_cls_name}` instance. "
                "This would fail as some of the `__init__` arguments are not available as instance attributes. "
                f"The missing attributes are {required_args}. "
                f"HINT: If you wrote the `{dataloader_cls_name}` class, define `self.missing_arg_name` or "
                "manually add the `DistributedSampler` as: "
                f"`{dataloader_cls_name}(dataset, sampler=DistributedSampler(dataset))`."
            )
        if not has_variadic_kwargs:
            # the dataloader signature does not allow keyword arguments that need to be passed
            missing_kwargs = dl_kwargs.keys() - params.keys()
            if missing_kwargs:
                missing_kwargs = sorted(missing_kwargs)
                dataloader_cls_name = dataloader.__class__.__name__
                raise MisconfigurationException(
                    f"Trying to inject `DistributedSampler` into the `{dataloader_cls_name}` instance. "
                    "This would fail as it doesn't expose all its attributes in the `__init__` signature. "
                    f"The missing arguments are {missing_kwargs}. "
                    f"HINT: If you wrote the `{dataloader_cls_name}` class, add the `__init__` arguments or "
                    "manually add the `DistributedSampler` as: "
                    f"`{dataloader_cls_name}(dataset, sampler=DistributedSampler(dataset))`."
                )
        # Iterable datasets do not support samplers at all.
        if isinstance(dl_kwargs["dataset"], IterableDataset):
            dl_kwargs["batch_sampler"] = None
            dl_kwargs["sampler"] = None
        if _fault_tolerant_training():
            if isinstance(dl_kwargs["dataset"], IterableDataset):
                # wrap the `IterableDataset` into a `CaptureIterableDataset` to record sampler states.
                dl_kwargs["dataset"] = CaptureIterableDataset(dataset=dl_kwargs["dataset"])
            elif len(dl_kwargs["dataset"]):
                dl_kwargs["dataset"] = CaptureMapDataset(dataset=dl_kwargs["dataset"])
            else:
                raise MisconfigurationException(
                    "This shouldn't happen, please open an issue on Lightning Github repository."
                )
        return dl_kwargs
@staticmethod
def replace_sampler(dataloader: DataLoader, sampler, mode: Optional[RunningStage] = None) -> DataLoader:
dl_kwargs = TrainerDataLoadingMixin._get_dataloader_init_kwargs(dataloader, sampler, mode=mode)
dl_cls = type(dataloader)
dataloader = dl_cls(**dl_kwargs)
return dataloader
def _get_distributed_sampler(
self, dataloader: DataLoader, shuffle: bool, mode: Optional[RunningStage] = None
) -> DistributedSampler:
kwargs = self.distributed_sampler_kwargs
kwargs["shuffle"] = shuffle and not self.overfit_batches
kwargs.setdefault("seed", int(os.getenv("PL_GLOBAL_SEED", 0)))
cls = UnrepeatedDistributedSampler if mode == RunningStage.PREDICTING else DistributedSampler
sampler = cls(dataloader.dataset, **kwargs)
return sampler
    def reset_train_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
        """Resets the train dataloader and initialises required variables (number of batches, when to validate,
        etc.).

        Args:
            model: The `LightningModule` if calling this outside of the trainer scope.
        """
        self.train_dataloader = self.request_dataloader(RunningStage.TRAINING, model=model)
        # Overfitting must see the same batches every epoch, so force a
        # sequential sampler if the user enabled shuffling.
        if self.overfit_batches > 0:
            if hasattr(self.train_dataloader, "sampler") and isinstance(self.train_dataloader.sampler, RandomSampler):
                rank_zero_warn(
                    "You requested to overfit but enabled training dataloader shuffling."
                    " We are turning off the training dataloader shuffling for you."
                )
                self.train_dataloader = self.replace_sampler(
                    self.train_dataloader, SequentialSampler(self.train_dataloader.dataset), mode=RunningStage.TRAINING
                )
        # automatically add samplers
        self.train_dataloader = apply_to_collection(
            self.train_dataloader, DataLoader, self.auto_add_sampler, shuffle=True, mode=RunningStage.TRAINING
        )
        # check the workers recursively
        apply_to_collection(self.train_dataloader, DataLoader, self._worker_check, "train_dataloader")
        # add worker_init_fn for correct seeding in worker processes
        apply_to_collection(self.train_dataloader, DataLoader, self.auto_add_worker_init_fn)
        # add collate_fn to collect metadata for fault tolerant training
        if _fault_tolerant_training():
            apply_to_collection(self.train_dataloader, DataLoader, self._add_sampler_metadata_collate)
        # wrap the sequence of train loaders to a CombinedLoader object for computing the num_training_batches
        self.train_dataloader = CombinedLoader(self.train_dataloader, self.data_connector.multiple_trainloader_mode)
        # An un-sized (iterable) loader is treated as infinite.
        self.num_training_batches = len(self.train_dataloader) if has_len(self.train_dataloader) else float("inf")
        # `limit_train_batches` is either an absolute count (int) or a fraction (float).
        if isinstance(self.limit_train_batches, int) or self.limit_train_batches == 0.0:
            self.num_training_batches = min(self.num_training_batches, int(self.limit_train_batches))
        elif self.num_training_batches != float("inf"):
            self.num_training_batches = int(self.num_training_batches * self.limit_train_batches)
        elif self.limit_train_batches != 1.0:
            # A fractional limit (other than 1.0) is meaningless for an infinite loader.
            raise MisconfigurationException(
                "When using an IterableDataset for `limit_train_batches`,"
                " `Trainer(limit_train_batches)` must be `0.0`, `1.0` or an int. An int k specifies"
                " `num_training_batches` to use."
            )
        # determine when to check validation
        # if int passed in, val checks that often
        # otherwise, it checks in [0, 1.0] % range of a training epoch
        if isinstance(self.val_check_interval, int):
            self.val_check_batch = self.val_check_interval
            if self.val_check_batch > self.num_training_batches:
                raise ValueError(
                    f"`val_check_interval` ({self.val_check_interval}) must be less than or equal "
                    f"to the number of the training batches ({self.num_training_batches}). "
                    "If you want to disable validation set `limit_val_batches` to 0.0 instead."
                )
        else:
            if not has_len(self.train_dataloader):
                if self.val_check_interval == 1.0:
                    self.val_check_batch = float("inf")
                else:
                    raise MisconfigurationException(
                        "When using an IterableDataset for `train_dataloader`,"
                        " `Trainer(val_check_interval)` must be `1.0` or an int. An int k specifies"
                        " checking validation every k training batches."
                    )
            else:
                self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
                self.val_check_batch = max(1, self.val_check_batch)
        # Warn if the epoch is too short for the configured logging interval.
        if self.logger and self.num_training_batches < self.log_every_n_steps:
            rank_zero_warn(
                f"The number of training samples ({self.num_training_batches}) is smaller than the logging interval"
                f" Trainer(log_every_n_steps={self.log_every_n_steps}). Set a lower value for log_every_n_steps if"
                " you want to see logs for the training epoch."
            )
    def _reset_eval_dataloader(
        self, mode: RunningStage, model: Optional["pl.LightningModule"] = None
    ) -> Tuple[List[Union[int, float]], List[DataLoader]]:
        """Generic method to reset a dataloader for evaluation.

        Args:
            mode: The running stage of the ``Trainer``
            model: The ``LightningModule`` if calling this outside of the trainer scope.

        Returns:
            Tuple (num_batches, dataloaders)
        """
        assert mode.evaluating or mode == RunningStage.PREDICTING
        # always get the loaders first so we can count how many there are
        dataloaders = self.request_dataloader(mode, model=model)
        if not isinstance(dataloaders, list):
            dataloaders = [dataloaders]
        # when overfitting, use the training loader as val and test
        # duplicate it the numb of times needed to match the train loaders
        if self.overfit_batches > 0:
            train_dataloader = self.request_dataloader(RunningStage.TRAINING, model=model)
            dataloaders = [deepcopy(train_dataloader) for _ in range(len(dataloaders))]
        for loader_i in range(len(dataloaders)):
            loader = dataloaders[loader_i]
            if hasattr(loader, "sampler") and isinstance(loader.sampler, RandomSampler):
                # when overfitting, the dataloader should not have sampler
                if self.overfit_batches > 0 and mode.evaluating:
                    rank_zero_warn(
                        "You requested to overfit but enabled val/test dataloader shuffling."
                        " We are turning it off for you."
                    )
                    dataloaders[loader_i] = self.replace_sampler(loader, SequentialSampler(loader.dataset), mode=mode)
                else:
                    # Shuffled evaluation is allowed but discouraged.
                    rank_zero_warn(
                        f"Your `{mode.dataloader_prefix}_dataloader` has `shuffle=True`,"
                        "it is strongly recommended that you turn this off for val/test/predict dataloaders."
                    )
        if any(dl is None for dl in dataloaders):
            rank_zero_warn("One of given dataloaders is None and it will be skipped.")
        # add samplers
        dataloaders = [self.auto_add_sampler(dl, False, mode=mode) for dl in dataloaders if dl is not None]
        # add worker_init_fn for correct seeding in worker processes
        apply_to_collection(dataloaders, dtype=DataLoader, function=self.auto_add_worker_init_fn)
        loader_num_batches = []
        # determine number of batches
        # datasets could be none, 1 or 2+
        if len(dataloaders) != 0:
            for i, dataloader in enumerate(dataloaders):
                # An un-sized (iterable) loader is treated as infinite.
                num_batches = len(dataloader) if has_len(dataloader) else float("inf")
                self._worker_check(dataloader, f"{mode.dataloader_prefix}_dataloader {i}")
                # percent or num_steps
                limit_eval_batches = getattr(self, f"limit_{mode.dataloader_prefix}_batches")
                # limit num batches either as a percent or num steps
                if isinstance(limit_eval_batches, int) or limit_eval_batches == 0.0:
                    num_batches = min(num_batches, int(limit_eval_batches))
                elif num_batches != float("inf"):
                    num_batches = int(num_batches * limit_eval_batches)
                elif limit_eval_batches != 1.0:
                    raise MisconfigurationException(
                        f"When using an IterableDataset for `limit_{mode}_batches`,"
                        f" `Trainer(limit_{mode.dataloader_prefix}_batches)` must be `0.0`, `1.0` or an int. An int k"
                        f" specifies `num_{mode.dataloader_prefix}_batches` to use."
                    )
                # A non-zero fractional limit that still rounds down to 0 batches is a user error.
                if num_batches == 0 and limit_eval_batches > 0.0 and isinstance(limit_eval_batches, float):
                    min_pct = 1.0 / len(dataloader)
                    raise MisconfigurationException(
                        f"you requested to check {limit_eval_batches} of the `{mode.dataloader_prefix}_dataloader` but"
                        f" {limit_eval_batches}*{num_batches} < 1. Please increase the"
                        f" `limit_{mode.dataloader_prefix}_batches` flag. Try at least"
                        f" `limit_{mode.dataloader_prefix}_batches={min_pct}`"
                    )
                loader_num_batches.append(num_batches)
        return loader_num_batches, dataloaders
def reset_val_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
    """Reset the validation dataloader and determine the number of batches.

    Args:
        model: The ``LightningModule`` if called outside of the trainer scope.
    """
    module = self.lightning_module or model
    # Only (re)load when the module both provides the data and consumes it.
    if is_overridden("val_dataloader", module) and is_overridden("validation_step", module):
        self.num_val_batches, self.val_dataloaders = self._reset_eval_dataloader(
            RunningStage.VALIDATING, model=module
        )
def reset_test_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
    """Reset the test dataloader and determine the number of batches.

    Args:
        model: The ``LightningModule`` if called outside of the trainer scope.
    """
    module = self.lightning_module or model
    # Only (re)load when the module both provides the data and consumes it.
    if is_overridden("test_dataloader", module) and is_overridden("test_step", module):
        self.num_test_batches, self.test_dataloaders = self._reset_eval_dataloader(
            RunningStage.TESTING, model=module
        )
def reset_predict_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
    """Reset the predict dataloader and determine the number of batches.

    Args:
        model: The ``LightningModule`` if called outside of the trainer scope.
    """
    module = self.lightning_module or model
    # Prediction has no required step hook, so only the dataloader hook is checked.
    if is_overridden("predict_dataloader", module):
        self.num_predict_batches, self.predict_dataloaders = self._reset_eval_dataloader(
            RunningStage.PREDICTING, model=module
        )
def reset_train_val_dataloaders(self, model: Optional["pl.LightningModule"] = None) -> None:
    """Reset train and val dataloaders if none are attached to the trainer.

    The val dataloader must be initialized before the training loop starts, as the
    training loop inspects it to decide whether to run the evaluation loop.

    Args:
        model: The ``LightningModule`` if called outside of the trainer scope.
    """
    # Capture current attachments first, then reset whichever is missing (train before val).
    for attached, reset_fn in (
        (self.train_dataloader, self.reset_train_dataloader),
        (self.val_dataloaders, self.reset_val_dataloader),
    ):
        if attached is None:
            reset_fn(model=model)
def request_dataloader(
    self, stage: RunningStage, model: Optional["pl.LightningModule"] = None
) -> Union[DataLoader, List[DataLoader]]:
    """Handles downloading data in the GPU or TPU case.

    Returns:
        The dataloader
    """
    hook_name = f"{stage.dataloader_prefix}_dataloader"
    # Fire the "on_*" notification hook before asking for the loader itself.
    self.call_hook(f"on_{hook_name}", pl_module=model)
    loaders = self.call_hook(hook_name, pl_module=model)
    if isinstance(loaders, tuple):
        loaders = list(loaders)
    # Sync all processes so everyone has requested its dataloaders before continuing.
    self.training_type_plugin.barrier("get_dataloaders")
    return loaders
@staticmethod
def _add_sampler_metadata_collate(dataloader: DataLoader) -> None:
    """Wrap the dataloader's collate function so the ``FastForwardSampler`` state
    dict can be captured when fault-tolerant training is enabled."""
    wrapped_collate = partial(
        _capture_metadata_collate,
        dataset=dataloader.dataset,
        default_collate=dataloader.collate_fn,
    )
    dataloader.collate_fn = wrapped_collate
| 49.672897 | 119 | 0.654939 |
import inspect
import multiprocessing
import os
from abc import ABC
from copy import deepcopy
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from torch.utils.data import BatchSampler, DataLoader, RandomSampler, Sampler, SequentialSampler
from torch.utils.data.dataset import IterableDataset
from torch.utils.data.distributed import DistributedSampler
import pytorch_lightning as pl
from pytorch_lightning.accelerators import Accelerator
from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper, UnrepeatedDistributedSampler
from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.trainer.supporters import CombinedLoader
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.auto_restart import (
_capture_metadata_collate,
CaptureIterableDataset,
CaptureMapDataset,
FastForwardSampler,
)
from pytorch_lightning.utilities.data import has_iterable_dataset, has_len
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _fault_tolerant_training
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.seed import pl_worker_init_function
class TrainerDataLoadingMixin(ABC):
    """Mixin for the ``Trainer`` that sets up, validates, and wraps dataloaders.

    Handles sampler injection for distributed training, worker-count sanity
    checks, batch-count limiting, and fault-tolerant-training wrappers.
    Additional attributes (``lightning_module``, ``logger``, ``global_rank``,
    ``training_type_plugin``, ``data_connector``, ...) are presumably supplied
    by the ``Trainer`` class that mixes this in — confirm against the Trainer.
    """

    # Attributes expected to be provided by the mixing-in Trainer.
    val_check_interval: float
    tpu_local_core_rank: int
    train_dataloader: DataLoader
    num_training_batches: Union[int, float]
    val_check_batch: float
    val_dataloaders: Optional[List[DataLoader]]
    num_val_batches: List[Union[int, float]]
    test_dataloaders: Optional[List[DataLoader]]
    num_test_batches: List[Union[int, float]]
    limit_train_batches: Union[int, float]
    log_every_n_steps: int
    overfit_batches: Union[int, float]
    distributed_sampler_kwargs: dict
    accelerator: Accelerator
    accelerator_connector: AcceleratorConnector
    call_hook: Callable

    def _worker_check(self, dataloader: DataLoader, name: str) -> None:
        """Warn about ``num_workers`` settings that commonly cause data-loading bottlenecks."""
        if not isinstance(dataloader, DataLoader):
            return
        using_spawn = self.accelerator_connector.distributed_backend == "ddp_spawn"
        num_cpus = multiprocessing.cpu_count()
        if dataloader.num_workers > 0 and using_spawn:
            # checks for the attr persistent_workers available in pytorch >= 1.7
            if hasattr(dataloader, "persistent_workers"):
                if not dataloader.persistent_workers:
                    rank_zero_warn(
                        "num_workers>0, persistent_workers=False, and accelerator=ddp_spawn"
                        " may result in data loading bottlenecks."
                        " Consider setting persistent_workers=True"
                        " (this is a limitation of Python .spawn() and PyTorch)"
                    )
            else:
                rank_zero_warn(
                    "num_workers>0 and accelerator=ddp_spawn do not mix well"
                    " and may result in data loading bottlenecks."
                    " Consider setting accelerator=ddp to use num_workers>0"
                    " (this is a limitation of Python .spawn() and PyTorch)"
                )
        elif dataloader.num_workers == 0 and using_spawn:
            # checks for the attr persistent_workers available in pytorch >= 1.7
            if hasattr(dataloader, "persistent_workers"):
                if not dataloader.persistent_workers:
                    rank_zero_warn(
                        "accelerator=ddp_spawn and num_workers=0 may result in data loading bottlenecks."
                        " Consider setting num_workers>0 and persistent_workers=True"
                    )
            else:
                rank_zero_warn(
                    "accelerator=ddp_spawn and num_workers=0 may result in data loading bottlenecks."
                    " Consider setting accelerator=ddp and set num_workers>0"
                )
        elif dataloader.num_workers <= 2 < num_cpus and not using_spawn:
            rank_zero_warn(
                f"The dataloader, {name}, does not have many workers which may be a bottleneck."
                " Consider increasing the value of the `num_workers` argument`"
                f" (try {num_cpus} which is the number of cpus on this machine)"
                " in the `DataLoader` init to improve performance."
            )

    def auto_add_worker_init_fn(self, dataloader: DataLoader) -> None:
        """Install ``pl_worker_init_function`` for reproducible worker seeding when
        the ``PL_SEED_WORKERS`` environment variable is set and none is configured."""
        if int(os.environ.get("PL_SEED_WORKERS", 0)) and dataloader.worker_init_fn is None:
            dataloader.worker_init_fn = partial(pl_worker_init_function, rank=self.global_rank)

    def auto_add_sampler(self, dataloader: Any, shuffle: bool, mode: Optional[RunningStage] = None) -> Any:
        """Inject a ``DistributedSampler`` into ``dataloader`` when running distributed,
        leaving non-``DataLoader`` inputs and iterable-style datasets untouched."""
        if isinstance(dataloader, CombinedLoader):
            # apply `auto_add_sampler` on all the collection of loaders
            dataloader.loaders = apply_to_collection(
                dataloader.loaders, DataLoader, self.auto_add_sampler, shuffle, mode=mode
            )
            return dataloader

        # don't do anything if it's not a dataloader
        if not isinstance(dataloader, DataLoader):
            return dataloader

        if (
            self.accelerator_connector.replace_sampler_ddp
            and self.accelerator_connector.is_distributed
            and not isinstance(dataloader.sampler, DistributedSampler)
            and not has_iterable_dataset(dataloader)
        ):
            # custom samplers cannot be silently replaced; force the user to choose
            if not isinstance(dataloader.sampler, (SequentialSampler, RandomSampler)):
                raise MisconfigurationException(
                    "You seem to have configured a sampler in your DataLoader. This will be replaced "
                    " by `DistributedSampler` since `replace_sampler_ddp` is True and you are using"
                    " distributed training. Either remove the sampler from your DataLoader or set"
                    " `replace_sampler_ddp`=False if you want to use your custom sampler."
                )
            sampler = self._get_distributed_sampler(dataloader, shuffle, mode=mode)
            dataloader = self.replace_sampler(dataloader, sampler, mode=mode)
        else:
            # use current sampler
            sampler = dataloader.sampler

        return dataloader

    @staticmethod
    def _resolve_batch_sampler(
        dataloader: DataLoader, sampler: Optional[Sampler], mode: Optional[RunningStage] = None
    ) -> Dict[str, Any]:
        """Build the sampler-related ``DataLoader`` kwargs, re-wrapping custom batch
        samplers (and wrapping with ``FastForwardSampler`` for fault tolerance)."""
        batch_sampler = getattr(dataloader, "batch_sampler")
        is_predicting = mode == RunningStage.PREDICTING
        # checking the batch sampler type is different than PyTorch default.
        if (batch_sampler is not None and type(batch_sampler) is not BatchSampler) or is_predicting:
            batch_sampler = type(batch_sampler)(
                sampler,
                batch_size=batch_sampler.batch_size,
                drop_last=(False if is_predicting else batch_sampler.drop_last),
            )
            if is_predicting:
                batch_sampler = IndexBatchSamplerWrapper(batch_sampler)
            if _fault_tolerant_training():
                fast_forward_sampler = batch_sampler = FastForwardSampler(batch_sampler)
                fast_forward_sampler.setup(dataloader_batch_size=1)
            # batch_size/drop_last move into the batch sampler, so neutralize them here
            return {
                "sampler": None,
                "shuffle": False,
                "batch_sampler": batch_sampler,
                "batch_size": 1,
                "drop_last": False,
            }
        if _fault_tolerant_training():
            fast_forward_sampler = sampler = FastForwardSampler(sampler)
            fast_forward_sampler.setup(dataloader_batch_size=dataloader.batch_size)
        return {"sampler": sampler, "shuffle": False, "batch_sampler": None}

    @staticmethod
    def _get_dataloader_init_kwargs(
        dataloader: DataLoader, sampler: Optional[Sampler], mode: Optional[RunningStage] = None
    ) -> Dict[str, Any]:
        """Reconstruct the ``__init__`` kwargs needed to re-create ``dataloader``
        with ``sampler`` injected, raising if the class cannot be re-instantiated."""
        if not isinstance(dataloader, DataLoader):
            raise ValueError(f"The dataloader {dataloader} needs to subclass `torch.utils.data.DataLoader`")
        # get the dataloader instance attributes
        attrs = {k: v for k, v in vars(dataloader).items() if not k.startswith("_")}
        # not part of `vars`
        attrs["multiprocessing_context"] = dataloader.multiprocessing_context
        # get the dataloader instance `__init__` parameters
        params = dict(inspect.signature(dataloader.__init__).parameters)
        has_variadic_kwargs = any(p.kind is p.VAR_KEYWORD for p in params.values())
        if has_variadic_kwargs:
            # if the signature takes **kwargs, assume they will be passed down with `super().__init__(**kwargs)`
            params.update(inspect.signature(DataLoader.__init__).parameters)
            del params["self"]
        # keep only the params whose default is different to the current attr value
        non_defaults = {name for name, p in params.items() if name in attrs and p.default != attrs[name]}
        # add `dataset` as it might have been replaced with `*args`
        non_defaults.add("dataset")
        # kwargs to re-construct the dataloader
        dl_kwargs = {k: v for k, v in attrs.items() if k in non_defaults}
        dl_kwargs.update(TrainerDataLoadingMixin._resolve_batch_sampler(dataloader, sampler, mode=mode))
        required_args = {
            p.name
            for p in params.values()
            if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
            and p.default is p.empty
            and p.name not in dl_kwargs
        }
        # the dataloader has required args which we could not extract from the existing attributes
        if required_args:
            required_args = sorted(required_args)
            dataloader_cls_name = dataloader.__class__.__name__
            raise MisconfigurationException(
                f"Trying to inject `DistributedSampler` into the `{dataloader_cls_name}` instance. "
                "This would fail as some of the `__init__` arguments are not available as instance attributes. "
                f"The missing attributes are {required_args}. "
                f"HINT: If you wrote the `{dataloader_cls_name}` class, define `self.missing_arg_name` or "
                "manually add the `DistributedSampler` as: "
                f"`{dataloader_cls_name}(dataset, sampler=DistributedSampler(dataset))`."
            )
        if not has_variadic_kwargs:
            # the dataloader signature does not allow keyword arguments that need to be passed
            missing_kwargs = dl_kwargs.keys() - params.keys()
            if missing_kwargs:
                missing_kwargs = sorted(missing_kwargs)
                dataloader_cls_name = dataloader.__class__.__name__
                raise MisconfigurationException(
                    f"Trying to inject `DistributedSampler` into the `{dataloader_cls_name}` instance. "
                    "This would fail as it doesn't expose all its attributes in the `__init__` signature. "
                    f"The missing arguments are {missing_kwargs}. "
                    f"HINT: If you wrote the `{dataloader_cls_name}` class, add the `__init__` arguments or "
                    "manually add the `DistributedSampler` as: "
                    f"`{dataloader_cls_name}(dataset, sampler=DistributedSampler(dataset))`."
                )
        # iterable-style datasets manage their own iteration; samplers do not apply
        if isinstance(dl_kwargs["dataset"], IterableDataset):
            dl_kwargs["batch_sampler"] = None
            dl_kwargs["sampler"] = None
        if _fault_tolerant_training():
            if isinstance(dl_kwargs["dataset"], IterableDataset):
                dl_kwargs["dataset"] = CaptureIterableDataset(dataset=dl_kwargs["dataset"])
            elif len(dl_kwargs["dataset"]):
                dl_kwargs["dataset"] = CaptureMapDataset(dataset=dl_kwargs["dataset"])
            else:
                raise MisconfigurationException(
                    "This shouldn't happen, please open an issue on Lightning Github repository."
                )
        return dl_kwargs

    @staticmethod
    def replace_sampler(dataloader: DataLoader, sampler, mode: Optional[RunningStage] = None) -> DataLoader:
        """Re-instantiate ``dataloader`` with ``sampler`` injected, preserving its other arguments."""
        dl_kwargs = TrainerDataLoadingMixin._get_dataloader_init_kwargs(dataloader, sampler, mode=mode)
        dl_cls = type(dataloader)
        dataloader = dl_cls(**dl_kwargs)
        return dataloader

    def _get_distributed_sampler(
        self, dataloader: DataLoader, shuffle: bool, mode: Optional[RunningStage] = None
    ) -> DistributedSampler:
        """Create the distributed sampler to inject into the user's ``DataLoader``."""
        kwargs = self.distributed_sampler_kwargs
        # never shuffle while overfitting, so the same batches repeat
        kwargs["shuffle"] = shuffle and not self.overfit_batches
        kwargs.setdefault("seed", int(os.getenv("PL_GLOBAL_SEED", 0)))
        # prediction must see every sample exactly once, hence the unrepeated variant
        cls = UnrepeatedDistributedSampler if mode == RunningStage.PREDICTING else DistributedSampler
        sampler = cls(dataloader.dataset, **kwargs)
        return sampler

    def reset_train_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
        """Reset the train dataloader and initialize related state (number of
        training batches, validation-check cadence, worker warnings).

        Args:
            model: The `LightningModule` if called outside of the trainer scope.
        """
        self.train_dataloader = self.request_dataloader(RunningStage.TRAINING, model=model)
        if self.overfit_batches > 0:
            if hasattr(self.train_dataloader, "sampler") and isinstance(self.train_dataloader.sampler, RandomSampler):
                rank_zero_warn(
                    "You requested to overfit but enabled training dataloader shuffling."
                    " We are turning off the training dataloader shuffling for you."
                )
                self.train_dataloader = self.replace_sampler(
                    self.train_dataloader, SequentialSampler(self.train_dataloader.dataset), mode=RunningStage.TRAINING
                )
        # automatically add samplers
        self.train_dataloader = apply_to_collection(
            self.train_dataloader, DataLoader, self.auto_add_sampler, shuffle=True, mode=RunningStage.TRAINING
        )
        # check the workers recursively
        apply_to_collection(self.train_dataloader, DataLoader, self._worker_check, "train_dataloader")
        # add worker_init_fn for correct seeding in worker processes
        apply_to_collection(self.train_dataloader, DataLoader, self.auto_add_worker_init_fn)
        # add collate_fn to collect metadata for fault tolerant training
        if _fault_tolerant_training():
            apply_to_collection(self.train_dataloader, DataLoader, self._add_sampler_metadata_collate)
        # wrap the sequence of train loaders to a CombinedLoader object for computing the num_training_batches
        self.train_dataloader = CombinedLoader(self.train_dataloader, self.data_connector.multiple_trainloader_mode)
        self.num_training_batches = len(self.train_dataloader) if has_len(self.train_dataloader) else float("inf")
        # an int limit means "this many batches"; a float means a fraction of the epoch
        if isinstance(self.limit_train_batches, int) or self.limit_train_batches == 0.0:
            self.num_training_batches = min(self.num_training_batches, int(self.limit_train_batches))
        elif self.num_training_batches != float("inf"):
            self.num_training_batches = int(self.num_training_batches * self.limit_train_batches)
        elif self.limit_train_batches != 1.0:
            raise MisconfigurationException(
                "When using an IterableDataset for `limit_train_batches`,"
                " `Trainer(limit_train_batches)` must be `0.0`, `1.0` or an int. An int k specifies"
                " `num_training_batches` to use."
            )
        # determine when to check validation
        # if int passed in, val checks that often
        # otherwise, it checks in [0, 1.0] % range of a training epoch
        if isinstance(self.val_check_interval, int):
            self.val_check_batch = self.val_check_interval
            if self.val_check_batch > self.num_training_batches:
                raise ValueError(
                    f"`val_check_interval` ({self.val_check_interval}) must be less than or equal "
                    f"to the number of the training batches ({self.num_training_batches}). "
                    "If you want to disable validation set `limit_val_batches` to 0.0 instead."
                )
        else:
            if not has_len(self.train_dataloader):
                if self.val_check_interval == 1.0:
                    self.val_check_batch = float("inf")
                else:
                    raise MisconfigurationException(
                        "When using an IterableDataset for `train_dataloader`,"
                        " `Trainer(val_check_interval)` must be `1.0` or an int. An int k specifies"
                        " checking validation every k training batches."
                    )
            else:
                self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
                self.val_check_batch = max(1, self.val_check_batch)
        if self.logger and self.num_training_batches < self.log_every_n_steps:
            rank_zero_warn(
                f"The number of training samples ({self.num_training_batches}) is smaller than the logging interval"
                f" Trainer(log_every_n_steps={self.log_every_n_steps}). Set a lower value for log_every_n_steps if"
                " you want to see logs for the training epoch."
            )

    def _reset_eval_dataloader(
        self, mode: RunningStage, model: Optional["pl.LightningModule"] = None
    ) -> Tuple[List[Union[int, float]], List[DataLoader]]:
        """Generic method to reset a dataloader for evaluation (val/test/predict).

        Args:
            mode: The running stage being reset.
            model: The `LightningModule` if called outside of the trainer scope.

        Returns:
            Tuple of (list of batch counts per loader, list of dataloaders).
        """
        assert mode.evaluating or mode == RunningStage.PREDICTING
        # always get the loaders first so we can count how many there are
        dataloaders = self.request_dataloader(mode, model=model)
        if not isinstance(dataloaders, list):
            dataloaders = [dataloaders]
        # when overfitting, use the training loader as val and test
        # duplicate it the numb of times needed to match the train loaders
        if self.overfit_batches > 0:
            train_dataloader = self.request_dataloader(RunningStage.TRAINING, model=model)
            dataloaders = [deepcopy(train_dataloader) for _ in range(len(dataloaders))]
        for loader_i in range(len(dataloaders)):
            loader = dataloaders[loader_i]
            if hasattr(loader, "sampler") and isinstance(loader.sampler, RandomSampler):
                # when overfitting, the dataloader should not have sampler
                if self.overfit_batches > 0 and mode.evaluating:
                    rank_zero_warn(
                        "You requested to overfit but enabled val/test dataloader shuffling."
                        " We are turning it off for you."
                    )
                    dataloaders[loader_i] = self.replace_sampler(loader, SequentialSampler(loader.dataset), mode=mode)
                else:
                    rank_zero_warn(
                        f"Your `{mode.dataloader_prefix}_dataloader` has `shuffle=True`,"
                        "it is strongly recommended that you turn this off for val/test/predict dataloaders."
                    )
        if any(dl is None for dl in dataloaders):
            rank_zero_warn("One of given dataloaders is None and it will be skipped.")
        # add samplers
        dataloaders = [self.auto_add_sampler(dl, False, mode=mode) for dl in dataloaders if dl is not None]
        # add worker_init_fn for correct seeding in worker processes
        apply_to_collection(dataloaders, dtype=DataLoader, function=self.auto_add_worker_init_fn)
        loader_num_batches = []
        # determine number of batches
        # datasets could be none, 1 or 2+
        if len(dataloaders) != 0:
            for i, dataloader in enumerate(dataloaders):
                num_batches = len(dataloader) if has_len(dataloader) else float("inf")
                self._worker_check(dataloader, f"{mode.dataloader_prefix}_dataloader {i}")
                # percent or num_steps
                limit_eval_batches = getattr(self, f"limit_{mode.dataloader_prefix}_batches")
                # limit num batches either as a percent or num steps
                if isinstance(limit_eval_batches, int) or limit_eval_batches == 0.0:
                    num_batches = min(num_batches, int(limit_eval_batches))
                elif num_batches != float("inf"):
                    num_batches = int(num_batches * limit_eval_batches)
                elif limit_eval_batches != 1.0:
                    raise MisconfigurationException(
                        f"When using an IterableDataset for `limit_{mode}_batches`,"
                        f" `Trainer(limit_{mode.dataloader_prefix}_batches)` must be `0.0`, `1.0` or an int. An int k"
                        f" specifies `num_{mode.dataloader_prefix}_batches` to use."
                    )
                # a fractional limit that rounds down to zero batches is a misconfiguration
                if num_batches == 0 and limit_eval_batches > 0.0 and isinstance(limit_eval_batches, float):
                    min_pct = 1.0 / len(dataloader)
                    raise MisconfigurationException(
                        f"you requested to check {limit_eval_batches} of the `{mode.dataloader_prefix}_dataloader` but"
                        f" {limit_eval_batches}*{num_batches} < 1. Please increase the"
                        f" `limit_{mode.dataloader_prefix}_batches` flag. Try at least"
                        f" `limit_{mode.dataloader_prefix}_batches={min_pct}`"
                    )
                loader_num_batches.append(num_batches)
        return loader_num_batches, dataloaders

    def reset_val_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
        """Resets the validation dataloader and determines the number of batches.

        Args:
            model: The `LightningModule` if called outside of the trainer scope.
        """
        pl_module = self.lightning_module or model
        has_loader = is_overridden("val_dataloader", pl_module)
        has_step = is_overridden("validation_step", pl_module)
        if has_loader and has_step:
            self.num_val_batches, self.val_dataloaders = self._reset_eval_dataloader(
                RunningStage.VALIDATING, model=pl_module
            )

    def reset_test_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
        """Resets the test dataloader and determines the number of batches.

        Args:
            model: The `LightningModule` if called outside of the trainer scope.
        """
        pl_module = self.lightning_module or model
        has_loader = is_overridden("test_dataloader", pl_module)
        has_step = is_overridden("test_step", pl_module)
        if has_loader and has_step:
            self.num_test_batches, self.test_dataloaders = self._reset_eval_dataloader(
                RunningStage.TESTING, model=pl_module
            )

    def reset_predict_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
        """Resets the predict dataloader and determines the number of batches.

        Args:
            model: The `LightningModule` if called outside of the trainer scope.
        """
        pl_module = self.lightning_module or model
        has_loader = is_overridden("predict_dataloader", pl_module)
        if has_loader:
            self.num_predict_batches, self.predict_dataloaders = self._reset_eval_dataloader(
                RunningStage.PREDICTING, model=pl_module
            )

    def reset_train_val_dataloaders(self, model: Optional["pl.LightningModule"] = None) -> None:
        """Resets train and val dataloaders if none are attached to the trainer.

        The val dataloader must be initialized before training loop starts, as the training loop
        inspects the val dataloader to determine whether to run the evaluation loop.

        Args:
            model: The `LightningModule` if called outside of the trainer scope.
        """
        if self.train_dataloader is None:
            self.reset_train_dataloader(model=model)
        if self.val_dataloaders is None:
            self.reset_val_dataloader(model=model)

    def request_dataloader(
        self, stage: RunningStage, model: Optional["pl.LightningModule"] = None
    ) -> Union[DataLoader, List[DataLoader]]:
        """Handles downloading data in the GPU or TPU case.

        Returns:
            The dataloader
        """
        hook = f"{stage.dataloader_prefix}_dataloader"
        self.call_hook("on_" + hook, pl_module=model)
        dataloader = self.call_hook(hook, pl_module=model)
        if isinstance(dataloader, tuple):
            dataloader = list(dataloader)
        # sync all processes before continuing
        self.training_type_plugin.barrier("get_dataloaders")
        return dataloader

    @staticmethod
    def _add_sampler_metadata_collate(dataloader: DataLoader) -> None:
        """Wrap default collate function to retrieve ``FastForwardSampler`` state dict
        when fault tolerant training is enabled."""
        dataloader.collate_fn = partial(
            _capture_metadata_collate, dataset=dataloader.dataset, default_collate=dataloader.collate_fn
        )
| true | true |
1c2d551991c207d885140cc9edd7b12e8dc576cb | 111 | py | Python | src/init/init.py | zhengruohuang/toddler | 0d7bde9aaf1fab8fed5f37973eeda9eaa100bd7a | [
"BSD-2-Clause"
] | 80 | 2016-03-27T04:26:57.000Z | 2021-12-24T08:27:55.000Z | src/init/init.py | zhengruohuang/toddler | 0d7bde9aaf1fab8fed5f37973eeda9eaa100bd7a | [
"BSD-2-Clause"
] | 1 | 2016-12-08T18:08:20.000Z | 2018-02-23T02:51:35.000Z | src/init/init.py | zhengruohuang/toddler | 0d7bde9aaf1fab8fed5f37973eeda9eaa100bd7a | [
"BSD-2-Clause"
] | 11 | 2017-05-09T01:42:07.000Z | 2020-02-13T13:56:36.000Z | print('hello world line 1')
print('hello world line 2')
print('hello world line 3')
print('hello world line 4') | 27.75 | 27 | 0.720721 | print('hello world line 1')
print('hello world line 2')
print('hello world line 3')
print('hello world line 4') | true | true |
1c2d57ae90b266553dcd84e80e7c0e9c44ac6eb5 | 3,615 | py | Python | selfdrive/controls/lib/lane_planner.py | egreen-park/crwusiz | da528ebea9945f5a2a0a286467bb1c349aa7472f | [
"MIT"
] | null | null | null | selfdrive/controls/lib/lane_planner.py | egreen-park/crwusiz | da528ebea9945f5a2a0a286467bb1c349aa7472f | [
"MIT"
] | null | null | null | selfdrive/controls/lib/lane_planner.py | egreen-park/crwusiz | da528ebea9945f5a2a0a286467bb1c349aa7472f | [
"MIT"
] | null | null | null | from common.numpy_fast import interp
import numpy as np
from selfdrive.hardware import EON, TICI
from cereal import log
# number of points in each predicted model trajectory
TRAJECTORY_SIZE = 33

# camera offset is meters from center car to camera
if EON:
    CAMERA_OFFSET = 0.10
    PATH_OFFSET = 0.0
elif TICI:
    CAMERA_OFFSET = -0.04
    PATH_OFFSET = -0.04
else:
    # unknown hardware: assume the camera is centered on the car
    CAMERA_OFFSET = 0.0
    PATH_OFFSET = 0.0
class LanePlanner:
    """Fuses the model's left/right lane-line predictions into a single path.

    Maintains slowly-adapting estimates of lane width and lane-line confidence,
    and blends a lane-centered path with the model's raw predicted path.
    """

    def __init__(self, wide_camera=False):
        # time and longitudinal-distance grids shared by both lane lines
        self.ll_t = np.zeros((TRAJECTORY_SIZE,))
        self.ll_x = np.zeros((TRAJECTORY_SIZE,))
        # lateral positions of the left / right lane lines
        self.lll_y = np.zeros((TRAJECTORY_SIZE,))
        self.rll_y = np.zeros((TRAJECTORY_SIZE,))
        self.lane_width_estimate = 3.7  # meters; running estimate
        self.lane_width_certainty = 1.0
        self.lane_width = 3.7

        # lane-line and blended-path probabilities
        self.lll_prob = 0.
        self.rll_prob = 0.
        self.d_prob = 0.

        # lane-line standard deviations reported by the model
        self.lll_std = 0.
        self.rll_std = 0.

        self.l_lane_change_prob = 0.
        self.r_lane_change_prob = 0.

        # NOTE(review): offsets are negated for the wide camera — presumably it is
        # mounted mirrored relative to the narrow camera; confirm against hardware docs.
        self.camera_offset = -CAMERA_OFFSET if wide_camera else CAMERA_OFFSET
        self.path_offset = -PATH_OFFSET if wide_camera else PATH_OFFSET

    def parse_model(self, md):
        """Pull lane lines and lane-change desire probabilities out of model output `md`."""
        if len(md.laneLines) == 4 and len(md.laneLines[0].t) == TRAJECTORY_SIZE:
            self.ll_t = (np.array(md.laneLines[1].t) + np.array(md.laneLines[2].t))/2
            # left and right ll x is the same
            self.ll_x = md.laneLines[1].x
            # only offset left and right lane lines; offsetting path does not make sense
            self.lll_y = np.array(md.laneLines[1].y) - self.camera_offset
            self.rll_y = np.array(md.laneLines[2].y) - self.camera_offset
            self.lll_prob = md.laneLineProbs[1]
            self.rll_prob = md.laneLineProbs[2]
            self.lll_std = md.laneLineStds[1]
            self.rll_std = md.laneLineStds[2]

        if len(md.meta.desireState):
            self.l_lane_change_prob = md.meta.desireState[log.LateralPlan.Desire.laneChangeLeft]
            self.r_lane_change_prob = md.meta.desireState[log.LateralPlan.Desire.laneChangeRight]

    def get_d_path(self, v_ego, path_t, path_xyz):
        """Blend the lane-centered path with the model path and return the result.

        Args:
            v_ego: current vehicle speed (m/s).
            path_t: time grid to evaluate the path on.
            path_xyz: model-predicted path; column 1 (lateral) is updated in place.
        """
        # Reduce reliance on lanelines that are too far apart or
        # will be in a few seconds
        path_xyz[:, 1] -= self.path_offset
        l_prob, r_prob = self.lll_prob, self.rll_prob
        width_pts = self.rll_y - self.lll_y
        prob_mods = []
        for t_check in [0.0, 1.5, 3.0]:
            width_at_t = interp(t_check * (v_ego + 7), self.ll_x, width_pts)
            prob_mods.append(interp(width_at_t, [4.0, 5.0], [1.0, 0.0]))
        mod = min(prob_mods)
        l_prob *= mod
        r_prob *= mod

        # Reduce reliance on uncertain lanelines
        l_std_mod = interp(self.lll_std, [.15, .3], [1.0, 0.0])
        r_std_mod = interp(self.rll_std, [.15, .3], [1.0, 0.0])
        l_prob *= l_std_mod
        r_prob *= r_std_mod

        # Find current lanewidth
        self.lane_width_certainty += 0.05 * (l_prob * r_prob - self.lane_width_certainty)
        current_lane_width = abs(self.rll_y[0] - self.lll_y[0])
        self.lane_width_estimate += 0.005 * (current_lane_width - self.lane_width_estimate)
        speed_lane_width = interp(v_ego, [0., 31.], [2.8, 3.5])
        self.lane_width = self.lane_width_certainty * self.lane_width_estimate + \
                          (1 - self.lane_width_certainty) * speed_lane_width

        clipped_lane_width = min(4.0, self.lane_width)
        path_from_left_lane = self.lll_y + clipped_lane_width / 2.0
        path_from_right_lane = self.rll_y - clipped_lane_width / 2.0

        self.d_prob = l_prob + r_prob - l_prob * r_prob
        lane_path_y = (l_prob * path_from_left_lane + r_prob * path_from_right_lane) / (l_prob + r_prob + 0.0001)
        lane_path_y_interp = np.interp(path_t, self.ll_t, lane_path_y)
        # blend lane-based path with the raw model path, weighted by lane confidence
        path_xyz[:,1] = self.d_prob * lane_path_y_interp + (1.0 - self.d_prob) * path_xyz[:,1]
        return path_xyz
| 37.268041 | 109 | 0.684371 | from common.numpy_fast import interp
import numpy as np
from selfdrive.hardware import EON, TICI
from cereal import log
TRAJECTORY_SIZE = 33
if EON:
CAMERA_OFFSET = 0.10
PATH_OFFSET = 0.0
elif TICI:
CAMERA_OFFSET = -0.04
PATH_OFFSET = -0.04
else:
CAMERA_OFFSET = 0.0
PATH_OFFSET = 0.0
class LanePlanner:
    """Fuses the model's left/right lane-line predictions into a single path.

    Maintains slowly-adapting estimates of lane width and lane-line confidence,
    and blends a lane-centered path with the model's raw predicted path.
    """

    def __init__(self, wide_camera=False):
        # time and longitudinal-distance grids shared by both lane lines
        self.ll_t = np.zeros((TRAJECTORY_SIZE,))
        self.ll_x = np.zeros((TRAJECTORY_SIZE,))
        # lateral positions of the left / right lane lines
        self.lll_y = np.zeros((TRAJECTORY_SIZE,))
        self.rll_y = np.zeros((TRAJECTORY_SIZE,))
        self.lane_width_estimate = 3.7  # meters; running estimate
        self.lane_width_certainty = 1.0
        self.lane_width = 3.7

        # lane-line and blended-path probabilities
        self.lll_prob = 0.
        self.rll_prob = 0.
        self.d_prob = 0.

        # lane-line standard deviations reported by the model
        self.lll_std = 0.
        self.rll_std = 0.

        self.l_lane_change_prob = 0.
        self.r_lane_change_prob = 0.

        # NOTE(review): offsets are negated for the wide camera — presumably it is
        # mounted mirrored relative to the narrow camera; confirm against hardware docs.
        self.camera_offset = -CAMERA_OFFSET if wide_camera else CAMERA_OFFSET
        self.path_offset = -PATH_OFFSET if wide_camera else PATH_OFFSET

    def parse_model(self, md):
        """Pull lane lines and lane-change desire probabilities out of model output `md`."""
        if len(md.laneLines) == 4 and len(md.laneLines[0].t) == TRAJECTORY_SIZE:
            # left and right lane lines share the same time/x grids
            self.ll_t = (np.array(md.laneLines[1].t) + np.array(md.laneLines[2].t))/2
            self.ll_x = md.laneLines[1].x
            # only offset the lane lines; the path itself is offset in get_d_path
            self.lll_y = np.array(md.laneLines[1].y) - self.camera_offset
            self.rll_y = np.array(md.laneLines[2].y) - self.camera_offset
            self.lll_prob = md.laneLineProbs[1]
            self.rll_prob = md.laneLineProbs[2]
            self.lll_std = md.laneLineStds[1]
            self.rll_std = md.laneLineStds[2]

        if len(md.meta.desireState):
            self.l_lane_change_prob = md.meta.desireState[log.LateralPlan.Desire.laneChangeLeft]
            self.r_lane_change_prob = md.meta.desireState[log.LateralPlan.Desire.laneChangeRight]

    def get_d_path(self, v_ego, path_t, path_xyz):
        """Blend the lane-centered path with the model path and return the result.

        Args:
            v_ego: current vehicle speed (m/s).
            path_t: time grid to evaluate the path on.
            path_xyz: model-predicted path; column 1 (lateral) is updated in place.
        """
        path_xyz[:, 1] -= self.path_offset
        l_prob, r_prob = self.lll_prob, self.rll_prob
        width_pts = self.rll_y - self.lll_y
        prob_mods = []
        # reduce reliance on lanelines that are (or soon will be) implausibly far apart
        for t_check in [0.0, 1.5, 3.0]:
            width_at_t = interp(t_check * (v_ego + 7), self.ll_x, width_pts)
            prob_mods.append(interp(width_at_t, [4.0, 5.0], [1.0, 0.0]))
        mod = min(prob_mods)
        l_prob *= mod
        r_prob *= mod

        # reduce reliance on uncertain lanelines
        l_std_mod = interp(self.lll_std, [.15, .3], [1.0, 0.0])
        r_std_mod = interp(self.rll_std, [.15, .3], [1.0, 0.0])
        l_prob *= l_std_mod
        r_prob *= r_std_mod

        # update the running lane-width estimate; fall back to a speed-based width
        self.lane_width_certainty += 0.05 * (l_prob * r_prob - self.lane_width_certainty)
        current_lane_width = abs(self.rll_y[0] - self.lll_y[0])
        self.lane_width_estimate += 0.005 * (current_lane_width - self.lane_width_estimate)
        speed_lane_width = interp(v_ego, [0., 31.], [2.8, 3.5])
        self.lane_width = self.lane_width_certainty * self.lane_width_estimate + \
                          (1 - self.lane_width_certainty) * speed_lane_width

        clipped_lane_width = min(4.0, self.lane_width)
        path_from_left_lane = self.lll_y + clipped_lane_width / 2.0
        path_from_right_lane = self.rll_y - clipped_lane_width / 2.0

        self.d_prob = l_prob + r_prob - l_prob * r_prob
        lane_path_y = (l_prob * path_from_left_lane + r_prob * path_from_right_lane) / (l_prob + r_prob + 0.0001)
        lane_path_y_interp = np.interp(path_t, self.ll_t, lane_path_y)
        # blend lane-based path with the raw model path, weighted by lane confidence
        path_xyz[:,1] = self.d_prob * lane_path_y_interp + (1.0 - self.d_prob) * path_xyz[:,1]
        return path_xyz
| true | true |
1c2d5983cf93611a62edd17dcf520c868260654e | 3,865 | py | Python | tests/model_fields/test_decimalfield.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/model_fields/test_decimalfield.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/model_fields/test_decimalfield.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | import unittest
from decimal import Decimal
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection, models
from django.test import TestCase
from .models import BigD, Foo
class DecimalFieldTests(TestCase):
    """Tests for the model-level DecimalField: conversion, validation, and DB round trips."""

    def test_to_python(self):
        """to_python() converts ints, strings, and floats to Decimal."""
        field = models.DecimalField(max_digits=4, decimal_places=2)
        self.assertEqual(field.to_python(3), Decimal('3'))
        self.assertEqual(field.to_python('3.14'), Decimal('3.14'))
        # Floats are converted while honoring max_digits.
        self.assertEqual(field.to_python(3.1415926535897), Decimal('3.142'))
        self.assertEqual(field.to_python(2.4), Decimal('2.400'))
        # The default rounding mode is ROUND_HALF_EVEN.
        self.assertEqual(field.to_python(2.0625), Decimal('2.062'))
        self.assertEqual(field.to_python(2.1875), Decimal('2.188'))
        with self.assertRaisesMessage(ValidationError, "'abc' value must be a decimal number."):
            field.to_python('abc')

    def test_default(self):
        """A Decimal default is returned unchanged by get_default()."""
        field = models.DecimalField(default=Decimal('0.00'))
        self.assertEqual(field.get_default(), Decimal('0.00'))

    def test_get_prep_value(self):
        """get_prep_value() passes None through and coerces strings to Decimal."""
        field = models.DecimalField(max_digits=5, decimal_places=1)
        self.assertIsNone(field.get_prep_value(None))
        self.assertEqual(field.get_prep_value('2.4'), Decimal('2.4'))

    def test_filter_with_strings(self):
        """
        Should be able to filter decimal fields using strings (#8023).
        """
        instance = Foo.objects.create(a='abc', d=Decimal('12.34'))
        self.assertEqual(list(Foo.objects.filter(d='12.34')), [instance])

    def test_save_without_float_conversion(self):
        """
        Ensure decimals don't go through a corrupting float conversion during
        save (#5079).
        """
        obj = BigD(d='12.9')
        obj.save()
        self.assertEqual(BigD.objects.get(pk=obj.pk).d, Decimal('12.9'))

    @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite stores values rounded to 15 significant digits.')
    def test_fetch_from_db_without_float_rounding(self):
        """A high-precision value survives a round trip through the database."""
        obj = BigD.objects.create(d=Decimal('.100000000000000000000000000005'))
        obj.refresh_from_db()
        self.assertEqual(obj.d, Decimal('.100000000000000000000000000005'))

    def test_lookup_really_big_value(self):
        """
        Really big values can be used in a filter statement.
        """
        # This should not crash.
        Foo.objects.filter(d__gte=100000000000)

    def test_max_digits_validation(self):
        """A value exceeding max_digits fails validation with the expected message."""
        field = models.DecimalField(max_digits=2)
        msg = validators.DecimalValidator.messages['max_digits'] % {'max': 2}
        with self.assertRaisesMessage(ValidationError, msg):
            field.clean(100, None)

    def test_max_decimal_places_validation(self):
        """A value with too many decimal places fails validation."""
        field = models.DecimalField(decimal_places=1)
        msg = validators.DecimalValidator.messages['max_decimal_places'] % {'max': 1}
        with self.assertRaisesMessage(ValidationError, msg):
            field.clean(Decimal('0.99'), None)

    def test_max_whole_digits_validation(self):
        """A value with too many whole digits fails validation."""
        field = models.DecimalField(max_digits=3, decimal_places=1)
        msg = validators.DecimalValidator.messages['max_whole_digits'] % {'max': 2}
        with self.assertRaisesMessage(ValidationError, msg):
            field.clean(Decimal('999'), None)

    def test_roundtrip_with_trailing_zeros(self):
        """Trailing zeros in the fractional part aren't truncated."""
        obj = Foo.objects.create(a='bar', d=Decimal('8.320'))
        obj.refresh_from_db()
        self.assertEqual(obj.d.compare_total(Decimal('8.320')), Decimal('0'))
| 42.944444 | 110 | 0.662096 | import unittest
from decimal import Decimal
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection, models
from django.test import TestCase
from .models import BigD, Foo
class DecimalFieldTests(TestCase):
    """Unit tests for django.db.models.DecimalField: Python-value
    conversion, defaults, validation and database round-tripping."""
    def test_to_python(self):
        f = models.DecimalField(max_digits=4, decimal_places=2)
        self.assertEqual(f.to_python(3), Decimal('3'))
        self.assertEqual(f.to_python('3.14'), Decimal('3.14'))
        # Floats are converted and rounded to max_digits
        # (default rounding is ROUND_HALF_EVEN).
        self.assertEqual(f.to_python(3.1415926535897), Decimal('3.142'))
        self.assertEqual(f.to_python(2.4), Decimal('2.400'))
        self.assertEqual(f.to_python(2.0625), Decimal('2.062'))
        self.assertEqual(f.to_python(2.1875), Decimal('2.188'))
        # Non-numeric input raises ValidationError with this exact message.
        msg = "'abc' value must be a decimal number."
        with self.assertRaisesMessage(ValidationError, msg):
            f.to_python('abc')
    def test_default(self):
        f = models.DecimalField(default=Decimal('0.00'))
        self.assertEqual(f.get_default(), Decimal('0.00'))
    def test_get_prep_value(self):
        f = models.DecimalField(max_digits=5, decimal_places=1)
        # None passes through unchanged; strings are coerced to Decimal.
        self.assertIsNone(f.get_prep_value(None))
        self.assertEqual(f.get_prep_value('2.4'), Decimal('2.4'))
    def test_filter_with_strings(self):
        # Decimal fields are filterable with string values.
        foo = Foo.objects.create(a='abc', d=Decimal('12.34'))
        self.assertEqual(list(Foo.objects.filter(d='12.34')), [foo])
    def test_save_without_float_conversion(self):
        # The saved value must not be corrupted by a float conversion.
        bd = BigD(d='12.9')
        bd.save()
        bd = BigD.objects.get(pk=bd.pk)
        self.assertEqual(bd.d, Decimal('12.9'))
    @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite stores values rounded to 15 significant digits.')
    def test_fetch_from_db_without_float_rounding(self):
        # 30 significant digits survive a save/refresh round trip.
        big_decimal = BigD.objects.create(d=Decimal('.100000000000000000000000000005'))
        big_decimal.refresh_from_db()
        self.assertEqual(big_decimal.d, Decimal('.100000000000000000000000000005'))
    def test_lookup_really_big_value(self):
        # Filtering on a very large value should not crash.
        Foo.objects.filter(d__gte=100000000000)
    def test_max_digits_validation(self):
        field = models.DecimalField(max_digits=2)
        expected_message = validators.DecimalValidator.messages['max_digits'] % {'max': 2}
        with self.assertRaisesMessage(ValidationError, expected_message):
            field.clean(100, None)
    def test_max_decimal_places_validation(self):
        field = models.DecimalField(decimal_places=1)
        expected_message = validators.DecimalValidator.messages['max_decimal_places'] % {'max': 1}
        with self.assertRaisesMessage(ValidationError, expected_message):
            field.clean(Decimal('0.99'), None)
    def test_max_whole_digits_validation(self):
        field = models.DecimalField(max_digits=3, decimal_places=1)
        expected_message = validators.DecimalValidator.messages['max_whole_digits'] % {'max': 2}
        with self.assertRaisesMessage(ValidationError, expected_message):
            field.clean(Decimal('999'), None)
    def test_roundtrip_with_trailing_zeros(self):
        # Trailing zeros in the fractional part must not be truncated;
        # compare_total() distinguishes 8.32 from 8.320 (0 means identical).
        obj = Foo.objects.create(a='bar', d=Decimal('8.320'))
        obj.refresh_from_db()
        self.assertEqual(obj.d.compare_total(Decimal('8.320')), Decimal('0'))
| true | true |
1c2d5c01ca1dab4d07f601f9c346d1cf713a4ed6 | 4,689 | py | Python | yt_dlp/extractor/keezmovies.py | YuanHsing/yt-dlp | 38d86f4d45cf2b764f79141c602356fbb426a4b6 | [
"Unlicense"
] | 1 | 2021-12-13T14:12:47.000Z | 2021-12-13T14:12:47.000Z | yt_dlp/extractor/keezmovies.py | YuanHsing/yt-dlp | 38d86f4d45cf2b764f79141c602356fbb426a4b6 | [
"Unlicense"
] | null | null | null | yt_dlp/extractor/keezmovies.py | YuanHsing/yt-dlp | 38d86f4d45cf2b764f79141c602356fbb426a4b6 | [
"Unlicense"
] | null | null | null | import re
from .common import InfoExtractor
from ..aes import aes_decrypt_text
from ..compat import compat_urllib_parse_unquote
from ..utils import (
determine_ext,
ExtractorError,
format_field,
int_or_none,
str_to_int,
strip_or_none,
url_or_none,
)
class KeezMoviesIE(InfoExtractor):
    # Extractor for keezmovies.com video pages.
    _VALID_URL = r'https?://(?:www\.)?keezmovies\.com/video/(?:(?P<display_id>[^/]+)-)?(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.keezmovies.com/video/arab-wife-want-it-so-bad-i-see-she-thirsty-and-has-tiny-money-18070681',
        'md5': '2ac69cdb882055f71d82db4311732a1a',
        'info_dict': {
            'id': '18070681',
            'display_id': 'arab-wife-want-it-so-bad-i-see-she-thirsty-and-has-tiny-money',
            'ext': 'mp4',
            'title': 'Arab wife want it so bad I see she thirsty and has tiny money.',
            'thumbnail': None,
            'view_count': int,
            'age_limit': 18,
        }
    }, {
        'url': 'http://www.keezmovies.com/video/18070681',
        'only_matching': True,
    }]
    def _extract_info(self, url, fatal=True):
        """Download the video page and collect formats and metadata.

        Returns a ``(webpage, info_dict)`` tuple so callers can scrape
        additional data from the page.  With ``fatal=False``, errors
        during format sorting are suppressed and ``info_dict['formats']``
        may end up empty.
        """
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        display_id = (mobj.group('display_id')
                      if 'display_id' in mobj.groupdict()
                      else None) or mobj.group('id')
        # Pre-set the age_verified cookie so the age-gated page content is
        # served directly.
        webpage = self._download_webpage(
            url, display_id, headers={'Cookie': 'age_verified=1'})
        formats = []
        format_urls = set()
        title = None
        thumbnail = None
        duration = None
        encrypted = False
        # Register one downloadable format; tbr/height are parsed from the
        # URL path, and format_urls guards against duplicates.
        def extract_format(format_url, height=None):
            format_url = url_or_none(format_url)
            if not format_url or not format_url.startswith(('http', '//')):
                return
            if format_url in format_urls:
                return
            format_urls.add(format_url)
            tbr = int_or_none(self._search_regex(
                r'[/_](\d+)[kK][/_]', format_url, 'tbr', default=None))
            if not height:
                height = int_or_none(self._search_regex(
                    r'[/_](\d+)[pP][/_]', format_url, 'height', default=None))
            if encrypted:
                # NOTE(review): decrypts the closure's video_url rather than
                # format_url — matches upstream behaviour; confirm before
                # changing.
                format_url = aes_decrypt_text(
                    video_url, title, 32).decode('utf-8')
            formats.append({
                'url': format_url,
                'format_id': format_field(height, None, '%dp'),
                'height': height,
                'tbr': tbr,
            })
        # Most metadata lives in the page's flashvars JS object.
        flashvars = self._parse_json(
            self._search_regex(
                r'flashvars\s*=\s*({.+?});', webpage,
                'flashvars', default='{}'),
            display_id, fatal=False)
        if flashvars:
            title = flashvars.get('video_title')
            thumbnail = flashvars.get('image_url')
            duration = int_or_none(flashvars.get('video_duration'))
            encrypted = flashvars.get('encrypted') is True
            for key, value in flashvars.items():
                mobj = re.search(r'quality_(\d+)[pP]', key)
                if mobj:
                    extract_format(value, int(mobj.group(1)))
            video_url = flashvars.get('video_url')
            if video_url and determine_ext(video_url, None):
                extract_format(video_url)
        # Fallback: direct JS assignment of video_url on the page.
        video_url = self._html_search_regex(
            r'flashvars\.video_url\s*=\s*(["\'])(?P<url>http.+?)\1',
            webpage, 'video url', default=None, group='url')
        if video_url:
            extract_format(compat_urllib_parse_unquote(video_url))
        if not formats:
            if 'title="This video is no longer available"' in webpage:
                self.raise_no_formats(
                    'Video %s is no longer available' % video_id, expected=True)
        try:
            self._sort_formats(formats)
        except ExtractorError:
            if fatal:
                raise
        if not title:
            title = self._html_search_regex(
                r'<h1[^>]*>([^<]+)', webpage, 'title')
        return webpage, {
            'id': video_id,
            'display_id': display_id,
            'title': strip_or_none(title),
            'thumbnail': thumbnail,
            'duration': duration,
            'age_limit': 18,
            'formats': formats,
        }
    def _real_extract(self, url):
        webpage, info = self._extract_info(url, fatal=False)
        if not info['formats']:
            # No formats found: hand the URL to the generic extractor.
            return self.url_result(url, 'Generic')
        info['view_count'] = str_to_int(self._search_regex(
            r'<b>([\d,.]+)</b> Views?', webpage, 'view count', fatal=False))
        return info
| 35.255639 | 121 | 0.538494 | import re
from .common import InfoExtractor
from ..aes import aes_decrypt_text
from ..compat import compat_urllib_parse_unquote
from ..utils import (
determine_ext,
ExtractorError,
format_field,
int_or_none,
str_to_int,
strip_or_none,
url_or_none,
)
class KeezMoviesIE(InfoExtractor):
    # Extractor for keezmovies.com video pages.
    _VALID_URL = r'https?://(?:www\.)?keezmovies\.com/video/(?:(?P<display_id>[^/]+)-)?(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.keezmovies.com/video/arab-wife-want-it-so-bad-i-see-she-thirsty-and-has-tiny-money-18070681',
        'md5': '2ac69cdb882055f71d82db4311732a1a',
        'info_dict': {
            'id': '18070681',
            'display_id': 'arab-wife-want-it-so-bad-i-see-she-thirsty-and-has-tiny-money',
            'ext': 'mp4',
            'title': 'Arab wife want it so bad I see she thirsty and has tiny money.',
            'thumbnail': None,
            'view_count': int,
            'age_limit': 18,
        }
    }, {
        'url': 'http://www.keezmovies.com/video/18070681',
        'only_matching': True,
    }]
    def _extract_info(self, url, fatal=True):
        """Download the video page and collect formats and metadata.

        Returns a ``(webpage, info_dict)`` tuple so callers can scrape
        additional data from the page.  With ``fatal=False``, errors
        during format sorting are suppressed and ``info_dict['formats']``
        may end up empty.
        """
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        display_id = (mobj.group('display_id')
                      if 'display_id' in mobj.groupdict()
                      else None) or mobj.group('id')
        # Pre-set the age_verified cookie so the age-gated page content is
        # served directly.
        webpage = self._download_webpage(
            url, display_id, headers={'Cookie': 'age_verified=1'})
        formats = []
        format_urls = set()
        title = None
        thumbnail = None
        duration = None
        encrypted = False
        # Register one downloadable format; tbr/height are parsed from the
        # URL path, and format_urls guards against duplicates.
        def extract_format(format_url, height=None):
            format_url = url_or_none(format_url)
            if not format_url or not format_url.startswith(('http', '//')):
                return
            if format_url in format_urls:
                return
            format_urls.add(format_url)
            tbr = int_or_none(self._search_regex(
                r'[/_](\d+)[kK][/_]', format_url, 'tbr', default=None))
            if not height:
                height = int_or_none(self._search_regex(
                    r'[/_](\d+)[pP][/_]', format_url, 'height', default=None))
            if encrypted:
                # NOTE(review): decrypts the closure's video_url rather than
                # format_url — matches upstream behaviour; confirm before
                # changing.
                format_url = aes_decrypt_text(
                    video_url, title, 32).decode('utf-8')
            formats.append({
                'url': format_url,
                'format_id': format_field(height, None, '%dp'),
                'height': height,
                'tbr': tbr,
            })
        # Most metadata lives in the page's flashvars JS object.
        flashvars = self._parse_json(
            self._search_regex(
                r'flashvars\s*=\s*({.+?});', webpage,
                'flashvars', default='{}'),
            display_id, fatal=False)
        if flashvars:
            title = flashvars.get('video_title')
            thumbnail = flashvars.get('image_url')
            duration = int_or_none(flashvars.get('video_duration'))
            encrypted = flashvars.get('encrypted') is True
            for key, value in flashvars.items():
                mobj = re.search(r'quality_(\d+)[pP]', key)
                if mobj:
                    extract_format(value, int(mobj.group(1)))
            video_url = flashvars.get('video_url')
            if video_url and determine_ext(video_url, None):
                extract_format(video_url)
        # Fallback: direct JS assignment of video_url on the page.
        video_url = self._html_search_regex(
            r'flashvars\.video_url\s*=\s*(["\'])(?P<url>http.+?)\1',
            webpage, 'video url', default=None, group='url')
        if video_url:
            extract_format(compat_urllib_parse_unquote(video_url))
        if not formats:
            if 'title="This video is no longer available"' in webpage:
                self.raise_no_formats(
                    'Video %s is no longer available' % video_id, expected=True)
        try:
            self._sort_formats(formats)
        except ExtractorError:
            if fatal:
                raise
        if not title:
            title = self._html_search_regex(
                r'<h1[^>]*>([^<]+)', webpage, 'title')
        return webpage, {
            'id': video_id,
            'display_id': display_id,
            'title': strip_or_none(title),
            'thumbnail': thumbnail,
            'duration': duration,
            'age_limit': 18,
            'formats': formats,
        }
    def _real_extract(self, url):
        webpage, info = self._extract_info(url, fatal=False)
        if not info['formats']:
            # No formats found: hand the URL to the generic extractor.
            return self.url_result(url, 'Generic')
        info['view_count'] = str_to_int(self._search_regex(
            r'<b>([\d,.]+)</b> Views?', webpage, 'view count', fatal=False))
        return info
| true | true |
1c2d5c38ccc321103a599f13db5a9dd96360f17f | 42,056 | py | Python | mathics/builtin/algebra.py | Carreau/Mathics | 1706e994ae3db29ccbe7fad53d933ea5389c3be5 | [
"Apache-2.0"
] | 1 | 2021-04-11T10:49:46.000Z | 2021-04-11T10:49:46.000Z | mathics/builtin/algebra.py | jhbadger/Mathics | 395f307e758a84247b891f887368d70202b33254 | [
"Apache-2.0"
] | null | null | null | mathics/builtin/algebra.py | jhbadger/Mathics | 395f307e758a84247b891f887368d70202b33254 | [
"Apache-2.0"
] | 1 | 2018-12-21T08:04:18.000Z | 2018-12-21T08:04:18.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from mathics.builtin.base import Builtin
from mathics.core.expression import Expression, Integer, Symbol, Atom, Number
from mathics.core.convert import from_sympy, sympy_symbol_prefix
import sympy
import mpmath
from six.moves import range
def sympy_factor(expr_sympy):
    """Factor a sympy expression.

    Fractions are factored numerator and denominator separately after
    combining terms with ``sympy.together``.  On ``sympy.PolynomialError``
    the input is returned unchanged.
    """
    try:
        combined = sympy.together(expr_sympy)
        numer, denom = combined.as_numer_denom()
        if denom != 1:
            return sympy.factor(numer) / sympy.factor(denom)
        return sympy.factor(expr_sympy)
    except sympy.PolynomialError:
        return expr_sympy
def cancel(expr):
    """Cancel common factors in numerators and denominators of ``expr``.

    ``Plus`` expressions are threaded leaf by leaf; other expressions are
    converted to sympy, cancelled and re-factored (``sympy.cancel`` pulls
    rational factors out, so we factor again afterwards).

    Returns ``None`` when ``expr`` cannot be converted to sympy, and the
    unchanged expression for input sympy cannot handle as a polynomial
    (e.g. non-commutative expressions).
    """
    if expr.has_form('Plus', None):
        # Thread over sums.  cancel() returns None when a leaf has no
        # sympy counterpart; keep the original leaf in that case instead
        # of splicing None into the rebuilt expression.
        new_leaves = []
        for leaf in expr.leaves:
            cancelled = cancel(leaf)
            new_leaves.append(leaf if cancelled is None else cancelled)
        return Expression('Plus', *new_leaves)
    else:
        try:
            result = expr.to_sympy()
            if result is None:
                return None
            result = sympy.cancel(result)
            # cancel factors out rationals, so we factor them again
            result = sympy_factor(result)
            return from_sympy(result)
        except sympy.PolynomialError:
            # e.g. for non-commutative expressions
            return expr
def expand(expr, numer=True, denom=False, deep=False, **kwargs):
    """Expand products and powers in ``expr`` via ``sympy.Expr.expand``.

    Sub-expressions sympy should not see (non-arithmetic heads and,
    when ``denom`` is False, negative integer powers) are replaced by
    numbered placeholder symbols before conversion and restored
    afterwards.  ``kwargs`` must contain ``modulus`` (int or None) and is
    merged into the sympy expand hints.
    """
    if kwargs['modulus'] is not None and kwargs['modulus'] <= 0:
        return Integer(0)
    sub_exprs = []
    # Stash expr and return a numbered placeholder sympy symbol for it.
    def store_sub_expr(expr):
        sub_exprs.append(expr)
        result = sympy.Symbol(sympy_symbol_prefix + str(len(sub_exprs) - 1))
        return result
    # Inverse of store_sub_expr: the placeholder comes back (via
    # from_sympy) as a Symbol named 'System`<index>'.
    def get_sub_expr(expr):
        name = expr.get_name()
        assert isinstance(expr, Symbol) and name.startswith('System`')
        i = int(name[len('System`'):])
        return sub_exprs[i]
    def convert_sympy(expr):
        "converts top-level to sympy"
        leaves = expr.get_leaves()
        if isinstance(expr, Integer):
            return sympy.Integer(expr.get_int_value())
        if expr.has_form('Power', 2):
            # sympy won't expand `(a + b) / x` to `a / x + b / x` if denom is False
            # if denom is False we store negative powers to prevent this.
            n1 = leaves[1].get_int_value()
            if not denom and n1 is not None and n1 < 0:
                return store_sub_expr(expr)
            return sympy.Pow(*[convert_sympy(leaf) for leaf in leaves])
        elif expr.has_form('Times', 2, None):
            return sympy.Mul(*[convert_sympy(leaf) for leaf in leaves])
        elif expr.has_form('Plus', 2, None):
            return sympy.Add(*[convert_sympy(leaf) for leaf in leaves])
        else:
            # Anything else is opaque to the expansion: placeholder.
            return store_sub_expr(expr)
    # Walk the converted result and swap placeholders back in.
    def unconvert_subexprs(expr):
        if expr.is_atom():
            if isinstance(expr, Symbol):
                return get_sub_expr(expr)
            else:
                return expr
        else:
            return Expression(expr.head, *[unconvert_subexprs(leaf) for leaf in expr.get_leaves()])
    sympy_expr = convert_sympy(expr)
    # Recurse with the same expansion settings.
    def _expand(expr):
        return expand(expr, numer=numer, denom=denom, deep=deep, **kwargs)
    if deep:
        # thread over everything
        for i, sub_expr,in enumerate(sub_exprs):
            if not sub_expr.is_atom():
                head = _expand(sub_expr.head) # also expand head
                leaves = sub_expr.get_leaves()
                leaves = [_expand(leaf) for leaf in leaves]
                sub_exprs[i] = Expression(head, *leaves)
    else:
        # thread over Lists etc.
        threaded_heads = ('List', 'Rule')
        for i, sub_expr in enumerate(sub_exprs):
            for head in threaded_heads:
                if sub_expr.has_form(head, None):
                    leaves = sub_expr.get_leaves()
                    leaves = [_expand(leaf) for leaf in leaves]
                    sub_exprs[i] = Expression(head, *leaves)
                    break
    # Expansion hints for sympy; kwargs (e.g. modulus) override these.
    hints = {
        'mul': True,
        'multinomial': True,
        'power_exp': False,
        'power_base': False,
        'basic': False,
        'log': False,
    }
    hints.update(kwargs)
    if numer and denom:
        # don't expand fractions when modulus is True
        if hints['modulus'] is not None:
            hints['frac'] = True
    else:
        # setting both True doesn't expand denom
        hints['numer'] = numer
        hints['denom'] = denom
    sympy_expr = sympy_expr.expand(**hints)
    result = from_sympy(sympy_expr)
    result = unconvert_subexprs(result)
    return result
def find_all_vars(expr):
    """Collect the variables occurring in ``expr``: symbols plus opaque
    non-constant sub-expressions.  A top-level ``List`` is searched
    element-wise.  Returns a set of Mathics expressions."""
    found = set()
    def visit(node, node_sympy):
        assert node_sympy is not None
        if node_sympy.is_constant():
            # Constants contribute no variables.
            return
        if node.is_symbol():
            found.add(node)
        elif node.has_form('Plus', None) or node.has_form('Times', None):
            # Recurse into sums and products.
            for leaf in node.leaves:
                leaf_sympy = leaf.to_sympy()
                if leaf_sympy is not None:
                    visit(leaf, leaf_sympy)
        elif node.has_form('Power', 2):
            base, exponent = node.leaves
            base_sympy = base.to_sympy()
            exp_sympy = exponent.to_sympy()
            if base_sympy is None or exp_sympy is None:
                return
            # Only descend into the base of rational powers.
            if not base_sympy.is_constant() and exp_sympy.is_rational:
                visit(base, base_sympy)
        elif not node.is_atom():
            # Any other compound expression counts as a variable itself.
            found.add(node)
    candidates = expr.leaves if expr.has_form('List', None) else [expr]
    for candidate in candidates:
        candidate_sympy = candidate.to_sympy()
        if candidate_sympy is not None:
            visit(candidate, candidate_sympy)
    return found
def find_exponents(expr, var):
    """
    Find all exponents of var in expr

    Returns the distinct exponents (as Mathics expressions) found in the
    terms of the expanded form of expr, sorted.
    """
    f = expr.to_sympy()
    x = var.to_sympy()
    if f is None or x is None:
        # NOTE(review): returns a set here but a sorted list below —
        # confirm callers accept both.
        return {0}
    result = set()
    for t in f.expand(power_exp=False).as_ordered_terms():
        coeff, exponent = t.as_coeff_exponent(x)
        if exponent:
            result.add(from_sympy(exponent))
        else:
            # find exponent of terms multiplied with functions: sin, cos, log, exp, ...
            # e.g: x^3 * Sin[x^2] should give 3
            muls = [term.as_coeff_mul(x)[1] if term.as_coeff_mul(x)[1] else (sympy.Integer(0),)
                    for term in coeff.as_ordered_terms()]
            expos = [term.as_coeff_exponent(x)[1] for mul in muls for term in mul]
            # Take the largest exponent over all factors of this term.
            result.add(from_sympy(sympy.Max(*[e for e in expos])))
    return sorted(result)
class Cancel(Builtin):
    """
    <dl>
    <dt>'Cancel[$expr$]'
        <dd>cancels out common factors in numerators and denominators.
    </dl>

    >> Cancel[x / x ^ 2]
     = 1 / x
    'Cancel' threads over sums:
    >> Cancel[x / x ^ 2 + y / y ^ 2]
     = 1 / x + 1 / y

    >> Cancel[f[x] / x + x * f[x] / x ^ 2]
     = 2 f[x] / x
    """

    def apply(self, expr, evaluation):
        'Cancel[expr_]'
        # All the work is done by the module-level cancel() helper.
        return cancel(expr)
class Simplify(Builtin):
    """
    <dl>
    <dt>'Simplify[$expr$]'
        <dd>simplifies $expr$.
    </dl>

    >> Simplify[2*Sin[x]^2 + 2*Cos[x]^2]
     = 2
    >> Simplify[x]
     = x
    >> Simplify[f[x]]
     = f[x]

    #> Simplify[a*x^2+b*x^2]
     = x ^ 2 (a + b)

    ## triggers TypeError in sympy.simplify
    #> x f[{y}] // Simplify
     = x f[{y}]
    """

    # Thread Simplify over lists, rules and equations before the sympy
    # round trip below is attempted.
    rules = {
        'Simplify[list_List]': 'Simplify /@ list',
        'Simplify[rule_Rule]': 'Simplify /@ rule',
        'Simplify[eq_Equal]': 'Simplify /@ eq',
    }

    def apply(self, expr, evaluation):
        'Simplify[expr_]'

        sympy_expr = expr.to_sympy()
        if sympy_expr is None:
            # No sympy counterpart; bail out.
            return
        sympy_result = sympy.simplify(sympy_expr)
        return from_sympy(sympy_result)
class Together(Builtin):
    """
    <dl>
    <dt>'Together[$expr$]'
        <dd>writes sums of fractions in $expr$ together.
    </dl>

    >> Together[a / c + b / c]
     = (a + b) / c
    'Together' operates on lists:
    >> Together[{x / (y+1) + x / (y+1)^2}]
     = {x (2 + y) / (1 + y) ^ 2}
    But it does not touch other functions:
    >> Together[f[a / c + b / c]]
     = f[a / c + b / c]

    #> f[x]/x+f[x]/x^2//Together
     = f[x] (1 + x) / x ^ 2
    """

    attributes = ['Listable']

    def apply(self, expr, evaluation):
        'Together[expr_]'

        expr_sympy = expr.to_sympy()
        if expr_sympy is None:
            return None
        result = sympy.together(expr_sympy)
        result = from_sympy(result)
        # Cancel common factors that together() may leave behind.
        result = cancel(result)
        return result
class Factor(Builtin):
    """
    <dl>
    <dt>'Factor[$expr$]'
        <dd>factors the polynomial expression $expr$.
    </dl>

    >> Factor[x ^ 2 + 2 x + 1]
     = (1 + x) ^ 2

    >> Factor[1 / (x^2+2x+1) + 1 / (x^4+2x^2+1)]
     = (2 + 2 x + 3 x ^ 2 + x ^ 4) / ((1 + x) ^ 2 (1 + x ^ 2) ^ 2)

    ## Issue659
    #> Factor[{x+x^2}]
     = {x (1 + x)}
    """

    attributes = ('Listable',)

    def apply(self, expr, evaluation):
        'Factor[expr_]'

        expr_sympy = expr.to_sympy()
        if expr_sympy is None:
            return None

        try:
            # Fractions are factored numerator and denominator
            # separately (same strategy as module-level sympy_factor).
            result = sympy.together(expr_sympy)
            numer, denom = result.as_numer_denom()
            if denom == 1:
                result = sympy.factor(expr_sympy)
            else:
                result = sympy.factor(numer) / sympy.factor(denom)
        except sympy.PolynomialError:
            # e.g. non-polynomial input: return it unevaluated.
            return expr
        return from_sympy(result)
class FactorTermsList(Builtin):
    """
    <dl>
    <dt>'FactorTermsList[poly]'
        <dd>returns a list of 2 elements.
        The first element is the numerical factor in $poly$.
        The second one is the remaining of the polynomial with numerical factor removed
    <dt>'FactorTermsList[poly, {x1, x2, ...}]'
        <dd>returns a list of factors in $poly$.
        The first element is the numerical factor in $poly$.
        The next ones are factors that are independent of variables lists which
        are created by removing each variable $xi$ from right to left.
        The last one is the remaining of polynomial after dividing $poly$ to all previous factors
    </dl>

    >> FactorTermsList[2 x^2 - 2]
     = {2, -1 + x ^ 2}
    >> FactorTermsList[x^2 - 2 x + 1]
     = {1, 1 - 2 x + x ^ 2}
    #> FactorTermsList[2 x^2 - 2, x]
     = {2, 1, -1 + x ^ 2}

    >> f = 3 (-1 + 2 x) (-1 + y) (1 - a)
     = 3 (-1 + 2 x) (-1 + y) (1 - a)
    >> FactorTermsList[f]
     = {-3, -1 + a - 2 a x - a y + 2 x + y - 2 x y + 2 a x y}
    >> FactorTermsList[f, x]
     = {-3, 1 - a - y + a y, -1 + 2 x}
    #> FactorTermsList[f, y]
     = {-3, 1 - a - 2 x + 2 a x, -1 + y}

    >> FactorTermsList[f, {x, y}]
     = {-3, -1 + a, -1 + y, -1 + 2 x}
    #> FactorTermsList[f, {y, x}]
     = {-3, -1 + a, -1 + 2 x, -1 + y}
    #> FactorTermsList[f, {x, y, z}]
     = {-3, -1 + a, 1, -1 + y, -1 + 2 x}
    #> FactorTermsList[f, {x, y, z, t}]
     = {-3, -1 + a, 1, 1, -1 + y, -1 + 2 x}
    #> FactorTermsList[f, 3/5]
     = {-3, -1 + a - 2 a x - a y + 2 x + y - 2 x y + 2 a x y}
    #> FactorTermsList[f, {x, 3, y}]
     = {-3, -1 + a, -1 + y, -1 + 2 x}

    #> FactorTermsList[f/c]
     = {-3, -1 / c + a / c - 2 a x / c - a y / c + 2 x / c + y / c - 2 x y / c + 2 a x y / c}
    #> FactorTermsList[f/c, x] == FactorTermsList[f/c, {x, y}]
     = True

    #> g = Sin[x]*Cos[y]*(1 - 2 a)
     = Cos[y] (1 - 2 a) Sin[x]
    #> FactorTermsList[g]
     = {-1, 2 a Cos[y] Sin[x] - Cos[y] Sin[x]}
    #> FactorTermsList[g, x]
     = {-1, 2 a Cos[y] Sin[x] - Cos[y] Sin[x]}
    #> FactorTermsList[g, x] == FactorTermsList[g, y] == FactorTermsList[g, {x, y}]
     = True

    #> v = 3 * y * (1 - b) a^x
     = 3 y (1 - b) a ^ x
    #> FactorTermsList[v]
     = {-3, -y a ^ x + b y a ^ x}
    #> FactorTermsList[v, x]
     = {-3, -y a ^ x + b y a ^ x}
    #> FactorTermsList[v, y]
     = {-3, b a ^ x - a ^ x, y}

    #> FactorTermsList[7]
     = {7, 1}
    #> FactorTermsList[0]
     = {1, 0}
    #> FactorTermsList[-3]
     = {-3, 1}
    #> FactorTermsList[7, {y, x}]
     = {7, 1}
    #> FactorTermsList[7, x]
     = {7, 1}
    #> FactorTermsList[7 - I, x]
     = {7 - I, 1}

    #> FactorTermsList[(x - 1) (1 + a), {c, d}]
     = {1, -1 - a + x + a x}
    #> FactorTermsList[(x - 1) (1 + a), {c, x}]
     = {1, 1 + a, -1 + x, 1}
    #> FactorTermsList[(x - 1) (1 + a), {}] == FactorTermsList[(x - 1) (1 + a)]
     = True

    #> FactorTermsList[x]
     = {1, x}
    #> FactorTermsList["x"]
     = {1, x}
    """

    # Normalize the one- and two-argument forms onto the List form below.
    rules = {
        'FactorTermsList[expr_]': 'FactorTermsList[expr, {}]',
        'FactorTermsList[expr_, var_]': 'FactorTermsList[expr, {var}]',
    }
    messages = {
        # 'poly': '`1` is not a polynomial.',
        'ivar': '`1` is not a valid variable.',
    }

    def apply_list(self, expr, vars, evaluation):
        'FactorTermsList[expr_, vars_List]'
        # Trivial numeric cases: 0 -> {1, 0}; any other number n -> {n, 1}.
        if expr == Integer(0):
            return Expression('List', Integer(1), Integer(0))
        elif isinstance(expr, Number):
            return Expression('List', expr, Integer(1))

        for x in vars.leaves:
            if not(isinstance(x, Atom)):
                return evaluation.message('CoefficientList', 'ivar', x)

        sympy_expr = expr.to_sympy()
        if sympy_expr is None:
            # No sympy form: factor out nothing.
            return Expression('List', Integer(1), expr)
        sympy_expr = sympy.together(sympy_expr)

        # Only symbols in which expr is actually a polynomial take part.
        sympy_vars = [x.to_sympy() for x in vars.leaves if isinstance(x, Symbol) and sympy_expr.is_polynomial(x.to_sympy())]

        result = []
        numer, denom = sympy_expr.as_numer_denom()
        try:
            from sympy import factor, factor_list, Poly
            if denom == 1:
                # Get numerical part
                num_coeff, num_polys = factor_list(Poly(numer))
                result.append(num_coeff)
                # Get factors are independent of sub list of variables
                if (sympy_vars and isinstance(expr, Expression)
                        and any(x.free_symbols.issubset(sympy_expr.free_symbols) for x in sympy_vars)):
                    for i in reversed(range(len(sympy_vars))):
                        numer = factor(numer) / factor(num_coeff)
                        num_coeff, num_polys = factor_list(Poly(numer), *[x for x in sympy_vars[:(i+1)]])
                        result.append(sympy.expand(num_coeff))
                # Last factor
                numer = factor(numer) / factor(num_coeff)
                result.append(sympy.expand(numer))
            else:
                # Fractions: pull the numeric coefficient out of both
                # numerator and denominator.
                num_coeff, num_polys = factor_list(Poly(numer))
                den_coeff, den_polys = factor_list(Poly(denom))
                result = [num_coeff / den_coeff, sympy.expand(factor(numer)/num_coeff / (factor(denom)/den_coeff))]
        except sympy.PolynomialError:  # MMA does not raise error for non poly
            result.append(sympy.expand(numer))
            # evaluation.message(self.get_name(), 'poly', expr)

        return Expression('List', *[from_sympy(i) for i in result])
class Apart(Builtin):
    """
    <dl>
    <dt>'Apart[$expr$]'
        <dd>writes $expr$ as a sum of individual fractions.
    <dt>'Apart[$expr$, $var$]'
        <dd>treats $var$ as the main variable.
    </dl>

    >> Apart[1 / (x^2 + 5x + 6)]
     = 1 / (2 + x) - 1 / (3 + x)

    When several variables are involved, the results can be different
    depending on the main variable:
    >> Apart[1 / (x^2 - y^2), x]
     = -1 / (2 y (x + y)) + 1 / (2 y (x - y))
    >> Apart[1 / (x^2 - y^2), y]
     = 1 / (2 x (x + y)) + 1 / (2 x (x - y))

    'Apart' is 'Listable':
    >> Apart[{1 / (x^2 + 5x + 6)}]
     = {1 / (2 + x) - 1 / (3 + x)}

    But it does not touch other expressions:
    >> Sin[1 / (x ^ 2 - y ^ 2)] // Apart
     = Sin[1 / (x ^ 2 - y ^ 2)]

    #> Attributes[f] = {HoldAll}; Apart[f[x + x]]
     = f[x + x]

    #> Attributes[f] = {}; Apart[f[x + x]]
     = f[2 x]
    """

    attributes = ['Listable']

    # The one-argument form picks the first symbol occurring in expr as
    # the main variable (or returns expr unchanged if there is none).
    rules = {
        'Apart[expr_]': (
            'Block[{vars = Cases[Level[expr, {-1}], _Symbol]},'
            ' If[Length[vars] > 0, Apart[expr, vars[[1]]], expr]]'),
    }

    def apply(self, expr, var, evaluation):
        'Apart[expr_, var_Symbol]'

        expr_sympy = expr.to_sympy()
        var_sympy = var.to_sympy()
        if expr_sympy is None or var_sympy is None:
            return None

        try:
            result = sympy.apart(expr_sympy, var_sympy)
            result = from_sympy(result)
            return result
        except sympy.PolynomialError:
            # raised e.g. for apart(sin(1/(x**2-y**2)))
            return expr
class _Expand(Builtin):
    # Shared option declarations and validation for the Expand family
    # (Expand, ExpandDenominator, ExpandAll).

    options = {
        'Trig': 'False',
        'Modulus': '0',
    }

    messages = {
        'modn': 'Value of option `1` -> `2` should be an integer.',
        'opttf': 'Value of option `1` -> `2` should be True or False.',
    }

    def convert_options(self, options, evaluation):
        """Validate the Modulus/Trig options and return them as a dict
        {'modulus': int or None, 'trig': bool}; on invalid input, issue a
        message and return its result instead."""
        modulus = options['System`Modulus']
        py_modulus = modulus.get_int_value()
        if py_modulus is None:
            return evaluation.message(self.get_name(), 'modn', Symbol('Modulus'), modulus)
        if py_modulus == 0:
            # Modulus -> 0 means "no modulus".
            py_modulus = None

        trig = options['System`Trig']
        if trig == Symbol('True'):
            py_trig = True
        elif trig == Symbol('False'):
            py_trig = False
        else:
            return evaluation.message(self.get_name(), 'opttf', Symbol('Trig'), trig)

        return {'modulus': py_modulus, 'trig': py_trig}
class Expand(_Expand):
    """
    <dl>
    <dt>'Expand[$expr$]'
        <dd>expands out positive integer powers and products of sums in $expr$.
    </dl>

    >> Expand[(x + y) ^ 3]
     = x ^ 3 + 3 x ^ 2 y + 3 x y ^ 2 + y ^ 3
    >> Expand[(a + b) (a + c + d)]
     = a ^ 2 + a b + a c + a d + b c + b d
    >> Expand[(a + b) (a + c + d) (e + f) + e a a]
     = 2 a ^ 2 e + a ^ 2 f + a b e + a b f + a c e + a c f + a d e + a d f + b c e + b c f + b d e + b d f
    >> Expand[(a + b) ^ 2 * (c + d)]
     = a ^ 2 c + a ^ 2 d + 2 a b c + 2 a b d + b ^ 2 c + b ^ 2 d
    >> Expand[(x + y) ^ 2 + x y]
     = x ^ 2 + 3 x y + y ^ 2
    >> Expand[((a + b) (c + d)) ^ 2 + b (1 + a)]
     = a ^ 2 c ^ 2 + 2 a ^ 2 c d + a ^ 2 d ^ 2 + b + a b + 2 a b c ^ 2 + 4 a b c d + 2 a b d ^ 2 + b ^ 2 c ^ 2 + 2 b ^ 2 c d + b ^ 2 d ^ 2

    'Expand' expands items in lists and rules:
    >> Expand[{4 (x + y), 2 (x + y) -> 4 (x + y)}]
     = {4 x + 4 y, 2 x + 2 y -> 4 x + 4 y}

    'Expand' does not change any other expression.
    >> Expand[Sin[x (1 + y)]]
     = Sin[x (1 + y)]

    'Expand' also works in Galois fields
    >> Expand[(1 + a)^12, Modulus -> 3]
     = 1 + a ^ 3 + a ^ 9 + a ^ 12

    >> Expand[(1 + a)^12, Modulus -> 4]
     = 1 + 2 a ^ 2 + 3 a ^ 4 + 3 a ^ 8 + 2 a ^ 10 + a ^ 12

    #> Expand[x, Modulus -> -1]  (* copy odd MMA behaviour *)
     = 0
    #> Expand[x, Modulus -> x]
     : Value of option Modulus -> x should be an integer.
     = Expand[x, Modulus -> x]

    #> a(b(c+d)+e) // Expand
     = a b c + a b d + a e

    #> (y^2)^(1/2)/(2x+2y)//Expand
     = Sqrt[y ^ 2] / (2 x + 2 y)

    ## This caused a program crash!
    #> 2(3+2x)^2/(5+x^2+3x)^3 // Expand
     = 24 x / (5 + 3 x + x ^ 2) ^ 3 + 8 x ^ 2 / (5 + 3 x + x ^ 2) ^ 3 + 18 / (5 + 3 x + x ^ 2) ^ 3
    """

    # TODO unwrap trig expressions in expand() so the following works
    """
    >> Expand[Sin[x + y], Trig -> True]
     = Cos[y] Sin[x] + Cos[x] Sin[y]
    """

    def apply(self, expr, evaluation, options):
        'Expand[expr_, OptionsPattern[Expand]]'

        kwargs = self.convert_options(options, evaluation)
        if kwargs is None:
            return
        # numer=True, denom=False: only numerators are expanded.
        return expand(expr, True, False, **kwargs)
class ExpandDenominator(_Expand):
    """
    <dl>
    <dt>'ExpandDenominator[$expr$]'
        <dd>expands out negative integer powers and products of sums in $expr$.
    </dl>

    >> ExpandDenominator[(a + b) ^ 2 / ((c + d)^2 (e + f))]
     = (a + b) ^ 2 / (c ^ 2 e + c ^ 2 f + 2 c d e + 2 c d f + d ^ 2 e + d ^ 2 f)

    ## Modulus option
    #> ExpandDenominator[1 / (x + y)^3, Modulus -> 3]
     = 1 / (x ^ 3 + y ^ 3)
    #> ExpandDenominator[1 / (x + y)^6, Modulus -> 4]
     = 1 / (x ^ 6 + 2 x ^ 5 y + 3 x ^ 4 y ^ 2 + 3 x ^ 2 y ^ 4 + 2 x y ^ 5 + y ^ 6)

    #> ExpandDenominator[2(3+2x)^2/(5+x^2+3x)^3]
     = 2 (3 + 2 x) ^ 2 / (125 + 225 x + 210 x ^ 2 + 117 x ^ 3 + 42 x ^ 4 + 9 x ^ 5 + x ^ 6)
    """

    def apply(self, expr, evaluation, options):
        'ExpandDenominator[expr_, OptionsPattern[ExpandDenominator]]'

        kwargs = self.convert_options(options, evaluation)
        if kwargs is None:
            return
        # numer=False, denom=True: only denominators are expanded.
        return expand(expr, False, True, **kwargs)
class ExpandAll(_Expand):
    """
    <dl>
    <dt>'ExpandAll[$expr$]'
        <dd>expands out negative integer powers and products of sums in $expr$.
    </dl>

    >> ExpandAll[(a + b) ^ 2 / (c + d)^2]
     = a ^ 2 / (c ^ 2 + 2 c d + d ^ 2) + 2 a b / (c ^ 2 + 2 c d + d ^ 2) + b ^ 2 / (c ^ 2 + 2 c d + d ^ 2)

    'ExpandAll' descends into sub expressions
    >> ExpandAll[(a + Sin[x (1 + y)])^2]
     = 2 a Sin[x + x y] + a ^ 2 + Sin[x + x y] ^ 2

    'ExpandAll' also expands heads
    >> ExpandAll[((1 + x)(1 + y))[x]]
     = (1 + x + y + x y)[x]

    'ExpandAll' can also work in finite fields
    >> ExpandAll[(1 + a) ^ 6 / (x + y)^3, Modulus -> 3]
     = (1 + 2 a ^ 3 + a ^ 6) / (x ^ 3 + y ^ 3)
    """

    def apply(self, expr, evaluation, options):
        'ExpandAll[expr_, OptionsPattern[ExpandAll]]'

        kwargs = self.convert_options(options, evaluation)
        if kwargs is None:
            return
        # Expand numerators AND denominators, recursing into
        # sub-expressions (deep=True).
        return expand(expr, numer=True, denom=True, deep=True, **kwargs)
class PowerExpand(Builtin):
    """
    <dl>
    <dt>'PowerExpand[$expr$]'
        <dd>expands out powers of the form '(x^y)^z' and '(x*y)^z' in $expr$.
    </dl>

    >> PowerExpand[(a ^ b) ^ c]
     = a ^ (b c)
    >> PowerExpand[(a * b) ^ c]
     = a ^ c b ^ c

    'PowerExpand' is not correct without certain assumptions:
    >> PowerExpand[(x ^ 2) ^ (1/2)]
     = x
    """

    # Purely rule-based: rewrite the power/log patterns, thread over
    # compound heads, and leave everything else untouched.
    rules = {
        'PowerExpand[(x_ ^ y_) ^ z_]': 'x ^ (y * z)',
        'PowerExpand[(x_ * y_) ^ z_]': 'x ^ z * y ^ z',
        'PowerExpand[Log[x_ ^ y_]]': 'y * Log[x]',

        'PowerExpand[x_Plus]': 'PowerExpand /@ x',
        'PowerExpand[x_Times]': 'PowerExpand /@ x',
        'PowerExpand[x_Power]': 'PowerExpand /@ x',
        'PowerExpand[x_List]': 'PowerExpand /@ x',
        'PowerExpand[x_Rule]': 'PowerExpand /@ x',
        'PowerExpand[other_]': 'other',
    }
class Numerator(Builtin):
    """
    <dl>
    <dt>'Numerator[$expr$]'
        <dd>gives the numerator in $expr$.
    </dl>

    >> Numerator[a / b]
     = a
    >> Numerator[2 / 3]
     = 2
    >> Numerator[a + b]
     = a + b
    """

    def apply(self, expr, evaluation):
        'Numerator[expr_]'

        sympy_expr = expr.to_sympy()
        if sympy_expr is None:
            return None
        # as_numer_denom() splits into (numerator, denominator).
        numer, denom = sympy_expr.as_numer_denom()
        return from_sympy(numer)
class Denominator(Builtin):
    """
    <dl>
    <dt>'Denominator[$expr$]'
        <dd>gives the denominator in $expr$.
    </dl>

    >> Denominator[a / b]
     = b
    >> Denominator[2 / 3]
     = 3
    >> Denominator[a + b]
     = 1
    """

    def apply(self, expr, evaluation):
        'Denominator[expr_]'

        sympy_expr = expr.to_sympy()
        if sympy_expr is None:
            return None
        # as_numer_denom() splits into (numerator, denominator).
        numer, denom = sympy_expr.as_numer_denom()
        return from_sympy(denom)
class Variables(Builtin):
    # This builtin is incomplete. See the failing test case below.
    """
    <dl>
    <dt>'Variables[$expr$]'
    <dd>gives a list of the variables that appear in the
    polynomial $expr$.
    </dl>
    >> Variables[a x^2 + b x + c]
     = {a, b, c, x}
    >> Variables[{a + b x, c y^2 + x/2}]
     = {a, b, c, x, y}
    >> Variables[x + Sin[y]]
     = {x, Sin[y]}
    """
    # Second string literal is a no-op statement (NOT the docstring):
    # it parks a known-failing doctest so the test runner skips it.
    """
    ## failing test case from MMA docs
    #> Variables[E^x]
     = {}
    """
    def apply(self, expr, evaluation):
        'Variables[expr_]'
        # find_all_vars returns a set of symbols and non-atomic
        # subexpressions acting as variables; wrap and sort them.
        variables = find_all_vars(expr)
        variables = Expression('List', *variables)
        variables.sort() # MMA doesn't do this
        return variables
class UpTo(Builtin):
    # Only error messages are registered here; UpTo's operational behavior
    # is presumably implemented by the functions that accept UpTo specs —
    # confirm against the rest of the module.
    messages = {
        'innf': 'Expected non-negative integer or infinity at position 1 in ``.',
        'argx': 'UpTo expects 1 argument, `1` arguments were given.'
    }
class Missing(Builtin):
    # Placeholder: registers the 'Missing' symbol with no rules or messages.
    pass
class MinimalPolynomial(Builtin):
    """
    <dl>
    <dt>'MinimalPolynomial[s, x]'
    <dd>gives the minimal polynomial in $x$ for which the algebraic number $s$ is a root.
    </dl>
    >> MinimalPolynomial[7, x]
     = -7 + x
    >> MinimalPolynomial[Sqrt[2] + Sqrt[3], x]
     = 1 - 10 x ^ 2 + x ^ 4
    >> MinimalPolynomial[Sqrt[1 + Sqrt[3]], x]
     = -2 - 2 x ^ 2 + x ^ 4
    >> MinimalPolynomial[Sqrt[I + Sqrt[6]], x]
     = 49 - 10 x ^ 4 + x ^ 8
    #> MinimalPolynomial[7a, x]
     : 7 a is not an explicit algebraic number.
     = MinimalPolynomial[7 a, x]
    #> MinimalPolynomial[3x^3 + 2x^2 + y^2 + ab, x]
     : ab + 2 x ^ 2 + 3 x ^ 3 + y ^ 2 is not an explicit algebraic number.
     = MinimalPolynomial[ab + 2 x ^ 2 + 3 x ^ 3 + y ^ 2, x]
    ## PurePoly
    #> MinimalPolynomial[Sqrt[2 + Sqrt[3]]]
     = 1 - 4 #1 ^ 2 + #1 ^ 4
    """
    attributes = ('Listable',)
    messages = {
        'nalg': '`1` is not an explicit algebraic number.',
    }
    def apply_novar(self, s, evaluation):
        'MinimalPolynomial[s_]'
        # One-argument form: produce a "pure" polynomial in the slot #1.
        x = Symbol('#1')
        return self.apply(s, x, evaluation)
    def apply(self, s, x, evaluation):
        'MinimalPolynomial[s_, x_]'
        # s must be an explicit algebraic number: reject anything containing
        # free variables, and reject Null.
        variables = find_all_vars(s)
        if len(variables) > 0:
            return evaluation.message('MinimalPolynomial', 'nalg', s)
        if s == Symbol('Null'):
            return evaluation.message('MinimalPolynomial', 'nalg', s)
        sympy_s, sympy_x = s.to_sympy(), x.to_sympy()
        if sympy_s is None or sympy_x is None:
            return None
        sympy_result = sympy.minimal_polynomial(sympy_s, sympy_x)
        return from_sympy(sympy_result)
class PolynomialQ(Builtin):
    """
    <dl>
    <dt>'PolynomialQ[expr, var]'
    <dd>returns True if $expr$ is a polynomial in $var$, and returns False otherwise.
    <dt>'PolynomialQ[expr, {var1, ...}]'
    <dd>tests whether $expr$ is a polynomial in the $vari$.
    </dl>
    ## Form 1:
    >> PolynomialQ[x^3 - 2 x/y + 3xz, x]
     = True
    >> PolynomialQ[x^3 - 2 x/y + 3xz, y]
     = False
    >> PolynomialQ[f[a] + f[a]^2, f[a]]
     = True
    ## Form 2
    >> PolynomialQ[x^2 + axy^2 - bSin[c], {x, y}]
     = True
    >> PolynomialQ[x^2 + axy^2 - bSin[c], {a, b, c}]
     = False
    #> PolynomialQ[x, x, y]
     : PolynomialQ called with 3 arguments; 1 or 2 arguments are expected.
     = PolynomialQ[x, x, y]
    ## Always return True if argument is Null
    #> PolynomialQ[x^3 - 2 x/y + 3xz,]
     : Warning: comma encountered with no adjacent expression. The expression will be treated as Null (line 1 of "<test>").
     = True
    #> PolynomialQ[, {x, y, z}]
     : Warning: comma encountered with no adjacent expression. The expression will be treated as Null (line 1 of "<test>").
     = True
    #> PolynomialQ[, ]
     : Warning: comma encountered with no adjacent expression. The expression will be treated as Null (line 1 of "<test>").
     : Warning: comma encountered with no adjacent expression. The expression will be treated as Null (line 1 of "<test>").
     = True
    ## TODO: MMA and Sympy handle these cases differently
    ## #> PolynomialQ[x^(1/2) + 6xyz]
    ##  : No variable is not supported in PolynomialQ.
    ##  = True
    ## #> PolynomialQ[x^(1/2) + 6xyz, {}]
    ##  : No variable is not supported in PolynomialQ.
    ##  = True
    ## #> PolynomialQ[x^3 - 2 x/y + 3xz]
    ##  : No variable is not supported in PolynomialQ.
    ##  = False
    ## #> PolynomialQ[x^3 - 2 x/y + 3xz, {}]
    ##  : No variable is not supported in PolynomialQ.
    ##  = False
    """
    messages = {
        'argt': 'PolynomialQ called with `1` arguments; 1 or 2 arguments are expected.',
        'novar': 'No variable is not supported in PolynomialQ.',
    }
    def apply(self, expr, v, evaluation):
        'PolynomialQ[expr_, v___]'
        # Null expr or Null variable is treated as a polynomial (True) to
        # match the comma-warning doctests above.
        if expr == Symbol('Null'): return Symbol('True')
        v = v.get_sequence()
        if len(v) > 1: return evaluation.message('PolynomialQ', 'argt', Integer(len(v)+1))
        elif len(v) == 0: return evaluation.message('PolynomialQ', 'novar')
        var = v[0]
        if var == Symbol('Null'): return Symbol('True')
        elif var.has_form('List', None):
            if len(var.leaves) == 0: return evaluation.message('PolynomialQ', 'novar')
            sympy_var = [x.to_sympy() for x in var.leaves]
        else:
            sympy_var = [var.to_sympy()]
        # Final verdict comes from SymPy's is_polynomial over the variables.
        sympy_expr = expr.to_sympy()
        sympy_result = sympy_expr.is_polynomial(*[x for x in sympy_var])
        return Symbol('True') if sympy_result else Symbol('False')
# Get a coefficient of form in an expression
def _coefficient(name, expr, form, n, evaluation):
    """Shared backend for Coefficient and CoefficientList: return the
    coefficient of form**n in expr as a Mathics expression.

    `name` is the calling builtin's name, used to address error messages.
    Null arguments yield 0; a non-symbolic `form` raises the 'ivar' message.
    """
    if expr == Symbol('Null') or form == Symbol('Null') or n == Symbol('Null'):
        return Integer(0)
    if not(isinstance(form, Symbol)) and not(isinstance(form, Expression)):
        return evaluation.message(name, 'ivar', form)
    sympy_exprs = expr.to_sympy().as_ordered_terms()
    sympy_var = form.to_sympy()
    sympy_n = n.to_sympy()
    def combine_exprs(exprs):
        # Sum the term list back into a single SymPy expression.
        result = 0
        for e in exprs:
            result += e
        return result
    # expand sub expressions if they contain variables
    sympy_exprs = [sympy.expand(e) if sympy_var.free_symbols.issubset(e.free_symbols) else e for e in sympy_exprs]
    sympy_expr = combine_exprs(sympy_exprs)
    sympy_result = sympy_expr.coeff(sympy_var, sympy_n)
    return from_sympy(sympy_result)
class Coefficient(Builtin):
    """
    <dl>
    <dt>'Coefficient[expr, form]'
    <dd>returns the coefficient of $form$ in the polynomial $expr$.
    <dt>'Coefficient[expr, form, n]'
    <dd>return the coefficient of $form$^$n$ in $expr$.
    </dl>
    ## Form 1
    >> Coefficient[(x + y)^4, (x^2) * (y^2)]
     = 6
    >> Coefficient[a x^2 + b y^3 + c x + d y + 5, x]
     = c
    >> Coefficient[(x + 3 y)^5, x]
     = 405 y ^ 4
    >> Coefficient[(x + 3 y)^5, x * y^4]
     = 405
    >> Coefficient[(x + 2)/(y - 3) + (x + 3)/(y - 2), x]
     = 1 / (-3 + y) + 1 / (-2 + y)
    #> Coefficient[(x + 2)/(y - 3) + (x + 3)/(y - 2), z, 0]
     = (2 + x) / (-3 + y) + (3 + x) / (-2 + y)
    #> Coefficient[y (x - 2)/((y^2 - 9)) + (x + 5)/(y + 2), x]
     = y / (-9 + y ^ 2) + 1 / (2 + y)
    #> Coefficient[y (x - 2)/((y^2 - 9)) + (x + 5)/(y + 2), y]
     = x / (-9 + y ^ 2) - 2 / (-9 + y ^ 2)
    ## MMA returns better one: (-2 + x) / (-9 + y ^ 2)
    #> Coefficient[y (x - 2)/((y - 3)(y + 3)) + (x + 5)/(y + 2), x]
     = y / (-9 + y ^ 2) + 1 / (2 + y)
    #> Coefficient[y (x - 2)/((y - 3)(y + 3)) + (x + 5)/(y + 2), y]
     = x / (-9 + y ^ 2) - 2 / (-9 + y ^ 2)
    ## MMA returns better one: (-2 + x) / ((-3 + y) (3 + y))
    #> Coefficient[x^3 - 2 x/y + 3 x z, y]
     = 0
    #> Coefficient[x^2 + axy^2 - bSin[c], c]
     = 0
    >> Coefficient[x*Cos[x + 3] + 6*y, x]
     = Cos[3 + x]
    ## Form 2
    >> Coefficient[(x + 1)^3, x, 2]
     = 3
    >> Coefficient[a x^2 + b y^3 + c x + d y + 5, y, 3]
     = b
    ## Find the free term in a polynomial
    >> Coefficient[(x + 2)^3 + (x + 3)^2, x, 0]
     = 17
    >> Coefficient[(x + 2)^3 + (x + 3)^2, y, 0]
     = (2 + x) ^ 3 + (3 + x) ^ 2
    >> Coefficient[a x^2 + b y^3 + c x + d y + 5, x, 0]
     = 5 + b y ^ 3 + d y
    ## Errors:
    #> Coefficient[x + y + 3]
     : Coefficient called with 1 argument; 2 or 3 arguments are expected.
     = Coefficient[3 + x + y]
    #> Coefficient[x + y + 3, 5]
     : 5 is not a valid variable.
     = Coefficient[3 + x + y, 5]
    ## This is known bug of Sympy 1.0, next Sympy version will fix it by this commit
    ## https://github.com/sympy/sympy/commit/25bf64b64d4d9a2dc563022818d29d06bc740d47
    ## #> Coefficient[x * y, z, 0]
    ##  = x y
    ## ## Sympy 1.0 retuns 0
    ## ## TODO: Support Modulus
    ## >> Coefficient[(x + 2)^3 + (x + 3)^2, x, 0, Modulus -> 3]
    ##  = 2
    ## #> Coefficient[(x + 2)^3 + (x + 3)^2, x, 0, {Modulus -> 3, Modulus -> 2, Modulus -> 10}]
    ##  = {2, 1, 7}
    """
    messages = {
        'argtu': 'Coefficient called with 1 argument; 2 or 3 arguments are expected.',
        'ivar': '`1` is not a valid variable.',
    }
    attributes = ('Listable',)
    # All three forms funnel into the shared _coefficient() helper;
    # the two-argument form defaults the power n to 1.
    def apply_noform(self, expr, evaluation):
        'Coefficient[expr_]'
        return evaluation.message('Coefficient', 'argtu')
    def apply(self, expr, form, evaluation):
        'Coefficient[expr_, form_]'
        return _coefficient(self.__class__.__name__, expr, form, Integer(1), evaluation)
    def apply_n(self, expr, form, n, evaluation):
        'Coefficient[expr_, form_, n_]'
        return _coefficient(self.__class__.__name__, expr, form, n, evaluation)
class CoefficientList(Builtin):
    """
    <dl>
    <dt>'CoefficientList[poly, var]'
    <dd>returns a list of coefficients of powers of $var$ in $poly$, starting with power 0.
    <dt>'CoefficientList[poly, {var1, var2, ...}]'
    <dd>returns an array of coefficients of the $vari$.
    </dl>
    ## Form 1
    >> CoefficientList[(x + 3)^5, x]
     = {243, 405, 270, 90, 15, 1}
    >> CoefficientList[(x + y)^4, x]
     = {y ^ 4, 4 y ^ 3, 6 y ^ 2, 4 y, 1}
    >> CoefficientList[a x^2 + b y^3 + c x + d y + 5, x]
     = {5 + b y ^ 3 + d y, c, a}
    >> CoefficientList[(x + 2)/(y - 3) + x/(y - 2), x]
     = {2 / (-3 + y), 1 / (-3 + y) + 1 / (-2 + y)}
    >> CoefficientList[(x + y)^3, z]
     = {(x + y) ^ 3}
    #> CoefficientList[x + y]
     : CoefficientList called with 1 argument; 2 or 3 arguments are expected.
     = CoefficientList[x + y]
    #> CoefficientList[x^2 + a x y^2 - b Sin[c], y]
     = {-b Sin[c] + x ^ 2, 0, a x}
    #> CoefficientList[1/y, y]
     : 1 / y is not a polynomial.
     = CoefficientList[1 / y, y]
    #> CoefficientList[0, x]
     = {}
    #> CoefficientList[1, x]
     = {1}
    #> CoefficientList[x + y, 5]
     : 5 is not a valid variable.
     = CoefficientList[x + y, 5]
    #> CoefficientList[x + 1, {}]
     = 1 + x
    ## Form 2
    >> CoefficientList[a x^2 + b y^3 + c x + d y + 5, {x, y}]
     = {{5, d, 0, b}, {c, 0, 0, 0}, {a, 0, 0, 0}}
    #> CoefficientList[a x^2 + b y^3 + c x + d y + 5, {x}]
     = {5 + b y ^ 3 + d y, c, a}
    #> CoefficientList[a x^2 + b y^3 + c x + d y + 5, {}]
     = 5 + a x ^ 2 + b y ^ 3 + c x + d y
    #> CoefficientList[a x^2 + b y^3 + c x + d y + 5, {x, y + 1}]
     = {{5 + b y ^ 3 + d y}, {c}, {a}}
    #> CoefficientList[a x^2 + b y^3 + c x + d y + 5, {x + 1, y}]
     = {{5 + a x ^ 2 + c x, d, 0, b}}
    #> CoefficientList[a x^2 + b y^3 + c x + d y + 5, {x + 1, y + 1}]
     = {{5 + a x ^ 2 + b y ^ 3 + c x + d y}}
    >> CoefficientList[(x - 2 y + 3 z)^3, {x, y, z}]
     = {{{0, 0, 0, 27}, {0, 0, -54, 0}, {0, 36, 0, 0}, {-8, 0, 0, 0}}, {{0, 0, 27, 0}, {0, -36, 0, 0}, {12, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 9, 0, 0}, {-6, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{1, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}}
    #> CoefficientList[(x - 2 y)^4, {x, 2}]
     : 2 is not a valid variable.
     = CoefficientList[(x - 2 y) ^ 4, {x, 2}]
    #> CoefficientList[x / y, {x, y}]
     : x / y is not a polynomial.
     = CoefficientList[x / y, {x, y}]
    #> CoefficientList[y (x - 2)/((z - 3) (z + 3)) + (x + 5)/(z + 2), {x, y}]
     = {{5 / (2 + z), -2 / (-9 + z ^ 2)}, {1 / (2 + z), 1 / (-9 + z ^ 2)}}
    #> CoefficientList[0, {x, y}]
     = {}
    #> CoefficientList[1, {x, y}]
     = {{1}}
    """
    messages = {
        'argtu': 'CoefficientList called with 1 argument; 2 or 3 arguments are expected.',
        'ivar': '`1` is not a valid variable.',
        'poly': '`1` is not a polynomial.',
    }
    def apply_noform(self, expr, evaluation):
        'CoefficientList[expr_]'
        return evaluation.message('CoefficientList', 'argtu')
    def apply(self, expr, form, evaluation):
        'CoefficientList[expr_, form_]'
        # Normalize form into a list of variables.
        vars = [form] if not form.has_form('List', None) else [v for v in form.leaves]
        # check form is not a variable
        for v in vars:
            if not(isinstance(v, Symbol)) and not(isinstance(v, Expression)):
                return evaluation.message('CoefficientList', 'ivar', v)
        # special cases for expr and form
        e_null = expr == Symbol('Null')
        f_null = form == Symbol('Null')
        if expr == Integer(0):
            return Expression('List')
        elif e_null and f_null:
            return Expression('List', Integer(0), Integer(0))
        elif e_null and not f_null:
            return Expression('List', Symbol('Null'))
        elif f_null:
            return Expression('List', expr)
        elif form.has_form('List', 0):
            return expr
        sympy_expr = expr.to_sympy()
        sympy_vars = [v.to_sympy() for v in vars]
        if not sympy_expr.is_polynomial(*[x for x in sympy_vars]):
            return evaluation.message('CoefficientList', 'poly', expr)
        try:
            sympy_poly, sympy_opt = sympy.poly_from_expr(sympy_expr, sympy_vars)
            # Degree per variable; 0 for variables that dropped out.
            dimensions = [sympy_poly.degree(x) if x in sympy_poly.gens else 0 for x in sympy_vars]
            # single & multiple variables cases
            if not form.has_form('List', None):
                return Expression('List',
                                  *[_coefficient(self.__class__.__name__,expr, form, Integer(n), evaluation)
                                    for n in range(dimensions[0]+1)])
            elif form.has_form('List', 1):
                form = form.leaves[0]
                return Expression('List',
                                  *[_coefficient(self.__class__.__name__, expr, form, Integer(n), evaluation)
                                    for n in range(dimensions[0]+1)])
            else:
                # Recursively build the nested coefficient array, one
                # dimension (variable) per recursion level.
                def _nth(poly, dims, exponents):
                    if not dims:
                        return from_sympy(poly.nth(*[i for i in exponents]))
                    result = Expression('List')
                    first_dim = dims[0]
                    for i in range(first_dim+1):
                        exponents.append(i)
                        subs = _nth(poly, dims[1:], exponents)
                        result.leaves.append(subs)
                        exponents.pop()
                    return result
                return _nth(sympy_poly, dimensions, [])
        except sympy.PolificationFailed:
            return evaluation.message('CoefficientList', 'poly', expr)
class Exponent(Builtin):
    """
    <dl>
    <dt>'Exponent[expr, form]'
    <dd>returns the maximum power with which $form$ appears in the expanded form of $expr$.
    <dt>'Exponent[expr, form, h]'
    <dd>applies $h$ to the set of exponents with which $form$ appears in $expr$.
    </dl>
    >> Exponent[5 x^2 - 3 x + 7, x]
     = 2
    #> Exponent[5 x^2 - 3 x + 7, x, List]
     = {0, 1, 2}
    >> Exponent[(x^3 + 1)^2 + 1, x]
     = 6
    #> Exponent[(x^3 + 1)^2 + 1, x, List]
     = {0, 3, 6}
    #> Exponent[Sqrt[I + Sqrt[6]], x]
     = 0
    >> Exponent[x^(n + 1) + Sqrt[x] + 1, x]
     = Max[1 / 2, 1 + n]
    #> Exponent[x^(n + 1) + Sqrt[x] + 1, x, List]
     = {0, 1 / 2, 1 + n}
    #> Exponent[(x + y)^n - 1, x, List]
     = {0}
    #> Exponent[(x + 3 y)^5, x*y^4]
     = 0
    >> Exponent[x / y, y]
     = -1
    >> Exponent[(x^2 + 1)^3 - 1, x, Min]
     = 2
    #> Exponent[(x^2 + 1)^3 - 1, x, List]
     = {2, 4, 6}
    >> Exponent[1 - 2 x^2 + a x^3, x, List]
     = {0, 2, 3}
    #> Exponent[(x + 1) + (x + 1)^2, x, List]
     = {0, 1, 2}
    #> Exponent[(x + 3 y - 2 z)^3 * (5 y + z), {x, y}, List]
     = {{0, 1, 2, 3}, {0, 1, 2, 3, 4}}
    #> Exponent[(x + 3 y - 2 z)^3*(5 y + z), {"x", "y"}, List]
     = {{0}, {0}}
    #> Exponent[(x + 3 y - 2 z)^3*(5 y + z), {}]
     = {}
    #> Exponent[x^a + b y^3 + c x + 2 y^e + 5, {x, y}, List]
     = {{0, 1, a}, {0, 3, e}}
    #> Exponent[x^2 / y^3, {x, y}]
     = {2, -3}
    #> Exponent[(x + 2)/(y - 3) + (x + 3)/(y - 2), {x, y, z}, List]
     = {{0, 1}, {0}, {0}}
    #> Exponent[x + 6 x^3 y^2 - 3/((x^2) (y^2)), {x, y}, List]
     = {{-2, 1, 3}, {-2, 0, 2}}
    #> Exponent[x^5 Sin[x^2] + x * x^3 Cos[x], x, List]
     = {4, 5}
    #> Exponent[x^5 Sin[x^2] + y Cos[y^2] + Log[x^3] + 6 y^4, {x, y}, List]
     = {{0, 5}, {0, 1, 4}}
    >> Exponent[0, x]
     = -Infinity
    >> Exponent[1, x]
     = 0
    ## errors:
    #> Exponent[x^2]
     : Exponent called with 1 argument; 2 or 3 arguments are expected.
     = Exponent[x ^ 2]
    """
    messages = {
        'argtu': 'Exponent called with `1` argument; 2 or 3 arguments are expected.',
    }
    # The two-argument form defaults the head applied to the exponent set
    # to Max, matching MMA's "maximum power" behavior.
    rules = {
        'Exponent[expr_, form_]': 'Exponent[expr, form, Max]',
    }
    def apply_novar(self, expr, evaluation):
        'Exponent[expr_]'
        return evaluation.message('Exponent', 'argtu', Integer(1))
    def apply(self, expr, form, h, evaluation):
        'Exponent[expr_, form_, h_]'
        # Exponent[0, x] is -Infinity by convention.
        if expr == Integer(0):
            return Expression('DirectedInfinity', Integer(-1))
        if not form.has_form('List', None):
            return Expression(h, *[i for i in find_exponents(expr, form)])
        else:
            # A list of forms yields one h[...] per form.
            exponents = [find_exponents(expr, var) for var in form.leaves]
            return Expression('List', *[Expression(h, *[i for i in s]) for s in exponents])
| 31.957447 | 249 | 0.510771 |
from __future__ import unicode_literals
from __future__ import absolute_import
from mathics.builtin.base import Builtin
from mathics.core.expression import Expression, Integer, Symbol, Atom, Number
from mathics.core.convert import from_sympy, sympy_symbol_prefix
import sympy
import mpmath
from six.moves import range
def sympy_factor(expr_sympy):
    """Factor a SymPy expression.

    The expression is first combined over a common denominator; a genuine
    fraction has numerator and denominator factored separately, otherwise
    the original expression is factored directly.  If SymPy raises a
    PolynomialError the input is returned unchanged.
    """
    try:
        combined = sympy.together(expr_sympy)
        top, bottom = combined.as_numer_denom()
        if bottom == 1:
            return sympy.factor(expr_sympy)
        return sympy.factor(top) / sympy.factor(bottom)
    except sympy.PolynomialError:
        return expr_sympy
def cancel(expr):
    """Cancel common factors in a Mathics expression via SymPy.

    Plus expressions are handled termwise (so each summand is cancelled
    independently); everything else is round-tripped through SymPy's
    cancel() and then refactored with sympy_factor().  Returns None when
    the expression has no SymPy form, and the input unchanged when SymPy
    raises PolynomialError.
    """
    if expr.has_form('Plus', None):
        return Expression('Plus', *[cancel(leaf) for leaf in expr.leaves])
    else:
        try:
            result = expr.to_sympy()
            if result is None:
                return None
            result = sympy.cancel(result)
            result = sympy_factor(result)
            return from_sympy(result)
        except sympy.PolynomialError:
            return expr
def expand(expr, numer=True, denom=False, deep=False, **kwargs):
    """Expand a Mathics expression through SymPy's expand().

    Only Plus/Times/Power/Integer structure is translated to SymPy; every
    other subexpression is parked in `sub_exprs` behind a placeholder
    symbol and restored afterwards.  `numer`/`denom` select which side of
    a fraction is expanded; `deep` recursively expands parked
    subexpressions (including their heads).  kwargs must contain
    'modulus' (callers obtain it via _Expand.convert_options); a
    non-positive modulus short-circuits to 0.
    """
    if kwargs['modulus'] is not None and kwargs['modulus'] <= 0:
        return Integer(0)
    sub_exprs = []
    def store_sub_expr(expr):
        # Park a subexpression and return a fresh placeholder symbol whose
        # name encodes its index in sub_exprs.
        sub_exprs.append(expr)
        result = sympy.Symbol(sympy_symbol_prefix + str(len(sub_exprs) - 1))
        return result
    def get_sub_expr(expr):
        # Inverse of store_sub_expr: decode the index from the symbol name.
        name = expr.get_name()
        assert isinstance(expr, Symbol) and name.startswith('System`')
        i = int(name[len('System`'):])
        return sub_exprs[i]
    def convert_sympy(expr):
        # Translate only arithmetic structure; park everything else.
        leaves = expr.get_leaves()
        if isinstance(expr, Integer):
            return sympy.Integer(expr.get_int_value())
        if expr.has_form('Power', 2):
            # if denom is False we store negative powers to prevent this.
            n1 = leaves[1].get_int_value()
            if not denom and n1 is not None and n1 < 0:
                return store_sub_expr(expr)
            return sympy.Pow(*[convert_sympy(leaf) for leaf in leaves])
        elif expr.has_form('Times', 2, None):
            return sympy.Mul(*[convert_sympy(leaf) for leaf in leaves])
        elif expr.has_form('Plus', 2, None):
            return sympy.Add(*[convert_sympy(leaf) for leaf in leaves])
        else:
            return store_sub_expr(expr)
    def unconvert_subexprs(expr):
        # Walk the converted-back tree and swap placeholders for the
        # original (possibly recursively-expanded) subexpressions.
        if expr.is_atom():
            if isinstance(expr, Symbol):
                return get_sub_expr(expr)
            else:
                return expr
        else:
            return Expression(expr.head, *[unconvert_subexprs(leaf) for leaf in expr.get_leaves()])
    sympy_expr = convert_sympy(expr)
    def _expand(expr):
        return expand(expr, numer=numer, denom=denom, deep=deep, **kwargs)
    if deep:
        # thread over everything
        for i, sub_expr in enumerate(sub_exprs):
            if not sub_expr.is_atom():
                head = _expand(sub_expr.head) # also expand head
                leaves = sub_expr.get_leaves()
                leaves = [_expand(leaf) for leaf in leaves]
                sub_exprs[i] = Expression(head, *leaves)
    else:
        # thread over Lists etc.
        threaded_heads = ('List', 'Rule')
        for i, sub_expr in enumerate(sub_exprs):
            for head in threaded_heads:
                if sub_expr.has_form(head, None):
                    leaves = sub_expr.get_leaves()
                    leaves = [_expand(leaf) for leaf in leaves]
                    sub_exprs[i] = Expression(head, *leaves)
                    break
    hints = {
        'mul': True,
        'multinomial': True,
        'power_exp': False,
        'power_base': False,
        'basic': False,
        'log': False,
    }
    hints.update(kwargs)
    if numer and denom:
        # don't expand fractions when a modulus is given
        if hints['modulus'] is not None:
            hints['frac'] = True
    else:
        hints['numer'] = numer
        hints['denom'] = denom
    sympy_expr = sympy_expr.expand(**hints)
    result = from_sympy(sympy_expr)
    result = unconvert_subexprs(result)
    return result
def find_all_vars(expr):
    """Return the set of subexpressions of expr that act as variables.

    Symbols count directly; Plus/Times recurse over their leaves; Power
    recurses into the base only when the base is non-constant and the
    exponent is rational; any other non-atomic subexpression (e.g. Sin[y])
    is itself treated as a variable.  A top-level List is examined
    elementwise.  Subexpressions with no SymPy form are skipped.
    """
    variables = set()
    def find_vars(e, e_sympy):
        assert e_sympy is not None
        if e_sympy.is_constant():
            return
        elif e.is_symbol():
            variables.add(e)
        elif (e.has_form('Plus', None) or
              e.has_form('Times', None)):
            for l in e.leaves:
                l_sympy = l.to_sympy()
                if l_sympy is not None:
                    find_vars(l, l_sympy)
        elif e.has_form('Power', 2):
            (a, b) = e.leaves # a^b
            a_sympy, b_sympy = a.to_sympy(), b.to_sympy()
            if a_sympy is None or b_sympy is None:
                return
            if not(a_sympy.is_constant()) and b_sympy.is_rational:
                find_vars(a, a_sympy)
        elif not(e.is_atom()):
            variables.add(e)
    exprs = expr.leaves if expr.has_form('List', None) else [expr]
    for e in exprs:
        e_sympy = e.to_sympy()
        if e_sympy is not None:
            find_vars(e, e_sympy)
    return variables
def find_exponents(expr, var):
    """Return the sorted list of exponents with which var occurs in expr.

    Each top-level term contributes its exponent of var; terms where the
    direct split fails (e.g. var buried inside Sin/Cos/Log factors) fall
    back to taking the maximum exponent over the term's multiplicative
    factors.  Returns [0] when either argument has no SymPy form.
    """
    f = expr.to_sympy()
    x = var.to_sympy()
    if f is None or x is None:
        # Fixed: was `{0}` (a set), inconsistent with the sorted-list
        # return type of the normal path below.
        return [0]
    result = set()
    for t in f.expand(power_exp=False).as_ordered_terms():
        coeff, exponent = t.as_coeff_exponent(x)
        if exponent:
            result.add(from_sympy(exponent))
        else:
            # find exponent of terms multiplied with functions: sin, cos, log, exp, ...
            # e.g: x^3 * Sin[x^2] should give 3
            muls = []
            for term in coeff.as_ordered_terms():
                # Hoisted: original called as_coeff_mul twice per term.
                factors = term.as_coeff_mul(x)[1]
                muls.append(factors if factors else (sympy.Integer(0),))
            expos = [term.as_coeff_exponent(x)[1] for mul in muls for term in mul]
            result.add(from_sympy(sympy.Max(*[e for e in expos])))
    return sorted(result)
class Cancel(Builtin):
    # Thin builtin wrapper around the module-level cancel() helper.
    # NOTE(review): apply() appears to have lost its Mathics pattern
    # docstring (e.g. 'Cancel[expr_]') in comment stripping — confirm
    # against the original source, since the pattern drives dispatch.
    def apply(self, expr, evaluation):
        return cancel(expr)
class Simplify(Builtin):
    # Lists, rules and equations are simplified elementwise via rules;
    # everything else goes through sympy.simplify in apply().
    rules = {
        'Simplify[list_List]': 'Simplify /@ list',
        'Simplify[rule_Rule]': 'Simplify /@ rule',
        'Simplify[eq_Equal]': 'Simplify /@ eq',
    }
    # NOTE(review): the pattern docstring of apply() looks stripped —
    # confirm against the original source.
    def apply(self, expr, evaluation):
        sympy_expr = expr.to_sympy()
        if sympy_expr is None:
            return
        sympy_result = sympy.simplify(sympy_expr)
        return from_sympy(sympy_result)
class Together(Builtin):
    # NOTE(review): attributes is a list here but a tuple in Factor below —
    # likely an accidental inconsistency; both iterate the same.
    attributes = ['Listable']
    def apply(self, expr, evaluation):
        # Combine over a common denominator in SymPy, then cancel common
        # factors on the Mathics side.  None means "no SymPy form".
        expr_sympy = expr.to_sympy()
        if expr_sympy is None:
            return None
        result = sympy.together(expr_sympy)
        result = from_sympy(result)
        result = cancel(result)
        return result
class Factor(Builtin):
    attributes = ('Listable',)
    def apply(self, expr, evaluation):
        # NOTE(review): this body duplicates the module-level
        # sympy_factor() almost exactly; the only difference is that on
        # PolynomialError it returns the original Mathics expr rather than
        # round-tripping the SymPy form — hence not refactored here.
        expr_sympy = expr.to_sympy()
        if expr_sympy is None:
            return None
        try:
            result = sympy.together(expr_sympy)
            numer, denom = result.as_numer_denom()
            if denom == 1:
                result = sympy.factor(expr_sympy)
            else:
                result = sympy.factor(numer) / sympy.factor(denom)
        except sympy.PolynomialError:
            return expr
        return from_sympy(result)
class FactorTermsList(Builtin):
    # Normalize the one- and two-argument forms onto the list form.
    rules = {
        'FactorTermsList[expr_]': 'FactorTermsList[expr, {}]',
        'FactorTermsList[expr_, var_]': 'FactorTermsList[expr, {var}]',
    }
    messages = {
        # 'poly': '`1` is not a polynomial.',
        'ivar': '`1` is not a valid variable.',
    }
    def apply_list(self, expr, vars, evaluation):
        # Split expr into an overall numerical factor followed by factors
        # progressively independent of the given variables.
        if expr == Integer(0):
            return Expression('List', Integer(1), Integer(0))
        elif isinstance(expr, Number):
            return Expression('List', expr, Integer(1))
        for x in vars.leaves:
            if not(isinstance(x, Atom)):
                # NOTE(review): message is addressed to 'CoefficientList',
                # not FactorTermsList — looks like a copy-paste slip, but
                # left untouched since doctests may rely on it.
                return evaluation.message('CoefficientList', 'ivar', x)
        sympy_expr = expr.to_sympy()
        if sympy_expr is None:
            return Expression('List', Integer(1), expr)
        sympy_expr = sympy.together(sympy_expr)
        # Keep only symbol variables in which the expression is polynomial.
        sympy_vars = [x.to_sympy() for x in vars.leaves if isinstance(x, Symbol) and sympy_expr.is_polynomial(x.to_sympy())]
        result = []
        numer, denom = sympy_expr.as_numer_denom()
        try:
            from sympy import factor, factor_list, Poly
            if denom == 1:
                # Get numerical part
                num_coeff, num_polys = factor_list(Poly(numer))
                result.append(num_coeff)
                # Get factors are independent of sub list of variables
                if (sympy_vars and isinstance(expr, Expression)
                        and any(x.free_symbols.issubset(sympy_expr.free_symbols) for x in sympy_vars)):
                    for i in reversed(range(len(sympy_vars))):
                        numer = factor(numer) / factor(num_coeff)
                        num_coeff, num_polys = factor_list(Poly(numer), *[x for x in sympy_vars[:(i+1)]])
                        result.append(sympy.expand(num_coeff))
                # Last factor
                numer = factor(numer) / factor(num_coeff)
                result.append(sympy.expand(numer))
            else:
                num_coeff, num_polys = factor_list(Poly(numer))
                den_coeff, den_polys = factor_list(Poly(denom))
                result = [num_coeff / den_coeff, sympy.expand(factor(numer)/num_coeff / (factor(denom)/den_coeff))]
        except sympy.PolynomialError: # MMA does not raise error for non poly
            result.append(sympy.expand(numer))
            # evaluation.message(self.get_name(), 'poly', expr)
        return Expression('List', *[from_sympy(i) for i in result])
class Apart(Builtin):
    attributes = ['Listable']
    # The one-argument form picks the first symbol found at the deepest
    # level of expr as the decomposition variable.
    rules = {
        'Apart[expr_]': (
            'Block[{vars = Cases[Level[expr, {-1}], _Symbol]},'
            '  If[Length[vars] > 0, Apart[expr, vars[[1]]], expr]]'),
    }
    def apply(self, expr, var, evaluation):
        # Partial-fraction decomposition via sympy.apart; the original
        # expression is returned when SymPy cannot polify it.
        expr_sympy = expr.to_sympy()
        var_sympy = var.to_sympy()
        if expr_sympy is None or var_sympy is None:
            return None
        try:
            result = sympy.apart(expr_sympy, var_sympy)
            result = from_sympy(result)
            return result
        except sympy.PolynomialError:
            # raised e.g. for apart(sin(1/(x**2-y**2)))
            return expr
class _Expand(Builtin):
    # Shared base for Expand/ExpandDenominator/ExpandAll: declares the
    # common Trig/Modulus options and validates them.
    options = {
        'Trig': 'False',
        'Modulus': '0',
    }
    messages = {
        'modn': 'Value of option `1` -> `2` should be an integer.',
        'opttf': 'Value of option `1` -> `2` should be True or False.',
    }
    def convert_options(self, options, evaluation):
        # Returns {'modulus': int|None, 'trig': bool}, or None after
        # emitting a message when an option value is invalid.
        modulus = options['System`Modulus']
        py_modulus = modulus.get_int_value()
        if py_modulus is None:
            return evaluation.message(self.get_name(), 'modn', Symbol('Modulus'), modulus)
        if py_modulus == 0:
            # Modulus -> 0 (the default) means "no modulus".
            py_modulus = None
        trig = options['System`Trig']
        if trig == Symbol('True'):
            py_trig = True
        elif trig == Symbol('False'):
            py_trig = False
        else:
            return evaluation.message(self.get_name(), 'opttf', Symbol('Trig'), trig)
        return {'modulus': py_modulus, 'trig': py_trig}
class Expand(_Expand):
    # TODO unwrap trig expressions in expand() so the following works
    def apply(self, expr, evaluation, options):
        # Expand the numerator only (numer=True, denom=False).
        kwargs = self.convert_options(options, evaluation)
        if kwargs is None:
            return
        return expand(expr, True, False, **kwargs)
class ExpandDenominator(_Expand):
    def apply(self, expr, evaluation, options):
        # Expand the denominator only (numer=False, denom=True).
        kwargs = self.convert_options(options, evaluation)
        if kwargs is None:
            return
        return expand(expr, False, True, **kwargs)
class ExpandAll(_Expand):
    def apply(self, expr, evaluation, options):
        'ExpandAll[expr_, OptionsPattern[ExpandAll]]'
        # Expand both sides of fractions, recursing into subexpressions
        # (and their heads) via deep=True.
        kwargs = self.convert_options(options, evaluation)
        if kwargs is None:
            return
        return expand(expr, numer=True, denom=True, deep=True, **kwargs)
class PowerExpand(Builtin):
    # Pure rewrite rules; these assume positive real bases/exponents and
    # are not valid in general (e.g. PowerExpand[(x^2)^(1/2)] gives x).
    # Plus/Times/Power/List/Rule thread over leaves; the catch-all rule
    # leaves anything else unchanged.
    rules = {
        'PowerExpand[(x_ ^ y_) ^ z_]': 'x ^ (y * z)',
        'PowerExpand[(x_ * y_) ^ z_]': 'x ^ z * y ^ z',
        'PowerExpand[Log[x_ ^ y_]]': 'y * Log[x]',
        'PowerExpand[x_Plus]': 'PowerExpand /@ x',
        'PowerExpand[x_Times]': 'PowerExpand /@ x',
        'PowerExpand[x_Power]': 'PowerExpand /@ x',
        'PowerExpand[x_List]': 'PowerExpand /@ x',
        'PowerExpand[x_Rule]': 'PowerExpand /@ x',
        'PowerExpand[other_]': 'other',
    }
class Numerator(Builtin):
    def apply(self, expr, evaluation):
        'Numerator[expr_]'
        # Pattern docstring restored from the commented source; keep the
        # numerator half of SymPy's as_numer_denom().
        sympy_expr = expr.to_sympy()
        if sympy_expr is None:
            return None
        numer, denom = sympy_expr.as_numer_denom()
        return from_sympy(numer)
class Denominator(Builtin):
    def apply(self, expr, evaluation):
        'Denominator[expr_]'
        # Pattern docstring restored from the commented source; keep the
        # denominator half of SymPy's as_numer_denom().
        sympy_expr = expr.to_sympy()
        if sympy_expr is None:
            return None
        numer, denom = sympy_expr.as_numer_denom()
        return from_sympy(denom)
class Variables(Builtin):
    # This builtin is incomplete. See the failing test case below.
    def apply(self, expr, evaluation):
        'Variables[expr_]'
        # Pattern docstring restored from the commented source.
        variables = find_all_vars(expr)
        variables = Expression('List', *variables)
        variables.sort() # MMA doesn't do this
        return variables
class UpTo(Builtin):
    # Only error messages are registered; UpTo's operational behavior is
    # presumably handled by the functions that accept UpTo specs — confirm
    # against the rest of the module.
    messages = {
        'innf': 'Expected non-negative integer or infinity at position 1 in ``.',
        'argx': 'UpTo expects 1 argument, `1` arguments were given.'
    }
class Missing(Builtin):
    # Placeholder: registers the 'Missing' symbol with no rules or messages.
    pass
class MinimalPolynomial(Builtin):
    attributes = ('Listable',)
    messages = {
        'nalg': '`1` is not an explicit algebraic number.',
    }
    def apply_novar(self, s, evaluation):
        'MinimalPolynomial[s_]'
        # One-argument form: produce a "pure" polynomial in the slot #1.
        x = Symbol('#1')
        return self.apply(s, x, evaluation)
    def apply(self, s, x, evaluation):
        'MinimalPolynomial[s_, x_]'
        # s must be an explicit algebraic number: reject anything with free
        # variables, and reject Null.
        variables = find_all_vars(s)
        if len(variables) > 0:
            return evaluation.message('MinimalPolynomial', 'nalg', s)
        if s == Symbol('Null'):
            return evaluation.message('MinimalPolynomial', 'nalg', s)
        sympy_s, sympy_x = s.to_sympy(), x.to_sympy()
        if sympy_s is None or sympy_x is None:
            return None
        sympy_result = sympy.minimal_polynomial(sympy_s, sympy_x)
        return from_sympy(sympy_result)
class PolynomialQ(Builtin):
    messages = {
        'argt': 'PolynomialQ called with `1` arguments; 1 or 2 arguments are expected.',
        'novar': 'No variable is not supported in PolynomialQ.',
    }
    def apply(self, expr, v, evaluation):
        'PolynomialQ[expr_, v___]'
        # Null expr or Null variable counts as a polynomial (True), matching
        # the comma-warning doctests in the commented source.
        if expr == Symbol('Null'): return Symbol('True')
        v = v.get_sequence()
        if len(v) > 1: return evaluation.message('PolynomialQ', 'argt', Integer(len(v)+1))
        elif len(v) == 0: return evaluation.message('PolynomialQ', 'novar')
        var = v[0]
        if var == Symbol('Null'): return Symbol('True')
        elif var.has_form('List', None):
            if len(var.leaves) == 0: return evaluation.message('PolynomialQ', 'novar')
            sympy_var = [x.to_sympy() for x in var.leaves]
        else:
            sympy_var = [var.to_sympy()]
        # Final verdict comes from SymPy's is_polynomial.
        sympy_expr = expr.to_sympy()
        sympy_result = sympy_expr.is_polynomial(*[x for x in sympy_var])
        return Symbol('True') if sympy_result else Symbol('False')
def _coefficient(name, expr, form, n, evaluation):
    """Shared backend for Coefficient/CoefficientList: the coefficient of
    form**n in expr, as a Mathics expression.

    `name` addresses error messages to the calling builtin.  Null
    arguments give 0; a non-symbolic `form` raises the 'ivar' message.
    """
    if expr == Symbol('Null') or form == Symbol('Null') or n == Symbol('Null'):
        return Integer(0)
    if not(isinstance(form, Symbol)) and not(isinstance(form, Expression)):
        return evaluation.message(name, 'ivar', form)
    sympy_exprs = expr.to_sympy().as_ordered_terms()
    sympy_var = form.to_sympy()
    sympy_n = n.to_sympy()
    def combine_exprs(exprs):
        # Sum the term list back into one SymPy expression.
        result = 0
        for e in exprs:
            result += e
        return result
    # Expand only the terms that actually contain the variable.
    sympy_exprs = [sympy.expand(e) if sympy_var.free_symbols.issubset(e.free_symbols) else e for e in sympy_exprs]
    sympy_expr = combine_exprs(sympy_exprs)
    sympy_result = sympy_expr.coeff(sympy_var, sympy_n)
    return from_sympy(sympy_result)
class Coefficient(Builtin):
    messages = {
        'argtu': 'Coefficient called with 1 argument; 2 or 3 arguments are expected.',
        'ivar': '`1` is not a valid variable.',
    }
    attributes = ('Listable',)
    # All forms funnel into the shared _coefficient() helper; the
    # two-argument form defaults the power n to 1.
    def apply_noform(self, expr, evaluation):
        'Coefficient[expr_]'
        return evaluation.message('Coefficient', 'argtu')
    def apply(self, expr, form, evaluation):
        'Coefficient[expr_, form_]'
        return _coefficient(self.__class__.__name__, expr, form, Integer(1), evaluation)
    def apply_n(self, expr, form, n, evaluation):
        'Coefficient[expr_, form_, n_]'
        return _coefficient(self.__class__.__name__, expr, form, n, evaluation)
class CoefficientList(Builtin):
    messages = {
        'argtu': 'CoefficientList called with 1 argument; 2 or 3 arguments are expected.',
        'ivar': '`1` is not a valid variable.',
        'poly': '`1` is not a polynomial.',
    }
    def apply_noform(self, expr, evaluation):
        'CoefficientList[expr_]'
        return evaluation.message('CoefficientList', 'argtu')
    def apply(self, expr, form, evaluation):
        'CoefficientList[expr_, form_]'
        # Normalize form into a list of variables.
        vars = [form] if not form.has_form('List', None) else [v for v in form.leaves]
        # Every variable must be a Symbol or an Expression.
        for v in vars:
            if not(isinstance(v, Symbol)) and not(isinstance(v, Expression)):
                return evaluation.message('CoefficientList', 'ivar', v)
        # Special cases: zero expr, Null arguments, empty variable list.
        e_null = expr == Symbol('Null')
        f_null = form == Symbol('Null')
        if expr == Integer(0):
            return Expression('List')
        elif e_null and f_null:
            return Expression('List', Integer(0), Integer(0))
        elif e_null and not f_null:
            return Expression('List', Symbol('Null'))
        elif f_null:
            return Expression('List', expr)
        elif form.has_form('List', 0):
            return expr
        sympy_expr = expr.to_sympy()
        sympy_vars = [v.to_sympy() for v in vars]
        if not sympy_expr.is_polynomial(*[x for x in sympy_vars]):
            return evaluation.message('CoefficientList', 'poly', expr)
        try:
            sympy_poly, sympy_opt = sympy.poly_from_expr(sympy_expr, sympy_vars)
            # Degree per variable; 0 for variables that dropped out.
            dimensions = [sympy_poly.degree(x) if x in sympy_poly.gens else 0 for x in sympy_vars]
            # Single-variable forms return a flat list of coefficients.
            if not form.has_form('List', None):
                return Expression('List',
                                  *[_coefficient(self.__class__.__name__,expr, form, Integer(n), evaluation)
                                    for n in range(dimensions[0]+1)])
            elif form.has_form('List', 1):
                form = form.leaves[0]
                return Expression('List',
                                  *[_coefficient(self.__class__.__name__, expr, form, Integer(n), evaluation)
                                    for n in range(dimensions[0]+1)])
            else:
                # Multi-variable: recursively build the nested coefficient
                # array, one dimension (variable) per recursion level.
                def _nth(poly, dims, exponents):
                    if not dims:
                        return from_sympy(poly.nth(*[i for i in exponents]))
                    result = Expression('List')
                    first_dim = dims[0]
                    for i in range(first_dim+1):
                        exponents.append(i)
                        subs = _nth(poly, dims[1:], exponents)
                        result.leaves.append(subs)
                        exponents.pop()
                    return result
                return _nth(sympy_poly, dimensions, [])
        except sympy.PolificationFailed:
            return evaluation.message('CoefficientList', 'poly', expr)
class Exponent(Builtin):
    messages = {
        'argtu': 'Exponent called with `1` argument; 2 or 3 arguments are expected.',
    }
    # The two-argument form defaults the head applied to the exponent set
    # to Max, matching MMA's "maximum power" behavior.
    rules = {
        'Exponent[expr_, form_]': 'Exponent[expr, form, Max]',
    }
    def apply_novar(self, expr, evaluation):
        'Exponent[expr_]'
        return evaluation.message('Exponent', 'argtu', Integer(1))
    def apply(self, expr, form, h, evaluation):
        'Exponent[expr_, form_, h_]'
        # Exponent[0, x] is -Infinity by convention.
        if expr == Integer(0):
            return Expression('DirectedInfinity', Integer(-1))
        if not form.has_form('List', None):
            return Expression(h, *[i for i in find_exponents(expr, form)])
        else:
            # A list of forms yields one h[...] per form.
            exponents = [find_exponents(expr, var) for var in form.leaves]
            return Expression('List', *[Expression(h, *[i for i in s]) for s in exponents])
| true | true |
1c2d5c434953c3acf443a11ab1783bd53f70c57f | 8,240 | py | Python | src/api/bkuser_core/tests/profiles/test_utils.py | Canway-shiisa/bk-user | a049e80d12082960828015742cea4b041f4af796 | [
"MIT"
] | null | null | null | src/api/bkuser_core/tests/profiles/test_utils.py | Canway-shiisa/bk-user | a049e80d12082960828015742cea4b041f4af796 | [
"MIT"
] | null | null | null | src/api/bkuser_core/tests/profiles/test_utils.py | Canway-shiisa/bk-user | a049e80d12082960828015742cea4b041f4af796 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from unittest.mock import patch
import pytest
from django.contrib.auth.hashers import check_password
from bkuser_core.audit.utils import create_profile_log
from bkuser_core.categories.models import ProfileCategory
from bkuser_core.profiles.exceptions import CountryISOCodeNotMatch, UsernameWithDomainFormatError
from bkuser_core.profiles.models import Profile
from bkuser_core.profiles.utils import (
align_country_iso_code,
check_former_passwords,
make_password_by_config,
parse_username_domain,
)
from bkuser_core.user_settings.constants import InitPasswordMethod
from bkuser_core.user_settings.models import Setting
pytestmark = pytest.mark.django_db
class TestParseUsername:
    """Tests for parse_username_domain(): splitting raw "username@domain"
    strings into their username and domain parts."""

    @pytest.mark.parametrize(
        "raw_username, known_domain, expected_username",
        [
            # Translated from Chinese: "contains unknown character $".
            # NOTE(review): these rows are all well-formed usernames; the
            # comment looks copied from the invalid-input table below.
            ("user-67@asdfads.com", "asdfads.com", "user-67"),
            ("user-67@fdfdf", "fdfdf", "user-67"),
            ("us_er-67@fdfdf", "fdfdf", "us_er-67"),
            ("us.er-67@fdfdf", "fdfdf", "us.er-67"),
        ],
    )
    def test_normal_domain_username(self, raw_username, known_domain, expected_username):
        """A well-formed "user@domain" string parses without a known domain."""
        username, domain = parse_username_domain(raw_username)
        assert username == expected_username
        assert domain == known_domain

    @pytest.mark.parametrize(
        "raw_username",
        ["user-$67@fdfdf", "user?-67@fdfdfd", "user-67@_fdfdfd", "user-67@@fdfdfd"],
    )
    def test_wrong_domain_username(self, raw_username):
        """Malformed input raises a format error when no known domain is
        supplied.  (Translated from the original Chinese docstring.)"""
        with pytest.raises(UsernameWithDomainFormatError):
            parse_username_domain(raw_username)

    @pytest.mark.parametrize(
        "raw_username, known_domain, expected_username",
        [
            # Translated from Chinese: "contains unknown character $".
            ("user-67@asdfads.com", "asdfads.com", "user-67"),
            ("user-67@fdfdf", "fdfdf", "user-67"),
            ("us_er-67@fdfdf", "fdfdf", "us_er-67"),
            ("us.er-67@fdfdf", "fdfdf", "us.er-67"),
        ],
    )
    def test_known_domain_username(self, raw_username, known_domain, expected_username):
        """Parsing succeeds when both the domain and the username are known."""
        username, domain = parse_username_domain(raw_username, known_domain)
        assert username == expected_username
        assert known_domain == domain

    @pytest.mark.parametrize(
        "raw_username, known_domain",
        [
            # Translated from Chinese: "contains unknown character $".
            ("user-67@fdfdfd", "abcd"),
            ("user$-67@fdfdfd", "abcd"),
            ("user?-67@fdfdfd", "abcd"),
            ("user-67@_fdfdfd", "abcd"),
        ],
    )
    def test_wrong_known_domain_username(self, raw_username, known_domain):
        """A known domain that does not match the raw string raises ValueError."""
        with pytest.raises(ValueError):
            parse_username_domain(raw_username, known_domain)

    def test_no_domain_in_username(self):
        """A bare username with no "@domain" part raises ValueError."""
        raw_username = "user-67"
        known_domain = "fdfdf"
        with pytest.raises(ValueError):
            parse_username_domain(raw_username, known_domain)
class TestAlignCountryISOCode:
    """Tests for align_country_iso_code(): normalizing and cross-checking a
    telephone country code against an ISO country code."""

    @pytest.mark.parametrize(
        "pass_in, pass_out",
        [
            (("86", "CN"), ("86", "CN")),
            (("86", "cn"), ("86", "CN")),
            (("1", "US"), ("1", "US")),
            (("1", "us"), ("1", "US")),
            # An unknown pair falls back to the CN/86 default.
            (("0", "zz"), ("86", "CN")),
        ],
    )
    def test_both_pass_in(self, pass_in, pass_out):
        """Both values supplied: alignment/validation logic.
        (Translated from the original Chinese docstring.)"""
        a, b = align_country_iso_code(pass_in[0], pass_in[1])
        assert a == pass_out[0]
        assert b == pass_out[1]

    def test_only_country_code(self):
        """Only the country code is supplied; the ISO code is derived."""
        country_code = "86"
        a, b = align_country_iso_code(country_code, "")
        assert a == "86"
        assert b == "CN"

    @pytest.mark.parametrize(
        "iso_code, expected",
        [
            ("cn", ("86", "CN")),
            ("CN", ("86", "CN")),
            # Unknown ISO codes fall back to the CN/86 default.
            ("zz", ("86", "CN")),
            ("us", ("1", "US")),
            ("asdfasdf", ("86", "CN")),
        ],
    )
    def test_only_iso_code(self, iso_code, expected):
        """Only the ISO code is supplied; the country code is derived."""
        a, b = align_country_iso_code("", iso_code)
        assert a == expected[0]
        assert b == expected[1]

    @pytest.mark.parametrize("iso_code", ["ioio", "ZZ"])
    def test_invalid_iso_code(self, iso_code):
        """An invalid ISO code falls back to the CN/86 default."""
        a, b = align_country_iso_code("", iso_code)
        assert a == "86"
        assert b == "CN"

    def test_no_input(self):
        """Passing two empty values raises ValueError."""
        with pytest.raises(ValueError):
            align_country_iso_code("", "")

    @pytest.mark.parametrize(
        "wrong_pair",
        [
            ("86", "US"),
            ("999", "ZZ"),
        ],
    )
    def test_align(self, wrong_pair):
        """A mismatched (country code, ISO code) pair raises
        CountryISOCodeNotMatch."""
        with pytest.raises(CountryISOCodeNotMatch):
            align_country_iso_code(wrong_pair[0], wrong_pair[1])
class TestMakePassword:
    """Tests for make_password_by_config() under both initial-password
    policies (fixed preset vs. random-via-mail)."""

    # Deterministic stand-in for gen_password() so the RANDOM_VIA_MAIL
    # cases have a predictable expected value.
    FAKE_RANDOM_PASSWORD = "abcdefg"

    def make_fake_category(self, init_config: dict) -> ProfileCategory:
        """Create a local category and override its default settings with
        the key/value pairs in *init_config*."""
        c = ProfileCategory.objects.create(display_name="Fake", domain="fake", type="local")
        c.make_default_settings()
        for k, v in init_config.items():
            s = Setting.objects.get(category_id=c.pk, meta__key=k)
            s.value = v
            s.save()
        return c

    @pytest.mark.parametrize(
        "init_method,init_password,return_raw,expected",
        [
            (
                InitPasswordMethod.FIXED_PRESET.value,
                "aaaaaa",
                True,
                ("aaaaaa", False),
            ),
            (
                InitPasswordMethod.FIXED_PRESET.value,
                "aaaaaa",
                False,
                ("aaaaaa", False),
            ),
            (
                InitPasswordMethod.RANDOM_VIA_MAIL.value,
                "bbbbbb",
                True,
                (FAKE_RANDOM_PASSWORD, True),
            ),
            (
                InitPasswordMethod.RANDOM_VIA_MAIL.value,
                "bbbbbb",
                False,
                (FAKE_RANDOM_PASSWORD, True),
            ),
        ],
    )
    def test_make_password(self, init_method, init_password, return_raw, expected):
        """Expected is (password, should_notify); random generation is
        patched so the random path is deterministic."""
        c = self.make_fake_category({"init_password": init_password, "init_password_method": init_method})
        with patch("bkuser_core.profiles.utils.gen_password") as mocked_password:
            mocked_password.return_value = self.FAKE_RANDOM_PASSWORD
            if return_raw:
                assert make_password_by_config(c.pk, True) == expected
            else:
                encrypted, should_notify = make_password_by_config(c.pk, False)
                # Bug fix: check_password()'s boolean result was previously
                # discarded, so a wrong hash could not fail this test.
                assert check_password(expected[0], encrypted)
                assert should_notify == expected[1]
class TestCheckFormerPasswords:
    """Tests for check_former_passwords(): whether a new password repeats
    one of the profile's most recent reset passwords."""

    @pytest.mark.parametrize(
        "former_passwords,new_password,max_history,expected",
        [
            (["aaaa", "vvvv", "cccc"], "cccc", 3, True),
            (["aaaa", "vvvv", "cccc"], "cccc", 2, True),
            (["aaaa", "vvvv", "cccc"], "bbbb", 3, False),
            # "aaaa" is outside the 2-entry history window, so it is allowed.
            (["aaaa", "vvvv", "cccc"], "aaaa", 2, False),
            (["aaaa"], "aaaa", 3, True),
        ],
    )
    def test_in(self, former_passwords, new_password, max_history, expected):
        """True iff the new password appears in the last *max_history*
        successful password resets."""
        p = Profile.objects.get(id=1)
        # Record each former password as a successful ResetPassword audit log.
        for pwd in former_passwords:
            create_profile_log(p, "ResetPassword", {"is_success": True, "password": pwd})
        assert check_former_passwords(p, new_password, max_history) == expected
| 34.767932 | 115 | 0.589563 |
from unittest.mock import patch
import pytest
from django.contrib.auth.hashers import check_password
from bkuser_core.audit.utils import create_profile_log
from bkuser_core.categories.models import ProfileCategory
from bkuser_core.profiles.exceptions import CountryISOCodeNotMatch, UsernameWithDomainFormatError
from bkuser_core.profiles.models import Profile
from bkuser_core.profiles.utils import (
align_country_iso_code,
check_former_passwords,
make_password_by_config,
parse_username_domain,
)
from bkuser_core.user_settings.constants import InitPasswordMethod
from bkuser_core.user_settings.models import Setting
pytestmark = pytest.mark.django_db
class TestParseUsername:
    """Tests for parse_username_domain(): splitting raw "username@domain"
    strings into their username and domain parts."""

    @pytest.mark.parametrize(
        "raw_username, known_domain, expected_username",
        [
            ("user-67@asdfads.com", "asdfads.com", "user-67"),
            ("user-67@fdfdf", "fdfdf", "user-67"),
            ("us_er-67@fdfdf", "fdfdf", "us_er-67"),
            ("us.er-67@fdfdf", "fdfdf", "us.er-67"),
        ],
    )
    def test_normal_domain_username(self, raw_username, known_domain, expected_username):
        """A well-formed "user@domain" string parses without a known domain."""
        username, domain = parse_username_domain(raw_username)
        assert username == expected_username
        assert domain == known_domain

    @pytest.mark.parametrize(
        "raw_username",
        ["user-$67@fdfdf", "user?-67@fdfdfd", "user-67@_fdfdfd", "user-67@@fdfdfd"],
    )
    def test_wrong_domain_username(self, raw_username):
        """Malformed input raises a format error when no known domain is given."""
        with pytest.raises(UsernameWithDomainFormatError):
            parse_username_domain(raw_username)

    @pytest.mark.parametrize(
        "raw_username, known_domain, expected_username",
        [
            ("user-67@asdfads.com", "asdfads.com", "user-67"),
            ("user-67@fdfdf", "fdfdf", "user-67"),
            ("us_er-67@fdfdf", "fdfdf", "us_er-67"),
            ("us.er-67@fdfdf", "fdfdf", "us.er-67"),
        ],
    )
    def test_known_domain_username(self, raw_username, known_domain, expected_username):
        """Parsing succeeds when both the domain and the username are known."""
        username, domain = parse_username_domain(raw_username, known_domain)
        assert username == expected_username
        assert known_domain == domain

    @pytest.mark.parametrize(
        "raw_username, known_domain",
        [
            ("user-67@fdfdfd", "abcd"),
            ("user$-67@fdfdfd", "abcd"),
            ("user?-67@fdfdfd", "abcd"),
            ("user-67@_fdfdfd", "abcd"),
        ],
    )
    def test_wrong_known_domain_username(self, raw_username, known_domain):
        """A known domain that does not match the raw string raises ValueError."""
        with pytest.raises(ValueError):
            parse_username_domain(raw_username, known_domain)

    def test_no_domain_in_username(self):
        """A bare username with no "@domain" part raises ValueError."""
        raw_username = "user-67"
        known_domain = "fdfdf"
        with pytest.raises(ValueError):
            parse_username_domain(raw_username, known_domain)
class TestAlignCountryISOCode:
    """Tests for align_country_iso_code(): normalizing and cross-checking a
    telephone country code against an ISO country code."""

    @pytest.mark.parametrize(
        "pass_in, pass_out",
        [
            (("86", "CN"), ("86", "CN")),
            (("86", "cn"), ("86", "CN")),
            (("1", "US"), ("1", "US")),
            (("1", "us"), ("1", "US")),
            # An unknown pair falls back to the CN/86 default.
            (("0", "zz"), ("86", "CN")),
        ],
    )
    def test_both_pass_in(self, pass_in, pass_out):
        """Both values supplied: alignment/validation logic."""
        a, b = align_country_iso_code(pass_in[0], pass_in[1])
        assert a == pass_out[0]
        assert b == pass_out[1]

    def test_only_country_code(self):
        """Only the country code is supplied; the ISO code is derived."""
        country_code = "86"
        a, b = align_country_iso_code(country_code, "")
        assert a == "86"
        assert b == "CN"

    @pytest.mark.parametrize(
        "iso_code, expected",
        [
            ("cn", ("86", "CN")),
            ("CN", ("86", "CN")),
            # Unknown ISO codes fall back to the CN/86 default.
            ("zz", ("86", "CN")),
            ("us", ("1", "US")),
            ("asdfasdf", ("86", "CN")),
        ],
    )
    def test_only_iso_code(self, iso_code, expected):
        """Only the ISO code is supplied; the country code is derived."""
        a, b = align_country_iso_code("", iso_code)
        assert a == expected[0]
        assert b == expected[1]

    @pytest.mark.parametrize("iso_code", ["ioio", "ZZ"])
    def test_invalid_iso_code(self, iso_code):
        """An invalid ISO code falls back to the CN/86 default."""
        a, b = align_country_iso_code("", iso_code)
        assert a == "86"
        assert b == "CN"

    def test_no_input(self):
        """Passing two empty values raises ValueError."""
        with pytest.raises(ValueError):
            align_country_iso_code("", "")

    @pytest.mark.parametrize(
        "wrong_pair",
        [
            ("86", "US"),
            ("999", "ZZ"),
        ],
    )
    def test_align(self, wrong_pair):
        """A mismatched (country code, ISO code) pair raises
        CountryISOCodeNotMatch."""
        with pytest.raises(CountryISOCodeNotMatch):
            align_country_iso_code(wrong_pair[0], wrong_pair[1])
class TestMakePassword:
    """Tests for make_password_by_config() under both initial-password
    policies (fixed preset vs. random-via-mail)."""

    # Deterministic stand-in for gen_password() so the RANDOM_VIA_MAIL
    # cases have a predictable expected value.
    FAKE_RANDOM_PASSWORD = "abcdefg"

    def make_fake_category(self, init_config: dict) -> ProfileCategory:
        """Create a local category and override its default settings with
        the key/value pairs in *init_config*."""
        c = ProfileCategory.objects.create(display_name="Fake", domain="fake", type="local")
        c.make_default_settings()
        for k, v in init_config.items():
            s = Setting.objects.get(category_id=c.pk, meta__key=k)
            s.value = v
            s.save()
        return c

    @pytest.mark.parametrize(
        "init_method,init_password,return_raw,expected",
        [
            (
                InitPasswordMethod.FIXED_PRESET.value,
                "aaaaaa",
                True,
                ("aaaaaa", False),
            ),
            (
                InitPasswordMethod.FIXED_PRESET.value,
                "aaaaaa",
                False,
                ("aaaaaa", False),
            ),
            (
                InitPasswordMethod.RANDOM_VIA_MAIL.value,
                "bbbbbb",
                True,
                (FAKE_RANDOM_PASSWORD, True),
            ),
            (
                InitPasswordMethod.RANDOM_VIA_MAIL.value,
                "bbbbbb",
                False,
                (FAKE_RANDOM_PASSWORD, True),
            ),
        ],
    )
    def test_make_password(self, init_method, init_password, return_raw, expected):
        """Expected is (password, should_notify); random generation is
        patched so the random path is deterministic."""
        c = self.make_fake_category({"init_password": init_password, "init_password_method": init_method})
        with patch("bkuser_core.profiles.utils.gen_password") as mocked_password:
            mocked_password.return_value = self.FAKE_RANDOM_PASSWORD
            if return_raw:
                assert make_password_by_config(c.pk, True) == expected
            else:
                encrypted, should_notify = make_password_by_config(c.pk, False)
                # NOTE(review): check_password()'s boolean result is
                # discarded here, so a wrong hash would not fail this
                # test -- it likely should be wrapped in an assert.
                check_password(expected[0], encrypted)
                assert should_notify == expected[1]
class TestCheckFormerPasswords:
    """Tests for check_former_passwords(): whether a new password repeats
    one of the profile's most recent reset passwords."""

    @pytest.mark.parametrize(
        "former_passwords,new_password,max_history,expected",
        [
            (["aaaa", "vvvv", "cccc"], "cccc", 3, True),
            (["aaaa", "vvvv", "cccc"], "cccc", 2, True),
            (["aaaa", "vvvv", "cccc"], "bbbb", 3, False),
            # "aaaa" is outside the 2-entry history window, so it is allowed.
            (["aaaa", "vvvv", "cccc"], "aaaa", 2, False),
            (["aaaa"], "aaaa", 3, True),
        ],
    )
    def test_in(self, former_passwords, new_password, max_history, expected):
        """True iff the new password appears in the last *max_history*
        successful password resets."""
        p = Profile.objects.get(id=1)
        # Record each former password as a successful ResetPassword audit log.
        for pwd in former_passwords:
            create_profile_log(p, "ResetPassword", {"is_success": True, "password": pwd})
        assert check_former_passwords(p, new_password, max_history) == expected
| true | true |
1c2d5c50b871c13ddcd65e2482ce8e7c74851477 | 971 | py | Python | pos_qr_show/__manifest__.py | mosadiqit/pos-addons | 6cac2b7d227bddbec5f9d0c69859e626f2e9dc73 | [
"MIT"
] | null | null | null | pos_qr_show/__manifest__.py | mosadiqit/pos-addons | 6cac2b7d227bddbec5f9d0c69859e626f2e9dc73 | [
"MIT"
] | null | null | null | pos_qr_show/__manifest__.py | mosadiqit/pos-addons | 6cac2b7d227bddbec5f9d0c69859e626f2e9dc73 | [
"MIT"
] | 3 | 2021-06-15T05:45:42.000Z | 2021-07-27T12:28:53.000Z | # Copyright 2018 Ivan Yelizariev <https://it-projects.info/team/yelizariev>
# License MIT (https://opensource.org/licenses/MIT).
# Odoo addon manifest: this file holds a single dict literal that Odoo
# evaluates to discover the module's metadata, dependencies and assets.
{
    "name": """POS QR Showing in POS""",
    "summary": """Show QR for qr-based payment systems in POS or Customer Screen""",
    "category": "Hidden",
    # "live_test_url": "",
    "images": [],
    "version": "12.0.1.0.0",
    "application": False,
    "author": "IT-Projects LLC, Ivan Yelizariev",
    "support": "apps@itpp.dev",
    "website": "https://apps.odoo.com/apps/modules/12.0/pos_qr_show/",
    "license": "Other OSI approved licence", # MIT
    "price": 40.00,
    "currency": "EUR",
    # Depends only on the core Point of Sale module.
    "depends": ["point_of_sale"],
    "external_dependencies": {"python": [], "bin": []},
    # XML data files loaded on install/update.
    "data": ["views/assets.xml"],
    "demo": [],
    # QWeb templates bundled into the POS frontend assets.
    "qweb": ["static/src/xml/pos.xml"],
    # Lifecycle hooks: none used by this module.
    "post_load": None,
    "pre_init_hook": None,
    "post_init_hook": None,
    "uninstall_hook": None,
    "auto_install": False,
    "installable": True,
}
| 33.482759 | 84 | 0.601442 |
# Odoo addon manifest: this file holds a single dict literal that Odoo
# evaluates to discover the module's metadata, dependencies and assets.
{
    "name": """POS QR Showing in POS""",
    "summary": """Show QR for qr-based payment systems in POS or Customer Screen""",
    "category": "Hidden",
    "images": [],
    "version": "12.0.1.0.0",
    "application": False,
    "author": "IT-Projects LLC, Ivan Yelizariev",
    "support": "apps@itpp.dev",
    "website": "https://apps.odoo.com/apps/modules/12.0/pos_qr_show/",
    # "licence" spelling is upstream's; the value is data, not a typo to fix.
    "license": "Other OSI approved licence",
    "price": 40.00,
    "currency": "EUR",
    # Depends only on the core Point of Sale module.
    "depends": ["point_of_sale"],
    "external_dependencies": {"python": [], "bin": []},
    # XML data files loaded on install/update.
    "data": ["views/assets.xml"],
    "demo": [],
    # QWeb templates bundled into the POS frontend assets.
    "qweb": ["static/src/xml/pos.xml"],
    # Lifecycle hooks: none used by this module.
    "post_load": None,
    "pre_init_hook": None,
    "post_init_hook": None,
    "uninstall_hook": None,
    "auto_install": False,
    "installable": True,
}
| true | true |
1c2d5cca901cfaa7506bca632cd6a7ed50bd307d | 807 | py | Python | amino/__init__.py | Alert-Aigul/Amino.py | 34b578e34e8a6ee6e50e3c3b2ad8bcda6b83f266 | [
"MIT"
] | null | null | null | amino/__init__.py | Alert-Aigul/Amino.py | 34b578e34e8a6ee6e50e3c3b2ad8bcda6b83f266 | [
"MIT"
] | null | null | null | amino/__init__.py | Alert-Aigul/Amino.py | 34b578e34e8a6ee6e50e3c3b2ad8bcda6b83f266 | [
"MIT"
] | null | null | null | __title__ = "Amino.py"
# Package metadata. __title__ is (re)defined here so the update-check
# message below does not depend on any earlier definition.
__title__ = "Amino.py"
__author__ = "Slimakoi"
__license__ = "MIT"
__copyright__ = "Copyright 2020-2022 Slimakoi"
__version__ = "3.0.4"

# Public API re-exports.
from .acm import ACM
from .client import Client
from .sub_client import SubClient
from .socket import Callbacks, SocketHandler
from .async_acm import AsyncACM
from .async_client import AsyncClient
from .async_sub_client import AsyncSubClient
from .async_socket import AsyncCallbacks, AsyncSocketHandler
from .lib.util import device, exceptions, headers, helpers, objects

from requests import get
from json import loads

# Best-effort check against PyPI for a newer release.  Bug fix: this used
# to run unguarded at import time, so any network failure (offline machine,
# PyPI outage, changed JSON layout) made `import amino` raise.  The check
# is now wrapped so a failure is silently ignored; on success the behavior
# (printing an update notice) is unchanged.
try:
    __newest__ = loads(get("https://pypi.python.org/pypi/Amino.py/json").text)["info"]["version"]
    if __version__ != __newest__:
        print(exceptions.LibraryUpdateAvailable(f"New version of {__title__} available: {__newest__} (Using {__version__})"))
except Exception:
    pass
| 31.038462 | 121 | 0.788104 | __title__ = "Amino.py"
# Package metadata. __title__ is (re)defined here so the update-check
# message below does not depend on any earlier definition.
__title__ = "Amino.py"
__author__ = "Slimakoi"
__license__ = "MIT"
__copyright__ = "Copyright 2020-2022 Slimakoi"
__version__ = "3.0.4"

# Public API re-exports.
from .acm import ACM
from .client import Client
from .sub_client import SubClient
from .socket import Callbacks, SocketHandler
from .async_acm import AsyncACM
from .async_client import AsyncClient
from .async_sub_client import AsyncSubClient
from .async_socket import AsyncCallbacks, AsyncSocketHandler
from .lib.util import device, exceptions, headers, helpers, objects

from requests import get
from json import loads

# Best-effort check against PyPI for a newer release.  Bug fix: this used
# to run unguarded at import time, so any network failure (offline machine,
# PyPI outage, changed JSON layout) made `import amino` raise.  The check
# is now wrapped so a failure is silently ignored; on success the behavior
# (printing an update notice) is unchanged.
try:
    __newest__ = loads(get("https://pypi.python.org/pypi/Amino.py/json").text)["info"]["version"]
    if __version__ != __newest__:
        print(exceptions.LibraryUpdateAvailable(f"New version of {__title__} available: {__newest__} (Using {__version__})"))
except Exception:
    pass
1c2d5cd0da5bfba84bb4e52446a57f4113914fc8 | 1,636 | py | Python | dimod/__init__.py | hsadeghidw/dimod | c6b4adc18c22f39fd5a79e6775bbcab84b1a5489 | [
"Apache-2.0"
] | null | null | null | dimod/__init__.py | hsadeghidw/dimod | c6b4adc18c22f39fd5a79e6775bbcab84b1a5489 | [
"Apache-2.0"
] | null | null | null | dimod/__init__.py | hsadeghidw/dimod | c6b4adc18c22f39fd5a79e6775bbcab84b1a5489 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# version is used by serialization below so we need it before everything
# NOTE(review): dimod.package_info (imported further down) also exports
# __version__ and shadows this literal once import completes -- keep the
# two values in sync.
__version__ = '0.10.6'
# Re-export the public dimod API at package level: star-imports publish
# the names, plain submodule imports make `dimod.<module>` accessible.
from dimod.bqm import *
from dimod.constrained import *
import dimod.constrained
from dimod.core import *
import dimod.core
from dimod.reference import *
import dimod.reference
from dimod.roof_duality import fix_variables
from dimod.binary import *
import dimod.binary
from dimod.discrete import *
import dimod.testing
from dimod.converters import *
import dimod.decorators
import dimod.generators
from dimod.exceptions import *
import dimod.exceptions
from dimod.higherorder import make_quadratic, poly_energy, poly_energies, BinaryPolynomial
import dimod.higherorder
from dimod.package_info import __version__, __author__, __authoremail__, __description__
from dimod.quadratic import *
import dimod.quadratic
from dimod.traversal import *
from dimod.sampleset import *
from dimod.serialization.format import set_printoptions
from dimod.utilities import *
import dimod.utilities
from dimod.vartypes import *
| 25.169231 | 90 | 0.78423 |
# Version literal needed before the submodule imports below.
# NOTE(review): dimod.package_info (imported further down) also exports
# __version__ and shadows this literal once import completes -- keep the
# two values in sync.
__version__ = '0.10.6'
# Re-export the public dimod API at package level: star-imports publish
# the names, plain submodule imports make `dimod.<module>` accessible.
from dimod.bqm import *
from dimod.constrained import *
import dimod.constrained
from dimod.core import *
import dimod.core
from dimod.reference import *
import dimod.reference
from dimod.roof_duality import fix_variables
from dimod.binary import *
import dimod.binary
from dimod.discrete import *
import dimod.testing
from dimod.converters import *
import dimod.decorators
import dimod.generators
from dimod.exceptions import *
import dimod.exceptions
from dimod.higherorder import make_quadratic, poly_energy, poly_energies, BinaryPolynomial
import dimod.higherorder
from dimod.package_info import __version__, __author__, __authoremail__, __description__
from dimod.quadratic import *
import dimod.quadratic
from dimod.traversal import *
from dimod.sampleset import *
from dimod.serialization.format import set_printoptions
from dimod.utilities import *
import dimod.utilities
from dimod.vartypes import *
| true | true |
1c2d5e9d7c8be473f77b25e53bd2c0cedbdbf59f | 1,490 | py | Python | bnn_mcmc_examples/examples/mlp/noisy_xor/setting2/optim/sgd/benchmark_run.py | papamarkou/bnn_mcmc_examples | 7bb4ecfb33db4c30a8e61e31f528bda0efb24e3d | [
"MIT"
] | 1 | 2021-09-09T15:55:37.000Z | 2021-09-09T15:55:37.000Z | bnn_mcmc_examples/examples/mlp/noisy_xor/setting2/optim/sgd/benchmark_run.py | kushagragpt99/bnn_mcmc_examples | 297cdb1e74335860989bebdb4ff6f6322b6adc06 | [
"MIT"
] | null | null | null | bnn_mcmc_examples/examples/mlp/noisy_xor/setting2/optim/sgd/benchmark_run.py | kushagragpt99/bnn_mcmc_examples | 297cdb1e74335860989bebdb4ff6f6322b6adc06 | [
"MIT"
] | 1 | 2021-10-05T06:38:57.000Z | 2021-10-05T06:38:57.000Z | # %% Import packages
from sklearn.metrics import accuracy_score
from torch.optim import SGD
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.model import model
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.optim.constants import (
num_epochs, num_solutions, verbose, verbose_step
)
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.optim.dataloaders import training_dataloader, test_dataloader
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.optim.sgd.constants import optimizer_output_benchmark_path
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.optim.sgd.optimizer import lr, momentum
# from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.optim.sgd.optimizer import loss_fn, lr, momentum
from bnn_mcmc_examples.optim import benchmark
# %% Create output directory if it does not exist
optimizer_output_benchmark_path.mkdir(parents=True, exist_ok=True)
# %% Setup SGD optimizer
# Initialize the MLP's parameters from a single draw of its prior, then
# attach a plain SGD optimizer with the module-level lr/momentum settings.
model.set_params(model.prior.sample())
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
# %% Benchmark model
# Run `num_solutions` optimization runs of `num_epochs` epochs each,
# validating on the test loader and writing results to the output path.
benchmark(
    model,
    training_dataloader,
    optimizer,
    num_solutions,
    num_epochs,
    optimizer_output_benchmark_path,
    # loss_fn=loss_fn,  # intentionally disabled: benchmark() falls back to its default loss
    validation_loader=test_dataloader,
    # Binary prediction: squeeze to 1-D and threshold the output at 0.5.
    pred_fn=lambda labels: labels.squeeze() > 0.5,
    metric_fn=lambda preds, labels: accuracy_score(preds, labels.squeeze()),
    # A run counts as successful once test accuracy exceeds 0.85.
    check_fn=lambda acc: acc > 0.85,
    verbose=verbose,
    verbose_step=verbose_step,
    print_runtime=True
)
| 33.863636 | 116 | 0.797315 |
from sklearn.metrics import accuracy_score
from torch.optim import SGD
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.model import model
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.optim.constants import (
num_epochs, num_solutions, verbose, verbose_step
)
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.optim.dataloaders import training_dataloader, test_dataloader
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.optim.sgd.constants import optimizer_output_benchmark_path
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.optim.sgd.optimizer import lr, momentum
from bnn_mcmc_examples.optim import benchmark
# Ensure the benchmark output directory exists before results are written.
optimizer_output_benchmark_path.mkdir(parents=True, exist_ok=True)
# Initialize the MLP's parameters from a single draw of its prior, then
# attach a plain SGD optimizer with the module-level lr/momentum settings.
model.set_params(model.prior.sample())
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
# Run `num_solutions` optimization runs of `num_epochs` epochs each,
# validating on the test loader and writing results to the output path.
benchmark(
    model,
    training_dataloader,
    optimizer,
    num_solutions,
    num_epochs,
    optimizer_output_benchmark_path,
    validation_loader=test_dataloader,
    # Binary prediction: squeeze to 1-D and threshold the output at 0.5.
    pred_fn=lambda labels: labels.squeeze() > 0.5,
    metric_fn=lambda preds, labels: accuracy_score(preds, labels.squeeze()),
    # A run counts as successful once test accuracy exceeds 0.85.
    check_fn=lambda acc: acc > 0.85,
    verbose=verbose,
    verbose_step=verbose_step,
    print_runtime=True
)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.