code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
"""
Setup script.
"""
from pathlib import Path
from typing import List
import versioneer
from setuptools import find_packages, setup
def _get_dependencies(requirements_file: Path) -> List[str]:
"""
Return requirements from a requirements file.
This expects a requirements file with no ``--find-links`` lines.
"""
lines = requirements_file.read_text().strip().split('\n')
return [line for line in lines if not line.startswith('#')]
# Run-time dependencies are read from ``requirements.txt`` at build time.
_DIRECT_REQUIRES = _get_dependencies(
    requirements_file=Path('requirements.txt'),
)
INSTALL_REQUIRES = _DIRECT_REQUIRES
# Development-only dependencies (lint/test tooling) come from
# ``dev-requirements.txt`` and are exposed as the ``dev`` extra.
DEV_REQUIRES = _get_dependencies(
    requirements_file=Path('dev-requirements.txt'),
)
# The README is used verbatim as the long description.
LONG_DESCRIPTION = Path('README.rst').read_text()
setup(
    # NOTE(review): PyPI project names may only contain ASCII letters,
    # digits, ".", "-" and "_" (PEP 508); a name containing "/" and
    # spaces cannot be uploaded as-is -- confirm the intended name.
    name='DC/OS Installer Tools',
    version=versioneer.get_version(),  # type: ignore
    cmdclass=versioneer.get_cmdclass(),  # type: ignore
    author='<NAME>',
    author_email='<EMAIL>',
    description='Get information from DC/OS installers.',
    long_description=LONG_DESCRIPTION,
    packages=find_packages(where='src'),
    zip_safe=True,
    package_dir={'': 'src'},
    install_requires=INSTALL_REQUIRES,
    include_package_data=True,
    license='Apache License 2.0',
    keywords='dcos',
    url='https://github.com/adamtheturtle/dcos-installer-tools',
    extras_require={
        'dev': DEV_REQUIRES,
    },
    classifiers=[
        'Operating System :: POSIX',
        'Environment :: Web Environment',
        'Programming Language :: Python :: 3.5',
        # BUG FIX: ``License :: OSI Approved :: Apache License 2.0`` is not
        # a valid trove classifier; the official identifier is the one
        # below, and invalid classifiers are rejected on upload.
        'License :: OSI Approved :: Apache Software License',
    ],
    # Avoid dependency links because they are not supported by Read The Docs.
    #
    # Also, they require users to use ``--process-dependency-links``.
    dependency_links=[],
)
| [
"versioneer.get_cmdclass",
"setuptools.find_packages",
"versioneer.get_version",
"pathlib.Path"
] | [((522, 546), 'pathlib.Path', 'Path', (['"""requirements.txt"""'], {}), "('requirements.txt')\n", (526, 546), False, 'from pathlib import Path\n'), ((643, 671), 'pathlib.Path', 'Path', (['"""dev-requirements.txt"""'], {}), "('dev-requirements.txt')\n", (647, 671), False, 'from pathlib import Path\n'), ((695, 713), 'pathlib.Path', 'Path', (['"""README.rst"""'], {}), "('README.rst')\n", (699, 713), False, 'from pathlib import Path\n'), ((780, 804), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (802, 804), False, 'import versioneer\n'), ((835, 860), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (858, 860), False, 'import versioneer\n'), ((1037, 1063), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (1050, 1063), False, 'from setuptools import find_packages, setup\n')] |
"""Analysis Classes Base Class"""
import pickle
import json
import pandas as pd
import matplotlib.pyplot as plt
# Base class for analyses
class Analysis:
    """Root class for analysis system classes."""

    # Init class with results
    def __init__(self, results, t_zone=None, t_unit=None):
        """
        Constructor.

        Called as super() from a specific analysis class. Stores the
        results from the request, performs basic (result independent)
        parsing and sets timezone/time unit.

        Parameters
        ----------
        results: dict
            analysis results
        t_zone: t_zone
            timezone, if None, times will remain in epoch time [UTC].
        t_unit: t_unit
            time unit for conversion from epoch time [ms].
        """
        # raw_results as returned from server
        self._raw_results = results
        # timezone and unit set in constructor (with defaults)
        self._t_zone = t_zone or "UTC"
        self._t_unit = t_unit or "ms"
        # Dataframe representation; populated by subclasses
        self._results_df = None
        self._inputs = results.get("inputs", "Inputs not available")
        # name of the time column in ``_results_df`` (None until known)
        self.time_column = None

    def _render_plot(self, interactive, filename=None):
        """Render plot to screen (interactive) or file.

        Parameters
        ---------
        interactive: bool
            Whether to display plot on screen (True) or to store to file
            (False).
        filename: str
            Filename for the plot. If not given, a name is derived from
            the sources and the request id.

        Returns
        -------
        plot file name: str
            name of plot file (or empty string in case of interactive plot)
        """
        if interactive:
            plot_file = ""
            plt.show()
        else:
            if not filename:
                # Derive a name such as ``<src>_<request_id>.png`` or
                # ``<first>_to_<last>_<request_id>.png`` for multi-source runs.
                if len(self.sources()) > 1:
                    srcstr = self.sources()[0] + "_to_" + self.sources()[-1] + "_"
                else:
                    srcstr = self.sources()[0] + "_"
                plot_file = srcstr + self.request_id() + ".png"
            else:
                plot_file = filename
            plt.savefig(plot_file, dpi=600, bbox_inches="tight")
            print(f"saved plot to {plot_file}")
        return plot_file

    def _add_datetime(self, time_column="timestamps"):
        """
        Add a datetime column to self._results_df.

        Convert EPOCH time to datetime with the timezone and time unit
        given in the constructor. Will add an additional column
        "datetime" to the dataframe and set ``self.time_column`` to
        "datetime".

        Parameters
        ---------
        time_column: str
            Column name for the integer timestamps.
        """
        self.time_column = time_column
        self._results_df = self._add_datetime_df(self._results_df, self.time_column)

    def _add_datetime_df(self, dframe, timecolumn):
        """
        Convert EPOCH time to datetime with the timezone and time unit
        given in the constructor. Will add an additional column
        "datetime" to the given dataframe.

        Parameters
        ----------
        dframe: DataFrame
            DataFrame to operate on
        timecolumn: str
            column containing epoch time

        Returns
        -------
        DataFrame with added datetime column
        """
        # EPOCH to datetime considering time zone
        dt_col = pd.to_datetime(
            dframe[timecolumn], unit=self._t_unit, utc=True
        ).dt.tz_convert(self._t_zone)
        dframe["datetime"] = dt_col
        # Mark timecolumn as available
        self.time_column = "datetime"
        return dframe

    # Accessor functions
    def raw_results(self):
        """Raw results as returned by server.

        Returns
        -------
        raw_results: dict
        """
        return self._raw_results.copy()

    def request_id(self):
        """request_id from request.

        Returns
        -------
        request_id: str
        """
        return self._raw_results["request_id"]

    def feature(self):
        """feature from request.

        Returns
        -------
        feature: str
        """
        return self._raw_results["feature"]

    def results(self):
        """results dict as returned from request.

        Returns
        -------
        results: dict
        """
        return self._raw_results["results"].copy()

    def status(self):
        """status from request.

        Returns
        -------
        status: str
        """
        return self._raw_results["status"]

    def inputs(self):
        """inputs to the request algorithm.

        Returns
        -------
        inputs: dict
        """
        return self._inputs

    def sources(self):
        """sources to the request algorithm.

        Returns
        -------
        sources: list
        """
        # Always return a list, even for a single-source request.
        sources = self.inputs()["UUID"]
        if not isinstance(sources, list):
            sources = [sources]
        return sources

    # For avoiding problems when no results are available
    def check_status(self):
        """Return the request status, raising if the request failed.

        Returns
        -------
        status: str

        Raises
        ------
        ValueError
            if the server-side analysis did not succeed.
        """
        if "success" not in self.status():
            # BUG FIX: ``self.request_id`` was previously interpolated
            # without calling it, embedding the bound-method repr instead
            # of the actual request id.
            err_str = f"Analysis {self.request_id()} failed on server side"
            print(err_str)
            raise ValueError(err_str)
        return self.status()

    # Prints header for summary functions
    def summary(self):
        """
        Print header for summary function. Called as super() from specific
        analysis class. Will raise an exception in case request was not
        successful.
        """
        # Announce
        print(f"=== {self.feature()} ===")
        print(f"request_id {self.request_id()}")
        # Check success
        self.check_status()
        # print time info if applicable
        if self.time_column is not None:
            from_t = self._results_df[self.time_column].min()
            to_t = self._results_df[self.time_column].max()
            if self.time_column == "datetime":
                from_t = from_t.strftime("%Y%m%d-%H:%M.%S")
                to_t = to_t.strftime("%Y%m%d-%H:%M.%S")
            print(f"from {from_t} to {to_t}")

    # Default method
    def plot(
        self, interactive=True, time_format=None, filename=None
    ):  # pylint: disable=unused-argument
        """Pro forma ancestor function.

        Parameters
        ----------
        interactive : bool
            True: show plot, False: save plot
        time_format: str, optional
            strftime format specifier for tick_x_labels. If not given
            only dates are shown. To show dates and time use %y%m%d-%H:%M:%S
        filename: str, optional
            filename for the plot.

        Returns
        -------
        plot file name: str
            name of plot file (or empty string in case of interactive plot)
        """
        self.check_status()
        print(f"Plot function not implemented for {type(self).__name__}")
        return ""

    # Save self as pickle
    def save_pkl(self, file_name=None):
        """Serializes the analysis object as pickle file.

        In case filename is not given, filename will be
        <request_id>.pkl

        Parameters
        ----------
        file_name: str
            filename to save object under.

        Returns
        -------
        Actually used file path: str
        """
        if file_name is None:
            file_name = f"{self.request_id()}.pkl"
        print(f"Saving {self.feature()} object to", file_name)
        with open(file_name, "wb") as pkl_file:
            pickle.dump(self, pkl_file)
        return file_name

    # Return results as dataframe
    def to_df(self):
        """Return a dataframe with the analysis results.

        Returns
        -------
        Dataframe with analysis results: dataFrame
        """
        self.check_status()
        return self._results_df.copy()

    # Save results to dataframe
    def save_df(self, file_name=None):
        """Save a dataframe with the analysis results.

        In case filename is not given, filename will be
        <request_id>.csv. Format of the dataframe depends on specific
        analysis. Will raise an exception in case no results are
        available.

        Parameters
        ----------
        file_name: str
            filename to save dataframe under.

        Returns
        -------
        Actually used file path: str
        """
        self.check_status()
        if file_name is None:
            file_name = f"{self.request_id()}.csv"
        print(f"Saving {self.feature()} data frame results to", file_name)
        self.to_df().to_csv(file_name, index=False)
        return file_name

    # Save request results as JSON
    def save_json(self, file_name=None, raw=False):
        """Saves the request result from the API JSON.

        In case filename is not given, filename will be
        <request_id>.json

        Parameters
        ----------
        file_name: str
            filename to save object under.
        raw: boolean
            return only algorithm results [false, default]
            return full request response [true]

        Returns
        -------
        Actually used file path: str
        """
        if file_name is None:
            file_name = f"{self.request_id()}.json"
        print(f"Saving {self.feature()} API results to", file_name)
        if raw:
            s_dict = self._raw_results
        else:
            s_dict = self.results()
        # Explicit encoding keeps output portable across platforms.
        with open(file_name, "w", encoding="utf-8") as json_file:
            json.dump(s_dict, json_file, indent=4)
        return file_name

    def __repr__(self):
        # BUG FIX: corrected the "feaure" typo in the repr output.
        return (
            f"<{self.__class__.__name__} - feature='{self.feature()}'"
            f" - request_id='{self.request_id()}'>"
        )
| [
"pickle.dump",
"matplotlib.pyplot.savefig",
"json.dump",
"pandas.to_datetime",
"matplotlib.pyplot.show"
] | [((1691, 1701), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1699, 1701), True, 'import matplotlib.pyplot as plt\n'), ((2079, 2131), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_file'], {'dpi': '(600)', 'bbox_inches': '"""tight"""'}), "(plot_file, dpi=600, bbox_inches='tight')\n", (2090, 2131), True, 'import matplotlib.pyplot as plt\n'), ((7632, 7659), 'pickle.dump', 'pickle.dump', (['self', 'pkl_file'], {}), '(self, pkl_file)\n', (7643, 7659), False, 'import pickle\n'), ((9606, 9644), 'json.dump', 'json.dump', (['s_dict', 'json_file'], {'indent': '(4)'}), '(s_dict, json_file, indent=4)\n', (9615, 9644), False, 'import json\n'), ((3465, 3528), 'pandas.to_datetime', 'pd.to_datetime', (['dframe[timecolumn]'], {'unit': 'self._t_unit', 'utc': '(True)'}), '(dframe[timecolumn], unit=self._t_unit, utc=True)\n', (3479, 3528), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-07-15 20:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_markdown.models
import taggit.managers
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the ``DevhubCreateEventModel``
    # table (a devhub event with tags, an owning user and a user profile).
    dependencies = [
        # taggit tables and the user model must exist before the
        # M2M/FK fields below can reference them.
        ('taggit', '0002_auto_20150616_2121'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app', '0051_auto_20160712_1459'),
    ]
    operations = [
        migrations.CreateModel(
            name='DevhubCreateEventModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_heading', models.CharField(max_length=100)),
                ('event_date', models.DateTimeField(blank=True, null=True)),
                ('event_venue', models.CharField(max_length=100)),
                # Markdown-formatted body, rendered via django-markdown.
                ('event_description', django_markdown.models.MarkdownField()),
                ('event_for', models.CharField(max_length=25)),
                # Creation/modification timestamps maintained automatically.
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.UserProfileModel')),
            ],
        ),
    ]
| [
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((391, 448), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (422, 448), False, 'from django.db import migrations, models\n'), ((639, 732), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (655, 732), False, 'from django.db import migrations, models\n'), ((765, 797), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (781, 797), False, 'from django.db import migrations, models\n'), ((831, 874), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (851, 874), False, 'from django.db import migrations, models\n'), ((909, 941), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (925, 941), False, 'from django.db import migrations, models\n'), ((1053, 1084), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(25)'}), '(max_length=25)\n', (1069, 1084), False, 'from django.db import migrations, models\n'), ((1115, 1154), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1135, 1154), False, 'from django.db import migrations, models\n'), ((1186, 1221), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1206, 1221), False, 'from django.db import migrations, models\n'), ((1420, 1516), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1437, 1516), False, 'from django.db import 
migrations, models\n'), ((1547, 1641), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""app.UserProfileModel"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'app.UserProfileModel')\n", (1564, 1641), False, 'from django.db import migrations, models\n')] |
# PRISM CONVERSION FROM ASCII GRIDS -- TASMIN / TASMAX
# header info
# ncols 2015
# nrows 1320
# xllcorner -2301787.7731349
# yllcorner 108069.7858797
# cellsize 2000
# NODATA_value -9999
import rasterio, glob, os
from rasterio import Affine
import numpy as np
from pathos import multiprocessing as mp
# input_path = '/Data/Base_Data/Climate/AK_CAN_2km/historical/singleBand/pr'
# #'/Data/Base_Data/Climate/AK_CAN_2km/historical/singleBand/prism/AK_2KM_PRISM/Temperature/2km/older'
# output_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2'
# groups = ['min_temp', 'max_temp']
# # # STEP 1 -- CONVERT TO GTIFF FROM ASC AND TXT
# list the data we want
variables = [ 'tmin', 'tmax' ]
# AK and Canada sources live in different trees and formats: the AK tree
# ships per-variable ``*.txt`` ASCII grids, the Canada tree ``*.asc``.
input_path_ak = '/Data/Base_Data/Climate/AK_CAN_2km/historical/singleBand/prism/AK_2KM_PRISM/Temperature/2km/older'
input_path_can = '/Data/Base_Data/Climate/AK_CAN_2km/historical/singleBand/prism/AK_CAN_2km_PRISM/CAN_originals/older'
for variable in variables:
	for ak_test, input_path in zip( [True,False], [input_path_ak,input_path_can] ):
		# per-variable folder for the converted GeoTIFFs
		output_path = os.path.join( '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2', variable,'raw_converted' )
		if not os.path.exists( output_path ):
			os.makedirs( output_path )
		if ak_test:
			input_path = input_path_ak
			# the AK tree names its folders min_temp/max_temp, not tmin/tmax
			if variable == 'tmin':
				v = 'min_temp'
			elif variable == 'tmax':
				v = 'max_temp'
			else:
				# NOTE(review): this *calls* the ``NotImplemented`` singleton
				# (a TypeError at runtime) -- presumably meant to be
				# ``raise NotImplementedError(...)``.
				NotImplemented( 'only tmax / tmin currently supported' )
			files = glob.glob( os.path.join( input_path, v, '*'+variable+'*.txt' ) )
		else:
			input_path = input_path_can
			files = glob.glob( os.path.join( input_path, '*'+variable+'*.asc' ) )
		# build (input, output, crs) argument triples for the worker pool
		ext = files[0].split('.')[1]
		output_filenames = [ os.path.join( output_path, os.path.basename( fn ).replace( '.'+ext, '.tif' ) ) for fn in files ]
		crs = {'init':'epsg:4326'}
		args = [ (i,j,crs) for i,j in zip(files, output_filenames) ]
def bounds_to_extent(bounds):
	'''
	take input rasterio bounds object and return an extent

	The extent is a closed ring of (x, y) corner pairs, starting and
	ending at the lower-left corner.
	'''
	left, bottom, right, top = bounds
	return [
		(left, bottom),
		(right, bottom),
		(right, top),
		(left, top),
		(left, bottom),
	]
def convert_to_gtiff(fn, output_filename, crs={'init': 'epsg:3338'}):
	'''
	convert the ascii rasters from PRISM to gtiff

	Reads band 1 of ``fn``, carries the metadata over (LZW-compressed
	GTiff with the given ``crs``) and writes ``output_filename``.
	'''
	print(fn)
	src = rasterio.open(fn)
	band = src.read(1)  # the ASCII grids are single-band
	profile = src.meta
	profile.update(compress='lzw', driver='GTiff', crs=crs)
	# drop the transform to overcome rasterio warnings
	if 'transform' in profile.keys():
		profile.pop('transform')
	with rasterio.open(output_filename, 'w', **profile) as dst:
		dst.write(band, 1)
	return output_filename
if __name__ == '__main__':
	# NOTE(review): a stdlib multiprocessing pool cannot pickle the lambda
	# below; this presumably relies on pathos' dill-based serialization.
	pool = mp.Pool( 32 )
	pool.map( lambda x: convert_to_gtiff( *x ), args )
	pool.close()
	pool.join()
# # # STEP 2 -- MERGE IT WITH GDAL TOOLS
# list the converted AK and Canada GeoTIFFs for the current variable
caw = sorted( glob.glob( os.path.join( '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2',variable,'raw_converted', 'caw*.tif' ) ) )
ak = sorted( glob.glob( os.path.join( '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2',variable,'raw_converted', 'ak_*.tif' ) ) )
grouped = zip( ak, caw )
# merge these files:
# log = open( '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2/batch_run.bat', 'w' )
# NOTE(review): the loop variable ``ak`` shadows the file list above;
# harmless because ``grouped`` already holds its own reference, but
# confusing to read.
for ak,ca in grouped:
	out = ak.replace( 'ak_', 'akcan_')
	ca_out = ca.replace( '.tif', '_3338.tif' )
	# reproject the Canada grid from EPSG:4326 into EPSG:3338 to match AK
	os.system( 'gdalwarp -overwrite -r near -t_srs EPSG:3338 -s_srs EPSG:4326 -ot Float32 ' + ca + ' ' + ca_out )
	ca_scale = ca_out.replace( '.tif', '_scaled.tif' )
	# scale the Canada values by 0.1 (source appears to be stored x10 --
	# confirm against the provider's documentation)
	os.system( 'gdal_calc.py --overwrite -A ' + ca_out + ' --outfile=' + ca_scale + ' --calc="A*(0.1)" --NoDataValue=-9999 --type=Float32' )
	# mosaic AK over the scaled Canada grid with -9999 as nodata
	os.system( 'gdal_merge.py -init -9999 -n -9999 -a_nodata -9999 -ot Float32 -o ' + out + ' ' + ak + ' ' + ca_scale )
	final = ca.replace( '.tif', '_merged.tif' ).replace( 'raw_converted', 'merged' ).replace( 'caw_', 'akcan_' )
	if not os.path.exists( os.path.dirname(final) ):
		os.makedirs(os.path.dirname(final))
	os.system( 'gdal_translate -co "COMPRESS=LZW" ' + out + ' ' + final )
# # DUE TO SOME WEIRDNESS WITH VIRTUALENV AND GDAL_MERGE.PY I am writing this out to a text file and running it when not in virtualenv
# out = ak.replace( 'ak_', 'akcan_')
# ca_out = ca.replace( '.tif', '_3338.tif' )
# log.write( 'gdalwarp -overwrite -r near -t_srs EPSG:3338 -s_srs EPSG:4326 -ot Float32 ' + ca + ' ' + ca_out + '\n' )
# ca_scale = ca_out.replace( '.tif', '_scaled.tif' )
# log.write( 'gdal_calc.py --overwrite -A ' + ca_out + ' --outfile=' + ca_scale + ' --calc="A*(0.1)" --NoDataValue=-9999 --type=Float32' + '\n' )
# log.write( 'gdal_merge.py -init -9999 -n -9999 -a_nodata -9999 -ot Float32 -o ' + out + ' ' + ak + ' ' + ca_scale + '\n' )
# final = ca.replace( '.tif', '_merged.tif' )
# log.write( 'gdal_translate -co "COMPRESS=LZW" ' + out + ' ' + final + '\n' )
# # # STEP 3 -- INTERPOLATE / REGRID / MASK to match existing SNAP resources
def coordinates(fn=None, meta=None, numpy_array=None, input_crs=None, to_latlong=False):
	'''
	take a raster file as input and return the centroid coords for each
	of the grid cells as a pair of numpy 2d arrays (longitude, latitude)

	Parameters
	----------
	fn: str
		path to a raster readable by rasterio; takes precedence over
		``meta``/``numpy_array`` when given.
	meta: dict
		rasterio-style metadata mapping with an 'affine' entry.
	numpy_array: numpy.ndarray
		2D array supplying the grid shape (used together with ``meta``).
	input_crs:
		CRS of the input grid; required when ``to_latlong`` is True.
	to_latlong: bool
		if True, project the centroids to geographic longitude/latitude.

	Returns
	-------
	(eastings, northings) -- or (longs, lats) when ``to_latlong`` --
	as two 2D numpy arrays matching the grid shape.
	'''
	import rasterio
	import numpy as np
	from affine import Affine
	from pyproj import Proj, transform

	if fn:
		# Read raster
		with rasterio.open(fn) as r:
			T0 = r.affine  # upper-left pixel corner affine transform
			p1 = Proj(r.crs)
			A = r.read(1)  # pixel values
	elif (meta is not None) & (numpy_array is not None):
		A = numpy_array
		T0 = meta['affine']
		p1 = Proj(input_crs) if input_crs is not None else None
	else:
		# BUG FIX: the exception was previously constructed but never
		# raised, so bad inputs fell through to a confusing NameError.
		raise BaseException('check inputs')
	# All rows and columns
	cols, rows = np.meshgrid(np.arange(A.shape[1]), np.arange(A.shape[0]))
	# Get affine transform for pixel centres
	T1 = T0 * Affine.translation(0.5, 0.5)
	# Function to convert pixel row/column index (from 0) to easting/northing at centre
	rc2en = lambda r, c: (c, r) * T1
	# BUG FIX: ``np.float`` was deprecated in NumPy 1.20 and removed in
	# 1.24; the builtin ``float`` is the documented replacement.
	eastings, northings = np.vectorize(rc2en, otypes=[float, float])(rows, cols)
	if to_latlong == False:
		return eastings, northings
	elif (to_latlong == True) & (input_crs != None):
		# Project all longitudes, latitudes
		longs, lats = transform(p1, p1.to_latlong(), eastings, northings)
		return longs, lats
	else:
		# BUG FIX: previously constructed-but-unraised, silently
		# returning None.
		raise BaseException('cant reproject to latlong without an input_crs')
def xyz_to_grid(x, y, z, grid, method='cubic', output_dtype=np.float32, *args, **kwargs):
	'''
	interpolate points to a grid. simple wrapper around
	scipy.interpolate.griddata. Points and grid must be
	in the same coordinate system

	x = 1-D np.array of x coordinates / x,y,z must be same length
	y = 1-D np.array of y coordinates / x,y,z must be same length
	z = 1-D np.array of z coordinates / x,y,z must be same length
	grid = tuple of meshgrid as made using numpy.meshgrid()
		order (xi, yi)
	method = one of 'cubic', 'near', 'linear'

	Returns the interpolated surface flipped top-to-bottom and cast to
	``output_dtype``.
	'''
	from scipy.interpolate import griddata
	interpolated = griddata((x, y), z, grid, method=method)
	# flip so row 0 corresponds to the maximum y of the grid
	flipped = np.flipud(interpolated)
	return flipped.astype(output_dtype)
import glob, os, rasterio
import numpy as np
from rasterio.warp import reproject, RESAMPLING
import pandas as pd
# ORIG_AK_RAW = '/Data/Base_Data/Climate/AK_CAN_2km/historical/singleBand/prism/AK_2KM_PRISM/Temperature/2km/older'
# TEMPLATE:
# the SNAP AK/CAN 2km grid every merged file is stamped onto
template_fn = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/akcan_template/tas_mean_C_AR5_CCSM4_rcp26_01_2006.tif'
rst = rasterio.open( template_fn )
mask = rst.read_masks()
# NOTE(review): relies on ``variable`` leaking from the STEP 1 loop --
# only the last variable is processed here; confirm this is intended.
input_dir = os.path.join( '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2', variable, 'merged' )
files = glob.glob( os.path.join( input_dir, 'akcan_*.tif' ) )
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# make an empty raster to hold the new output
for fn in files:
	template = rasterio.open( template_fn )
	meta = template.meta
	meta.update( compress='lzw' )
	output_filename = os.path.join( '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2', variable, os.path.basename( fn ).replace( 'merged', 'final' ) )
	# write a placeholder raster carrying the template's profile; its
	# (uninitialized) contents are overwritten by gdalwarp below
	with rasterio.open( output_filename, 'w', **meta ) as out:
		out.write( np.empty_like( template.read( 1 ) ), 1 )
	# run it with gdalwarp
	command = 'gdalwarp ' + fn + ' ' + output_filename
	os.system( command )
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# for fn in files:
# cur = rasterio.open( fn )
# # GET THE COORDS AND THE DATA IN A WAY THAT IS INTEPOLATABLE
# lons, lats = coordinates( fn )
# df = pd.DataFrame( {'lons':lons.ravel(), 'lats':lats.ravel(), 'dat':cur.read(1).ravel() } )
# new_df = df[df.dat != -9999]
# new_grid = xyz_to_grid( new_df.lons.tolist(), new_df.lats.tolist(), new_df.dat.tolist(), (lons,lats), method='cubic', output_dtype=np.float32 )
# # new_grid[ np.isnan( new_grid ) ] = -9999
# output_arr = np.empty_like( rst.read( 1 ) )
# # now reproject the new_grid to the extent/resolution/crs of the ALF data -- idiotic crs we use here
# reproject( new_grid, output_arr, src_transform=cur.affine, src_crs={ 'init':'epsg:3338' }, src_nodata=None, \
# dst_transform=rst.affine, dst_crs=rst.crs,\
# dst_nodata=None, resampling=RESAMPLING.cubic_spline, SOURCE_EXTRA=1000 )
# output_arr[ mask == 0 ] = rst.nodata
# new_path = os.path.join( '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2', variable, 'prepped' )
# output_filename = os.path.join( new_path, os.path.basename( fn ) )
# meta = rst.meta
# meta.update( compress='lzw' )
# meta.pop( 'transform' )
# with rasterio.open( output_filename, 'w', **meta ) as out:
# out.write( output_arr, 1 )
# # # # END PREP
# THIS IS HOW IT WAS CONVERTED FROM THE TXT/ASC FORMATS
# for group in groups:
# # list the data we want to convert to GTiff
# # remember that month 14 is the annual average
# files = glob.glob( os.path.join( input_path, group, '*.txt' ) )
# for fn in files:
# # print fn
# rst = rasterio.open( fn )
# arr = rst.read( 1 ) # get the first and only band
# output_filename = os.path.join( output_path, os.path.basename(fn).replace( '.txt', '.tif' ) )
# meta = rst.meta
# meta.update( compress='lzw', driver='GTiff', crs={'init':'epsg:3338'} )
# # drop the transform to overcome rasterio warnings
# if 'transform' in meta.keys():
# meta.pop( 'transform' )
# # write them out
# with rasterio.open( output_filename, 'w', **meta ) as out:
# out.write( arr, 1 )
| [
"os.path.exists",
"os.makedirs",
"pathos.multiprocessing.Pool",
"numpy.flipud",
"rasterio.open",
"scipy.interpolate.griddata",
"os.path.join",
"os.path.dirname",
"affine.Affine.translation",
"os.path.basename",
"pyproj.Proj",
"os.system",
"numpy.vectorize",
"numpy.arange"
] | [((7696, 7722), 'rasterio.open', 'rasterio.open', (['template_fn'], {}), '(template_fn)\n', (7709, 7722), False, 'import rasterio\n'), ((7764, 7883), 'os.path.join', 'os.path.join', (['"""/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2"""', 'variable', '"""merged"""'], {}), "(\n '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2'\n , variable, 'merged')\n", (7776, 7883), False, 'import glob, os, rasterio\n'), ((1110, 1236), 'os.path.join', 'os.path.join', (['"""/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2"""', 'variable', '"""raw_converted"""'], {}), "(\n '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2'\n , variable, 'raw_converted')\n", (1122, 1236), False, 'import glob, os, rasterio\n'), ((3456, 3573), 'os.system', 'os.system', (["(\n 'gdalwarp -overwrite -r near -t_srs EPSG:3338 -s_srs EPSG:4326 -ot Float32 '\n + ca + ' ' + ca_out)"], {}), "(\n 'gdalwarp -overwrite -r near -t_srs EPSG:3338 -s_srs EPSG:4326 -ot Float32 '\n + ca + ' ' + ca_out)\n", (3465, 3573), False, 'import glob, os, rasterio\n'), ((3621, 3759), 'os.system', 'os.system', (['(\'gdal_calc.py --overwrite -A \' + ca_out + \' --outfile=\' + ca_scale +\n \' --calc="A*(0.1)" --NoDataValue=-9999 --type=Float32\')'], {}), '(\'gdal_calc.py --overwrite -A \' + ca_out + \' --outfile=\' +\n ca_scale + \' --calc="A*(0.1)" --NoDataValue=-9999 --type=Float32\')\n', (3630, 3759), False, 'import glob, os, rasterio\n'), ((3760, 3882), 'os.system', 'os.system', (["('gdal_merge.py -init -9999 -n -9999 -a_nodata -9999 -ot Float32 -o ' + out +\n ' ' + ak + ' ' + ca_scale)"], {}), "(\n 'gdal_merge.py -init -9999 -n -9999 -a_nodata -9999 -ot Float32 -o ' +\n out + ' ' + ak + ' ' + ca_scale)\n", (3769, 3882), False, 'import glob, os, rasterio\n'), ((4079, 4146), 'os.system', 'os.system', (['(\'gdal_translate -co "COMPRESS=LZW" \' + out + \' \' + final)'], {}), '(\'gdal_translate -co "COMPRESS=LZW" \' + out + \' 
\' + final)\n', (4088, 4146), False, 'import glob, os, rasterio\n'), ((7202, 7242), 'scipy.interpolate.griddata', 'griddata', (['(x, y)', 'z', 'grid'], {'method': 'method'}), '((x, y), z, grid, method=method)\n', (7210, 7242), False, 'from scipy.interpolate import griddata\n'), ((7896, 7934), 'os.path.join', 'os.path.join', (['input_dir', '"""akcan_*.tif"""'], {}), "(input_dir, 'akcan_*.tif')\n", (7908, 7934), False, 'import glob, os, rasterio\n'), ((8134, 8160), 'rasterio.open', 'rasterio.open', (['template_fn'], {}), '(template_fn)\n', (8147, 8160), False, 'import rasterio\n'), ((8590, 8608), 'os.system', 'os.system', (['command'], {}), '(command)\n', (8599, 8608), False, 'import glob, os, rasterio\n'), ((1238, 1265), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (1252, 1265), False, 'import glob, os, rasterio\n'), ((1272, 1296), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (1283, 1296), False, 'import glob, os, rasterio\n'), ((2274, 2291), 'rasterio.open', 'rasterio.open', (['fn'], {}), '(fn)\n', (2287, 2291), False, 'import rasterio\n'), ((2712, 2723), 'pathos.multiprocessing.Pool', 'mp.Pool', (['(32)'], {}), '(32)\n', (2719, 2723), True, 'from pathos import multiprocessing as mp\n'), ((2897, 3035), 'os.path.join', 'os.path.join', (['"""/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2"""', 'variable', '"""raw_converted"""', '"""caw*.tif"""'], {}), "(\n '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2'\n , variable, 'raw_converted', 'caw*.tif')\n", (2909, 3035), False, 'import glob, os, rasterio\n'), ((3055, 3193), 'os.path.join', 'os.path.join', (['"""/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2"""', 'variable', '"""raw_converted"""', '"""ak_*.tif"""'], {}), "(\n '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2'\n , variable, 'raw_converted', 'ak_*.tif')\n", (3067, 3193), False, 'import glob, os, 
rasterio\n'), ((5863, 5884), 'numpy.arange', 'np.arange', (['A.shape[1]'], {}), '(A.shape[1])\n', (5872, 5884), True, 'import numpy as np\n'), ((5886, 5907), 'numpy.arange', 'np.arange', (['A.shape[0]'], {}), '(A.shape[0])\n', (5895, 5907), True, 'import numpy as np\n'), ((5964, 5992), 'affine.Affine.translation', 'Affine.translation', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (5982, 5992), False, 'from affine import Affine\n'), ((6217, 6265), 'numpy.vectorize', 'np.vectorize', (['rc2en'], {'otypes': '[np.float, np.float]'}), '(rc2en, otypes=[np.float, np.float])\n', (6229, 6265), True, 'import numpy as np\n'), ((8400, 8443), 'rasterio.open', 'rasterio.open', (['output_filename', '"""w"""'], {}), "(output_filename, 'w', **meta)\n", (8413, 8443), False, 'import rasterio\n'), ((1529, 1582), 'os.path.join', 'os.path.join', (['input_path', 'v', "('*' + variable + '*.txt')"], {}), "(input_path, v, '*' + variable + '*.txt')\n", (1541, 1582), False, 'import glob, os, rasterio\n'), ((1644, 1694), 'os.path.join', 'os.path.join', (['input_path', "('*' + variable + '*.asc')"], {}), "(input_path, '*' + variable + '*.asc')\n", (1656, 1694), False, 'import glob, os, rasterio\n'), ((2568, 2611), 'rasterio.open', 'rasterio.open', (['output_filename', '"""w"""'], {}), "(output_filename, 'w', **meta)\n", (2581, 2611), False, 'import rasterio\n'), ((4012, 4034), 'os.path.dirname', 'os.path.dirname', (['final'], {}), '(final)\n', (4027, 4034), False, 'import glob, os, rasterio\n'), ((4053, 4075), 'os.path.dirname', 'os.path.dirname', (['final'], {}), '(final)\n', (4068, 4075), False, 'import glob, os, rasterio\n'), ((5418, 5435), 'rasterio.open', 'rasterio.open', (['fn'], {}), '(fn)\n', (5431, 5435), False, 'import rasterio\n'), ((5515, 5526), 'pyproj.Proj', 'Proj', (['r.crs'], {}), '(r.crs)\n', (5519, 5526), False, 'from pyproj import Proj, transform\n'), ((7252, 7265), 'numpy.flipud', 'np.flipud', (['zi'], {}), '(zi)\n', (7261, 7265), True, 'import numpy as np\n'), ((5674, 5689), 
'pyproj.Proj', 'Proj', (['input_crs'], {}), '(input_crs)\n', (5678, 5689), False, 'from pyproj import Proj, transform\n'), ((8339, 8359), 'os.path.basename', 'os.path.basename', (['fn'], {}), '(fn)\n', (8355, 8359), False, 'import glob, os, rasterio\n'), ((1777, 1797), 'os.path.basename', 'os.path.basename', (['fn'], {}), '(fn)\n', (1793, 1797), False, 'import glob, os, rasterio\n')] |
from abc import ABC, abstractmethod
import asyncio
from typing import (
AsyncIterator,
Tuple,
)
from cancel_token import (
CancelToken,
OperationCancelled,
)
from eth.constants import GENESIS_BLOCK_NUMBER
from eth.exceptions import (
HeaderNotFound,
)
from eth_typing import (
BlockNumber,
Hash32,
)
from eth_utils import (
encode_hex,
ValidationError,
)
from eth.abc import (
BlockAPI,
BlockHeaderAPI,
SignedTransactionAPI,
)
from eth2.beacon.types.blocks import BaseBeaconBlock
from p2p.constants import (
MAX_REORG_DEPTH,
SEAL_CHECK_RANDOM_SAMPLE_RATE,
)
from p2p.disconnect import DisconnectReason
from p2p.service import BaseService
from trinity._utils.headers import (
skip_complete_headers,
)
from trinity._utils.humanize import (
humanize_integer_sequence,
)
from trinity.chains.base import AsyncChainAPI
from trinity.db.eth1.header import BaseAsyncHeaderDB
from trinity.protocol.common.peer import (
BaseChainPeer,
)
from eth2.beacon.chains.base import (
BaseBeaconChain
)
from .types import SyncProgress
class PeerHeaderSyncer(BaseService):
    """
    Sync as many headers as possible with a given peer.
    Here, the run() method will execute the sync loop until our local head is the same as the one
    with the highest TD announced by any of our peers.
    """
    # Fraction of headers whose PoW seal gets spot-checked during chain validation.
    _seal_check_random_sample_rate = SEAL_CHECK_RANDOM_SAMPLE_RATE
    def __init__(self,
                 chain: AsyncChainAPI,
                 db: BaseAsyncHeaderDB,
                 peer: BaseChainPeer,
                 token: CancelToken = None) -> None:
        super().__init__(token)
        self.chain = chain
        self.db = db
        # Progress tracker; replaced with a fresh SyncProgress when a sync starts.
        self.sync_progress: SyncProgress = None
        self._peer = peer
        # Hash of the header we are syncing towards; refreshed on every yielded batch.
        self._target_header_hash = peer.head_info.head_hash
    def get_target_header_hash(self) -> Hash32:
        """Return the header hash this syncer is currently targeting.

        :raises ValidationError: if no target hash has been recorded.
        """
        if self._target_header_hash is None:
            raise ValidationError("Cannot check the target hash when there is no active sync")
        else:
            return self._target_header_hash
    async def _run(self) -> None:
        # This service does no background work of its own; syncing is driven by
        # callers iterating next_header_batch(). Park here until cancellation.
        await self.events.cancelled.wait()
    async def next_header_batch(self) -> AsyncIterator[Tuple[BlockHeaderAPI, ...]]:
        """Try to fetch headers until the given peer's head_hash.
        Returns when the peer's head_hash is available in our ChainDB, or if any error occurs
        during the sync.
        """
        peer = self._peer
        head = await self.wait(self.db.coro_get_canonical_head())
        # Total difficulty score of our current canonical head.
        head_td = await self.wait(self.db.coro_get_score(head.hash))
        if peer.head_info.head_td <= head_td:
            self.logger.info(
                "Head TD (%d) announced by %s not higher than ours (%d), not syncing",
                peer.head_info.head_td, peer, head_td)
            return
        else:
            self.logger.debug(
                "%s announced Head TD %d, which is higher than ours (%d), starting sync",
                peer, peer.head_info.head_td, head_td)
        self.sync_progress = SyncProgress(
            head.block_number,
            head.block_number,
            peer.head_info.head_number,
        )
        self.logger.info("Starting sync with %s", peer)
        last_received_header: BlockHeaderAPI = None
        # When we start the sync with a peer, we always request up to MAX_REORG_DEPTH extra
        # headers before our current head's number, in case there were chain reorgs since the last
        # time _sync() was called. All of the extra headers that are already present in our DB
        # will be discarded by skip_complete_headers() so we don't unnecessarily process them
        # again.
        start_at = BlockNumber(max(GENESIS_BLOCK_NUMBER + 1, head.block_number - MAX_REORG_DEPTH))
        while self.is_operational:
            if not peer.is_operational:
                self.logger.info("%s disconnected, aborting sync", peer)
                break
            try:
                all_headers = await self.wait(self._request_headers(peer, start_at))
                if last_received_header is None:
                    # Skip over existing headers on the first run-through
                    completed_headers, new_headers = await self.wait(
                        skip_complete_headers(all_headers, self.db.coro_header_exists)
                    )
                    if len(new_headers) == 0 and len(completed_headers) > 0:
                        head = await self.wait(self.db.coro_get_canonical_head())
                        start_at = BlockNumber(max(
                            all_headers[-1].block_number + 1,
                            head.block_number - MAX_REORG_DEPTH
                        ))
                        self.logger.debug(
                            "All %d headers redundant, head at %s, fetching from #%d",
                            len(completed_headers),
                            head,
                            start_at,
                        )
                        continue
                    elif completed_headers:
                        self.logger.debug(
                            "Header sync skipping over (%d) already stored headers %s: %s..%s",
                            len(completed_headers),
                            humanize_integer_sequence(h.block_number for h in completed_headers),
                            completed_headers[0],
                            completed_headers[-1],
                        )
                else:
                    new_headers = all_headers
                    self.logger.debug2('sync received new headers: %s', new_headers)
            except OperationCancelled:
                self.logger.info("Sync with %s completed", peer)
                break
            except asyncio.TimeoutError:
                self.logger.warning("Timeout waiting for header batch from %s, aborting sync", peer)
                await peer.disconnect(DisconnectReason.TIMEOUT)
                break
            except ValidationError as err:
                self.logger.warning(
                    "Invalid header response sent by peer %s disconnecting: %s",
                    peer, err,
                )
                await peer.disconnect(DisconnectReason.USELESS_PEER)
                break
            if not new_headers:
                if last_received_header is None:
                    request_parent = head
                else:
                    request_parent = last_received_header
                if head_td < peer.head_info.head_td:
                    # peer claims to have a better header, but didn't return it. Boot peer
                    # TODO ... also blacklist, because it keeps trying to reconnect
                    self.logger.warning(
                        "%s announced difficulty %s, but didn't return any headers after %r@%s",
                        peer,
                        peer.head_info.head_td,
                        request_parent,
                        head_td,
                    )
                    await peer.disconnect(DisconnectReason.SUBPROTOCOL_ERROR)
                else:
                    self.logger.info("Got no new headers from %s, aborting sync", peer)
                break
            first = new_headers[0]
            first_parent = None
            if last_received_header is None:
                # on the first request, make sure that the earliest ancestor has a parent in our db
                try:
                    first_parent = await self.wait(
                        self.db.coro_get_block_header_by_hash(first.parent_hash)
                    )
                except HeaderNotFound:
                    self.logger.warning(
                        "Unable to find common ancestor betwen our chain and %s",
                        peer,
                    )
                    break
            elif last_received_header.hash != first.parent_hash:
                # on follow-ups, require the first header in this batch to be next in succession
                self.logger.warning(
                    "Header batch starts with %r, with parent %s, but last header was %r",
                    first,
                    encode_hex(first.parent_hash[:4]),
                    last_received_header,
                )
                break
            self.logger.debug(
                "Got new header chain from %s: %s..%s",
                peer,
                first,
                new_headers[-1],
            )
            try:
                await self.chain.coro_validate_chain(
                    last_received_header or first_parent,
                    new_headers,
                    self._seal_check_random_sample_rate,
                )
            except ValidationError as e:
                self.logger.warning("Received invalid headers from %s, disconnecting: %s", peer, e)
                await peer.disconnect(DisconnectReason.SUBPROTOCOL_ERROR)
                break
            # Accumulate the difficulty of every accepted header so the
            # "peer announced a better TD" check above stays accurate.
            for header in new_headers:
                head_td += header.difficulty
            # Setting the latest header hash for the peer, before queuing header processing tasks
            self._target_header_hash = peer.head_info.head_hash
            yield new_headers
            last_received_header = new_headers[-1]
            self.sync_progress = self.sync_progress.update_current_block(
                last_received_header.block_number,
            )
            start_at = BlockNumber(last_received_header.block_number + 1)
    async def _request_headers(
            self, peer: BaseChainPeer, start_at: BlockNumber) -> Tuple[BlockHeaderAPI, ...]:
        """Fetch a batch of headers starting at start_at and return the ones we're missing."""
        # NOTE(review): "Requsting" is a typo in the log message; left unchanged here.
        self.logger.debug("Requsting chain of headers from %s starting at #%d", peer, start_at)
        return await peer.chain_api.get_block_headers(
            start_at,
            peer.max_headers_fetch,
            skip=0,
            reverse=False,
        )
class BaseBlockImporter(ABC):
    """Abstract interface for strategies that import blocks into a chain."""

    @abstractmethod
    async def import_block(
            self,
            block: BlockAPI) -> Tuple[BlockAPI, Tuple[BlockAPI, ...], Tuple[BlockAPI, ...]]:
        """Import *block* and return the resulting import tuple."""

    async def preview_transactions(
            self,
            header: BlockHeaderAPI,
            transactions: Tuple[SignedTransactionAPI, ...],
            parent_state_root: Hash32,
            lagging: bool = True) -> None:
        """Hook that lets an importer prepare for blocks it has not reached yet.

        :param header: header of the upcoming block
        :param transactions: transactions contained in the upcoming block
        :param parent_state_root: state root at the start of the upcoming block
            (i.e. at the end of the previous block)
        :param lagging: whether the upcoming block is far ahead of the current
            one; use it to decide whether slow, resource-intensive preparation
            is worthwhile (it only pays off when the block is far in the future)

        The default implementation deliberately does nothing.
        """
class SimpleBlockImporter(BaseBlockImporter):
    """Importer that simply delegates to the chain's coroutine import API."""

    def __init__(self, chain: AsyncChainAPI) -> None:
        self._chain = chain

    async def import_block(
            self,
            block: BlockAPI) -> Tuple[BlockAPI, Tuple[BlockAPI, ...], Tuple[BlockAPI, ...]]:
        # Validation is always enabled for this importer.
        result = await self._chain.coro_import_block(block, perform_validation=True)
        return result
class BaseSyncBlockImporter(ABC):
    """Synchronous counterpart of the block-importer interface above."""

    @abstractmethod
    def import_block(
            self,
            block: BlockAPI) -> Tuple[BlockAPI, Tuple[BlockAPI, ...], Tuple[BlockAPI, ...]]:
        """Import *block* synchronously and return the resulting import tuple."""
class SyncBlockImporter(BaseSyncBlockImporter):
    """Synchronously imports beacon blocks via the wrapped beacon chain."""

    def __init__(self, chain: BaseBeaconChain) -> None:
        self._chain = chain

    def import_block(
            self,
            block: BaseBeaconBlock
    ) -> Tuple[BaseBeaconBlock, Tuple[BaseBeaconBlock, ...], Tuple[BaseBeaconBlock, ...]]:
        # Validation is always enabled for sync imports.
        outcome = self._chain.import_block(block, perform_validation=True)
        return outcome
| [
"trinity._utils.headers.skip_complete_headers",
"trinity._utils.humanize.humanize_integer_sequence",
"eth_utils.ValidationError",
"eth_utils.encode_hex",
"eth_typing.BlockNumber"
] | [((1946, 2022), 'eth_utils.ValidationError', 'ValidationError', (['"""Cannot check the target hash when there is no active sync"""'], {}), "('Cannot check the target hash when there is no active sync')\n", (1961, 2022), False, 'from eth_utils import encode_hex, ValidationError\n'), ((9488, 9538), 'eth_typing.BlockNumber', 'BlockNumber', (['(last_received_header.block_number + 1)'], {}), '(last_received_header.block_number + 1)\n', (9499, 9538), False, 'from eth_typing import BlockNumber, Hash32\n'), ((8225, 8258), 'eth_utils.encode_hex', 'encode_hex', (['first.parent_hash[:4]'], {}), '(first.parent_hash[:4])\n', (8235, 8258), False, 'from eth_utils import encode_hex, ValidationError\n'), ((4280, 4342), 'trinity._utils.headers.skip_complete_headers', 'skip_complete_headers', (['all_headers', 'self.db.coro_header_exists'], {}), '(all_headers, self.db.coro_header_exists)\n', (4301, 4342), False, 'from trinity._utils.headers import skip_complete_headers\n'), ((5305, 5373), 'trinity._utils.humanize.humanize_integer_sequence', 'humanize_integer_sequence', (['(h.block_number for h in completed_headers)'], {}), '(h.block_number for h in completed_headers)\n', (5330, 5373), False, 'from trinity._utils.humanize import humanize_integer_sequence\n')] |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of config for use of fakes, and some black magic for
inline callbacks.
"""
import copy
import os
import sys
import tempfile
import eventlet
eventlet.monkey_patch(os=False)
import fixtures
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log import log as logging
from oslo_utils import uuidutils
import testtools
from ironic.common import config as ironic_config
from ironic.common import context as ironic_context
from ironic.common import hash_ring
from ironic.objects import base as objects_base
from ironic.tests.unit import policy_fixture
# Global oslo.config object shared by every test case in this module.
CONF = cfg.CONF
# Pull in options defined by other ironic modules so tests can override them.
CONF.import_opt('host', 'ironic.common.service')
CONF.import_opt('cleaning_network_uuid', 'ironic.common.neutron', 'neutron')
# Register and initialise oslo.log before any test logging happens.
logging.register_options(CONF)
logging.setup(CONF, 'ironic')
class ReplaceModule(fixtures.Fixture):
    """Fixture that swaps ``sys.modules[name]`` for a fake and restores it on cleanup."""

    def __init__(self, name, new_value):
        self.name = name
        self.new_value = new_value

    def setUp(self):
        super(ReplaceModule, self).setUp()
        # Remember whatever was registered before (None if nothing was),
        # install the fake, and schedule the original to be put back.
        previous = sys.modules.get(self.name)
        sys.modules[self.name] = self.new_value
        self.addCleanup(self._restore, previous)

    def _restore(self, old_value):
        sys.modules[self.name] = old_value
class TestingException(Exception):
    """Generic exception raised on purpose from within unit tests."""
class TestCase(testtools.TestCase):
    """Test case base class for all unit tests."""
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()
        self.context = ironic_context.get_admin_context()
        # Honour the OS_TEST_TIMEOUT env var; 0 (or garbage) means no timeout.
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        # Isolate temp files and $HOME from the real filesystem.
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())
        # Optionally capture stdout/stderr, controlled by env vars.
        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
                os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        self.log_fixture = self.useFixture(fixtures.FakeLogger())
        self._set_config()
        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry
        objects_base.IronicObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.IronicObjectRegistry.obj_classes())
        self.addCleanup(self._restore_obj_registry)
        self.addCleanup(self._clear_attrs)
        self.addCleanup(hash_ring.HashRingManager().reset)
        self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
        self.policy = self.useFixture(policy_fixture.PolicyFixture())
    def _set_config(self):
        """Install a config fixture and the option overrides tests rely on."""
        self.cfg_fixture = self.useFixture(config_fixture.Config(CONF))
        self.config(use_stderr=False,
                    fatal_exception_format_errors=True,
                    tempdir=tempfile.tempdir)
        self.config(cleaning_network_uuid=uuidutils.generate_uuid(),
                    group='neutron')
        self.config(provisioning_network_uuid=uuidutils.generate_uuid(),
                    group='neutron')
        self.config(enabled_network_interfaces=['flat', 'noop', 'neutron'])
        self.set_defaults(host='fake-mini',
                          debug=True)
        self.set_defaults(connection="sqlite://",
                          sqlite_synchronous=False,
                          group='database')
        ironic_config.parse_args([], default_config_files=[])
    def _restore_obj_registry(self):
        # Cleanup hook: put back the object registry captured in setUp().
        objects_base.IronicObjectRegistry._registry._obj_classes = (
            self._base_test_obj_backup)
    def _clear_attrs(self):
        # Delete attributes that don't start with _ so they don't pin
        # memory around unnecessarily for the duration of the test
        # suite
        for key in [k for k in self.__dict__ if k[0] != '_']:
            del self.__dict__[key]
    def config(self, **kw):
        """Override config options for a test."""
        self.cfg_fixture.config(**kw)
    def set_defaults(self, **kw):
        """Set default values of config options."""
        group = kw.pop('group', None)
        for o, v in kw.items():
            self.cfg_fixture.set_default(o, v, group=group)
    def path_get(self, project_file=None):
        """Get the absolute path to a file. Used for testing the API.
        :param project_file: File whose path to return. Default: None.
        :returns: path to the specified file, or path to project root.
        """
        root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                             '..',
                                             '..',
                                             )
                        )
        if project_file:
            return os.path.join(root, project_file)
        else:
            return root
| [
"ironic.common.hash_ring.HashRingManager",
"ironic.common.config.parse_args",
"eventlet.monkey_patch",
"fixtures.TempHomeDir",
"ironic.objects.base.IronicObjectRegistry.obj_classes",
"oslo_utils.uuidutils.generate_uuid",
"ironic.tests.unit.policy_fixture.PolicyFixture",
"fixtures.EnvironmentVariable",... | [((930, 961), 'eventlet.monkey_patch', 'eventlet.monkey_patch', ([], {'os': '(False)'}), '(os=False)\n', (951, 961), False, 'import eventlet\n'), ((1518, 1548), 'oslo_log.log.register_options', 'logging.register_options', (['CONF'], {}), '(CONF)\n', (1542, 1548), True, 'from oslo_log import log as logging\n'), ((1549, 1578), 'oslo_log.log.setup', 'logging.setup', (['CONF', '"""ironic"""'], {}), "(CONF, 'ironic')\n", (1562, 1578), True, 'from oslo_log import log as logging\n'), ((1933, 1959), 'sys.modules.get', 'sys.modules.get', (['self.name'], {}), '(self.name)\n', (1948, 1959), False, 'import sys\n'), ((2350, 2384), 'ironic.common.context.get_admin_context', 'ironic_context.get_admin_context', ([], {}), '()\n', (2382, 2384), True, 'from ironic.common import context as ironic_context\n'), ((2408, 2444), 'os.environ.get', 'os.environ.get', (['"""OS_TEST_TIMEOUT"""', '(0)'], {}), "('OS_TEST_TIMEOUT', 0)\n", (2422, 2444), False, 'import os\n'), ((4850, 4903), 'ironic.common.config.parse_args', 'ironic_config.parse_args', (['[]'], {'default_config_files': '[]'}), '([], default_config_files=[])\n', (4874, 4903), True, 'from ironic.common import config as ironic_config\n'), ((2749, 2774), 'fixtures.NestedTempfile', 'fixtures.NestedTempfile', ([], {}), '()\n', (2772, 2774), False, 'import fixtures\n'), ((2800, 2822), 'fixtures.TempHomeDir', 'fixtures.TempHomeDir', ([], {}), '()\n', (2820, 2822), False, 'import fixtures\n'), ((3411, 3432), 'fixtures.FakeLogger', 'fixtures.FakeLogger', ([], {}), '()\n', (3430, 3432), False, 'import fixtures\n'), ((3740, 3787), 'ironic.objects.base.IronicObjectRegistry.obj_classes', 'objects_base.IronicObjectRegistry.obj_classes', ([], {}), '()\n', (3785, 3787), True, 'from ironic.objects import base as objects_base\n'), ((3968, 4010), 'fixtures.EnvironmentVariable', 'fixtures.EnvironmentVariable', (['"""http_proxy"""'], {}), "('http_proxy')\n", (3996, 4010), False, 'import fixtures\n'), ((4050, 4080), 
'ironic.tests.unit.policy_fixture.PolicyFixture', 'policy_fixture.PolicyFixture', ([], {}), '()\n', (4078, 4080), False, 'from ironic.tests.unit import policy_fixture\n'), ((4153, 4180), 'oslo_config.fixture.Config', 'config_fixture.Config', (['CONF'], {}), '(CONF)\n', (4174, 4180), True, 'from oslo_config import fixture as config_fixture\n'), ((6227, 6259), 'os.path.join', 'os.path.join', (['root', 'project_file'], {}), '(root, project_file)\n', (6239, 6259), False, 'import os\n'), ((2680, 2723), 'fixtures.Timeout', 'fixtures.Timeout', (['test_timeout'], {'gentle': '(True)'}), '(test_timeout, gentle=True)\n', (2696, 2723), False, 'import fixtures\n'), ((2837, 2872), 'os.environ.get', 'os.environ.get', (['"""OS_STDOUT_CAPTURE"""'], {}), "('OS_STDOUT_CAPTURE')\n", (2851, 2872), False, 'import os\n'), ((2902, 2937), 'os.environ.get', 'os.environ.get', (['"""OS_STDOUT_CAPTURE"""'], {}), "('OS_STDOUT_CAPTURE')\n", (2916, 2937), False, 'import os\n'), ((3052, 3094), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""sys.stdout"""', 'stdout'], {}), "('sys.stdout', stdout)\n", (3072, 3094), False, 'import fixtures\n'), ((3108, 3143), 'os.environ.get', 'os.environ.get', (['"""OS_STDERR_CAPTURE"""'], {}), "('OS_STDERR_CAPTURE')\n", (3122, 3143), False, 'import os\n'), ((3173, 3208), 'os.environ.get', 'os.environ.get', (['"""OS_STDERR_CAPTURE"""'], {}), "('OS_STDERR_CAPTURE')\n", (3187, 3208), False, 'import os\n'), ((3323, 3365), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""sys.stderr"""', 'stderr'], {}), "('sys.stderr', stderr)\n", (3343, 3365), False, 'import fixtures\n'), ((3909, 3936), 'ironic.common.hash_ring.HashRingManager', 'hash_ring.HashRingManager', ([], {}), '()\n', (3934, 3936), False, 'from ironic.common import hash_ring\n'), ((4364, 4389), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (4387, 4389), False, 'from oslo_utils import uuidutils\n'), ((4474, 4499), 'oslo_utils.uuidutils.generate_uuid', 
'uuidutils.generate_uuid', ([], {}), '()\n', (4497, 4499), False, 'from oslo_utils import uuidutils\n'), ((5977, 6002), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5992, 6002), False, 'import os\n'), ((2984, 3015), 'fixtures.StringStream', 'fixtures.StringStream', (['"""stdout"""'], {}), "('stdout')\n", (3005, 3015), False, 'import fixtures\n'), ((3255, 3286), 'fixtures.StringStream', 'fixtures.StringStream', (['"""stderr"""'], {}), "('stderr')\n", (3276, 3286), False, 'import fixtures\n')] |
#!/usr/bin/py2
import cv2
import imutils
import numpy as np
from solver import Solver
from Recognizer import OCR
from skimage.segmentation import clear_border
from imutils.perspective import four_point_transform
class Sudoku(object):
    """Extracts a 9x9 sudoku board from a photo using OpenCV + an OCR model.

    Typical use: ``Sudoku(path).solve()`` which returns a 9x9 integer array
    (0 for empty cells) or None when no puzzle outline is found.
    """

    def __init__(self, image):
        # `image` is a filesystem path until initialize_image() loads it.
        self.image = image
        self.gray = None

    def initialize_image(self):
        """Load the image from disk and normalise its width to 600px."""
        self.image = cv2.imread(self.image)
        self.image = imutils.resize(self.image, width=600)

    def fetch_rectangle(self):
        """Return the 4-point contour of the puzzle outline, or None if absent."""
        self.gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(self.gray, (7, 7), 3)
        thresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
        thresh = cv2.bitwise_not(thresh)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        # The largest roughly-4-sided contour is assumed to be the grid border.
        for c in cnts:
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)
            if len(approx) == 4:
                return approx
        return None

    def extract_sudoku_board(self, board):
        """Warp the grayscale image to a top-down view of the puzzle.

        FIX: the original also computed a colour warp (`original_image`)
        that was never used; the dead computation has been removed.
        """
        return four_point_transform(self.gray, board.reshape(4, 2))

    def split_board(self, board):
        """Return the (cell width, cell height) of a warped 9x9 board."""
        return board.shape[1] // 9, board.shape[0] // 9

    def extract_digit(self, cell):
        """Return a binary mask of the digit in a cell image, or None if empty."""
        thresh = cv2.threshold(cell, 0, 255,
            cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        # Drop any grid lines touching the cell border.
        thresh = clear_border(thresh)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        if len(cnts) == 0:
            return None
        c = max(cnts, key=cv2.contourArea)
        mask = np.zeros(thresh.shape, dtype="uint8")
        cv2.drawContours(mask, [c], -1, 255, -1)
        (h, w) = thresh.shape
        percentFilled = cv2.countNonZero(mask) / float(w * h)
        # Less than 3% foreground is treated as noise / an empty cell.
        if percentFilled < 0.03:
            return None
        digit = cv2.bitwise_and(thresh, thresh, mask=mask)
        return digit

    def process_cells(self, stepX, stepY, board):
        """OCR every cell of the warped board into a 9x9 int array (0 = empty).

        FIX: removed the unused `cellLocs`/`row`/`boolean` accumulators the
        original built but never read.
        """
        ocr = OCR()
        sudoku_array = np.zeros((9, 9), dtype="int")
        for y in range(0, 9):
            for x in range(0, 9):
                startX = x * stepX
                startY = y * stepY
                endX = (x + 1) * stepX
                endY = (y + 1) * stepY
                cell = board[startY:endY, startX:endX]
                digit = self.extract_digit(cell)
                if digit is not None:
                    # NOTE(review): debug artefact — writes each detected digit
                    # image to the current directory; confirm it is still wanted.
                    cv2.imwrite("img-" + str(y) + str(x) + ".png", digit)
                    sudoku_array[y][x] = ocr.prediction(digit)
        return sudoku_array

    def solve(self):
        """Run the full pipeline; return a 9x9 array or None if no board found."""
        self.initialize_image()
        outline = self.fetch_rectangle()
        if outline is None:
            return None
        board = self.extract_sudoku_board(outline)
        x, y = self.split_board(board)
        return self.process_cells(x, y, board)
def manipulate(board):
    """Interactively let the user correct cells of *board* (9x9 array).

    The user is asked repeatedly whether they want to make a correction;
    answering anything other than 'y' ends the session. Row, column and
    value are entered 1-based as "row,col,value".

    :param board: 9x9 indexable board (mutated in place).
    :returns: the (possibly updated) board.
    """
    # Work on both Python 2 (raw_input, per the #!/usr/bin/py2 shebang)
    # and Python 3 (input).
    try:
        read_line = raw_input
    except NameError:
        read_line = input
    while True:
        decision = read_line("\nWanna make any corrections to the board? Press y if yes:-")
        # BUG FIX: the original condition was inverted — pressing 'y'
        # ("yes, I want corrections") exited the loop, while any other
        # answer kept prompting for values.
        if not (decision and decision[0].lower() == "y"):
            break
        values = read_line("\nEnter Row,Column and Value. Ex: 2,2,3: ").split(",")
        try:
            row, col, val = list(map(int, values[:3]))
            # All three numbers must be in 1..9 (rows/cols and digits).
            if all(1 <= i <= 9 for i in (row, col, val)):
                board[row - 1][col - 1] = val
                print("\nUpdated Board\n")
                print(board)
            else:
                print("\nInvalid input")
        except ValueError:
            # Non-numeric input or fewer than three comma-separated values.
            print("\nInvalid input")
    return board
# Build the board from the sample image, let the user fix OCR mistakes,
# then hand the result to the solver.
sudoku_board = Sudoku("Images/sudoku.jpg").solve()
print(sudoku_board)
updated_board = manipulate(sudoku_board)
Solver().solution(updated_board)
| [
"Recognizer.OCR",
"cv2.drawContours",
"cv2.countNonZero",
"cv2.threshold",
"solver.Solver",
"cv2.arcLength",
"cv2.bitwise_and",
"skimage.segmentation.clear_border",
"imutils.resize",
"cv2.adaptiveThreshold",
"imutils.grab_contours",
"numpy.zeros",
"cv2.approxPolyDP",
"cv2.cvtColor",
"cv2... | [((354, 376), 'cv2.imread', 'cv2.imread', (['self.image'], {}), '(self.image)\n', (364, 376), False, 'import cv2\n'), ((392, 429), 'imutils.resize', 'imutils.resize', (['self.image'], {'width': '(600)'}), '(self.image, width=600)\n', (406, 429), False, 'import imutils\n'), ((486, 530), 'cv2.cvtColor', 'cv2.cvtColor', (['self.image', 'cv2.COLOR_BGR2GRAY'], {}), '(self.image, cv2.COLOR_BGR2GRAY)\n', (498, 530), False, 'import cv2\n'), ((543, 581), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['self.gray', '(7, 7)', '(3)'], {}), '(self.gray, (7, 7), 3)\n', (559, 581), False, 'import cv2\n'), ((594, 692), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['blurred', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(11)', '(2)'], {}), '(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 11, 2)\n', (615, 692), False, 'import cv2\n'), ((698, 721), 'cv2.bitwise_not', 'cv2.bitwise_not', (['thresh'], {}), '(thresh)\n', (713, 721), False, 'import cv2\n'), ((817, 844), 'imutils.grab_contours', 'imutils.grab_contours', (['cnts'], {}), '(cnts)\n', (838, 844), False, 'import imutils\n'), ((1521, 1541), 'skimage.segmentation.clear_border', 'clear_border', (['thresh'], {}), '(thresh)\n', (1533, 1541), False, 'from skimage.segmentation import clear_border\n'), ((1641, 1668), 'imutils.grab_contours', 'imutils.grab_contours', (['cnts'], {}), '(cnts)\n', (1662, 1668), False, 'import imutils\n'), ((1753, 1790), 'numpy.zeros', 'np.zeros', (['thresh.shape'], {'dtype': '"""uint8"""'}), "(thresh.shape, dtype='uint8')\n", (1761, 1790), True, 'import numpy as np\n'), ((1793, 1833), 'cv2.drawContours', 'cv2.drawContours', (['mask', '[c]', '(-1)', '(255)', '(-1)'], {}), '(mask, [c], -1, 255, -1)\n', (1809, 1833), False, 'import cv2\n'), ((1969, 2011), 'cv2.bitwise_and', 'cv2.bitwise_and', (['thresh', 'thresh'], {'mask': 'mask'}), '(thresh, thresh, mask=mask)\n', (1984, 2011), False, 'import cv2\n'), ((2082, 2087), 'Recognizer.OCR', 'OCR', ([], {}), 
'()\n', (2085, 2087), False, 'from Recognizer import OCR\n'), ((2106, 2135), 'numpy.zeros', 'np.zeros', (['(9, 9)'], {'dtype': '"""int"""'}), "((9, 9), dtype='int')\n", (2114, 2135), True, 'import numpy as np\n'), ((3596, 3604), 'solver.Solver', 'Solver', ([], {}), '()\n', (3602, 3604), False, 'from solver import Solver\n'), ((952, 974), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (965, 974), False, 'import cv2\n'), ((987, 1025), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['c', '(0.02 * peri)', '(True)'], {}), '(c, 0.02 * peri, True)\n', (1003, 1025), False, 'import cv2\n'), ((1435, 1503), 'cv2.threshold', 'cv2.threshold', (['cell', '(0)', '(255)', '(cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)'], {}), '(cell, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n', (1448, 1503), False, 'import cv2\n'), ((1877, 1899), 'cv2.countNonZero', 'cv2.countNonZero', (['mask'], {}), '(mask)\n', (1893, 1899), False, 'import cv2\n')] |
import os
import os
import time
import gym
import gym_donkeycar
def is_exe(fpath):
    """Return True when *fpath* is a regular file with the execute bit set."""
    if not os.path.isfile(fpath):
        return False
    return os.access(fpath, os.X_OK)
class DonkeyGymEnv(object):
    """Donkey Car part wrapping the gym-donkeycar simulator environment.

    Configures the simulator through environment variables, creates the gym
    env, and exposes the latest camera frame (plus telemetry, if
    ``record_location`` is set) through ``run_threaded()``.
    """

    def __init__(self, sim_path, host="127.0.0.1", port=9091, headless=0,
                 env_name="donkey-generated-track-v0", sync="asynchronous",
                 conf=None, record_location=False, delay=0):
        # BUG FIX: `conf` used a mutable default argument ({}); use None
        # and create a fresh dict per call instead.
        if conf is None:
            conf = {}
        # The simulator reads its settings from env vars, so these must be
        # set before gym.make() below.
        os.environ['DONKEY_SIM_PATH'] = sim_path
        os.environ['DONKEY_SIM_PORT'] = str(port)
        os.environ['DONKEY_SIM_HEADLESS'] = str(headless)
        os.environ['DONKEY_SIM_SYNC'] = str(sync)
        if sim_path != "remote":
            if not os.path.exists(sim_path):
                raise Exception("The path you provided for the sim does not exist.")
            if not is_exe(sim_path):
                raise Exception("The path you provided is not an executable.")
        self.env = gym.make(env_name, exe_path=sim_path, host=host, port=port)
        self.frame = self.env.reset()
        self.action = [0.0, 0.0]
        self.running = True
        self.info = {'pos': (0., 0., 0.), 'speed': 0, 'cte': 0}
        self.delay = float(delay)
        self.record_location = record_location
        if "body_style" in conf:
            self.env.viewer.set_car_config(conf["body_style"], conf["body_rgb"], conf["car_name"], conf["font_size"])
        # without this small delay, we seem to miss packets
        time.sleep(0.1)

    def update(self):
        """Part thread loop: continuously step the sim with the latest action."""
        while self.running:
            self.frame, _, _, self.info = self.env.step(self.action)

    def run_threaded(self, steering, throttle):
        """Store the latest controls; return the most recent frame (+telemetry).

        ``None`` controls are treated as neutral (0.0, 0.0). ``delay`` is
        interpreted in milliseconds.
        """
        if steering is None or throttle is None:
            steering = 0.0
            throttle = 0.0
        if self.delay > 0.0:
            time.sleep(self.delay / 1000.0)
        self.action = [steering, throttle]
        # Output Sim-car position information if configured
        if self.record_location:
            return self.frame, self.info['pos'][0], self.info['pos'][1], self.info['pos'][2], self.info['speed'], self.info['cte']
        else:
            return self.frame

    def shutdown(self):
        """Stop the update loop and close the simulator connection."""
        self.running = False
        time.sleep(0.2)
        self.env.close()
| [
"os.path.exists",
"os.access",
"time.sleep",
"os.path.isfile",
"gym.make"
] | [((95, 116), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (109, 116), False, 'import os\n'), ((121, 146), 'os.access', 'os.access', (['fpath', 'os.X_OK'], {}), '(fpath, os.X_OK)\n', (130, 146), False, 'import os\n'), ((860, 919), 'gym.make', 'gym.make', (['env_name'], {'exe_path': 'sim_path', 'host': 'host', 'port': 'port'}), '(env_name, exe_path=sim_path, host=host, port=port)\n', (868, 919), False, 'import gym\n'), ((2129, 2144), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2139, 2144), False, 'import time\n'), ((1394, 1409), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1404, 1409), False, 'import time\n'), ((1723, 1754), 'time.sleep', 'time.sleep', (['(self.delay / 1000.0)'], {}), '(self.delay / 1000.0)\n', (1733, 1754), False, 'import time\n'), ((612, 636), 'os.path.exists', 'os.path.exists', (['sim_path'], {}), '(sim_path)\n', (626, 636), False, 'import os\n')] |
'''
Copyright (c) 2021-2022 OVGU LIA
Author: <NAME>
This source code is licensed under the Apache License 2.0 (see LICENSE.txt).
This source code may use other Open Source software components (see LICENSE.txt).
'''
try:
import queue as Queue
except ImportError:
import Queue as Queue
class DataManager(object):
    """Routes inbound messages to the database adapter (``pyAAS.dba``).

    Messages are dicts pushed onto an internal queue; ``start()`` runs a
    polling loop that dispatches them until ``stop()`` is called.
    """

    def __init__(self, pyAAS):
        """
        :param pyAAS: application context; must provide ``dba`` (database
            adapter) and ``serviceLogger``.
        """
        self.pyAAS = pyAAS
        self.InBoundProcessingQueue = Queue.Queue()
        self.outBoundProcessingDict = {}

    def pushInboundMessage(self, msg):
        """Enqueue a message dict for the processing loop."""
        self.InBoundProcessingQueue.put(msg)

    def configure(self):
        self.pyAAS.serviceLogger.info('The Database manager is being configured')

    def start(self):
        """Process queued messages until ``stop()`` clears the POLL flag."""
        self.POLL = True
        self.pyAAS.serviceLogger.info('The Database manager is being started')
        while self.POLL:
            # BUG FIX: the original spun on qsize() != 0, burning CPU and
            # racing between the size check and the get(). A blocking get
            # with a short timeout keeps stop() responsive without spinning.
            try:
                inMessage = self.InBoundProcessingQueue.get(timeout=0.1)
            except Queue.Empty:
                continue
            if inMessage["functionType"] == 1:
                # Dispatch a named adapter method; result is stored per instance id.
                dba = self.pyAAS.dba
                _dba_method = getattr(dba, inMessage['method'])
                self.outBoundProcessingDict[inMessage["instanceid"]] = _dba_method(inMessage['data'])
            elif inMessage['functionType'] == 3:
                # Persist a conversation message.
                dba = self.pyAAS.dba
                (dba.saveNewConversationMessage(inMessage['conversationId'],inMessage['messageType'],inMessage["messageId"],inMessage["message"]))
        # NOTE(review): this message is logged when the loop EXITS; the
        # wording ("is started") looks wrong but is kept unchanged.
        self.pyAAS.serviceLogger.info('The Database manager is started')

    def stop(self):
        """Signal the polling loop in ``start()`` to terminate."""
        self.pyAAS.serviceLogger.info('The Database manager is being stopped')
        self.POLL = False
        self.pyAAS.serviceLogger.info('The Database manager is stopped')

    def update(self):
        pass
| [
"Queue.Queue"
] | [((500, 513), 'Queue.Queue', 'Queue.Queue', ([], {}), '()\n', (511, 513), True, 'import Queue as Queue\n')] |
import numpy as np
from public_tool.form_index import form_index
from XGB_HMM.form_B_matrix_by_XGB import form_B_matrix_by_XGB
from XGB_HMM.predict import self_pred
def pred_proba_XGB(A, model, pi, O, allow_flag, lengths):
    """Compute per-state probability predictions for every observation in O.

    O is the output of solve_on_raw_data, i.e. it carries an accompanying
    allow_flag. Observations are processed one sequence at a time (sequence
    boundaries taken from ``lengths``); rows whose allow_flag is 0 keep an
    all-zero probability row.

    :returns: numpy array of shape (O.shape[0], len(pi)).
    """
    n_states = len(pi)
    pred_proba = np.zeros((O.shape[0], n_states))
    for seq_idx in range(len(lengths)):
        start, stop = form_index(lengths, seq_idx)
        seq_O = O[start:stop, :]
        seq_allow = allow_flag[start:stop]
        seq_proba = np.zeros((seq_O.shape[0], n_states))
        # Emission matrix is built only from the allowed rows of this sequence.
        allowed_B = form_B_matrix_by_XGB(model, seq_O[seq_allow == 1], pi)
        _, allowed_proba, _ = self_pred(allowed_B, [allowed_B.shape[0]], A, pi)
        # Scatter the predictions back into the allowed positions.
        seq_proba[seq_allow == 1] = allowed_proba
        pred_proba[start:stop] = seq_proba
    return pred_proba
| [
"XGB_HMM.form_B_matrix_by_XGB.form_B_matrix_by_XGB",
"numpy.zeros",
"XGB_HMM.predict.self_pred",
"public_tool.form_index.form_index"
] | [((397, 429), 'numpy.zeros', 'np.zeros', (['(O.shape[0], n_states)'], {}), '((O.shape[0], n_states))\n', (405, 429), True, 'import numpy as np\n'), ((501, 523), 'public_tool.form_index.form_index', 'form_index', (['lengths', 'i'], {}), '(lengths, i)\n', (511, 523), False, 'from public_tool.form_index import form_index\n'), ((657, 693), 'numpy.zeros', 'np.zeros', (['(now_O.shape[0], n_states)'], {}), '((now_O.shape[0], n_states))\n', (665, 693), True, 'import numpy as np\n'), ((719, 778), 'XGB_HMM.form_B_matrix_by_XGB.form_B_matrix_by_XGB', 'form_B_matrix_by_XGB', (['model', 'now_O[now_allow_flag == 1]', 'pi'], {}), '(model, now_O[now_allow_flag == 1], pi)\n', (739, 778), False, 'from XGB_HMM.form_B_matrix_by_XGB import form_B_matrix_by_XGB\n'), ((817, 870), 'XGB_HMM.predict.self_pred', 'self_pred', (['now_allow_B', '[now_allow_B.shape[0]]', 'A', 'pi'], {}), '(now_allow_B, [now_allow_B.shape[0]], A, pi)\n', (826, 870), False, 'from XGB_HMM.predict import self_pred\n')] |
from flask import Flask, render_template, url_for
app = Flask(__name__)


def _render(template_name):
    """Shared shortcut so every route handler stays a one-liner."""
    return render_template(template_name)


@app.route('/')
def index():
    """Landing page."""
    return _render("index.html")


@app.route('/top_score')
def top_score():
    return _render("top_score.html")


@app.route('/login')
def login():
    return _render("login.html")


@app.route('/manual')
def manual():
    return _render("manual.html")


@app.route('/tracking_ijazah')
def tracking_ijazah():
    return _render("tracking_ijazah.html")


@app.route('/kelas_mkdu')
def kelas_mkdu():
    return _render("kelas_mkdu.html")


@app.route('/katalog_online')
def katalog_online():
    return _render("katalog_online.html")


@app.route('/pendaftaran_wisuda')
def pendaftaran_wisuda():
    return _render("pendaftaran_wisuda.html")


@app.route('/daftar_peserta_mata_kuliah')
def daftar_peserta_mata_kuliah():
    return _render("daftar_peserta_matkul.html")


@app.route('/daftar_peserta_mata_kuliah_mkdu')
def daftar_peserta_mata_kuliah_mkdu():
    return _render("daftar_peserta_matkul_mkdu.html")


@app.route('/kalender_akademik')
def kalender_akademik():
    return _render("kalender_akademik.html")


@app.route('/informasi_skripsi')
def informasi_skripsi():
    return _render("informasi_skripsi.html")
app.run(debug=True) | [
"flask.render_template",
"flask.Flask"
] | [((57, 72), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (62, 72), False, 'from flask import Flask, render_template, url_for\n'), ((112, 141), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (127, 141), False, 'from flask import Flask, render_template, url_for\n'), ((194, 227), 'flask.render_template', 'render_template', (['"""top_score.html"""'], {}), "('top_score.html')\n", (209, 227), False, 'from flask import Flask, render_template, url_for\n'), ((272, 301), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html')\n", (287, 301), False, 'from flask import Flask, render_template, url_for\n'), ((348, 378), 'flask.render_template', 'render_template', (['"""manual.html"""'], {}), "('manual.html')\n", (363, 378), False, 'from flask import Flask, render_template, url_for\n'), ((443, 482), 'flask.render_template', 'render_template', (['"""tracking_ijazah.html"""'], {}), "('tracking_ijazah.html')\n", (458, 482), False, 'from flask import Flask, render_template, url_for\n'), ((537, 571), 'flask.render_template', 'render_template', (['"""kelas_mkdu.html"""'], {}), "('kelas_mkdu.html')\n", (552, 571), False, 'from flask import Flask, render_template, url_for\n'), ((634, 672), 'flask.render_template', 'render_template', (['"""katalog_online.html"""'], {}), "('katalog_online.html')\n", (649, 672), False, 'from flask import Flask, render_template, url_for\n'), ((743, 785), 'flask.render_template', 'render_template', (['"""pendaftaran_wisuda.html"""'], {}), "('pendaftaran_wisuda.html')\n", (758, 785), False, 'from flask import Flask, render_template, url_for\n'), ((872, 917), 'flask.render_template', 'render_template', (['"""daftar_peserta_matkul.html"""'], {}), "('daftar_peserta_matkul.html')\n", (887, 917), False, 'from flask import Flask, render_template, url_for\n'), ((1014, 1064), 'flask.render_template', 'render_template', (['"""daftar_peserta_matkul_mkdu.html"""'], {}), 
"('daftar_peserta_matkul_mkdu.html')\n", (1029, 1064), False, 'from flask import Flask, render_template, url_for\n'), ((1133, 1174), 'flask.render_template', 'render_template', (['"""kalender_akademik.html"""'], {}), "('kalender_akademik.html')\n", (1148, 1174), False, 'from flask import Flask, render_template, url_for\n'), ((1243, 1284), 'flask.render_template', 'render_template', (['"""informasi_skripsi.html"""'], {}), "('informasi_skripsi.html')\n", (1258, 1284), False, 'from flask import Flask, render_template, url_for\n')] |
from setuptools import setup, find_packages
import codecs
import os

# Directory containing this setup script (used to locate the README).
here = os.path.abspath(os.path.dirname(__file__))
# Read the README so it can be shown as the long description on PyPI.
with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
    long_description = "\n" + fh.read()

setup(
    name='pyqt-svg-icon-text-widget',
    version='0.0.1',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    # Automatically discover all importable packages.
    packages=find_packages(),
    description='PyQt widget consists of svg icon label and text label side by side',
    url='https://github.com/yjg30737/pyqt-svg-icon-text-widget.git',
    long_description_content_type='text/markdown',
    long_description=long_description,
    install_requires=[
        'PyQt5>=5.8',
        'pyqt-svg-label>=0.0.1'
    ]
) | [
"os.path.dirname",
"setuptools.find_packages",
"os.path.join"
] | [((92, 117), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (107, 117), False, 'import os\n'), ((137, 168), 'os.path.join', 'os.path.join', (['here', '"""README.md"""'], {}), "(here, 'README.md')\n", (149, 168), False, 'import os\n'), ((383, 398), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (396, 398), False, 'from setuptools import setup, find_packages\n')] |
"""
Software that detects each of the yellow shapes on the video frames and
classifies the shapes into classes: circle, rectangle, triangle.
USAGE: python3 shape_detection.py <video path> <output video path>
"""
import sys
import cv2
import imutils
import numpy as np
from tqdm import tqdm
# Outline colour per detected shape, in OpenCV's BGR channel order
# (triangle = blue, rectangle = green, circle = red).
BOX_COLORS = {
    "triangle": (255, 0, 0),
    "rectangle": (0, 255, 0),
    "circle" : (0, 0, 255)
}
def get_contours(image: np.ndarray) -> (np.ndarray, np.ndarray):
    """
    Extract the filled edge map and the yellow-object contours of an image.

    Parameters
    ----------
    image: np.ndarray
        BGR input image.

    Returns
    -------
    filled_edges: np.ndarray
        2D mask with every detected edge contour filled in.
    yellow_contours: np.ndarray
        Contours of the yellow regions of the image.
    """
    # --- edge map: Canny -> morphological closing -> fill each contour ---
    raw_edges = cv2.Canny(image, 10, 255)
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    closed_edges = cv2.morphologyEx(raw_edges, cv2.MORPH_CLOSE, ellipse)
    filled_edges = np.zeros_like(closed_edges)
    found = cv2.findContours(closed_edges.copy(), cv2.RETR_EXTERNAL,
                             cv2.CHAIN_APPROX_SIMPLE)
    for edge_contour in imutils.grab_contours(found):
        cv2.drawContours(filled_edges, [edge_contour], 0, 255, -1)

    # --- yellow mask: HSV threshold -> grayscale -> binary contours ---
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask_yellow = cv2.inRange(hsv, np.array([30, 10, 10]),
                              np.array([90, 255, 255]))
    yellow_only = cv2.bitwise_and(image, image, mask=mask_yellow)
    gray = cv2.cvtColor(yellow_only, cv2.COLOR_BGR2GRAY)
    binary = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY)[1]
    found = cv2.findContours(binary.copy(), cv2.RETR_EXTERNAL,
                             cv2.CHAIN_APPROX_SIMPLE)
    yellow_contours = imutils.grab_contours(found)
    return filled_edges, yellow_contours
def detect(tcontour: np.ndarray) -> str:
    """
    Classify a contour as a triangle, rectangle or circle.

    The contour is simplified with cv2.approxPolyDP using 4% of its
    perimeter as tolerance; the number of remaining vertices decides the
    class.  Any polygon that is neither a triangle nor a quadrilateral is
    treated as a circle.

    Parameters
    ----------
    tcontour: np.ndarray
        Target contour.

    Returns
    -------
    shape: str
        One of "triangle", "rectangle" or "circle".
    """
    perimeter = cv2.arcLength(tcontour, True)
    approx = cv2.approxPolyDP(tcontour, 0.04 * perimeter, True)
    # The former "unidentified" placeholder was dead code: every branch
    # overwrote it, so guard-clause returns are used instead.
    if len(approx) == 3:
        return "triangle"
    if len(approx) == 4:
        return "rectangle"
    return "circle"
if __name__ == "__main__":
    # Expect exactly two CLI arguments: input video path and output video path.
    if len(sys.argv) != 3:
        print(f'USAGE: python3 {sys.argv[0]} <video path> <output video path>')
        sys.exit()
    VIDEO_PATH = sys.argv[1]
    OUTPUT_PATH = sys.argv[2]
    # open input and output videos
    cap = cv2.VideoCapture(VIDEO_PATH)
    # Numeric property ids: 3 = CAP_PROP_FRAME_WIDTH, 4 = CAP_PROP_FRAME_HEIGHT,
    # 5 = CAP_PROP_FPS, 7 = CAP_PROP_FRAME_COUNT.
    width = int(cap.get(3))
    height = int(cap.get(4))
    fps = cap.get(5)
    frame_count = int(cap.get(7))
    # NOTE(review): fourcc -1 asks OpenCV to pick a codec, which is
    # platform dependent (may pop a chooser dialog on Windows) — confirm.
    out = cv2.VideoWriter(OUTPUT_PATH, -1, fps, (width, height))
    if not cap.isOpened() or not out.isOpened():
        raise RuntimeError("video file cannot be opened")
    print(f'\nVideo "{VIDEO_PATH}" opened for processing\n')
    for i in tqdm(range(frame_count), desc="Processing video"):
        ret, frame = cap.read()
        if ret is True:
            # preprocess frame: shrink for speed, remember the scale
            # factor, and blur slightly to stabilise detection.
            resized = imutils.resize(frame, width=300)
            ratio = frame.shape[0] / float(resized.shape[0])
            blurred = cv2.GaussianBlur(resized, (3, 3), 0)
            edges_map, contours = get_contours(blurred)
            for contour in contours:
                # Skip tiny yellow specks (area <= 50 px on the resized frame).
                if cv2.contourArea(contour) > 50:
                    # get contour's center from its image moments
                    M = cv2.moments(contour)
                    rel_cX = int(M["m10"] / M["m00"])
                    rel_cY = int(M["m01"] / M["m00"])
                    # Keep only contours whose centre lies on a filled edge.
                    if edges_map[rel_cY, rel_cX]:
                        SHAPE = detect(contour)
                        # get exact coordinates: scale the contour and its
                        # centre back up to the full-resolution frame
                        contour = contour.astype("float")
                        contour *= ratio
                        cX = int((M["m10"] / M["m00"]) * ratio)
                        cY = int((M["m01"] / M["m00"]) * ratio)
                        contour = contour.astype("int")
                        # draw contour and shape name
                        cv2.drawContours(frame, [contour], -1, BOX_COLORS[SHAPE], 2)
                        cv2.putText(frame, SHAPE, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX,
                                    0.5, (255, 255, 255), 2)
            out.write(frame)
        else:
            # No more frames could be read — stop early.
            break
    cap.release()
    out.release()
    print(f'\nVideo "{OUTPUT_PATH}" successfully saved')
| [
"numpy.array",
"cv2.approxPolyDP",
"sys.exit",
"cv2.threshold",
"cv2.arcLength",
"numpy.zeros_like",
"cv2.VideoWriter",
"cv2.contourArea",
"imutils.grab_contours",
"cv2.drawContours",
"cv2.putText",
"cv2.morphologyEx",
"cv2.cvtColor",
"cv2.moments",
"cv2.Canny",
"cv2.GaussianBlur",
"... | [((821, 846), 'cv2.Canny', 'cv2.Canny', (['image', '(10)', '(255)'], {}), '(image, 10, 255)\n', (830, 846), False, 'import cv2\n'), ((860, 912), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(3, 3)'], {}), '(cv2.MORPH_ELLIPSE, (3, 3))\n', (885, 912), False, 'import cv2\n'), ((932, 980), 'cv2.morphologyEx', 'cv2.morphologyEx', (['edges', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(edges, cv2.MORPH_CLOSE, kernel)\n', (948, 980), False, 'import cv2\n'), ((1000, 1027), 'numpy.zeros_like', 'np.zeros_like', (['edges_thresh'], {}), '(edges_thresh)\n', (1013, 1027), True, 'import numpy as np\n'), ((1190, 1227), 'imutils.grab_contours', 'imutils.grab_contours', (['edges_contours'], {}), '(edges_contours)\n', (1211, 1227), False, 'import imutils\n'), ((1356, 1394), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (1368, 1394), False, 'import cv2\n'), ((1414, 1436), 'numpy.array', 'np.array', (['[30, 10, 10]'], {}), '([30, 10, 10])\n', (1422, 1436), True, 'import numpy as np\n'), ((1456, 1480), 'numpy.array', 'np.array', (['[90, 255, 255]'], {}), '([90, 255, 255])\n', (1464, 1480), True, 'import numpy as np\n'), ((1499, 1543), 'cv2.inRange', 'cv2.inRange', (['hsv', 'yellow_lower', 'yellow_upper'], {}), '(hsv, yellow_lower, yellow_upper)\n', (1510, 1543), False, 'import cv2\n'), ((1564, 1611), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'image'], {'mask': 'mask_yellow'}), '(image, image, mask=mask_yellow)\n', (1579, 1611), False, 'import cv2\n'), ((1623, 1670), 'cv2.cvtColor', 'cv2.cvtColor', (['yellow_output', 'cv2.COLOR_BGR2GRAY'], {}), '(yellow_output, cv2.COLOR_BGR2GRAY)\n', (1635, 1670), False, 'import cv2\n'), ((1894, 1932), 'imutils.grab_contours', 'imutils.grab_contours', (['yellow_contours'], {}), '(yellow_contours)\n', (1915, 1932), False, 'import imutils\n'), ((2259, 2288), 'cv2.arcLength', 'cv2.arcLength', (['tcontour', '(True)'], {}), '(tcontour, True)\n', (2272, 2288), 
False, 'import cv2\n'), ((2302, 2347), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['tcontour', '(0.04 * peri)', '(True)'], {}), '(tcontour, 0.04 * peri, True)\n', (2318, 2347), False, 'import cv2\n'), ((2767, 2795), 'cv2.VideoCapture', 'cv2.VideoCapture', (['VIDEO_PATH'], {}), '(VIDEO_PATH)\n', (2783, 2795), False, 'import cv2\n'), ((2918, 2972), 'cv2.VideoWriter', 'cv2.VideoWriter', (['OUTPUT_PATH', '(-1)', 'fps', '(width, height)'], {}), '(OUTPUT_PATH, -1, fps, (width, height))\n', (2933, 2972), False, 'import cv2\n'), ((1268, 1318), 'cv2.drawContours', 'cv2.drawContours', (['edges_filled', '[cont]', '(0)', '(255)', '(-1)'], {}), '(edges_filled, [cont], 0, 255, -1)\n', (1284, 1318), False, 'import cv2\n'), ((1684, 1731), 'cv2.threshold', 'cv2.threshold', (['gray', '(60)', '(255)', 'cv2.THRESH_BINARY'], {}), '(gray, 60, 255, cv2.THRESH_BINARY)\n', (1697, 1731), False, 'import cv2\n'), ((2651, 2661), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2659, 2661), False, 'import sys\n'), ((3316, 3348), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(300)'}), '(frame, width=300)\n', (3330, 3348), False, 'import imutils\n'), ((3432, 3468), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['resized', '(3, 3)', '(0)'], {}), '(resized, (3, 3), 0)\n', (3448, 3468), False, 'import cv2\n'), ((3581, 3605), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (3596, 3605), False, 'import cv2\n'), ((3680, 3700), 'cv2.moments', 'cv2.moments', (['contour'], {}), '(contour)\n', (3691, 3700), False, 'import cv2\n'), ((4318, 4378), 'cv2.drawContours', 'cv2.drawContours', (['frame', '[contour]', '(-1)', 'BOX_COLORS[SHAPE]', '(2)'], {}), '(frame, [contour], -1, BOX_COLORS[SHAPE], 2)\n', (4334, 4378), False, 'import cv2\n'), ((4403, 4494), 'cv2.putText', 'cv2.putText', (['frame', 'SHAPE', '(cX, cY)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(255, 255, 255)', '(2)'], {}), '(frame, SHAPE, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, \n 255, 255), 2)\n', (4414, 4494), False, 
'import cv2\n')] |
import time
import paramiko
import fixtures
from fabric.api import run, hide, settings
from vn_test import VNFixture
from vm_test import VMFixture
from policy_test import PolicyFixture
from common.policy.config import ConfigPolicy
from common.connections import ContrailConnections
from security_group import SecurityGroupFixture
class ConfigSecGroup(ConfigPolicy):
    """Helpers for creating and tearing down security-group fixtures."""

    def config_sec_group(self, name, secgrpid=None, entries=None):
        """Create a security-group fixture, verify it, and return it."""
        fixture = self.useFixture(
            SecurityGroupFixture(
                self.inputs,
                self.connections,
                self.inputs.domain_name,
                self.inputs.project_name,
                secgrp_name=name,
                uuid=secgrpid,
                secgrp_entries=entries))
        ok, message = fixture.verify_on_setup()
        assert ok, message
        return fixture

    def delete_sec_group(self, secgrp_fix):
        """Tear the fixture down and drop its queued cleanup callback."""
        secgrp_fix.cleanUp()
        self.remove_from_cleanups(secgrp_fix)

    def remove_from_cleanups(self, fix):
        """Remove the first registered cleanup that wraps ``fix.cleanUp``."""
        for registered in self._cleanups:
            if fix.cleanUp in registered:
                self._cleanups.remove(registered)
                break
| [
"security_group.SecurityGroupFixture"
] | [((479, 646), 'security_group.SecurityGroupFixture', 'SecurityGroupFixture', (['self.inputs', 'self.connections', 'self.inputs.domain_name', 'self.inputs.project_name'], {'secgrp_name': 'name', 'uuid': 'secgrpid', 'secgrp_entries': 'entries'}), '(self.inputs, self.connections, self.inputs.domain_name,\n self.inputs.project_name, secgrp_name=name, uuid=secgrpid,\n secgrp_entries=entries)\n', (499, 646), False, 'from security_group import SecurityGroupFixture\n')] |
# -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=invalid-name,missing-docstring
from test.python.common import QiskitTestCase
import json
import unittest
import numpy as np
from numpy.linalg import norm
import qiskit
import qiskit._compiler
from qiskit import ClassicalRegister
from qiskit import QuantumCircuit
from qiskit import QuantumJob
from qiskit import QuantumRegister
from qiskit.backends.local.qasm_simulator_cpp import (QasmSimulatorCpp,
cx_error_matrix,
x90_error_matrix)
class TestLocalQasmSimulatorCpp(QiskitTestCase):
    """
    Tests for the local C++ QASM simulator backend
    (``local_qasm_simulator_cpp``).
    """

    def setUp(self):
        # Build two reference circuits (a hand-made Bell-style circuit and
        # one unrolled from a QASM file) and wrap them in a preformatted qobj.
        self.seed = 88
        self.qasm_filename = self._get_resource_path('qasm/example.qasm')
        with open(self.qasm_filename, 'r') as qasm_file:
            self.qasm_text = qasm_file.read()
            self.qasm_ast = qiskit.qasm.Qasm(data=self.qasm_text).parse()
            self.qasm_be = qiskit.unroll.CircuitBackend(['u1', 'u2', 'u3', 'id', 'cx'])
            self.qasm_circ = qiskit.unroll.Unroller(self.qasm_ast, self.qasm_be).execute()
        qr = QuantumRegister(2, 'q')
        cr = ClassicalRegister(2, 'c')
        qc = QuantumCircuit(qr, cr)
        qc.h(qr[0])
        qc.measure(qr[0], cr[0])
        self.qc = qc
        # create qobj
        compiled_circuit1 = qiskit._compiler.compile_circuit(self.qc, format='json')
        compiled_circuit2 = qiskit._compiler.compile_circuit(self.qasm_circ, format='json')
        self.qobj = {'id': 'test_qobj',
                     'config': {
                         'max_credits': 3,
                         'shots': 2000,
                         'backend_name': 'local_qasm_simulator_cpp',
                         'seed': 1111
                     },
                     'circuits': [
                         {
                             'name': 'test_circuit1',
                             'compiled_circuit': compiled_circuit1,
                             'basis_gates': 'u1,u2,u3,cx,id',
                             'layout': None,
                         },
                         {
                             'name': 'test_circuit2',
                             'compiled_circuit': compiled_circuit2,
                             'basis_gates': 'u1,u2,u3,cx,id',
                             'layout': None,
                         }
                     ]}
        # Simulator backend; skip the whole suite if the C++ binary is absent.
        try:
            self.backend = QasmSimulatorCpp()
        except FileNotFoundError as fnferr:
            raise unittest.SkipTest(
                'cannot find {} in path'.format(fnferr))
        self.q_job = QuantumJob(self.qobj,
                                backend=self.backend,
                                preformatted=True)

    def test_x90_coherent_error_matrix(self):
        # Verify x90_error_matrix against hand-computed single-qubit targets
        # and check that it always yields a unitary matrix.
        X90 = np.array([[1, -1j], [-1j, 1]]) / np.sqrt(2)
        U = x90_error_matrix(0., 0.).dot(X90)
        target = X90
        self.assertAlmostEqual(norm(U - target), 0.0, places=10,
                               msg="identity error matrix")
        U = x90_error_matrix(np.pi / 2., 0.).dot(X90)
        target = -1j * np.array([[0, 1], [1, 0]])
        self.assertAlmostEqual(norm(U - target), 0.0, places=10)
        U = x90_error_matrix(0., np.pi / 2.).dot(X90)
        target = np.array([[1., -1], [1, 1.]]) / np.sqrt(2.)
        self.assertAlmostEqual(norm(U - target), 0.0, places=10)
        U = x90_error_matrix(np.pi / 2, np.pi / 2.).dot(X90)
        target = np.array([[0., -1], [1, 0.]])
        self.assertAlmostEqual(norm(U - target), 0.0, places=10)
        U = x90_error_matrix(0.02, -0.03)
        self.assertAlmostEqual(norm(U.dot(U.conj().T) - np.eye(2)), 0.0,
                               places=10, msg="Test error matrix is unitary")

    def test_cx_coherent_error_matrix(self):
        # Same checks as above for the two-qubit cx_error_matrix.
        CX = np.array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])
        U = cx_error_matrix(0., 0.).dot(CX)
        target = CX
        self.assertAlmostEqual(norm(U - target), 0.0, places=10,
                               msg="identity error matrix")
        U = cx_error_matrix(np.pi / 2., 0.).dot(CX)
        target = np.array([[1, 0, 1j, 0],
                           [0, -1j, 0, 1],
                           [1j, 0, 1, 0],
                           [0, 1, 0, -1j]]) / np.sqrt(2)
        self.assertAlmostEqual(norm(U - target), 0.0, places=10)
        U = cx_error_matrix(0.03, -0.04)
        self.assertAlmostEqual(norm(U.dot(U.conj().T) - np.eye(4)), 0.0,
                               places=10, msg="Test error matrix is unitary")

    def test_run_qobj(self):
        # The unrolled example circuit should produce a uniform distribution
        # over the 8 correlated outcomes (4% of shots tolerance).
        result = self.backend.run(self.q_job).result()
        shots = self.qobj['config']['shots']
        threshold = 0.04 * shots
        counts = result.get_counts('test_circuit2')
        target = {'100 100': shots / 8, '011 011': shots / 8,
                  '101 101': shots / 8, '111 111': shots / 8,
                  '000 000': shots / 8, '010 010': shots / 8,
                  '110 110': shots / 8, '001 001': shots / 8}
        self.assertDictAlmostEqual(counts, target, threshold)

    def test_qobj_measure_opt(self):
        # Check counts, snapshots, density matrix and probabilities for a
        # precompiled qobj exercising the measurement-sampling optimization.
        filename = self._get_resource_path('qobj/cpp_measure_opt.json')
        with open(filename, 'r') as file:
            q_job = QuantumJob(json.load(file),
                       backend=self.backend,
                       preformatted=True)
        result = self.backend.run(q_job).result()
        shots = q_job.qobj['config']['shots']
        expected_data = {
            'measure (opt)': {
                'deterministic': True,
                'counts': {'00': shots},
                'statevector': np.array([1, 0, 0, 0])},
            'x0 measure (opt)': {
                'deterministic': True,
                'counts': {'01': shots},
                'statevector': np.array([0, 1, 0, 0])},
            'x1 measure (opt)': {
                'deterministic': True,
                'counts': {'10': shots},
                'statevector': np.array([0, 0, 1, 0])},
            'x0 x1 measure (opt)': {
                'deterministic': True,
                'counts': {'11': shots},
                'statevector': np.array([0, 0, 0, 1])},
            'y0 measure (opt)': {
                'deterministic': True,
                'counts': {'01': shots},
                'statevector': np.array([0, 1j, 0, 0])},
            'y1 measure (opt)': {
                'deterministic': True,
                'counts': {'10': shots},
                'statevector': np.array([0, 0, 1j, 0])},
            'y0 y1 measure (opt)': {
                'deterministic': True,
                'counts': {'11': shots},
                'statevector': np.array([0, 0, 0, -1j])},
            'h0 measure (opt)': {
                'deterministic': False,
                'counts': {'00': shots / 2, '01': shots / 2},
                'statevector': np.array([1 / np.sqrt(2), 1 / np.sqrt(2), 0, 0])},
            'h1 measure (opt)': {
                'deterministic': False,
                'counts': {'00': shots / 2, '10': shots / 2},
                'statevector': np.array([1 / np.sqrt(2), 0, 1 / np.sqrt(2), 0])},
            'h0 h1 measure (opt)': {
                'deterministic': False,
                'counts': {'00': shots / 4, '01': shots / 4,
                           '10': shots / 4, '11': shots / 4},
                'statevector': np.array([0.5, 0.5, 0.5, 0.5])},
            'bell measure (opt)': {
                'deterministic': False,
                'counts': {'00': shots / 2, '11': shots / 2},
                'statevector': np.array([1 / np.sqrt(2), 0, 0, 1 / np.sqrt(2)])}
        }
        for name in expected_data:
            # Check counts:
            counts = result.get_counts(name)
            expected_counts = expected_data[name]['counts']
            if expected_data[name].get('deterministic', False):
                self.assertEqual(counts, expected_counts,
                                 msg=name + ' counts')
            else:
                threshold = 0.04 * shots
                self.assertDictAlmostEqual(counts, expected_counts,
                                           threshold, msg=name + 'counts')
            # Check snapshot
            snapshots = result.get_snapshots(name)
            self.assertEqual(set(snapshots), {'0'},
                             msg=name + ' snapshot keys')
            self.assertEqual(len(snapshots['0']), 3,
                             msg=name + ' snapshot length')
            state = snapshots['0']['statevector'][0]
            expected_state = expected_data[name]['statevector']
            fidelity = np.abs(expected_state.dot(state.conj())) ** 2
            self.assertAlmostEqual(fidelity, 1.0, places=10,
                                   msg=name + ' snapshot fidelity')
            rho = snapshots['0']['density_matrix']
            self.assertAlmostEqual(np.trace(rho), 1)
            prob = snapshots['0']['probabilities']
            self.assertAlmostEqual(np.sum(prob), 1)

    def test_qobj_measure_opt_flag(self):
        # Circuits eligible for measurement sampling should produce a single
        # snapshot; all others produce one snapshot per shot.
        filename = self._get_resource_path('qobj/cpp_measure_opt_flag.json')
        with open(filename, 'r') as file:
            q_job = QuantumJob(json.load(file),
                       backend=self.backend,
                       preformatted=True)
        result = self.backend.run(q_job).result()
        shots = q_job.qobj['config']['shots']
        sampled_measurements = {
            'measure (sampled)': True,
            'trivial (sampled)': True,
            'reset1 (shots)': False,
            'reset2 (shots)': False,
            'reset3 (shots)': False,
            'gate1 (shots)': False,
            'gate2 (shots)': False,
            'gate3 (shots)': False,
            'gate4 (shots)': False
        }
        for name in sampled_measurements:
            snapshots = result.get_snapshots(name)
            # Check snapshot keys
            self.assertEqual(set(snapshots), {'0'},
                             msg=name + ' snapshot keys')
            # Check number of snapshots
            # there should be 1 for measurement sampling optimization
            # and there should be >1 for each shot being simulated.
            num_snapshots = len(snapshots['0'].get('statevector', []))
            if sampled_measurements[name] is True:
                self.assertEqual(num_snapshots, 1,
                                 msg=name + ' snapshot length')
            else:
                self.assertEqual(num_snapshots, shots,
                                 msg=name + ' snapshot length')

    def test_qobj_reset(self):
        # Every circuit ends in a reset, so each snapshot must be |0>.
        filename = self._get_resource_path('qobj/cpp_reset.json')
        with open(filename, 'r') as file:
            q_job = QuantumJob(json.load(file),
                       backend=self.backend,
                       preformatted=True)
        result = self.backend.run(q_job).result()
        expected_data = {
            'reset': {'statevector': np.array([1, 0])},
            'x reset': {'statevector': np.array([1, 0])},
            'y reset': {'statevector': np.array([1, 0])},
            'h reset': {'statevector': np.array([1, 0])}
        }
        for name in expected_data:
            # Check snapshot is |0> state
            snapshots = result.get_snapshots(name)
            self.assertEqual(set(snapshots), {'0'},
                             msg=name + ' snapshot keys')
            self.assertEqual(len(snapshots['0']), 1,
                             msg=name + ' snapshot length')
            state = snapshots['0']['statevector'][0]
            expected_state = expected_data[name]['statevector']
            fidelity = np.abs(expected_state.dot(state.conj())) ** 2
            self.assertAlmostEqual(fidelity, 1.0, places=10,
                                   msg=name + ' snapshot fidelity')

    def test_qobj_save_load(self):
        # Snapshots taken before/after a save/load pair must match the
        # expected |0> and |+> states.
        filename = self._get_resource_path('qobj/cpp_save_load.json')
        with open(filename, 'r') as file:
            q_job = QuantumJob(json.load(file),
                       backend=self.backend,
                       preformatted=True)
        result = self.backend.run(q_job).result()
        snapshots = result.get_snapshots('save_command')
        self.assertEqual(set(snapshots), {'0', '1', '10', '11'},
                         msg='snapshot keys')
        state0 = snapshots['0']['statevector'][0]
        state10 = snapshots['10']['statevector'][0]
        state1 = snapshots['1']['statevector'][0]
        state11 = snapshots['11']['statevector'][0]
        expected_state0 = np.array([1, 0])
        expected_state10 = np.array([1 / np.sqrt(2), 1 / np.sqrt(2)])
        fidelity0 = np.abs(expected_state0.dot(state0.conj())) ** 2
        fidelity1 = np.abs(expected_state0.dot(state1.conj())) ** 2
        fidelity10 = np.abs(expected_state10.dot(state10.conj())) ** 2
        fidelity11 = np.abs(expected_state10.dot(state11.conj())) ** 2
        self.assertAlmostEqual(fidelity0, 1.0, places=10, msg='snapshot 0')
        self.assertAlmostEqual(fidelity10, 1.0, places=10, msg='snapshot 0')
        self.assertAlmostEqual(fidelity1, 1.0, places=10, msg='snapshot 0')
        self.assertAlmostEqual(fidelity11, 1.0, places=10, msg='snapshot 0')

    def test_qobj_single_qubit_gates(self):
        # Each circuit applies one single-qubit gate (in several equivalent
        # encodings) and snapshots the statevector for comparison.
        filename = self._get_resource_path('qobj/cpp_single_qubit_gates.json')
        with open(filename, 'r') as file:
            q_job = QuantumJob(json.load(file),
                       backend=self.backend,
                       preformatted=True)
        result = self.backend.run(q_job).result()
        expected_data = {
            'snapshot': {
                'statevector': np.array([1, 0])},
            'id(U)': {
                'statevector': np.array([1, 0])},
            'id(u3)': {
                'statevector': np.array([1, 0])},
            'id(u1)': {
                'statevector': np.array([1, 0])},
            'id(direct)': {
                'statevector': np.array([1, 0])},
            'x(U)': {
                'statevector': np.array([0, 1])},
            'x(u3)': {
                'statevector': np.array([0, 1])},
            'x(direct)': {
                'statevector': np.array([0, 1])},
            'y(U)': {
                'statevector': np.array([0, 1j])},
            'y(u3)': {
                'statevector': np.array([0, 1j])},
            'y(direct)': {
                'statevector': np.array([0, 1j])},
            'h(U)': {
                'statevector': np.array([1 / np.sqrt(2), 1 / np.sqrt(2)])},
            'h(u3)': {
                'statevector': np.array([1 / np.sqrt(2), 1 / np.sqrt(2)])},
            'h(u2)': {
                'statevector': np.array([1 / np.sqrt(2), 1 / np.sqrt(2)])},
            'h(direct)': {
                'statevector': np.array([1 / np.sqrt(2), 1 / np.sqrt(2)])},
            'h(direct) z(U)': {
                'statevector': np.array([1 / np.sqrt(2), -1 / np.sqrt(2)])},
            'h(direct) z(u3)': {
                'statevector': np.array([1 / np.sqrt(2), -1 / np.sqrt(2)])},
            'h(direct) z(u1)': {
                'statevector': np.array([1 / np.sqrt(2), -1 / np.sqrt(2)])},
            'h(direct) z(direct)': {
                'statevector': np.array([1 / np.sqrt(2), -1 / np.sqrt(2)])},
            'h(direct) s(U)': {
                'statevector': np.array([1 / np.sqrt(2), 1j / np.sqrt(2)])},
            'h(direct) s(u3)': {
                'statevector': np.array([1 / np.sqrt(2), 1j / np.sqrt(2)])},
            'h(direct) s(u1)': {
                'statevector': np.array([1 / np.sqrt(2), 1j / np.sqrt(2)])},
            'h(direct) s(direct)': {
                'statevector': np.array([1 / np.sqrt(2), 1j / np.sqrt(2)])},
            'h(direct) sdg(U)': {
                'statevector': np.array([1 / np.sqrt(2), -1j / np.sqrt(2)])},
            'h(direct) sdg(u3)': {
                'statevector': np.array([1 / np.sqrt(2), -1j / np.sqrt(2)])},
            'h(direct) sdg(u1)': {
                'statevector': np.array([1 / np.sqrt(2), -1j / np.sqrt(2)])},
            'h(direct) sdg(direct)': {
                'statevector': np.array([1 / np.sqrt(2), -1j / np.sqrt(2)])},
            'h(direct) t(U)': {
                'statevector': np.array([1 / np.sqrt(2), 0.5 + 0.5j])},
            'h(direct) t(u3)': {
                'statevector': np.array([1 / np.sqrt(2), 0.5 + 0.5j])},
            'h(direct) t(u1)': {
                'statevector': np.array([1 / np.sqrt(2), 0.5 + 0.5j])},
            'h(direct) t(direct)': {
                'statevector': np.array([1 / np.sqrt(2), 0.5 + 0.5j])},
            'h(direct) tdg(U)': {
                'statevector': np.array([1 / np.sqrt(2), 0.5 - 0.5j])},
            'h(direct) tdg(u3)': {
                'statevector': np.array([1 / np.sqrt(2), 0.5 - 0.5j])},
            'h(direct) tdg(u1)': {
                'statevector': np.array([1 / np.sqrt(2), 0.5 - 0.5j])},
            'h(direct) tdg(direct)': {
                'statevector': np.array([1 / np.sqrt(2), 0.5 - 0.5j])}
        }
        for name in expected_data:
            # Check snapshot
            snapshots = result.get_snapshots(name)
            self.assertEqual(set(snapshots), {'0'},
                             msg=name + ' snapshot keys')
            self.assertEqual(len(snapshots['0']), 1,
                             msg=name + ' snapshot length')
            state = snapshots['0']['statevector'][0]
            expected_state = expected_data[name]['statevector']
            inner_product = expected_state.dot(state.conj())
            self.assertAlmostEqual(inner_product, 1.0, places=10,
                                   msg=name + ' snapshot fidelity')

    def test_qobj_two_qubit_gates(self):
        # Two-qubit gate variants (CX/cx/cz/rzz) against analytic states.
        filename = self._get_resource_path('qobj/cpp_two_qubit_gates.json')
        with open(filename, 'r') as file:
            q_job = QuantumJob(json.load(file),
                       backend=self.backend,
                       preformatted=True)
        result = self.backend.run(q_job).result()
        expected_data = {
            'h0 CX01': {
                'statevector': np.array([1 / np.sqrt(2), 0, 0, 1 / np.sqrt(2)])},
            'h0 CX10': {
                'statevector': np.array([1 / np.sqrt(2), 1 / np.sqrt(2), 0, 0])},
            'h1 CX01': {
                'statevector': np.array([1 / np.sqrt(2), 0, 1 / np.sqrt(2), 0])},
            'h1 CX10': {
                'statevector': np.array([1 / np.sqrt(2), 0, 0, 1 / np.sqrt(2)])},
            'h0 cx01': {
                'statevector': np.array([1 / np.sqrt(2), 0, 0, 1 / np.sqrt(2)])},
            'h0 cx10': {
                'statevector': np.array([1 / np.sqrt(2), 1 / np.sqrt(2), 0, 0])},
            'h1 cx01': {
                'statevector': np.array([1 / np.sqrt(2), 0, 1 / np.sqrt(2), 0])},
            'h1 cx10': {
                'statevector': np.array([1 / np.sqrt(2), 0, 0, 1 / np.sqrt(2)])},
            'h0 cz01': {
                'statevector': np.array([1 / np.sqrt(2), 1 / np.sqrt(2), 0, 0])},
            'h0 cz10': {
                'statevector': np.array([1 / np.sqrt(2), 1 / np.sqrt(2), 0, 0])},
            'h1 cz01': {
                'statevector': np.array([1 / np.sqrt(2), 0, 1 / np.sqrt(2), 0])},
            'h1 cz10': {
                'statevector': np.array([1 / np.sqrt(2), 0, 1 / np.sqrt(2), 0])},
            'h0 h1 cz01': {'statevector': np.array([0.5, 0.5, 0.5, -0.5])},
            'h0 h1 cz10': {'statevector': np.array([0.5, 0.5, 0.5, -0.5])},
            'h0 rzz01': {
                'statevector': np.array([1 / np.sqrt(2), 1j / np.sqrt(2), 0, 0])},
            'h0 rzz10': {
                'statevector': np.array([1 / np.sqrt(2), 1j / np.sqrt(2), 0, 0])},
            'h1 rzz01': {
                'statevector': np.array([1 / np.sqrt(2), 0, 1j / np.sqrt(2), 0])},
            'h1 rzz10': {
                'statevector': np.array([1 / np.sqrt(2), 0, 1j / np.sqrt(2), 0])},
            'h0 h1 rzz01': {'statevector': np.array([0.5, 0.5j, 0.5j, 0.5])},
            'h0 h1 rzz10': {'statevector': np.array([0.5, 0.5j, 0.5j, 0.5])}
        }
        for name in expected_data:
            # Check snapshot
            snapshots = result.get_snapshots(name)
            self.assertEqual(set(snapshots), {'0'},
                             msg=name + ' snapshot keys')
            self.assertEqual(len(snapshots['0']), 1,
                             msg=name + ' snapshot length')
            state = snapshots['0']['statevector'][0]
            expected_state = expected_data[name]['statevector']
            fidelity = np.abs(expected_state.dot(state.conj())) ** 2
            self.assertAlmostEqual(fidelity, 1.0, places=10,
                                   msg=name + ' snapshot fidelity')

    def test_conditionals(self):
        # Classically-conditioned gates must fire only when the creg matches.
        filename = self._get_resource_path('qobj/cpp_conditionals.json')
        with open(filename, 'r') as file:
            q_job = QuantumJob(json.load(file),
                       backend=self.backend,
                       preformatted=True)
        result = self.backend.run(q_job).result()
        expected_data = {
            'single creg (c0=0)': {
                'statevector': np.array([1, 0, 0, 0])},
            'single creg (c0=1)': {
                'statevector': np.array([0, 0, 0, 1])},
            'two creg (c1=0)': {
                'statevector': np.array([1, 0, 0, 0])},
            'two creg (c1=1)': {
                'statevector': np.array([0, 0, 0, 1])}
        }
        for name in expected_data:
            # Check snapshot
            snapshots = result.get_snapshots(name)
            self.assertEqual(set(snapshots), {'0'},
                             msg=name + ' snapshot keys')
            self.assertEqual(len(snapshots['0']), 1,
                             msg=name + ' snapshot length')
            state = snapshots['0']['statevector'][0]
            expected_state = expected_data[name]['statevector']
            fidelity = np.abs(expected_state.dot(state.conj())) ** 2
            self.assertAlmostEqual(fidelity, 1.0, places=10,
                                   msg=name + ' snapshot fidelity')
if __name__ == '__main__':
    # Run the full suite with verbose per-test output.
    unittest.main(verbosity=2)
| [
"numpy.trace",
"numpy.sqrt",
"numpy.array",
"qiskit.qasm.Qasm",
"numpy.linalg.norm",
"unittest.main",
"qiskit.QuantumJob",
"qiskit.backends.local.qasm_simulator_cpp.x90_error_matrix",
"qiskit.QuantumCircuit",
"qiskit.backends.local.qasm_simulator_cpp.cx_error_matrix",
"numpy.eye",
"qiskit._com... | [((22521, 22547), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (22534, 22547), False, 'import unittest\n'), ((1345, 1368), 'qiskit.QuantumRegister', 'QuantumRegister', (['(2)', '"""q"""'], {}), "(2, 'q')\n", (1360, 1368), False, 'from qiskit import QuantumRegister\n'), ((1382, 1407), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['(2)', '"""c"""'], {}), "(2, 'c')\n", (1399, 1407), False, 'from qiskit import ClassicalRegister\n'), ((1421, 1443), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (1435, 1443), False, 'from qiskit import QuantumCircuit\n'), ((1568, 1624), 'qiskit._compiler.compile_circuit', 'qiskit._compiler.compile_circuit', (['self.qc'], {'format': '"""json"""'}), "(self.qc, format='json')\n", (1600, 1624), False, 'import qiskit\n'), ((1653, 1716), 'qiskit._compiler.compile_circuit', 'qiskit._compiler.compile_circuit', (['self.qasm_circ'], {'format': '"""json"""'}), "(self.qasm_circ, format='json')\n", (1685, 1716), False, 'import qiskit\n'), ((2877, 2939), 'qiskit.QuantumJob', 'QuantumJob', (['self.qobj'], {'backend': 'self.backend', 'preformatted': '(True)'}), '(self.qobj, backend=self.backend, preformatted=True)\n', (2887, 2939), False, 'from qiskit import QuantumJob\n'), ((3728, 3759), 'numpy.array', 'np.array', (['[[0.0, -1], [1, 0.0]]'], {}), '([[0.0, -1], [1, 0.0]])\n', (3736, 3759), True, 'import numpy as np\n'), ((3835, 3864), 'qiskit.backends.local.qasm_simulator_cpp.x90_error_matrix', 'x90_error_matrix', (['(0.02)', '(-0.03)'], {}), '(0.02, -0.03)\n', (3851, 3864), False, 'from qiskit.backends.local.qasm_simulator_cpp import QasmSimulatorCpp, cx_error_matrix, x90_error_matrix\n'), ((4075, 4141), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]]'], {}), '([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])\n', (4083, 4141), True, 'import numpy as np\n'), ((4644, 4672), 
'qiskit.backends.local.qasm_simulator_cpp.cx_error_matrix', 'cx_error_matrix', (['(0.03)', '(-0.04)'], {}), '(0.03, -0.04)\n', (4659, 4672), False, 'from qiskit.backends.local.qasm_simulator_cpp import QasmSimulatorCpp, cx_error_matrix, x90_error_matrix\n'), ((12868, 12884), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (12876, 12884), True, 'import numpy as np\n'), ((1180, 1240), 'qiskit.unroll.CircuitBackend', 'qiskit.unroll.CircuitBackend', (["['u1', 'u2', 'u3', 'id', 'cx']"], {}), "(['u1', 'u2', 'u3', 'id', 'cx'])\n", (1208, 1240), False, 'import qiskit\n'), ((2698, 2716), 'qiskit.backends.local.qasm_simulator_cpp.QasmSimulatorCpp', 'QasmSimulatorCpp', ([], {}), '()\n', (2714, 2716), False, 'from qiskit.backends.local.qasm_simulator_cpp import QasmSimulatorCpp, cx_error_matrix, x90_error_matrix\n'), ((3065, 3099), 'numpy.array', 'np.array', (['[[1, -1.0j], [-1.0j, 1]]'], {}), '([[1, -1.0j], [-1.0j, 1]])\n', (3073, 3099), True, 'import numpy as np\n'), ((3098, 3108), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3105, 3108), True, 'import numpy as np\n'), ((3207, 3223), 'numpy.linalg.norm', 'norm', (['(U - target)'], {}), '(U - target)\n', (3211, 3223), False, 'from numpy.linalg import norm\n'), ((3378, 3404), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (3386, 3404), True, 'import numpy as np\n'), ((3436, 3452), 'numpy.linalg.norm', 'norm', (['(U - target)'], {}), '(U - target)\n', (3440, 3452), False, 'from numpy.linalg import norm\n'), ((3541, 3572), 'numpy.array', 'np.array', (['[[1.0, -1], [1, 1.0]]'], {}), '([[1.0, -1], [1, 1.0]])\n', (3549, 3572), True, 'import numpy as np\n'), ((3573, 3585), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (3580, 3585), True, 'import numpy as np\n'), ((3616, 3632), 'numpy.linalg.norm', 'norm', (['(U - target)'], {}), '(U - target)\n', (3620, 3632), False, 'from numpy.linalg import norm\n'), ((3789, 3805), 'numpy.linalg.norm', 'norm', (['(U - target)'], {}), '(U - 
target)\n', (3793, 3805), False, 'from numpy.linalg import norm\n'), ((4237, 4253), 'numpy.linalg.norm', 'norm', (['(U - target)'], {}), '(U - target)\n', (4241, 4253), False, 'from numpy.linalg import norm\n'), ((4400, 4485), 'numpy.array', 'np.array', (['[[1, 0, 1.0j, 0], [0, -1.0j, 0, 1], [1.0j, 0, 1, 0], [0, 1, 0, -1.0j]]'], {}), '([[1, 0, 1.0j, 0], [0, -1.0j, 0, 1], [1.0j, 0, 1, 0], [0, 1, 0, -1.0j]]\n )\n', (4408, 4485), True, 'import numpy as np\n'), ((4556, 4566), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4563, 4566), True, 'import numpy as np\n'), ((4598, 4614), 'numpy.linalg.norm', 'norm', (['(U - target)'], {}), '(U - target)\n', (4602, 4614), False, 'from numpy.linalg import norm\n'), ((3121, 3147), 'qiskit.backends.local.qasm_simulator_cpp.x90_error_matrix', 'x90_error_matrix', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (3137, 3147), False, 'from qiskit.backends.local.qasm_simulator_cpp import QasmSimulatorCpp, cx_error_matrix, x90_error_matrix\n'), ((3313, 3347), 'qiskit.backends.local.qasm_simulator_cpp.x90_error_matrix', 'x90_error_matrix', (['(np.pi / 2.0)', '(0.0)'], {}), '(np.pi / 2.0, 0.0)\n', (3329, 3347), False, 'from qiskit.backends.local.qasm_simulator_cpp import QasmSimulatorCpp, cx_error_matrix, x90_error_matrix\n'), ((3482, 3516), 'qiskit.backends.local.qasm_simulator_cpp.x90_error_matrix', 'x90_error_matrix', (['(0.0)', '(np.pi / 2.0)'], {}), '(0.0, np.pi / 2.0)\n', (3498, 3516), False, 'from qiskit.backends.local.qasm_simulator_cpp import QasmSimulatorCpp, cx_error_matrix, x90_error_matrix\n'), ((3662, 3702), 'qiskit.backends.local.qasm_simulator_cpp.x90_error_matrix', 'x90_error_matrix', (['(np.pi / 2)', '(np.pi / 2.0)'], {}), '(np.pi / 2, np.pi / 2.0)\n', (3678, 3702), False, 'from qiskit.backends.local.qasm_simulator_cpp import QasmSimulatorCpp, cx_error_matrix, x90_error_matrix\n'), ((4154, 4179), 'qiskit.backends.local.qasm_simulator_cpp.cx_error_matrix', 'cx_error_matrix', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (4169, 
4179), False, 'from qiskit.backends.local.qasm_simulator_cpp import QasmSimulatorCpp, cx_error_matrix, x90_error_matrix\n'), ((4343, 4376), 'qiskit.backends.local.qasm_simulator_cpp.cx_error_matrix', 'cx_error_matrix', (['(np.pi / 2.0)', '(0.0)'], {}), '(np.pi / 2.0, 0.0)\n', (4358, 4376), False, 'from qiskit.backends.local.qasm_simulator_cpp import QasmSimulatorCpp, cx_error_matrix, x90_error_matrix\n'), ((5532, 5547), 'json.load', 'json.load', (['file'], {}), '(file)\n', (5541, 5547), False, 'import json\n'), ((5916, 5938), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (5924, 5938), True, 'import numpy as np\n'), ((6086, 6108), 'numpy.array', 'np.array', (['[0, 1, 0, 0]'], {}), '([0, 1, 0, 0])\n', (6094, 6108), True, 'import numpy as np\n'), ((6256, 6278), 'numpy.array', 'np.array', (['[0, 0, 1, 0]'], {}), '([0, 0, 1, 0])\n', (6264, 6278), True, 'import numpy as np\n'), ((6429, 6451), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (6437, 6451), True, 'import numpy as np\n'), ((6599, 6624), 'numpy.array', 'np.array', (['[0, 1.0j, 0, 0]'], {}), '([0, 1.0j, 0, 0])\n', (6607, 6624), True, 'import numpy as np\n'), ((6770, 6795), 'numpy.array', 'np.array', (['[0, 0, 1.0j, 0]'], {}), '([0, 0, 1.0j, 0])\n', (6778, 6795), True, 'import numpy as np\n'), ((6944, 6970), 'numpy.array', 'np.array', (['[0, 0, 0, -1.0j]'], {}), '([0, 0, 0, -1.0j])\n', (6952, 6970), True, 'import numpy as np\n'), ((7638, 7668), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5, 0.5])\n', (7646, 7668), True, 'import numpy as np\n'), ((9152, 9165), 'numpy.trace', 'np.trace', (['rho'], {}), '(rho)\n', (9160, 9165), True, 'import numpy as np\n'), ((9256, 9268), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (9262, 9268), True, 'import numpy as np\n'), ((9466, 9481), 'json.load', 'json.load', (['file'], {}), '(file)\n', (9475, 9481), False, 'import json\n'), ((11018, 11033), 'json.load', 'json.load', (['file'], {}), 
'(file)\n', (11027, 11033), False, 'import json\n'), ((11251, 11267), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (11259, 11267), True, 'import numpy as np\n'), ((11309, 11325), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (11317, 11325), True, 'import numpy as np\n'), ((11367, 11383), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (11375, 11383), True, 'import numpy as np\n'), ((11425, 11441), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (11433, 11441), True, 'import numpy as np\n'), ((12298, 12313), 'json.load', 'json.load', (['file'], {}), '(file)\n', (12307, 12313), False, 'import json\n'), ((13737, 13752), 'json.load', 'json.load', (['file'], {}), '(file)\n', (13746, 13752), False, 'import json\n'), ((13990, 14006), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (13998, 14006), True, 'import numpy as np\n'), ((14063, 14079), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (14071, 14079), True, 'import numpy as np\n'), ((14137, 14153), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (14145, 14153), True, 'import numpy as np\n'), ((14211, 14227), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (14219, 14227), True, 'import numpy as np\n'), ((14289, 14305), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (14297, 14305), True, 'import numpy as np\n'), ((14361, 14377), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (14369, 14377), True, 'import numpy as np\n'), ((14434, 14450), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (14442, 14450), True, 'import numpy as np\n'), ((14511, 14527), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (14519, 14527), True, 'import numpy as np\n'), ((14583, 14602), 'numpy.array', 'np.array', (['[0, 1.0j]'], {}), '([0, 1.0j])\n', (14591, 14602), True, 'import numpy as np\n'), ((14657, 14676), 'numpy.array', 'np.array', (['[0, 1.0j]'], {}), '([0, 1.0j])\n', (14665, 14676), True, 'import numpy 
as np\n'), ((14735, 14754), 'numpy.array', 'np.array', (['[0, 1.0j]'], {}), '([0, 1.0j])\n', (14743, 14754), True, 'import numpy as np\n'), ((18200, 18215), 'json.load', 'json.load', (['file'], {}), '(file)\n', (18209, 18215), False, 'import json\n'), ((19722, 19753), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5, -0.5]'], {}), '([0.5, 0.5, 0.5, -0.5])\n', (19730, 19753), True, 'import numpy as np\n'), ((19798, 19829), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5, -0.5]'], {}), '([0.5, 0.5, 0.5, -0.5])\n', (19806, 19829), True, 'import numpy as np\n'), ((20311, 20343), 'numpy.array', 'np.array', (['[0.5, 0.5j, 0.5j, 0.5]'], {}), '([0.5, 0.5j, 0.5j, 0.5])\n', (20319, 20343), True, 'import numpy as np\n'), ((20389, 20421), 'numpy.array', 'np.array', (['[0.5, 0.5j, 0.5j, 0.5]'], {}), '([0.5, 0.5j, 0.5j, 0.5])\n', (20397, 20421), True, 'import numpy as np\n'), ((21267, 21282), 'json.load', 'json.load', (['file'], {}), '(file)\n', (21276, 21282), False, 'import json\n'), ((21530, 21552), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (21538, 21552), True, 'import numpy as np\n'), ((21622, 21644), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (21630, 21644), True, 'import numpy as np\n'), ((21711, 21733), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (21719, 21733), True, 'import numpy as np\n'), ((21800, 21822), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (21808, 21822), True, 'import numpy as np\n'), ((1107, 1144), 'qiskit.qasm.Qasm', 'qiskit.qasm.Qasm', ([], {'data': 'self.qasm_text'}), '(data=self.qasm_text)\n', (1123, 1144), False, 'import qiskit\n'), ((1270, 1321), 'qiskit.unroll.Unroller', 'qiskit.unroll.Unroller', (['self.qasm_ast', 'self.qasm_be'], {}), '(self.qasm_ast, self.qasm_be)\n', (1292, 1321), False, 'import qiskit\n'), ((3921, 3930), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3927, 3930), True, 'import numpy as np\n'), ((4729, 4738), 'numpy.eye', 
'np.eye', (['(4)'], {}), '(4)\n', (4735, 4738), True, 'import numpy as np\n'), ((12926, 12936), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (12933, 12936), True, 'import numpy as np\n'), ((12942, 12952), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (12949, 12952), True, 'import numpy as np\n'), ((7152, 7162), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7159, 7162), True, 'import numpy as np\n'), ((7168, 7178), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7175, 7178), True, 'import numpy as np\n'), ((7370, 7380), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7377, 7380), True, 'import numpy as np\n'), ((7389, 7399), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7396, 7399), True, 'import numpy as np\n'), ((7854, 7864), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7861, 7864), True, 'import numpy as np\n'), ((7876, 7886), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7883, 7886), True, 'import numpy as np\n'), ((14822, 14832), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (14829, 14832), True, 'import numpy as np\n'), ((14838, 14848), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (14845, 14848), True, 'import numpy as np\n'), ((14921, 14931), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (14928, 14931), True, 'import numpy as np\n'), ((14937, 14947), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (14944, 14947), True, 'import numpy as np\n'), ((15020, 15030), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15027, 15030), True, 'import numpy as np\n'), ((15036, 15046), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15043, 15046), True, 'import numpy as np\n'), ((15123, 15133), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15130, 15133), True, 'import numpy as np\n'), ((15139, 15149), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15146, 15149), True, 'import numpy as np\n'), ((15231, 15241), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15238, 15241), True, 'import numpy as np\n'), ((15248, 15258), 
'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15255, 15258), True, 'import numpy as np\n'), ((15341, 15351), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15348, 15351), True, 'import numpy as np\n'), ((15358, 15368), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15365, 15368), True, 'import numpy as np\n'), ((15451, 15461), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15458, 15461), True, 'import numpy as np\n'), ((15468, 15478), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15475, 15478), True, 'import numpy as np\n'), ((15565, 15575), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15572, 15575), True, 'import numpy as np\n'), ((15582, 15592), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15589, 15592), True, 'import numpy as np\n'), ((15674, 15684), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15681, 15684), True, 'import numpy as np\n'), ((15691, 15701), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15698, 15701), True, 'import numpy as np\n'), ((15784, 15794), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15791, 15794), True, 'import numpy as np\n'), ((15801, 15811), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15808, 15811), True, 'import numpy as np\n'), ((15894, 15904), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15901, 15904), True, 'import numpy as np\n'), ((15911, 15921), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15918, 15921), True, 'import numpy as np\n'), ((16008, 16018), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16015, 16018), True, 'import numpy as np\n'), ((16025, 16035), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16032, 16035), True, 'import numpy as np\n'), ((16119, 16129), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16126, 16129), True, 'import numpy as np\n'), ((16137, 16147), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16144, 16147), True, 'import numpy as np\n'), ((16232, 16242), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16239, 16242), True, 'import 
numpy as np\n'), ((16250, 16260), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16257, 16260), True, 'import numpy as np\n'), ((16345, 16355), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16352, 16355), True, 'import numpy as np\n'), ((16363, 16373), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16370, 16373), True, 'import numpy as np\n'), ((16462, 16472), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16469, 16472), True, 'import numpy as np\n'), ((16480, 16490), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16487, 16490), True, 'import numpy as np\n'), ((16572, 16582), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16579, 16582), True, 'import numpy as np\n'), ((16677, 16687), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16684, 16687), True, 'import numpy as np\n'), ((16782, 16792), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16789, 16792), True, 'import numpy as np\n'), ((16891, 16901), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16898, 16901), True, 'import numpy as np\n'), ((16997, 17007), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (17004, 17007), True, 'import numpy as np\n'), ((17104, 17114), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (17111, 17114), True, 'import numpy as np\n'), ((17211, 17221), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (17218, 17221), True, 'import numpy as np\n'), ((17322, 17332), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (17329, 17332), True, 'import numpy as np\n'), ((18466, 18476), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (18473, 18476), True, 'import numpy as np\n'), ((18488, 18498), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (18495, 18498), True, 'import numpy as np\n'), ((18573, 18583), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (18580, 18583), True, 'import numpy as np\n'), ((18589, 18599), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (18596, 18599), True, 'import numpy as np\n'), ((18680, 18690), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), 
'(2)\n', (18687, 18690), True, 'import numpy as np\n'), ((18699, 18709), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (18706, 18709), True, 'import numpy as np\n'), ((18787, 18797), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (18794, 18797), True, 'import numpy as np\n'), ((18809, 18819), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (18816, 18819), True, 'import numpy as np\n'), ((18894, 18904), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (18901, 18904), True, 'import numpy as np\n'), ((18916, 18926), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (18923, 18926), True, 'import numpy as np\n'), ((19001, 19011), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19008, 19011), True, 'import numpy as np\n'), ((19017, 19027), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19024, 19027), True, 'import numpy as np\n'), ((19108, 19118), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19115, 19118), True, 'import numpy as np\n'), ((19127, 19137), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19134, 19137), True, 'import numpy as np\n'), ((19215, 19225), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19222, 19225), True, 'import numpy as np\n'), ((19237, 19247), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19244, 19247), True, 'import numpy as np\n'), ((19322, 19332), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19329, 19332), True, 'import numpy as np\n'), ((19338, 19348), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19345, 19348), True, 'import numpy as np\n'), ((19429, 19439), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19436, 19439), True, 'import numpy as np\n'), ((19445, 19455), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19452, 19455), True, 'import numpy as np\n'), ((19536, 19546), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19543, 19546), True, 'import numpy as np\n'), ((19555, 19565), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19562, 19565), True, 'import numpy as np\n'), ((19643, 19653), 
'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19650, 19653), True, 'import numpy as np\n'), ((19662, 19672), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19669, 19672), True, 'import numpy as np\n'), ((19903, 19913), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19910, 19913), True, 'import numpy as np\n'), ((19920, 19930), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19927, 19930), True, 'import numpy as np\n'), ((20012, 20022), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20019, 20022), True, 'import numpy as np\n'), ((20029, 20039), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20036, 20039), True, 'import numpy as np\n'), ((20121, 20131), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20128, 20131), True, 'import numpy as np\n'), ((20141, 20151), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20148, 20151), True, 'import numpy as np\n'), ((20230, 20240), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20237, 20240), True, 'import numpy as np\n'), ((20250, 20260), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20257, 20260), True, 'import numpy as np\n')] |
import json
import random
from typing import NamedTuple, Any
import numpy
from numpy.testing import assert_array_almost_equal, assert_almost_equal
import torch
import pytest
from flaky import flaky
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import sanitize
from allennlp.nn import util
from allennlp.models import load_archive
class TestNnUtil(AllenNlpTestCase):
def test_get_sequence_lengths_from_binary_mask(self):
binary_mask = torch.tensor(
[
[True, True, True, False, False, False],
[True, True, False, False, False, False],
[True, True, True, True, True, True],
[True, False, False, False, False, False],
]
)
lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
numpy.testing.assert_array_equal(lengths.numpy(), numpy.array([3, 2, 6, 1]))
def test_get_mask_from_sequence_lengths(self):
sequence_lengths = torch.LongTensor([4, 3, 1, 4, 2])
mask = util.get_mask_from_sequence_lengths(sequence_lengths, 5).data.numpy()
assert_almost_equal(
mask,
[[1, 1, 1, 1, 0], [1, 1, 1, 0, 0], [1, 0, 0, 0, 0], [1, 1, 1, 1, 0], [1, 1, 0, 0, 0]],
)
def test_get_sequence_lengths_converts_to_long_tensor_and_avoids_variable_overflow(self):
# Tests the following weird behaviour in Pytorch 0.1.12
# doesn't happen for our sequence masks:
#
# mask = torch.ones([260]).bool()
# mask.sum() # equals 260.
# var_mask = t.a.V(mask)
# var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows.
binary_mask = torch.ones(2, 260).bool()
lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
numpy.testing.assert_array_equal(lengths.data.numpy(), numpy.array([260, 260]))
def test_clamp_tensor(self):
# Test on uncoalesced sparse tensor
i = torch.LongTensor([[0, 1, 1, 0], [2, 0, 2, 2]])
v = torch.FloatTensor([3, 4, -5, 3])
tensor = torch.sparse.FloatTensor(i, v, torch.Size([2, 3]))
clamped_tensor = util.clamp_tensor(tensor, minimum=-3, maximum=3).to_dense()
assert_almost_equal(clamped_tensor, [[0, 0, 3], [3, 0, -3]])
# Test on coalesced sparse tensor
i = torch.LongTensor([[0, 1, 1], [2, 0, 2]])
v = torch.FloatTensor([3, 4, -5])
tensor = torch.sparse.FloatTensor(i, v, torch.Size([2, 3]))
clamped_tensor = util.clamp_tensor(tensor, minimum=-3, maximum=3).to_dense()
assert_almost_equal(clamped_tensor, [[0, 0, 3], [3, 0, -3]])
# Test on dense tensor
tensor = torch.tensor([[5, -4, 3], [-3, 0, -30]])
clamped_tensor = util.clamp_tensor(tensor, minimum=-3, maximum=3)
assert_almost_equal(clamped_tensor, [[3, -3, 3], [-3, 0, -3]])
def test_sort_tensor_by_length(self):
tensor = torch.rand([5, 7, 9])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 1:, :] = 0
tensor[3, 5:, :] = 0
sequence_lengths = torch.LongTensor([3, 4, 1, 5, 7])
sorted_tensor, sorted_lengths, reverse_indices, _ = util.sort_batch_by_length(
tensor, sequence_lengths
)
# Test sorted indices are padded correctly.
numpy.testing.assert_array_equal(sorted_tensor[1, 5:, :].data.numpy(), 0.0)
numpy.testing.assert_array_equal(sorted_tensor[2, 4:, :].data.numpy(), 0.0)
numpy.testing.assert_array_equal(sorted_tensor[3, 3:, :].data.numpy(), 0.0)
numpy.testing.assert_array_equal(sorted_tensor[4, 1:, :].data.numpy(), 0.0)
assert sorted_lengths.data.equal(torch.LongTensor([7, 5, 4, 3, 1]))
# Test restoration indices correctly recover the original tensor.
assert sorted_tensor.index_select(0, reverse_indices).data.equal(tensor.data)
def test_get_final_encoder_states(self):
encoder_outputs = torch.Tensor(
[
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]],
]
)
mask = torch.tensor([[True, True, True], [True, True, False]])
final_states = util.get_final_encoder_states(encoder_outputs, mask, bidirectional=False)
assert_almost_equal(final_states.data.numpy(), [[9, 10, 11, 12], [17, 18, 19, 20]])
final_states = util.get_final_encoder_states(encoder_outputs, mask, bidirectional=True)
assert_almost_equal(final_states.data.numpy(), [[9, 10, 3, 4], [17, 18, 15, 16]])
def test_masked_softmax_no_mask(self):
# Testing the general unmasked 1D case.
vector_1d = torch.FloatTensor([[1.0, 2.0, 3.0]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, None).data.numpy()
assert_array_almost_equal(
vector_1d_softmaxed, numpy.array([[0.090031, 0.244728, 0.665241]])
)
assert_almost_equal(1.0, numpy.sum(vector_1d_softmaxed), decimal=6)
vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, None).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.017148, 0.046613, 0.93624]]))
# Testing the unmasked 1D case where the input is all 0s.
vector_zero = torch.FloatTensor([[0.0, 0.0, 0.0]])
vector_zero_softmaxed = util.masked_softmax(vector_zero, None).data.numpy()
assert_array_almost_equal(
vector_zero_softmaxed, numpy.array([[0.33333334, 0.33333334, 0.33333334]])
)
# Testing the general unmasked batched case.
matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])
masked_matrix_softmaxed = util.masked_softmax(matrix, None).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array(
[[0.01714783, 0.04661262, 0.93623955], [0.09003057, 0.24472847, 0.66524096]]
),
)
# Testing the unmasked batched case where one of the inputs are all 0s.
matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [0.0, 0.0, 0.0]])
masked_matrix_softmaxed = util.masked_softmax(matrix, None).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array(
[[0.01714783, 0.04661262, 0.93623955], [0.33333334, 0.33333334, 0.33333334]]
),
)
def test_masked_softmax_masked(self):
# Testing the general masked 1D case.
vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
mask_1d = torch.tensor([[True, False, True]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.01798621, 0.0, 0.98201382]]))
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[True, False, True, True]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(
vector_1d_softmaxed, numpy.array([[0.01321289, 0.0, 0.26538793, 0.72139918]])
)
# Testing the masked 1D case where the input is all 0s and the mask
# is not all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, True]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0, 0, 0, 1]]))
# Testing the masked 1D case where the input is not all 0s
# and the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.0, 0.0, 0.0, 0.0]]))
# Testing the masked 1D case where the input is all 0s and
# the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.0, 0.0, 0.0, 0.0]]))
# Testing the masked 1D case where there are large elements in the
# padding.
vector_1d = torch.FloatTensor([[1.0, 1.0, 1e5]])
mask_1d = torch.tensor([[True, True, False]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.5, 0.5, 0]]))
# Testing the general masked batched case.
matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array([[0.01798621, 0.0, 0.98201382], [0.090031, 0.244728, 0.665241]]),
)
# Testing the masked batch case where one of the inputs is all 0s but
# none of the masks are all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed, numpy.array([[0.5, 0.0, 0.5], [0.090031, 0.244728, 0.665241]])
)
# Testing the masked batch case where one of the inputs is all 0s and
# one of the masks are all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [False, False, False]])
masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed, numpy.array([[0.5, 0.0, 0.5], [0.0, 0.0, 0.0]])
)
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[False, False, False], [True, False, True]])
masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed, numpy.array([[0.0, 0.0, 0.0], [0.11920292, 0.0, 0.88079708]])
)
def test_masked_softmax_memory_efficient_masked(self):
# Testing the general masked 1D case.
vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
mask_1d = torch.tensor([[True, False, True]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.01798621, 0.0, 0.98201382]]))
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[True, False, True, True]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
vector_1d_softmaxed, numpy.array([[0.01321289, 0.0, 0.26538793, 0.72139918]])
)
# Testing the masked 1D case where the input is all 0s and the mask
# is not all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, True]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0, 0, 0, 1]]))
# Testing the masked 1D case where the input is not all 0s
# and the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.25, 0.25, 0.25, 0.25]]))
# Testing the masked 1D case where the input is all 0s and
# the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.25, 0.25, 0.25, 0.25]]))
# Testing the masked 1D case where there are large elements in the
# padding.
vector_1d = torch.FloatTensor([[1.0, 1.0, 1e5]])
mask_1d = torch.tensor([[True, True, False]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.5, 0.5, 0]]))
# Testing the general masked batched case.
matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array([[0.01798621, 0.0, 0.98201382], [0.090031, 0.244728, 0.665241]]),
)
# Testing the masked batch case where one of the inputs is all 0s but
# none of the masks are all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed, numpy.array([[0.5, 0.0, 0.5], [0.090031, 0.244728, 0.665241]])
)
# Testing the masked batch case where one of the inputs is all 0s and
# one of the masks are all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [False, False, False]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array([[0.5, 0.0, 0.5], [0.33333333, 0.33333333, 0.33333333]]),
)
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[False, False, False], [True, False, True]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array([[0.33333333, 0.33333333, 0.33333333], [0.11920292, 0.0, 0.88079708]]),
)
    def test_masked_log_softmax_masked(self):
        """Exponentiated masked_log_softmax output should match masked_softmax, and all-False masks must not produce NaNs."""
        # Tests replicated from test_softmax_masked - we test that exponentiated,
        # the log softmax contains the correct elements (masked elements should be == 1).
        # Testing the general masked 1D case.
        vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
        mask_1d = torch.tensor([[True, False, True]])
        vector_1d_softmaxed = util.masked_log_softmax(vector_1d, mask_1d).data.numpy()
        assert_array_almost_equal(
            numpy.exp(vector_1d_softmaxed), numpy.array([[0.01798621, 0.0, 0.98201382]])
        )
        vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
        mask_1d = torch.tensor([[True, False, True, True]])
        vector_1d_softmaxed = util.masked_log_softmax(vector_1d, mask_1d).data.numpy()
        assert_array_almost_equal(
            numpy.exp(vector_1d_softmaxed), numpy.array([[0.01321289, 0.0, 0.26538793, 0.72139918]])
        )
        # Testing the masked 1D case where the input is all 0s and the mask
        # is not all 0s.
        vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
        mask_1d = torch.tensor([[False, False, False, True]])
        vector_1d_softmaxed = util.masked_log_softmax(vector_1d, mask_1d).data.numpy()
        assert_array_almost_equal(
            numpy.exp(vector_1d_softmaxed), numpy.array([[0.0, 0.0, 0.0, 1.0]])
        )
        # Testing the masked 1D case where the input is not all 0s
        # and the mask is all 0s. The output here will be arbitrary, but it should not be nan.
        vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
        mask_1d = torch.tensor([[False, False, False, False]])
        vector_1d_softmaxed = util.masked_log_softmax(vector_1d, mask_1d).data.numpy()
        assert not numpy.isnan(vector_1d_softmaxed).any()
    def test_masked_max(self):
        """Exercise masked_max on 1D, all-False-mask, batched, keepdim, and broadcast-mask inputs."""
        # Testing the general masked 1D case.
        vector_1d = torch.FloatTensor([1.0, 12.0, 5.0])
        mask_1d = torch.tensor([True, False, True])
        vector_1d_maxed = util.masked_max(vector_1d, mask_1d, dim=0).data.numpy()
        assert_array_almost_equal(vector_1d_maxed, 5.0)
        # Testing if all masks are zero, the output will be arbitrary, but it should not be nan.
        vector_1d = torch.FloatTensor([1.0, 12.0, 5.0])
        mask_1d = torch.tensor([False, False, False])
        vector_1d_maxed = util.masked_max(vector_1d, mask_1d, dim=0).data.numpy()
        assert not numpy.isnan(vector_1d_maxed).any()
        # Testing batch value and batch masks
        matrix = torch.FloatTensor([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])
        mask = torch.tensor([[True, False, True], [True, True, False]])
        matrix_maxed = util.masked_max(matrix, mask, dim=-1).data.numpy()
        assert_array_almost_equal(matrix_maxed, numpy.array([5.0, -1.0]))
        # Testing keepdim for batch value and batch masks
        matrix = torch.FloatTensor([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])
        mask = torch.tensor([[True, False, True], [True, True, False]])
        matrix_maxed = util.masked_max(matrix, mask, dim=-1, keepdim=True).data.numpy()
        assert_array_almost_equal(matrix_maxed, numpy.array([[5.0], [-1.0]]))
        # Testing broadcast
        matrix = torch.FloatTensor(
            [[[1.0, 2.0], [12.0, 3.0], [5.0, -1.0]], [[-1.0, -3.0], [-2.0, -0.5], [3.0, 8.0]]]
        )
        mask = torch.tensor([[True, False, True], [True, True, False]]).unsqueeze(-1)
        matrix_maxed = util.masked_max(matrix, mask, dim=1).data.numpy()
        assert_array_almost_equal(matrix_maxed, numpy.array([[5.0, 2.0], [-1.0, -0.5]]))
    def test_masked_mean(self):
        """Exercise masked_mean on 1D, all-False-mask, batched, keepdim, and broadcast-mask inputs."""
        # Testing the general masked 1D case.
        vector_1d = torch.FloatTensor([1.0, 12.0, 5.0])
        mask_1d = torch.tensor([True, False, True])
        vector_1d_mean = util.masked_mean(vector_1d, mask_1d, dim=0).data.numpy()
        assert_array_almost_equal(vector_1d_mean, 3.0)
        # Testing if all masks are zero, the output will be arbitrary, but it should not be nan.
        vector_1d = torch.FloatTensor([1.0, 12.0, 5.0])
        mask_1d = torch.tensor([False, False, False])
        vector_1d_mean = util.masked_mean(vector_1d, mask_1d, dim=0).data.numpy()
        assert not numpy.isnan(vector_1d_mean).any()
        # Testing batch value and batch masks
        matrix = torch.FloatTensor([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])
        mask = torch.tensor([[True, False, True], [True, True, False]])
        matrix_mean = util.masked_mean(matrix, mask, dim=-1).data.numpy()
        assert_array_almost_equal(matrix_mean, numpy.array([3.0, -1.5]))
        # Testing keepdim for batch value and batch masks
        matrix = torch.FloatTensor([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])
        mask = torch.tensor([[True, False, True], [True, True, False]])
        matrix_mean = util.masked_mean(matrix, mask, dim=-1, keepdim=True).data.numpy()
        assert_array_almost_equal(matrix_mean, numpy.array([[3.0], [-1.5]]))
        # Testing broadcast
        matrix = torch.FloatTensor(
            [[[1.0, 2.0], [12.0, 3.0], [5.0, -1.0]], [[-1.0, -3.0], [-2.0, -0.5], [3.0, 8.0]]]
        )
        mask = torch.tensor([[True, False, True], [True, True, False]]).unsqueeze(-1)
        matrix_mean = util.masked_mean(matrix, mask, dim=1).data.numpy()
        assert_array_almost_equal(matrix_mean, numpy.array([[3.0, 0.5], [-1.5, -1.75]]))
    def test_masked_flip(self):
        """masked_flip should reverse each sequence up to its given length, leaving padding positions zeroed at the end."""
        tensor = torch.FloatTensor(
            [[[6, 6, 6], [1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4], [5, 5, 5]]]
        )
        solution = [[[6, 6, 6], [0, 0, 0]], [[4, 4, 4], [3, 3, 3]]]
        response = util.masked_flip(tensor, [1, 2])
        assert_almost_equal(response, solution)
        tensor = torch.FloatTensor(
            [
                [[6, 6, 6], [1, 1, 1], [2, 2, 2], [0, 0, 0]],
                [[3, 3, 3], [4, 4, 4], [5, 5, 5], [1, 2, 3]],
            ]
        )
        solution = [
            [[2, 2, 2], [1, 1, 1], [6, 6, 6], [0, 0, 0]],
            [[1, 2, 3], [5, 5, 5], [4, 4, 4], [3, 3, 3]],
        ]
        response = util.masked_flip(tensor, [3, 4])
        assert_almost_equal(response, solution)
        tensor = torch.FloatTensor(
            [
                [[6, 6, 6], [1, 1, 1], [2, 2, 2], [0, 0, 0]],
                [[3, 3, 3], [4, 4, 4], [5, 5, 5], [1, 2, 3]],
                [[1, 1, 1], [2, 2, 2], [0, 0, 0], [0, 0, 0]],
            ]
        )
        solution = [
            [[2, 2, 2], [1, 1, 1], [6, 6, 6], [0, 0, 0]],
            [[1, 2, 3], [5, 5, 5], [4, 4, 4], [3, 3, 3]],
            [[2, 2, 2], [1, 1, 1], [0, 0, 0], [0, 0, 0]],
        ]
        response = util.masked_flip(tensor, [3, 4, 2])
        assert_almost_equal(response, solution)
    def test_get_text_field_mask_returns_a_correct_mask(self):
        """With both token ids and character ids present, the mask should be derived from the token-id (2D) tensor."""
        text_field_tensors = {
            "indexer_name": {
                "tokens": torch.LongTensor([[3, 4, 5, 0, 0], [1, 2, 0, 0, 0]]),
                "token_characters": torch.LongTensor(
                    [
                        [[1, 2], [3, 0], [2, 0], [0, 0], [0, 0]],
                        [[5, 0], [4, 6], [0, 0], [0, 0], [0, 0]],
                    ]
                ),
            }
        }
        assert_almost_equal(
            util.get_text_field_mask(text_field_tensors).long().numpy(),
            [[1, 1, 1, 0, 0], [1, 1, 0, 0, 0]],
        )
    def test_get_text_field_mask_returns_a_correct_mask_character_only_input(self):
        """With only a 3D character-id tensor available, the mask should come from positions with any nonzero character."""
        text_field_tensors = {
            "indexer_name": {
                "token_characters": torch.LongTensor(
                    [
                        [[1, 2, 3], [3, 0, 1], [2, 1, 0], [0, 0, 0]],
                        [[5, 5, 5], [4, 6, 0], [0, 0, 0], [0, 0, 0]],
                    ]
                )
            }
        }
        assert_almost_equal(
            util.get_text_field_mask(text_field_tensors).long().numpy(),
            [[1, 1, 1, 0], [1, 1, 0, 0]],
        )
    def test_get_text_field_mask_returns_a_correct_mask_list_field(self):
        """With num_wrapping_dims=1 (a ListField of TextFields), the mask should keep the inner token dimension."""
        text_field_tensors = {
            "indexer_name": {
                "list_tokens": torch.LongTensor(
                    [
                        [[1, 2], [3, 0], [2, 0], [0, 0], [0, 0]],
                        [[5, 0], [4, 6], [0, 0], [0, 0], [0, 0]],
                    ]
                )
            }
        }
        actual_mask = (
            util.get_text_field_mask(text_field_tensors, num_wrapping_dims=1).long().numpy()
        )
        # Expected mask is simply "token id > 0", elementwise.
        expected_mask = (text_field_tensors["indexer_name"]["list_tokens"].numpy() > 0).astype(
            "int32"
        )
        assert_almost_equal(actual_mask, expected_mask)
def test_get_text_field_mask_returns_mask_key(self):
text_field_tensors = {
"indexer_name": {
"tokens": torch.LongTensor([[3, 4, 5, 0, 0], [1, 2, 0, 0, 0]]),
"mask": torch.tensor([[False, False, True]]),
}
}
assert_almost_equal(
util.get_text_field_mask(text_field_tensors).long().numpy(), [[0, 0, 1]]
)
    def test_weighted_sum_works_on_simple_input(self):
        """weighted_sum of a (batch, length, dim) matrix with a (batch, length) attention should equal the manual dot-product mix."""
        batch_size = 1
        sentence_length = 5
        embedding_dim = 4
        # NOTE: unseeded random input; the expected value is recomputed from the same draw below.
        sentence_array = numpy.random.rand(batch_size, sentence_length, embedding_dim)
        sentence_tensor = torch.from_numpy(sentence_array).float()
        attention_tensor = torch.FloatTensor([[0.3, 0.4, 0.1, 0, 1.2]])
        aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
        assert aggregated_array.shape == (batch_size, embedding_dim)
        expected_array = (
            0.3 * sentence_array[0, 0]
            + 0.4 * sentence_array[0, 1]
            + 0.1 * sentence_array[0, 2]
            + 0.0 * sentence_array[0, 3]
            + 1.2 * sentence_array[0, 4]
        )
        numpy.testing.assert_almost_equal(aggregated_array, [expected_array], decimal=5)
    def test_weighted_sum_handles_higher_order_input(self):
        """weighted_sum should sum over the last attention dimension when both inputs have extra leading dimensions."""
        batch_size = 1
        length_1 = 5
        length_2 = 6
        length_3 = 2
        embedding_dim = 4
        sentence_array = numpy.random.rand(batch_size, length_1, length_2, length_3, embedding_dim)
        attention_array = numpy.random.rand(batch_size, length_1, length_2, length_3)
        sentence_tensor = torch.from_numpy(sentence_array).float()
        attention_tensor = torch.from_numpy(attention_array).float()
        aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
        assert aggregated_array.shape == (batch_size, length_1, length_2, embedding_dim)
        # Spot-check a single output position against the manual weighted sum over length_3.
        expected_array = (
            attention_array[0, 3, 2, 0] * sentence_array[0, 3, 2, 0]
            + attention_array[0, 3, 2, 1] * sentence_array[0, 3, 2, 1]
        )
        numpy.testing.assert_almost_equal(aggregated_array[0, 3, 2], expected_array, decimal=5)
    def test_weighted_sum_handles_uneven_higher_order_input(self):
        """weighted_sum should broadcast a (batch, length_3, dim) matrix against a higher-rank attention tensor."""
        batch_size = 1
        length_1 = 5
        length_2 = 6
        length_3 = 2
        embedding_dim = 4
        sentence_array = numpy.random.rand(batch_size, length_3, embedding_dim)
        attention_array = numpy.random.rand(batch_size, length_1, length_2, length_3)
        sentence_tensor = torch.from_numpy(sentence_array).float()
        attention_tensor = torch.from_numpy(attention_array).float()
        aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
        assert aggregated_array.shape == (batch_size, length_1, length_2, embedding_dim)
        # Every (i, j) cell mixes the same two sentence rows with its own attention weights.
        for i in range(length_1):
            for j in range(length_2):
                expected_array = (
                    attention_array[0, i, j, 0] * sentence_array[0, 0]
                    + attention_array[0, i, j, 1] * sentence_array[0, 1]
                )
                numpy.testing.assert_almost_equal(
                    aggregated_array[0, i, j], expected_array, decimal=5
                )
    def test_weighted_sum_handles_3d_attention_with_3d_matrix(self):
        """weighted_sum with 3D attention over a 3D matrix should mix the matrix rows per attention row."""
        batch_size = 1
        length_1 = 5
        length_2 = 2
        embedding_dim = 4
        sentence_array = numpy.random.rand(batch_size, length_2, embedding_dim)
        attention_array = numpy.random.rand(batch_size, length_1, length_2)
        sentence_tensor = torch.from_numpy(sentence_array).float()
        attention_tensor = torch.from_numpy(attention_array).float()
        aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
        assert aggregated_array.shape == (batch_size, length_1, embedding_dim)
        for i in range(length_1):
            expected_array = (
                attention_array[0, i, 0] * sentence_array[0, 0]
                + attention_array[0, i, 1] * sentence_array[0, 1]
            )
            numpy.testing.assert_almost_equal(aggregated_array[0, i], expected_array, decimal=5)
    def test_viterbi_decode(self):
        """Cover viterbi_decode: greedy equivalence with zero transitions, start/end constraints, -inf handling, tie-breaking, and observations."""
        # Test Viterbi decoding is equal to greedy decoding with no pairwise potentials.
        sequence_logits = torch.nn.functional.softmax(torch.rand([5, 9]), dim=-1)
        transition_matrix = torch.zeros([9, 9])
        indices, _ = util.viterbi_decode(sequence_logits.data, transition_matrix)
        _, argmax_indices = torch.max(sequence_logits, 1)
        assert indices == argmax_indices.data.squeeze().tolist()
        # Test Viterbi decoding works with start and end transitions
        sequence_logits = torch.nn.functional.softmax(torch.rand([5, 9]), dim=-1)
        transition_matrix = torch.zeros([9, 9])
        allowed_start_transitions = torch.zeros([9])
        # Force start tag to be an 8
        allowed_start_transitions[:8] = float("-inf")
        allowed_end_transitions = torch.zeros([9])
        # Force end tag to be a 0
        allowed_end_transitions[1:] = float("-inf")
        indices, _ = util.viterbi_decode(
            sequence_logits.data,
            transition_matrix,
            allowed_end_transitions=allowed_end_transitions,
            allowed_start_transitions=allowed_start_transitions,
        )
        assert indices[0] == 8
        assert indices[-1] == 0
        # Test that pairwise potentials affect the sequence correctly and that
        # viterbi_decode can handle -inf values.
        sequence_logits = torch.FloatTensor(
            [
                [0, 0, 0, 3, 5],
                [0, 0, 0, 3, 4],
                [0, 0, 0, 3, 4],
                [0, 0, 0, 3, 4],
                [0, 0, 0, 3, 4],
                [0, 0, 0, 3, 4],
            ]
        )
        # The same tags shouldn't appear sequentially.
        transition_matrix = torch.zeros([5, 5])
        for i in range(5):
            transition_matrix[i, i] = float("-inf")
        indices, _ = util.viterbi_decode(sequence_logits, transition_matrix)
        assert indices == [4, 3, 4, 3, 4, 3]
        # Test that unbalanced pairwise potentials break ties
        # between paths with equal unary potentials.
        sequence_logits = torch.FloatTensor(
            [
                [0, 0, 0, 4, 4],
                [0, 0, 0, 4, 4],
                [0, 0, 0, 4, 4],
                [0, 0, 0, 4, 4],
                [0, 0, 0, 4, 4],
                [0, 0, 0, 4, 4],
            ]
        )
        # The 5th tag has a penalty for appearing sequentially
        # or for transitioning to the 4th tag, making the best
        # path uniquely to take the 4th tag only.
        transition_matrix = torch.zeros([5, 5])
        transition_matrix[4, 4] = -10
        transition_matrix[4, 3] = -10
        transition_matrix[3, 4] = -10
        indices, _ = util.viterbi_decode(sequence_logits, transition_matrix)
        assert indices == [3, 3, 3, 3, 3, 3]
        sequence_logits = torch.FloatTensor([[1, 0, 0, 4], [1, 0, 6, 2], [0, 3, 0, 4]])
        # Best path would normally be [3, 2, 3] but we add a
        # potential from 2 -> 1, making [3, 2, 1] the best path.
        transition_matrix = torch.zeros([4, 4])
        transition_matrix[0, 0] = 1
        transition_matrix[2, 1] = 5
        indices, value = util.viterbi_decode(sequence_logits, transition_matrix)
        assert indices == [3, 2, 1]
        assert value.numpy() == 18
        # Test that providing evidence results in paths containing specified tags.
        sequence_logits = torch.FloatTensor(
            [
                [0, 0, 0, 7, 7],
                [0, 0, 0, 7, 7],
                [0, 0, 0, 7, 7],
                [0, 0, 0, 7, 7],
                [0, 0, 0, 7, 7],
                [0, 0, 0, 7, 7],
            ]
        )
        # The 5th tag has a penalty for appearing sequentially
        # or for transitioning to the 4th tag, making the best
        # path to take the 4th tag for every label.
        transition_matrix = torch.zeros([5, 5])
        transition_matrix[4, 4] = -10
        transition_matrix[4, 3] = -2
        transition_matrix[3, 4] = -2
        # The 1st, 4th and 5th sequence elements are observed - they should be
        # equal to 2, 0 and 4. The last tag should be equal to 3, because although
        # the penalty for transitioning to the 4th tag is -2, the unary potential
        # is 7, which is greater than the combination for any of the other labels.
        observations = [2, -1, -1, 0, 4, -1]
        indices, _ = util.viterbi_decode(sequence_logits, transition_matrix, observations)
        assert indices == [2, 3, 3, 0, 4, 3]
def test_viterbi_decode_top_k(self):
# Test cases taken from: https://gist.github.com/PetrochukM/afaa3613a99a8e7213d2efdd02ae4762
# Test Viterbi decoding is equal to greedy decoding with no pairwise potentials.
sequence_logits = torch.autograd.Variable(torch.rand([5, 9]))
transition_matrix = torch.zeros([9, 9])
indices, _ = util.viterbi_decode(sequence_logits.data, transition_matrix, top_k=5)
_, argmax_indices = torch.max(sequence_logits, 1)
assert indices[0] == argmax_indices.data.squeeze().tolist()
# Test that pairwise potentials effect the sequence correctly and that
# viterbi_decode can handle -inf values.
sequence_logits = torch.FloatTensor(
[
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
]
)
# The same tags shouldn't appear sequentially.
transition_matrix = torch.zeros([5, 5])
for i in range(5):
transition_matrix[i, i] = float("-inf")
indices, _ = util.viterbi_decode(sequence_logits, transition_matrix, top_k=5)
assert indices[0] == [3, 4, 3, 4, 3, 4]
# Test that unbalanced pairwise potentials break ties
# between paths with equal unary potentials.
sequence_logits = torch.FloatTensor(
[
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 0],
]
)
# The 5th tag has a penalty for appearing sequentially
# or for transitioning to the 4th tag, making the best
# path uniquely to take the 4th tag only.
transition_matrix = torch.zeros([5, 5])
transition_matrix[4, 4] = -10
transition_matrix[4, 3] = -10
indices, _ = util.viterbi_decode(sequence_logits, transition_matrix, top_k=5)
assert indices[0] == [3, 3, 3, 3, 3, 3]
sequence_logits = torch.FloatTensor([[1, 0, 0, 4], [1, 0, 6, 2], [0, 3, 0, 4]])
# Best path would normally be [3, 2, 3] but we add a
# potential from 2 -> 1, making [3, 2, 1] the best path.
transition_matrix = torch.zeros([4, 4])
transition_matrix[0, 0] = 1
transition_matrix[2, 1] = 5
indices, value = util.viterbi_decode(sequence_logits, transition_matrix, top_k=5)
assert indices[0] == [3, 2, 1]
assert value[0] == 18
def _brute_decode(
tag_sequence: torch.Tensor, transition_matrix: torch.Tensor, top_k: int = 5
) -> Any:
"""
Top-k decoder that uses brute search instead of the Viterbi Decode dynamic programing algorithm
"""
# Create all possible sequences
sequences = [[]] # type: ignore
for i in range(len(tag_sequence)):
new_sequences = [] # type: ignore
for j in range(len(tag_sequence[i])):
for sequence in sequences:
new_sequences.append(sequence[:] + [j])
sequences = new_sequences
# Score
scored_sequences = [] # type: ignore
for sequence in sequences:
emission_score = sum(tag_sequence[i, j] for i, j in enumerate(sequence))
transition_score = sum(
transition_matrix[sequence[i - 1], sequence[i]] for i in range(1, len(sequence))
)
score = emission_score + transition_score
scored_sequences.append((score, sequence))
# Get the top k scores / paths
top_k_sequences = sorted(scored_sequences, key=lambda r: r[0], reverse=True)[:top_k]
scores, paths = zip(*top_k_sequences)
return paths, scores # type: ignore
# Run 100 randomly generated parameters and compare the outputs.
for i in range(100):
num_tags = random.randint(1, 5)
seq_len = random.randint(1, 5)
k = random.randint(1, 5)
sequence_logits = torch.rand([seq_len, num_tags])
transition_matrix = torch.rand([num_tags, num_tags])
viterbi_paths_v1, viterbi_scores_v1 = util.viterbi_decode(
sequence_logits, transition_matrix, top_k=k
)
viterbi_path_brute, viterbi_score_brute = _brute_decode(
sequence_logits, transition_matrix, top_k=k
)
numpy.testing.assert_almost_equal(
list(viterbi_score_brute), viterbi_scores_v1.tolist(), decimal=3
)
numpy.testing.assert_equal(sanitize(viterbi_paths_v1), viterbi_path_brute)
    def test_sequence_cross_entropy_with_logits_masks_loss_correctly(self):
        """Masked positions must not contribute to the loss, regardless of what values they hold."""
        # test weight masking by checking that a tensor with non-zero values in
        # masked positions returns the same loss as a tensor with zeros in those
        # positions.
        tensor = torch.rand([5, 7, 4])
        tensor[0, 3:, :] = 0
        tensor[1, 4:, :] = 0
        tensor[2, 2:, :] = 0
        tensor[3, :, :] = 0
        weights = (tensor != 0.0)[:, :, 0].long().squeeze(-1)
        tensor2 = tensor.clone()
        # Garbage values placed only where the weights are zero.
        tensor2[0, 3:, :] = 2
        tensor2[1, 4:, :] = 13
        tensor2[2, 2:, :] = 234
        tensor2[3, :, :] = 65
        targets = torch.LongTensor(numpy.random.randint(0, 3, [5, 7]))
        targets *= weights
        loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights)
        loss2 = util.sequence_cross_entropy_with_logits(tensor2, targets, weights)
        assert loss.data.numpy() == loss2.data.numpy()
    def test_sequence_cross_entropy_with_logits_smooths_labels_correctly(self):
        """With label_smoothing=0.1, the loss should match a manual mix of 0.9 on the gold label and 0.1 spread over all classes."""
        tensor = torch.rand([1, 3, 4])
        targets = torch.LongTensor(numpy.random.randint(0, 3, [1, 3]))
        weights = torch.ones([2, 3])
        loss = util.sequence_cross_entropy_with_logits(
            tensor, targets, weights, label_smoothing=0.1
        )
        correct_loss = 0.0
        for prediction, label in zip(tensor.squeeze(0), targets.squeeze(0)):
            prediction = torch.nn.functional.log_softmax(prediction, dim=-1)
            correct_loss += prediction[label] * 0.9
            # incorrect elements
            correct_loss += prediction.sum() * 0.1 / 4
        # Average over sequence.
        correct_loss = -correct_loss / 3
        numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
    def test_sequence_cross_entropy_with_logits_averages_batch_correctly(self):
        """Batch averaging should divide only by batches with at least one non-padded token."""
        # test batch average is the same as dividing the batch averaged
        # loss by the number of batches containing any non-padded tokens.
        tensor = torch.rand([5, 7, 4])
        tensor[0, 3:, :] = 0
        tensor[1, 4:, :] = 0
        tensor[2, 2:, :] = 0
        tensor[3, :, :] = 0
        weights = (tensor != 0.0)[:, :, 0].long().squeeze(-1)
        targets = torch.LongTensor(numpy.random.randint(0, 3, [5, 7]))
        targets *= weights
        loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights)
        vector_loss = util.sequence_cross_entropy_with_logits(
            tensor, targets, weights, average=None
        )
        # Batch has one completely padded row, so divide by 4.
        assert loss.data.numpy() == vector_loss.sum().item() / 4
    @flaky(max_runs=3, min_passes=1)
    def test_sequence_cross_entropy_with_logits_averages_token_correctly(self):
        """average="token" should equal the weight-weighted mean of the per-batch losses."""
        # test token average is the same as multiplying the per-batch loss
        # with the per-batch weights and dividing by the total weight
        tensor = torch.rand([5, 7, 4])
        tensor[0, 3:, :] = 0
        tensor[1, 4:, :] = 0
        tensor[2, 2:, :] = 0
        tensor[3, :, :] = 0
        weights = (tensor != 0.0)[:, :, 0].long().squeeze(-1)
        targets = torch.LongTensor(numpy.random.randint(0, 3, [5, 7]))
        targets *= weights
        loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, average="token")
        vector_loss = util.sequence_cross_entropy_with_logits(
            tensor, targets, weights, average=None
        )
        total_token_loss = (vector_loss * weights.float().sum(dim=-1)).sum()
        average_token_loss = (total_token_loss / weights.float().sum()).detach()
        assert_almost_equal(loss.detach().item(), average_token_loss.item(), decimal=5)
    def test_sequence_cross_entropy_with_logits_gamma_correctly(self):
        """Focal-loss gamma should scale each token's loss by (1 - p_t) ** gamma, matching the manual computation."""
        batch = 1
        length = 3
        classes = 4
        gamma = abs(numpy.random.randn())  # [0, +inf)
        tensor = torch.rand([batch, length, classes])
        targets = torch.LongTensor(numpy.random.randint(0, classes, [batch, length]))
        weights = torch.ones([batch, length])
        loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, gamma=gamma)
        correct_loss = 0.0
        for logit, label in zip(tensor.squeeze(0), targets.squeeze(0)):
            p = torch.nn.functional.softmax(logit, dim=-1)
            pt = p[label]
            ft = (1 - pt) ** gamma
            correct_loss += -pt.log() * ft
        # Average over sequence.
        correct_loss = correct_loss / length
        numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
    def test_sequence_cross_entropy_with_logits_alpha_float_correctly(self):
        """A scalar focal-loss alpha (binary case) should weight positive labels by alpha and negatives by 1 - alpha."""
        batch = 1
        length = 3
        classes = 2  # alpha float for binary class only
        alpha = (
            numpy.random.rand() if numpy.random.rand() > 0.5 else (1.0 - numpy.random.rand())
        )  # [0, 1]
        tensor = torch.rand([batch, length, classes])
        targets = torch.LongTensor(numpy.random.randint(0, classes, [batch, length]))
        weights = torch.ones([batch, length])
        loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, alpha=alpha)
        correct_loss = 0.0
        for logit, label in zip(tensor.squeeze(0), targets.squeeze(0)):
            logp = torch.nn.functional.log_softmax(logit, dim=-1)
            logpt = logp[label]
            if label:
                at = alpha
            else:
                at = 1 - alpha
            correct_loss += -logpt * at
        # Average over sequence.
        correct_loss = correct_loss / length
        numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
    def test_sequence_cross_entropy_with_logits_alpha_single_float_correctly(self):
        """Same as the scalar-alpha test but with alpha passed as a 0-dim torch tensor instead of a Python float."""
        batch = 1
        length = 3
        classes = 2  # alpha float for binary class only
        alpha = (
            numpy.random.rand() if numpy.random.rand() > 0.5 else (1.0 - numpy.random.rand())
        )  # [0, 1]
        alpha = torch.tensor(alpha)
        tensor = torch.rand([batch, length, classes])
        targets = torch.LongTensor(numpy.random.randint(0, classes, [batch, length]))
        weights = torch.ones([batch, length])
        loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, alpha=alpha)
        correct_loss = 0.0
        for logit, label in zip(tensor.squeeze(0), targets.squeeze(0)):
            logp = torch.nn.functional.log_softmax(logit, dim=-1)
            logpt = logp[label]
            if label:
                at = alpha
            else:
                at = 1 - alpha
            correct_loss += -logpt * at
        # Average over sequence.
        correct_loss = correct_loss / length
        numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
    def test_sequence_cross_entropy_with_logits_alpha_list_correctly(self):
        """A per-class alpha vector should weight each token's loss by alpha[gold_label]."""
        batch = 1
        length = 3
        classes = 4  # alpha float for binary class only
        alpha = abs(numpy.random.randn(classes))  # [0, +inf)
        tensor = torch.rand([batch, length, classes])
        targets = torch.LongTensor(numpy.random.randint(0, classes, [batch, length]))
        weights = torch.ones([batch, length])
        loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, alpha=alpha)
        correct_loss = 0.0
        for logit, label in zip(tensor.squeeze(0), targets.squeeze(0)):
            logp = torch.nn.functional.log_softmax(logit, dim=-1)
            logpt = logp[label]
            at = alpha[label]
            correct_loss += -logpt * at
        # Average over sequence.
        correct_loss = correct_loss / length
        numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
def test_replace_masked_values_replaces_masked_values_with_finite_value(self):
tensor = torch.FloatTensor([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]])
mask = torch.tensor([[True, True, False]])
replaced = util.replace_masked_values(tensor, mask.unsqueeze(-1), 2).data.numpy()
assert_almost_equal(replaced, [[[1, 2, 3, 4], [5, 6, 7, 8], [2, 2, 2, 2]]])
    def test_logsumexp(self):
        """logsumexp should sum probabilities in log space and stay numerically stable with -inf and large-magnitude inputs."""
        # First a simple example where we add probabilities in log space.
        tensor = torch.FloatTensor([[0.4, 0.1, 0.2]])
        log_tensor = tensor.log()
        log_summed = util.logsumexp(log_tensor, dim=-1, keepdim=False)
        assert_almost_equal(log_summed.exp().data.numpy(), [0.7])
        log_summed = util.logsumexp(log_tensor, dim=-1, keepdim=True)
        assert_almost_equal(log_summed.exp().data.numpy(), [[0.7]])
        # Then some more atypical examples, and making sure this will work with how we handle
        # log masks.
        tensor = torch.FloatTensor([[float("-inf"), 20.0]])
        assert_almost_equal(util.logsumexp(tensor).data.numpy(), [20.0])
        tensor = torch.FloatTensor([[-200.0, 20.0]])
        assert_almost_equal(util.logsumexp(tensor).data.numpy(), [20.0])
        tensor = torch.FloatTensor([[20.0, 20.0], [-200.0, 200.0]])
        assert_almost_equal(util.logsumexp(tensor, dim=0).data.numpy(), [20.0, 200.0])
    def test_flatten_and_batch_shift_indices(self):
        """Indices in batch b should be offset by b * sequence_length (10 here) and flattened to 1D."""
        indices = numpy.array(
            [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 9, 9, 9]], [[2, 1, 0, 7], [7, 7, 2, 3], [0, 0, 4, 2]]]
        )
        indices = torch.tensor(indices, dtype=torch.long)
        shifted_indices = util.flatten_and_batch_shift_indices(indices, 10)
        numpy.testing.assert_array_equal(
            shifted_indices.data.numpy(),
            numpy.array(
                [1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 9, 12, 11, 10, 17, 17, 17, 12, 13, 10, 10, 14, 12]
            ),
        )
    def test_batched_index_select(self):
        """batched_index_select should gather per-batch rows by index and raise ConfigurationError on out-of-range or negative indices."""
        indices = numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
        # Each element is a vector of its index.
        targets = torch.ones([2, 10, 3]).cumsum(1) - 1
        # Make the second batch double its index so they're different.
        targets[1, :, :] *= 2
        indices = torch.tensor(indices, dtype=torch.long)
        selected = util.batched_index_select(targets, indices)
        assert list(selected.size()) == [2, 2, 2, 3]
        ones = numpy.ones([3])
        numpy.testing.assert_array_equal(selected[0, 0, 0, :].data.numpy(), ones)
        numpy.testing.assert_array_equal(selected[0, 0, 1, :].data.numpy(), ones * 2)
        numpy.testing.assert_array_equal(selected[0, 1, 0, :].data.numpy(), ones * 3)
        numpy.testing.assert_array_equal(selected[0, 1, 1, :].data.numpy(), ones * 4)
        numpy.testing.assert_array_equal(selected[1, 0, 0, :].data.numpy(), ones * 10)
        numpy.testing.assert_array_equal(selected[1, 0, 1, :].data.numpy(), ones * 12)
        numpy.testing.assert_array_equal(selected[1, 1, 0, :].data.numpy(), ones * 14)
        numpy.testing.assert_array_equal(selected[1, 1, 1, :].data.numpy(), ones * 16)
        # An index (11) beyond the sequence length must be rejected.
        indices = numpy.array([[[1, 11], [3, 4]], [[5, 6], [7, 8]]])
        indices = torch.tensor(indices, dtype=torch.long)
        with pytest.raises(ConfigurationError):
            util.batched_index_select(targets, indices)
        # A negative index must also be rejected.
        indices = numpy.array([[[1, -1], [3, 4]], [[5, 6], [7, 8]]])
        indices = torch.tensor(indices, dtype=torch.long)
        with pytest.raises(ConfigurationError):
            util.batched_index_select(targets, indices)
    def test_batched_span_select(self):
        """batched_span_select should return span contents in reverse order with a mask marking positions past each span's end."""
        # Each element is a vector of its index.
        targets = torch.ones([3, 12, 2]).cumsum(1) - 1
        spans = torch.LongTensor(
            [
                [[0, 0], [1, 2], [5, 8], [10, 10]],
                [[i, i] for i in range(3, -1, -1)],
                [[0, 3], [1, 4], [2, 5], [10, 11]],
            ]
        )
        selected, mask = util.batched_span_select(targets, spans)
        # Fill masked-out positions with -1 so they are easy to assert against.
        selected = torch.where(mask.unsqueeze(-1), selected, torch.empty_like(selected).fill_(-1))
        numpy.testing.assert_array_equal(
            selected,
            [
                [
                    [[0, 0], [-1, -1], [-1, -1], [-1, -1]],
                    [[2, 2], [1, 1], [-1, -1], [-1, -1]],
                    [[8, 8], [7, 7], [6, 6], [5, 5]],
                    [[10, 10], [-1, -1], [-1, -1], [-1, -1]],
                ],
                [[[i, i], [-1, -1], [-1, -1], [-1, -1]] for i in range(3, -1, -1)],
                [
                    [[3, 3], [2, 2], [1, 1], [0, 0]],
                    [[4, 4], [3, 3], [2, 2], [1, 1]],
                    [[5, 5], [4, 4], [3, 3], [2, 2]],
                    [[11, 11], [10, 10], [-1, -1], [-1, -1]],
                ],
            ],
        )
    def test_flattened_index_select(self):
        """flattened_index_select should gather rows by a strictly 2D index tensor and reject higher-rank indices."""
        indices = numpy.array([[1, 2], [3, 4]])
        # Each element is a vector of its index.
        targets = torch.ones([2, 6, 3]).cumsum(1) - 1
        # Make the second batch double its index so they're different.
        targets[1, :, :] *= 2
        indices = torch.tensor(indices, dtype=torch.long)
        selected = util.flattened_index_select(targets, indices)
        assert list(selected.size()) == [2, 2, 2, 3]
        ones = numpy.ones([3])
        numpy.testing.assert_array_equal(selected[0, 0, 0, :].data.numpy(), ones)
        numpy.testing.assert_array_equal(selected[0, 0, 1, :].data.numpy(), ones * 2)
        numpy.testing.assert_array_equal(selected[0, 1, 0, :].data.numpy(), ones * 3)
        numpy.testing.assert_array_equal(selected[0, 1, 1, :].data.numpy(), ones * 4)
        numpy.testing.assert_array_equal(selected[1, 0, 0, :].data.numpy(), ones * 2)
        numpy.testing.assert_array_equal(selected[1, 0, 1, :].data.numpy(), ones * 4)
        numpy.testing.assert_array_equal(selected[1, 1, 0, :].data.numpy(), ones * 6)
        numpy.testing.assert_array_equal(selected[1, 1, 1, :].data.numpy(), ones * 8)
        # Check we only accept 2D indices.
        with pytest.raises(ConfigurationError):
            util.flattened_index_select(targets, torch.ones([3, 4, 5]))
def test_bucket_values(self):
indices = torch.LongTensor([1, 2, 7, 1, 56, 900])
bucketed_distances = util.bucket_values(indices)
numpy.testing.assert_array_equal(
bucketed_distances.numpy(), numpy.array([1, 2, 5, 1, 8, 9])
)
    def test_add_sentence_boundary_token_ids_handles_2D_input(self):
        """BOS/EOS ids should wrap each 2D row at its actual (unpadded) length, and the mask should track the new tensor."""
        tensor = torch.from_numpy(numpy.array([[1, 2, 3], [4, 5, 0]]))
        mask = tensor > 0
        bos = 9
        eos = 10
        new_tensor, new_mask = util.add_sentence_boundary_token_ids(tensor, mask, bos, eos)
        expected_new_tensor = numpy.array([[9, 1, 2, 3, 10], [9, 4, 5, 10, 0]])
        assert (new_tensor.data.numpy() == expected_new_tensor).all()
        assert (new_mask.data.numpy() == (expected_new_tensor > 0)).all()
    def test_add_sentence_boundary_token_ids_handles_3D_input(self):
        """For 3D (e.g. character-id) input, BOS/EOS must be full vectors inserted at each row's actual length."""
        tensor = torch.from_numpy(
            numpy.array(
                [
                    [[1, 2, 3, 4], [5, 5, 5, 5], [6, 8, 1, 2]],
                    [[4, 3, 2, 1], [8, 7, 6, 5], [0, 0, 0, 0]],
                ]
            )
        )
        # A position counts as real if any of its character ids is nonzero.
        mask = (tensor > 0).sum(dim=-1) > 0
        bos = torch.from_numpy(numpy.array([9, 9, 9, 9]))
        eos = torch.from_numpy(numpy.array([10, 10, 10, 10]))
        new_tensor, new_mask = util.add_sentence_boundary_token_ids(tensor, mask, bos, eos)
        expected_new_tensor = numpy.array(
            [
                [[9, 9, 9, 9], [1, 2, 3, 4], [5, 5, 5, 5], [6, 8, 1, 2], [10, 10, 10, 10]],
                [[9, 9, 9, 9], [4, 3, 2, 1], [8, 7, 6, 5], [10, 10, 10, 10], [0, 0, 0, 0]],
            ]
        )
        assert (new_tensor.data.numpy() == expected_new_tensor).all()
        assert (new_mask.data.numpy() == ((expected_new_tensor > 0).sum(axis=-1) > 0)).all()
    def test_remove_sentence_boundaries(self):
        """remove_sentence_boundaries should strip the first and last unmasked timestep of each row, including the '<S> </S>'-only case."""
        tensor = torch.from_numpy(numpy.random.rand(3, 5, 7))
        mask = torch.from_numpy(
            # The mask with two elements is to test the corner case
            # of an empty sequence, so here we are removing boundaries
            # from "<S> </S>"
            numpy.array([[1, 1, 0, 0, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 0]])
        ).bool()
        new_tensor, new_mask = util.remove_sentence_boundaries(tensor, mask)
        expected_new_tensor = torch.zeros(3, 3, 7)
        expected_new_tensor[1, 0:3, :] = tensor[1, 1:4, :]
        expected_new_tensor[2, 0:2, :] = tensor[2, 1:3, :]
        assert_array_almost_equal(new_tensor.data.numpy(), expected_new_tensor.data.numpy())
        expected_new_mask = torch.from_numpy(numpy.array([[0, 0, 0], [1, 1, 1], [1, 1, 0]])).bool()
        assert (new_mask.data.numpy() == expected_new_mask.data.numpy()).all()
    def test_add_positional_features(self):
        """Compare sinusoidal positional features against the reference tensor2tensor values."""
        # This is hard to test, so we check that we get the same result as the
        # original tensorflow implementation:
        # https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py#L270
        tensor2tensor_result = numpy.asarray(
            [
                [0.00000000e00, 0.00000000e00, 1.00000000e00, 1.00000000e00],
                [8.41470957e-01, 9.99999902e-05, 5.40302277e-01, 1.00000000e00],
                [9.09297407e-01, 1.99999980e-04, -4.16146845e-01, 1.00000000e00],
            ]
        )
        # Input is all zeros so the output is exactly the positional encoding.
        tensor = torch.zeros([2, 3, 4])
        result = util.add_positional_features(tensor, min_timescale=1.0, max_timescale=1.0e4)
        # Both batch elements receive identical positional features.
        numpy.testing.assert_almost_equal(result[0].detach().cpu().numpy(), tensor2tensor_result)
        numpy.testing.assert_almost_equal(result[1].detach().cpu().numpy(), tensor2tensor_result)
        # Check case with odd number of dimensions.
        tensor2tensor_result = numpy.asarray(
            [
                [
                    0.00000000e00,
                    0.00000000e00,
                    0.00000000e00,
                    1.00000000e00,
                    1.00000000e00,
                    1.00000000e00,
                    0.00000000e00,
                ],
                [
                    8.41470957e-01,
                    9.99983307e-03,
                    9.99999902e-05,
                    5.40302277e-01,
                    9.99949992e-01,
                    1.00000000e00,
                    0.00000000e00,
                ],
                [
                    9.09297407e-01,
                    1.99986659e-02,
                    1.99999980e-04,
                    -4.16146815e-01,
                    9.99800026e-01,
                    1.00000000e00,
                    0.00000000e00,
                ],
            ]
        )
        tensor = torch.zeros([2, 3, 7])
        result = util.add_positional_features(tensor, min_timescale=1.0, max_timescale=1.0e4)
        numpy.testing.assert_almost_equal(result[0].detach().cpu().numpy(), tensor2tensor_result)
        numpy.testing.assert_almost_equal(result[1].detach().cpu().numpy(), tensor2tensor_result)
def test_combine_tensors_and_multiply(self):
tensors = [torch.Tensor([[[2, 3]]]), torch.Tensor([[[5, 5]]])]
weight = torch.Tensor([4, 5])
combination = "x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[8 + 15]]
)
combination = "y"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[20 + 25]]
)
combination = "x,y"
weight2 = torch.Tensor([4, 5, 4, 5])
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight2), [[8 + 20 + 15 + 25]]
)
combination = "x-y"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[-3 * 4 + -2 * 5]]
)
combination = "y-x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[3 * 4 + 2 * 5]]
)
combination = "y+x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[7 * 4 + 8 * 5]]
)
combination = "y*x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[10 * 4 + 15 * 5]]
)
combination = "y/x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight),
[[(5 / 2) * 4 + (5 / 3) * 5]],
decimal=4,
)
combination = "x/y"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight),
[[(2 / 5) * 4 + (3 / 5) * 5]],
decimal=4,
)
with pytest.raises(ConfigurationError):
util.combine_tensors_and_multiply("x+y+y", tensors, weight)
with pytest.raises(ConfigurationError):
util.combine_tensors_and_multiply("x%y", tensors, weight)
def test_combine_tensors_and_multiply_with_same_batch_size_and_embedding_dim(self):
# This test just makes sure we handle some potential edge cases where the lengths of all
# dimensions are the same, making sure that the multiplication with the weight vector
# happens along the right dimension (it should be the last one).
tensors = [torch.Tensor([[[5, 5], [4, 4]], [[2, 3], [1, 1]]])] # (2, 2, 2)
weight = torch.Tensor([4, 5]) # (2,)
combination = "x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight),
[[20 + 25, 16 + 20], [8 + 15, 4 + 5]],
)
tensors = [
torch.Tensor([[[5, 5], [2, 2]], [[4, 4], [3, 3]]]),
torch.Tensor([[[2, 3]], [[1, 1]]]),
]
weight = torch.Tensor([4, 5])
combination = "x*y"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight),
[
[5 * 2 * 4 + 5 * 3 * 5, 2 * 2 * 4 + 2 * 3 * 5],
[4 * 1 * 4 + 4 * 1 * 5, 3 * 1 * 4 + 3 * 1 * 5],
],
)
def test_combine_tensors_and_multiply_with_batch_size_one(self):
seq_len_1 = 10
seq_len_2 = 5
embedding_dim = 8
combination = "x,y,x*y"
t1 = torch.randn(1, seq_len_1, embedding_dim)
t2 = torch.randn(1, seq_len_2, embedding_dim)
combined_dim = util.get_combined_dim(combination, [embedding_dim, embedding_dim])
weight = torch.Tensor(combined_dim)
result = util.combine_tensors_and_multiply(
combination, [t1.unsqueeze(2), t2.unsqueeze(1)], weight
)
assert_almost_equal(result.size(), [1, seq_len_1, seq_len_2])
def test_combine_tensors_and_multiply_with_batch_size_one_and_seq_len_one(self):
seq_len_1 = 10
seq_len_2 = 1
embedding_dim = 8
combination = "x,y,x*y"
t1 = torch.randn(1, seq_len_1, embedding_dim)
t2 = torch.randn(1, seq_len_2, embedding_dim)
combined_dim = util.get_combined_dim(combination, [embedding_dim, embedding_dim])
weight = torch.Tensor(combined_dim)
result = util.combine_tensors_and_multiply(
combination, [t1.unsqueeze(2), t2.unsqueeze(1)], weight
)
assert_almost_equal(result.size(), [1, seq_len_1, seq_len_2])
def test_has_tensor(self):
has_tensor = util.has_tensor
tensor = torch.tensor([1, 2, 3])
assert has_tensor(["a", 10, tensor])
assert not has_tensor(["a", 10])
assert has_tensor(("a", 10, tensor))
assert not has_tensor(("a", 10))
assert has_tensor({"a": tensor, "b": 1})
assert not has_tensor({"a": 10, "b": 1})
assert has_tensor(tensor)
assert not has_tensor(3)
assert has_tensor({"x": [0, {"inside": {"double_inside": [3, [10, tensor]]}}]})
def test_combine_initial_dims(self):
tensor = torch.randn(4, 10, 20, 17, 5)
tensor2d = util.combine_initial_dims(tensor)
assert list(tensor2d.size()) == [4 * 10 * 20 * 17, 5]
def test_uncombine_initial_dims(self):
embedding2d = torch.randn(4 * 10 * 20 * 17 * 5, 12)
embedding = util.uncombine_initial_dims(embedding2d, torch.Size((4, 10, 20, 17, 5)))
assert list(embedding.size()) == [4, 10, 20, 17, 5, 12]
def test_inspect_model_parameters(self):
model_archive = str(
self.FIXTURES_ROOT / "decomposable_attention" / "serialization" / "model.tar.gz"
)
parameters_inspection = str(
self.FIXTURES_ROOT / "decomposable_attention" / "parameters_inspection.json"
)
model = load_archive(model_archive).model
with open(parameters_inspection) as file:
parameters_inspection_dict = json.load(file)
assert parameters_inspection_dict == util.inspect_parameters(model)
    def test_move_to_device(self):
        """move_to_device must recurse through dicts, lists, tuples and NamedTuples,
        calling ``.cuda(device)`` on every tensor it encounters."""
        # We're faking the tensor here so that we can test the calls to .cuda() without actually
        # needing a GPU.
        class FakeTensor(torch.Tensor):
            def __init__(self):
                self._device = None
            def cuda(self, device):
                # Record the requested device instead of actually moving any data.
                self._device = device
                return self
        class A(NamedTuple):
            a: int
            b: torch.Tensor
        # One fake tensor at every nesting level we care about.
        structured_obj = {
            "a": [A(1, FakeTensor()), A(2, FakeTensor())],
            "b": FakeTensor(),
            "c": (1, FakeTensor()),
        }
        new_device = 4
        moved_obj = util.move_to_device(structured_obj, new_device)
        # Non-tensor leaves pass through unchanged; every fake tensor saw .cuda(4).
        assert moved_obj["a"][0].a == 1
        assert moved_obj["a"][0].b._device == new_device
        assert moved_obj["a"][1].b._device == new_device
        assert moved_obj["b"]._device == new_device
        assert moved_obj["c"][0] == 1
        assert moved_obj["c"][1]._device == new_device
def test_extend_layer(self):
lin_layer = torch.nn.Linear(10, 5)
new_dim = 8
old_weights = lin_layer.weight.data.clone()
old_bias = lin_layer.bias.data.clone()
util.extend_layer(lin_layer, new_dim)
assert lin_layer.weight.data.shape == (8, 10)
assert lin_layer.bias.data.shape == (8,)
assert (lin_layer.weight.data[:5] == old_weights).all()
assert (lin_layer.bias.data[:5] == old_bias).all()
assert lin_layer.out_features == new_dim
def test_masked_topk_selects_top_scored_items_and_respects_masking(self):
items = torch.randn([3, 4, 5]).clamp(min=0.0, max=1.0)
items[0, :2, :] = 1
items[1, 2:, :] = 1
items[2, 2:, :] = 1
scores = items.sum(-1)
mask = torch.ones([3, 4]).bool()
mask[1, 0] = 0
mask[1, 3] = 0
pruned_scores, pruned_mask, pruned_indices = util.masked_topk(scores, mask, 2)
# Second element in the batch would have indices 2, 3, but
# 3 and 0 are masked, so instead it has 1, 2.
numpy.testing.assert_array_equal(
pruned_indices.data.numpy(), numpy.array([[0, 1], [1, 2], [2, 3]])
)
numpy.testing.assert_array_equal(pruned_mask.data.numpy(), numpy.ones([3, 2]))
# scores should be the result of index_selecting the pruned_indices.
correct_scores = util.batched_index_select(scores.unsqueeze(-1), pruned_indices).squeeze(-1)
self.assert_array_equal_with_mask(correct_scores, pruned_scores, pruned_mask)
def test_masked_topk_works_for_completely_masked_rows(self):
items = torch.randn([3, 4, 5]).clamp(min=0.0, max=1.0)
items[0, :2, :] = 1
items[1, 2:, :] = 1
items[2, 2:, :] = 1
scores = items.sum(-1)
mask = torch.ones([3, 4]).bool()
mask[1, 0] = 0
mask[1, 3] = 0
mask[2, :] = 0 # fully masked last batch element.
pruned_scores, pruned_mask, pruned_indices = util.masked_topk(scores, mask, 2)
# We can't check the last row here, because it's completely masked.
# Instead we'll check that the scores for these elements are very small.
numpy.testing.assert_array_equal(
pruned_indices[:2].data.numpy(), numpy.array([[0, 1], [1, 2]])
)
numpy.testing.assert_array_equal(
pruned_mask.data.numpy(), numpy.array([[1, 1], [1, 1], [0, 0]])
)
# scores should be the result of index_selecting the pruned_indices.
correct_scores = util.batched_index_select(scores.unsqueeze(-1), pruned_indices).squeeze(-1)
self.assert_array_equal_with_mask(correct_scores, pruned_scores, pruned_mask)
    def test_masked_topk_selects_top_scored_items_and_respects_masking_different_num_items(self):
        """Per-row ``k`` given as a LongTensor: each row keeps its own number of items."""
        items = torch.randn([3, 4, 5]).clamp(min=0.0, max=1.0)
        # Force known maxima so the expected indices are deterministic.
        items[0, 0, :] = 1.5
        items[0, 1, :] = 2
        items[0, 3, :] = 1
        items[1, 1:3, :] = 1
        items[2, 0, :] = 1
        items[2, 1, :] = 2
        items[2, 2, :] = 1.5
        scores = items.sum(-1)
        mask = torch.ones([3, 4]).bool()
        mask[1, 3] = 0
        # Rows request 3, 2 and 1 items respectively; output width is max(k) = 3.
        k = torch.tensor([3, 2, 1], dtype=torch.long)
        pruned_scores, pruned_mask, pruned_indices = util.masked_topk(scores, mask, k)
        # Row 0 keeps its three largest (0, 1, 3). Row 1 keeps 1 and 2 (index 3 is
        # masked) and pads by repeating index 2. Row 2 keeps only index 1, padded
        # with index 2; padded positions are masked out below.
        numpy.testing.assert_array_equal(
            pruned_indices.data.numpy(), numpy.array([[0, 1, 3], [1, 2, 2], [1, 2, 2]])
        )
        numpy.testing.assert_array_equal(
            pruned_mask.data.numpy(), numpy.array([[1, 1, 1], [1, 1, 0], [1, 0, 0]])
        )
        # scores should be the result of index_selecting the pruned_indices.
        correct_scores = util.batched_index_select(scores.unsqueeze(-1), pruned_indices).squeeze(-1)
        self.assert_array_equal_with_mask(correct_scores, pruned_scores, pruned_mask)
    def test_masked_topk_works_for_row_with_no_items_requested(self):
        """Tensor-valued ``k`` where one row requests zero items must not crash."""
        # Case where `num_items_to_keep` is a tensor rather than an int. Make sure it does the right
        # thing when no items are requested for one of the rows.
        items = torch.randn([3, 4, 5]).clamp(min=0.0, max=1.0)
        # Force known maxima so the expected indices are deterministic.
        items[0, :3, :] = 1
        items[1, 2:, :] = 1
        items[2, 2:, :] = 1
        scores = items.sum(-1)
        mask = torch.ones([3, 4]).bool()
        mask[1, 0] = 0
        mask[1, 3] = 0
        k = torch.tensor([3, 2, 0], dtype=torch.long)
        pruned_scores, pruned_mask, pruned_indices = util.masked_topk(scores, mask, k)
        # First element just picks top three entries. Second would pick entries 2 and 3, but 0 and 3
        # are masked, so it takes 1 and 2 (repeating the second index). The third element is
        # entirely masked and just repeats the largest index with a top-3 score.
        numpy.testing.assert_array_equal(
            pruned_indices.data.numpy(), numpy.array([[0, 1, 2], [1, 2, 2], [3, 3, 3]])
        )
        numpy.testing.assert_array_equal(
            pruned_mask.data.numpy(), numpy.array([[1, 1, 1], [1, 1, 0], [0, 0, 0]])
        )
        # scores should be the result of index_selecting the pruned_indices.
        correct_scores = util.batched_index_select(scores.unsqueeze(-1), pruned_indices).squeeze(-1)
        self.assert_array_equal_with_mask(correct_scores, pruned_scores, pruned_mask)
    def test_masked_topk_works_for_multiple_dimensions(self):
        """masked_topk with ``dim=1`` on a 4D tensor and a 3D per-position ``k``."""
        # fmt: off
        items = torch.FloatTensor([  # (3, 2, 5)
            [[4, 2, 9, 9, 7], [-4, -2, -9, -9, -7]],
            [[5, 4, 1, 8, 8], [9, 1, 7, 4, 1]],
            [[9, 8, 9, 6, 0], [2, 2, 2, 2, 2]],
        ]).unsqueeze(-1).expand(3, 2, 5, 4)
        mask = torch.tensor([
            [[False, False, False, False, False], [True, True, True, True, True]],
            [[True, True, True, True, False], [False, True, True, True, True]],
            [[True, False, True, True, True], [False, True, False, True, True]],
        ]).unsqueeze(-1).expand(3, 2, 5, 4)
        # This is the same as just specifying a scalar int, but we want to test this behavior
        k = torch.ones(3, 5, 4, dtype=torch.long)
        k[1, 3, :] = 2
        # Expected values selected along dim=1; zeros mark masked-out slots.
        target_items = torch.FloatTensor([
            [[-4, -2, -9, -9, -7], [0, 0, 0, 0, 0]],
            [[5, 4, 7, 8, 1], [0, 0, 0, 4, 0]],
            [[9, 2, 9, 6, 2], [0, 0, 0, 0, 0]],
        ]).unsqueeze(-1).expand(3, 2, 5, 4)
        # Only the first selected slot is valid everywhere except where k == 2.
        target_mask = torch.ones(3, 2, 5, 4, dtype=torch.bool)
        target_mask[:, 1, :, :] = 0
        target_mask[1, 1, 3, :] = 1
        target_indices = torch.LongTensor([
            [[1, 1, 1, 1, 1], [0, 0, 0, 0, 0]],
            [[0, 0, 1, 0, 1], [0, 0, 0, 1, 0]],
            [[0, 1, 0, 0, 1], [0, 0, 0, 0, 0]],
        ]).unsqueeze(-1).expand(3, 2, 5, 4)
        # fmt: on
        pruned_items, pruned_mask, pruned_indices = util.masked_topk(items, mask, k, dim=1)
        numpy.testing.assert_array_equal(pruned_mask.data.numpy(), target_mask.data.numpy())
        self.assert_array_equal_with_mask(pruned_items, target_items, pruned_mask)
        self.assert_array_equal_with_mask(pruned_indices, target_indices, pruned_mask)
def assert_array_equal_with_mask(self, a, b, mask):
numpy.testing.assert_array_equal((a * mask).data.numpy(), (b * mask).data.numpy())
def test_tensors_equal(self):
# Basic
assert util.tensors_equal(torch.tensor([1]), torch.tensor([1]))
assert not util.tensors_equal(torch.tensor([1]), torch.tensor([2]))
# Bool
assert util.tensors_equal(torch.tensor([True]), torch.tensor([True]))
# Cross dtype
assert util.tensors_equal(torch.tensor([1]), torch.tensor([1.0]))
assert util.tensors_equal(torch.tensor([1]), torch.tensor([True]))
# Containers
assert util.tensors_equal([torch.tensor([1])], [torch.tensor([1])])
assert not util.tensors_equal([torch.tensor([1])], [torch.tensor([2])])
assert util.tensors_equal({"key": torch.tensor([1])}, {"key": torch.tensor([1])})
| [
"allennlp.nn.util.bucket_values",
"allennlp.nn.util.masked_topk",
"numpy.random.rand",
"torch.LongTensor",
"allennlp.nn.util.get_text_field_mask",
"torch.max",
"allennlp.nn.util.get_combined_dim",
"torch.from_numpy",
"allennlp.nn.util.add_sentence_boundary_token_ids",
"numpy.array",
"allennlp.nn... | [((40211, 40242), 'flaky.flaky', 'flaky', ([], {'max_runs': '(3)', 'min_passes': '(1)'}), '(max_runs=3, min_passes=1)\n', (40216, 40242), False, 'from flaky import flaky\n'), ((537, 725), 'torch.tensor', 'torch.tensor', (['[[True, True, True, False, False, False], [True, True, False, False, False,\n False], [True, True, True, True, True, True], [True, False, False, \n False, False, False]]'], {}), '([[True, True, True, False, False, False], [True, True, False, \n False, False, False], [True, True, True, True, True, True], [True, \n False, False, False, False, False]])\n', (549, 725), False, 'import torch\n'), ((835, 890), 'allennlp.nn.util.get_lengths_from_binary_sequence_mask', 'util.get_lengths_from_binary_sequence_mask', (['binary_mask'], {}), '(binary_mask)\n', (877, 890), False, 'from allennlp.nn import util\n'), ((1055, 1088), 'torch.LongTensor', 'torch.LongTensor', (['[4, 3, 1, 4, 2]'], {}), '([4, 3, 1, 4, 2])\n', (1071, 1088), False, 'import torch\n'), ((1182, 1299), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mask', '[[1, 1, 1, 1, 0], [1, 1, 1, 0, 0], [1, 0, 0, 0, 0], [1, 1, 1, 1, 0], [1, 1,\n 0, 0, 0]]'], {}), '(mask, [[1, 1, 1, 1, 0], [1, 1, 1, 0, 0], [1, 0, 0, 0, 0\n ], [1, 1, 1, 1, 0], [1, 1, 0, 0, 0]])\n', (1201, 1299), False, 'from numpy.testing import assert_array_almost_equal, assert_almost_equal\n'), ((1805, 1860), 'allennlp.nn.util.get_lengths_from_binary_sequence_mask', 'util.get_lengths_from_binary_sequence_mask', (['binary_mask'], {}), '(binary_mask)\n', (1847, 1860), False, 'from allennlp.nn import util\n'), ((2039, 2085), 'torch.LongTensor', 'torch.LongTensor', (['[[0, 1, 1, 0], [2, 0, 2, 2]]'], {}), '([[0, 1, 1, 0], [2, 0, 2, 2]])\n', (2055, 2085), False, 'import torch\n'), ((2098, 2130), 'torch.FloatTensor', 'torch.FloatTensor', (['[3, 4, -5, 3]'], {}), '([3, 4, -5, 3])\n', (2115, 2130), False, 'import torch\n'), ((2292, 2352), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', 
(['clamped_tensor', '[[0, 0, 3], [3, 0, -3]]'], {}), '(clamped_tensor, [[0, 0, 3], [3, 0, -3]])\n', (2311, 2352), False, 'from numpy.testing import assert_array_almost_equal, assert_almost_equal\n'), ((2408, 2448), 'torch.LongTensor', 'torch.LongTensor', (['[[0, 1, 1], [2, 0, 2]]'], {}), '([[0, 1, 1], [2, 0, 2]])\n', (2424, 2448), False, 'import torch\n'), ((2461, 2490), 'torch.FloatTensor', 'torch.FloatTensor', (['[3, 4, -5]'], {}), '([3, 4, -5])\n', (2478, 2490), False, 'import torch\n'), ((2652, 2712), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['clamped_tensor', '[[0, 0, 3], [3, 0, -3]]'], {}), '(clamped_tensor, [[0, 0, 3], [3, 0, -3]])\n', (2671, 2712), False, 'from numpy.testing import assert_array_almost_equal, assert_almost_equal\n'), ((2762, 2802), 'torch.tensor', 'torch.tensor', (['[[5, -4, 3], [-3, 0, -30]]'], {}), '([[5, -4, 3], [-3, 0, -30]])\n', (2774, 2802), False, 'import torch\n'), ((2828, 2876), 'allennlp.nn.util.clamp_tensor', 'util.clamp_tensor', (['tensor'], {'minimum': '(-3)', 'maximum': '(3)'}), '(tensor, minimum=-3, maximum=3)\n', (2845, 2876), False, 'from allennlp.nn import util\n'), ((2885, 2947), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['clamped_tensor', '[[3, -3, 3], [-3, 0, -3]]'], {}), '(clamped_tensor, [[3, -3, 3], [-3, 0, -3]])\n', (2904, 2947), False, 'from numpy.testing import assert_array_almost_equal, assert_almost_equal\n'), ((3008, 3029), 'torch.rand', 'torch.rand', (['[5, 7, 9]'], {}), '([5, 7, 9])\n', (3018, 3029), False, 'import torch\n'), ((3174, 3207), 'torch.LongTensor', 'torch.LongTensor', (['[3, 4, 1, 5, 7]'], {}), '([3, 4, 1, 5, 7])\n', (3190, 3207), False, 'import torch\n'), ((3268, 3319), 'allennlp.nn.util.sort_batch_by_length', 'util.sort_batch_by_length', (['tensor', 'sequence_lengths'], {}), '(tensor, sequence_lengths)\n', (3293, 3319), False, 'from allennlp.nn import util\n'), ((4041, 4163), 'torch.Tensor', 'torch.Tensor', (['[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], 
[[13, 14, 15, 16], [17, 18,\n 19, 20], [21, 22, 23, 24]]]'], {}), '([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [[13, 14, 15, \n 16], [17, 18, 19, 20], [21, 22, 23, 24]]])\n', (4053, 4163), False, 'import torch\n'), ((4243, 4298), 'torch.tensor', 'torch.tensor', (['[[True, True, True], [True, True, False]]'], {}), '([[True, True, True], [True, True, False]])\n', (4255, 4298), False, 'import torch\n'), ((4322, 4395), 'allennlp.nn.util.get_final_encoder_states', 'util.get_final_encoder_states', (['encoder_outputs', 'mask'], {'bidirectional': '(False)'}), '(encoder_outputs, mask, bidirectional=False)\n', (4351, 4395), False, 'from allennlp.nn import util\n'), ((4511, 4583), 'allennlp.nn.util.get_final_encoder_states', 'util.get_final_encoder_states', (['encoder_outputs', 'mask'], {'bidirectional': '(True)'}), '(encoder_outputs, mask, bidirectional=True)\n', (4540, 4583), False, 'from allennlp.nn import util\n'), ((4786, 4822), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 2.0, 3.0]]'], {}), '([[1.0, 2.0, 3.0]])\n', (4803, 4822), False, 'import torch\n'), ((5124, 5160), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 2.0, 5.0]]'], {}), '([[1.0, 2.0, 5.0]])\n', (5141, 5160), False, 'import torch\n'), ((5431, 5467), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0]])\n', (5448, 5467), False, 'import torch\n'), ((5755, 5808), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]]'], {}), '([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])\n', (5772, 5808), False, 'import torch\n'), ((6203, 6256), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 2.0, 5.0], [0.0, 0.0, 0.0]]'], {}), '([[1.0, 2.0, 5.0], [0.0, 0.0, 0.0]])\n', (6220, 6256), False, 'import torch\n'), ((6662, 6698), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 2.0, 5.0]]'], {}), '([[1.0, 2.0, 5.0]])\n', (6679, 6698), False, 'import torch\n'), ((6717, 6752), 'torch.tensor', 'torch.tensor', (['[[True, False, True]]'], {}), '([[True, False, 
True]])\n', (6729, 6752), False, 'import torch\n'), ((6958, 6999), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 2.0, 3.0, 4.0]]'], {}), '([[0.0, 2.0, 3.0, 4.0]])\n', (6975, 6999), False, 'import torch\n'), ((7018, 7059), 'torch.tensor', 'torch.tensor', (['[[True, False, True, True]]'], {}), '([[True, False, True, True]])\n', (7030, 7059), False, 'import torch\n'), ((7400, 7441), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0]])\n', (7417, 7441), False, 'import torch\n'), ((7460, 7503), 'torch.tensor', 'torch.tensor', (['[[False, False, False, True]]'], {}), '([[False, False, False, True]])\n', (7472, 7503), False, 'import torch\n'), ((7793, 7834), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 2.0, 3.0, 4.0]]'], {}), '([[0.0, 2.0, 3.0, 4.0]])\n', (7810, 7834), False, 'import torch\n'), ((7853, 7897), 'torch.tensor', 'torch.tensor', (['[[False, False, False, False]]'], {}), '([[False, False, False, False]])\n', (7865, 7897), False, 'import torch\n'), ((8191, 8232), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0]])\n', (8208, 8232), False, 'import torch\n'), ((8251, 8295), 'torch.tensor', 'torch.tensor', (['[[False, False, False, False]]'], {}), '([[False, False, False, False]])\n', (8263, 8295), False, 'import torch\n'), ((8586, 8627), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 1.0, 100000.0]]'], {}), '([[1.0, 1.0, 100000.0]])\n', (8603, 8627), False, 'import torch\n'), ((8641, 8676), 'torch.tensor', 'torch.tensor', (['[[True, True, False]]'], {}), '([[True, True, False]])\n', (8653, 8676), False, 'import torch\n'), ((8914, 8967), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]]'], {}), '([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])\n', (8931, 8967), False, 'import torch\n'), ((8983, 9038), 'torch.tensor', 'torch.tensor', (['[[True, False, True], [True, True, True]]'], {}), '([[True, False, True], [True, True, True]])\n', 
(8995, 9038), False, 'import torch\n'), ((9427, 9480), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])\n', (9444, 9480), False, 'import torch\n'), ((9496, 9551), 'torch.tensor', 'torch.tensor', (['[[True, False, True], [True, True, True]]'], {}), '([[True, False, True], [True, True, True]])\n', (9508, 9551), False, 'import torch\n'), ((9912, 9965), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])\n', (9929, 9965), False, 'import torch\n'), ((9981, 10039), 'torch.tensor', 'torch.tensor', (['[[True, False, True], [False, False, False]]'], {}), '([[True, False, True], [False, False, False]])\n', (9993, 10039), False, 'import torch\n'), ((10269, 10322), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])\n', (10286, 10322), False, 'import torch\n'), ((10338, 10396), 'torch.tensor', 'torch.tensor', (['[[False, False, False], [True, False, True]]'], {}), '([[False, False, False], [True, False, True]])\n', (10350, 10396), False, 'import torch\n'), ((10748, 10784), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 2.0, 5.0]]'], {}), '([[1.0, 2.0, 5.0]])\n', (10765, 10784), False, 'import torch\n'), ((10803, 10838), 'torch.tensor', 'torch.tensor', (['[[True, False, True]]'], {}), '([[True, False, True]])\n', (10815, 10838), False, 'import torch\n'), ((11089, 11130), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 2.0, 3.0, 4.0]]'], {}), '([[0.0, 2.0, 3.0, 4.0]])\n', (11106, 11130), False, 'import torch\n'), ((11149, 11190), 'torch.tensor', 'torch.tensor', (['[[True, False, True, True]]'], {}), '([[True, False, True, True]])\n', (11161, 11190), False, 'import torch\n'), ((11576, 11617), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0]])\n', (11593, 11617), False, 'import torch\n'), ((11636, 
11679), 'torch.tensor', 'torch.tensor', (['[[False, False, False, True]]'], {}), '([[False, False, False, True]])\n', (11648, 11679), False, 'import torch\n'), ((12014, 12055), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 2.0, 3.0, 4.0]]'], {}), '([[0.0, 2.0, 3.0, 4.0]])\n', (12031, 12055), False, 'import torch\n'), ((12074, 12118), 'torch.tensor', 'torch.tensor', (['[[False, False, False, False]]'], {}), '([[False, False, False, False]])\n', (12086, 12118), False, 'import torch\n'), ((12461, 12502), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0]])\n', (12478, 12502), False, 'import torch\n'), ((12521, 12565), 'torch.tensor', 'torch.tensor', (['[[False, False, False, False]]'], {}), '([[False, False, False, False]])\n', (12533, 12565), False, 'import torch\n'), ((12905, 12946), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 1.0, 100000.0]]'], {}), '([[1.0, 1.0, 100000.0]])\n', (12922, 12946), False, 'import torch\n'), ((12960, 12995), 'torch.tensor', 'torch.tensor', (['[[True, True, False]]'], {}), '([[True, True, False]])\n', (12972, 12995), False, 'import torch\n'), ((13278, 13331), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]]'], {}), '([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])\n', (13295, 13331), False, 'import torch\n'), ((13347, 13402), 'torch.tensor', 'torch.tensor', (['[[True, False, True], [True, True, True]]'], {}), '([[True, False, True], [True, True, True]])\n', (13359, 13402), False, 'import torch\n'), ((13836, 13889), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])\n', (13853, 13889), False, 'import torch\n'), ((13905, 13960), 'torch.tensor', 'torch.tensor', (['[[True, False, True], [True, True, True]]'], {}), '([[True, False, True], [True, True, True]])\n', (13917, 13960), False, 'import torch\n'), ((14366, 14419), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 0.0, 0.0], [1.0, 
2.0, 3.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])\n', (14383, 14419), False, 'import torch\n'), ((14435, 14493), 'torch.tensor', 'torch.tensor', (['[[True, False, True], [False, False, False]]'], {}), '([[True, False, True], [False, False, False]])\n', (14447, 14493), False, 'import torch\n'), ((14802, 14855), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])\n', (14819, 14855), False, 'import torch\n'), ((14871, 14929), 'torch.tensor', 'torch.tensor', (['[[False, False, False], [True, False, True]]'], {}), '([[False, False, False], [True, False, True]])\n', (14883, 14929), False, 'import torch\n'), ((15520, 15556), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 2.0, 5.0]]'], {}), '([[1.0, 2.0, 5.0]])\n', (15537, 15556), False, 'import torch\n'), ((15575, 15610), 'torch.tensor', 'torch.tensor', (['[[True, False, True]]'], {}), '([[True, False, True]])\n', (15587, 15610), False, 'import torch\n'), ((15853, 15894), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 2.0, 3.0, 4.0]]'], {}), '([[0.0, 2.0, 3.0, 4.0]])\n', (15870, 15894), False, 'import torch\n'), ((15913, 15954), 'torch.tensor', 'torch.tensor', (['[[True, False, True, True]]'], {}), '([[True, False, True, True]])\n', (15925, 15954), False, 'import torch\n'), ((16310, 16351), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0]])\n', (16327, 16351), False, 'import torch\n'), ((16370, 16413), 'torch.tensor', 'torch.tensor', (['[[False, False, False, True]]'], {}), '([[False, False, False, True]])\n', (16382, 16413), False, 'import torch\n'), ((16810, 16851), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 2.0, 3.0, 4.0]]'], {}), '([[0.0, 2.0, 3.0, 4.0]])\n', (16827, 16851), False, 'import torch\n'), ((16870, 16914), 'torch.tensor', 'torch.tensor', (['[[False, False, False, False]]'], {}), '([[False, False, False, False]])\n', (16882, 16914), False, 'import torch\n'), 
((17158, 17193), 'torch.FloatTensor', 'torch.FloatTensor', (['[1.0, 12.0, 5.0]'], {}), '([1.0, 12.0, 5.0])\n', (17175, 17193), False, 'import torch\n'), ((17212, 17245), 'torch.tensor', 'torch.tensor', (['[True, False, True]'], {}), '([True, False, True])\n', (17224, 17245), False, 'import torch\n'), ((17336, 17383), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['vector_1d_maxed', '(5.0)'], {}), '(vector_1d_maxed, 5.0)\n', (17361, 17383), False, 'from numpy.testing import assert_array_almost_equal, assert_almost_equal\n'), ((17502, 17537), 'torch.FloatTensor', 'torch.FloatTensor', (['[1.0, 12.0, 5.0]'], {}), '([1.0, 12.0, 5.0])\n', (17519, 17537), False, 'import torch\n'), ((17556, 17591), 'torch.tensor', 'torch.tensor', (['[False, False, False]'], {}), '([False, False, False])\n', (17568, 17591), False, 'import torch\n'), ((17792, 17848), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]]'], {}), '([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])\n', (17809, 17848), False, 'import torch\n'), ((17864, 17920), 'torch.tensor', 'torch.tensor', (['[[True, False, True], [True, True, False]]'], {}), '([[True, False, True], [True, True, False]])\n', (17876, 17920), False, 'import torch\n'), ((18145, 18201), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]]'], {}), '([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])\n', (18162, 18201), False, 'import torch\n'), ((18217, 18273), 'torch.tensor', 'torch.tensor', (['[[True, False, True], [True, True, False]]'], {}), '([[True, False, True], [True, True, False]])\n', (18229, 18273), False, 'import torch\n'), ((18486, 18592), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[1.0, 2.0], [12.0, 3.0], [5.0, -1.0]], [[-1.0, -3.0], [-2.0, -0.5], [3.0,\n 8.0]]]'], {}), '([[[1.0, 2.0], [12.0, 3.0], [5.0, -1.0]], [[-1.0, -3.0], [\n -2.0, -0.5], [3.0, 8.0]]])\n', (18503, 18592), False, 'import torch\n'), ((18957, 18992), 'torch.FloatTensor', 'torch.FloatTensor', 
(['[1.0, 12.0, 5.0]'], {}), '([1.0, 12.0, 5.0])\n', (18974, 18992), False, 'import torch\n'), ((19011, 19044), 'torch.tensor', 'torch.tensor', (['[True, False, True]'], {}), '([True, False, True])\n', (19023, 19044), False, 'import torch\n'), ((19135, 19181), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['vector_1d_mean', '(3.0)'], {}), '(vector_1d_mean, 3.0)\n', (19160, 19181), False, 'from numpy.testing import assert_array_almost_equal, assert_almost_equal\n'), ((19300, 19335), 'torch.FloatTensor', 'torch.FloatTensor', (['[1.0, 12.0, 5.0]'], {}), '([1.0, 12.0, 5.0])\n', (19317, 19335), False, 'import torch\n'), ((19354, 19389), 'torch.tensor', 'torch.tensor', (['[False, False, False]'], {}), '([False, False, False])\n', (19366, 19389), False, 'import torch\n'), ((19589, 19645), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]]'], {}), '([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])\n', (19606, 19645), False, 'import torch\n'), ((19661, 19717), 'torch.tensor', 'torch.tensor', (['[[True, False, True], [True, True, False]]'], {}), '([[True, False, True], [True, True, False]])\n', (19673, 19717), False, 'import torch\n'), ((19941, 19997), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]]'], {}), '([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])\n', (19958, 19997), False, 'import torch\n'), ((20013, 20069), 'torch.tensor', 'torch.tensor', (['[[True, False, True], [True, True, False]]'], {}), '([[True, False, True], [True, True, False]])\n', (20025, 20069), False, 'import torch\n'), ((20281, 20387), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[1.0, 2.0], [12.0, 3.0], [5.0, -1.0]], [[-1.0, -3.0], [-2.0, -0.5], [3.0,\n 8.0]]]'], {}), '([[[1.0, 2.0], [12.0, 3.0], [5.0, -1.0]], [[-1.0, -3.0], [\n -2.0, -0.5], [3.0, 8.0]]])\n', (20298, 20387), False, 'import torch\n'), ((20703, 20796), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[6, 6, 6], [1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4], [5, 5, 
5]]]'], {}), '([[[6, 6, 6], [1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4],\n [5, 5, 5]]])\n', (20720, 20796), False, 'import torch\n'), ((20902, 20934), 'allennlp.nn.util.masked_flip', 'util.masked_flip', (['tensor', '[1, 2]'], {}), '(tensor, [1, 2])\n', (20918, 20934), False, 'from allennlp.nn import util\n'), ((20943, 20982), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['response', 'solution'], {}), '(response, solution)\n', (20962, 20982), False, 'from numpy.testing import assert_array_almost_equal, assert_almost_equal\n'), ((21001, 21116), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[6, 6, 6], [1, 1, 1], [2, 2, 2], [0, 0, 0]], [[3, 3, 3], [4, 4, 4], [5, 5,\n 5], [1, 2, 3]]]'], {}), '([[[6, 6, 6], [1, 1, 1], [2, 2, 2], [0, 0, 0]], [[3, 3, 3],\n [4, 4, 4], [5, 5, 5], [1, 2, 3]]])\n', (21018, 21116), False, 'import torch\n'), ((21348, 21380), 'allennlp.nn.util.masked_flip', 'util.masked_flip', (['tensor', '[3, 4]'], {}), '(tensor, [3, 4])\n', (21364, 21380), False, 'from allennlp.nn import util\n'), ((21389, 21428), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['response', 'solution'], {}), '(response, solution)\n', (21408, 21428), False, 'from numpy.testing import assert_array_almost_equal, assert_almost_equal\n'), ((21447, 21612), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[6, 6, 6], [1, 1, 1], [2, 2, 2], [0, 0, 0]], [[3, 3, 3], [4, 4, 4], [5, 5,\n 5], [1, 2, 3]], [[1, 1, 1], [2, 2, 2], [0, 0, 0], [0, 0, 0]]]'], {}), '([[[6, 6, 6], [1, 1, 1], [2, 2, 2], [0, 0, 0]], [[3, 3, 3],\n [4, 4, 4], [5, 5, 5], [1, 2, 3]], [[1, 1, 1], [2, 2, 2], [0, 0, 0], [0,\n 0, 0]]])\n', (21464, 21612), False, 'import torch\n'), ((21914, 21949), 'allennlp.nn.util.masked_flip', 'util.masked_flip', (['tensor', '[3, 4, 2]'], {}), '(tensor, [3, 4, 2])\n', (21930, 21949), False, 'from allennlp.nn import util\n'), ((21958, 21997), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['response', 'solution'], {}), '(response, solution)\n', 
(21977, 21997), False, 'from numpy.testing import assert_array_almost_equal, assert_almost_equal\n'), ((23880, 23927), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['actual_mask', 'expected_mask'], {}), '(actual_mask, expected_mask)\n', (23899, 23927), False, 'from numpy.testing import assert_array_almost_equal, assert_almost_equal\n'), ((24495, 24556), 'numpy.random.rand', 'numpy.random.rand', (['batch_size', 'sentence_length', 'embedding_dim'], {}), '(batch_size, sentence_length, embedding_dim)\n', (24512, 24556), False, 'import numpy\n'), ((24651, 24695), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.3, 0.4, 0.1, 0, 1.2]]'], {}), '([[0.3, 0.4, 0.1, 0, 1.2]])\n', (24668, 24695), False, 'import torch\n'), ((25106, 25191), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['aggregated_array', '[expected_array]'], {'decimal': '(5)'}), '(aggregated_array, [expected_array], decimal=5\n )\n', (25139, 25191), False, 'import numpy\n'), ((25385, 25459), 'numpy.random.rand', 'numpy.random.rand', (['batch_size', 'length_1', 'length_2', 'length_3', 'embedding_dim'], {}), '(batch_size, length_1, length_2, length_3, embedding_dim)\n', (25402, 25459), False, 'import numpy\n'), ((25486, 25545), 'numpy.random.rand', 'numpy.random.rand', (['batch_size', 'length_1', 'length_2', 'length_3'], {}), '(batch_size, length_1, length_2, length_3)\n', (25503, 25545), False, 'import numpy\n'), ((26049, 26140), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['aggregated_array[0, 3, 2]', 'expected_array'], {'decimal': '(5)'}), '(aggregated_array[0, 3, 2], expected_array,\n decimal=5)\n', (26082, 26140), False, 'import numpy\n'), ((26342, 26396), 'numpy.random.rand', 'numpy.random.rand', (['batch_size', 'length_3', 'embedding_dim'], {}), '(batch_size, length_3, embedding_dim)\n', (26359, 26396), False, 'import numpy\n'), ((26423, 26482), 'numpy.random.rand', 'numpy.random.rand', (['batch_size', 'length_1', 'length_2', 
'length_3'], {}), '(batch_size, length_1, length_2, length_3)\n', (26440, 26482), False, 'import numpy\n'), ((27398, 27452), 'numpy.random.rand', 'numpy.random.rand', (['batch_size', 'length_2', 'embedding_dim'], {}), '(batch_size, length_2, embedding_dim)\n', (27415, 27452), False, 'import numpy\n'), ((27479, 27528), 'numpy.random.rand', 'numpy.random.rand', (['batch_size', 'length_1', 'length_2'], {}), '(batch_size, length_1, length_2)\n', (27496, 27528), False, 'import numpy\n'), ((28378, 28397), 'torch.zeros', 'torch.zeros', (['[9, 9]'], {}), '([9, 9])\n', (28389, 28397), False, 'import torch\n'), ((28419, 28479), 'allennlp.nn.util.viterbi_decode', 'util.viterbi_decode', (['sequence_logits.data', 'transition_matrix'], {}), '(sequence_logits.data, transition_matrix)\n', (28438, 28479), False, 'from allennlp.nn import util\n'), ((28508, 28537), 'torch.max', 'torch.max', (['sequence_logits', '(1)'], {}), '(sequence_logits, 1)\n', (28517, 28537), False, 'import torch\n'), ((28783, 28802), 'torch.zeros', 'torch.zeros', (['[9, 9]'], {}), '([9, 9])\n', (28794, 28802), False, 'import torch\n'), ((28839, 28855), 'torch.zeros', 'torch.zeros', (['[9]'], {}), '([9])\n', (28850, 28855), False, 'import torch\n'), ((28981, 28997), 'torch.zeros', 'torch.zeros', (['[9]'], {}), '([9])\n', (28992, 28997), False, 'import torch\n'), ((29105, 29275), 'allennlp.nn.util.viterbi_decode', 'util.viterbi_decode', (['sequence_logits.data', 'transition_matrix'], {'allowed_end_transitions': 'allowed_end_transitions', 'allowed_start_transitions': 'allowed_start_transitions'}), '(sequence_logits.data, transition_matrix,\n allowed_end_transitions=allowed_end_transitions,\n allowed_start_transitions=allowed_start_transitions)\n', (29124, 29275), False, 'from allennlp.nn import util\n'), ((29545, 29670), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0, 0, 0, 3, 5], [0, 0, 0, 3, 4], [0, 0, 0, 3, 4], [0, 0, 0, 3, 4], [0, 0,\n 0, 3, 4], [0, 0, 0, 3, 4]]'], {}), '([[0, 0, 0, 3, 5], [0, 0, 0, 3, 4], 
[0, 0, 0, 3, 4], [0, 0,\n 0, 3, 4], [0, 0, 0, 3, 4], [0, 0, 0, 3, 4]])\n', (29562, 29670), False, 'import torch\n'), ((29883, 29902), 'torch.zeros', 'torch.zeros', (['[5, 5]'], {}), '([5, 5])\n', (29894, 29902), False, 'import torch\n'), ((30003, 30058), 'allennlp.nn.util.viterbi_decode', 'util.viterbi_decode', (['sequence_logits', 'transition_matrix'], {}), '(sequence_logits, transition_matrix)\n', (30022, 30058), False, 'from allennlp.nn import util\n'), ((30246, 30371), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0, 0, 0, 4, 4], [0, 0, 0, 4, 4], [0, 0, 0, 4, 4], [0, 0, 0, 4, 4], [0, 0,\n 0, 4, 4], [0, 0, 0, 4, 4]]'], {}), '([[0, 0, 0, 4, 4], [0, 0, 0, 4, 4], [0, 0, 0, 4, 4], [0, 0,\n 0, 4, 4], [0, 0, 0, 4, 4], [0, 0, 0, 4, 4]])\n', (30263, 30371), False, 'import torch\n'), ((30705, 30724), 'torch.zeros', 'torch.zeros', (['[5, 5]'], {}), '([5, 5])\n', (30716, 30724), False, 'import torch\n'), ((30860, 30915), 'allennlp.nn.util.viterbi_decode', 'util.viterbi_decode', (['sequence_logits', 'transition_matrix'], {}), '(sequence_logits, transition_matrix)\n', (30879, 30915), False, 'from allennlp.nn import util\n'), ((30988, 31049), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1, 0, 0, 4], [1, 0, 6, 2], [0, 3, 0, 4]]'], {}), '([[1, 0, 0, 4], [1, 0, 6, 2], [0, 3, 0, 4]])\n', (31005, 31049), False, 'import torch\n'), ((31204, 31223), 'torch.zeros', 'torch.zeros', (['[4, 4]'], {}), '([4, 4])\n', (31215, 31223), False, 'import torch\n'), ((31321, 31376), 'allennlp.nn.util.viterbi_decode', 'util.viterbi_decode', (['sequence_logits', 'transition_matrix'], {}), '(sequence_logits, transition_matrix)\n', (31340, 31376), False, 'from allennlp.nn import util\n'), ((31558, 31683), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0, 0, 0, 7, 7], [0, 0, 0, 7, 7], [0, 0, 0, 7, 7], [0, 0, 0, 7, 7], [0, 0,\n 0, 7, 7], [0, 0, 0, 7, 7]]'], {}), '([[0, 0, 0, 7, 7], [0, 0, 0, 7, 7], [0, 0, 0, 7, 7], [0, 0,\n 0, 7, 7], [0, 0, 0, 7, 7], [0, 0, 0, 7, 7]])\n', (31575, 31683), False, 
'import torch\n'), ((32019, 32038), 'torch.zeros', 'torch.zeros', (['[5, 5]'], {}), '([5, 5])\n', (32030, 32038), False, 'import torch\n'), ((32544, 32613), 'allennlp.nn.util.viterbi_decode', 'util.viterbi_decode', (['sequence_logits', 'transition_matrix', 'observations'], {}), '(sequence_logits, transition_matrix, observations)\n', (32563, 32613), False, 'from allennlp.nn import util\n'), ((32990, 33009), 'torch.zeros', 'torch.zeros', (['[9, 9]'], {}), '([9, 9])\n', (33001, 33009), False, 'import torch\n'), ((33032, 33101), 'allennlp.nn.util.viterbi_decode', 'util.viterbi_decode', (['sequence_logits.data', 'transition_matrix'], {'top_k': '(5)'}), '(sequence_logits.data, transition_matrix, top_k=5)\n', (33051, 33101), False, 'from allennlp.nn import util\n'), ((33131, 33160), 'torch.max', 'torch.max', (['sequence_logits', '(1)'], {}), '(sequence_logits, 1)\n', (33140, 33160), False, 'import torch\n'), ((33384, 33509), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0, 0, 0, 3, 4], [0, 0, 0, 3, 4], [0, 0, 0, 3, 4], [0, 0, 0, 3, 4], [0, 0,\n 0, 3, 4], [0, 0, 0, 3, 4]]'], {}), '([[0, 0, 0, 3, 4], [0, 0, 0, 3, 4], [0, 0, 0, 3, 4], [0, 0,\n 0, 3, 4], [0, 0, 0, 3, 4], [0, 0, 0, 3, 4]])\n', (33401, 33509), False, 'import torch\n'), ((33722, 33741), 'torch.zeros', 'torch.zeros', (['[5, 5]'], {}), '([5, 5])\n', (33733, 33741), False, 'import torch\n'), ((33842, 33906), 'allennlp.nn.util.viterbi_decode', 'util.viterbi_decode', (['sequence_logits', 'transition_matrix'], {'top_k': '(5)'}), '(sequence_logits, transition_matrix, top_k=5)\n', (33861, 33906), False, 'from allennlp.nn import util\n'), ((34097, 34222), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0, 0, 0, 4, 4], [0, 0, 0, 4, 4], [0, 0, 0, 4, 4], [0, 0, 0, 4, 4], [0, 0,\n 0, 4, 4], [0, 0, 0, 4, 0]]'], {}), '([[0, 0, 0, 4, 4], [0, 0, 0, 4, 4], [0, 0, 0, 4, 4], [0, 0,\n 0, 4, 4], [0, 0, 0, 4, 4], [0, 0, 0, 4, 0]])\n', (34114, 34222), False, 'import torch\n'), ((34556, 34575), 'torch.zeros', 'torch.zeros', (['[5, 5]'], 
{}), '([5, 5])\n', (34567, 34575), False, 'import torch\n'), ((34673, 34737), 'allennlp.nn.util.viterbi_decode', 'util.viterbi_decode', (['sequence_logits', 'transition_matrix'], {'top_k': '(5)'}), '(sequence_logits, transition_matrix, top_k=5)\n', (34692, 34737), False, 'from allennlp.nn import util\n'), ((34813, 34874), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1, 0, 0, 4], [1, 0, 6, 2], [0, 3, 0, 4]]'], {}), '([[1, 0, 0, 4], [1, 0, 6, 2], [0, 3, 0, 4]])\n', (34830, 34874), False, 'import torch\n'), ((35029, 35048), 'torch.zeros', 'torch.zeros', (['[4, 4]'], {}), '([4, 4])\n', (35040, 35048), False, 'import torch\n'), ((35146, 35210), 'allennlp.nn.util.viterbi_decode', 'util.viterbi_decode', (['sequence_logits', 'transition_matrix'], {'top_k': '(5)'}), '(sequence_logits, transition_matrix, top_k=5)\n', (35165, 35210), False, 'from allennlp.nn import util\n'), ((37813, 37834), 'torch.rand', 'torch.rand', (['[5, 7, 4]'], {}), '([5, 7, 4])\n', (37823, 37834), False, 'import torch\n'), ((38282, 38347), 'allennlp.nn.util.sequence_cross_entropy_with_logits', 'util.sequence_cross_entropy_with_logits', (['tensor', 'targets', 'weights'], {}), '(tensor, targets, weights)\n', (38321, 38347), False, 'from allennlp.nn import util\n'), ((38364, 38430), 'allennlp.nn.util.sequence_cross_entropy_with_logits', 'util.sequence_cross_entropy_with_logits', (['tensor2', 'targets', 'weights'], {}), '(tensor2, targets, weights)\n', (38403, 38430), False, 'from allennlp.nn import util\n'), ((38584, 38605), 'torch.rand', 'torch.rand', (['[1, 3, 4]'], {}), '([1, 3, 4])\n', (38594, 38605), False, 'import torch\n'), ((38696, 38714), 'torch.ones', 'torch.ones', (['[2, 3]'], {}), '([2, 3])\n', (38706, 38714), False, 'import torch\n'), ((38730, 38820), 'allennlp.nn.util.sequence_cross_entropy_with_logits', 'util.sequence_cross_entropy_with_logits', (['tensor', 'targets', 'weights'], {'label_smoothing': '(0.1)'}), '(tensor, targets, weights,\n label_smoothing=0.1)\n', (38769, 38820), False, 
'from allennlp.nn import util\n'), ((39573, 39594), 'torch.rand', 'torch.rand', (['[5, 7, 4]'], {}), '([5, 7, 4])\n', (39583, 39594), False, 'import torch\n'), ((39886, 39951), 'allennlp.nn.util.sequence_cross_entropy_with_logits', 'util.sequence_cross_entropy_with_logits', (['tensor', 'targets', 'weights'], {}), '(tensor, targets, weights)\n', (39925, 39951), False, 'from allennlp.nn import util\n'), ((39975, 40054), 'allennlp.nn.util.sequence_cross_entropy_with_logits', 'util.sequence_cross_entropy_with_logits', (['tensor', 'targets', 'weights'], {'average': 'None'}), '(tensor, targets, weights, average=None)\n', (40014, 40054), False, 'from allennlp.nn import util\n'), ((40485, 40506), 'torch.rand', 'torch.rand', (['[5, 7, 4]'], {}), '([5, 7, 4])\n', (40495, 40506), False, 'import torch\n'), ((40798, 40885), 'allennlp.nn.util.sequence_cross_entropy_with_logits', 'util.sequence_cross_entropy_with_logits', (['tensor', 'targets', 'weights'], {'average': '"""token"""'}), "(tensor, targets, weights, average=\n 'token')\n", (40837, 40885), False, 'from allennlp.nn import util\n'), ((40904, 40983), 'allennlp.nn.util.sequence_cross_entropy_with_logits', 'util.sequence_cross_entropy_with_logits', (['tensor', 'targets', 'weights'], {'average': 'None'}), '(tensor, targets, weights, average=None)\n', (40943, 40983), False, 'from allennlp.nn import util\n'), ((41454, 41490), 'torch.rand', 'torch.rand', (['[batch, length, classes]'], {}), '([batch, length, classes])\n', (41464, 41490), False, 'import torch\n'), ((41595, 41622), 'torch.ones', 'torch.ones', (['[batch, length]'], {}), '([batch, length])\n', (41605, 41622), False, 'import torch\n'), ((41639, 41717), 'allennlp.nn.util.sequence_cross_entropy_with_logits', 'util.sequence_cross_entropy_with_logits', (['tensor', 'targets', 'weights'], {'gamma': 'gamma'}), '(tensor, targets, weights, gamma=gamma)\n', (41678, 41717), False, 'from allennlp.nn import util\n'), ((42475, 42511), 'torch.rand', 'torch.rand', (['[batch, 
length, classes]'], {}), '([batch, length, classes])\n', (42485, 42511), False, 'import torch\n'), ((42616, 42643), 'torch.ones', 'torch.ones', (['[batch, length]'], {}), '([batch, length])\n', (42626, 42643), False, 'import torch\n'), ((42660, 42738), 'allennlp.nn.util.sequence_cross_entropy_with_logits', 'util.sequence_cross_entropy_with_logits', (['tensor', 'targets', 'weights'], {'alpha': 'alpha'}), '(tensor, targets, weights, alpha=alpha)\n', (42699, 42738), False, 'from allennlp.nn import util\n'), ((43574, 43593), 'torch.tensor', 'torch.tensor', (['alpha'], {}), '(alpha)\n', (43586, 43593), False, 'import torch\n'), ((43612, 43648), 'torch.rand', 'torch.rand', (['[batch, length, classes]'], {}), '([batch, length, classes])\n', (43622, 43648), False, 'import torch\n'), ((43753, 43780), 'torch.ones', 'torch.ones', (['[batch, length]'], {}), '([batch, length])\n', (43763, 43780), False, 'import torch\n'), ((43797, 43875), 'allennlp.nn.util.sequence_cross_entropy_with_logits', 'util.sequence_cross_entropy_with_logits', (['tensor', 'targets', 'weights'], {'alpha': 'alpha'}), '(tensor, targets, weights, alpha=alpha)\n', (43836, 43875), False, 'from allennlp.nn import util\n'), ((44635, 44671), 'torch.rand', 'torch.rand', (['[batch, length, classes]'], {}), '([batch, length, classes])\n', (44645, 44671), False, 'import torch\n'), ((44776, 44803), 'torch.ones', 'torch.ones', (['[batch, length]'], {}), '([batch, length])\n', (44786, 44803), False, 'import torch\n'), ((44820, 44898), 'allennlp.nn.util.sequence_cross_entropy_with_logits', 'util.sequence_cross_entropy_with_logits', (['tensor', 'targets', 'weights'], {'alpha': 'alpha'}), '(tensor, targets, weights, alpha=alpha)\n', (44859, 44898), False, 'from allennlp.nn import util\n'), ((45440, 45506), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]]'], {}), '([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]])\n', (45457, 45506), False, 'import torch\n'), ((45522, 45557), 
'torch.tensor', 'torch.tensor', (['[[True, True, False]]'], {}), '([[True, True, False]])\n', (45534, 45557), False, 'import torch\n'), ((45656, 45731), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['replaced', '[[[1, 2, 3, 4], [5, 6, 7, 8], [2, 2, 2, 2]]]'], {}), '(replaced, [[[1, 2, 3, 4], [5, 6, 7, 8], [2, 2, 2, 2]]])\n', (45675, 45731), False, 'from numpy.testing import assert_array_almost_equal, assert_almost_equal\n'), ((45854, 45890), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.4, 0.1, 0.2]]'], {}), '([[0.4, 0.1, 0.2]])\n', (45871, 45890), False, 'import torch\n'), ((45946, 45995), 'allennlp.nn.util.logsumexp', 'util.logsumexp', (['log_tensor'], {'dim': '(-1)', 'keepdim': '(False)'}), '(log_tensor, dim=-1, keepdim=False)\n', (45960, 45995), False, 'from allennlp.nn import util\n'), ((46083, 46131), 'allennlp.nn.util.logsumexp', 'util.logsumexp', (['log_tensor'], {'dim': '(-1)', 'keepdim': '(True)'}), '(log_tensor, dim=-1, keepdim=True)\n', (46097, 46131), False, 'from allennlp.nn import util\n'), ((46466, 46501), 'torch.FloatTensor', 'torch.FloatTensor', (['[[-200.0, 20.0]]'], {}), '([[-200.0, 20.0]])\n', (46483, 46501), False, 'import torch\n'), ((46592, 46642), 'torch.FloatTensor', 'torch.FloatTensor', (['[[20.0, 20.0], [-200.0, 200.0]]'], {}), '([[20.0, 20.0], [-200.0, 200.0]])\n', (46609, 46642), False, 'import torch\n'), ((46801, 46906), 'numpy.array', 'numpy.array', (['[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 9, 9, 9]], [[2, 1, 0, 7], [7, 7, 2, 3], [\n 0, 0, 4, 2]]]'], {}), '([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 9, 9, 9]], [[2, 1, 0, 7], [7,\n 7, 2, 3], [0, 0, 4, 2]]])\n', (46812, 46906), False, 'import numpy\n'), ((46943, 46982), 'torch.tensor', 'torch.tensor', (['indices'], {'dtype': 'torch.long'}), '(indices, dtype=torch.long)\n', (46955, 46982), False, 'import torch\n'), ((47009, 47058), 'allennlp.nn.util.flatten_and_batch_shift_indices', 'util.flatten_and_batch_shift_indices', (['indices', '(10)'], {}), '(indices, 10)\n', (47045, 
47058), False, 'from allennlp.nn import util\n'), ((47354, 47403), 'numpy.array', 'numpy.array', (['[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]'], {}), '([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])\n', (47365, 47403), False, 'import numpy\n'), ((47627, 47666), 'torch.tensor', 'torch.tensor', (['indices'], {'dtype': 'torch.long'}), '(indices, dtype=torch.long)\n', (47639, 47666), False, 'import torch\n'), ((47686, 47729), 'allennlp.nn.util.batched_index_select', 'util.batched_index_select', (['targets', 'indices'], {}), '(targets, indices)\n', (47711, 47729), False, 'from allennlp.nn import util\n'), ((47799, 47814), 'numpy.ones', 'numpy.ones', (['[3]'], {}), '([3])\n', (47809, 47814), False, 'import numpy\n'), ((48523, 48573), 'numpy.array', 'numpy.array', (['[[[1, 11], [3, 4]], [[5, 6], [7, 8]]]'], {}), '([[[1, 11], [3, 4]], [[5, 6], [7, 8]]])\n', (48534, 48573), False, 'import numpy\n'), ((48592, 48631), 'torch.tensor', 'torch.tensor', (['indices'], {'dtype': 'torch.long'}), '(indices, dtype=torch.long)\n', (48604, 48631), False, 'import torch\n'), ((48755, 48805), 'numpy.array', 'numpy.array', (['[[[1, -1], [3, 4]], [[5, 6], [7, 8]]]'], {}), '([[[1, -1], [3, 4]], [[5, 6], [7, 8]]])\n', (48766, 48805), False, 'import numpy\n'), ((48824, 48863), 'torch.tensor', 'torch.tensor', (['indices'], {'dtype': 'torch.long'}), '(indices, dtype=torch.long)\n', (48836, 48863), False, 'import torch\n'), ((49366, 49406), 'allennlp.nn.util.batched_span_select', 'util.batched_span_select', (['targets', 'spans'], {}), '(targets, spans)\n', (49390, 49406), False, 'from allennlp.nn import util\n'), ((50289, 50318), 'numpy.array', 'numpy.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (50300, 50318), False, 'import numpy\n'), ((50492, 50531), 'torch.tensor', 'torch.tensor', (['indices'], {'dtype': 'torch.long'}), '(indices, dtype=torch.long)\n', (50504, 50531), False, 'import torch\n'), ((50552, 50597), 'allennlp.nn.util.flattened_index_select', 'util.flattened_index_select', 
(['targets', 'indices'], {}), '(targets, indices)\n', (50579, 50597), False, 'from allennlp.nn import util\n'), ((50668, 50683), 'numpy.ones', 'numpy.ones', (['[3]'], {}), '([3])\n', (50678, 50683), False, 'import numpy\n'), ((51586, 51625), 'torch.LongTensor', 'torch.LongTensor', (['[1, 2, 7, 1, 56, 900]'], {}), '([1, 2, 7, 1, 56, 900])\n', (51602, 51625), False, 'import torch\n'), ((51655, 51682), 'allennlp.nn.util.bucket_values', 'util.bucket_values', (['indices'], {}), '(indices)\n', (51673, 51682), False, 'from allennlp.nn import util\n'), ((52038, 52098), 'allennlp.nn.util.add_sentence_boundary_token_ids', 'util.add_sentence_boundary_token_ids', (['tensor', 'mask', 'bos', 'eos'], {}), '(tensor, mask, bos, eos)\n', (52074, 52098), False, 'from allennlp.nn import util\n'), ((52129, 52178), 'numpy.array', 'numpy.array', (['[[9, 1, 2, 3, 10], [9, 4, 5, 10, 0]]'], {}), '([[9, 1, 2, 3, 10], [9, 4, 5, 10, 0]])\n', (52140, 52178), False, 'import numpy\n'), ((52836, 52896), 'allennlp.nn.util.add_sentence_boundary_token_ids', 'util.add_sentence_boundary_token_ids', (['tensor', 'mask', 'bos', 'eos'], {}), '(tensor, mask, bos, eos)\n', (52872, 52896), False, 'from allennlp.nn import util\n'), ((52927, 53102), 'numpy.array', 'numpy.array', (['[[[9, 9, 9, 9], [1, 2, 3, 4], [5, 5, 5, 5], [6, 8, 1, 2], [10, 10, 10, 10]],\n [[9, 9, 9, 9], [4, 3, 2, 1], [8, 7, 6, 5], [10, 10, 10, 10], [0, 0, 0, 0]]]'], {}), '([[[9, 9, 9, 9], [1, 2, 3, 4], [5, 5, 5, 5], [6, 8, 1, 2], [10, \n 10, 10, 10]], [[9, 9, 9, 9], [4, 3, 2, 1], [8, 7, 6, 5], [10, 10, 10, \n 10], [0, 0, 0, 0]]])\n', (52938, 53102), False, 'import numpy\n'), ((53763, 53808), 'allennlp.nn.util.remove_sentence_boundaries', 'util.remove_sentence_boundaries', (['tensor', 'mask'], {}), '(tensor, mask)\n', (53794, 53808), False, 'from allennlp.nn import util\n'), ((53840, 53860), 'torch.zeros', 'torch.zeros', (['(3)', '(3)', '(7)'], {}), '(3, 3, 7)\n', (53851, 53860), False, 'import torch\n'), ((54565, 54705), 'numpy.asarray', 
'numpy.asarray', (['[[0.0, 0.0, 1.0, 1.0], [0.841470957, 9.99999902e-05, 0.540302277, 1.0], [\n 0.909297407, 0.00019999998, -0.416146845, 1.0]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [0.841470957, 9.99999902e-05, \n 0.540302277, 1.0], [0.909297407, 0.00019999998, -0.416146845, 1.0]])\n', (54578, 54705), False, 'import numpy\n'), ((54877, 54899), 'torch.zeros', 'torch.zeros', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (54888, 54899), False, 'import torch\n'), ((54917, 54995), 'allennlp.nn.util.add_positional_features', 'util.add_positional_features', (['tensor'], {'min_timescale': '(1.0)', 'max_timescale': '(10000.0)'}), '(tensor, min_timescale=1.0, max_timescale=10000.0)\n', (54945, 54995), False, 'from allennlp.nn import util\n'), ((55274, 55504), 'numpy.asarray', 'numpy.asarray', (['[[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0], [0.841470957, 0.00999983307, \n 9.99999902e-05, 0.540302277, 0.999949992, 1.0, 0.0], [0.909297407, \n 0.0199986659, 0.00019999998, -0.416146815, 0.999800026, 1.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0], [0.841470957, \n 0.00999983307, 9.99999902e-05, 0.540302277, 0.999949992, 1.0, 0.0], [\n 0.909297407, 0.0199986659, 0.00019999998, -0.416146815, 0.999800026, \n 1.0, 0.0]])\n', (55287, 55504), False, 'import numpy\n'), ((56202, 56224), 'torch.zeros', 'torch.zeros', (['[2, 3, 7]'], {}), '([2, 3, 7])\n', (56213, 56224), False, 'import torch\n'), ((56242, 56320), 'allennlp.nn.util.add_positional_features', 'util.add_positional_features', (['tensor'], {'min_timescale': '(1.0)', 'max_timescale': '(10000.0)'}), '(tensor, min_timescale=1.0, max_timescale=10000.0)\n', (56270, 56320), False, 'from allennlp.nn import util\n'), ((56653, 56673), 'torch.Tensor', 'torch.Tensor', (['[4, 5]'], {}), '([4, 5])\n', (56665, 56673), False, 'import torch\n'), ((57030, 57056), 'torch.Tensor', 'torch.Tensor', (['[4, 5, 4, 5]'], {}), '([4, 5, 4, 5])\n', (57042, 57056), False, 'import torch\n'), ((58967, 58987), 'torch.Tensor', 'torch.Tensor', (['[4, 5]'], {}), '([4, 
5])\n', (58979, 58987), False, 'import torch\n'), ((59350, 59370), 'torch.Tensor', 'torch.Tensor', (['[4, 5]'], {}), '([4, 5])\n', (59362, 59370), False, 'import torch\n'), ((59859, 59899), 'torch.randn', 'torch.randn', (['(1)', 'seq_len_1', 'embedding_dim'], {}), '(1, seq_len_1, embedding_dim)\n', (59870, 59899), False, 'import torch\n'), ((59913, 59953), 'torch.randn', 'torch.randn', (['(1)', 'seq_len_2', 'embedding_dim'], {}), '(1, seq_len_2, embedding_dim)\n', (59924, 59953), False, 'import torch\n'), ((59977, 60043), 'allennlp.nn.util.get_combined_dim', 'util.get_combined_dim', (['combination', '[embedding_dim, embedding_dim]'], {}), '(combination, [embedding_dim, embedding_dim])\n', (59998, 60043), False, 'from allennlp.nn import util\n'), ((60061, 60087), 'torch.Tensor', 'torch.Tensor', (['combined_dim'], {}), '(combined_dim)\n', (60073, 60087), False, 'import torch\n'), ((60493, 60533), 'torch.randn', 'torch.randn', (['(1)', 'seq_len_1', 'embedding_dim'], {}), '(1, seq_len_1, embedding_dim)\n', (60504, 60533), False, 'import torch\n'), ((60547, 60587), 'torch.randn', 'torch.randn', (['(1)', 'seq_len_2', 'embedding_dim'], {}), '(1, seq_len_2, embedding_dim)\n', (60558, 60587), False, 'import torch\n'), ((60611, 60677), 'allennlp.nn.util.get_combined_dim', 'util.get_combined_dim', (['combination', '[embedding_dim, embedding_dim]'], {}), '(combination, [embedding_dim, embedding_dim])\n', (60632, 60677), False, 'from allennlp.nn import util\n'), ((60695, 60721), 'torch.Tensor', 'torch.Tensor', (['combined_dim'], {}), '(combined_dim)\n', (60707, 60721), False, 'import torch\n'), ((61011, 61034), 'torch.tensor', 'torch.tensor', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (61023, 61034), False, 'import torch\n'), ((61524, 61553), 'torch.randn', 'torch.randn', (['(4)', '(10)', '(20)', '(17)', '(5)'], {}), '(4, 10, 20, 17, 5)\n', (61535, 61553), False, 'import torch\n'), ((61574, 61607), 'allennlp.nn.util.combine_initial_dims', 'util.combine_initial_dims', (['tensor'], 
{}), '(tensor)\n', (61599, 61607), False, 'from allennlp.nn import util\n'), ((61736, 61773), 'torch.randn', 'torch.randn', (['(4 * 10 * 20 * 17 * 5)', '(12)'], {}), '(4 * 10 * 20 * 17 * 5, 12)\n', (61747, 61773), False, 'import torch\n'), ((63132, 63179), 'allennlp.nn.util.move_to_device', 'util.move_to_device', (['structured_obj', 'new_device'], {}), '(structured_obj, new_device)\n', (63151, 63179), False, 'from allennlp.nn import util\n'), ((63533, 63555), 'torch.nn.Linear', 'torch.nn.Linear', (['(10)', '(5)'], {}), '(10, 5)\n', (63548, 63555), False, 'import torch\n'), ((63685, 63722), 'allennlp.nn.util.extend_layer', 'util.extend_layer', (['lin_layer', 'new_dim'], {}), '(lin_layer, new_dim)\n', (63702, 63722), False, 'from allennlp.nn import util\n'), ((64401, 64434), 'allennlp.nn.util.masked_topk', 'util.masked_topk', (['scores', 'mask', '(2)'], {}), '(scores, mask, 2)\n', (64417, 64434), False, 'from allennlp.nn import util\n'), ((65486, 65519), 'allennlp.nn.util.masked_topk', 'util.masked_topk', (['scores', 'mask', '(2)'], {}), '(scores, mask, 2)\n', (65502, 65519), False, 'from allennlp.nn import util\n'), ((66665, 66706), 'torch.tensor', 'torch.tensor', (['[3, 2, 1]'], {'dtype': 'torch.long'}), '([3, 2, 1], dtype=torch.long)\n', (66677, 66706), False, 'import torch\n'), ((66761, 66794), 'allennlp.nn.util.masked_topk', 'util.masked_topk', (['scores', 'mask', 'k'], {}), '(scores, mask, k)\n', (66777, 66794), False, 'from allennlp.nn import util\n'), ((67977, 68018), 'torch.tensor', 'torch.tensor', (['[3, 2, 0]'], {'dtype': 'torch.long'}), '([3, 2, 0], dtype=torch.long)\n', (67989, 68018), False, 'import torch\n'), ((68073, 68106), 'allennlp.nn.util.masked_topk', 'util.masked_topk', (['scores', 'mask', 'k'], {}), '(scores, mask, k)\n', (68089, 68106), False, 'from allennlp.nn import util\n'), ((69675, 69712), 'torch.ones', 'torch.ones', (['(3)', '(5)', '(4)'], {'dtype': 'torch.long'}), '(3, 5, 4, dtype=torch.long)\n', (69685, 69712), False, 'import 
torch\n'), ((69996, 70036), 'torch.ones', 'torch.ones', (['(3)', '(2)', '(5)', '(4)'], {'dtype': 'torch.bool'}), '(3, 2, 5, 4, dtype=torch.bool)\n', (70006, 70036), False, 'import torch\n'), ((70413, 70452), 'allennlp.nn.util.masked_topk', 'util.masked_topk', (['items', 'mask', 'k'], {'dim': '(1)'}), '(items, mask, k, dim=1)\n', (70429, 70452), False, 'from allennlp.nn import util\n'), ((949, 974), 'numpy.array', 'numpy.array', (['[3, 2, 6, 1]'], {}), '([3, 2, 6, 1])\n', (960, 974), False, 'import numpy\n'), ((1924, 1947), 'numpy.array', 'numpy.array', (['[260, 260]'], {}), '([260, 260])\n', (1935, 1947), False, 'import numpy\n'), ((2179, 2197), 'torch.Size', 'torch.Size', (['[2, 3]'], {}), '([2, 3])\n', (2189, 2197), False, 'import torch\n'), ((2539, 2557), 'torch.Size', 'torch.Size', (['[2, 3]'], {}), '([2, 3])\n', (2549, 2557), False, 'import torch\n'), ((3773, 3806), 'torch.LongTensor', 'torch.LongTensor', (['[7, 5, 4, 3, 1]'], {}), '([7, 5, 4, 3, 1])\n', (3789, 3806), False, 'import torch\n'), ((4971, 5016), 'numpy.array', 'numpy.array', (['[[0.090031, 0.244728, 0.665241]]'], {}), '([[0.090031, 0.244728, 0.665241]])\n', (4982, 5016), False, 'import numpy\n'), ((5060, 5090), 'numpy.sum', 'numpy.sum', (['vector_1d_softmaxed'], {}), '(vector_1d_softmaxed)\n', (5069, 5090), False, 'import numpy\n'), ((5296, 5340), 'numpy.array', 'numpy.array', (['[[0.017148, 0.046613, 0.93624]]'], {}), '([[0.017148, 0.046613, 0.93624]])\n', (5307, 5340), False, 'import numpy\n'), ((5622, 5673), 'numpy.array', 'numpy.array', (['[[0.33333334, 0.33333334, 0.33333334]]'], {}), '([[0.33333334, 0.33333334, 0.33333334]])\n', (5633, 5673), False, 'import numpy\n'), ((5974, 6067), 'numpy.array', 'numpy.array', (['[[0.01714783, 0.04661262, 0.93623955], [0.09003057, 0.24472847, 0.66524096]]'], {}), '([[0.01714783, 0.04661262, 0.93623955], [0.09003057, 0.24472847,\n 0.66524096]])\n', (5985, 6067), False, 'import numpy\n'), ((6422, 6515), 'numpy.array', 'numpy.array', (['[[0.01714783, 
0.04661262, 0.93623955], [0.33333334, 0.33333334, 0.33333334]]'], {}), '([[0.01714783, 0.04661262, 0.93623955], [0.33333334, 0.33333334,\n 0.33333334]])\n', (6433, 6515), False, 'import numpy\n'), ((6891, 6935), 'numpy.array', 'numpy.array', (['[[0.01798621, 0.0, 0.98201382]]'], {}), '([[0.01798621, 0.0, 0.98201382]])\n', (6902, 6935), False, 'import numpy\n'), ((7211, 7267), 'numpy.array', 'numpy.array', (['[[0.01321289, 0.0, 0.26538793, 0.72139918]]'], {}), '([[0.01321289, 0.0, 0.26538793, 0.72139918]])\n', (7222, 7267), False, 'import numpy\n'), ((7642, 7669), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (7653, 7669), False, 'import numpy\n'), ((8036, 8071), 'numpy.array', 'numpy.array', (['[[0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0]])\n', (8047, 8071), False, 'import numpy\n'), ((8434, 8469), 'numpy.array', 'numpy.array', (['[[0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0]])\n', (8445, 8469), False, 'import numpy\n'), ((8815, 8843), 'numpy.array', 'numpy.array', (['[[0.5, 0.5, 0]]'], {}), '([[0.5, 0.5, 0]])\n', (8826, 8843), False, 'import numpy\n'), ((9204, 9280), 'numpy.array', 'numpy.array', (['[[0.01798621, 0.0, 0.98201382], [0.090031, 0.244728, 0.665241]]'], {}), '([[0.01798621, 0.0, 0.98201382], [0.090031, 0.244728, 0.665241]])\n', (9215, 9280), False, 'import numpy\n'), ((9705, 9767), 'numpy.array', 'numpy.array', (['[[0.5, 0.0, 0.5], [0.090031, 0.244728, 0.665241]]'], {}), '([[0.5, 0.0, 0.5], [0.090031, 0.244728, 0.665241]])\n', (9716, 9767), False, 'import numpy\n'), ((10193, 10240), 'numpy.array', 'numpy.array', (['[[0.5, 0.0, 0.5], [0.0, 0.0, 0.0]]'], {}), '([[0.5, 0.0, 0.5], [0.0, 0.0, 0.0]])\n', (10204, 10240), False, 'import numpy\n'), ((10550, 10611), 'numpy.array', 'numpy.array', (['[[0.0, 0.0, 0.0], [0.11920292, 0.0, 0.88079708]]'], {}), '([[0.0, 0.0, 0.0], [0.11920292, 0.0, 0.88079708]])\n', (10561, 10611), False, 'import numpy\n'), ((11022, 11066), 'numpy.array', 'numpy.array', 
(['[[0.01798621, 0.0, 0.98201382]]'], {}), '([[0.01798621, 0.0, 0.98201382]])\n', (11033, 11066), False, 'import numpy\n'), ((11387, 11443), 'numpy.array', 'numpy.array', (['[[0.01321289, 0.0, 0.26538793, 0.72139918]]'], {}), '([[0.01321289, 0.0, 0.26538793, 0.72139918]])\n', (11398, 11443), False, 'import numpy\n'), ((11863, 11890), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (11874, 11890), False, 'import numpy\n'), ((12302, 12341), 'numpy.array', 'numpy.array', (['[[0.25, 0.25, 0.25, 0.25]]'], {}), '([[0.25, 0.25, 0.25, 0.25]])\n', (12313, 12341), False, 'import numpy\n'), ((12749, 12788), 'numpy.array', 'numpy.array', (['[[0.25, 0.25, 0.25, 0.25]]'], {}), '([[0.25, 0.25, 0.25, 0.25]])\n', (12760, 12788), False, 'import numpy\n'), ((13179, 13207), 'numpy.array', 'numpy.array', (['[[0.5, 0.5, 0]]'], {}), '([[0.5, 0.5, 0]])\n', (13190, 13207), False, 'import numpy\n'), ((13613, 13689), 'numpy.array', 'numpy.array', (['[[0.01798621, 0.0, 0.98201382], [0.090031, 0.244728, 0.665241]]'], {}), '([[0.01798621, 0.0, 0.98201382], [0.090031, 0.244728, 0.665241]])\n', (13624, 13689), False, 'import numpy\n'), ((14159, 14221), 'numpy.array', 'numpy.array', (['[[0.5, 0.0, 0.5], [0.090031, 0.244728, 0.665241]]'], {}), '([[0.5, 0.0, 0.5], [0.090031, 0.244728, 0.665241]])\n', (14170, 14221), False, 'import numpy\n'), ((14704, 14772), 'numpy.array', 'numpy.array', (['[[0.5, 0.0, 0.5], [0.33333333, 0.33333333, 0.33333333]]'], {}), '([[0.5, 0.0, 0.5], [0.33333333, 0.33333333, 0.33333333]])\n', (14715, 14772), False, 'import numpy\n'), ((15140, 15227), 'numpy.array', 'numpy.array', (['[[0.33333333, 0.33333333, 0.33333333], [0.11920292, 0.0, 0.88079708]]'], {}), '([[0.33333333, 0.33333333, 0.33333333], [0.11920292, 0.0, \n 0.88079708]])\n', (15151, 15227), False, 'import numpy\n'), ((15745, 15775), 'numpy.exp', 'numpy.exp', (['vector_1d_softmaxed'], {}), '(vector_1d_softmaxed)\n', (15754, 15775), False, 'import numpy\n'), ((15777, 15821), 
'numpy.array', 'numpy.array', (['[[0.01798621, 0.0, 0.98201382]]'], {}), '([[0.01798621, 0.0, 0.98201382]])\n', (15788, 15821), False, 'import numpy\n'), ((16089, 16119), 'numpy.exp', 'numpy.exp', (['vector_1d_softmaxed'], {}), '(vector_1d_softmaxed)\n', (16098, 16119), False, 'import numpy\n'), ((16121, 16177), 'numpy.array', 'numpy.array', (['[[0.01321289, 0.0, 0.26538793, 0.72139918]]'], {}), '([[0.01321289, 0.0, 0.26538793, 0.72139918]])\n', (16132, 16177), False, 'import numpy\n'), ((16548, 16578), 'numpy.exp', 'numpy.exp', (['vector_1d_softmaxed'], {}), '(vector_1d_softmaxed)\n', (16557, 16578), False, 'import numpy\n'), ((16580, 16615), 'numpy.array', 'numpy.array', (['[[0.0, 0.0, 0.0, 1.0]]'], {}), '([[0.0, 0.0, 0.0, 1.0]])\n', (16591, 16615), False, 'import numpy\n'), ((18043, 18067), 'numpy.array', 'numpy.array', (['[5.0, -1.0]'], {}), '([5.0, -1.0])\n', (18054, 18067), False, 'import numpy\n'), ((18410, 18438), 'numpy.array', 'numpy.array', (['[[5.0], [-1.0]]'], {}), '([[5.0], [-1.0]])\n', (18421, 18438), False, 'import numpy\n'), ((18817, 18856), 'numpy.array', 'numpy.array', (['[[5.0, 2.0], [-1.0, -0.5]]'], {}), '([[5.0, 2.0], [-1.0, -0.5]])\n', (18828, 18856), False, 'import numpy\n'), ((19839, 19863), 'numpy.array', 'numpy.array', (['[3.0, -1.5]'], {}), '([3.0, -1.5])\n', (19850, 19863), False, 'import numpy\n'), ((20205, 20233), 'numpy.array', 'numpy.array', (['[[3.0], [-1.5]]'], {}), '([[3.0], [-1.5]])\n', (20216, 20233), False, 'import numpy\n'), ((20611, 20651), 'numpy.array', 'numpy.array', (['[[3.0, 0.5], [-1.5, -1.75]]'], {}), '([[3.0, 0.5], [-1.5, -1.75]])\n', (20622, 20651), False, 'import numpy\n'), ((28058, 28146), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['aggregated_array[0, i]', 'expected_array'], {'decimal': '(5)'}), '(aggregated_array[0, i], expected_array,\n decimal=5)\n', (28091, 28146), False, 'import numpy\n'), ((28322, 28340), 'torch.rand', 'torch.rand', (['[5, 9]'], {}), '([5, 9])\n', (28332, 
28340), False, 'import torch\n'), ((28727, 28745), 'torch.rand', 'torch.rand', (['[5, 9]'], {}), '([5, 9])\n', (28737, 28745), False, 'import torch\n'), ((32942, 32960), 'torch.rand', 'torch.rand', (['[5, 9]'], {}), '([5, 9])\n', (32952, 32960), False, 'import torch\n'), ((36791, 36811), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (36805, 36811), False, 'import random\n'), ((36834, 36854), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (36848, 36854), False, 'import random\n'), ((36871, 36891), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (36885, 36891), False, 'import random\n'), ((36922, 36953), 'torch.rand', 'torch.rand', (['[seq_len, num_tags]'], {}), '([seq_len, num_tags])\n', (36932, 36953), False, 'import torch\n'), ((36986, 37018), 'torch.rand', 'torch.rand', (['[num_tags, num_tags]'], {}), '([num_tags, num_tags])\n', (36996, 37018), False, 'import torch\n'), ((37069, 37133), 'allennlp.nn.util.viterbi_decode', 'util.viterbi_decode', (['sequence_logits', 'transition_matrix'], {'top_k': 'k'}), '(sequence_logits, transition_matrix, top_k=k)\n', (37088, 37133), False, 'from allennlp.nn import util\n'), ((38203, 38237), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(3)', '[5, 7]'], {}), '(0, 3, [5, 7])\n', (38223, 38237), False, 'import numpy\n'), ((38641, 38675), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(3)', '[1, 3]'], {}), '(0, 3, [1, 3])\n', (38661, 38675), False, 'import numpy\n'), ((38969, 39020), 'torch.nn.functional.log_softmax', 'torch.nn.functional.log_softmax', (['prediction'], {'dim': '(-1)'}), '(prediction, dim=-1)\n', (39000, 39020), False, 'import torch\n'), ((39807, 39841), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(3)', '[5, 7]'], {}), '(0, 3, [5, 7])\n', (39827, 39841), False, 'import numpy\n'), ((40719, 40753), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(3)', '[5, 7]'], {}), '(0, 3, [5, 7])\n', (40739, 40753), 
False, 'import numpy\n'), ((41401, 41421), 'numpy.random.randn', 'numpy.random.randn', ([], {}), '()\n', (41419, 41421), False, 'import numpy\n'), ((41526, 41575), 'numpy.random.randint', 'numpy.random.randint', (['(0)', 'classes', '[batch, length]'], {}), '(0, classes, [batch, length])\n', (41546, 41575), False, 'import numpy\n'), ((41834, 41876), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['logit'], {'dim': '(-1)'}), '(logit, dim=-1)\n', (41861, 41876), False, 'import torch\n'), ((42355, 42374), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (42372, 42374), False, 'import numpy\n'), ((42547, 42596), 'numpy.random.randint', 'numpy.random.randint', (['(0)', 'classes', '[batch, length]'], {}), '(0, classes, [batch, length])\n', (42567, 42596), False, 'import numpy\n'), ((42858, 42904), 'torch.nn.functional.log_softmax', 'torch.nn.functional.log_softmax', (['logit'], {'dim': '(-1)'}), '(logit, dim=-1)\n', (42889, 42904), False, 'import torch\n'), ((43456, 43475), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (43473, 43475), False, 'import numpy\n'), ((43684, 43733), 'numpy.random.randint', 'numpy.random.randint', (['(0)', 'classes', '[batch, length]'], {}), '(0, classes, [batch, length])\n', (43704, 43733), False, 'import numpy\n'), ((43995, 44041), 'torch.nn.functional.log_softmax', 'torch.nn.functional.log_softmax', (['logit'], {'dim': '(-1)'}), '(logit, dim=-1)\n', (44026, 44041), False, 'import torch\n'), ((44575, 44602), 'numpy.random.randn', 'numpy.random.randn', (['classes'], {}), '(classes)\n', (44593, 44602), False, 'import numpy\n'), ((44707, 44756), 'numpy.random.randint', 'numpy.random.randint', (['(0)', 'classes', '[batch, length]'], {}), '(0, classes, [batch, length])\n', (44727, 44756), False, 'import numpy\n'), ((45018, 45064), 'torch.nn.functional.log_softmax', 'torch.nn.functional.log_softmax', (['logit'], {'dim': '(-1)'}), '(logit, dim=-1)\n', (45049, 45064), False, 'import torch\n'), ((47155, 47256), 
'numpy.array', 'numpy.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 9, 12, 11, 10, 17, 17, 17, 12, 13, 10, 10,\n 14, 12]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 9, 12, 11, 10, 17, 17, 17, 12,\n 13, 10, 10, 14, 12])\n', (47166, 47256), False, 'import numpy\n'), ((48645, 48678), 'pytest.raises', 'pytest.raises', (['ConfigurationError'], {}), '(ConfigurationError)\n', (48658, 48678), False, 'import pytest\n'), ((48692, 48735), 'allennlp.nn.util.batched_index_select', 'util.batched_index_select', (['targets', 'indices'], {}), '(targets, indices)\n', (48717, 48735), False, 'from allennlp.nn import util\n'), ((48877, 48910), 'pytest.raises', 'pytest.raises', (['ConfigurationError'], {}), '(ConfigurationError)\n', (48890, 48910), False, 'import pytest\n'), ((48924, 48967), 'allennlp.nn.util.batched_index_select', 'util.batched_index_select', (['targets', 'indices'], {}), '(targets, indices)\n', (48949, 48967), False, 'from allennlp.nn import util\n'), ((51426, 51459), 'pytest.raises', 'pytest.raises', (['ConfigurationError'], {}), '(ConfigurationError)\n', (51439, 51459), False, 'import pytest\n'), ((51765, 51796), 'numpy.array', 'numpy.array', (['[1, 2, 5, 1, 8, 9]'], {}), '([1, 2, 5, 1, 8, 9])\n', (51776, 51796), False, 'import numpy\n'), ((51911, 51946), 'numpy.array', 'numpy.array', (['[[1, 2, 3], [4, 5, 0]]'], {}), '([[1, 2, 3], [4, 5, 0]])\n', (51922, 51946), False, 'import numpy\n'), ((52440, 52545), 'numpy.array', 'numpy.array', (['[[[1, 2, 3, 4], [5, 5, 5, 5], [6, 8, 1, 2]], [[4, 3, 2, 1], [8, 7, 6, 5], [\n 0, 0, 0, 0]]]'], {}), '([[[1, 2, 3, 4], [5, 5, 5, 5], [6, 8, 1, 2]], [[4, 3, 2, 1], [8,\n 7, 6, 5], [0, 0, 0, 0]]])\n', (52451, 52545), False, 'import numpy\n'), ((52716, 52741), 'numpy.array', 'numpy.array', (['[9, 9, 9, 9]'], {}), '([9, 9, 9, 9])\n', (52727, 52741), False, 'import numpy\n'), ((52774, 52803), 'numpy.array', 'numpy.array', (['[10, 10, 10, 10]'], {}), '([10, 10, 10, 10])\n', (52785, 52803), False, 'import numpy\n'), ((53407, 53433), 
'numpy.random.rand', 'numpy.random.rand', (['(3)', '(5)', '(7)'], {}), '(3, 5, 7)\n', (53424, 53433), False, 'import numpy\n'), ((56584, 56608), 'torch.Tensor', 'torch.Tensor', (['[[[2, 3]]]'], {}), '([[[2, 3]]])\n', (56596, 56608), False, 'import torch\n'), ((56610, 56634), 'torch.Tensor', 'torch.Tensor', (['[[[5, 5]]]'], {}), '([[[5, 5]]])\n', (56622, 56634), False, 'import torch\n'), ((56742, 56805), 'allennlp.nn.util.combine_tensors_and_multiply', 'util.combine_tensors_and_multiply', (['combination', 'tensors', 'weight'], {}), '(combination, tensors, weight)\n', (56775, 56805), False, 'from allennlp.nn import util\n'), ((56896, 56959), 'allennlp.nn.util.combine_tensors_and_multiply', 'util.combine_tensors_and_multiply', (['combination', 'tensors', 'weight'], {}), '(combination, tensors, weight)\n', (56929, 56959), False, 'from allennlp.nn import util\n'), ((57098, 57162), 'allennlp.nn.util.combine_tensors_and_multiply', 'util.combine_tensors_and_multiply', (['combination', 'tensors', 'weight2'], {}), '(combination, tensors, weight2)\n', (57131, 57162), False, 'from allennlp.nn import util\n'), ((57265, 57328), 'allennlp.nn.util.combine_tensors_and_multiply', 'util.combine_tensors_and_multiply', (['combination', 'tensors', 'weight'], {}), '(combination, tensors, weight)\n', (57298, 57328), False, 'from allennlp.nn import util\n'), ((57430, 57493), 'allennlp.nn.util.combine_tensors_and_multiply', 'util.combine_tensors_and_multiply', (['combination', 'tensors', 'weight'], {}), '(combination, tensors, weight)\n', (57463, 57493), False, 'from allennlp.nn import util\n'), ((57593, 57656), 'allennlp.nn.util.combine_tensors_and_multiply', 'util.combine_tensors_and_multiply', (['combination', 'tensors', 'weight'], {}), '(combination, tensors, weight)\n', (57626, 57656), False, 'from allennlp.nn import util\n'), ((57756, 57819), 'allennlp.nn.util.combine_tensors_and_multiply', 'util.combine_tensors_and_multiply', (['combination', 'tensors', 'weight'], {}), '(combination, 
tensors, weight)\n', (57789, 57819), False, 'from allennlp.nn import util\n'), ((57921, 57984), 'allennlp.nn.util.combine_tensors_and_multiply', 'util.combine_tensors_and_multiply', (['combination', 'tensors', 'weight'], {}), '(combination, tensors, weight)\n', (57954, 57984), False, 'from allennlp.nn import util\n'), ((58132, 58195), 'allennlp.nn.util.combine_tensors_and_multiply', 'util.combine_tensors_and_multiply', (['combination', 'tensors', 'weight'], {}), '(combination, tensors, weight)\n', (58165, 58195), False, 'from allennlp.nn import util\n'), ((58287, 58320), 'pytest.raises', 'pytest.raises', (['ConfigurationError'], {}), '(ConfigurationError)\n', (58300, 58320), False, 'import pytest\n'), ((58334, 58393), 'allennlp.nn.util.combine_tensors_and_multiply', 'util.combine_tensors_and_multiply', (['"""x+y+y"""', 'tensors', 'weight'], {}), "('x+y+y', tensors, weight)\n", (58367, 58393), False, 'from allennlp.nn import util\n'), ((58408, 58441), 'pytest.raises', 'pytest.raises', (['ConfigurationError'], {}), '(ConfigurationError)\n', (58421, 58441), False, 'import pytest\n'), ((58455, 58512), 'allennlp.nn.util.combine_tensors_and_multiply', 'util.combine_tensors_and_multiply', (['"""x%y"""', 'tensors', 'weight'], {}), "('x%y', tensors, weight)\n", (58488, 58512), False, 'from allennlp.nn import util\n'), ((58885, 58935), 'torch.Tensor', 'torch.Tensor', (['[[[5, 5], [4, 4]], [[2, 3], [1, 1]]]'], {}), '([[[5, 5], [4, 4]], [[2, 3], [1, 1]]])\n', (58897, 58935), False, 'import torch\n'), ((59064, 59127), 'allennlp.nn.util.combine_tensors_and_multiply', 'util.combine_tensors_and_multiply', (['combination', 'tensors', 'weight'], {}), '(combination, tensors, weight)\n', (59097, 59127), False, 'from allennlp.nn import util\n'), ((59223, 59273), 'torch.Tensor', 'torch.Tensor', (['[[[5, 5], [2, 2]], [[4, 4], [3, 3]]]'], {}), '([[[5, 5], [2, 2]], [[4, 4], [3, 3]]])\n', (59235, 59273), False, 'import torch\n'), ((59287, 59321), 'torch.Tensor', 'torch.Tensor', (['[[[2, 
3]], [[1, 1]]]'], {}), '([[[2, 3]], [[1, 1]]])\n', (59299, 59321), False, 'import torch\n'), ((59440, 59503), 'allennlp.nn.util.combine_tensors_and_multiply', 'util.combine_tensors_and_multiply', (['combination', 'tensors', 'weight'], {}), '(combination, tensors, weight)\n', (59473, 59503), False, 'from allennlp.nn import util\n'), ((61836, 61866), 'torch.Size', 'torch.Size', (['(4, 10, 20, 17, 5)'], {}), '((4, 10, 20, 17, 5))\n', (61846, 61866), False, 'import torch\n'), ((62262, 62289), 'allennlp.models.load_archive', 'load_archive', (['model_archive'], {}), '(model_archive)\n', (62274, 62289), False, 'from allennlp.models import load_archive\n'), ((62387, 62402), 'json.load', 'json.load', (['file'], {}), '(file)\n', (62396, 62402), False, 'import json\n'), ((62448, 62478), 'allennlp.nn.util.inspect_parameters', 'util.inspect_parameters', (['model'], {}), '(model)\n', (62471, 62478), False, 'from allennlp.nn import util\n'), ((64640, 64677), 'numpy.array', 'numpy.array', (['[[0, 1], [1, 2], [2, 3]]'], {}), '([[0, 1], [1, 2], [2, 3]])\n', (64651, 64677), False, 'import numpy\n'), ((64755, 64773), 'numpy.ones', 'numpy.ones', (['[3, 2]'], {}), '([3, 2])\n', (64765, 64773), False, 'import numpy\n'), ((65765, 65794), 'numpy.array', 'numpy.array', (['[[0, 1], [1, 2]]'], {}), '([[0, 1], [1, 2]])\n', (65776, 65794), False, 'import numpy\n'), ((65885, 65922), 'numpy.array', 'numpy.array', (['[[1, 1], [1, 1], [0, 0]]'], {}), '([[1, 1], [1, 1], [0, 0]])\n', (65896, 65922), False, 'import numpy\n'), ((67000, 67046), 'numpy.array', 'numpy.array', (['[[0, 1, 3], [1, 2, 2], [1, 2, 2]]'], {}), '([[0, 1, 3], [1, 2, 2], [1, 2, 2]])\n', (67011, 67046), False, 'import numpy\n'), ((67137, 67183), 'numpy.array', 'numpy.array', (['[[1, 1, 1], [1, 1, 0], [1, 0, 0]]'], {}), '([[1, 1, 1], [1, 1, 0], [1, 0, 0]])\n', (67148, 67183), False, 'import numpy\n'), ((68466, 68512), 'numpy.array', 'numpy.array', (['[[0, 1, 2], [1, 2, 2], [3, 3, 3]]'], {}), '([[0, 1, 2], [1, 2, 2], [3, 3, 3]])\n', 
(68477, 68512), False, 'import numpy\n'), ((68603, 68649), 'numpy.array', 'numpy.array', (['[[1, 1, 1], [1, 1, 0], [0, 0, 0]]'], {}), '([[1, 1, 1], [1, 1, 0], [0, 0, 0]])\n', (68614, 68649), False, 'import numpy\n'), ((70950, 70967), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (70962, 70967), False, 'import torch\n'), ((70969, 70986), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (70981, 70986), False, 'import torch\n'), ((71114, 71134), 'torch.tensor', 'torch.tensor', (['[True]'], {}), '([True])\n', (71126, 71134), False, 'import torch\n'), ((71136, 71156), 'torch.tensor', 'torch.tensor', (['[True]'], {}), '([True])\n', (71148, 71156), False, 'import torch\n'), ((71215, 71232), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (71227, 71232), False, 'import torch\n'), ((71234, 71253), 'torch.tensor', 'torch.tensor', (['[1.0]'], {}), '([1.0])\n', (71246, 71253), False, 'import torch\n'), ((71289, 71306), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (71301, 71306), False, 'import torch\n'), ((71308, 71328), 'torch.tensor', 'torch.tensor', (['[True]'], {}), '([True])\n', (71320, 71328), False, 'import torch\n'), ((1761, 1779), 'torch.ones', 'torch.ones', (['(2)', '(260)'], {}), '(2, 260)\n', (1771, 1779), False, 'import torch\n'), ((2224, 2272), 'allennlp.nn.util.clamp_tensor', 'util.clamp_tensor', (['tensor'], {'minimum': '(-3)', 'maximum': '(3)'}), '(tensor, minimum=-3, maximum=3)\n', (2241, 2272), False, 'from allennlp.nn import util\n'), ((2584, 2632), 'allennlp.nn.util.clamp_tensor', 'util.clamp_tensor', (['tensor'], {'minimum': '(-3)', 'maximum': '(3)'}), '(tensor, minimum=-3, maximum=3)\n', (2601, 2632), False, 'from allennlp.nn import util\n'), ((18625, 18681), 'torch.tensor', 'torch.tensor', (['[[True, False, True], [True, True, False]]'], {}), '([[True, False, True], [True, True, False]])\n', (18637, 18681), False, 'import torch\n'), ((20420, 20476), 'torch.tensor', 'torch.tensor', (['[[True, False, True], 
[True, True, False]]'], {}), '([[True, False, True], [True, True, False]])\n', (20432, 20476), False, 'import torch\n'), ((22149, 22201), 'torch.LongTensor', 'torch.LongTensor', (['[[3, 4, 5, 0, 0], [1, 2, 0, 0, 0]]'], {}), '([[3, 4, 5, 0, 0], [1, 2, 0, 0, 0]])\n', (22165, 22201), False, 'import torch\n'), ((22239, 22345), 'torch.LongTensor', 'torch.LongTensor', (['[[[1, 2], [3, 0], [2, 0], [0, 0], [0, 0]], [[5, 0], [4, 6], [0, 0], [0, 0],\n [0, 0]]]'], {}), '([[[1, 2], [3, 0], [2, 0], [0, 0], [0, 0]], [[5, 0], [4, 6],\n [0, 0], [0, 0], [0, 0]]])\n', (22255, 22345), False, 'import torch\n'), ((22818, 22932), 'torch.LongTensor', 'torch.LongTensor', (['[[[1, 2, 3], [3, 0, 1], [2, 1, 0], [0, 0, 0]], [[5, 5, 5], [4, 6, 0], [0, 0,\n 0], [0, 0, 0]]]'], {}), '([[[1, 2, 3], [3, 0, 1], [2, 1, 0], [0, 0, 0]], [[5, 5, 5],\n [4, 6, 0], [0, 0, 0], [0, 0, 0]]])\n', (22834, 22932), False, 'import torch\n'), ((23383, 23489), 'torch.LongTensor', 'torch.LongTensor', (['[[[1, 2], [3, 0], [2, 0], [0, 0], [0, 0]], [[5, 0], [4, 6], [0, 0], [0, 0],\n [0, 0]]]'], {}), '([[[1, 2], [3, 0], [2, 0], [0, 0], [0, 0]], [[5, 0], [4, 6],\n [0, 0], [0, 0], [0, 0]]])\n', (23399, 23489), False, 'import torch\n'), ((24073, 24125), 'torch.LongTensor', 'torch.LongTensor', (['[[3, 4, 5, 0, 0], [1, 2, 0, 0, 0]]'], {}), '([[3, 4, 5, 0, 0], [1, 2, 0, 0, 0]])\n', (24089, 24125), False, 'import torch\n'), ((24151, 24187), 'torch.tensor', 'torch.tensor', (['[[False, False, True]]'], {}), '([[False, False, True]])\n', (24163, 24187), False, 'import torch\n'), ((24583, 24615), 'torch.from_numpy', 'torch.from_numpy', (['sentence_array'], {}), '(sentence_array)\n', (24599, 24615), False, 'import torch\n'), ((25572, 25604), 'torch.from_numpy', 'torch.from_numpy', (['sentence_array'], {}), '(sentence_array)\n', (25588, 25604), False, 'import torch\n'), ((25640, 25673), 'torch.from_numpy', 'torch.from_numpy', (['attention_array'], {}), '(attention_array)\n', (25656, 25673), False, 'import torch\n'), ((26509, 26541), 
'torch.from_numpy', 'torch.from_numpy', (['sentence_array'], {}), '(sentence_array)\n', (26525, 26541), False, 'import torch\n'), ((26577, 26610), 'torch.from_numpy', 'torch.from_numpy', (['attention_array'], {}), '(attention_array)\n', (26593, 26610), False, 'import torch\n'), ((27086, 27177), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['aggregated_array[0, i, j]', 'expected_array'], {'decimal': '(5)'}), '(aggregated_array[0, i, j], expected_array,\n decimal=5)\n', (27119, 27177), False, 'import numpy\n'), ((27555, 27587), 'torch.from_numpy', 'torch.from_numpy', (['sentence_array'], {}), '(sentence_array)\n', (27571, 27587), False, 'import torch\n'), ((27623, 27656), 'torch.from_numpy', 'torch.from_numpy', (['attention_array'], {}), '(attention_array)\n', (27639, 27656), False, 'import torch\n'), ((37488, 37514), 'allennlp.common.util.sanitize', 'sanitize', (['viterbi_paths_v1'], {}), '(viterbi_paths_v1)\n', (37496, 37514), False, 'from allennlp.common.util import sanitize\n'), ((42378, 42397), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (42395, 42397), False, 'import numpy\n'), ((42416, 42435), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (42433, 42435), False, 'import numpy\n'), ((43479, 43498), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (43496, 43498), False, 'import numpy\n'), ((43517, 43536), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (43534, 43536), False, 'import numpy\n'), ((51510, 51531), 'torch.ones', 'torch.ones', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (51520, 51531), False, 'import torch\n'), ((64096, 64118), 'torch.randn', 'torch.randn', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (64107, 64118), False, 'import torch\n'), ((64275, 64293), 'torch.ones', 'torch.ones', (['[3, 4]'], {}), '([3, 4])\n', (64285, 64293), False, 'import torch\n'), ((65122, 65144), 'torch.randn', 'torch.randn', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (65133, 65144), False, 'import torch\n'), 
((65301, 65319), 'torch.ones', 'torch.ones', (['[3, 4]'], {}), '([3, 4])\n', (65311, 65319), False, 'import torch\n'), ((66313, 66335), 'torch.randn', 'torch.randn', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (66324, 66335), False, 'import torch\n'), ((66603, 66621), 'torch.ones', 'torch.ones', (['[3, 4]'], {}), '([3, 4])\n', (66613, 66621), False, 'import torch\n'), ((67713, 67735), 'torch.randn', 'torch.randn', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (67724, 67735), False, 'import torch\n'), ((67892, 67910), 'torch.ones', 'torch.ones', (['[3, 4]'], {}), '([3, 4])\n', (67902, 67910), False, 'import torch\n'), ((71026, 71043), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (71038, 71043), False, 'import torch\n'), ((71045, 71062), 'torch.tensor', 'torch.tensor', (['[2]'], {}), '([2])\n', (71057, 71062), False, 'import torch\n'), ((71387, 71404), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (71399, 71404), False, 'import torch\n'), ((71408, 71425), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (71420, 71425), False, 'import torch\n'), ((71550, 71567), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (71562, 71567), False, 'import torch\n'), ((71578, 71595), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (71590, 71595), False, 'import torch\n'), ((1104, 1160), 'allennlp.nn.util.get_mask_from_sequence_lengths', 'util.get_mask_from_sequence_lengths', (['sequence_lengths', '(5)'], {}), '(sequence_lengths, 5)\n', (1139, 1160), False, 'from allennlp.nn import util\n'), ((4853, 4889), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'None'], {}), '(vector_1d, None)\n', (4872, 4889), False, 'from allennlp.nn import util\n'), ((5191, 5227), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'None'], {}), '(vector_1d, None)\n', (5210, 5227), False, 'from allennlp.nn import util\n'), ((5500, 5538), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_zero', 'None'], 
{}), '(vector_zero, None)\n', (5519, 5538), False, 'from allennlp.nn import util\n'), ((5843, 5876), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['matrix', 'None'], {}), '(matrix, None)\n', (5862, 5876), False, 'from allennlp.nn import util\n'), ((6291, 6324), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['matrix', 'None'], {}), '(matrix, None)\n', (6310, 6324), False, 'from allennlp.nn import util\n'), ((6783, 6822), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'mask_1d'], {}), '(vector_1d, mask_1d)\n', (6802, 6822), False, 'from allennlp.nn import util\n'), ((7090, 7129), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'mask_1d'], {}), '(vector_1d, mask_1d)\n', (7109, 7129), False, 'from allennlp.nn import util\n'), ((7534, 7573), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'mask_1d'], {}), '(vector_1d, mask_1d)\n', (7553, 7573), False, 'from allennlp.nn import util\n'), ((7928, 7967), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'mask_1d'], {}), '(vector_1d, mask_1d)\n', (7947, 7967), False, 'from allennlp.nn import util\n'), ((8326, 8365), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'mask_1d'], {}), '(vector_1d, mask_1d)\n', (8345, 8365), False, 'from allennlp.nn import util\n'), ((8707, 8746), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'mask_1d'], {}), '(vector_1d, mask_1d)\n', (8726, 8746), False, 'from allennlp.nn import util\n'), ((9073, 9106), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['matrix', 'mask'], {}), '(matrix, mask)\n', (9092, 9106), False, 'from allennlp.nn import util\n'), ((9586, 9619), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['matrix', 'mask'], {}), '(matrix, mask)\n', (9605, 9619), False, 'from allennlp.nn import util\n'), ((10074, 10107), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['matrix', 
'mask'], {}), '(matrix, mask)\n', (10093, 10107), False, 'from allennlp.nn import util\n'), ((10431, 10464), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['matrix', 'mask'], {}), '(matrix, mask)\n', (10450, 10464), False, 'from allennlp.nn import util\n'), ((10869, 10931), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'mask_1d'], {'memory_efficient': '(True)'}), '(vector_1d, mask_1d, memory_efficient=True)\n', (10888, 10931), False, 'from allennlp.nn import util\n'), ((11221, 11283), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'mask_1d'], {'memory_efficient': '(True)'}), '(vector_1d, mask_1d, memory_efficient=True)\n', (11240, 11283), False, 'from allennlp.nn import util\n'), ((11710, 11772), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'mask_1d'], {'memory_efficient': '(True)'}), '(vector_1d, mask_1d, memory_efficient=True)\n', (11729, 11772), False, 'from allennlp.nn import util\n'), ((12149, 12211), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'mask_1d'], {'memory_efficient': '(True)'}), '(vector_1d, mask_1d, memory_efficient=True)\n', (12168, 12211), False, 'from allennlp.nn import util\n'), ((12596, 12658), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'mask_1d'], {'memory_efficient': '(True)'}), '(vector_1d, mask_1d, memory_efficient=True)\n', (12615, 12658), False, 'from allennlp.nn import util\n'), ((13026, 13088), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['vector_1d', 'mask_1d'], {'memory_efficient': '(True)'}), '(vector_1d, mask_1d, memory_efficient=True)\n', (13045, 13088), False, 'from allennlp.nn import util\n'), ((13437, 13493), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['matrix', 'mask'], {'memory_efficient': '(True)'}), '(matrix, mask, memory_efficient=True)\n', (13456, 13493), False, 'from allennlp.nn import util\n'), ((13995, 14051), 
'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['matrix', 'mask'], {'memory_efficient': '(True)'}), '(matrix, mask, memory_efficient=True)\n', (14014, 14051), False, 'from allennlp.nn import util\n'), ((14528, 14584), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['matrix', 'mask'], {'memory_efficient': '(True)'}), '(matrix, mask, memory_efficient=True)\n', (14547, 14584), False, 'from allennlp.nn import util\n'), ((14964, 15020), 'allennlp.nn.util.masked_softmax', 'util.masked_softmax', (['matrix', 'mask'], {'memory_efficient': '(True)'}), '(matrix, mask, memory_efficient=True)\n', (14983, 15020), False, 'from allennlp.nn import util\n'), ((15641, 15684), 'allennlp.nn.util.masked_log_softmax', 'util.masked_log_softmax', (['vector_1d', 'mask_1d'], {}), '(vector_1d, mask_1d)\n', (15664, 15684), False, 'from allennlp.nn import util\n'), ((15985, 16028), 'allennlp.nn.util.masked_log_softmax', 'util.masked_log_softmax', (['vector_1d', 'mask_1d'], {}), '(vector_1d, mask_1d)\n', (16008, 16028), False, 'from allennlp.nn import util\n'), ((16444, 16487), 'allennlp.nn.util.masked_log_softmax', 'util.masked_log_softmax', (['vector_1d', 'mask_1d'], {}), '(vector_1d, mask_1d)\n', (16467, 16487), False, 'from allennlp.nn import util\n'), ((16945, 16988), 'allennlp.nn.util.masked_log_softmax', 'util.masked_log_softmax', (['vector_1d', 'mask_1d'], {}), '(vector_1d, mask_1d)\n', (16968, 16988), False, 'from allennlp.nn import util\n'), ((17021, 17053), 'numpy.isnan', 'numpy.isnan', (['vector_1d_softmaxed'], {}), '(vector_1d_softmaxed)\n', (17032, 17053), False, 'import numpy\n'), ((17272, 17314), 'allennlp.nn.util.masked_max', 'util.masked_max', (['vector_1d', 'mask_1d'], {'dim': '(0)'}), '(vector_1d, mask_1d, dim=0)\n', (17287, 17314), False, 'from allennlp.nn import util\n'), ((17618, 17660), 'allennlp.nn.util.masked_max', 'util.masked_max', (['vector_1d', 'mask_1d'], {'dim': '(0)'}), '(vector_1d, mask_1d, dim=0)\n', (17633, 17660), False, 'from 
allennlp.nn import util\n'), ((17693, 17721), 'numpy.isnan', 'numpy.isnan', (['vector_1d_maxed'], {}), '(vector_1d_maxed)\n', (17704, 17721), False, 'import numpy\n'), ((17944, 17981), 'allennlp.nn.util.masked_max', 'util.masked_max', (['matrix', 'mask'], {'dim': '(-1)'}), '(matrix, mask, dim=-1)\n', (17959, 17981), False, 'from allennlp.nn import util\n'), ((18297, 18348), 'allennlp.nn.util.masked_max', 'util.masked_max', (['matrix', 'mask'], {'dim': '(-1)', 'keepdim': '(True)'}), '(matrix, mask, dim=-1, keepdim=True)\n', (18312, 18348), False, 'from allennlp.nn import util\n'), ((18719, 18755), 'allennlp.nn.util.masked_max', 'util.masked_max', (['matrix', 'mask'], {'dim': '(1)'}), '(matrix, mask, dim=1)\n', (18734, 18755), False, 'from allennlp.nn import util\n'), ((19070, 19113), 'allennlp.nn.util.masked_mean', 'util.masked_mean', (['vector_1d', 'mask_1d'], {'dim': '(0)'}), '(vector_1d, mask_1d, dim=0)\n', (19086, 19113), False, 'from allennlp.nn import util\n'), ((19415, 19458), 'allennlp.nn.util.masked_mean', 'util.masked_mean', (['vector_1d', 'mask_1d'], {'dim': '(0)'}), '(vector_1d, mask_1d, dim=0)\n', (19431, 19458), False, 'from allennlp.nn import util\n'), ((19491, 19518), 'numpy.isnan', 'numpy.isnan', (['vector_1d_mean'], {}), '(vector_1d_mean)\n', (19502, 19518), False, 'import numpy\n'), ((19740, 19778), 'allennlp.nn.util.masked_mean', 'util.masked_mean', (['matrix', 'mask'], {'dim': '(-1)'}), '(matrix, mask, dim=-1)\n', (19756, 19778), False, 'from allennlp.nn import util\n'), ((20092, 20144), 'allennlp.nn.util.masked_mean', 'util.masked_mean', (['matrix', 'mask'], {'dim': '(-1)', 'keepdim': '(True)'}), '(matrix, mask, dim=-1, keepdim=True)\n', (20108, 20144), False, 'from allennlp.nn import util\n'), ((20513, 20550), 'allennlp.nn.util.masked_mean', 'util.masked_mean', (['matrix', 'mask'], {'dim': '(1)'}), '(matrix, mask, dim=1)\n', (20529, 20550), False, 'from allennlp.nn import util\n'), ((24723, 24775), 'allennlp.nn.util.weighted_sum', 
'util.weighted_sum', (['sentence_tensor', 'attention_tensor'], {}), '(sentence_tensor, attention_tensor)\n', (24740, 24775), False, 'from allennlp.nn import util\n'), ((25709, 25761), 'allennlp.nn.util.weighted_sum', 'util.weighted_sum', (['sentence_tensor', 'attention_tensor'], {}), '(sentence_tensor, attention_tensor)\n', (25726, 25761), False, 'from allennlp.nn import util\n'), ((26646, 26698), 'allennlp.nn.util.weighted_sum', 'util.weighted_sum', (['sentence_tensor', 'attention_tensor'], {}), '(sentence_tensor, attention_tensor)\n', (26663, 26698), False, 'from allennlp.nn import util\n'), ((27692, 27744), 'allennlp.nn.util.weighted_sum', 'util.weighted_sum', (['sentence_tensor', 'attention_tensor'], {}), '(sentence_tensor, attention_tensor)\n', (27709, 27744), False, 'from allennlp.nn import util\n'), ((47471, 47493), 'torch.ones', 'torch.ones', (['[2, 10, 3]'], {}), '([2, 10, 3])\n', (47481, 47493), False, 'import torch\n'), ((49076, 49098), 'torch.ones', 'torch.ones', (['[3, 12, 2]'], {}), '([3, 12, 2])\n', (49086, 49098), False, 'import torch\n'), ((49469, 49495), 'torch.empty_like', 'torch.empty_like', (['selected'], {}), '(selected)\n', (49485, 49495), False, 'import torch\n'), ((50337, 50358), 'torch.ones', 'torch.ones', (['[2, 6, 3]'], {}), '([2, 6, 3])\n', (50347, 50358), False, 'import torch\n'), ((53650, 53714), 'numpy.array', 'numpy.array', (['[[1, 1, 0, 0, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 0]]'], {}), '([[1, 1, 0, 0, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 0]])\n', (53661, 53714), False, 'import numpy\n'), ((54118, 54164), 'numpy.array', 'numpy.array', (['[[0, 0, 0], [1, 1, 1], [1, 1, 0]]'], {}), '([[0, 0, 0], [1, 1, 1], [1, 1, 0]])\n', (54129, 54164), False, 'import numpy\n'), ((71467, 71484), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (71479, 71484), False, 'import torch\n'), ((71488, 71505), 'torch.tensor', 'torch.tensor', (['[2]'], {}), '([2])\n', (71500, 71505), False, 'import torch\n'), ((23655, 23720), 
'allennlp.nn.util.get_text_field_mask', 'util.get_text_field_mask', (['text_field_tensors'], {'num_wrapping_dims': '(1)'}), '(text_field_tensors, num_wrapping_dims=1)\n', (23679, 23720), False, 'from allennlp.nn import util\n'), ((46404, 46426), 'allennlp.nn.util.logsumexp', 'util.logsumexp', (['tensor'], {}), '(tensor)\n', (46418, 46426), False, 'from allennlp.nn import util\n'), ((46530, 46552), 'allennlp.nn.util.logsumexp', 'util.logsumexp', (['tensor'], {}), '(tensor)\n', (46544, 46552), False, 'from allennlp.nn import util\n'), ((46671, 46700), 'allennlp.nn.util.logsumexp', 'util.logsumexp', (['tensor'], {'dim': '(0)'}), '(tensor, dim=0)\n', (46685, 46700), False, 'from allennlp.nn import util\n'), ((69023, 69160), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[4, 2, 9, 9, 7], [-4, -2, -9, -9, -7]], [[5, 4, 1, 8, 8], [9, 1, 7, 4, 1]\n ], [[9, 8, 9, 6, 0], [2, 2, 2, 2, 2]]]'], {}), '([[[4, 2, 9, 9, 7], [-4, -2, -9, -9, -7]], [[5, 4, 1, 8, 8\n ], [9, 1, 7, 4, 1]], [[9, 8, 9, 6, 0], [2, 2, 2, 2, 2]]])\n', (69040, 69160), False, 'import torch\n'), ((69265, 69501), 'torch.tensor', 'torch.tensor', (['[[[False, False, False, False, False], [True, True, True, True, True]], [[\n True, True, True, True, False], [False, True, True, True, True]], [[\n True, False, True, True, True], [False, True, False, True, True]]]'], {}), '([[[False, False, False, False, False], [True, True, True, True,\n True]], [[True, True, True, True, False], [False, True, True, True, \n True]], [[True, False, True, True, True], [False, True, False, True, \n True]]])\n', (69277, 69501), False, 'import torch\n'), ((69760, 69897), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[-4, -2, -9, -9, -7], [0, 0, 0, 0, 0]], [[5, 4, 7, 8, 1], [0, 0, 0, 4, 0]\n ], [[9, 2, 9, 6, 2], [0, 0, 0, 0, 0]]]'], {}), '([[[-4, -2, -9, -9, -7], [0, 0, 0, 0, 0]], [[5, 4, 7, 8, 1\n ], [0, 0, 0, 4, 0]], [[9, 2, 9, 6, 2], [0, 0, 0, 0, 0]]])\n', (69777, 69897), False, 'import torch\n'), ((70135, 70265), 'torch.LongTensor', 
'torch.LongTensor', (['[[[1, 1, 1, 1, 1], [0, 0, 0, 0, 0]], [[0, 0, 1, 0, 1], [0, 0, 0, 1, 0]], [[\n 0, 1, 0, 0, 1], [0, 0, 0, 0, 0]]]'], {}), '([[[1, 1, 1, 1, 1], [0, 0, 0, 0, 0]], [[0, 0, 1, 0, 1], [0,\n 0, 0, 1, 0]], [[0, 1, 0, 0, 1], [0, 0, 0, 0, 0]]])\n', (70151, 70265), False, 'import torch\n'), ((22517, 22561), 'allennlp.nn.util.get_text_field_mask', 'util.get_text_field_mask', (['text_field_tensors'], {}), '(text_field_tensors)\n', (22541, 22561), False, 'from allennlp.nn import util\n'), ((23103, 23147), 'allennlp.nn.util.get_text_field_mask', 'util.get_text_field_mask', (['text_field_tensors'], {}), '(text_field_tensors)\n', (23127, 23147), False, 'from allennlp.nn import util\n'), ((24254, 24298), 'allennlp.nn.util.get_text_field_mask', 'util.get_text_field_mask', (['text_field_tensors'], {}), '(text_field_tensors)\n', (24278, 24298), False, 'from allennlp.nn import util\n')] |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow import models
from airflow.api.common.experimental.mark_tasks import (
set_state, _create_dagruns, set_dag_run_state)
from airflow.settings import Session
from airflow.utils.dates import days_ago
from airflow.utils.state import State
from datetime import datetime, timedelta
DEV_NULL = "/dev/null"  # NOTE(review): not referenced anywhere in the visible module -- possibly legacy
class TestMarkTasks(unittest.TestCase):
    """Tests for the experimental mark_tasks API: ``set_state`` on task
    instances of the bundled example DAGs, with upstream/downstream,
    future/past and sub-dag propagation.
    """

    def setUp(self):
        """Load the example DAGs and create RUNNING dag runs for them."""
        self.dagbag = models.DagBag(include_examples=True)
        self.dag1 = self.dagbag.dags['test_example_bash_operator']
        self.dag2 = self.dagbag.dags['example_subdag_operator']
        self.execution_dates = [days_ago(2), days_ago(1)]

        drs = _create_dagruns(self.dag1, self.execution_dates,
                               state=State.RUNNING,
                               run_id_template="scheduled__{}")
        for dr in drs:
            dr.dag = self.dag1
            dr.verify_integrity()

        drs = _create_dagruns(self.dag2,
                               [self.dag2.default_args['start_date']],
                               state=State.RUNNING,
                               run_id_template="scheduled__{}")
        for dr in drs:
            dr.dag = self.dag2
            dr.verify_integrity()

        self.session = Session()

    def _query_task_instances(self, dag, execution_dates):
        """Return all task instances of *dag* at the given execution dates.

        Shared by :meth:`snapshot_state` and :meth:`verify_state`, which
        previously duplicated this query.
        """
        TI = models.TaskInstance
        return self.session.query(TI).filter(
            TI.dag_id == dag.dag_id,
            TI.execution_date.in_(execution_dates)
        ).all()

    def snapshot_state(self, dag, execution_dates):
        """Capture the current task instance states.

        The session is expunged afterwards so subsequent queries fetch
        fresh rows instead of returning the cached (snapshotted) objects.
        """
        tis = self._query_task_instances(dag, execution_dates)
        self.session.expunge_all()
        return tis

    def verify_state(self, dag, task_ids, execution_dates, state, old_tis):
        """Assert the task instances named in *task_ids* are in *state*
        while every other task instance kept the state recorded in the
        *old_tis* snapshot."""
        tis = self._query_task_instances(dag, execution_dates)
        self.assertTrue(len(tis) > 0)

        for ti in tis:
            if ti.task_id in task_ids and ti.execution_date in execution_dates:
                self.assertEqual(ti.state, state)
            else:
                for old_ti in old_tis:
                    if (old_ti.task_id == ti.task_id
                            and old_ti.execution_date == ti.execution_date):
                        self.assertEqual(ti.state, old_ti.state)

    def test_mark_tasks_now(self):
        """set_state on a single task: dry-run, commit, idempotence and a
        state change to something other than SUCCESS."""
        # set one task to success but do not commit
        snapshot = self.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_1")
        altered = set_state(task=task, execution_date=self.execution_dates[0],
                            upstream=False, downstream=False, future=False,
                            past=False, state=State.SUCCESS, commit=False)
        self.assertEqual(len(altered), 1)
        # without commit=True the database must be unchanged
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
                          None, snapshot)

        # set one and only one task to success
        altered = set_state(task=task, execution_date=self.execution_dates[0],
                            upstream=False, downstream=False, future=False,
                            past=False, state=State.SUCCESS, commit=True)
        self.assertEqual(len(altered), 1)
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
                          State.SUCCESS, snapshot)

        # set no tasks: already SUCCESS, so nothing gets altered again
        altered = set_state(task=task, execution_date=self.execution_dates[0],
                            upstream=False, downstream=False, future=False,
                            past=False, state=State.SUCCESS, commit=True)
        self.assertEqual(len(altered), 0)
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
                          State.SUCCESS, snapshot)

        # set task to other than success
        altered = set_state(task=task, execution_date=self.execution_dates[0],
                            upstream=False, downstream=False, future=False,
                            past=False, state=State.FAILED, commit=True)
        self.assertEqual(len(altered), 1)
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
                          State.FAILED, snapshot)

        # dont alter other tasks
        snapshot = self.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_0")
        altered = set_state(task=task, execution_date=self.execution_dates[0],
                            upstream=False, downstream=False, future=False,
                            past=False, state=State.SUCCESS, commit=True)
        self.assertEqual(len(altered), 1)
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
                          State.SUCCESS, snapshot)

    def test_mark_downstream(self):
        """downstream=True must also mark all downstream relatives."""
        snapshot = self.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_1")
        relatives = task.get_flat_relatives(upstream=False)
        task_ids = [t.task_id for t in relatives]
        task_ids.append(task.task_id)

        altered = set_state(task=task, execution_date=self.execution_dates[0],
                            upstream=False, downstream=True, future=False,
                            past=False, state=State.SUCCESS, commit=True)
        self.assertEqual(len(altered), 3)
        self.verify_state(self.dag1, task_ids, [self.execution_dates[0]],
                          State.SUCCESS, snapshot)

    def test_mark_upstream(self):
        """upstream=True must also mark all upstream relatives."""
        snapshot = self.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("run_after_loop")
        relatives = task.get_flat_relatives(upstream=True)
        task_ids = [t.task_id for t in relatives]
        task_ids.append(task.task_id)

        altered = set_state(task=task, execution_date=self.execution_dates[0],
                            upstream=True, downstream=False, future=False,
                            past=False, state=State.SUCCESS, commit=True)
        self.assertEqual(len(altered), 4)
        self.verify_state(self.dag1, task_ids, [self.execution_dates[0]],
                          State.SUCCESS, snapshot)

    def test_mark_tasks_future(self):
        """future=True marks the task in all later scheduled dag runs too."""
        snapshot = self.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_1")
        altered = set_state(task=task, execution_date=self.execution_dates[0],
                            upstream=False, downstream=False, future=True,
                            past=False, state=State.SUCCESS, commit=True)
        self.assertEqual(len(altered), 2)
        self.verify_state(self.dag1, [task.task_id], self.execution_dates,
                          State.SUCCESS, snapshot)

    def test_mark_tasks_past(self):
        """past=True marks the task in all earlier scheduled dag runs too."""
        snapshot = self.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_1")
        altered = set_state(task=task, execution_date=self.execution_dates[1],
                            upstream=False, downstream=False, future=False,
                            past=True, state=State.SUCCESS, commit=True)
        self.assertEqual(len(altered), 2)
        self.verify_state(self.dag1, [task.task_id], self.execution_dates,
                          State.SUCCESS, snapshot)

    def test_mark_tasks_subdag(self):
        """Marking a SubDagOperator downstream descends into the sub dag."""
        task = self.dag2.get_task("section-1")
        relatives = task.get_flat_relatives(upstream=False)
        task_ids = [t.task_id for t in relatives]
        task_ids.append(task.task_id)

        altered = set_state(task=task, execution_date=self.execution_dates[0],
                            upstream=False, downstream=True, future=False,
                            past=False, state=State.SUCCESS, commit=True)
        self.assertEqual(len(altered), 14)

        # cannot use snapshot here as that will require drilling down the
        # the sub dag tree essentially recreating the same code as in the
        # tested logic.
        self.verify_state(self.dag2, task_ids, [self.execution_dates[0]],
                          State.SUCCESS, [])

    def tearDown(self):
        """Clear the DAGs and wipe the dag run / task instance tables."""
        self.dag1.clear()
        self.dag2.clear()

        # just to make sure we are fully cleaned up
        self.session.query(models.DagRun).delete()
        self.session.query(models.TaskInstance).delete()
        self.session.commit()
        self.session.close()
class TestMarkDAGRun(unittest.TestCase):
def setUp(self):
self.dagbag = models.DagBag(include_examples=True)
self.dag1 = self.dagbag.dags['test_example_bash_operator']
self.dag2 = self.dagbag.dags['example_subdag_operator']
self.execution_dates = [days_ago(3), days_ago(2), days_ago(1)]
self.session = Session()
def verify_dag_run_states(self, dag, date, state=State.SUCCESS):
drs = models.DagRun.find(dag_id=dag.dag_id, execution_date=date)
dr = drs[0]
self.assertEqual(dr.get_state(), state)
tis = dr.get_task_instances(session=self.session)
for ti in tis:
self.assertEqual(ti.state, state)
def test_set_running_dag_run_state(self):
date = self.execution_dates[0]
dr = self.dag1.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.RUNNING,
execution_date=date,
session=self.session
)
for ti in dr.get_task_instances(session=self.session):
ti.set_state(State.RUNNING, self.session)
altered = set_dag_run_state(self.dag1, date, state=State.SUCCESS, commit=True)
# All of the task should be altered
self.assertEqual(len(altered), len(self.dag1.tasks))
self.verify_dag_run_states(self.dag1, date)
def test_set_success_dag_run_state(self):
date = self.execution_dates[0]
dr = self.dag1.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.SUCCESS,
execution_date=date,
session=self.session
)
for ti in dr.get_task_instances(session=self.session):
ti.set_state(State.SUCCESS, self.session)
altered = set_dag_run_state(self.dag1, date, state=State.SUCCESS, commit=True)
# None of the task should be altered
self.assertEqual(len(altered), 0)
self.verify_dag_run_states(self.dag1, date)
def test_set_failed_dag_run_state(self):
date = self.execution_dates[0]
dr = self.dag1.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.FAILED,
execution_date=date,
session=self.session
)
dr.get_task_instance('runme_0').set_state(State.FAILED, self.session)
altered = set_dag_run_state(self.dag1, date, state=State.SUCCESS, commit=True)
# All of the task should be altered
self.assertEqual(len(altered), len(self.dag1.tasks))
self.verify_dag_run_states(self.dag1, date)
def test_set_mixed_dag_run_state(self):
"""
This test checks function set_dag_run_state with mixed task instance
state.
"""
date = self.execution_dates[0]
dr = self.dag1.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.FAILED,
execution_date=date,
session=self.session
)
# success task
dr.get_task_instance('runme_0').set_state(State.SUCCESS, self.session)
# skipped task
dr.get_task_instance('runme_1').set_state(State.SKIPPED, self.session)
# retry task
dr.get_task_instance('runme_2').set_state(State.UP_FOR_RETRY, self.session)
# queued task
dr.get_task_instance('also_run_this').set_state(State.QUEUED, self.session)
# running task
dr.get_task_instance('run_after_loop').set_state(State.RUNNING, self.session)
# failed task
dr.get_task_instance('run_this_last').set_state(State.FAILED, self.session)
altered = set_dag_run_state(self.dag1, date, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), len(self.dag1.tasks) - 1) # only 1 task succeeded
self.verify_dag_run_states(self.dag1, date)
def test_set_state_without_commit(self):
date = self.execution_dates[0]
# Running dag run and task instances
dr = self.dag1.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.RUNNING,
execution_date=date,
session=self.session
)
for ti in dr.get_task_instances(session=self.session):
ti.set_state(State.RUNNING, self.session)
altered = set_dag_run_state(self.dag1, date, state=State.SUCCESS, commit=False)
# All of the task should be altered
self.assertEqual(len(altered), len(self.dag1.tasks))
# Both dag run and task instances' states should remain the same
self.verify_dag_run_states(self.dag1, date, State.RUNNING)
def test_set_state_with_multiple_dagruns(self):
dr1 = self.dag2.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.FAILED,
execution_date=self.execution_dates[0],
session=self.session
)
dr2 = self.dag2.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.FAILED,
execution_date=self.execution_dates[1],
session=self.session
)
dr3 = self.dag2.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.RUNNING,
execution_date=self.execution_dates[2],
session=self.session
)
altered = set_dag_run_state(self.dag2, self.execution_dates[1],
state=State.SUCCESS, commit=True)
# Recursively count number of tasks in the dag
def count_dag_tasks(dag):
count = len(dag.tasks)
subdag_counts = [count_dag_tasks(subdag) for subdag in dag.subdags]
count += sum(subdag_counts)
return count
self.assertEqual(len(altered), count_dag_tasks(self.dag2))
self.verify_dag_run_states(self.dag2, self.execution_dates[1])
# Make sure other dag status are not changed
dr1 = models.DagRun.find(dag_id=self.dag2.dag_id, execution_date=self.execution_dates[0])
dr1 = dr1[0]
self.assertEqual(dr1.get_state(), State.FAILED)
dr3 = models.DagRun.find(dag_id=self.dag2.dag_id, execution_date=self.execution_dates[2])
dr3 = dr3[0]
self.assertEqual(dr3.get_state(), State.RUNNING)
def test_set_dag_run_state_edge_cases(self):
# Dag does not exist
altered = set_dag_run_state(None, self.execution_dates[0])
self.assertEqual(len(altered), 0)
# Invalid execution date
altered = set_dag_run_state(self.dag1, None)
self.assertEqual(len(altered), 0)
self.assertRaises(AssertionError, set_dag_run_state, self.dag1, timedelta(microseconds=-1))
# DagRun does not exist
# This will throw AssertionError since dag.latest_execution_date does not exist
self.assertRaises(AssertionError, set_dag_run_state, self.dag1, self.execution_dates[0])
def tearDown(self):
self.dag1.clear()
self.dag2.clear()
self.session.query(models.DagRun).delete()
self.session.query(models.TaskInstance).delete()
self.session.query(models.DagStat).delete()
self.session.commit()
if __name__ == '__main__':
unittest.main()
| [
"airflow.api.common.experimental.mark_tasks._create_dagruns",
"airflow.api.common.experimental.mark_tasks.set_dag_run_state",
"airflow.api.common.experimental.mark_tasks.set_state",
"airflow.models.DagBag",
"datetime.timedelta",
"datetime.datetime.now",
"airflow.models.DagRun.find",
"airflow.utils.dat... | [((16431, 16446), 'unittest.main', 'unittest.main', ([], {}), '()\n', (16444, 16446), False, 'import unittest\n'), ((987, 1023), 'airflow.models.DagBag', 'models.DagBag', ([], {'include_examples': '(True)'}), '(include_examples=True)\n', (1000, 1023), False, 'from airflow import models\n'), ((1229, 1335), 'airflow.api.common.experimental.mark_tasks._create_dagruns', '_create_dagruns', (['self.dag1', 'self.execution_dates'], {'state': 'State.RUNNING', 'run_id_template': '"""scheduled__{}"""'}), "(self.dag1, self.execution_dates, state=State.RUNNING,\n run_id_template='scheduled__{}')\n", (1244, 1335), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((1495, 1620), 'airflow.api.common.experimental.mark_tasks._create_dagruns', '_create_dagruns', (['self.dag2', "[self.dag2.default_args['start_date']]"], {'state': 'State.RUNNING', 'run_id_template': '"""scheduled__{}"""'}), "(self.dag2, [self.dag2.default_args['start_date']], state=\n State.RUNNING, run_id_template='scheduled__{}')\n", (1510, 1620), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((1819, 1828), 'airflow.settings.Session', 'Session', ([], {}), '()\n', (1826, 1828), False, 'from airflow.settings import Session\n'), ((3048, 3212), 'airflow.api.common.experimental.mark_tasks.set_state', 'set_state', ([], {'task': 'task', 'execution_date': 'self.execution_dates[0]', 'upstream': '(False)', 'downstream': '(False)', 'future': '(False)', 'past': '(False)', 'state': 'State.SUCCESS', 'commit': '(False)'}), '(task=task, execution_date=self.execution_dates[0], upstream=False,\n downstream=False, future=False, past=False, state=State.SUCCESS, commit\n =False)\n', (3057, 3212), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((3490, 3653), 'airflow.api.common.experimental.mark_tasks.set_state', 
'set_state', ([], {'task': 'task', 'execution_date': 'self.execution_dates[0]', 'upstream': '(False)', 'downstream': '(False)', 'future': '(False)', 'past': '(False)', 'state': 'State.SUCCESS', 'commit': '(True)'}), '(task=task, execution_date=self.execution_dates[0], upstream=False,\n downstream=False, future=False, past=False, state=State.SUCCESS, commit\n =True)\n', (3499, 3653), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((3916, 4079), 'airflow.api.common.experimental.mark_tasks.set_state', 'set_state', ([], {'task': 'task', 'execution_date': 'self.execution_dates[0]', 'upstream': '(False)', 'downstream': '(False)', 'future': '(False)', 'past': '(False)', 'state': 'State.SUCCESS', 'commit': '(True)'}), '(task=task, execution_date=self.execution_dates[0], upstream=False,\n downstream=False, future=False, past=False, state=State.SUCCESS, commit\n =True)\n', (3925, 4079), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((4360, 4522), 'airflow.api.common.experimental.mark_tasks.set_state', 'set_state', ([], {'task': 'task', 'execution_date': 'self.execution_dates[0]', 'upstream': '(False)', 'downstream': '(False)', 'future': '(False)', 'past': '(False)', 'state': 'State.FAILED', 'commit': '(True)'}), '(task=task, execution_date=self.execution_dates[0], upstream=False,\n downstream=False, future=False, past=False, state=State.FAILED, commit=True\n )\n', (4369, 4522), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((4911, 5074), 'airflow.api.common.experimental.mark_tasks.set_state', 'set_state', ([], {'task': 'task', 'execution_date': 'self.execution_dates[0]', 'upstream': '(False)', 'downstream': '(False)', 'future': '(False)', 'past': '(False)', 'state': 'State.SUCCESS', 'commit': '(True)'}), '(task=task, execution_date=self.execution_dates[0], upstream=False,\n 
downstream=False, future=False, past=False, state=State.SUCCESS, commit\n =True)\n', (4920, 5074), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((5642, 5804), 'airflow.api.common.experimental.mark_tasks.set_state', 'set_state', ([], {'task': 'task', 'execution_date': 'self.execution_dates[0]', 'upstream': '(False)', 'downstream': '(True)', 'future': '(False)', 'past': '(False)', 'state': 'State.SUCCESS', 'commit': '(True)'}), '(task=task, execution_date=self.execution_dates[0], upstream=False,\n downstream=True, future=False, past=False, state=State.SUCCESS, commit=True\n )\n', (5651, 5804), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((6368, 6530), 'airflow.api.common.experimental.mark_tasks.set_state', 'set_state', ([], {'task': 'task', 'execution_date': 'self.execution_dates[0]', 'upstream': '(True)', 'downstream': '(False)', 'future': '(False)', 'past': '(False)', 'state': 'State.SUCCESS', 'commit': '(True)'}), '(task=task, execution_date=self.execution_dates[0], upstream=True,\n downstream=False, future=False, past=False, state=State.SUCCESS, commit\n =True)\n', (6377, 6530), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((6987, 7149), 'airflow.api.common.experimental.mark_tasks.set_state', 'set_state', ([], {'task': 'task', 'execution_date': 'self.execution_dates[0]', 'upstream': '(False)', 'downstream': '(False)', 'future': '(True)', 'past': '(False)', 'state': 'State.SUCCESS', 'commit': '(True)'}), '(task=task, execution_date=self.execution_dates[0], upstream=False,\n downstream=False, future=True, past=False, state=State.SUCCESS, commit=True\n )\n', (6996, 7149), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((7605, 7767), 'airflow.api.common.experimental.mark_tasks.set_state', 
'set_state', ([], {'task': 'task', 'execution_date': 'self.execution_dates[1]', 'upstream': '(False)', 'downstream': '(False)', 'future': '(False)', 'past': '(True)', 'state': 'State.SUCCESS', 'commit': '(True)'}), '(task=task, execution_date=self.execution_dates[1], upstream=False,\n downstream=False, future=False, past=True, state=State.SUCCESS, commit=True\n )\n', (7614, 7767), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((8304, 8466), 'airflow.api.common.experimental.mark_tasks.set_state', 'set_state', ([], {'task': 'task', 'execution_date': 'self.execution_dates[0]', 'upstream': '(False)', 'downstream': '(True)', 'future': '(False)', 'past': '(False)', 'state': 'State.SUCCESS', 'commit': '(True)'}), '(task=task, execution_date=self.execution_dates[0], upstream=False,\n downstream=True, future=False, past=False, state=State.SUCCESS, commit=True\n )\n', (8313, 8466), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((9232, 9268), 'airflow.models.DagBag', 'models.DagBag', ([], {'include_examples': '(True)'}), '(include_examples=True)\n', (9245, 9268), False, 'from airflow import models\n'), ((9496, 9505), 'airflow.settings.Session', 'Session', ([], {}), '()\n', (9503, 9505), False, 'from airflow.settings import Session\n'), ((9590, 9648), 'airflow.models.DagRun.find', 'models.DagRun.find', ([], {'dag_id': 'dag.dag_id', 'execution_date': 'date'}), '(dag_id=dag.dag_id, execution_date=date)\n', (9608, 9648), False, 'from airflow import models\n'), ((10273, 10341), 'airflow.api.common.experimental.mark_tasks.set_dag_run_state', 'set_dag_run_state', (['self.dag1', 'date'], {'state': 'State.SUCCESS', 'commit': '(True)'}), '(self.dag1, date, state=State.SUCCESS, commit=True)\n', (10290, 10341), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((10930, 10998), 
'airflow.api.common.experimental.mark_tasks.set_dag_run_state', 'set_dag_run_state', (['self.dag1', 'date'], {'state': 'State.SUCCESS', 'commit': '(True)'}), '(self.dag1, date, state=State.SUCCESS, commit=True)\n', (10947, 10998), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((11527, 11595), 'airflow.api.common.experimental.mark_tasks.set_dag_run_state', 'set_dag_run_state', (['self.dag1', 'date'], {'state': 'State.SUCCESS', 'commit': '(True)'}), '(self.dag1, date, state=State.SUCCESS, commit=True)\n', (11544, 11595), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((12809, 12877), 'airflow.api.common.experimental.mark_tasks.set_dag_run_state', 'set_dag_run_state', (['self.dag1', 'date'], {'state': 'State.SUCCESS', 'commit': '(True)'}), '(self.dag1, date, state=State.SUCCESS, commit=True)\n', (12826, 12877), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((13494, 13563), 'airflow.api.common.experimental.mark_tasks.set_dag_run_state', 'set_dag_run_state', (['self.dag1', 'date'], {'state': 'State.SUCCESS', 'commit': '(False)'}), '(self.dag1, date, state=State.SUCCESS, commit=False)\n', (13511, 13563), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((14562, 14653), 'airflow.api.common.experimental.mark_tasks.set_dag_run_state', 'set_dag_run_state', (['self.dag2', 'self.execution_dates[1]'], {'state': 'State.SUCCESS', 'commit': '(True)'}), '(self.dag2, self.execution_dates[1], state=State.SUCCESS,\n commit=True)\n', (14579, 14653), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((15159, 15247), 'airflow.models.DagRun.find', 'models.DagRun.find', ([], {'dag_id': 'self.dag2.dag_id', 'execution_date': 'self.execution_dates[0]'}), 
'(dag_id=self.dag2.dag_id, execution_date=self.\n execution_dates[0])\n', (15177, 15247), False, 'from airflow import models\n'), ((15334, 15422), 'airflow.models.DagRun.find', 'models.DagRun.find', ([], {'dag_id': 'self.dag2.dag_id', 'execution_date': 'self.execution_dates[2]'}), '(dag_id=self.dag2.dag_id, execution_date=self.\n execution_dates[2])\n', (15352, 15422), False, 'from airflow import models\n'), ((15593, 15641), 'airflow.api.common.experimental.mark_tasks.set_dag_run_state', 'set_dag_run_state', (['None', 'self.execution_dates[0]'], {}), '(None, self.execution_dates[0])\n', (15610, 15641), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((15736, 15770), 'airflow.api.common.experimental.mark_tasks.set_dag_run_state', 'set_dag_run_state', (['self.dag1', 'None'], {}), '(self.dag1, None)\n', (15753, 15770), False, 'from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns, set_dag_run_state\n'), ((1188, 1199), 'airflow.utils.dates.days_ago', 'days_ago', (['(2)'], {}), '(2)\n', (1196, 1199), False, 'from airflow.utils.dates import days_ago\n'), ((1201, 1212), 'airflow.utils.dates.days_ago', 'days_ago', (['(1)'], {}), '(1)\n', (1209, 1212), False, 'from airflow.utils.dates import days_ago\n'), ((9433, 9444), 'airflow.utils.dates.days_ago', 'days_ago', (['(3)'], {}), '(3)\n', (9441, 9444), False, 'from airflow.utils.dates import days_ago\n'), ((9446, 9457), 'airflow.utils.dates.days_ago', 'days_ago', (['(2)'], {}), '(2)\n', (9454, 9457), False, 'from airflow.utils.dates import days_ago\n'), ((9459, 9470), 'airflow.utils.dates.days_ago', 'days_ago', (['(1)'], {}), '(1)\n', (9467, 9470), False, 'from airflow.utils.dates import days_ago\n'), ((15885, 15911), 'datetime.timedelta', 'timedelta', ([], {'microseconds': '(-1)'}), '(microseconds=-1)\n', (15894, 15911), False, 'from datetime import datetime, timedelta\n'), ((10000, 10014), 'datetime.datetime.now', 'datetime.now', 
([], {}), '()\n', (10012, 10014), False, 'from datetime import datetime, timedelta\n'), ((10657, 10671), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10669, 10671), False, 'from datetime import datetime, timedelta\n'), ((11294, 11308), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11306, 11308), False, 'from datetime import datetime, timedelta\n'), ((12024, 12038), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12036, 12038), False, 'from datetime import datetime, timedelta\n'), ((13221, 13235), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13233, 13235), False, 'from datetime import datetime, timedelta\n'), ((13935, 13949), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13947, 13949), False, 'from datetime import datetime, timedelta\n'), ((14161, 14175), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14173, 14175), False, 'from datetime import datetime, timedelta\n'), ((14387, 14401), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14399, 14401), False, 'from datetime import datetime, timedelta\n')] |
import bs4
html_str = """
<html>
<body>
<ul>
<li>hello</li>
<li>bye</li>
<li>welcome</li>
</ul>
</body>
</html>
"""
bs_obj = bs4.BeautifulSoup(html_str, "html.parser")
ul = bs_obj.find("ul")
lis = ul.findAll("li")
print(lis)
| [
"bs4.BeautifulSoup"
] | [((186, 228), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['html_str', '"""html.parser"""'], {}), "(html_str, 'html.parser')\n", (203, 228), False, 'import bs4\n')] |
from django.urls import path
from rest_framework_nested import routers
from hypha.apply.api.v1.determination.views import SubmissionDeterminationViewSet
from hypha.apply.api.v1.review.views import SubmissionReviewViewSet
from hypha.apply.api.v1.screening.views import (
ScreeningStatusViewSet,
SubmissionScreeningStatusViewSet,
)
from .views import (
CommentViewSet,
CurrentUser,
RoundViewSet,
SubmissionActionViewSet,
SubmissionCommentViewSet,
SubmissionViewSet,
)
app_name = 'v1'
router = routers.SimpleRouter()
router.register(r'submissions', SubmissionViewSet, basename='submissions')
router.register(r'comments', CommentViewSet, basename='comments')
router.register(r'rounds', RoundViewSet, basename='rounds')
router.register(r'screening_statuses', ScreeningStatusViewSet, basename='screenings')
submission_router = routers.NestedSimpleRouter(router, r'submissions', lookup='submission')
submission_router.register(r'actions', SubmissionActionViewSet, basename='submission-actions')
submission_router.register(r'comments', SubmissionCommentViewSet, basename='submission-comments')
submission_router.register(r'reviews', SubmissionReviewViewSet, basename='reviews')
submission_router.register(r'determinations', SubmissionDeterminationViewSet, basename='determinations')
submission_router.register(r'screening_statuses', SubmissionScreeningStatusViewSet, basename='submission-screening_statuses')
urlpatterns = [
path('user/', CurrentUser.as_view(), name='user'),
]
urlpatterns = router.urls + submission_router.urls + urlpatterns
| [
"rest_framework_nested.routers.NestedSimpleRouter",
"rest_framework_nested.routers.SimpleRouter"
] | [((528, 550), 'rest_framework_nested.routers.SimpleRouter', 'routers.SimpleRouter', ([], {}), '()\n', (548, 550), False, 'from rest_framework_nested import routers\n'), ((859, 929), 'rest_framework_nested.routers.NestedSimpleRouter', 'routers.NestedSimpleRouter', (['router', '"""submissions"""'], {'lookup': '"""submission"""'}), "(router, 'submissions', lookup='submission')\n", (885, 929), False, 'from rest_framework_nested import routers\n')] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Built-in optimizer classes.
For more examples see the base class `tf.keras.optimizers.Optimizer`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import six
from keras import backend as K
from keras.optimizer_v1 import Optimizer
from keras.optimizer_v1 import TFOptimizer
from keras.optimizer_v2 import adadelta as adadelta_v2
from keras.optimizer_v2 import adagrad as adagrad_v2
from keras.optimizer_v2 import adam as adam_v2
from keras.optimizer_v2 import adamax as adamax_v2
from keras.optimizer_v2 import ftrl
from keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from keras.optimizer_v2 import nadam as nadam_v2
from keras.optimizer_v2 import optimizer_v2
from keras.optimizer_v2 import rmsprop as rmsprop_v2
from keras.utils.generic_utils import deserialize_keras_object
from keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.optimizers.serialize')
def serialize(optimizer):
return serialize_keras_object(optimizer)
@keras_export('keras.optimizers.deserialize')
def deserialize(config, custom_objects=None):
"""Inverse of the `serialize` function.
Args:
config: Optimizer configuration dictionary.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during deserialization.
Returns:
A Keras Optimizer instance.
"""
# loss_scale_optimizer has a direct dependency of optimizer, import here
# rather than top to avoid the cyclic dependency.
from keras.mixed_precision import loss_scale_optimizer # pylint: disable=g-import-not-at-top
all_classes = {
'adadelta': adadelta_v2.Adadelta,
'adagrad': adagrad_v2.Adagrad,
'adam': adam_v2.Adam,
'adamax': adamax_v2.Adamax,
'nadam': nadam_v2.Nadam,
'rmsprop': rmsprop_v2.RMSprop,
'sgd': gradient_descent_v2.SGD,
'ftrl': ftrl.Ftrl,
'lossscaleoptimizer': loss_scale_optimizer.LossScaleOptimizer,
# LossScaleOptimizerV1 deserializes into LossScaleOptimizer, as
# LossScaleOptimizerV1 will be removed soon but deserializing it will
# still be supported.
'lossscaleoptimizerv1': loss_scale_optimizer.LossScaleOptimizer,
}
# Make deserialization case-insensitive for built-in optimizers.
if config['class_name'].lower() in all_classes:
config['class_name'] = config['class_name'].lower()
return deserialize_keras_object(
config,
module_objects=all_classes,
custom_objects=custom_objects,
printable_module_name='optimizer')
@keras_export('keras.optimizers.get')
def get(identifier):
"""Retrieves a Keras Optimizer instance.
Args:
identifier: Optimizer identifier, one of
- String: name of an optimizer
- Dictionary: configuration dictionary. - Keras Optimizer instance (it
will be returned unchanged). - TensorFlow Optimizer instance (it
will be wrapped as a Keras Optimizer).
Returns:
A Keras Optimizer instance.
Raises:
ValueError: If `identifier` cannot be interpreted.
"""
if isinstance(identifier, (Optimizer, optimizer_v2.OptimizerV2)):
return identifier
# Wrap TF optimizer instances
elif isinstance(identifier, tf.compat.v1.train.Optimizer):
opt = TFOptimizer(identifier)
K.track_tf_optimizer(opt)
return opt
elif isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
else:
raise ValueError(
'Could not interpret optimizer identifier: {}'.format(identifier))
| [
"keras.backend.track_tf_optimizer",
"keras.utils.generic_utils.serialize_keras_object",
"tensorflow.python.util.tf_export.keras_export",
"keras.optimizer_v1.TFOptimizer",
"keras.utils.generic_utils.deserialize_keras_object"
] | [((1742, 1784), 'tensorflow.python.util.tf_export.keras_export', 'keras_export', (['"""keras.optimizers.serialize"""'], {}), "('keras.optimizers.serialize')\n", (1754, 1784), False, 'from tensorflow.python.util.tf_export import keras_export\n'), ((1857, 1901), 'tensorflow.python.util.tf_export.keras_export', 'keras_export', (['"""keras.optimizers.deserialize"""'], {}), "('keras.optimizers.deserialize')\n", (1869, 1901), False, 'from tensorflow.python.util.tf_export import keras_export\n'), ((3425, 3461), 'tensorflow.python.util.tf_export.keras_export', 'keras_export', (['"""keras.optimizers.get"""'], {}), "('keras.optimizers.get')\n", (3437, 3461), False, 'from tensorflow.python.util.tf_export import keras_export\n'), ((1820, 1853), 'keras.utils.generic_utils.serialize_keras_object', 'serialize_keras_object', (['optimizer'], {}), '(optimizer)\n', (1842, 1853), False, 'from keras.utils.generic_utils import serialize_keras_object\n'), ((3270, 3401), 'keras.utils.generic_utils.deserialize_keras_object', 'deserialize_keras_object', (['config'], {'module_objects': 'all_classes', 'custom_objects': 'custom_objects', 'printable_module_name': '"""optimizer"""'}), "(config, module_objects=all_classes, custom_objects\n =custom_objects, printable_module_name='optimizer')\n", (3294, 3401), False, 'from keras.utils.generic_utils import deserialize_keras_object\n'), ((4145, 4168), 'keras.optimizer_v1.TFOptimizer', 'TFOptimizer', (['identifier'], {}), '(identifier)\n', (4156, 4168), False, 'from keras.optimizer_v1 import TFOptimizer\n'), ((4173, 4198), 'keras.backend.track_tf_optimizer', 'K.track_tf_optimizer', (['opt'], {}), '(opt)\n', (4193, 4198), True, 'from keras import backend as K\n')] |
##
# Copyright (c) 2007-2013 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from cStringIO import StringIO
from pycalendar.containerbase import ContainerBase
from pycalendar.exceptions import InvalidData
from pycalendar.parser import ParserContext
from pycalendar.utils import readFoldedLine
from pycalendar.vcard import definitions
from pycalendar.vcard.definitions import VCARD, Property_VERSION, \
Property_PRODID, Property_UID
from pycalendar.vcard.property import Property
from pycalendar.vcard.validation import VCARD_VALUE_CHECKS
import json
class Card(ContainerBase):
sContainerDescriptor = "vCard"
sComponentType = None
sPropertyType = Property
sFormatText = "text/vcard"
sFormatJSON = "application/vcard+json"
propertyCardinality_1 = (
definitions.Property_VERSION,
definitions.Property_N,
)
propertyCardinality_0_1 = (
definitions.Property_BDAY,
definitions.Property_PRODID,
definitions.Property_REV,
definitions.Property_UID,
)
propertyCardinality_1_More = (
definitions.Property_FN,
)
propertyValueChecks = VCARD_VALUE_CHECKS
def duplicate(self):
return super(Card, self).duplicate()
def getType(self):
return VCARD
def sortedPropertyKeyOrder(self):
return (
Property_VERSION,
Property_PRODID,
Property_UID,
)
@classmethod
def parseMultipleData(cls, data, format):
if format == cls.sFormatText:
return cls.parseMultipleTextData(data)
elif format == cls.sFormatJSON:
return cls.parseMultipleJSONData(data)
@classmethod
def parseMultipleTextData(cls, ins):
if isinstance(ins, str):
ins = StringIO(ins)
results = []
card = cls(add_defaults=False)
LOOK_FOR_VCARD = 0
GET_PROPERTY = 1
state = LOOK_FOR_VCARD
# Get lines looking for start of calendar
lines = [None, None]
while readFoldedLine(ins, lines):
line = lines[0]
if state == LOOK_FOR_VCARD:
# Look for start
if line == card.getBeginDelimiter():
# Next state
state = GET_PROPERTY
# Handle blank line
elif len(line) == 0:
# Raise if requested, otherwise just ignore
if ParserContext.BLANK_LINES_IN_DATA == ParserContext.PARSER_RAISE:
raise InvalidData("vCard data has blank lines")
# Unrecognized data
else:
raise InvalidData("vCard data not recognized", line)
elif state == GET_PROPERTY:
# Look for end of object
if line == card.getEndDelimiter():
# Finalise the current calendar
card.finalise()
# Validate some things
if not card.hasProperty("VERSION"):
raise InvalidData("vCard missing VERSION", "")
results.append(card)
# Change state
card = Card(add_defaults=False)
state = LOOK_FOR_VCARD
# Blank line
elif len(line) == 0:
# Raise if requested, otherwise just ignore
if ParserContext.BLANK_LINES_IN_DATA == ParserContext.PARSER_RAISE:
raise InvalidData("vCard data has blank lines")
# Must be a property
else:
# Parse parameter/value for top-level calendar item
prop = Property.parseText(line)
# Check for valid property
if not card.validProperty(prop):
raise InvalidData("Invalid property", str(prop))
else:
card.addProperty(prop)
# Check for truncated data
if state != LOOK_FOR_VCARD:
raise InvalidData("vCard data not complete")
return results
@classmethod
def parseMultipleJSONData(cls, data):
if not isinstance(data, str):
data = data.read()
try:
jobjects = json.loads(data)
except ValueError as e:
raise InvalidData("JSON error: '{}'".format(e), data)
results = []
for jobject in jobjects:
results.append(cls.parseJSON(jobject, None, cls(add_defaults=False)))
return results
def addDefaultProperties(self):
self.addProperty(Property(definitions.Property_PRODID, Card.sProdID))
self.addProperty(Property(definitions.Property_VERSION, "3.0"))
def validProperty(self, prop):
if prop.getName() == definitions.Property_VERSION:
tvalue = prop.getValue()
if ((tvalue is None) or (tvalue.getValue() != "3.0")):
return False
return True
| [
"json.loads",
"cStringIO.StringIO",
"pycalendar.utils.readFoldedLine",
"pycalendar.exceptions.InvalidData",
"pycalendar.vcard.property.Property",
"pycalendar.vcard.property.Property.parseText"
] | [((2596, 2622), 'pycalendar.utils.readFoldedLine', 'readFoldedLine', (['ins', 'lines'], {}), '(ins, lines)\n', (2610, 2622), False, 'from pycalendar.utils import readFoldedLine\n'), ((2340, 2353), 'cStringIO.StringIO', 'StringIO', (['ins'], {}), '(ins)\n', (2348, 2353), False, 'from cStringIO import StringIO\n'), ((4664, 4702), 'pycalendar.exceptions.InvalidData', 'InvalidData', (['"""vCard data not complete"""'], {}), "('vCard data not complete')\n", (4675, 4702), False, 'from pycalendar.exceptions import InvalidData\n'), ((4893, 4909), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (4903, 4909), False, 'import json\n'), ((5229, 5280), 'pycalendar.vcard.property.Property', 'Property', (['definitions.Property_PRODID', 'Card.sProdID'], {}), '(definitions.Property_PRODID, Card.sProdID)\n', (5237, 5280), False, 'from pycalendar.vcard.property import Property\n'), ((5307, 5352), 'pycalendar.vcard.property.Property', 'Property', (['definitions.Property_VERSION', '"""3.0"""'], {}), "(definitions.Property_VERSION, '3.0')\n", (5315, 5352), False, 'from pycalendar.vcard.property import Property\n'), ((3237, 3283), 'pycalendar.exceptions.InvalidData', 'InvalidData', (['"""vCard data not recognized"""', 'line'], {}), "('vCard data not recognized', line)\n", (3248, 3283), False, 'from pycalendar.exceptions import InvalidData\n'), ((3110, 3151), 'pycalendar.exceptions.InvalidData', 'InvalidData', (['"""vCard data has blank lines"""'], {}), "('vCard data has blank lines')\n", (3121, 3151), False, 'from pycalendar.exceptions import InvalidData\n'), ((3637, 3677), 'pycalendar.exceptions.InvalidData', 'InvalidData', (['"""vCard missing VERSION"""', '""""""'], {}), "('vCard missing VERSION', '')\n", (3648, 3677), False, 'from pycalendar.exceptions import InvalidData\n'), ((4302, 4326), 'pycalendar.vcard.property.Property.parseText', 'Property.parseText', (['line'], {}), '(line)\n', (4320, 4326), False, 'from pycalendar.vcard.property import Property\n'), ((4100, 4141), 
'pycalendar.exceptions.InvalidData', 'InvalidData', (['"""vCard data has blank lines"""'], {}), "('vCard data has blank lines')\n", (4111, 4141), False, 'from pycalendar.exceptions import InvalidData\n')] |
"""PyMC3-ArviZ conversion code."""
import logging
import warnings
from typing import ( # pylint: disable=unused-import
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Tuple,
Union,
)
import numpy as np
import xarray as xr
from aesara.graph.basic import Constant
from aesara.tensor.sharedvar import SharedVariable
from aesara.tensor.subtensor import AdvancedIncSubtensor
from arviz import InferenceData, concat, rcParams
from arviz.data.base import CoordSpec, DimSpec
from arviz.data.base import dict_to_dataset as _dict_to_dataset
from arviz.data.base import generate_dims_coords, make_attrs, requires
import pymc3
from pymc3.aesaraf import extract_obs_data
from pymc3.distributions import logpt
from pymc3.model import modelcontext
from pymc3.util import get_default_varnames
if TYPE_CHECKING:
from typing import Set # pylint: disable=ungrouped-imports
from pymc3.backends.base import MultiTrace # pylint: disable=invalid-name
from pymc3.model import Model
___all__ = [""]
_log = logging.getLogger("pymc3")
# random variable object ...
Var = Any # pylint: disable=invalid-name
class _DefaultTrace:
"""
Utility for collecting samples into a dictionary.
Name comes from its similarity to ``defaultdict``:
entries are lazily created.
Parameters
----------
samples : int
The number of samples that will be collected, per variable,
into the trace.
Attributes
----------
trace_dict : Dict[str, np.ndarray]
A dictionary constituting a trace. Should be extracted
after a procedure has filled the `_DefaultTrace` using the
`insert()` method
"""
trace_dict: Dict[str, np.ndarray] = {}
_len: Optional[int] = None
def __init__(self, samples: int):
self._len = samples
self.trace_dict = {}
def insert(self, k: str, v, idx: int):
"""
Insert `v` as the value of the `idx`th sample for the variable `k`.
Parameters
----------
k: str
Name of the variable.
v: anything that can go into a numpy array (including a numpy array)
The value of the `idx`th sample from variable `k`
ids: int
The index of the sample we are inserting into the trace.
"""
value_shape = np.shape(v)
# initialize if necessary
if k not in self.trace_dict:
array_shape = (self._len,) + value_shape
self.trace_dict[k] = np.empty(array_shape, dtype=np.array(v).dtype)
# do the actual insertion
if value_shape == ():
self.trace_dict[k][idx] = v
else:
self.trace_dict[k][idx, :] = v
def dict_to_dataset(
    data,
    library=None,
    coords=None,
    dims=None,
    attrs=None,
    default_dims=None,
    skip_event_dims=None,
    index_origin=None,
):
    """Temporal workaround for dict_to_dataset.

    Once ArviZ>0.11.2 release is available, only two changes are needed for everything to work.
    1) this should be deleted, 2) dict_to_dataset should be imported as is from arviz, no underscore,
    also remove unnecessary imports
    """
    if default_dims is None:
        return _dict_to_dataset(
            data, library=library, coords=coords, dims=dims, skip_event_dims=skip_event_dims
        )

    # ``default_dims`` given: build the Dataset by hand, one DataArray per
    # variable, so no implicit chain/draw dimensions are prepended.
    data_vars = {}
    for name, values in data.items():
        values = np.atleast_1d(values)
        var_dims = dims.get(name)
        var_dims, coords = generate_dims_coords(values.shape, name, dims=var_dims, coords=coords)
        # ``coords`` is rebound here, so later variables see the
        # IndexVariable objects built for earlier ones.
        coords = {dim: xr.IndexVariable((dim,), data=coords[dim]) for dim in var_dims}
        data_vars[name] = xr.DataArray(values, dims=var_dims, coords=coords)
    return xr.Dataset(data_vars=data_vars, attrs=make_attrs(library=library))
class InferenceDataConverter:  # pylint: disable=too-many-instance-attributes
    """Encapsulate InferenceData specific logic.

    Collects a PyMC3 trace plus optional prior / posterior-predictive /
    prediction sample dictionaries and converts them, group by group, into
    an ``arviz.InferenceData`` object via :meth:`to_inference_data`.
    """

    # Class-level placeholders; the real values are assigned in ``__init__``.
    model = None  # type: Optional[Model]
    nchains = None  # type: int
    ndraws = None  # type: int
    posterior_predictive = None  # Type: Optional[Mapping[str, np.ndarray]]
    predictions = None  # Type: Optional[Mapping[str, np.ndarray]]
    prior = None  # Type: Optional[Mapping[str, np.ndarray]]
    def __init__(
        self,
        *,
        trace=None,
        prior=None,
        posterior_predictive=None,
        log_likelihood=True,
        predictions=None,
        coords: Optional[CoordSpec] = None,
        dims: Optional[DimSpec] = None,
        model=None,
        save_warmup: Optional[bool] = None,
        density_dist_obs: bool = True,
        index_origin: Optional[int] = None,
    ):
        """Gather conversion inputs and precompute chain/draw bookkeeping.

        Draw counts (and the warmup split) are read from ``trace`` when one
        is given; otherwise they are inferred from whichever of
        ``predictions``, ``posterior_predictive`` or ``prior`` is available.
        Raises ``ValueError`` when none of those inputs is provided.
        """
        self.save_warmup = rcParams["data.save_warmup"] if save_warmup is None else save_warmup
        self.trace = trace
        # this permits us to get the model from command-line argument or from with model:
        self.model = modelcontext(model)
        self.attrs = None
        if trace is not None:
            self.nchains = trace.nchains if hasattr(trace, "nchains") else 1
            # Prefer the draw count from the sampler report when it exists;
            # it also carries sampling-time metadata for the attrs.
            if hasattr(trace.report, "n_draws") and trace.report.n_draws is not None:
                self.ndraws = trace.report.n_draws
                self.attrs = {
                    "sampling_time": trace.report.t_sampling,
                    "tuning_steps": trace.report.n_tune,
                }
            else:
                self.ndraws = len(trace)
                if self.save_warmup:
                    warnings.warn(
                        "Warmup samples will be stored in posterior group and will not be"
                        " excluded from stats and diagnostics."
                        " Do not slice the trace manually before conversion",
                        UserWarning,
                    )
            self.ntune = len(self.trace) - self.ndraws
            self.posterior_trace, self.warmup_trace = self.split_trace()
        else:
            self.nchains = self.ndraws = 0
        self.prior = prior
        self.posterior_predictive = posterior_predictive
        self.log_likelihood = log_likelihood
        self.predictions = predictions
        self.index_origin = rcParams["data.index_origin"] if index_origin is None else index_origin

        def arbitrary_element(dct: Dict[Any, np.ndarray]) -> np.ndarray:
            # Any value will do: only its leading (draw) dimension is read.
            return next(iter(dct.values()))

        if trace is None:
            # if you have a posterior_predictive built with keep_dims,
            # you'll lose here, but there's nothing I can do about that.
            self.nchains = 1
            get_from = None
            if predictions is not None:
                get_from = predictions
            elif posterior_predictive is not None:
                get_from = posterior_predictive
            elif prior is not None:
                get_from = prior
            if get_from is None:
                # pylint: disable=line-too-long
                raise ValueError(
                    "When constructing InferenceData must have at least"
                    " one of trace, prior, posterior_predictive or predictions."
                )
            aelem = arbitrary_element(get_from)
            self.ndraws = aelem.shape[0]
        self.coords = {} if coords is None else coords
        if hasattr(self.model, "coords"):
            self.coords = {**self.model.coords, **self.coords}
        # Drop coordinate entries whose value is None.
        self.coords = {key: value for key, value in self.coords.items() if value is not None}
        self.dims = {} if dims is None else dims
        if hasattr(self.model, "RV_dims"):
            # NOTE: ``dims`` inside this comprehension is the per-variable
            # list from RV_dims and deliberately shadows the ``dims`` argument.
            model_dims = {
                var_name: [dim for dim in dims if dim is not None]
                for var_name, dims in self.model.RV_dims.items()
            }
            self.dims = {**model_dims, **self.dims}
        self.density_dist_obs = density_dist_obs
        self.observations = self.find_observations()
def find_observations(self) -> Optional[Dict[str, Var]]:
"""If there are observations available, return them as a dictionary."""
if self.model is None:
return None
observations = {}
for obs in self.model.observed_RVs:
aux_obs = getattr(obs.tag, "observations", None)
if aux_obs is not None:
try:
obs_data = extract_obs_data(aux_obs)
observations[obs.name] = obs_data
except TypeError:
warnings.warn(f"Could not extract data from symbolic observation {obs}")
else:
warnings.warn(f"No data for observation {obs}")
return observations
def split_trace(self) -> Tuple[Union[None, "MultiTrace"], Union[None, "MultiTrace"]]:
"""Split MultiTrace object into posterior and warmup.
Returns
-------
trace_posterior: MultiTrace or None
The slice of the trace corresponding to the posterior. If the posterior
trace is empty, None is returned
trace_warmup: MultiTrace or None
The slice of the trace corresponding to the warmup. If the warmup trace is
empty or ``save_warmup=False``, None is returned
"""
trace_posterior = None
trace_warmup = None
if self.save_warmup and self.ntune > 0:
trace_warmup = self.trace[: self.ntune]
if self.ndraws > 0:
trace_posterior = self.trace[self.ntune :]
return trace_posterior, trace_warmup
def log_likelihood_vals_point(self, point, var, log_like_fun):
"""Compute log likelihood for each observed point."""
# TODO: This is a cheap hack; we should filter-out the correct
# variables some other way
point = {i.name: point[i.name] for i in log_like_fun.f.maker.inputs if i.name in point}
log_like_val = np.atleast_1d(log_like_fun(point))
if isinstance(var.owner.op, AdvancedIncSubtensor):
try:
obs_data = extract_obs_data(var.tag.observations)
except TypeError:
warnings.warn(f"Could not extract data from symbolic observation {var}")
mask = obs_data.mask
if np.ndim(mask) > np.ndim(log_like_val):
mask = np.any(mask, axis=-1)
log_like_val = np.where(mask, np.nan, log_like_val)
return log_like_val
    def _extract_log_likelihood(self, trace):
        """Compute log likelihood of each observation.

        Returns a dict mapping observed-variable name to an array of
        pointwise log-likelihood values (stacked per chain), or ``None``
        when there is no trace or model to work from.
        """
        if self.trace is None:
            return None
        if self.model is None:
            return None
        # Compile one log-probability function per observed variable; when
        # ``log_likelihood`` is an iterable of names, restrict to those.
        if self.log_likelihood is True:
            cached = [(var, self.model.fn(logpt(var))) for var in self.model.observed_RVs]
        else:
            cached = [
                (var, self.model.fn(logpt(var)))
                for var in self.model.observed_RVs
                if var.name in self.log_likelihood
            ]
        # One trace slot per chain; each insert stores the stacked values
        # for all draws of that chain.
        log_likelihood_dict = _DefaultTrace(len(trace.chains))
        for var, log_like_fun in cached:
            for k, chain in enumerate(trace.chains):
                log_like_chain = [
                    self.log_likelihood_vals_point(point, var, log_like_fun)
                    for point in trace.points([chain])
                ]
                log_likelihood_dict.insert(var.name, np.stack(log_like_chain), k)
        return log_likelihood_dict.trace_dict
@requires("trace")
def posterior_to_xarray(self):
"""Convert the posterior to an xarray dataset."""
var_names = get_default_varnames(self.trace.varnames, include_transformed=False)
data = {}
data_warmup = {}
for var_name in var_names:
if self.warmup_trace:
data_warmup[var_name] = np.array(
self.warmup_trace.get_values(var_name, combine=False, squeeze=False)
)
if self.posterior_trace:
data[var_name] = np.array(
self.posterior_trace.get_values(var_name, combine=False, squeeze=False)
)
return (
dict_to_dataset(
data,
library=pymc3,
coords=self.coords,
dims=self.dims,
attrs=self.attrs,
index_origin=self.index_origin,
),
dict_to_dataset(
data_warmup,
library=pymc3,
coords=self.coords,
dims=self.dims,
attrs=self.attrs,
index_origin=self.index_origin,
),
)
@requires("trace")
def sample_stats_to_xarray(self):
"""Extract sample_stats from PyMC3 trace."""
data = {}
rename_key = {
"model_logp": "lp",
"mean_tree_accept": "acceptance_rate",
"depth": "tree_depth",
"tree_size": "n_steps",
}
data = {}
data_warmup = {}
for stat in self.trace.stat_names:
name = rename_key.get(stat, stat)
if name == "tune":
continue
if self.warmup_trace:
data_warmup[name] = np.array(
self.warmup_trace.get_sampler_stats(stat, combine=False)
)
if self.posterior_trace:
data[name] = np.array(self.posterior_trace.get_sampler_stats(stat, combine=False))
return (
dict_to_dataset(
data,
library=pymc3,
dims=None,
coords=self.coords,
attrs=self.attrs,
index_origin=self.index_origin,
),
dict_to_dataset(
data_warmup,
library=pymc3,
dims=None,
coords=self.coords,
attrs=self.attrs,
index_origin=self.index_origin,
),
)
@requires("trace")
@requires("model")
def log_likelihood_to_xarray(self):
"""Extract log likelihood and log_p data from PyMC3 trace."""
if self.predictions or not self.log_likelihood:
return None
data_warmup = {}
data = {}
warn_msg = (
"Could not compute log_likelihood, it will be omitted. "
"Check your model object or set log_likelihood=False"
)
if self.posterior_trace:
try:
data = self._extract_log_likelihood(self.posterior_trace)
except TypeError:
warnings.warn(warn_msg)
if self.warmup_trace:
try:
data_warmup = self._extract_log_likelihood(self.warmup_trace)
except TypeError:
warnings.warn(warn_msg)
return (
dict_to_dataset(
data,
library=pymc3,
dims=self.dims,
coords=self.coords,
skip_event_dims=True,
index_origin=self.index_origin,
),
dict_to_dataset(
data_warmup,
library=pymc3,
dims=self.dims,
coords=self.coords,
skip_event_dims=True,
index_origin=self.index_origin,
),
)
def translate_posterior_predictive_dict_to_xarray(self, dct) -> xr.Dataset:
"""Take Dict of variables to numpy ndarrays (samples) and translate into dataset."""
data = {}
for k, ary in dct.items():
shape = ary.shape
if shape[0] == self.nchains and shape[1] == self.ndraws:
data[k] = ary
elif shape[0] == self.nchains * self.ndraws:
data[k] = ary.reshape((self.nchains, self.ndraws, *shape[1:]))
else:
data[k] = np.expand_dims(ary, 0)
# pylint: disable=line-too-long
_log.warning(
"posterior predictive variable %s's shape not compatible with number of chains and draws. "
"This can mean that some draws or even whole chains are not represented.",
k,
)
return dict_to_dataset(
data, library=pymc3, coords=self.coords, dims=self.dims, index_origin=self.index_origin
)
@requires(["posterior_predictive"])
def posterior_predictive_to_xarray(self):
"""Convert posterior_predictive samples to xarray."""
return self.translate_posterior_predictive_dict_to_xarray(self.posterior_predictive)
@requires(["predictions"])
def predictions_to_xarray(self):
"""Convert predictions (out of sample predictions) to xarray."""
return self.translate_posterior_predictive_dict_to_xarray(self.predictions)
def priors_to_xarray(self):
"""Convert prior samples (and if possible prior predictive too) to xarray."""
if self.prior is None:
return {"prior": None, "prior_predictive": None}
if self.observations is not None:
prior_predictive_vars = list(self.observations.keys())
prior_vars = [key for key in self.prior.keys() if key not in prior_predictive_vars]
else:
prior_vars = list(self.prior.keys())
prior_predictive_vars = None
priors_dict = {}
for group, var_names in zip(
("prior", "prior_predictive"), (prior_vars, prior_predictive_vars)
):
priors_dict[group] = (
None
if var_names is None
else dict_to_dataset(
{k: np.expand_dims(self.prior[k], 0) for k in var_names},
library=pymc3,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
)
)
return priors_dict
@requires("observations")
@requires("model")
def observed_data_to_xarray(self):
"""Convert observed data to xarray."""
if self.predictions:
return None
return dict_to_dataset(
self.observations,
library=pymc3,
coords=self.coords,
dims=self.dims,
default_dims=[],
index_origin=self.index_origin,
)
@requires(["trace", "predictions"])
@requires("model")
def constant_data_to_xarray(self):
"""Convert constant data to xarray."""
# For constant data, we are concerned only with deterministics and
# data. The constant data vars must be either pm.Data
# (TensorSharedVariable) or pm.Deterministic
constant_data_vars = {} # type: Dict[str, Var]
def is_data(name, var) -> bool:
assert self.model is not None
return (
var not in self.model.deterministics
and var not in self.model.observed_RVs
and var not in self.model.free_RVs
and var not in self.model.potentials
and (self.observations is None or name not in self.observations)
and isinstance(var, (Constant, SharedVariable))
)
# I don't know how to find pm.Data, except that they are named
# variables that aren't observed or free RVs, nor are they
# deterministics, and then we eliminate observations.
for name, var in self.model.named_vars.items():
if is_data(name, var):
constant_data_vars[name] = var
if not constant_data_vars:
return None
constant_data = {}
for name, vals in constant_data_vars.items():
if hasattr(vals, "get_value"):
vals = vals.get_value()
elif hasattr(vals, "data"):
vals = vals.data
constant_data[name] = vals
return dict_to_dataset(
constant_data,
library=pymc3,
coords=self.coords,
dims=self.dims,
default_dims=[],
index_origin=self.index_origin,
)
def to_inference_data(self):
"""Convert all available data to an InferenceData object.
Note that if groups can not be created (e.g., there is no `trace`, so
the `posterior` and `sample_stats` can not be extracted), then the InferenceData
will not have those groups.
"""
id_dict = {
"posterior": self.posterior_to_xarray(),
"sample_stats": self.sample_stats_to_xarray(),
"log_likelihood": self.log_likelihood_to_xarray(),
"posterior_predictive": self.posterior_predictive_to_xarray(),
"predictions": self.predictions_to_xarray(),
**self.priors_to_xarray(),
"observed_data": self.observed_data_to_xarray(),
}
if self.predictions:
id_dict["predictions_constant_data"] = self.constant_data_to_xarray()
else:
id_dict["constant_data"] = self.constant_data_to_xarray()
return InferenceData(save_warmup=self.save_warmup, **id_dict)
def to_inference_data(
    trace: Optional["MultiTrace"] = None,
    *,
    prior: Optional[Dict[str, Any]] = None,
    posterior_predictive: Optional[Dict[str, Any]] = None,
    log_likelihood: Union[bool, Iterable[str]] = True,
    coords: Optional[CoordSpec] = None,
    dims: Optional[DimSpec] = None,
    model: Optional["Model"] = None,
    save_warmup: Optional[bool] = None,
    density_dist_obs: bool = True,
) -> InferenceData:
    """Convert pymc3 data into an InferenceData object.

    At least one of ``trace``, ``prior`` and ``posterior_predictive`` must be
    present.  For a usage example read the
    :ref:`Creating InferenceData section on from_pymc3 <creating_InferenceData>`

    Parameters
    ----------
    trace : MultiTrace, optional
        Trace generated from MCMC sampling (output of
        :func:`~pymc3.sampling.sample`).  If an ``InferenceData`` is passed
        instead, it is returned unchanged.
    prior : dict, optional
        Variable names mapped to numpy arrays of prior and prior predictive
        samples.
    posterior_predictive : dict, optional
        Variable names mapped to numpy arrays of posterior predictive samples.
    log_likelihood : bool or array_like of str, optional
        Variables to calculate `log_likelihood` for.  ``True`` (default) uses
        all observed variables; ``False`` skips the group entirely.
    coords : dict of {str: array-like}, optional
        Map of coordinate names to coordinate values.
    dims : dict of {str: list of str}, optional
        Map of variable names to the coordinate names indexing their
        dimensions.
    model : Model, optional
        Model used to generate ``trace``; may be omitted inside a ``with``
        model context.
    save_warmup : bool, optional
        Whether to store warmup iterations; defaults to the rcParams setting.
    density_dist_obs : bool, default True
        Store variables passed with ``observed`` arg to
        :class:`~pymc.distributions.DensityDist` in the generated InferenceData.

    Returns
    -------
    arviz.InferenceData
    """
    if isinstance(trace, InferenceData):
        # Already converted; nothing to do.
        return trace
    converter = InferenceDataConverter(
        trace=trace,
        prior=prior,
        posterior_predictive=posterior_predictive,
        log_likelihood=log_likelihood,
        coords=coords,
        dims=dims,
        model=model,
        save_warmup=save_warmup,
        density_dist_obs=density_dist_obs,
    )
    return converter.to_inference_data()
### Later I could have this return ``None`` if the ``idata_orig`` argument is supplied. But
### perhaps we should have an inplace argument?
def predictions_to_inference_data(
    predictions,
    posterior_trace: Optional["MultiTrace"] = None,
    model: Optional["Model"] = None,
    coords: Optional[CoordSpec] = None,
    dims: Optional[DimSpec] = None,
    idata_orig: Optional[InferenceData] = None,
    inplace: bool = False,
) -> InferenceData:
    """Translate out-of-sample predictions into ``InferenceData``.

    Parameters
    ----------
    predictions: Dict[str, np.ndarray]
        The predictions are the return value of :func:`~pymc3.sample_posterior_predictive`,
        a dictionary of strings (variable names) to numpy ndarrays (draws).
    posterior_trace: MultiTrace
        This should be a trace that has been thinned appropriately for
        ``pymc3.sample_posterior_predictive``. Specifically, any variable whose shape is
        a deterministic function of the shape of any predictor (explanatory, independent, etc.)
        variables must be *removed* from this trace.
    model: Model
        The pymc3 model. It can be omitted if within a model context.
    coords: Dict[str, array-like[Any]]
        Coordinates for the variables. Map from coordinate names to coordinate values.
    dims: Dict[str, array-like[str]]
        Map from variable name to ordered set of coordinate names.
    idata_orig: InferenceData, optional
        If supplied, then modify this inference data in place, adding ``predictions`` and
        (if available) ``predictions_constant_data`` groups. If this is not supplied, make a
        fresh InferenceData
    inplace: boolean, optional
        If idata_orig is supplied and inplace is True, merge the predictions into idata_orig,
        rather than returning a fresh InferenceData object.

    Returns
    -------
    InferenceData:
        May be modified ``idata_orig``.
    """
    if inplace and not idata_orig:
        # Bug fix: the two implicitly-concatenated literals previously ran
        # together as "...passingan existing..."; a separating space is needed.
        raise ValueError(
            "Do not pass True for inplace unless passing "
            "an existing InferenceData as idata_orig"
        )
    new_idata = InferenceDataConverter(
        trace=posterior_trace,
        predictions=predictions,
        model=model,
        coords=coords,
        dims=dims,
        log_likelihood=False,
    ).to_inference_data()
    if idata_orig is None:
        return new_idata
    if inplace:
        concat([idata_orig, new_idata], dim=None, inplace=True)
        return idata_orig
    # if we are not returning in place, then merge the old groups into the new
    # inference data and return that.
    concat([new_idata, idata_orig], dim=None, copy=True, inplace=True)
    return new_idata
| [
"logging.getLogger",
"xarray.IndexVariable",
"arviz.data.base.dict_to_dataset",
"numpy.array",
"pymc3.aesaraf.extract_obs_data",
"numpy.where",
"numpy.ndim",
"numpy.stack",
"warnings.warn",
"pymc3.util.get_default_varnames",
"numpy.any",
"numpy.shape",
"arviz.data.base.generate_dims_coords",... | [((1054, 1080), 'logging.getLogger', 'logging.getLogger', (['"""pymc3"""'], {}), "('pymc3')\n", (1071, 1080), False, 'import logging\n'), ((11415, 11432), 'arviz.data.base.requires', 'requires', (['"""trace"""'], {}), "('trace')\n", (11423, 11432), False, 'from arviz.data.base import generate_dims_coords, make_attrs, requires\n'), ((12608, 12625), 'arviz.data.base.requires', 'requires', (['"""trace"""'], {}), "('trace')\n", (12616, 12625), False, 'from arviz.data.base import generate_dims_coords, make_attrs, requires\n'), ((13946, 13963), 'arviz.data.base.requires', 'requires', (['"""trace"""'], {}), "('trace')\n", (13954, 13963), False, 'from arviz.data.base import generate_dims_coords, make_attrs, requires\n'), ((13969, 13986), 'arviz.data.base.requires', 'requires', (['"""model"""'], {}), "('model')\n", (13977, 13986), False, 'from arviz.data.base import generate_dims_coords, make_attrs, requires\n'), ((16344, 16378), 'arviz.data.base.requires', 'requires', (["['posterior_predictive']"], {}), "(['posterior_predictive'])\n", (16352, 16378), False, 'from arviz.data.base import generate_dims_coords, make_attrs, requires\n'), ((16586, 16611), 'arviz.data.base.requires', 'requires', (["['predictions']"], {}), "(['predictions'])\n", (16594, 16611), False, 'from arviz.data.base import generate_dims_coords, make_attrs, requires\n'), ((17916, 17940), 'arviz.data.base.requires', 'requires', (['"""observations"""'], {}), "('observations')\n", (17924, 17940), False, 'from arviz.data.base import generate_dims_coords, make_attrs, requires\n'), ((17946, 17963), 'arviz.data.base.requires', 'requires', (['"""model"""'], {}), "('model')\n", (17954, 17963), False, 'from arviz.data.base import generate_dims_coords, make_attrs, requires\n'), ((18342, 18376), 'arviz.data.base.requires', 'requires', (["['trace', 'predictions']"], {}), "(['trace', 'predictions'])\n", (18350, 18376), False, 'from arviz.data.base import generate_dims_coords, 
make_attrs, requires\n'), ((18382, 18399), 'arviz.data.base.requires', 'requires', (['"""model"""'], {}), "('model')\n", (18390, 18399), False, 'from arviz.data.base import generate_dims_coords, make_attrs, requires\n'), ((2352, 2363), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (2360, 2363), True, 'import numpy as np\n'), ((3240, 3342), 'arviz.data.base.dict_to_dataset', '_dict_to_dataset', (['data'], {'library': 'library', 'coords': 'coords', 'dims': 'dims', 'skip_event_dims': 'skip_event_dims'}), '(data, library=library, coords=coords, dims=dims,\n skip_event_dims=skip_event_dims)\n', (3256, 3342), True, 'from arviz.data.base import dict_to_dataset as _dict_to_dataset\n'), ((4950, 4969), 'pymc3.model.modelcontext', 'modelcontext', (['model'], {}), '(model)\n', (4962, 4969), False, 'from pymc3.model import modelcontext\n'), ((11546, 11614), 'pymc3.util.get_default_varnames', 'get_default_varnames', (['self.trace.varnames'], {'include_transformed': '(False)'}), '(self.trace.varnames, include_transformed=False)\n', (11566, 11614), False, 'from pymc3.util import get_default_varnames\n'), ((21077, 21131), 'arviz.InferenceData', 'InferenceData', ([], {'save_warmup': 'self.save_warmup'}), '(save_warmup=self.save_warmup, **id_dict)\n', (21090, 21131), False, 'from arviz import InferenceData, concat, rcParams\n'), ((3452, 3471), 'numpy.atleast_1d', 'np.atleast_1d', (['vals'], {}), '(vals)\n', (3465, 3471), True, 'import numpy as np\n'), ((3541, 3609), 'arviz.data.base.generate_dims_coords', 'generate_dims_coords', (['vals.shape', 'name'], {'dims': 'val_dims', 'coords': 'coords'}), '(vals.shape, name, dims=val_dims, coords=coords)\n', (3561, 3609), False, 'from arviz.data.base import generate_dims_coords, make_attrs, requires\n'), ((3730, 3778), 'xarray.DataArray', 'xr.DataArray', (['vals'], {'dims': 'val_dims', 'coords': 'coords'}), '(vals, dims=val_dims, coords=coords)\n', (3742, 3778), True, 'import xarray as xr\n'), ((10325, 10361), 'numpy.where', 'np.where', 
(['mask', 'np.nan', 'log_like_val'], {}), '(mask, np.nan, log_like_val)\n', (10333, 10361), True, 'import numpy as np\n'), ((26181, 26236), 'arviz.concat', 'concat', (['[idata_orig, new_idata]'], {'dim': 'None', 'inplace': '(True)'}), '([idata_orig, new_idata], dim=None, inplace=True)\n', (26187, 26236), False, 'from arviz import InferenceData, concat, rcParams\n'), ((26406, 26472), 'arviz.concat', 'concat', (['[new_idata, idata_orig]'], {'dim': 'None', 'copy': '(True)', 'inplace': '(True)'}), '([new_idata, idata_orig], dim=None, copy=True, inplace=True)\n', (26412, 26472), False, 'from arviz import InferenceData, concat, rcParams\n'), ((3637, 3679), 'xarray.IndexVariable', 'xr.IndexVariable', (['(key,)'], {'data': 'coords[key]'}), '((key,), data=coords[key])\n', (3653, 3679), True, 'import xarray as xr\n'), ((3831, 3858), 'arviz.data.base.make_attrs', 'make_attrs', ([], {'library': 'library'}), '(library=library)\n', (3841, 3858), False, 'from arviz.data.base import generate_dims_coords, make_attrs, requires\n'), ((8589, 8636), 'warnings.warn', 'warnings.warn', (['f"""No data for observation {obs}"""'], {}), "(f'No data for observation {obs}')\n", (8602, 8636), False, 'import warnings\n'), ((10007, 10045), 'pymc3.aesaraf.extract_obs_data', 'extract_obs_data', (['var.tag.observations'], {}), '(var.tag.observations)\n', (10023, 10045), False, 'from pymc3.aesaraf import extract_obs_data\n'), ((10214, 10227), 'numpy.ndim', 'np.ndim', (['mask'], {}), '(mask)\n', (10221, 10227), True, 'import numpy as np\n'), ((10230, 10251), 'numpy.ndim', 'np.ndim', (['log_like_val'], {}), '(log_like_val)\n', (10237, 10251), True, 'import numpy as np\n'), ((10276, 10297), 'numpy.any', 'np.any', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (10282, 10297), True, 'import numpy as np\n'), ((5525, 5716), 'warnings.warn', 'warnings.warn', (['"""Warmup samples will be stored in posterior group and will not be excluded from stats and diagnostics. 
Do not slice the trace manually before conversion"""', 'UserWarning'], {}), "(\n 'Warmup samples will be stored in posterior group and will not be excluded from stats and diagnostics. Do not slice the trace manually before conversion'\n , UserWarning)\n", (5538, 5716), False, 'import warnings\n'), ((8348, 8373), 'pymc3.aesaraf.extract_obs_data', 'extract_obs_data', (['aux_obs'], {}), '(aux_obs)\n', (8364, 8373), False, 'from pymc3.aesaraf import extract_obs_data\n'), ((10092, 10164), 'warnings.warn', 'warnings.warn', (['f"""Could not extract data from symbolic observation {var}"""'], {}), "(f'Could not extract data from symbolic observation {var}')\n", (10105, 10164), False, 'import warnings\n'), ((11334, 11358), 'numpy.stack', 'np.stack', (['log_like_chain'], {}), '(log_like_chain)\n', (11342, 11358), True, 'import numpy as np\n'), ((14556, 14579), 'warnings.warn', 'warnings.warn', (['warn_msg'], {}), '(warn_msg)\n', (14569, 14579), False, 'import warnings\n'), ((14751, 14774), 'warnings.warn', 'warnings.warn', (['warn_msg'], {}), '(warn_msg)\n', (14764, 14774), False, 'import warnings\n'), ((15847, 15869), 'numpy.expand_dims', 'np.expand_dims', (['ary', '(0)'], {}), '(ary, 0)\n', (15861, 15869), True, 'import numpy as np\n'), ((2550, 2561), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (2558, 2561), True, 'import numpy as np\n'), ((8482, 8554), 'warnings.warn', 'warnings.warn', (['f"""Could not extract data from symbolic observation {obs}"""'], {}), "(f'Could not extract data from symbolic observation {obs}')\n", (8495, 8554), False, 'import warnings\n'), ((10688, 10698), 'pymc3.distributions.logpt', 'logpt', (['var'], {}), '(var)\n', (10693, 10698), False, 'from pymc3.distributions import logpt\n'), ((10810, 10820), 'pymc3.distributions.logpt', 'logpt', (['var'], {}), '(var)\n', (10815, 10820), False, 'from pymc3.distributions import logpt\n'), ((17634, 17666), 'numpy.expand_dims', 'np.expand_dims', (['self.prior[k]', '(0)'], {}), '(self.prior[k], 0)\n', 
(17648, 17666), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 <NAME> <<EMAIL>>
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Distribution License v1.0
# which accompanies this distribution.
#
# The Eclipse Distribution License is available at
# http://www.eclipse.org/org/documents/edl-v10.php.
#
# Contributors:
# <NAME> - initial implementation
# This shows a simple example of standard logging with an MQTT subscriber client.
import context  # Ensures paho is in PYTHONPATH
import paho.mqtt.client as mqtt
import logging

# Route all diagnostics (including paho's) through the stdlib logging module.
logging.basicConfig(level=logging.DEBUG)

# If you want to use a specific client id, use
# mqttc = mqtt.Client("client-id")
# but note that the client id must be unique on the broker. Leaving the client
# id parameter empty will generate a random id for you.
mqttc = mqtt.Client()
logger = logging.getLogger(__name__)
# Attach our logger so the client's internal events are logged too.
mqttc.enable_logger(logger)
mqttc.connect("mqtt.eclipseprojects.io", 1883, 60)
# $SYS/# covers the broker's status/statistics topics, QoS 0.
mqttc.subscribe("$SYS/#", 0)
# Block forever, dispatching network traffic and handling reconnects.
mqttc.loop_forever()
| [
"logging.basicConfig",
"paho.mqtt.client.Client",
"logging.getLogger"
] | [((609, 649), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (628, 649), False, 'import logging\n'), ((876, 889), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {}), '()\n', (887, 889), True, 'import paho.mqtt.client as mqtt\n'), ((900, 927), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (917, 927), False, 'import logging\n')] |
import logging
import unittest
logger = logging.getLogger("aoc2020.day_5")
logger.setLevel(logging.DEBUG)
from aoc2020.day_5 import *
class ParseBinaryTestCase(unittest.TestCase):
    """Tests for parse_binary on the two halves of a boarding pass."""

    def setUp(self):
        self.binary = "FBFBBFFRLR"

    def test_parse_column(self):
        # First seven characters encode the row using F/B digits.
        self.assertEqual(parse_binary(self.binary[:7], symbols="FB"), 44)

    def test_parse_row(self):
        # Last three characters encode the column using L/R digits.
        self.assertEqual(parse_binary(self.binary[7:], symbols="LR"), 5)
class ReadBoardingPassTestCase(unittest.TestCase):
    """Tests for read_boarding_pass over the four example passes."""

    def setUp(self):
        self.input_list = [
            "FBFBBFFRLR",
            "BFFFBBFRRR",
            "FFFBBBFRRR",
            "BBFFBBFRLL",
        ]

    def test_single_boarding_pass(self):
        # Map the parser directly; the previous ``lambda x: read_boarding_pass(x)``
        # wrapper was redundant.
        result = list(map(read_boarding_pass, self.input_list))
        expected = (357, 567, 119, 820)
        for index, item in enumerate(expected):
            with self.subTest(i=index):
                self.assertEqual(result[index]["seat_id"], item)
class FindMissingTestCase(unittest.TestCase):
    """Tests for find_missing on a seat list with exactly one gap."""

    def setUp(self):
        # Seat 45 is deliberately absent from the sequence 44..49.
        self.seat_ids = [{"seat_id": n} for n in (44, 46, 47, 48, 49)]

    def test_find_missing(self):
        self.assertEqual(find_missing(seat_ids=self.seat_ids), 45)
class MainTestCase(unittest.TestCase):
    """Tests for main with and without the --find-missing flag."""

    def setUp(self):
        self.input_list = [
            "FBFBBFFRLR",
            "BFFFBBFRRR",
            "FFFBBBFRRR",
            "BBFFBBFRLL",
        ]

    def _run_main(self, find_missing_flag):
        # Helper building the docopt-style argument mapping main expects.
        return main({"-": self.input_list, "--find-missing": find_missing_flag})

    def test_main_find_max(self):
        self.assertEqual(self._run_main(False), 820)

    def test_main_find_missing(self):
        self.assertEqual(self._run_main(True), 120)
| [
"logging.getLogger"
] | [((41, 75), 'logging.getLogger', 'logging.getLogger', (['"""aoc2020.day_5"""'], {}), "('aoc2020.day_5')\n", (58, 75), False, 'import logging\n')] |
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from networking_cisco.plugins.cisco.cfg_agent.device_drivers import (
cisco_csr_rest_client)
from neutron.i18n import _LE
from oslo_log import log as logging
from neutron_fwaas.services.firewall.drivers import fwaas_base
# Module-level logger for this driver.
LOG = logging.getLogger(__name__)
#----- ACL REST URL definitions -------------------------------------------
# Paths are relative to the CSR REST API root; '%s' placeholders are filled
# with the ACL id and, for the last template, the interface id + direction.
ACL_API = 'acl'
ACL_API_ACLID = 'acl/%s'  # ACLID
ACL_API_ACLID_IF = 'acl/%s/interfaces'  # ACLID
ACL_API_ACLID_IFID_DIR = 'acl/%s/interfaces/%s_%s'  # ACLID, IF_ID, DIRECTION
class CsrAclDriver(fwaas_base.FwaasDriverBase):
    """Cisco CSR ACL driver for FWaaS.

    This driver will send ACL configuration via RESTAPI to CSR1kv.
    This driver will return error to the caller function in case of
    error such as validation failures, sending configuration failures.
    The caller function will handle the error return properly.
    """

    def __init__(self):
        LOG.debug("Initializing fwaas CSR ACL driver")

    def _get_csr_host(self, firewall_vendor_ext):
        """Build a CSR REST client from the firewall's vendor-extension data."""
        settings = {
            'rest_mgmt_ip': firewall_vendor_ext['host_mngt_ip'],
            'username': firewall_vendor_ext['host_usr_nm'],
            'password': firewall_vendor_ext['host_usr_pw'],
            'timeout': 30,
        }
        return cisco_csr_rest_client.CsrRestClient(settings)

    def _validate_firewall_rule_data(self, firewall):
        """Validate the firewall's rule list.

        Every rule must carry a name, IP version 4, a protocol, and an
        'allow'/'deny' action.

        :return: True when all rules are valid, False otherwise.
        """
        if 'firewall_rule_list' not in firewall:
            LOG.error(_LE("no rule list"))
            return False
        for rule in firewall['firewall_rule_list']:
            if 'name' not in rule:
                LOG.error(_LE("CsrAcl: no rule name"))
                return False
            ip_version = rule.get('ip_version')
            if ip_version != 4:
                # Only IPv4 ACLs are supported by this driver.
                LOG.error(_LE("invalid ip version %(ip_version)s in "
                              "rule %(rule)s"),
                          {'ip_version': ip_version, 'rule': rule['name']})
                return False
            if 'protocol' not in rule:
                LOG.error(_LE("no protocol in rule [%s]"), rule['name'])
                return False
            if rule.get('action', '').lower() not in ('allow', 'deny'):
                LOG.error(_LE("invalid action in rule [%s]"), rule['name'])
                return False
        return True

    def _validate_firewall_data(self, firewall):
        """Validate the firewall's top-level and vendor-extension fields.

        :return: True when all required keys and per-interface data are
            present and well-formed, False otherwise.
        """
        data = ('admin_state_up', 'vendor_ext')
        is_valid = all(x in firewall for x in data)
        if not is_valid:
            LOG.error(_LE("missing data in firewall"))
            return is_valid
        data = ('host_mngt_ip', 'host_usr_nm', 'host_usr_pw', 'if_list')
        is_valid = all(x in firewall['vendor_ext'] for x in data)
        if not is_valid:
            LOG.error(_LE("missing data in firewall vendor_ext"))
            return is_valid
        for firewall_interface in firewall['vendor_ext']['if_list']:
            if firewall_interface.get('direction', '') not in (
                    'inside', 'outside', 'both'):
                LOG.error(_LE("invalid direction"))
                return False
            if 'port' not in firewall_interface:
                LOG.error(_LE("no port"))
                return False
            port = firewall_interface['port']
            if 'id' not in port:
                LOG.error(_LE("no port id"))
                return False
            if 'hosting_info' not in port:
                LOG.error(_LE("no hosting_info"))
                return False
            if 'segmentation_id' not in port['hosting_info']:
                LOG.error(_LE("no segmentation_id"))
                return False
            if 'hosting_port_name' not in port['hosting_info']:
                LOG.error(_LE("hosting_port_name"))
                return False
            # Only the CSR hosting-port prefixes 't1_p:'/'t2_p:' are valid.
            interface_type = port['hosting_info'][
                'hosting_port_name'].split(':')[0] + ':'
            if interface_type not in ('t1_p:', 't2_p:'):
                LOG.error(_LE("invalide interface type %s"), interface_type)
                return False
        return True

    def _get_acl_l4_port(self, rule_port_name, rule, l4_opt):
        """Translate a 'start[:end]' port range into ACL l4-options keys.

        Mutates ``l4_opt`` in place; no-op when the rule has no port field.
        """
        if rule.get(rule_port_name):
            ports = rule[rule_port_name].split(':')
            if rule_port_name == 'source_port':
                port_prefix = 'src'
            else:
                port_prefix = 'dest'
            l4_opt[port_prefix + '-port-start'] = ports[0]
            if len(ports) == 2:
                l4_opt[port_prefix + '-port-end'] = ports[1]

    def _get_acl_rule_data(self, firewall):
        """Get ACL RESTAPI request data from firewall dictionary.

        Disabled rules are skipped; ACE sequence numbers start at 100.

        :return: ACL RESTAPI request data based on data from plugin.
        :return: {} if there is any error.
        """
        acl_rules_list = []
        seq = 100
        for rule in firewall['firewall_rule_list']:
            if not rule['enabled']:
                # Disabled rules never reach the CSR.
                continue
            ace_rule = {'sequence': str(seq)}
            seq += 1
            if rule.get('protocol'):
                ace_rule['protocol'] = rule['protocol']
            else:
                ace_rule['protocol'] = 'all'
            if rule['action'].lower() == 'allow':
                ace_rule['action'] = 'permit'
            else:
                ace_rule['action'] = 'deny'
            if rule.get('source_ip_address'):
                ace_rule['source'] = rule['source_ip_address']
            else:
                ace_rule['source'] = 'any'
            if rule.get('destination_ip_address'):
                ace_rule['destination'] = rule['destination_ip_address']
            else:
                ace_rule['destination'] = 'any'
            l4_opt = {}
            self._get_acl_l4_port('source_port', rule, l4_opt)
            self._get_acl_l4_port('destination_port', rule, l4_opt)
            if l4_opt:
                ace_rule['l4-options'] = l4_opt
            acl_rules_list.append(ace_rule)
        return {'rules': acl_rules_list}

    def _get_interface_name_from_hosting_port(self, port):
        """Derive the CSR sub-interface name for a hosting port.

        The hosting port number maps to a GigabitEthernet index
        (t1_p -> even, t2_p -> odd) and the segmentation id becomes the
        dot1q sub-interface.
        """
        vlan = port['hosting_info']['segmentation_id']
        interface_type, interface_num = port[
            'hosting_info']['hosting_port_name'].split(':')
        offset = 0 if interface_type == 't1_p' else 1
        interface_num = str(int(interface_num) * 2 + offset)
        return 'GigabitEthernet%s.%s' % (interface_num, vlan)

    def _post_acl_to_interfaces(self, firewall, csr, acl_id, status_data):
        """Apply the ACL to every firewall interface.

        Appends a per-port {'port_id', 'status'} entry ('OK'/'ERROR') to
        ``status_data['if_list']`` for each interface.
        """
        acl_interface_url = ACL_API_ACLID_IF % acl_id
        for firewall_interface in firewall['vendor_ext']['if_list']:
            if_name = self._get_interface_name_from_hosting_port(
                firewall_interface['port'])
            acl_interface_req = {
                'if-id': if_name,
                'direction': firewall_interface['direction']
            }
            LOG.debug("acl_interface_url %s", acl_interface_url)
            csr.post_request(acl_interface_url, acl_interface_req)
            if csr.status == requests.codes.CREATED:
                status_data['if_list'].append(
                    {'port_id': firewall_interface['port']['id'],
                     'status': 'OK'})
            else:
                LOG.error(_LE("status %s"), csr.status)
                status_data['if_list'].append(
                    {'port_id': firewall_interface['port']['id'],
                     'status': 'ERROR'})

    def _delete_acl_on_interface(self, csr, acl_id,
                                 csr_firewall_interface_list):
        """Remove the ACL from each interface/direction it is applied to.

        Errors are logged but not propagated.
        """
        for interface in csr_firewall_interface_list:
            my_api = ACL_API_ACLID_IFID_DIR % (
                acl_id, interface['if-id'], interface['direction'])
            csr.delete_request(my_api)
            if csr.status != requests.codes.NO_CONTENT:
                LOG.error(_LE("status %s"), csr.status)

    def _get_acl_interface(self, csr, acl_id):
        """Return the interfaces the ACL is applied to, or '' on error."""
        my_api = ACL_API_ACLID_IF % acl_id
        response = csr.get_request(my_api)
        if csr.status == requests.codes.OK:
            return response['items']
        LOG.error(_LE("status %s"), csr.status)
        return ''

    def _post_acl(self, csr, acl_data):
        """Create the ACL on the CSR; return the new ACL id, or '' on error."""
        response = csr.post_request(ACL_API, acl_data)
        if csr.status == requests.codes.CREATED:
            # The new ACL id is the last path segment of the location URL.
            return response[response.rfind('/') + 1:]
        LOG.error(_LE("status %s"), csr.status)
        return ''

    def _delete_acl(self, csr, acl_id):
        """Delete the ACL on the CSR; return True on success."""
        my_api = ACL_API_ACLID % acl_id
        csr.delete_request(my_api)
        if csr.status == requests.codes.NO_CONTENT:
            return True
        LOG.error(_LE("status %s"), csr.status)
        return False

    def _put_acl(self, csr, acl_id, acl_data):
        """Replace the ACL's rules on the CSR; return True on success."""
        my_api = ACL_API_ACLID % acl_id
        csr.put_request(my_api, acl_data)
        if csr.status == requests.codes.NO_CONTENT:
            return True
        LOG.error(_LE("status %s"), csr.status)
        return False

    def _create_firewall(self, firewall):
        """Create ACL and apply ACL to interfaces.

        When the firewall is administratively down, the ACL is created but
        not applied to any interface.

        :param firewall: firewall dictionary
        :return: True and status_data if OK
        :return: False and status_data if there is an error
        """
        LOG.debug("firewall %s", firewall)
        if not self._validate_firewall_data(firewall):
            return False, {}
        if not self._validate_firewall_rule_data(firewall):
            return False, {}
        csr = self._get_csr_host(firewall['vendor_ext'])
        acl_data = self._get_acl_rule_data(firewall)
        LOG.debug("acl_data %s", acl_data)
        acl_id = self._post_acl(csr, acl_data)
        if not acl_id:
            LOG.debug("No acl_id created, acl_data %s", acl_data)
            return False, {}
        LOG.debug("new ACL ID: %s", acl_id)
        status_data = {
            'fw_id': firewall['id'],
            'acl_id': acl_id,
            'if_list': []
        }
        if not firewall['admin_state_up']:
            LOG.debug("status %s", status_data)
            return True, status_data
        # apply ACL to interfaces
        self._post_acl_to_interfaces(firewall, csr, acl_id, status_data)
        LOG.debug("status %s", status_data)
        return True, status_data

    def _delete_firewall(self, firewall):
        """Delete ACL.

        :param firewall: firewall dictionary
        :return: True if OK
        :return: False if there is an error
        """
        if not self._validate_firewall_data(firewall):
            return False
        acl_id = firewall['vendor_ext'].get('acl_id')
        if not acl_id:
            LOG.error(_LE("firewal (%s) has no acl_id"), firewall['id'])
            return False
        csr = self._get_csr_host(firewall['vendor_ext'])
        return self._delete_acl(csr, acl_id)

    def _update_firewall(self, firewall):
        """Update ACL and associated interfacesr.

        Replaces the ACL's rules, detaches the ACL from all interfaces it
        is currently on, then re-attaches it (unless admin_state_up is
        False, in which case it is left detached).

        :param firewall: firewall dictionry
        :return: True and status_data if OK
        :return: False and {} if there is an error
        """
        if not self._validate_firewall_data(firewall):
            return False, {}
        if not self._validate_firewall_rule_data(firewall):
            return False, {}
        acl_id = firewall['vendor_ext'].get('acl_id')
        if not acl_id:
            LOG.error(_LE("firewal (%s) has no acl_id"), firewall['id'])
            return False, {}
        csr = self._get_csr_host(firewall['vendor_ext'])
        rest_acl_rules = self._get_acl_rule_data(firewall)
        rest_acl_rules['acl-id'] = acl_id
        # update ACL rules
        response = self._put_acl(csr, acl_id, rest_acl_rules)
        if not response:
            return False, {}
        status_data = {
            'fw_id': firewall['id'],
            'acl_id': acl_id,
            'if_list': []
        }
        # update ACL interface
        # get all interfaces with this acl_id
        csr_fw_interface_list = self._get_acl_interface(csr, acl_id)
        self._delete_acl_on_interface(csr, acl_id, csr_fw_interface_list)
        if not firewall['admin_state_up']:
            return True, status_data
        self._post_acl_to_interfaces(firewall, csr, acl_id, status_data)
        return True, status_data

    def create_firewall(self, agent_mode, apply_list, firewall):
        """Create firewall on CSR."""
        LOG.debug("create_firewall: firewall %s", firewall)
        return self._create_firewall(firewall)

    def delete_firewall(self, agent_mode, apply_list, firewall):
        """Delete firewall on CSR."""
        LOG.debug("delete_firewall: firewall %s", firewall)
        return self._delete_firewall(firewall)

    def update_firewall(self, agent_mode, apply_list, firewall):
        """Update firewall on CSR."""
        LOG.debug("update_firewall: firewall %s", firewall)
        return self._update_firewall(firewall)

    def apply_default_policy(self, agent_mode, apply_list, firewall):
        # CSR firewall driver does not support this for now
        LOG.debug("apply_default_policy")
| [
"neutron.i18n._LE",
"networking_cisco.plugins.cisco.cfg_agent.device_drivers.cisco_csr_rest_client.CsrRestClient",
"oslo_log.log.getLogger"
] | [((886, 913), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (903, 913), True, 'from oslo_log import log as logging\n'), ((1957, 2002), 'networking_cisco.plugins.cisco.cfg_agent.device_drivers.cisco_csr_rest_client.CsrRestClient', 'cisco_csr_rest_client.CsrRestClient', (['settings'], {}), '(settings)\n', (1992, 2002), False, 'from networking_cisco.plugins.cisco.cfg_agent.device_drivers import cisco_csr_rest_client\n'), ((8709, 8725), 'neutron.i18n._LE', '_LE', (['"""status %s"""'], {}), "('status %s')\n", (8712, 8725), False, 'from neutron.i18n import _LE\n'), ((8975, 8991), 'neutron.i18n._LE', '_LE', (['"""status %s"""'], {}), "('status %s')\n", (8978, 8991), False, 'from neutron.i18n import _LE\n'), ((9234, 9250), 'neutron.i18n._LE', '_LE', (['"""status %s"""'], {}), "('status %s')\n", (9237, 9250), False, 'from neutron.i18n import _LE\n'), ((9510, 9526), 'neutron.i18n._LE', '_LE', (['"""status %s"""'], {}), "('status %s')\n", (9513, 9526), False, 'from neutron.i18n import _LE\n'), ((2129, 2148), 'neutron.i18n._LE', '_LE', (['"""no rule list"""'], {}), "('no rule list')\n", (2132, 2148), False, 'from neutron.i18n import _LE\n'), ((3169, 3200), 'neutron.i18n._LE', '_LE', (['"""missing data in firewall"""'], {}), "('missing data in firewall')\n", (3172, 3200), False, 'from neutron.i18n import _LE\n'), ((3417, 3459), 'neutron.i18n._LE', '_LE', (['"""missing data in firewall vendor_ext"""'], {}), "('missing data in firewall vendor_ext')\n", (3420, 3459), False, 'from neutron.i18n import _LE\n'), ((11218, 11251), 'neutron.i18n._LE', '_LE', (['"""firewal (%s) has no acl_id"""'], {}), "('firewal (%s) has no acl_id')\n", (11221, 11251), False, 'from neutron.i18n import _LE\n'), ((11916, 11949), 'neutron.i18n._LE', '_LE', (['"""firewal (%s) has no acl_id"""'], {}), "('firewal (%s) has no acl_id')\n", (11919, 11949), False, 'from neutron.i18n import _LE\n'), ((2288, 2315), 'neutron.i18n._LE', '_LE', (['"""CsrAcl: no rule name"""'], 
{}), "('CsrAcl: no rule name')\n", (2291, 2315), False, 'from neutron.i18n import _LE\n'), ((2452, 2509), 'neutron.i18n._LE', '_LE', (['"""invalid ip version %(ip_version)s in rule %(rule)s"""'], {}), "('invalid ip version %(ip_version)s in rule %(rule)s')\n", (2455, 2509), False, 'from neutron.i18n import _LE\n'), ((2698, 2729), 'neutron.i18n._LE', '_LE', (['"""no protocol in rule [%s]"""'], {}), "('no protocol in rule [%s]')\n", (2701, 2729), False, 'from neutron.i18n import _LE\n'), ((2872, 2906), 'neutron.i18n._LE', '_LE', (['"""invalid action in rule [%s]"""'], {}), "('invalid action in rule [%s]')\n", (2875, 2906), False, 'from neutron.i18n import _LE\n'), ((3695, 3719), 'neutron.i18n._LE', '_LE', (['"""invalid direction"""'], {}), "('invalid direction')\n", (3698, 3719), False, 'from neutron.i18n import _LE\n'), ((3825, 3839), 'neutron.i18n._LE', '_LE', (['"""no port"""'], {}), "('no port')\n", (3828, 3839), False, 'from neutron.i18n import _LE\n'), ((3975, 3992), 'neutron.i18n._LE', '_LE', (['"""no port id"""'], {}), "('no port id')\n", (3978, 3992), False, 'from neutron.i18n import _LE\n'), ((4092, 4114), 'neutron.i18n._LE', '_LE', (['"""no hosting_info"""'], {}), "('no hosting_info')\n", (4095, 4114), False, 'from neutron.i18n import _LE\n'), ((4233, 4258), 'neutron.i18n._LE', '_LE', (['"""no segmentation_id"""'], {}), "('no segmentation_id')\n", (4236, 4258), False, 'from neutron.i18n import _LE\n'), ((4379, 4403), 'neutron.i18n._LE', '_LE', (['"""hosting_port_name"""'], {}), "('hosting_port_name')\n", (4382, 4403), False, 'from neutron.i18n import _LE\n'), ((4625, 4658), 'neutron.i18n._LE', '_LE', (['"""invalide interface type %s"""'], {}), "('invalide interface type %s')\n", (4628, 4658), False, 'from neutron.i18n import _LE\n'), ((7854, 7870), 'neutron.i18n._LE', '_LE', (['"""status %s"""'], {}), "('status %s')\n", (7857, 7870), False, 'from neutron.i18n import _LE\n'), ((8445, 8461), 'neutron.i18n._LE', '_LE', (['"""status %s"""'], {}), "('status 
%s')\n", (8448, 8461), False, 'from neutron.i18n import _LE\n')] |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import urllib
from tempest.common.rest_client import RestClient
from tempest import exceptions
class SecurityGroupsClientJSON(RestClient):
    """JSON REST client for the Nova os-security-groups API."""

    def __init__(self, config, username, password, auth_url, tenant_name=None):
        super(SecurityGroupsClientJSON, self).__init__(config, username,
                                                       password, auth_url,
                                                       tenant_name)
        self.service = self.config.compute.catalog_type

    def list_security_groups(self, params=None):
        """List all security groups for a user."""
        url = 'os-security-groups'
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        parsed = json.loads(body)
        return resp, parsed['security_groups']

    def get_security_group(self, security_group_id):
        """Get the details of a Security Group."""
        resp, body = self.get("os-security-groups/%s" % str(security_group_id))
        parsed = json.loads(body)
        return resp, parsed['security_group']

    def create_security_group(self, name, description):
        """Create a new security group.

        name (Required): Name of security group.
        description (Required): Description of security group.
        """
        group = {
            'name': name,
            'description': description,
        }
        payload = json.dumps({'security_group': group})
        resp, body = self.post('os-security-groups', payload, self.headers)
        parsed = json.loads(body)
        return resp, parsed['security_group']

    def update_security_group(self, security_group_id, name=None,
                              description=None):
        """Update a security group.

        security_group_id: a security_group to update
        name: new name of security group
        description: new description of security group
        """
        updates = {}
        if name:
            updates['name'] = name
        if description:
            updates['description'] = description
        payload = json.dumps({'security_group': updates})
        resp, body = self.put('os-security-groups/%s' % str(security_group_id),
                              payload, self.headers)
        parsed = json.loads(body)
        return resp, parsed['security_group']

    def delete_security_group(self, security_group_id):
        """Delete the provided Security Group."""
        return self.delete('os-security-groups/%s' % str(security_group_id))

    def create_security_group_rule(self, parent_group_id, ip_proto, from_port,
                                   to_port, **kwargs):
        """Create a new security group rule.

        parent_group_id :ID of Security group
        ip_protocol : ip_proto (icmp, tcp, udp).
        from_port: Port at start of range.
        to_port : Port at end of range.
        Following optional keyword arguments are accepted:
        cidr : CIDR for address range.
        group_id : ID of the Source group
        """
        rule = {
            'parent_group_id': parent_group_id,
            'ip_protocol': ip_proto,
            'from_port': from_port,
            'to_port': to_port,
            'cidr': kwargs.get('cidr'),
            'group_id': kwargs.get('group_id'),
        }
        payload = json.dumps({'security_group_rule': rule})
        resp, body = self.post('os-security-group-rules', payload,
                               self.headers)
        parsed = json.loads(body)
        return resp, parsed['security_group_rule']

    def delete_security_group_rule(self, group_rule_id):
        """Delete the provided Security Group rule."""
        return self.delete('os-security-group-rules/%s' % str(group_rule_id))

    def list_security_group_rules(self, security_group_id):
        """List all rules for a security group."""
        resp, body = self.get('os-security-groups')
        parsed = json.loads(body)
        for group in parsed['security_groups']:
            if group['id'] == security_group_id:
                return resp, group['rules']
        raise exceptions.NotFound('No such Security Group')
| [
"urllib.urlencode",
"json.loads",
"json.dumps",
"tempest.exceptions.NotFound"
] | [((1402, 1418), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (1412, 1418), False, 'import json\n'), ((1682, 1698), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (1692, 1698), False, 'import json\n'), ((2092, 2133), 'json.dumps', 'json.dumps', (["{'security_group': post_body}"], {}), "({'security_group': post_body})\n", (2102, 2133), False, 'import json\n'), ((2227, 2243), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (2237, 2243), False, 'import json\n'), ((2783, 2824), 'json.dumps', 'json.dumps', (["{'security_group': post_body}"], {}), "({'security_group': post_body})\n", (2793, 2824), False, 'import json\n'), ((2975, 2991), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (2985, 2991), False, 'import json\n'), ((4041, 4087), 'json.dumps', 'json.dumps', (["{'security_group_rule': post_body}"], {}), "({'security_group_rule': post_body})\n", (4051, 4087), False, 'import json\n'), ((4204, 4220), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (4214, 4220), False, 'import json\n'), ((4641, 4657), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (4651, 4657), False, 'import json\n'), ((4802, 4847), 'tempest.exceptions.NotFound', 'exceptions.NotFound', (['"""No such Security Group"""'], {}), "('No such Security Group')\n", (4821, 4847), False, 'from tempest import exceptions\n'), ((1326, 1350), 'urllib.urlencode', 'urllib.urlencode', (['params'], {}), '(params)\n', (1342, 1350), False, 'import urllib\n')] |
"""Tests for the lms module itself."""
import logging
import mimetypes
from django.conf import settings # lint-amnesty, pylint: disable=unused-import
from django.test import TestCase
log = logging.getLogger(__name__)
class LmsModuleTests(TestCase):
    """
    Tests for lms module itself.
    """

    def test_new_mimetypes(self):
        """Font file extensions must resolve to a known mimetype."""
        for ext in ('eot', 'otf', 'ttf', 'woff'):
            guessed, _ = mimetypes.guess_type('test.' + ext)
            assert guessed is not None

    def test_api_docs(self):
        """
        Tests that requests to the `/api-docs/` endpoint do not raise an exception.
        """
        response = self.client.get('/api-docs/')
        assert response.status_code == 200
| [
"logging.getLogger",
"mimetypes.guess_type"
] | [((194, 221), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (211, 221), False, 'import logging\n'), ((454, 495), 'mimetypes.guess_type', 'mimetypes.guess_type', (["('test.' + extension)"], {}), "('test.' + extension)\n", (474, 495), False, 'import mimetypes\n')] |
from __future__ import print_function
import json
import os
import requests
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# User id sent with every demo request.
DEMO_UID = 0
# Keys present in Clipper prediction responses.
PREDICTION_RESPONSE_KEY_QUERY_ID = "query_id"
PREDICTION_RESPONSE_KEY_OUTPUT = "output"
PREDICTION_RESPONSE_KEY_USED_DEFAULT = "default"
# Keys present in Clipper error responses.
PREDICTION_ERROR_RESPONSE_KEY_ERROR = "error"
PREDICTION_ERROR_RESPONSE_KEY_CAUSE = "cause"
# CIFAR-10 class names, in label order (index == numeric label).
classes = [
    'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
    'ship', 'truck'
]
# The demo is a binary task: airplane is the positive class, bird negative.
positive_class = classes.index('airplane')
negative_class = classes.index('bird')
def recover_pixels(x):
    """Reshape a flat 3072-value CIFAR vector back to a 32x32x3 image.

    The flat layout is channel-major (3, 32, 32); the channel axis is
    moved last for plotting.
    """
    return x.reshape(3, 32, 32).transpose(1, 2, 0)
def show_example_images(images, labels, num_rows):
    """Plot a random grid of example images titled with their class name.

    :param images: array of flat CIFAR image vectors
    :param labels: binary labels (1 -> positive class, 0 -> negative class)
    :param num_rows: number of grid rows; 6 images are drawn per row
    """
    imgs_per_row = 6
    num_images = imgs_per_row * num_rows
    idxs = np.random.randint(0, len(labels), num_images)
    f, axes = plt.subplots(
        nrows=num_rows,
        ncols=imgs_per_row,
        figsize=(1.5 * imgs_per_row, 1.5 * num_rows))
    f.tight_layout()
    for i, idx in enumerate(idxs):
        image = recover_pixels(images[idx])
        label = labels[idx]
        # Floor division keeps the row index an int under Python 3; plain
        # '/' would yield a float and break axes indexing (identical to the
        # original behavior under Python 2 integer division).
        cur_ax = axes[i // imgs_per_row][i % imgs_per_row]
        cur_ax.imshow(image.astype(np.ubyte), interpolation="nearest")
        cur_ax.axis('off')
        if label == 0:
            title = classes[negative_class]
        else:
            title = classes[positive_class]
        cur_ax.set_title(title)
def load_cifar(cifar_location, cifar_filename="cifar_train.data", norm=True):
    """Load a CIFAR CSV dump: first column is the label, rest are pixels.

    :param cifar_location: directory containing the data file
    :param cifar_filename: name of the CSV file inside that directory
    :param norm: when True, normalize each example to zero mean / unit std
    :return: (features, labels) arrays
    """
    source_path = os.path.join(cifar_location, cifar_filename)
    frame = pd.read_csv(source_path, sep=",", header=None)
    raw = frame.values
    print("Number of image files: %d" % len(raw))
    labels = raw[:, 0]
    features = raw[:, 1:]
    if not norm:
        return features, labels
    # Per-example standardization; zero-variance examples are left unscaled.
    mu = np.mean(features.T, 0)
    sigma = np.var(features.T, 0)
    scale = np.array([np.sqrt(s) if s > 0 else 1. for s in sigma])
    normalized = ((features.T - mu) / scale).T
    return normalized, labels
def filter_data(X, y):
    """Keep only the two demo classes and relabel them as binary.

    Examples labeled positive_class become 1.0, negative_class become 0.0;
    all other classes are dropped.
    """
    kept_examples, kept_labels = [], []
    for example, label in zip(X, y):
        if label == positive_class:
            kept_examples.append(example)
            kept_labels.append(1.0)
        elif label == negative_class:
            kept_examples.append(example)
            kept_labels.append(0.0)
    return np.array(kept_examples), np.array(kept_labels)
def cifar_update(host, app, uid, x, y, print_result=False):
    """POST one labeled example to the app's feedback (/update) endpoint."""
    url = "http://%s:1337/%s/update" % (host, app)
    payload = json.dumps({
        'uid': uid,
        'input': list(x),
        'label': float(y),
        # These updates aren't coming from predictions made by a particular
        # model, so we can ignore the model name and version fields.
        'model_name': 'NA',
        'model_version': 1
    })
    headers = {'Content-type': 'application/json'}
    start = datetime.now()
    response = requests.post(url, headers=headers, data=payload)
    end = datetime.now()
    latency_ms = (end - start).total_seconds() * 1000.0
    if print_result:
        print("'%s', %f ms" % (response.text, latency_ms))
def parse_pred(p):
    """Parse a JSON prediction response.

    :return: (query_id, prediction) ints on success; None (after logging
        the error details) on an error response or an unrecognized one.
    """
    parsed = json.loads(p)
    if PREDICTION_RESPONSE_KEY_OUTPUT in parsed:
        # Success path: extract the query id and predicted label.
        return (int(parsed[PREDICTION_RESPONSE_KEY_QUERY_ID]),
                int(parsed[PREDICTION_RESPONSE_KEY_OUTPUT]))
    if PREDICTION_ERROR_RESPONSE_KEY_ERROR in parsed:
        error_name = str(parsed[PREDICTION_ERROR_RESPONSE_KEY_ERROR])
        print(error_name)
        error_cause = str(parsed[PREDICTION_ERROR_RESPONSE_KEY_CAUSE])
        print("Error executing prediction!")
        print("{}: {}".format(error_name, error_cause))
    return None
def cifar_prediction(host, app, uid, x):
    """POST one example to the app's /predict endpoint.

    :return: (predicted_label, latency_ms) with the label mapped to
        0.0/1.0, or None when the prediction response was an error.
    """
    url = "http://%s/%s/predict" % (host, app)
    payload = json.dumps({'uid': uid, 'input': list(x)})
    headers = {'Content-type': 'application/json'}
    start = datetime.now()
    response = requests.post(url, headers=headers, data=payload)
    end = datetime.now()
    latency = (end - start).total_seconds() * 1000.0
    parsed = parse_pred(response.text)
    if not parsed:
        return None
    _, pred = parsed
    if pred == -1.0:
        # The model reports the negative class as -1; normalize to 0.
        pred = 0.0
    assert pred == 1.0 or pred == 0.0
    return (pred, latency)
def run_iteration(host, app, uid, test_x, test_y):
    """Run 100 random predictions and return summary statistics.

    :return: (accuracy, false_pos_rate, false_neg_rate, true_pos_rate,
        true_neg_rate, mean_latency_ms), each rate relative to the 100
        attempted queries.

    NOTE(review): failed predictions (parse errors) are skipped via
    ``continue`` but still count in the fixed denominator of 100, so all
    rates are deflated when errors occur — confirm this is intended.
    """
    correct = 0
    false_pos = 0
    false_neg = 0
    latencies = []
    true_pos = 0
    true_neg = 0
    total = 100
    for i in range(total):
        # Sample a random test example each query.
        example_num = np.random.randint(0, len(test_y))
        correct_y = float(test_y[example_num])
        prediction = cifar_prediction(host, app, uid, test_x[example_num])
        if not prediction:
            continue
        pred_y, latency = prediction
        if correct_y == pred_y:
            if correct_y == 0:
                true_neg += 1
            elif correct_y == 1:
                true_pos += 1
            correct += 1
        elif correct_y == 0 and pred_y == 1:
            false_pos += 1
        elif correct_y == 1 and pred_y == 0:
            false_neg += 1
        else:
            # Labels should be binary; anything else is logged.
            print("predicted: {p}, correct: {c}".format(p=pred_y, c=correct_y))
        latencies.append(latency)
    total = float(total)
    return (float(correct) / total, float(false_pos) / total,
            float(false_neg) / total, float(true_pos) / total,
            float(true_neg) / total, np.mean(latencies))
def run_serving_workload(host, app, test_x, test_y):
    """Continuously query the app, live-plotting accuracy per iteration.

    Runs forever; intended for interactive (notebook) use.
    """
    fig, (ax_acc) = plt.subplots(1, 1, sharex=True)
    ax_acc.set_ylabel("application accuracy")
    ax_acc.set_xlabel("iterations")
    ax_acc.set_ylim(0, 1.0)
    iteration_idxs = []
    accuracies = []
    mean_latencies = []
    iteration = 0
    uid = DEMO_UID
    while True:
        accuracy, _, _, _, _, mean_latency = run_iteration(
            host, app, uid, test_x, test_y)
        iteration_idxs.append(iteration)
        accuracies.append(accuracy)
        mean_latencies.append(mean_latency)
        iteration += 1
        ax_acc.set_xlim(0, iteration + 1)
        ax_acc.plot(iteration_idxs, accuracies, 'b')
        fig.tight_layout()
        fig.canvas.draw()
def run_serving_workload_show_latency(host, app, test_x, test_y):
    """Like run_serving_workload, but also live-plots mean latency."""
    fig, (ax_acc, ax_lat) = plt.subplots(2, 1, sharex=True)
    ax_acc.set_ylabel("accuracy")
    ax_lat.set_xlabel("time")
    ax_lat.set_ylabel("latency")
    ax_acc.set_ylim(0, 1.0)
    xs = []
    accs = []
    lats = []
    j = 0
    uid = DEMO_UID
    while True:
        correct, fp, fn, tp, tn, mean_lat, = run_iteration(
            host, app, uid, test_x, test_y)
        xs.append(j)
        accs.append(correct)
        lats.append(mean_lat)
        j += 1
        ax_acc.set_xlim(0, j + 1)
        ax_lat.set_xlim(0, j + 1)
        ax_acc.plot(xs, accs, 'b')
        ax_lat.plot(xs, lats, 'r')
        ax_lat.set_ylim(0, 300)
        fig.canvas.draw()
    # NOTE(review): the statements below follow an unconditional
    # `while True` loop and are therefore unreachable dead code; they were
    # probably meant to live inside the loop body. Left unchanged here.
    print(("Accuracy: {cor}, false positives: {fp}, "
           "false negatives: {fn}, true positives: {tp}, "
           "true negatives: {tn}").format(
               cor=correct, fp=fp, fn=fn, tp=tp, tn=tn))
    print("Mean latency: {lat} ms".format(lat=mean_lat))
def enable_feedback(host, app, test_x, test_y, num_updates):
    """Send num_updates randomly chosen labeled examples as feedback."""
    uid = DEMO_UID
    for _ in range(num_updates):
        idx = np.random.randint(0, len(test_y))
        cifar_update(host, app, uid, test_x[idx], float(test_y[idx]))
| [
"numpy.mean",
"json.loads",
"requests.post",
"numpy.sqrt",
"pandas.read_csv",
"os.path.join",
"numpy.array",
"datetime.datetime.now",
"matplotlib.pyplot.subplots",
"numpy.var"
] | [((883, 981), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'num_rows', 'ncols': 'imgs_per_row', 'figsize': '(1.5 * imgs_per_row, 1.5 * num_rows)'}), '(nrows=num_rows, ncols=imgs_per_row, figsize=(1.5 *\n imgs_per_row, 1.5 * num_rows))\n', (895, 981), True, 'import matplotlib.pyplot as plt\n'), ((1542, 1586), 'os.path.join', 'os.path.join', (['cifar_location', 'cifar_filename'], {}), '(cifar_location, cifar_filename)\n', (1554, 1586), False, 'import os\n'), ((1640, 1685), 'pandas.read_csv', 'pd.read_csv', (['cifar_path'], {'sep': '""","""', 'header': 'None'}), "(cifar_path, sep=',', header=None)\n", (1651, 1685), True, 'import pandas as pd\n'), ((2310, 2327), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (2318, 2327), True, 'import numpy as np\n'), ((2342, 2359), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (2350, 2359), True, 'import numpy as np\n'), ((2872, 2886), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2884, 2886), False, 'from datetime import datetime\n'), ((2895, 2945), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'data': 'req_json'}), '(url, headers=headers, data=req_json)\n', (2908, 2945), False, 'import requests\n'), ((2956, 2970), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2968, 2970), False, 'from datetime import datetime\n'), ((3137, 3150), 'json.loads', 'json.loads', (['p'], {}), '(p)\n', (3147, 3150), False, 'import json\n'), ((4058, 4072), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4070, 4072), False, 'from datetime import datetime\n'), ((4081, 4131), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'data': 'req_json'}), '(url, headers=headers, data=req_json)\n', (4094, 4131), False, 'import requests\n'), ((4142, 4156), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4154, 4156), False, 'from datetime import datetime\n'), ((5667, 5698), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], 
{'sharex': '(True)'}), '(1, 1, sharex=True)\n', (5679, 5698), True, 'import matplotlib.pyplot as plt\n'), ((6311, 6342), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)'}), '(2, 1, sharex=True)\n', (6323, 6342), True, 'import matplotlib.pyplot as plt\n'), ((1833, 1848), 'numpy.mean', 'np.mean', (['X.T', '(0)'], {}), '(X.T, 0)\n', (1840, 1848), True, 'import numpy as np\n'), ((1865, 1879), 'numpy.var', 'np.var', (['X.T', '(0)'], {}), '(X.T, 0)\n', (1871, 1879), True, 'import numpy as np\n'), ((5572, 5590), 'numpy.mean', 'np.mean', (['latencies'], {}), '(latencies)\n', (5579, 5590), True, 'import numpy as np\n'), ((1915, 1925), 'numpy.sqrt', 'np.sqrt', (['z'], {}), '(z)\n', (1922, 1925), True, 'import numpy as np\n')] |
"""
Module contains tools for processing files into DataFrames or other objects
"""
from __future__ import annotations
from collections import abc
import csv
import sys
from textwrap import fill
from typing import Any
import warnings
import numpy as np
import pandas._libs.lib as lib
from pandas._libs.parsers import STR_NA_VALUES
from pandas._typing import (
ArrayLike,
DtypeArg,
FilePathOrBuffer,
StorageOptions,
)
from pandas.errors import (
AbstractMethodError,
ParserWarning,
)
from pandas.util._decorators import (
Appender,
deprecate_nonkeyword_arguments,
)
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
is_file_like,
is_float,
is_integer,
is_list_like,
)
from pandas.core import generic
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import RangeIndex
from pandas.io.common import validate_header_arg
from pandas.io.parsers.base_parser import (
ParserBase,
is_index_col,
parser_defaults,
)
from pandas.io.parsers.c_parser_wrapper import CParserWrapper
from pandas.io.parsers.python_parser import (
FixedWidthFieldParser,
PythonParser,
)
_doc_read_csv_and_table = (
r"""
{summary}
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
Delimiter to use. If sep is None, the C engine cannot automatically detect
the separator, but the Python parsing engine can, meaning the latter will
be used and automatically detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header=None``. Explicitly pass ``header=0`` to be able to
replace existing names. The header can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If the file contains a header row,
then you should explicitly pass ``header=0`` to override the column names.
Duplicates in this list are not allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
Column(s) to use as the row labels of the ``DataFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
Note: ``index_col=False`` can be used to force pandas to *not* use the first
column as the index, e.g. when you have a malformed file with delimiters at
the end of each line.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a DataFrame from ``data`` with element order preserved use
``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If callable, the callable function will be evaluated against the column
names, returning names where the callable function evaluates to True. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
prefix : str, optional
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
Use `str` or `object` together with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {{'c', 'python'}}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, \
default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparsable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``pd.to_datetime`` after
``pd.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partially-applied
:func:`pandas.to_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : bool, default False
If True and `parse_dates` is enabled, pandas will attempt to infer the
format of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM format dates, international and European format.
cache_dates : bool, default True
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
.. versionadded:: 0.25.0
iterator : bool, default False
Return TextFileReader object for iteration or getting chunks with
``get_chunk()``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
for more information on ``iterator`` and ``chunksize``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
lineterminator : str (length 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in 'a,b,c' being
treated as the header.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
.. versionchanged:: 1.2
When ``encoding`` is ``None``, ``errors="replace"`` is passed to
``open()``. Otherwise, ``errors="strict"`` is passed to ``open()``.
This behavior was previously only the case for ``engine="python"``.
.. versionchanged:: 1.3.0
``encoding_errors`` is a new argument. ``encoding`` has no longer an
influence on how encoding errors are handled.
encoding_errors : str, optional, default "strict"
How encoding errors are treated. `List of possible values
<https://docs.python.org/3/library/codecs.html#error-handlers>`_ .
.. versionadded:: 1.3.0
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more details.
error_bad_lines : bool, default ``None``
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
If False, then these "bad lines" will be dropped from the DataFrame that is
returned.
.. deprecated:: 1.3.0
The ``on_bad_lines`` parameter should be used instead to specify behavior upon
encountering a bad line instead.
warn_bad_lines : bool, default ``None``
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
.. deprecated:: 1.3.0
The ``on_bad_lines`` parameter should be used instead to specify behavior upon
encountering a bad line instead.
on_bad_lines : {{'error', 'warn', 'skip'}}, default 'error'
Specifies what to do upon encountering a bad line (a line with too many fields).
Allowed values are :
- 'error', raise an Exception when a bad line is encountered.
- 'warn', raise a warning when a bad line is encountered and skip that line.
- 'skip', skip bad lines without raising or warning when they are encountered.
.. versionadded:: 1.3.0
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single DataFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
memory_map : bool, default False
If a filepath is provided for `filepath_or_buffer`, map the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer any I/O overhead.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are ``None`` or 'high' for the ordinary converter,
'legacy' for the original lower precision pandas converter, and
'round_trip' for the round-trip converter.
.. versionchanged:: 1.2
{storage_options}
.. versionadded:: 1.2
Returns
-------
DataFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
>>> pd.{func_name}('data.csv') # doctest: +SKIP
"""
)
# Option defaults that only the C engine understands; used both to fill in
# missing options and to detect options that other engines cannot honor.
_c_parser_defaults = {
    "delim_whitespace": False,
    "na_filter": True,
    "low_memory": True,
    "memory_map": False,
    "error_bad_lines": None,
    "warn_bad_lines": None,
    "float_precision": None,
}
# Defaults specific to read_fwf / the fixed-width ("python-fwf") engine.
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
# Options the C engine does not support (removed before handing off to it).
_c_unsupported = {"skipfooter"}
# Options the pure-Python engine does not support.
_python_unsupported = {"low_memory", "float_precision"}
# Deprecated options and their "not set by the user" sentinel values; a
# non-sentinel value triggers a FutureWarning in TextFileReader._clean_options.
_deprecated_defaults: dict[str, Any] = {"error_bad_lines": None, "warn_bad_lines": None}
_deprecated_args: set[str] = {"error_bad_lines", "warn_bad_lines"}
def validate_integer(name, val, min_val=0):
    """
    Checks whether the 'name' parameter for parsing is either
    an integer OR float that can SAFELY be cast to an integer
    without losing accuracy. Raises a ValueError if that is
    not the case.

    Parameters
    ----------
    name : str
        Parameter name (used for error reporting)
    val : int or float
        The value to check
    min_val : int
        Minimum allowed value (val < min_val will result in a ValueError)

    Returns
    -------
    int or None
        ``val`` cast to ``int``, or ``None`` if ``val`` was ``None``.

    Raises
    ------
    ValueError
        If ``val`` is a non-integral float, not a number, or below ``min_val``.
    """
    msg = f"'{name:s}' must be an integer >={min_val:d}"
    if val is not None:
        if is_float(val):
            if int(val) != val:
                raise ValueError(msg)
            val = int(val)
        # BUGFIX: previously the float branch skipped the min_val check, so
        # e.g. validate_integer("chunksize", -1.0, 1) returned -1 while the
        # equivalent int raised. Validate the (possibly converted) value here
        # so int and integral-float inputs behave identically.
        if not (is_integer(val) and val >= min_val):
            raise ValueError(msg)
    return val
def _validate_names(names):
"""
Raise ValueError if the `names` parameter contains duplicates or has an
invalid data type.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output DataFrame.
Raises
------
ValueError
If names are not unique or are not ordered (e.g. set).
"""
if names is not None:
if len(names) != len(set(names)):
raise ValueError("Duplicate names are not allowed.")
if not (
is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)
):
raise ValueError("Names should be an ordered collection.")
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
    """Generic reader of line files.

    Builds a :class:`TextFileReader` from ``kwds`` and either returns it
    (iterator/chunked mode) or eagerly reads and returns the parsed result.
    """
    # Supplying a date_parser implies date parsing even when parse_dates was
    # left as a bare boolean.
    if kwds.get("date_parser", None) is not None and isinstance(
        kwds["parse_dates"], bool
    ):
        kwds["parse_dates"] = True

    # Extract some of the arguments (pass chunksize on).
    iterator = kwds.get("iterator", False)
    chunksize = validate_integer("chunksize", kwds.get("chunksize", None), 1)
    nrows = kwds.get("nrows", None)

    # Check for duplicates in names.
    _validate_names(kwds.get("names", None))

    # Create the parser.
    parser = TextFileReader(filepath_or_buffer, **kwds)

    if iterator or chunksize:
        # Hand the (lazy) reader back to the caller.
        return parser

    # Eager path: read everything and make sure handles get closed.
    with parser:
        return parser.read(nrows)
# NOTE: the public docstring of read_csv is generated by the @Appender
# decorator from the shared _doc_read_csv_and_table template, so no inline
# docstring is (or should be) defined here.
@deprecate_nonkeyword_arguments(
    version=None, allowed_args=["filepath_or_buffer"], stacklevel=3
)
@Appender(
    _doc_read_csv_and_table.format(
        func_name="read_csv",
        summary="Read a comma-separated values (csv) file into DataFrame.",
        _default_sep="','",
        storage_options=generic._shared_docs["storage_options"],
    )
)
def read_csv(
    filepath_or_buffer: FilePathOrBuffer,
    sep=lib.no_default,
    delimiter=None,
    # Column and Index Locations and Names
    header="infer",
    names=lib.no_default,
    index_col=None,
    usecols=None,
    squeeze=False,
    prefix=lib.no_default,
    mangle_dupe_cols=True,
    # General Parsing Configuration
    dtype: DtypeArg | None = None,
    engine=None,
    converters=None,
    true_values=None,
    false_values=None,
    skipinitialspace=False,
    skiprows=None,
    skipfooter=0,
    nrows=None,
    # NA and Missing Data Handling
    na_values=None,
    keep_default_na=True,
    na_filter=True,
    verbose=False,
    skip_blank_lines=True,
    # Datetime Handling
    parse_dates=False,
    infer_datetime_format=False,
    keep_date_col=False,
    date_parser=None,
    dayfirst=False,
    cache_dates=True,
    # Iteration
    iterator=False,
    chunksize=None,
    # Quoting, Compression, and File Format
    compression="infer",
    thousands=None,
    decimal: str = ".",
    lineterminator=None,
    quotechar='"',
    quoting=csv.QUOTE_MINIMAL,
    doublequote=True,
    escapechar=None,
    comment=None,
    encoding=None,
    encoding_errors: str | None = "strict",
    dialect=None,
    # Error Handling
    error_bad_lines=None,
    warn_bad_lines=None,
    # TODO (2.0): set on_bad_lines to "error".
    # See _refine_defaults_read comment for why we do this.
    on_bad_lines=None,
    # Internal
    delim_whitespace=False,
    low_memory=_c_parser_defaults["low_memory"],
    memory_map=False,
    float_precision=None,
    storage_options: StorageOptions = None,
):
    # Snapshot every keyword argument into a dict; this relies on locals()
    # containing exactly the parameters, so no local variables may be bound
    # before this point.
    # locals() should never be modified
    kwds = locals().copy()
    del kwds["filepath_or_buffer"]
    del kwds["sep"]
    # Reconcile sep/delimiter/dialect/engine interactions and the deprecated
    # bad-lines options into a single coherent option set.
    kwds_defaults = _refine_defaults_read(
        dialect,
        delimiter,
        delim_whitespace,
        engine,
        sep,
        error_bad_lines,
        warn_bad_lines,
        on_bad_lines,
        names,
        prefix,
        defaults={"delimiter": ","},
    )
    kwds.update(kwds_defaults)
    return _read(filepath_or_buffer, kwds)
# NOTE: read_table mirrors read_csv with a tab default delimiter; its public
# docstring is likewise generated from the shared template by @Appender.
@deprecate_nonkeyword_arguments(
    version=None, allowed_args=["filepath_or_buffer"], stacklevel=3
)
@Appender(
    _doc_read_csv_and_table.format(
        func_name="read_table",
        summary="Read general delimited file into DataFrame.",
        _default_sep=r"'\\t' (tab-stop)",
        storage_options=generic._shared_docs["storage_options"],
    )
)
def read_table(
    filepath_or_buffer: FilePathOrBuffer,
    sep=lib.no_default,
    delimiter=None,
    # Column and Index Locations and Names
    header="infer",
    names=lib.no_default,
    index_col=None,
    usecols=None,
    squeeze=False,
    prefix=lib.no_default,
    mangle_dupe_cols=True,
    # General Parsing Configuration
    dtype: DtypeArg | None = None,
    engine=None,
    converters=None,
    true_values=None,
    false_values=None,
    skipinitialspace=False,
    skiprows=None,
    skipfooter=0,
    nrows=None,
    # NA and Missing Data Handling
    na_values=None,
    keep_default_na=True,
    na_filter=True,
    verbose=False,
    skip_blank_lines=True,
    # Datetime Handling
    parse_dates=False,
    infer_datetime_format=False,
    keep_date_col=False,
    date_parser=None,
    dayfirst=False,
    cache_dates=True,
    # Iteration
    iterator=False,
    chunksize=None,
    # Quoting, Compression, and File Format
    compression="infer",
    thousands=None,
    decimal: str = ".",
    lineterminator=None,
    quotechar='"',
    quoting=csv.QUOTE_MINIMAL,
    doublequote=True,
    escapechar=None,
    comment=None,
    encoding=None,
    dialect=None,
    # Error Handling
    error_bad_lines=None,
    warn_bad_lines=None,
    # TODO (2.0): set on_bad_lines to "error".
    # See _refine_defaults_read comment for why we do this.
    on_bad_lines=None,
    encoding_errors: str | None = "strict",
    # Internal
    delim_whitespace=False,
    low_memory=_c_parser_defaults["low_memory"],
    memory_map=False,
    float_precision=None,
):
    # Snapshot every keyword argument into a dict; this relies on locals()
    # containing exactly the parameters, so no local variables may be bound
    # before this point.
    # locals() should never be modified
    kwds = locals().copy()
    del kwds["filepath_or_buffer"]
    del kwds["sep"]
    # Same normalization as read_csv, but with a tab as the default delimiter.
    kwds_defaults = _refine_defaults_read(
        dialect,
        delimiter,
        delim_whitespace,
        engine,
        sep,
        error_bad_lines,
        warn_bad_lines,
        on_bad_lines,
        names,
        prefix,
        defaults={"delimiter": "\t"},
    )
    kwds.update(kwds_defaults)
    return _read(filepath_or_buffer, kwds)
def read_fwf(
    filepath_or_buffer: FilePathOrBuffer,
    colspecs="infer",
    widths=None,
    infer_nrows=100,
    **kwds,
):
    r"""
    Read a table of fixed-width formatted lines into DataFrame.

    Also supports optionally iterating or breaking of the file
    into chunks.

    Additional help can be found in the `online docs for IO Tools
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.

    Parameters
    ----------
    filepath_or_buffer : str, path object or file-like object
        Any valid string path is acceptable. The string could be a URL. Valid
        URL schemes include http, ftp, s3, and file. For file URLs, a host is
        expected. A local file could be:
        ``file://localhost/path/to/table.csv``.

        If you want to pass in a path object, pandas accepts any
        ``os.PathLike``.

        By file-like object, we refer to objects with a ``read()`` method,
        such as a file handle (e.g. via builtin ``open`` function)
        or ``StringIO``.
    colspecs : list of tuple (int, int) or 'infer'. optional
        A list of tuples giving the extents of the fixed-width
        fields of each line as half-open intervals (i.e., [from, to[ ).
        String value 'infer' can be used to instruct the parser to try
        detecting the column specifications from the first 100 rows of
        the data which are not being skipped via skiprows (default='infer').
    widths : list of int, optional
        A list of field widths which can be used instead of 'colspecs' if
        the intervals are contiguous.
    infer_nrows : int, default 100
        The number of rows to consider when letting the parser determine the
        `colspecs`.
    **kwds : optional
        Optional keyword arguments can be passed to ``TextFileReader``.

    Returns
    -------
    DataFrame or TextParser
        A comma-separated values (csv) file is returned as two-dimensional
        data structure with labeled axes.

    See Also
    --------
    DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
    read_csv : Read a comma-separated values (csv) file into DataFrame.

    Examples
    --------
    >>> pd.read_fwf('data.csv')  # doctest: +SKIP
    """
    # 'colspecs' and 'widths' are two ways of saying the same thing: exactly
    # one of them must be given (the 'infer' default counts as unset).
    if colspecs is None and widths is None:
        raise ValueError("Must specify either colspecs or widths")
    if widths is not None and colspecs not in (None, "infer"):
        raise ValueError("You must specify only one of 'widths' and 'colspecs'")

    # Translate contiguous field widths into half-open [start, stop) column
    # intervals.
    if widths is not None:
        colspecs = []
        offset = 0
        for width in widths:
            colspecs.append((offset, offset + width))
            offset += width

    kwds["colspecs"] = colspecs
    kwds["infer_nrows"] = infer_nrows
    kwds["engine"] = "python-fwf"
    return _read(filepath_or_buffer, kwds)
class TextFileReader(abc.Iterator):
"""
Passed dialect overrides any of the related parser options
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = "python"
engine_specified = False
self.engine = engine
self._engine_specified = kwds.get("engine_specified", engine_specified)
_validate_skipfooter(kwds)
dialect = _extract_dialect(kwds)
if dialect is not None:
kwds = _merge_with_dialect_properties(dialect, kwds)
if kwds.get("header", "infer") == "infer":
kwds["header"] = 0 if kwds.get("names") is None else None
self.orig_options = kwds
# miscellanea
self._currow = 0
options = self._get_options_with_defaults(engine)
options["storage_options"] = kwds.get("storage_options", None)
self.chunksize = options.pop("chunksize", None)
self.nrows = options.pop("nrows", None)
self.squeeze = options.pop("squeeze", False)
self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if "has_index_names" in kwds:
self.options["has_index_names"] = kwds["has_index_names"]
self._engine = self._make_engine(self.engine)
def close(self):
self._engine.close()
def _get_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
default: object | None
for argname, default in parser_defaults.items():
value = kwds.get(argname, default)
# see gh-12935
if argname == "mangle_dupe_cols" and not value:
raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
else:
options[argname] = value
for argname, default in _c_parser_defaults.items():
if argname in kwds:
value = kwds[argname]
if engine != "c" and value != default:
if "python" in engine and argname not in _python_unsupported:
pass
elif value == _deprecated_defaults.get(argname, default):
pass
else:
raise ValueError(
f"The {repr(argname)} option is not supported with the "
f"{repr(engine)} engine"
)
else:
value = _deprecated_defaults.get(argname, default)
options[argname] = value
if engine == "python-fwf":
for argname, default in _fwf_defaults.items():
options[argname] = kwds.get(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
# The C engine doesn't need the file-like to have the "__next__"
# attribute. However, the Python engine explicitly calls
# "__next__(...)" when iterating through such an object, meaning it
# needs to have that attribute
raise ValueError(
"The 'python' engine cannot iterate through this file buffer."
)
def _clean_options(self, options, engine):
result = options.copy()
fallback_reason = None
# C engine not supported yet
if engine == "c":
if options["skipfooter"] > 0:
fallback_reason = "the 'c' engine does not support skipfooter"
engine = "python"
sep = options["delimiter"]
delim_whitespace = options["delim_whitespace"]
if sep is None and not delim_whitespace:
if engine == "c":
fallback_reason = (
"the 'c' engine does not support "
"sep=None with delim_whitespace=False"
)
engine = "python"
elif sep is not None and len(sep) > 1:
if engine == "c" and sep == r"\s+":
result["delim_whitespace"] = True
del result["delimiter"]
elif engine not in ("python", "python-fwf"):
# wait until regex engine integrated
fallback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
r"different from '\s+' are interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
if "python" in engine:
result["delimiter"] = r"\s+"
elif sep is not None:
encodeable = True
encoding = sys.getfilesystemencoding() or "utf-8"
try:
if len(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ("python", "python-fwf"):
fallback_reason = (
f"the separator encoded in {encoding} "
"is > 1 char long, and the 'c' engine "
"does not support such separators"
)
engine = "python"
quotechar = options["quotechar"]
if quotechar is not None and isinstance(quotechar, (str, bytes)):
if (
len(quotechar) == 1
and ord(quotechar) > 127
and engine not in ("python", "python-fwf")
):
fallback_reason = (
"ord(quotechar) > 127, meaning the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support such quotechars"
)
engine = "python"
if fallback_reason and self._engine_specified:
raise ValueError(fallback_reason)
if engine == "c":
for arg in _c_unsupported:
del result[arg]
if "python" in engine:
for arg in _python_unsupported:
if fallback_reason and result[arg] != _c_parser_defaults[arg]:
raise ValueError(
"Falling back to the 'python' engine because "
f"{fallback_reason}, but this causes {repr(arg)} to be "
"ignored as it is not supported by the 'python' engine."
)
del result[arg]
if fallback_reason:
warnings.warn(
(
"Falling back to the 'python' engine because "
f"{fallback_reason}; you can avoid this warning by specifying "
"engine='python'."
),
ParserWarning,
stacklevel=5,
)
index_col = options["index_col"]
names = options["names"]
converters = options["converters"]
na_values = options["na_values"]
skiprows = options["skiprows"]
validate_header_arg(options["header"])
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
if result.get(arg, depr_default) != depr_default:
msg = (
f"The {arg} argument has been deprecated and will be "
"removed in a future version.\n\n"
)
warnings.warn(msg, FutureWarning, stacklevel=7)
else:
result[arg] = parser_default
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if is_index_col(index_col):
if not isinstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result["index_col"] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not isinstance(converters, dict):
raise TypeError(
"Type converters must be a dict or subclass, "
f"input was a {type(converters).__name__}"
)
else:
converters = {}
# Converting values to NA
keep_default_na = options["keep_default_na"]
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is internally handled by the
# c-engine, so only need for python parsers
if engine != "c":
if is_integer(skiprows):
skiprows = list(range(skiprows))
if skiprows is None:
skiprows = set()
elif not callable(skiprows):
skiprows = set(skiprows)
# put stuff back
result["names"] = names
result["converters"] = converters
result["na_values"] = na_values
result["na_fvalues"] = na_fvalues
result["skiprows"] = skiprows
return result, engine
def __next__(self):
try:
return self.get_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine="c"):
mapping: dict[str, type[ParserBase]] = {
"c": CParserWrapper,
"python": PythonParser,
"python-fwf": FixedWidthFieldParser,
}
if engine not in mapping:
raise ValueError(
f"Unknown engine: {engine} (valid options are {mapping.keys()})"
)
# error: Too many arguments for "ParserBase"
return mapping[engine](self.f, **self.options) # type: ignore[call-arg]
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
nrows = validate_integer("nrows", nrows)
index, columns, col_dict = self._engine.read(nrows)
if index is None:
if col_dict:
# Any column is actually fine:
new_rows = len(next(iter(col_dict.values())))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
else:
new_rows = len(index)
df = DataFrame(col_dict, columns=columns, index=index)
self._currow += new_rows
if self.squeeze and len(df.columns) == 1:
return df[df.columns[0]].copy()
return df
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
if self.nrows is not None:
if self._currow >= self.nrows:
raise StopIteration
size = min(size, self.nrows - self._currow)
return self.read(nrows=size)
    def __enter__(self):
        # Context-manager entry: the reader itself is the managed resource.
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Context-manager exit: always release the underlying handles,
        # regardless of whether an exception occurred.
        self.close()
def TextParser(*args, **kwds):
    """
    Converts lists of lists/tuples into DataFrames with proper type inference
    and optional (e.g. string to datetime) conversion. Also enables iterating
    lazily over chunks of large files
    Parameters
    ----------
    data : file-like object or list
    delimiter : separator character to use
    dialect : str or csv.Dialect instance, optional
        Ignored if delimiter is longer than 1 character
    names : sequence, default
    header : int, default 0
        Row to use to parse column labels. Defaults to the first row. Prior
        rows will be discarded
    index_col : int or list, optional
        Column or columns to use as the (possibly hierarchical) index
    has_index_names: bool, default False
        True if the cols defined in index_col have an index name and are
        not in the header.
    na_values : scalar, str, list-like, or dict, optional
        Additional strings to recognize as NA/NaN.
    keep_default_na : bool, default True
    thousands : str, optional
        Thousands separator
    comment : str, optional
        Comment out remainder of line
    parse_dates : bool, default False
    keep_date_col : bool, default False
    date_parser : function, optional
    skiprows : list of integers
        Row numbers to skip
    skipfooter : int
        Number of line at bottom of file to skip
    converters : dict, optional
        Dict of functions for converting values in certain columns. Keys can
        either be integers or column labels, values are functions that take one
        input argument, the cell (not column) content, and return the
        transformed content.
    encoding : str, optional
        Encoding to use for UTF when reading/writing (ex. 'utf-8')
    squeeze : bool, default False
        returns Series if only one column.
    infer_datetime_format: bool, default False
        If True and `parse_dates` is True for a column, try to infer the
        datetime format based on the first datetime string. If the format
        can be inferred, there often will be a large parsing speed-up.
    float_precision : str, optional
        Specifies which converter the C engine should use for floating-point
        values. The options are `None` or `high` for the ordinary converter,
        `legacy` for the original lower precision pandas converter, and
        `round_trip` for the round-trip converter.
    .. versionchanged:: 1.2

    Returns
    -------
    TextFileReader
        A lazy, chunk-wise reader over the supplied data.
    """
    # Always force the python engine — presumably because only it accepts
    # already-parsed rows (lists of lists) as input; TODO confirm.
    kwds["engine"] = "python"
    return TextFileReader(*args, **kwds)
def _clean_na_values(na_values, keep_default_na=True):
na_fvalues: set | dict
if na_values is None:
if keep_default_na:
na_values = STR_NA_VALUES
else:
na_values = set()
na_fvalues = set()
elif isinstance(na_values, dict):
old_na_values = na_values.copy()
na_values = {} # Prevent aliasing.
# Convert the values in the na_values dictionary
# into array-likes for further use. This is also
# where we append the default NaN values, provided
# that `keep_default_na=True`.
for k, v in old_na_values.items():
if not is_list_like(v):
v = [v]
if keep_default_na:
v = set(v) | STR_NA_VALUES
na_values[k] = v
na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()}
else:
if not is_list_like(na_values):
na_values = [na_values]
na_values = _stringify_na_values(na_values)
if keep_default_na:
na_values = na_values | STR_NA_VALUES
na_fvalues = _floatify_na_values(na_values)
return na_values, na_fvalues
def _floatify_na_values(na_values):
# create float versions of the na_values
result = set()
for v in na_values:
try:
v = float(v)
if not np.isnan(v):
result.add(v)
except (TypeError, ValueError, OverflowError):
pass
return result
def _stringify_na_values(na_values):
""" return a stringified and numeric for these values """
result: list[int | str | float] = []
for x in na_values:
result.append(str(x))
result.append(x)
try:
v = float(x)
# we are like 999 here
if v == int(v):
v = int(v)
result.append(f"{v}.0")
result.append(str(v))
result.append(v)
except (TypeError, ValueError, OverflowError):
pass
try:
result.append(int(x))
except (TypeError, ValueError, OverflowError):
pass
return set(result)
def _refine_defaults_read(
    dialect: str | csv.Dialect,
    delimiter: str | object,
    delim_whitespace: bool,
    engine: str,
    sep: str | object,
    error_bad_lines: bool | None,
    warn_bad_lines: bool | None,
    on_bad_lines: str | None,
    names: ArrayLike | None | object,
    prefix: str | None | object,
    defaults: dict[str, Any],
):
    """Validate/refine default values of input parameters of read_csv, read_table.
    Parameters
    ----------
    dialect : str or csv.Dialect
        If provided, this parameter will override values (default or not) for the
        following parameters: `delimiter`, `doublequote`, `escapechar`,
        `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
        override values, a ParserWarning will be issued. See csv.Dialect
        documentation for more details.
    delimiter : str or object
        Alias for sep.
    delim_whitespace : bool
        Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
        used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
        is set to True, nothing should be passed in for the ``delimiter``
        parameter.
    engine : {{'c', 'python'}}
        Parser engine to use. The C engine is faster while the python engine is
        currently more feature-complete.
    sep : str or object
        A delimiter provided by the user (str) or a sentinel value, i.e.
        pandas._libs.lib.no_default.
    error_bad_lines : str or None
        Whether to error on a bad line or not.
    warn_bad_lines : str or None
        Whether to warn on a bad line or not.
    on_bad_lines : str or None
        An option for handling bad lines or a sentinel value(None).
    names : array-like, optional
        List of column names to use. If the file contains a header row,
        then you should explicitly pass ``header=0`` to override the column names.
        Duplicates in this list are not allowed.
    prefix : str, optional
        Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
    defaults: dict
        Default values of input parameters.
    Returns
    -------
    kwds : dict
        Input parameters with correct values.
    Raises
    ------
    ValueError :
        If a delimiter was specified with ``sep`` (or ``delimiter``) and
        ``delim_whitespace=True``.
        If on_bad_lines is specified(not ``None``) and ``error_bad_lines``/
        ``warn_bad_lines`` is True.
    """
    # fix types for sep, delimiter to Union(str, Any)
    delim_default = defaults["delimiter"]
    kwds: dict[str, Any] = {}
    # gh-23761
    #
    # When a dialect is passed, it overrides any of the overlapping
    # parameters passed in directly. We don't want to warn if the
    # default parameters were passed in (since it probably means
    # that the user didn't pass them in explicitly in the first place).
    #
    # "delimiter" is the annoying corner case because we alias it to
    # "sep" before doing comparison to the dialect values later on.
    # Thus, we need a flag to indicate that we need to "override"
    # the comparison to dialect values by checking if default values
    # for BOTH "delimiter" and "sep" were provided.
    if dialect is not None:
        kwds["sep_override"] = delimiter is None and (
            sep is lib.no_default or sep == delim_default
        )
    # sep and delimiter are aliases of each other; passing both is ambiguous.
    if delimiter and (sep is not lib.no_default):
        raise ValueError("Specified a sep and a delimiter; you can only specify one.")
    if names is not lib.no_default and prefix is not lib.no_default:
        raise ValueError("Specified named and prefix; you can only specify one.")
    kwds["names"] = None if names is lib.no_default else names
    kwds["prefix"] = None if prefix is lib.no_default else prefix
    # Alias sep -> delimiter.
    if delimiter is None:
        delimiter = sep
    if delim_whitespace and (delimiter is not lib.no_default):
        raise ValueError(
            "Specified a delimiter with both sep and "
            "delim_whitespace=True; you can only specify one."
        )
    if delimiter is lib.no_default:
        # assign default separator value
        kwds["delimiter"] = delim_default
    else:
        kwds["delimiter"] = delimiter
    # Record whether the caller chose an engine explicitly; presumably this
    # affects engine-fallback warnings elsewhere — TODO confirm.
    if engine is not None:
        kwds["engine_specified"] = True
    else:
        kwds["engine"] = "c"
        kwds["engine_specified"] = False
    # Ensure that on_bad_lines and error_bad_lines/warn_bad_lines
    # aren't specified at the same time. If so, raise. Otherwise,
    # alias on_bad_lines to "error" if error/warn_bad_lines not set
    # and on_bad_lines is not set. on_bad_lines is defaulted to None
    # so we can tell if it is set (this is why this hack exists).
    if on_bad_lines is not None:
        if error_bad_lines is not None or warn_bad_lines is not None:
            raise ValueError(
                "Both on_bad_lines and error_bad_lines/warn_bad_lines are set. "
                "Please only set on_bad_lines."
            )
        # Map the string option onto the internal enum.
        if on_bad_lines == "error":
            kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.ERROR
        elif on_bad_lines == "warn":
            kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.WARN
        elif on_bad_lines == "skip":
            kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.SKIP
        else:
            raise ValueError(f"Argument {on_bad_lines} is invalid for on_bad_lines")
    else:
        if error_bad_lines is not None:
            # Must check is_bool, because other stuff(e.g. non-empty lists) eval to true
            validate_bool_kwarg(error_bad_lines, "error_bad_lines")
            if error_bad_lines:
                kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.ERROR
            else:
                if warn_bad_lines is not None:
                    # This is the case where error_bad_lines is False
                    # We can only warn/skip if error_bad_lines is False
                    # None doesn't work because backwards-compatibility reasons
                    validate_bool_kwarg(warn_bad_lines, "warn_bad_lines")
                    if warn_bad_lines:
                        kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.WARN
                    else:
                        kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.SKIP
                else:
                    # Backwards compat, when only error_bad_lines = false, we warn
                    kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.WARN
        else:
            # Everything None -> Error
            kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.ERROR
    return kwds
def _extract_dialect(kwds: dict[str, Any]) -> csv.Dialect | None:
"""
Extract concrete csv dialect instance.
Returns
-------
csv.Dialect or None
"""
if kwds.get("dialect") is None:
return None
dialect = kwds["dialect"]
if dialect in csv.list_dialects():
dialect = csv.get_dialect(dialect)
_validate_dialect(dialect)
return dialect
# csv.Dialect attributes every dialect must expose to be usable by the
# parser (checked by _validate_dialect, merged by
# _merge_with_dialect_properties).
MANDATORY_DIALECT_ATTRS = (
    "delimiter",
    "doublequote",
    "escapechar",
    "skipinitialspace",
    "quotechar",
    "quoting",
)
def _validate_dialect(dialect: csv.Dialect) -> None:
    """Check that ``dialect`` exposes every mandatory csv.Dialect attribute.

    Raises
    ------
    ValueError
        If any required attribute is missing.
    """
    missing = [attr for attr in MANDATORY_DIALECT_ATTRS if not hasattr(dialect, attr)]
    if missing:
        raise ValueError(f"Invalid dialect {dialect} provided")
def _merge_with_dialect_properties(
    dialect: csv.Dialect,
    defaults: dict[str, Any],
) -> dict[str, Any]:
    """Overlay ``dialect`` attributes onto the TextFileReader keyword dict.

    Dialect values always win; a ParserWarning is emitted whenever an
    explicitly provided (non-default) value is overridden, except for the
    delimiter/``sep_override`` corner case handled by the outer reader.

    Parameters
    ----------
    dialect : csv.Dialect
        Concrete csv dialect. See csv.Dialect documentation for more details.
    defaults : dict
        Keyword arguments passed to TextFileReader.

    Returns
    -------
    kwds : dict
        Updated keyword arguments, merged with dialect parameters.
    """
    kwds = defaults.copy()
    for param in MANDATORY_DIALECT_ATTRS:
        dialect_val = getattr(dialect, param)
        provided = kwds.get(param, parser_defaults[param])
        # Only warn when the caller explicitly passed a conflicting,
        # non-default value (gh-23761). The sep_override flag (popped only
        # here, and only for "delimiter") suppresses the delimiter warning.
        if provided != parser_defaults[param] and provided != dialect_val:
            if not (param == "delimiter" and kwds.pop("sep_override", False)):
                warnings.warn(
                    f"Conflicting values for '{param}': '{provided}' was "
                    f"provided, but the dialect specifies '{dialect_val}'. "
                    "Using the dialect-specified value.",
                    ParserWarning,
                    stacklevel=2,
                )
        kwds[param] = dialect_val
    return kwds
def _validate_skipfooter(kwds: dict[str, Any]) -> None:
"""
Check whether skipfooter is compatible with other kwargs in TextFileReader.
Parameters
----------
kwds : dict
Keyword arguments passed to TextFileReader.
Raises
------
ValueError
If skipfooter is not compatible with other parameters.
"""
if kwds.get("skipfooter"):
if kwds.get("iterator") or kwds.get("chunksize"):
raise ValueError("'skipfooter' not supported for iteration")
if kwds.get("nrows"):
raise ValueError("'skipfooter' not supported with 'nrows'")
| [
"csv.get_dialect",
"pandas.errors.AbstractMethodError",
"pandas.io.common.validate_header_arg",
"pandas.core.dtypes.common.is_file_like",
"pandas.core.dtypes.common.is_list_like",
"sys.getfilesystemencoding",
"pandas.io.parsers.base_parser.is_index_col",
"pandas.core.dtypes.common.is_float",
"csv.li... | [((20583, 20683), 'pandas.util._decorators.deprecate_nonkeyword_arguments', 'deprecate_nonkeyword_arguments', ([], {'version': 'None', 'allowed_args': "['filepath_or_buffer']", 'stacklevel': '(3)'}), "(version=None, allowed_args=[\n 'filepath_or_buffer'], stacklevel=3)\n", (20613, 20683), False, 'from pandas.util._decorators import Appender, deprecate_nonkeyword_arguments\n'), ((23045, 23145), 'pandas.util._decorators.deprecate_nonkeyword_arguments', 'deprecate_nonkeyword_arguments', ([], {'version': 'None', 'allowed_args': "['filepath_or_buffer']", 'stacklevel': '(3)'}), "(version=None, allowed_args=[\n 'filepath_or_buffer'], stacklevel=3)\n", (23075, 23145), False, 'from pandas.util._decorators import Appender, deprecate_nonkeyword_arguments\n'), ((18938, 18951), 'pandas.core.dtypes.common.is_float', 'is_float', (['val'], {}), '(val)\n', (18946, 18951), False, 'from pandas.core.dtypes.common import is_file_like, is_float, is_integer, is_list_like\n'), ((29966, 29989), 'pandas.io.parsers.base_parser.parser_defaults.items', 'parser_defaults.items', ([], {}), '()\n', (29987, 29989), False, 'from pandas.io.parsers.base_parser import ParserBase, is_index_col, parser_defaults\n'), ((35587, 35625), 'pandas.io.common.validate_header_arg', 'validate_header_arg', (["options['header']"], {}), "(options['header'])\n", (35606, 35625), False, 'from pandas.io.common import validate_header_arg\n'), ((36247, 36270), 'pandas.io.parsers.base_parser.is_index_col', 'is_index_col', (['index_col'], {}), '(index_col)\n', (36259, 36270), False, 'from pandas.io.parsers.base_parser import ParserBase, is_index_col, parser_defaults\n'), ((38322, 38347), 'pandas.errors.AbstractMethodError', 'AbstractMethodError', (['self'], {}), '(self)\n', (38341, 38347), False, 'from pandas.errors import AbstractMethodError, ParserWarning\n'), ((38834, 38883), 'pandas.core.frame.DataFrame', 'DataFrame', (['col_dict'], {'columns': 'columns', 'index': 'index'}), '(col_dict, columns=columns, 
index=index)\n', (38843, 38883), False, 'from pandas.core.frame import DataFrame\n'), ((51130, 51149), 'csv.list_dialects', 'csv.list_dialects', ([], {}), '()\n', (51147, 51149), False, 'import csv\n'), ((51169, 51193), 'csv.get_dialect', 'csv.get_dialect', (['dialect'], {}), '(dialect)\n', (51184, 51193), False, 'import csv\n'), ((31296, 31311), 'pandas.core.dtypes.common.is_file_like', 'is_file_like', (['f'], {}), '(f)\n', (31308, 31311), False, 'from pandas.core.dtypes.common import is_file_like, is_float, is_integer, is_list_like\n'), ((35063, 35240), 'warnings.warn', 'warnings.warn', (['f"""Falling back to the \'python\' engine because {fallback_reason}; you can avoid this warning by specifying engine=\'python\'."""', 'ParserWarning'], {'stacklevel': '(5)'}), '(\n f"Falling back to the \'python\' engine because {fallback_reason}; you can avoid this warning by specifying engine=\'python\'."\n , ParserWarning, stacklevel=5)\n', (35076, 35240), False, 'import warnings\n'), ((37144, 37164), 'pandas.core.dtypes.common.is_integer', 'is_integer', (['skiprows'], {}), '(skiprows)\n', (37154, 37164), False, 'from pandas.core.dtypes.common import is_file_like, is_float, is_integer, is_list_like\n'), ((49767, 49822), 'pandas.util._validators.validate_bool_kwarg', 'validate_bool_kwarg', (['error_bad_lines', '"""error_bad_lines"""'], {}), "(error_bad_lines, 'error_bad_lines')\n", (49786, 49822), False, 'from pandas.util._validators import validate_bool_kwarg\n'), ((19704, 19741), 'pandas.core.dtypes.common.is_list_like', 'is_list_like', (['names'], {'allow_sets': '(False)'}), '(names, allow_sets=False)\n', (19716, 19741), False, 'from pandas.core.dtypes.common import is_file_like, is_float, is_integer, is_list_like\n'), ((36020, 36067), 'warnings.warn', 'warnings.warn', (['msg', 'FutureWarning'], {'stacklevel': '(7)'}), '(msg, FutureWarning, stacklevel=7)\n', (36033, 36067), False, 'import warnings\n'), ((38675, 38724), 'pandas.core.indexes.api.RangeIndex', 'RangeIndex', 
(['self._currow', '(self._currow + new_rows)'], {}), '(self._currow, self._currow + new_rows)\n', (38685, 38724), False, 'from pandas.core.indexes.api import RangeIndex\n'), ((42891, 42914), 'pandas.core.dtypes.common.is_list_like', 'is_list_like', (['na_values'], {}), '(na_values)\n', (42903, 42914), False, 'from pandas.core.dtypes.common import is_file_like, is_float, is_integer, is_list_like\n'), ((43352, 43363), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (43360, 43363), True, 'import numpy as np\n'), ((19068, 19083), 'pandas.core.dtypes.common.is_integer', 'is_integer', (['val'], {}), '(val)\n', (19078, 19083), False, 'from pandas.core.dtypes.common import is_file_like, is_float, is_integer, is_list_like\n'), ((42640, 42655), 'pandas.core.dtypes.common.is_list_like', 'is_list_like', (['v'], {}), '(v)\n', (42652, 42655), False, 'from pandas.core.dtypes.common import is_file_like, is_float, is_integer, is_list_like\n'), ((50238, 50291), 'pandas.util._validators.validate_bool_kwarg', 'validate_bool_kwarg', (['warn_bad_lines', '"""warn_bad_lines"""'], {}), "(warn_bad_lines, 'warn_bad_lines')\n", (50257, 50291), False, 'from pandas.util._validators import validate_bool_kwarg\n'), ((33225, 33252), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ([], {}), '()\n', (33250, 33252), False, 'import sys\n')] |
from leavedemo.leave.models import Account
from datetime import timedelta
def update_hr(workitem):
    """Automated, simplistic version of hrform.

    Validates the leave request attached to ``workitem`` and deducts the
    requested (inclusive) number of days from the requester's account.

    Raises
    ------
    Exception
        If the request was denied, the dates are inverted, or the account
        balance is insufficient.
    """
    instance = workitem.instance
    leaverequest = workitem.instance.content_object
    if leaverequest.reason_denial:
        raise Exception('denial reason is not empty')
    # NOTE(review): attribute naming is inconsistent below (dayStart vs
    # day_start, dayEnd vs day_end); confirm which names the LeaveRequest
    # model actually defines before relying on this code path.
    if leaverequest.dayStart > leaverequest.day_end:
        raise Exception('date error')
    delta = leaverequest.dayEnd - leaverequest.day_start
    nbjours = delta.days + 1  # inclusive day count (start and end both count)
    account = Account.objects.get(user=instance.user)
    if account.days < nbjours:
        raise Exception('no days enough in user account.')
    account.days -= nbjours
    account.save()
"leavedemo.leave.models.Account.objects.get"
] | [((551, 590), 'leavedemo.leave.models.Account.objects.get', 'Account.objects.get', ([], {'user': 'instance.user'}), '(user=instance.user)\n', (570, 590), False, 'from leavedemo.leave.models import Account\n')] |
"""
Display information about ctapipe output files (DL1 or DL2)
"""
from pathlib import Path
import tables
import yaml
from astropy.table import Table
from ctapipe.tools.utils import get_parser
def unflatten(dictionary, separator=" "):
    """Convert separator-joined flat keys into nested dictionaries.

    E.g. ``{"a b": 1}`` becomes ``{"a": {"b": 1}}``.
    """
    nested = {}
    for key, value in dictionary.items():
        *branches, leaf = key.split(separator)
        node = nested
        for branch in branches:
            # Walk (and create on demand) the intermediate levels.
            node = node.setdefault(branch, {})
        node[leaf] = value
    return nested
def fileinfo(args):
    """
    Display information about ctapipe output files (DL1 or DL2 in HDF5 format).
    Optionally create an index table from all headers
    """
    info_total = {}  # flat header attrs per file, for the optional table output
    for filename in args.files:
        info = {}
        # prevent failure if a non-file is given (e.g. a directory)
        if Path(filename).is_file() is False:
            info[filename] = "not a file"
        elif tables.is_hdf5_file(filename) is not True:
            info[filename] = "unknown file type"
        else:
            try:
                with tables.open_file(filename, mode="r") as infile:
                    # pylint: disable=W0212,E1101
                    root_attrs = infile.root._v_attrs
                    attrs = {
                        name: str(root_attrs[name]) for name in root_attrs._f_list()
                    }
                    info[filename] = attrs if args.flat else unflatten(attrs)
                    if args.output_table:
                        info_total[filename] = attrs
            except tables.exceptions.HDF5ExtError as err:
                info[filename] = f"ERROR {err}"
        print(yaml.dump(info, indent=4))
    if args.output_table:
        # use pandas' ability to convert a dict of flat values to a table
        import pandas as pd  # pylint: disable=C0415

        dataframe = pd.DataFrame(info_total)
        Table.from_pandas(dataframe.T, index=True).write(
            args.output_table, format=args.table_format, overwrite=True
        )
def main():
    """Entry point: build the argument parser and display file info."""
    parser = get_parser(fileinfo)
    arg_specs = [
        (
            ("files",),
            dict(
                metavar="FILENAME",
                type=str,
                nargs="+",
                help="filenames of files in ctapipe format",
            ),
        ),
        (
            ("-o", "--output-table"),
            dict(help="generate output file in tabular format"),
        ),
        (
            ("-T", "--table-format"),
            dict(
                help="table format of output-table if not automatically guessed from filename"
            ),
        ),
        (
            ("-f", "--flat"),
            dict(action="store_true", help="show flat header hierarchy"),
        ),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    fileinfo(parser.parse_args())
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| [
"ctapipe.tools.utils.get_parser",
"astropy.table.Table.from_pandas",
"yaml.dump",
"pathlib.Path",
"tables.open_file",
"tables.is_hdf5_file",
"pandas.DataFrame"
] | [((2312, 2332), 'ctapipe.tools.utils.get_parser', 'get_parser', (['fileinfo'], {}), '(fileinfo)\n', (2322, 2332), False, 'from ctapipe.tools.utils import get_parser\n'), ((2095, 2119), 'pandas.DataFrame', 'pd.DataFrame', (['info_total'], {}), '(info_total)\n', (2107, 2119), True, 'import pandas as pd\n'), ((1892, 1917), 'yaml.dump', 'yaml.dump', (['info'], {'indent': '(4)'}), '(info, indent=4)\n', (1901, 1917), False, 'import yaml\n'), ((1087, 1116), 'tables.is_hdf5_file', 'tables.is_hdf5_file', (['filename'], {}), '(filename)\n', (1106, 1116), False, 'import tables\n'), ((2128, 2170), 'astropy.table.Table.from_pandas', 'Table.from_pandas', (['dataframe.T'], {'index': '(True)'}), '(dataframe.T, index=True)\n', (2145, 2170), False, 'from astropy.table import Table\n'), ((996, 1010), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (1000, 1010), False, 'from pathlib import Path\n'), ((1231, 1267), 'tables.open_file', 'tables.open_file', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (1247, 1267), False, 'import tables\n')] |
import os
import time

from selenium import webdriver

# Card pages to screenshot, mapped to the browser window size
# (width, height) needed to frame each card. Commented-out entries are
# cards that are currently disabled.
PATHS = {
    "/?dummy": (959, 500),
    "/cards/details-of-confirmed-cases": (959, 500),
    "/cards/number-of-confirmed-cases": (959, 500),
    "/cards/attributes-of-confirmed-cases": (959, 480),
    "/cards/number-of-tested": (959, 540),
    # "/cards/number-of-reports-to-returnee-contact-center": (959, 500),
    "/cards/number-of-reports-to-health-consultation-desk": (959, 500),
    # "/cards/predicted-number-of-toei-subway-passengers": (959, 750),
    # "/cards/agency": (959, 730),
    # "/cards/details-of-tested-cases": (959, 500),
    # "/cards/number-of-inspection-persons": (959, 600),
    # "/cards/shinjuku-visitors": (959, 820),
    # "/cards/chiyoda-visitors": (959, 820),
    # "/cards/shinjuku-st-heatmap": (959, 600),
    # "/cards/tokyo-st-heatmap": (959, 600)
    "/cards/number-of-reports-to-health-center-desk": (959, 500)
}

# Screenshots go under ogp/<lang>/ (directly under ogp/ for Japanese).
# makedirs(exist_ok=True) avoids the check-then-create race of
# os.path.exists + os.mkdir.
os.makedirs("ogp", exist_ok=True)

options = webdriver.ChromeOptions()
options.add_argument("--headless")
options.add_argument("--hide-scrollbars")
driver = webdriver.Chrome(options=options)
try:
    for lang in ("ja", "en", "zh-cn", "zh-tw", "ko", "ja-basic"):
        os.makedirs("ogp/{}".format(lang), exist_ok=True)
        for path, size in PATHS.items():
            driver.set_window_size(*size)
            driver.get(
                "http://localhost:8000{}?ogp=true".format(
                    path if lang == "ja" else "/{}{}".format(lang, path)
                )
            )
            path = path.replace("/cards/", "").replace("/", "_")
            if "heatmap" in path:
                # Heatmap cards render asynchronously; give them time to draw.
                time.sleep(20)
            driver.save_screenshot(
                "ogp/{}.png".format(
                    path if lang == "ja" else "{}/{}".format(lang, path)
                )
            )
finally:
    # Always shut down the headless Chrome process, even on failure;
    # the original script leaked it.
    driver.quit()
| [
"os.path.exists",
"selenium.webdriver.ChromeOptions",
"selenium.webdriver.Chrome",
"time.sleep",
"os.mkdir"
] | [((968, 993), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (991, 993), False, 'from selenium import webdriver\n'), ((1081, 1114), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'options'}), '(options=options)\n', (1097, 1114), False, 'from selenium import webdriver\n'), ((62, 83), 'os.path.exists', 'os.path.exists', (['"""ogp"""'], {}), "('ogp')\n", (76, 83), False, 'import os\n'), ((89, 104), 'os.mkdir', 'os.mkdir', (['"""ogp"""'], {}), "('ogp')\n", (97, 104), False, 'import os\n'), ((1616, 1630), 'time.sleep', 'time.sleep', (['(20)'], {}), '(20)\n', (1626, 1630), False, 'import time\n')] |
from flask import Flask
from .config import Config
from .api import register_api
from .plugins import db, api, ma, login, migrate
def create_app(config=Config):
    """Build and configure the Flask application.

    :param config: configuration for the flask application
    :type config: :class:`Config`
    :returns App: Flask application
    :rtype: :class:`Flask`
    """
    app = Flask(__name__)
    app.config.from_object(config)
    # Wire up each extension; migrate additionally needs the db handle.
    for extension in (db, api, ma, login):
        extension.init_app(app)
    migrate.init_app(app, db)
    register_api(api)
    return app
| [
"flask.Flask"
] | [((450, 465), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (455, 465), False, 'from flask import Flask\n')] |
# -*- coding: utf-8 -*-
# author: itimor
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
class UserManager(BaseUserManager):
    """Manager that creates users keyed on ``username``."""
    def create_user(self, username, password=None):
        '''``username`` is the unique identifier; raises if it is missing.'''
        if not username:
            raise ValueError('Users must have an username')
        user = self.model(
            username=username,
        )
        user.set_password(password)  # hash and store the password
        user.save(using=self._db)  # persist the user record
        return user
    def create_superuser(self, username, password):
        """Create a user flagged as admin (used by ``createsuperuser``)."""
        user = self.create_user(username=username,
                                password=password,
                                )
        user.is_admin = True  # the only extra field compared to create_user
        user.save(using=self._db)
        return user
class User(AbstractBaseUser):
    """Custom auth user identified by ``username``; created via ``UserManager``."""
    username = models.CharField(max_length=32, unique=True, db_index=True)
    email = models.EmailField(max_length=255, unique=True, blank=True)
    name = models.CharField(max_length=100, null=True, blank=True, verbose_name=u'中文名')
    group = models.ForeignKey('Group', on_delete=models.SET_NULL, null=True, blank=True, verbose_name=u'部门或组')
    # NOTE(review): auto_now updates this timestamp on every save; for a
    # creation time, auto_now_add=True is usually intended — confirm.
    create_date = models.DateField(auto_now=True, verbose_name=u'创建时间')
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    roles = models.ForeignKey('Role', on_delete=models.SET_NULL, null=True, blank=True, verbose_name=u'角色')
    USERNAME_FIELD = 'username'  # a unique identifier field is mandatory
    #REQUIRED_FIELDS = ['email']  # fields required when creating a superuser
    def __str__(self):  # __unicode__ on Python 2
        return self.username
    @property
    def is_staff(self):
        # Django admin checks is_staff; admin status doubles as staff status.
        return self.is_admin
    class Meta:
        verbose_name = u'用户'
        verbose_name_plural = u'用户'
    objects = UserManager()  # custom manager used to create users
class Group(models.Model):
    """Organisational unit (department) a ``User`` can belong to."""
    name = models.CharField(max_length=64, unique=True, verbose_name=u'部门')
    desc = models.CharField(max_length=64, null=True, blank=True, verbose_name=u'描述')
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = u'组'
        verbose_name_plural = u'部门'
class Role(models.Model):
    """Role a ``User`` can be assigned (referenced by ``User.roles``)."""
    name = models.CharField(max_length=64, unique=True, verbose_name=u'角色')
    cnname = models.CharField(max_length=64, unique=True, verbose_name=u'中文名')
    desc = models.CharField(max_length=64, null=True, blank=True, verbose_name=u'描述')
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = u'角色'
        verbose_name_plural = u'角色'
"django.db.models.EmailField",
"django.db.models.DateField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.CharField"
] | [((861, 920), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'unique': '(True)', 'db_index': '(True)'}), '(max_length=32, unique=True, db_index=True)\n', (877, 920), False, 'from django.db import models\n'), ((933, 991), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(255)', 'unique': '(True)', 'blank': '(True)'}), '(max_length=255, unique=True, blank=True)\n', (950, 991), False, 'from django.db import models\n'), ((1003, 1079), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)', 'verbose_name': 'u"""中文名"""'}), "(max_length=100, null=True, blank=True, verbose_name=u'中文名')\n", (1019, 1079), False, 'from django.db import models\n'), ((1092, 1194), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Group"""'], {'on_delete': 'models.SET_NULL', 'null': '(True)', 'blank': '(True)', 'verbose_name': 'u"""部门或组"""'}), "('Group', on_delete=models.SET_NULL, null=True, blank=True,\n verbose_name=u'部门或组')\n", (1109, 1194), False, 'from django.db import models\n'), ((1209, 1262), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now': '(True)', 'verbose_name': 'u"""创建时间"""'}), "(auto_now=True, verbose_name=u'创建时间')\n", (1225, 1262), False, 'from django.db import models\n'), ((1279, 1312), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1298, 1312), False, 'from django.db import models\n'), ((1328, 1362), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1347, 1362), False, 'from django.db import models\n'), ((1375, 1474), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Role"""'], {'on_delete': 'models.SET_NULL', 'null': '(True)', 'blank': '(True)', 'verbose_name': 'u"""角色"""'}), "('Role', on_delete=models.SET_NULL, null=True, blank=True,\n verbose_name=u'角色')\n", (1392, 1474), False, 'from django.db import 
models\n'), ((1894, 1958), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'unique': '(True)', 'verbose_name': 'u"""部门"""'}), "(max_length=64, unique=True, verbose_name=u'部门')\n", (1910, 1958), False, 'from django.db import models\n'), ((1970, 2044), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)', 'blank': '(True)', 'verbose_name': 'u"""描述"""'}), "(max_length=64, null=True, blank=True, verbose_name=u'描述')\n", (1986, 2044), False, 'from django.db import models\n'), ((2214, 2278), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'unique': '(True)', 'verbose_name': 'u"""角色"""'}), "(max_length=64, unique=True, verbose_name=u'角色')\n", (2230, 2278), False, 'from django.db import models\n'), ((2292, 2357), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'unique': '(True)', 'verbose_name': 'u"""中文名"""'}), "(max_length=64, unique=True, verbose_name=u'中文名')\n", (2308, 2357), False, 'from django.db import models\n'), ((2369, 2443), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)', 'blank': '(True)', 'verbose_name': 'u"""描述"""'}), "(max_length=64, null=True, blank=True, verbose_name=u'描述')\n", (2385, 2443), False, 'from django.db import models\n')] |
#!/usr/bin/env python3
from litex import RemoteClient

# Connect to the target over the LiteX wishbone bridge.
wb = RemoteClient()
wb.open()
# # #
def icap_send(addr, data):
    """Write ``data`` to ICAP register ``addr`` via the CSR interface and
    busy-wait until the gateware reports the transfer as done."""
    wb.regs.icap_addr.write(addr)
    wb.regs.icap_data.write(data)
    wb.regs.icap_send.write(1)
    while (wb.regs.icap_done.read() == 0):
        pass
# iprog
# Writes 0xF to ICAP register 0x4 — presumably the IPROG command to the
# CMD register, which triggers FPGA reconfiguration; confirm against the
# Xilinx 7-series configuration guide (UG470).
icap_send(0x04, 0x0000000f)
# # #
wb.close()
| [
"litex.RemoteClient"
] | [((61, 75), 'litex.RemoteClient', 'RemoteClient', ([], {}), '()\n', (73, 75), False, 'from litex import RemoteClient\n')] |
from cogs import task, argument, option
import sys, os
@task
class Write_Hello:
    """Cogs task that writes a capitalized greeting.

    The greeting target defaults to ``$USER``; output goes to stdout unless
    the ``-o`` option names a file to write instead.
    """

    name = argument(default=None)
    output = option(key='o', default=None)

    def __init__(self, name, output):
        # Fall back to the invoking user's login name when no name is given.
        self.name = os.environ['USER'] if name is None else name
        # Default destination is stdout; otherwise open the requested file.
        self.file = sys.stdout if output is None else open(output, 'w')

    def __call__(self):
        self.file.write("Hello, %s!\n" % self.name.capitalize())
| [
"cogs.option",
"cogs.argument"
] | [((94, 116), 'cogs.argument', 'argument', ([], {'default': 'None'}), '(default=None)\n', (102, 116), False, 'from cogs import task, argument, option\n'), ((130, 159), 'cogs.option', 'option', ([], {'key': '"""o"""', 'default': 'None'}), "(key='o', default=None)\n", (136, 159), False, 'from cogs import task, argument, option\n')] |
from sims4.tuning.tunable_base import GroupNames
from situations.complex.give_job_object_situation_mixin import GiveJobObjectSituationMixin
from situations.situation import Situation
from situations.situation_complex import SituationComplexCommon, CommonSituationState, SituationStateData, TunableSituationJobAndRoleState
import sims4
logger = sims4.log.Logger('SuntannerSituation', default_owner='msundaram')
class _SuntannerSituationState(CommonSituationState):
    # No behavior beyond CommonSituationState; exists so the situation has a
    # concrete state class to tune and instantiate.
    pass
class SuntannerSituation(GiveJobObjectSituationMixin, SituationComplexCommon):
    """Situation with a single tuned job/role pair and one default state."""
    INSTANCE_TUNABLES = {'situation_default_job_and_role': TunableSituationJobAndRoleState(description='\n                  The default job that a visitor will be in during the situation.\n                  '), 'default_state': _SuntannerSituationState.TunableFactory(description='\n                  The default state of this situation.\n                  ', display_name='State', tuning_group=GroupNames.STATE)}
    REMOVE_INSTANCE_TUNABLES = Situation.NON_USER_FACING_REMOVE_INSTANCE_TUNABLES
    @classmethod
    def default_job(cls):
        # The situation's only job, taken from the tuned job/role pair.
        return cls.situation_default_job_and_role.job
    @classmethod
    def _states(cls):
        # Single entry in the state machine: the tuned default state.
        return [SituationStateData(1, _SuntannerSituationState, factory=cls.default_state)]
    @classmethod
    def _get_tuned_job_and_default_role_state_tuples(cls):
        return [(cls.situation_default_job_and_role.job, cls.situation_default_job_and_role.role_state)]
    def start_situation(self):
        """Start the situation and immediately enter the default state."""
        super().start_situation()
        self._change_state(self.default_state())
| [
"situations.situation_complex.TunableSituationJobAndRoleState",
"situations.situation_complex.SituationStateData",
"sims4.log.Logger"
] | [((344, 409), 'sims4.log.Logger', 'sims4.log.Logger', (['"""SuntannerSituation"""'], {'default_owner': '"""msundaram"""'}), "('SuntannerSituation', default_owner='msundaram')\n", (360, 409), False, 'import sims4\n'), ((613, 763), 'situations.situation_complex.TunableSituationJobAndRoleState', 'TunableSituationJobAndRoleState', ([], {'description': '"""\n The default job that a visitor will be in during the situation.\n """'}), '(description=\n """\n The default job that a visitor will be in during the situation.\n """\n )\n', (644, 763), False, 'from situations.situation_complex import SituationComplexCommon, CommonSituationState, SituationStateData, TunableSituationJobAndRoleState\n'), ((1180, 1254), 'situations.situation_complex.SituationStateData', 'SituationStateData', (['(1)', '_SuntannerSituationState'], {'factory': 'cls.default_state'}), '(1, _SuntannerSituationState, factory=cls.default_state)\n', (1198, 1254), False, 'from situations.situation_complex import SituationComplexCommon, CommonSituationState, SituationStateData, TunableSituationJobAndRoleState\n')] |
from brownie import reverts
from fixtures import setup_wallet, owners_2
from eth_abi import encode_abi
from web3 import Web3
from fixtures import ACCOUNTS
from eth_account.messages import encode_defunct
def calculate_transaction_hash(nonce: int, to: str, value: int, data: str='00') -> bytes:
    """Keccak-256 hash of the tightly packed (nonce, to, value, data) fields.

    `nonce` and `value` are packed as 32-byte big-endian words, the hex
    address `to` is left-padded to 32 bytes, and `data` is appended raw.
    """
    packed = [
        nonce.to_bytes(32, 'big'),
        bytes.fromhex(to).rjust(32, b'\0'),
        value.to_bytes(32, 'big'),
        bytes.fromhex(data),
    ]
    return Web3.keccak(b''.join(packed))
def get_sigdata(nonce: int, to: str, value: int, data: str='00', accounts=None):
    """Sign the transaction hash with each account and build the sigdata array.

    Returns a list of exactly 10 ``[v, r, s]`` triples; slots without a
    signer remain ``[0, 0, 0]``.
    """
    # Fix: a mutable default argument ([]) is shared across calls; use the
    # None-sentinel idiom instead (behavior is unchanged for all callers).
    if accounts is None:
        accounts = []
    message_to_be_signed = encode_defunct(calculate_transaction_hash(nonce, to, value, data))
    # Fix: ``[[0, 0, 0]] * 10`` aliases one shared inner list — a latent
    # mutation hazard. Build ten independent placeholder triples instead.
    sigdata = [[0, 0, 0] for _ in range(10)]
    for i, account in enumerate(accounts):
        signature = account.sign_message(message_to_be_signed)
        sigdata[i] = [signature['v'], signature['r'], signature['s']]
    return sigdata
def test_execute(setup_wallet):
    """A transaction signed by two accounts executes and increments the nonce."""
    nonce: int = 0
    to: str = '77aB6D9362d4F35596279692F0251Db635165871'
    value: int = 1
    # Sign with ACCOUNTS[0] and ACCOUNTS[1] (enough signatures to pass).
    sigdata = get_sigdata(nonce, to, value, accounts=[ACCOUNTS[0], ACCOUNTS[1]])
    assert setup_wallet.execute(to, value, '', sigdata, {'value': value})
    assert setup_wallet.nonce() == 1
def test_execute_non_owner(setup_wallet):
    """Execution signed only by ACCOUNTS[2] (not an owner) must revert."""
    nonce: int = 0
    to: str = '77aB6D9362d4F35596279692F0251Db635165871'
    value: int = 1
    sigdata = get_sigdata(nonce, to, value, accounts=[ACCOUNTS[2]])
    with reverts():
        setup_wallet.execute(to, value, '', sigdata, {'value': value})
def test_execute_threshold_not_reached(setup_wallet):
    """A single signature is below the signing threshold, so execute reverts."""
    nonce: int = 0
    to: str = '77aB6D9362d4F35596279692F0251Db635165871'
    value: int = 1
    sigdata = get_sigdata(nonce, to, value, accounts=[ACCOUNTS[0]])
    with reverts():
        setup_wallet.execute(to, value, '', sigdata, {'value': value})
"fixtures.setup_wallet.nonce",
"fixtures.setup_wallet.execute",
"brownie.reverts",
"web3.Web3.keccak"
] | [((481, 501), 'web3.Web3.keccak', 'Web3.keccak', (['encoded'], {}), '(encoded)\n', (492, 501), False, 'from web3 import Web3\n'), ((1134, 1196), 'fixtures.setup_wallet.execute', 'setup_wallet.execute', (['to', 'value', '""""""', 'sigdata', "{'value': value}"], {}), "(to, value, '', sigdata, {'value': value})\n", (1154, 1196), False, 'from fixtures import setup_wallet, owners_2\n'), ((1208, 1228), 'fixtures.setup_wallet.nonce', 'setup_wallet.nonce', ([], {}), '()\n', (1226, 1228), False, 'from fixtures import setup_wallet, owners_2\n'), ((1451, 1460), 'brownie.reverts', 'reverts', ([], {}), '()\n', (1458, 1460), False, 'from brownie import reverts\n'), ((1470, 1532), 'fixtures.setup_wallet.execute', 'setup_wallet.execute', (['to', 'value', '""""""', 'sigdata', "{'value': value}"], {}), "(to, value, '', sigdata, {'value': value})\n", (1490, 1532), False, 'from fixtures import setup_wallet, owners_2\n'), ((1762, 1771), 'brownie.reverts', 'reverts', ([], {}), '()\n', (1769, 1771), False, 'from brownie import reverts\n'), ((1781, 1843), 'fixtures.setup_wallet.execute', 'setup_wallet.execute', (['to', 'value', '""""""', 'sigdata', "{'value': value}"], {}), "(to, value, '', sigdata, {'value': value})\n", (1801, 1843), False, 'from fixtures import setup_wallet, owners_2\n')] |
from tests.testmodels import Event, Tournament
from tortoise.contrib import test
class TestUpdate(test.TestCase):
    """Queryset-level ``update`` tests for Tortoise ORM models."""
    async def test_update(self):
        """``update`` changes every matching row and returns the row count."""
        await Tournament.create(name="1")
        await Tournament.create(name="3")
        rows_affected = await Tournament.all().update(name="2")
        self.assertEqual(rows_affected, 2)
        tournament = await Tournament.first()
        self.assertEqual(tournament.name, "2")
    async def test_update_relation(self):
        """``update`` can reassign a foreign-key field in bulk."""
        tournament_first = await Tournament.create(name="1")
        tournament_second = await Tournament.create(name="2")
        await Event.create(name="1", tournament=tournament_first)
        await Event.all().update(tournament=tournament_second)
        event = await Event.first()
        self.assertEqual(event.tournament_id, tournament_second.id)
| [
"tests.testmodels.Event.create",
"tests.testmodels.Tournament.create",
"tests.testmodels.Tournament.all",
"tests.testmodels.Event.all",
"tests.testmodels.Tournament.first",
"tests.testmodels.Event.first"
] | [((163, 190), 'tests.testmodels.Tournament.create', 'Tournament.create', ([], {'name': '"""1"""'}), "(name='1')\n", (180, 190), False, 'from tests.testmodels import Event, Tournament\n'), ((205, 232), 'tests.testmodels.Tournament.create', 'Tournament.create', ([], {'name': '"""3"""'}), "(name='3')\n", (222, 232), False, 'from tests.testmodels import Event, Tournament\n'), ((368, 386), 'tests.testmodels.Tournament.first', 'Tournament.first', ([], {}), '()\n', (384, 386), False, 'from tests.testmodels import Event, Tournament\n'), ((510, 537), 'tests.testmodels.Tournament.create', 'Tournament.create', ([], {'name': '"""1"""'}), "(name='1')\n", (527, 537), False, 'from tests.testmodels import Event, Tournament\n'), ((572, 599), 'tests.testmodels.Tournament.create', 'Tournament.create', ([], {'name': '"""2"""'}), "(name='2')\n", (589, 599), False, 'from tests.testmodels import Event, Tournament\n'), ((615, 666), 'tests.testmodels.Event.create', 'Event.create', ([], {'name': '"""1"""', 'tournament': 'tournament_first'}), "(name='1', tournament=tournament_first)\n", (627, 666), False, 'from tests.testmodels import Event, Tournament\n'), ((752, 765), 'tests.testmodels.Event.first', 'Event.first', ([], {}), '()\n', (763, 765), False, 'from tests.testmodels import Event, Tournament\n'), ((263, 279), 'tests.testmodels.Tournament.all', 'Tournament.all', ([], {}), '()\n', (277, 279), False, 'from tests.testmodels import Event, Tournament\n'), ((681, 692), 'tests.testmodels.Event.all', 'Event.all', ([], {}), '()\n', (690, 692), False, 'from tests.testmodels import Event, Tournament\n')] |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import pytest
@pytest.fixture
def mut():
    """Fixture returning the module under test (releasetool start/python)."""
    from releasetool.commands.start import python
    return python
@pytest.mark.parametrize(
    "setup_py_contents,release_version,expected",
    [
        ("version = '1.0.0'\n", "1.1.0", "version = '1.1.0'\n"),
        ('version = "1.0.0"\n', "1.1.0", 'version = "1.1.0"\n'),
    ],
)
def test_update_setup_py_sets_version(
    mut, setup_py_contents, release_version, expected
):
    """update_setup_py rewrites the version line, preserving the quote style."""
    context = mut.Context()
    context.release_version = release_version
    # Patch open() so the "setup.py" read/write happens entirely in memory.
    with mock.patch(
        "builtins.open", mock.mock_open(read_data=setup_py_contents)
    ) as mock_open:
        mut.update_setup_py(context)
        mock_file = mock_open()
        mock_file.write.assert_called_once_with(expected)
| [
"unittest.mock.mock_open",
"pytest.mark.parametrize"
] | [((718, 916), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""setup_py_contents,release_version,expected"""', '[("version = \'1.0.0\'\\n", \'1.1.0\', "version = \'1.1.0\'\\n"), (\n \'version = "1.0.0"\\n\', \'1.1.0\', """version = "1.1.0\\"\n""")]'], {}), '(\'setup_py_contents,release_version,expected\', [(\n "version = \'1.0.0\'\\n", \'1.1.0\', """version = \'1.1.0\'\n"""), (\n \'version = "1.0.0"\\n\', \'1.1.0\', \'version = "1.1.0"\\n\')])\n', (741, 916), False, 'import pytest\n'), ((1155, 1198), 'unittest.mock.mock_open', 'mock.mock_open', ([], {'read_data': 'setup_py_contents'}), '(read_data=setup_py_contents)\n', (1169, 1198), False, 'from unittest import mock\n')] |
import os
import pytest
import sys
import tempfile
import ray
from ray import serve
from ray.experimental.dag import DAGNode
from ray.experimental.dag.utils import DAGNodeNameGenerator
from ray.serve.deployment_graph import RayServeDAGHandle
from ray.serve.deployment_graph import InputNode
from ray.serve.drivers import DAGDriver
import starlette.requests
from ray.serve.pipeline.generate import transform_ray_dag_to_serve_dag
NESTED_HANDLE_KEY = "nested_handle"
@serve.deployment
class ClassHello:
    """Minimal Serve deployment exposing a single hello() method."""
    def __init__(self):
        pass
    def hello(self):
        # Fixed payload used by the graph tests below.
        return "hello"
@serve.deployment
class Model:
    """Deployment that scales its input by ``ratio * weight``."""
    def __init__(self, weight: int, ratio: float = None):
        self.weight = weight
        # A falsy ratio (None or 0) falls back to 1.
        self.ratio = ratio or 1
    def forward(self, input: int):
        return self.ratio * self.weight * input
    def __call__(self, request):
        # Same computation as forward(), but on the raw request payload.
        input_data = request
        return self.ratio * self.weight * input_data
@serve.deployment
class Combine:
    """Deployment that sums the forward() outputs of two model handles."""
    def __init__(
        self,
        m1,
        m2=None,
        m2_nested: bool = False,
    ):
        self.m1 = m1
        # When m2_nested is set, m2 is a dict wrapping the real handle under
        # NESTED_HANDLE_KEY; unwrap it here.
        self.m2 = m2.get(NESTED_HANDLE_KEY) if m2_nested else m2
    def __call__(self, req):
        # Fan out to both models, then block on both refs and sum the results.
        r1_ref = self.m1.forward.remote(req)
        r2_ref = self.m2.forward.remote(req)
        return sum(ray.get([r1_ref, r2_ref]))
@serve.deployment
class Counter:
    """Stateful deployment holding a single mutable counter value."""
    def __init__(self, val):
        self.val = val
    def get(self):
        return self.val
    def inc(self, inc):
        self.val += inc
@serve.deployment
def fn_hello():
    """Zero-argument function deployment returning a constant payload."""
    return "hello"
@serve.deployment
def combine(m1_output, m2_output, kwargs_output=0):
    """Sum two upstream outputs plus an optional keyword-bound value."""
    return m1_output + m2_output + kwargs_output
def class_factory():
    """Return a brand-new class each call, holding a single value.

    The class (and its ``get`` accessor) is created at runtime, i.e. it is
    not importable at module scope — used to exercise Serve's handling of
    dynamically created classes.
    """
    class MyInlineClass:
        """Trivial immutable-by-convention value holder."""
        def __init__(self, val):
            self.val = val
        def get(self):
            return self.val
    return MyInlineClass
@serve.deployment
class Adder:
    """Deployment that adds a fixed increment to its input."""
    def __init__(self, increment: int):
        self.increment = increment
    def forward(self, inp: int) -> int:
        print(f"Adder got {inp}")
        return inp + self.increment
    # Make the deployment callable directly, aliasing forward().
    __call__ = forward
@serve.deployment
class NoargDriver:
    """Driver deployment executing a no-input Serve DAG on each call."""
    def __init__(self, dag: RayServeDAGHandle):
        self.dag = dag
    async def __call__(self):
        return await self.dag.remote()
async def json_resolver(request: starlette.requests.Request):
    """Deserialize the HTTP request body as JSON (used as DAGDriver input schema)."""
    return await request.json()
def ray_dag_to_serve_dag(dag: DAGNode):
    """Recursively convert a Ray DAG into its Serve deployment DAG form."""
    # The name generator assigns unique deployment names (e.g. Model, Model_1)
    # across the whole traversal.
    with DAGNodeNameGenerator() as deployment_name_generator:
        serve_dag = dag.apply_recursive(
            lambda node: transform_ray_dag_to_serve_dag(node, deployment_name_generator)
        )
    return serve_dag
def test_serve_pipeline_single_func_no_input_plot():
    """Plot a no-input function DAG and check the rendered dot edges."""
    dag = fn_hello.bind()
    serve_dag = NoargDriver.bind(dag)
    serve_dag = ray_dag_to_serve_dag(serve_dag)
    with tempfile.TemporaryDirectory() as tmpdir:
        to_file = os.path.join(tmpdir, "tmp.png")
        ray.experimental.dag.plot(serve_dag, to_file)
        assert os.path.isfile(to_file)
    graph = ray.experimental.dag.vis_utils.dag_to_dot(serve_dag)
    to_string = graph.to_string()
    assert "fn_hello -> NoargDriver" in to_string
def test_serve_pipeline_single_func_deployment_dag_plot():
    """Plot a single-function DAG with positional + keyword input attributes."""
    with InputNode() as dag_input:
        dag = combine.bind(dag_input[0], dag_input[1], kwargs_output=1)
        serve_dag = DAGDriver.bind(dag, input_schema=json_resolver)
    serve_dag = ray_dag_to_serve_dag(serve_dag)
    with tempfile.TemporaryDirectory() as tmpdir:
        to_file = os.path.join(tmpdir, "tmp.png")
        ray.experimental.dag.plot(serve_dag, to_file)
        assert os.path.isfile(to_file)
    graph = ray.experimental.dag.vis_utils.dag_to_dot(serve_dag)
    to_string = graph.to_string()
    assert "INPUT_NODE -> INPUT_ATTRIBUTE_NODE" in to_string
    assert "INPUT_NODE -> INPUT_ATTRIBUTE_NODE_1" in to_string
    assert "INPUT_ATTRIBUTE_NODE -> combine" in to_string
    assert "INPUT_ATTRIBUTE_NODE_1 -> combine" in to_string
    assert "combine -> DAGDriver" in to_string
def test_serve_pipeline_chained_function_plot():
    """Plot a diamond of two locally defined function deployments."""
    @serve.deployment
    def func_1(input):
        return input
    @serve.deployment
    def func_2(input):
        return input * 2
    with InputNode() as dag_input:
        output_1 = func_1.bind(dag_input)
        output_2 = func_2.bind(dag_input)
        serve_dag = combine.bind(output_1, output_2)
    serve_dag = ray_dag_to_serve_dag(serve_dag)
    with tempfile.TemporaryDirectory() as tmpdir:
        to_file = os.path.join(tmpdir, "tmp.png")
        ray.experimental.dag.plot(serve_dag, to_file)
        assert os.path.isfile(to_file)
    graph = ray.experimental.dag.vis_utils.dag_to_dot(serve_dag)
    to_string = graph.to_string()
    assert "INPUT_NODE -> func_1" in to_string
    assert "INPUT_NODE -> func_2" in to_string
    assert "func_1 -> combine" in to_string
    assert "func_2 -> combine" in to_string
def test_serve_pipeline_class_with_class_method_plot():
    """Plot a DAG routing the input through a bound class method."""
    with InputNode() as dag_input:
        model = Model.bind(2, ratio=0.3)
        dag = model.forward.bind(dag_input)
        serve_dag = DAGDriver.bind(dag, input_schema=json_resolver)
    serve_dag = ray_dag_to_serve_dag(serve_dag)
    with tempfile.TemporaryDirectory() as tmpdir:
        to_file = os.path.join(tmpdir, "tmp.png")
        ray.experimental.dag.plot(serve_dag, to_file)
        assert os.path.isfile(to_file)
    graph = ray.experimental.dag.vis_utils.dag_to_dot(serve_dag)
    to_string = graph.to_string()
    assert "Model -> forward" in to_string
    assert "INPUT_NODE -> forward" in to_string
    assert "forward -> DAGDriver" in to_string
def test_serve_pipeline_func_class_with_class_method_plot():
    """Plot a mixed function/class DAG with three input attributes."""
    with InputNode() as dag_input:
        m1 = Model.bind(1)
        m2 = Model.bind(2)
        m1_output = m1.forward.bind(dag_input[0])
        m2_output = m2.forward.bind(dag_input[1])
        combine_output = combine.bind(m1_output, m2_output, kwargs_output=dag_input[2])
        serve_dag = DAGDriver.bind(combine_output, input_schema=json_resolver)
    serve_dag = ray_dag_to_serve_dag(serve_dag)
    with tempfile.TemporaryDirectory() as tmpdir:
        to_file = os.path.join(tmpdir, "tmp.png")
        ray.experimental.dag.plot(serve_dag, to_file)
        assert os.path.isfile(to_file)
    graph = ray.experimental.dag.vis_utils.dag_to_dot(serve_dag)
    to_string = graph.to_string()
    assert "INPUT_NODE -> INPUT_ATTRIBUTE_NODE" in to_string
    assert "INPUT_NODE -> INPUT_ATTRIBUTE_NODE_1" in to_string
    assert "INPUT_NODE -> INPUT_ATTRIBUTE_NODE_2" in to_string
    assert "Model -> forward" in to_string
    assert "INPUT_ATTRIBUTE_NODE -> forward" in to_string
    assert "Model_1 -> forward_1" in to_string
    assert "INPUT_ATTRIBUTE_NODE_1 -> forward_1" in to_string
    assert "INPUT_ATTRIBUTE_NODE_2 -> combine" in to_string
    assert "forward -> combine" in to_string
    assert "forward_1 -> combine" in to_string
    assert "combine -> DAGDriver" in to_string
def test_serve_pipeline_multi_instantiation_class_deployment_in_init_args_plot():
    """Plot a DAG where two Model instances are passed as constructor args."""
    with InputNode() as dag_input:
        m1 = Model.bind(2)
        m2 = Model.bind(3)
        combine = Combine.bind(m1, m2=m2)
        combine_output = combine.__call__.bind(dag_input)
        serve_dag = DAGDriver.bind(combine_output, input_schema=json_resolver)
    serve_dag = ray_dag_to_serve_dag(serve_dag)
    with tempfile.TemporaryDirectory() as tmpdir:
        to_file = os.path.join(tmpdir, "tmp.png")
        ray.experimental.dag.plot(serve_dag, to_file)
        assert os.path.isfile(to_file)
    graph = ray.experimental.dag.vis_utils.dag_to_dot(serve_dag)
    to_string = graph.to_string()
    assert "Model -> Combine" in to_string
    assert "Model_1 -> Combine" in to_string
    assert "Combine -> __call__" in to_string
    assert "INPUT_NODE -> __call__" in to_string
    assert "__call__ -> DAGDriver" in to_string
def test_serve_pipeline_test_shared_deployment_handle_plot():
    """Plot a DAG where one Model handle is shared for both Combine inputs."""
    with InputNode() as dag_input:
        m = Model.bind(2)
        combine = Combine.bind(m, m2=m)
        combine_output = combine.__call__.bind(dag_input)
        serve_dag = DAGDriver.bind(combine_output, input_schema=json_resolver)
    serve_dag = ray_dag_to_serve_dag(serve_dag)
    with tempfile.TemporaryDirectory() as tmpdir:
        to_file = os.path.join(tmpdir, "tmp.png")
        ray.experimental.dag.plot(serve_dag, to_file)
        assert os.path.isfile(to_file)
    graph = ray.experimental.dag.vis_utils.dag_to_dot(serve_dag)
    to_string = graph.to_string()
    assert "Model -> Combine" in to_string
    assert "Combine -> __call__" in to_string
    assert "INPUT_NODE -> __call__" in to_string
    assert "__call__ -> DAGDriver" in to_string
def test_serve_pipeline_multi_instantiation_class_nested_deployment_arg_dag_plot():
    """Plot a DAG where one model handle is nested inside a dict argument."""
    with InputNode() as dag_input:
        m1 = Model.bind(2)
        m2 = Model.bind(3)
        combine = Combine.bind(m1, m2={NESTED_HANDLE_KEY: m2}, m2_nested=True)
        output = combine.__call__.bind(dag_input)
        serve_dag = DAGDriver.bind(output, input_schema=json_resolver)
    serve_dag = ray_dag_to_serve_dag(serve_dag)
    with tempfile.TemporaryDirectory() as tmpdir:
        to_file = os.path.join(tmpdir, "tmp.png")
        ray.experimental.dag.plot(serve_dag, to_file)
        assert os.path.isfile(to_file)
    graph = ray.experimental.dag.vis_utils.dag_to_dot(serve_dag)
    to_string = graph.to_string()
    assert "Model -> Combine" in to_string
    assert "Model_1 -> Combine" in to_string
    assert "Combine -> __call__" in to_string
    assert "INPUT_NODE -> __call__" in to_string
    assert "__call__ -> DAGDriver" in to_string
def test_serve_pipeline_class_factory_plot():
    """Plot a DAG built from a dynamically created (non-importable) class."""
    with InputNode() as _:
        instance = serve.deployment(class_factory()).bind(3)
        output = instance.get.bind()
        serve_dag = NoargDriver.bind(output)
    serve_dag = ray_dag_to_serve_dag(serve_dag)
    with tempfile.TemporaryDirectory() as tmpdir:
        to_file = os.path.join(tmpdir, "tmp.png")
        ray.experimental.dag.plot(serve_dag, to_file)
        assert os.path.isfile(to_file)
    graph = ray.experimental.dag.vis_utils.dag_to_dot(serve_dag)
    to_string = graph.to_string()
    assert "MyInlineClass -> get" in to_string
    assert "get -> NoargDriver" in to_string
# Allow running this test module directly; propagate pytest's exit status.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-s", __file__]))
| [
"tempfile.TemporaryDirectory",
"ray.serve.pipeline.generate.transform_ray_dag_to_serve_dag",
"ray.get",
"ray.experimental.dag.plot",
"ray.experimental.dag.vis_utils.dag_to_dot",
"ray.serve.drivers.DAGDriver.bind",
"ray.experimental.dag.utils.DAGNodeNameGenerator",
"os.path.join",
"ray.serve.deployme... | [((3027, 3079), 'ray.experimental.dag.vis_utils.dag_to_dot', 'ray.experimental.dag.vis_utils.dag_to_dot', (['serve_dag'], {}), '(serve_dag)\n', (3068, 3079), False, 'import ray\n'), ((3659, 3711), 'ray.experimental.dag.vis_utils.dag_to_dot', 'ray.experimental.dag.vis_utils.dag_to_dot', (['serve_dag'], {}), '(serve_dag)\n', (3700, 3711), False, 'import ray\n'), ((4655, 4707), 'ray.experimental.dag.vis_utils.dag_to_dot', 'ray.experimental.dag.vis_utils.dag_to_dot', (['serve_dag'], {}), '(serve_dag)\n', (4696, 4707), False, 'import ray\n'), ((5429, 5481), 'ray.experimental.dag.vis_utils.dag_to_dot', 'ray.experimental.dag.vis_utils.dag_to_dot', (['serve_dag'], {}), '(serve_dag)\n', (5470, 5481), False, 'import ray\n'), ((6332, 6384), 'ray.experimental.dag.vis_utils.dag_to_dot', 'ray.experimental.dag.vis_utils.dag_to_dot', (['serve_dag'], {}), '(serve_dag)\n', (6373, 6384), False, 'import ray\n'), ((7626, 7678), 'ray.experimental.dag.vis_utils.dag_to_dot', 'ray.experimental.dag.vis_utils.dag_to_dot', (['serve_dag'], {}), '(serve_dag)\n', (7667, 7678), False, 'import ray\n'), ((8505, 8557), 'ray.experimental.dag.vis_utils.dag_to_dot', 'ray.experimental.dag.vis_utils.dag_to_dot', (['serve_dag'], {}), '(serve_dag)\n', (8546, 8557), False, 'import ray\n'), ((9412, 9464), 'ray.experimental.dag.vis_utils.dag_to_dot', 'ray.experimental.dag.vis_utils.dag_to_dot', (['serve_dag'], {}), '(serve_dag)\n', (9453, 9464), False, 'import ray\n'), ((10207, 10259), 'ray.experimental.dag.vis_utils.dag_to_dot', 'ray.experimental.dag.vis_utils.dag_to_dot', (['serve_dag'], {}), '(serve_dag)\n', (10248, 10259), False, 'import ray\n'), ((2439, 2461), 'ray.experimental.dag.utils.DAGNodeNameGenerator', 'DAGNodeNameGenerator', ([], {}), '()\n', (2459, 2461), False, 'from ray.experimental.dag.utils import DAGNodeNameGenerator\n'), ((2830, 2859), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2857, 2859), False, 'import tempfile\n'), ((2889, 
2920), 'os.path.join', 'os.path.join', (['tmpdir', '"""tmp.png"""'], {}), "(tmpdir, 'tmp.png')\n", (2901, 2920), False, 'import os\n'), ((2929, 2974), 'ray.experimental.dag.plot', 'ray.experimental.dag.plot', (['serve_dag', 'to_file'], {}), '(serve_dag, to_file)\n', (2954, 2974), False, 'import ray\n'), ((2990, 3013), 'os.path.isfile', 'os.path.isfile', (['to_file'], {}), '(to_file)\n', (3004, 3013), False, 'import os\n'), ((3234, 3245), 'ray.serve.deployment_graph.InputNode', 'InputNode', ([], {}), '()\n', (3243, 3245), False, 'from ray.serve.deployment_graph import InputNode\n'), ((3352, 3399), 'ray.serve.drivers.DAGDriver.bind', 'DAGDriver.bind', (['dag'], {'input_schema': 'json_resolver'}), '(dag, input_schema=json_resolver)\n', (3366, 3399), False, 'from ray.serve.drivers import DAGDriver\n'), ((3462, 3491), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3489, 3491), False, 'import tempfile\n'), ((3521, 3552), 'os.path.join', 'os.path.join', (['tmpdir', '"""tmp.png"""'], {}), "(tmpdir, 'tmp.png')\n", (3533, 3552), False, 'import os\n'), ((3561, 3606), 'ray.experimental.dag.plot', 'ray.experimental.dag.plot', (['serve_dag', 'to_file'], {}), '(serve_dag, to_file)\n', (3586, 3606), False, 'import ray\n'), ((3622, 3645), 'os.path.isfile', 'os.path.isfile', (['to_file'], {}), '(to_file)\n', (3636, 3645), False, 'import os\n'), ((4233, 4244), 'ray.serve.deployment_graph.InputNode', 'InputNode', ([], {}), '()\n', (4242, 4244), False, 'from ray.serve.deployment_graph import InputNode\n'), ((4458, 4487), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (4485, 4487), False, 'import tempfile\n'), ((4517, 4548), 'os.path.join', 'os.path.join', (['tmpdir', '"""tmp.png"""'], {}), "(tmpdir, 'tmp.png')\n", (4529, 4548), False, 'import os\n'), ((4557, 4602), 'ray.experimental.dag.plot', 'ray.experimental.dag.plot', (['serve_dag', 'to_file'], {}), '(serve_dag, to_file)\n', (4582, 4602), False, 'import ray\n'), 
((4618, 4641), 'os.path.isfile', 'os.path.isfile', (['to_file'], {}), '(to_file)\n', (4632, 4641), False, 'import os\n'), ((4991, 5002), 'ray.serve.deployment_graph.InputNode', 'InputNode', ([], {}), '()\n', (5000, 5002), False, 'from ray.serve.deployment_graph import InputNode\n'), ((5122, 5169), 'ray.serve.drivers.DAGDriver.bind', 'DAGDriver.bind', (['dag'], {'input_schema': 'json_resolver'}), '(dag, input_schema=json_resolver)\n', (5136, 5169), False, 'from ray.serve.drivers import DAGDriver\n'), ((5232, 5261), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (5259, 5261), False, 'import tempfile\n'), ((5291, 5322), 'os.path.join', 'os.path.join', (['tmpdir', '"""tmp.png"""'], {}), "(tmpdir, 'tmp.png')\n", (5303, 5322), False, 'import os\n'), ((5331, 5376), 'ray.experimental.dag.plot', 'ray.experimental.dag.plot', (['serve_dag', 'to_file'], {}), '(serve_dag, to_file)\n', (5356, 5376), False, 'import ray\n'), ((5392, 5415), 'os.path.isfile', 'os.path.isfile', (['to_file'], {}), '(to_file)\n', (5406, 5415), False, 'import os\n'), ((5726, 5737), 'ray.serve.deployment_graph.InputNode', 'InputNode', ([], {}), '()\n', (5735, 5737), False, 'from ray.serve.deployment_graph import InputNode\n'), ((6014, 6072), 'ray.serve.drivers.DAGDriver.bind', 'DAGDriver.bind', (['combine_output'], {'input_schema': 'json_resolver'}), '(combine_output, input_schema=json_resolver)\n', (6028, 6072), False, 'from ray.serve.drivers import DAGDriver\n'), ((6135, 6164), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6162, 6164), False, 'import tempfile\n'), ((6194, 6225), 'os.path.join', 'os.path.join', (['tmpdir', '"""tmp.png"""'], {}), "(tmpdir, 'tmp.png')\n", (6206, 6225), False, 'import os\n'), ((6234, 6279), 'ray.experimental.dag.plot', 'ray.experimental.dag.plot', (['serve_dag', 'to_file'], {}), '(serve_dag, to_file)\n', (6259, 6279), False, 'import ray\n'), ((6295, 6318), 'os.path.isfile', 'os.path.isfile', (['to_file'], 
{}), '(to_file)\n', (6309, 6318), False, 'import os\n'), ((7108, 7119), 'ray.serve.deployment_graph.InputNode', 'InputNode', ([], {}), '()\n', (7117, 7119), False, 'from ray.serve.deployment_graph import InputNode\n'), ((7308, 7366), 'ray.serve.drivers.DAGDriver.bind', 'DAGDriver.bind', (['combine_output'], {'input_schema': 'json_resolver'}), '(combine_output, input_schema=json_resolver)\n', (7322, 7366), False, 'from ray.serve.drivers import DAGDriver\n'), ((7429, 7458), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7456, 7458), False, 'import tempfile\n'), ((7488, 7519), 'os.path.join', 'os.path.join', (['tmpdir', '"""tmp.png"""'], {}), "(tmpdir, 'tmp.png')\n", (7500, 7519), False, 'import os\n'), ((7528, 7573), 'ray.experimental.dag.plot', 'ray.experimental.dag.plot', (['serve_dag', 'to_file'], {}), '(serve_dag, to_file)\n', (7553, 7573), False, 'import ray\n'), ((7589, 7612), 'os.path.isfile', 'os.path.isfile', (['to_file'], {}), '(to_file)\n', (7603, 7612), False, 'import os\n'), ((8017, 8028), 'ray.serve.deployment_graph.InputNode', 'InputNode', ([], {}), '()\n', (8026, 8028), False, 'from ray.serve.deployment_graph import InputNode\n'), ((8187, 8245), 'ray.serve.drivers.DAGDriver.bind', 'DAGDriver.bind', (['combine_output'], {'input_schema': 'json_resolver'}), '(combine_output, input_schema=json_resolver)\n', (8201, 8245), False, 'from ray.serve.drivers import DAGDriver\n'), ((8308, 8337), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (8335, 8337), False, 'import tempfile\n'), ((8367, 8398), 'os.path.join', 'os.path.join', (['tmpdir', '"""tmp.png"""'], {}), "(tmpdir, 'tmp.png')\n", (8379, 8398), False, 'import os\n'), ((8407, 8452), 'ray.experimental.dag.plot', 'ray.experimental.dag.plot', (['serve_dag', 'to_file'], {}), '(serve_dag, to_file)\n', (8432, 8452), False, 'import ray\n'), ((8468, 8491), 'os.path.isfile', 'os.path.isfile', (['to_file'], {}), '(to_file)\n', (8482, 8491), False, 
'import os\n'), ((8873, 8884), 'ray.serve.deployment_graph.InputNode', 'InputNode', ([], {}), '()\n', (8882, 8884), False, 'from ray.serve.deployment_graph import InputNode\n'), ((9102, 9152), 'ray.serve.drivers.DAGDriver.bind', 'DAGDriver.bind', (['output'], {'input_schema': 'json_resolver'}), '(output, input_schema=json_resolver)\n', (9116, 9152), False, 'from ray.serve.drivers import DAGDriver\n'), ((9215, 9244), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (9242, 9244), False, 'import tempfile\n'), ((9274, 9305), 'os.path.join', 'os.path.join', (['tmpdir', '"""tmp.png"""'], {}), "(tmpdir, 'tmp.png')\n", (9286, 9305), False, 'import os\n'), ((9314, 9359), 'ray.experimental.dag.plot', 'ray.experimental.dag.plot', (['serve_dag', 'to_file'], {}), '(serve_dag, to_file)\n', (9339, 9359), False, 'import ray\n'), ((9375, 9398), 'os.path.isfile', 'os.path.isfile', (['to_file'], {}), '(to_file)\n', (9389, 9398), False, 'import os\n'), ((9787, 9798), 'ray.serve.deployment_graph.InputNode', 'InputNode', ([], {}), '()\n', (9796, 9798), False, 'from ray.serve.deployment_graph import InputNode\n'), ((10010, 10039), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (10037, 10039), False, 'import tempfile\n'), ((10069, 10100), 'os.path.join', 'os.path.join', (['tmpdir', '"""tmp.png"""'], {}), "(tmpdir, 'tmp.png')\n", (10081, 10100), False, 'import os\n'), ((10109, 10154), 'ray.experimental.dag.plot', 'ray.experimental.dag.plot', (['serve_dag', 'to_file'], {}), '(serve_dag, to_file)\n', (10134, 10154), False, 'import ray\n'), ((10170, 10193), 'os.path.isfile', 'os.path.isfile', (['to_file'], {}), '(to_file)\n', (10184, 10193), False, 'import os\n'), ((10428, 10463), 'pytest.main', 'pytest.main', (["['-v', '-s', __file__]"], {}), "(['-v', '-s', __file__])\n", (10439, 10463), False, 'import pytest\n'), ((1300, 1325), 'ray.get', 'ray.get', (['[r1_ref, r2_ref]'], {}), '([r1_ref, r2_ref])\n', (1307, 1325), False, 
'import ray\n'), ((2558, 2621), 'ray.serve.pipeline.generate.transform_ray_dag_to_serve_dag', 'transform_ray_dag_to_serve_dag', (['node', 'deployment_name_generator'], {}), '(node, deployment_name_generator)\n', (2588, 2621), False, 'from ray.serve.pipeline.generate import transform_ray_dag_to_serve_dag\n')] |
import numpy as np
import sys
from optparse import OptionParser
from utils import obtain_parameters
from test_models_20news import test20news
from test_models_cifar import testcifar
from test_models_snippets import testsnippets
from test_models_TMC import testtmc
op = OptionParser()
op.add_option("-M", "--model", type=int, default=3, help="model type (1,2,3)")
op.add_option("-p", "--ps", type=float, default=1.0, help="supervision level (float[0.1,1.0])")
op.add_option("-a", "--alpha", type=float, default=0.0, help="alpha value")
op.add_option("-b", "--beta", type=float, default=0.0, help="beta value")
op.add_option("-l", "--lambda_", type=float, default=0.0, help="lambda value")
op.add_option("-r", "--repetitions", type=int, default=2, help="repetitions")
op.add_option("-s", "--reseed", type=int, default=0, help="if >0 reseed numpy for each repetition")
op.add_option("-v", "--addvalidation", type=int, default=1, help="if >0 add the validation set to the train set")
op.add_option("-c", "--nbits", type=int, default=16, help="number of bits")
op.add_option("-d", "--ds", type="string", default="20news", help="Dataset to train: 20news, cifar, tmc, snippets")
(opts, args) = op.parse_args()
ps = float(opts.ps)
nbits = opts.nbits
df = str(opts.ds).lower()
model_dict = {1:"VDHS-S", 2: "PHS-GS", 3:"SSBVAE" }
model = model_dict.get(opts.model)
print("TESTING " + df.upper() +" with model " + str(opts.model))
print("Alpha: ", opts.alpha, " Beta: ", opts.beta, " Lambda :", opts.lambda_)
header = "test"+df
ofile = "\"./Results/ResultsTraning/" + model + "_" + df.upper() + "-" + str(nbits) + "BITS-" + \
str(opts.alpha) + "ALPHA-" + str(opts.beta) + "BETA-" + str(opts.lambda_) + "LAMBDA.csv\""
print(ofile)
tail = "(model="+str(opts.model)+",ps="+str(opts.ps)+", addvalidation="+str(opts.addvalidation)+",alpha="+str(opts.alpha)+\
",beta="+str(opts.beta)+",lambda_="+str(opts.lambda_)+",repetitions="+str(opts.repetitions)+",nbits="+str(nbits)+\
",ofilename="+ofile+")"
func = header + tail
eval(func)
| [
"optparse.OptionParser"
] | [((271, 285), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (283, 285), False, 'from optparse import OptionParser\n')] |
#!/usr/bin/env python3
exit()
import requests
import json
requests.packages.urllib3.disable_warnings()
src_url = "https://172.21.42.102:8089"
dest_url = "https://172.21.42.103:8089"
def login(url,username,password):
creds_payload = { 'cookie': '1', 'username': username, 'password': password }
s = requests.Session()
r = s.post(url+"/services/auth/login", creds_payload, verify=False)
r.raise_for_status()
return s
r = s.get(src_url+"/servicesNS/-/testkvstore/storage/collections/config?output_mode=json")
r.raise_for_status()
srcspl_coll_list = r.json()
r.raise_for_status()
for entry in srcspl_coll_list['entry']:
print(entry['id'])
| [
"requests.packages.urllib3.disable_warnings",
"requests.Session"
] | [((60, 104), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', ([], {}), '()\n', (102, 104), False, 'import requests\n'), ((304, 322), 'requests.Session', 'requests.Session', ([], {}), '()\n', (320, 322), False, 'import requests\n')] |
import sys
from collections import namedtuple
import mock
import pytest
from pca.packages.errors import ErrorBoundary
PY36 = (3, 6) <= sys.version_info < (3, 7)
Callbacks = namedtuple(
"Callbacks",
[
"log_inner_error",
"should_propagate_exception",
"transform_propagated_exception",
"on_no_exception",
"on_propagate_exception",
"on_suppress_exception",
],
)
class AnException(Exception):
pass
class AnotherException(Exception):
pass
@pytest.fixture
def catchall_boundary():
return ErrorBoundary()
@pytest.fixture
def specific_boundary():
return ErrorBoundary(catch=AnException)
@pytest.fixture
def callbacks():
return Callbacks(
log_inner_error=mock.Mock(),
should_propagate_exception=mock.Mock(return_value=False),
transform_propagated_exception=mock.Mock(return_value=None),
on_no_exception=mock.Mock(),
on_propagate_exception=mock.Mock(),
on_suppress_exception=mock.Mock(),
)
@pytest.fixture
def boundary_with_callbacks(callbacks):
return ErrorBoundary(name="boundary_with_callbacks", **callbacks._asdict())
class TestNotRaised:
def test_catchall_as_context_manager(self, catchall_boundary) -> None:
with catchall_boundary as error_boundary:
pass
assert error_boundary.exc_info is error_boundary.exc_info
def test_callbacks(self, boundary_with_callbacks, callbacks) -> None:
with boundary_with_callbacks:
pass
callbacks.should_propagate_exception.assert_not_called()
callbacks.transform_propagated_exception.assert_not_called()
callbacks.on_no_exception.assert_called_once_with()
callbacks.on_propagate_exception.assert_not_called()
callbacks.on_suppress_exception.assert_not_called()
class TestCatching:
def test_catchall_as_context_manager(self, catchall_boundary) -> None:
exception = AnException()
with catchall_boundary as error_boundary:
raise exception
assert error_boundary.exc_info.value is exception # type: ignore
def test_catchall_as_decorator(self, catchall_boundary) -> None:
exception = AnException()
@catchall_boundary
def foo() -> None:
raise exception
foo()
assert catchall_boundary.exc_info.value is exception # type: ignore
def test_specific_catching(self, specific_boundary) -> None:
exception = AnException()
with specific_boundary as error_boundary:
raise exception
assert error_boundary.exc_info.value is exception
def test_callbacks(self, boundary_with_callbacks, callbacks) -> None:
exception = AnException()
with boundary_with_callbacks:
raise exception
# type: ignore
callbacks.should_propagate_exception.assert_called_once_with(
boundary_with_callbacks.exc_info
)
callbacks.transform_propagated_exception.assert_not_called()
callbacks.on_no_exception.assert_not_called()
callbacks.on_propagate_exception.assert_not_called()
callbacks.on_suppress_exception.assert_called_once_with(boundary_with_callbacks.exc_info)
class TestPropagating:
def test_specific_catching(self, specific_boundary) -> None:
exception = AnotherException()
with pytest.raises(AnotherException) as error_info:
with specific_boundary:
raise exception
assert error_info.value is exception # type: ignore
def test_callbacks(self, boundary_with_callbacks, callbacks) -> None:
exception = AnException()
callbacks.should_propagate_exception.return_value = True
with pytest.raises(AnException):
with boundary_with_callbacks:
raise exception
callbacks.should_propagate_exception.assert_called_once_with(
boundary_with_callbacks.exc_info
)
callbacks.transform_propagated_exception.assert_called_once_with(
boundary_with_callbacks.exc_info
)
callbacks.on_no_exception.assert_not_called()
callbacks.on_propagate_exception.assert_called_once_with(boundary_with_callbacks.exc_info)
callbacks.on_suppress_exception.assert_not_called()
def test_transform_propagated_exception(self, boundary_with_callbacks, callbacks):
exception = AnException()
transformed_exception = AnotherException()
callbacks.should_propagate_exception.return_value = True
callbacks.transform_propagated_exception.return_value = transformed_exception
with pytest.raises(AnotherException) as error_info:
with boundary_with_callbacks:
raise exception
assert error_info.value is transformed_exception
callbacks.should_propagate_exception.assert_called_once_with(
boundary_with_callbacks.exc_info
)
callbacks.transform_propagated_exception.assert_called_once_with(
boundary_with_callbacks.exc_info
)
callbacks.on_no_exception.assert_not_called()
callbacks.on_propagate_exception.assert_called_once_with(boundary_with_callbacks.exc_info)
callbacks.on_suppress_exception.assert_not_called()
class TestCallbackErrors:
"""
Tests checking what happens when a callback throws an error.
"""
@pytest.mark.skipif(
PY36,
reason="strange behavior of representing Exception.args on CPython 3.6.15; waiting for obsoletion for Py36",
)
def test_log_inner_error(self, caplog) -> None:
main_exception = AnException("main_exception")
callback_exception = AnotherException("callback_exception")
boundary_with_callbacks = ErrorBoundary(name="boundary_with_callbacks")
boundary_with_callbacks.log_inner_error(
where="foo", main_error=main_exception, callback_error=callback_exception
)
assert caplog.messages == [
(
"ErrorBoundary(name='boundary_with_callbacks').foo callback raised unhandled "
"error: AnotherException('callback_exception') was raised while "
"AnException('main_exception') was handled."
)
]
def test_on_no_exception(self, boundary_with_callbacks, callbacks, caplog) -> None:
callback_exception = AnotherException("callback_exception")
callbacks.on_no_exception.side_effect = callback_exception
with boundary_with_callbacks:
pass
callbacks.log_inner_error.assert_called_once_with(
"on_no_exception", None, callback_exception
)
def test_should_propagate_exception(self, boundary_with_callbacks, callbacks, caplog) -> None:
main_exception = AnException("main_exception")
callback_exception = AnotherException("callback_exception")
callbacks.should_propagate_exception.side_effect = callback_exception
with pytest.raises(AnException) as error_info:
with boundary_with_callbacks:
raise main_exception
assert error_info.value is main_exception
callbacks.log_inner_error.assert_called_once_with(
"should_propagate_exception", main_exception, callback_exception
)
def test_on_propagate_exception(self, boundary_with_callbacks, callbacks) -> None:
main_exception = AnException("main_exception")
callback_exception = AnotherException("callback_exception")
callbacks.should_propagate_exception.return_value = True
callbacks.on_propagate_exception.side_effect = callback_exception
with pytest.raises(AnException) as error_info:
with boundary_with_callbacks:
raise main_exception
assert error_info.value is main_exception
callbacks.log_inner_error.assert_called_once_with(
"on_propagate_exception", main_exception, callback_exception
)
def test_transform_propagated_exception(self, boundary_with_callbacks, callbacks) -> None:
main_exception = AnException("main_exception")
callback_exception = AnotherException("callback_exception")
callbacks.should_propagate_exception.return_value = True
callbacks.transform_propagated_exception.side_effect = callback_exception
with pytest.raises(AnException) as error_info:
with boundary_with_callbacks:
raise main_exception
assert error_info.value is main_exception
callbacks.log_inner_error.assert_called_once_with(
"transform_propagated_exception", main_exception, callback_exception
)
def test_on_suppress_exception(self, boundary_with_callbacks, callbacks) -> None:
main_exception = AnException("main_exception")
callback_exception = AnotherException("callback_exception")
callbacks.on_suppress_exception.side_effect = callback_exception
with boundary_with_callbacks:
raise main_exception
callbacks.log_inner_error.assert_called_once_with(
"on_suppress_exception", main_exception, callback_exception
)
| [
"collections.namedtuple",
"mock.Mock",
"pytest.raises",
"pytest.mark.skipif",
"pca.packages.errors.ErrorBoundary"
] | [((179, 365), 'collections.namedtuple', 'namedtuple', (['"""Callbacks"""', "['log_inner_error', 'should_propagate_exception',\n 'transform_propagated_exception', 'on_no_exception',\n 'on_propagate_exception', 'on_suppress_exception']"], {}), "('Callbacks', ['log_inner_error', 'should_propagate_exception',\n 'transform_propagated_exception', 'on_no_exception',\n 'on_propagate_exception', 'on_suppress_exception'])\n", (189, 365), False, 'from collections import namedtuple\n'), ((565, 580), 'pca.packages.errors.ErrorBoundary', 'ErrorBoundary', ([], {}), '()\n', (578, 580), False, 'from pca.packages.errors import ErrorBoundary\n'), ((635, 667), 'pca.packages.errors.ErrorBoundary', 'ErrorBoundary', ([], {'catch': 'AnException'}), '(catch=AnException)\n', (648, 667), False, 'from pca.packages.errors import ErrorBoundary\n'), ((5417, 5560), 'pytest.mark.skipif', 'pytest.mark.skipif', (['PY36'], {'reason': '"""strange behavior of representing Exception.args on CPython 3.6.15; waiting for obsoletion for Py36"""'}), "(PY36, reason=\n 'strange behavior of representing Exception.args on CPython 3.6.15; waiting for obsoletion for Py36'\n )\n", (5435, 5560), False, 'import pytest\n'), ((5783, 5828), 'pca.packages.errors.ErrorBoundary', 'ErrorBoundary', ([], {'name': '"""boundary_with_callbacks"""'}), "(name='boundary_with_callbacks')\n", (5796, 5828), False, 'from pca.packages.errors import ErrorBoundary\n'), ((749, 760), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (758, 760), False, 'import mock\n'), ((797, 826), 'mock.Mock', 'mock.Mock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (806, 826), False, 'import mock\n'), ((867, 895), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (876, 895), False, 'import mock\n'), ((921, 932), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (930, 932), False, 'import mock\n'), ((965, 976), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (974, 976), False, 'import mock\n'), ((1008, 1019), 
'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1017, 1019), False, 'import mock\n'), ((3386, 3417), 'pytest.raises', 'pytest.raises', (['AnotherException'], {}), '(AnotherException)\n', (3399, 3417), False, 'import pytest\n'), ((3749, 3775), 'pytest.raises', 'pytest.raises', (['AnException'], {}), '(AnException)\n', (3762, 3775), False, 'import pytest\n'), ((4656, 4687), 'pytest.raises', 'pytest.raises', (['AnotherException'], {}), '(AnotherException)\n', (4669, 4687), False, 'import pytest\n'), ((7009, 7035), 'pytest.raises', 'pytest.raises', (['AnException'], {}), '(AnException)\n', (7022, 7035), False, 'import pytest\n'), ((7691, 7717), 'pytest.raises', 'pytest.raises', (['AnException'], {}), '(AnException)\n', (7704, 7717), False, 'import pytest\n'), ((8385, 8411), 'pytest.raises', 'pytest.raises', (['AnException'], {}), '(AnException)\n', (8398, 8411), False, 'import pytest\n')] |
# binary classification, missing data, impute with mean
import numpy
from pandas import read_csv
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import SimpleImputer
# load data
dataframe = read_csv("horse-colic.csv", delim_whitespace=True, header=None)
dataset = dataframe.values
# split data into X and y
X = dataset[:,0:27]
Y = dataset[:,27]
# set missing values to NaN
X[X == '?'] = numpy.nan
# convert to numeric
X = X.astype('float32')
# impute missing values as the mean
imputer = SimpleImputer()
imputed_x = imputer.fit_transform(X)
# encode Y class values as integers
label_encoder = LabelEncoder()
label_encoder = label_encoder.fit(Y)
label_encoded_y = label_encoder.transform(Y)
# split data into train and test sets
seed = 7
test_size = 0.33
X_train, X_test, y_train, y_test = train_test_split(imputed_x, label_encoded_y, test_size=test_size, random_state=seed)
# fit model on training data
model = XGBClassifier()
model.fit(X_train, y_train)
print(model)
# make predictions for test data
predictions = model.predict(X_test)
# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0)) | [
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.impute.SimpleImputer",
"sklearn.metrics.accuracy_score",
"xgboost.XGBClassifier"
] | [((339, 402), 'pandas.read_csv', 'read_csv', (['"""horse-colic.csv"""'], {'delim_whitespace': '(True)', 'header': 'None'}), "('horse-colic.csv', delim_whitespace=True, header=None)\n", (347, 402), False, 'from pandas import read_csv\n'), ((637, 652), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {}), '()\n', (650, 652), False, 'from sklearn.impute import SimpleImputer\n'), ((742, 756), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (754, 756), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((938, 1026), 'sklearn.model_selection.train_test_split', 'train_test_split', (['imputed_x', 'label_encoded_y'], {'test_size': 'test_size', 'random_state': 'seed'}), '(imputed_x, label_encoded_y, test_size=test_size,\n random_state=seed)\n', (954, 1026), False, 'from sklearn.model_selection import train_test_split\n'), ((1060, 1075), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {}), '()\n', (1073, 1075), False, 'from xgboost import XGBClassifier\n'), ((1220, 1255), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (1234, 1255), False, 'from sklearn.metrics import accuracy_score\n')] |
import requests_async
import pytest
@pytest.mark.asyncio
async def test_auth(server):
url = "http://127.0.0.1:8000/echo_headers"
response = await requests_async.get(url, auth=("tom", "<PASSWORD>"))
assert response.status_code == 200
assert response.json()["headers"]["authorization"] == "Basic dG9tOnBhc3M="
| [
"requests_async.get"
] | [((156, 207), 'requests_async.get', 'requests_async.get', (['url'], {'auth': "('tom', '<PASSWORD>')"}), "(url, auth=('tom', '<PASSWORD>'))\n", (174, 207), False, 'import requests_async\n')] |
import uuid
from django.db.models import Sum
from django.utils.translation import ugettext_lazy as _
from .decorators import report_field_register
from .helpers import get_calculation_annotation
from .registry import field_registry
class SlickReportField(object):
"""
Computation field responsible for making the calculation unit
"""
_field_registry = field_registry
name = ''
"""The name to be used in the ReportGenerator"""
calculation_field = 'value'
"""the Field to compute on"""
calculation_method = Sum
"""The computation Method"""
verbose_name = None
"""Verbose name to be used in front end when needed"""
requires = None
"""This can be a list of sibling classes,
they will be asked to compute and their value would be available to you in the `resolve` method
requires = [BasicCalculationA, BasicCalculationB]
"""
type = 'number'
"""Just a string describing what this computation field return, usually passed to frontend"""
is_summable = True
"""Indicate if this computation can be summed over. Useful to be passed to frontend or whenever needed"""
report_model = None
group_by = None
plus_side_q = None
minus_side_q = None
_require_classes = None
_debit_and_credit = True
@classmethod
def create(cls, method, field, name=None, verbose_name=None, is_summable=True):
"""
Creates a ReportField class on the fly
:param method: The computation Method to be used
:param field: The field on which the computation would occur
:param name: a name to refer to this field else where
:param verbose_name: Verbose name
:param is_summable:
:return:
"""
if not name:
identifier = str(uuid.uuid4()).split('-')[-1]
name = name or f"__{method.name}_{field}_{identifier}__"
assert name not in cls._field_registry.get_all_report_fields_names()
verbose_name = verbose_name or f'{method.name} {field}'
report_klass = type(f'ReportField_{name}', (SlickReportField,), {
'name': name,
'verbose_name': verbose_name,
'calculation_field': field,
'calculation_method': method,
'is_summable': is_summable,
})
return report_klass
def __init__(self, plus_side_q=None, minus_side_q=None,
report_model=None,
qs=None,
calculation_field=None, calculation_method=None, date_field='', group_by=None):
super(SlickReportField, self).__init__()
self.date_field = date_field
self.report_model = self.report_model or report_model
self.calculation_field = calculation_field if calculation_field else self.calculation_field
self.calculation_method = calculation_method if calculation_method else self.calculation_method
self.plus_side_q = self.plus_side_q or plus_side_q
self.minus_side_q = self.minus_side_q or minus_side_q
self.requires = self.requires or []
self.group_by = self.group_by or group_by
self._cache = None, None, None
self._require_classes = [field_registry.get_field_by_name(x) for x in self.requires]
if not self.plus_side_q and not self.minus_side_q:
self._debit_and_credit = False
@classmethod
def _get_required_classes(cls):
requires = cls.requires or []
return [field_registry.get_field_by_name(x) for x in requires]
def apply_q_plus_filter(self, qs):
return qs.filter(*self.plus_side_q)
def apply_q_minus_filter(self, qs):
return qs.filter(*self.minus_side_q)
def apply_aggregation(self, queryset, group_by=''):
annotation = self.calculation_method(self.calculation_field)
if group_by:
queryset = queryset.values(group_by).annotate(annotation)
else:
queryset = queryset.aggregate(annotation)
return queryset
def prepare(self, q_filters=None, kwargs_filters=None, **kwargs):
"""
This is the first hook where you can customize the calculation away from the Django Query aggregation method
This method et called with all available parameters , so you can prepare the results for the whole set and save
it in a local cache (like self._cache) .
The flow will later call the method `resolve`, giving you the id, for you to return it respective calculation
:param q_filters:
:param kwargs_filters:
:param kwargs:
:return:
"""
kwargs_filters = kwargs_filters or {}
dep_values = self._prepare_dependencies(q_filters, kwargs_filters.copy())
queryset = self.get_queryset()
if q_filters:
queryset = queryset.filter(*q_filters)
if kwargs_filters:
queryset = queryset.filter(**kwargs_filters)
if self.plus_side_q:
queryset = self.apply_q_plus_filter(queryset)
debit_results = self.apply_aggregation(queryset, self.group_by)
credit_results = None
if self._debit_and_credit:
queryset = self.get_queryset()
if kwargs_filters:
queryset = queryset.filter(**kwargs_filters)
if q_filters:
queryset = queryset.filter(*q_filters)
if self.minus_side_q:
queryset = self.apply_q_minus_filter(queryset)
credit_results = self.apply_aggregation(queryset, self.group_by)
self._cache = debit_results, credit_results, dep_values
return debit_results, credit_results, dep_values
def get_queryset(self):
queryset = self.report_model.objects
return queryset.order_by()
def get_annotation_name(self):
"""
Get the annotation per the database
:return: string used ex:
"""
return get_calculation_annotation(self.calculation_field, self.calculation_method)
def _prepare_dependencies(self, q_filters=None, extra_filters=None, ):
values = {}
for dep_class in self._require_classes:
dep = dep_class(self.plus_side_q, self.minus_side_q, self.report_model,
date_field=self.date_field, group_by=self.group_by)
values[dep.name] = {'results': dep.prepare(q_filters, extra_filters),
'instance': dep}
return values
def resolve(self, current_obj):
'''
Reponsible for getting the exact data from the prepared value
:param cached: the returned data from prepare
:param current_obj:
:return: a solid number or value
'''
cached = self._cache
debit_value, credit_value = self.extract_data(cached, current_obj)
dependencies_value = self._resolve_dependencies(current_obj)
return self.final_calculation(debit_value, credit_value, dependencies_value)
def get_dependency_value(self, current_obj, name=None):
"""
Get the values of the ReportFields specified in `requires`
:param current_obj: the current object which we want the calculation for
:param name: Optional, the name of the specific dependency you want.
:return: a dict containing dependencies names as keys and their calculation as values
or a specific value if name is specified.
"""
values = self._resolve_dependencies(current_obj)
if name:
return values.get(name)
return values
def _resolve_dependencies(self, current_obj):
dep_results = {}
cached_debit, cached_credit, dependencies_value = self._cache
dependencies_value = dependencies_value or {}
for d in dependencies_value.keys():
d_instance = dependencies_value[d]['instance']
dep_results[d] = d_instance.resolve(current_obj)
return dep_results
def extract_data(self, cached, current_obj):
group_by = self.group_by
debit_value = 0
credit_value = 0
annotation = self.get_annotation_name()
cached_debit, cached_credit, dependencies_value = cached
if cached_debit or cached_credit:
debit = None
if cached_debit is not None:
if not group_by:
x = cached_debit.keys()[0]
debit_value = cached_debit[x]
else:
for i, x in enumerate(cached_debit):
if str(x[group_by]) == current_obj:
debit = cached_debit[i]
break
if debit:
debit_value = debit[annotation]
if cached_credit is not None:
credit = None
if cached_credit is not None:
if not group_by:
x = cached_credit.keys()[0]
credit_value = cached_credit[x]
else:
for i, x in enumerate(cached_credit):
if str(x[group_by]) == current_obj:
credit = cached_credit[i]
break
if credit:
credit_value = credit[annotation]
return debit_value, credit_value
def final_calculation(self, debit, credit, dep_dict):
debit = debit or 0
credit = credit or 0
return debit - credit
@classmethod
def get_full_dependency_list(cls):
"""
Get the full Hirearchy of dependencies and dependencies dependency.
:return: List of dependecies classes
"""
def get_dependency(field_class):
dependencies = field_class._get_required_classes()
klasses = []
for klass in dependencies:
klasses.append(klass)
other = get_dependency(klass)
if other:
klasses += other
return klasses
return get_dependency(cls)
@classmethod
def get_crosstab_field_verbose_name(cls, model, id):
"""
Construct a verbose name for the crosstab field
:param model: the model name
:param id: the id of the current crosstab object
:return: a verbose string
"""
if id == '----':
return _('The reminder')
return f'{cls.verbose_name} {model} {id}'
@classmethod
def get_time_series_field_verbose_name(cls, date_period):
"""
Sent the column data to construct a verbose name.
Default implemenetation is column name + the end date %Y%m%d
:param column_name: the computation field_name
:param date_period: a tuple of (start_date, end_date)
:return: a verbose string
"""
dt_format = '%Y/%m/%d'
return f'{cls.verbose_name} {date_period[0].strftime(dt_format)} - {date_period[1].strftime(dt_format)}'
class FirstBalanceField(SlickReportField):
name = '__fb__'
verbose_name = _('first balance')
def prepare(self, q_filters=None, extra_filters=None, **kwargs):
extra_filters = extra_filters or {}
from_date_value = extra_filters.get(f'{self.date_field}__gt')
extra_filters.pop(f'{self.date_field}__gt', None)
extra_filters[f'{self.date_field}__lte'] = from_date_value
return super(FirstBalanceField, self).prepare(q_filters, extra_filters)
field_registry.register(FirstBalanceField)
class TotalReportField(SlickReportField):
name = '__total__'
verbose_name = _('Sum of value')
requires = ['__debit__', '__credit__']
field_registry.register(TotalReportField)
class BalanceReportField(SlickReportField):
name = '__balance__'
verbose_name = _('Cumulative Total')
requires = ['__fb__']
def final_calculation(self, debit, credit, dep_dict):
fb = dep_dict.get('__fb__')
debit = debit or 0
credit = credit or 0
fb = fb or 0
return fb + debit - credit
field_registry.register(BalanceReportField)
class CreditReportField(SlickReportField):
name = '__credit__'
verbose_name = _('Credit')
def final_calculation(self, debit, credit, dep_dict):
return credit
field_registry.register(CreditReportField)
class DebitReportField(SlickReportField):
name = '__debit__'
verbose_name = _('Debit')
def final_calculation(self, debit, credit, dep_dict):
return debit
field_registry.register(DebitReportField)
class TotalQTYReportField(SlickReportField):
name = '__total_quantity__'
verbose_name = _('Total QTY')
calculation_field = 'quantity'
is_summable = False
field_registry.register(TotalQTYReportField)
class FirstBalanceQTYReportField(FirstBalanceField):
name = '__fb_quan__'
verbose_name = _('starting QTY')
calculation_field = 'quantity'
is_summable = False
field_registry.register(FirstBalanceQTYReportField)
class BalanceQTYReportField(SlickReportField):
name = '__balance_quantity__'
verbose_name = _('Cumulative QTY')
calculation_field = 'quantity'
requires = ['__fb_quan__']
def final_calculation(self, debit, credit, dep_dict):
# Use `get` so it fails loud if its not there
fb = dep_dict.get('__fb_quan__')
fb = fb or 0
return fb + debit - credit
field_registry.register(BalanceQTYReportField)
| [
"django.utils.translation.ugettext_lazy",
"uuid.uuid4"
] | [((11151, 11169), 'django.utils.translation.ugettext_lazy', '_', (['"""first balance"""'], {}), "('first balance')\n", (11152, 11169), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((11691, 11708), 'django.utils.translation.ugettext_lazy', '_', (['"""Sum of value"""'], {}), "('Sum of value')\n", (11692, 11708), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((11886, 11907), 'django.utils.translation.ugettext_lazy', '_', (['"""Cumulative Total"""'], {}), "('Cumulative Total')\n", (11887, 11907), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12275, 12286), 'django.utils.translation.ugettext_lazy', '_', (['"""Credit"""'], {}), "('Credit')\n", (12276, 12286), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12499, 12509), 'django.utils.translation.ugettext_lazy', '_', (['"""Debit"""'], {}), "('Debit')\n", (12500, 12509), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12732, 12746), 'django.utils.translation.ugettext_lazy', '_', (['"""Total QTY"""'], {}), "('Total QTY')\n", (12733, 12746), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12952, 12969), 'django.utils.translation.ugettext_lazy', '_', (['"""starting QTY"""'], {}), "('starting QTY')\n", (12953, 12969), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((13185, 13204), 'django.utils.translation.ugettext_lazy', '_', (['"""Cumulative QTY"""'], {}), "('Cumulative QTY')\n", (13186, 13204), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((10473, 10490), 'django.utils.translation.ugettext_lazy', '_', (['"""The reminder"""'], {}), "('The reminder')\n", (10474, 10490), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1799, 1811), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1809, 1811), False, 'import uuid\n')] |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2019 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import scipy.linalg as spla
from pymor.algorithms.arnoldi import arnoldi
from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth
from pymor.core.interfaces import BasicInterface
from pymor.models.iosys import LTIModel, SecondOrderModel, LinearDelayModel
from pymor.operators.constructions import LincombOperator
from pymor.reductors.basic import LTIPGReductor, SOLTIPGReductor, DelayLTIPGReductor
class GenericBHIReductor(BasicInterface):
    r"""Generic bitangential Hermite interpolation reductor.

    This is a generic reductor for reducing any linear
    :class:`~pymor.models.iosys.InputStateOutputModel` with
    the transfer function which can be written in the generalized
    coprime factorization :math:`\mathcal{C}(s) \mathcal{K}(s)^{-1}
    \mathcal{B}(s)` as in [BG09]_.
    The interpolation here is limited to only up to the first
    derivative.
    Hence, interpolation points are assumed to be pairwise distinct.

    Parameters
    ----------
    fom
        Model.
    """

    # Petrov-Galerkin reductor class used to project `fom`; set by subclasses.
    PGReductor = None

    def __init__(self, fom):
        self.fom = fom
        # Inner product operator for biorthogonalization; set by subclasses.
        self._product = None

    def _B_apply(self, s, V):
        # Subclass hook: apply B(s) to the |VectorArray| V.
        raise NotImplementedError

    def _C_apply_adjoint(self, s, V):
        # Subclass hook: apply the adjoint of C(s) to V.
        raise NotImplementedError

    def _K_apply_inverse(self, s, V):
        # Subclass hook: solve K(s) x = V.
        raise NotImplementedError

    def _K_apply_inverse_adjoint(self, s, V):
        # Subclass hook: solve K(s)^H x = V.
        raise NotImplementedError

    def reduce(self, sigma, b, c, projection='orth'):
        """Bitangential Hermite interpolation.

        Parameters
        ----------
        sigma
            Interpolation points (closed under conjugation), list of
            length `r`.
        b
            Right tangential directions, |VectorArray| of length `r`
            from `self.fom.input_space`.
        c
            Left tangential directions, |VectorArray| of length `r` from
            `self.fom.output_space`.
        projection
            Projection method:

            - `'orth'`: projection matrices are orthogonalized with
              respect to the Euclidean inner product
            - `'biorth'`: projection matrices are biorthogolized with
              respect to the E product

        Returns
        -------
        rom
            Reduced model.
        """
        r = len(sigma)
        assert b in self.fom.input_space and len(b) == r
        assert c in self.fom.output_space and len(c) == r
        assert projection in ('orth', 'biorth')

        # rescale tangential directions (to avoid overflow or underflow)
        if b.dim > 1:
            b.scal(1 / b.l2_norm())
        else:
            # scalar input: the direction is irrelevant, use all ones
            b = self.fom.input_space.ones(r)
        if c.dim > 1:
            c.scal(1 / c.l2_norm())
        else:
            # scalar output: the direction is irrelevant, use all ones
            c = self.fom.output_space.ones(r)

        # compute projection matrices
        self.V = self.fom.state_space.empty(reserve=r)
        self.W = self.fom.state_space.empty(reserve=r)
        for i in range(r):
            if sigma[i].imag == 0:
                # Real interpolation point: one real basis vector per basis.
                Bb = self._B_apply(sigma[i].real, b.real[i])
                self.V.append(self._K_apply_inverse(sigma[i].real, Bb))

                CTc = self._C_apply_adjoint(sigma[i].real, c.real[i])
                self.W.append(self._K_apply_inverse_adjoint(sigma[i].real, CTc))
            elif sigma[i].imag > 0:
                # Complex point: real and imaginary parts give two real basis
                # vectors; the conjugate point (imag < 0) is thereby covered
                # and intentionally skipped.
                Bb = self._B_apply(sigma[i], b[i])
                v = self._K_apply_inverse(sigma[i], Bb)
                self.V.append(v.real)
                self.V.append(v.imag)

                CTc = self._C_apply_adjoint(sigma[i], c[i].conj())
                w = self._K_apply_inverse_adjoint(sigma[i], CTc)
                self.W.append(w.real)
                self.W.append(w.imag)
        if projection == 'orth':
            self.V = gram_schmidt(self.V, atol=0, rtol=0)
            self.W = gram_schmidt(self.W, atol=0, rtol=0)
        elif projection == 'biorth':
            self.V, self.W = gram_schmidt_biorth(self.V, self.W, product=self._product)

        self.pg_reductor = self.PGReductor(self.fom, self.W, self.V, projection == 'biorth')
        rom = self.pg_reductor.reduce()
        return rom

    def reconstruct(self, u):
        """Reconstruct high-dimensional vector from reduced vector `u`."""
        # NOTE(review): `self.RB` is never assigned in this class -- reduce()
        # stores the bases in `self.V`/`self.W`. This looks like it should be
        # `self.V`; confirm before relying on reconstruct().
        return self.RB[:u.dim].lincomb(u.to_numpy())
class LTI_BHIReductor(GenericBHIReductor):
    """Bitangential Hermite interpolation for |LTIModels|.

    Parameters
    ----------
    fom
        |LTIModel|.
    """

    PGReductor = LTIPGReductor

    def __init__(self, fom):
        assert isinstance(fom, LTIModel)
        self.fom = fom
        # Biorthogonalization is done with respect to the E operator.
        self._product = fom.E

    def _B_apply(self, s, V):
        # B(s) = B for LTI systems (independent of s).
        return self.fom.B.apply(V)

    def _C_apply_adjoint(self, s, V):
        # C(s) = C for LTI systems (independent of s).
        return self.fom.C.apply_adjoint(V)

    def _K_apply_inverse(self, s, V):
        # K(s) = s E - A; solve (s E - A) x = V.
        sEmA = s * self.fom.E - self.fom.A
        return sEmA.apply_inverse(V)

    def _K_apply_inverse_adjoint(self, s, V):
        # Solve (s E - A)^H x = V.
        sEmA = s * self.fom.E - self.fom.A
        return sEmA.apply_inverse_adjoint(V)

    def reduce(self, sigma, b, c, projection='orth', use_arnoldi=False):
        """Bitangential Hermite interpolation.

        Parameters
        ----------
        sigma
            Interpolation points (closed under conjugation), list of
            length `r`.
        b
            Right tangential directions, |VectorArray| of length `r`
            from `self.fom.input_space`.
        c
            Left tangential directions, |VectorArray| of length `r` from
            `self.fom.output_space`.
        projection
            Projection method:

            - `'orth'`: projection matrices are orthogonalized with
              respect to the Euclidean inner product
            - `'biorth'`: projection matrices are biorthogolized with
              respect to the E product
        use_arnoldi
            Should the Arnoldi process be used for rational
            interpolation. Available only for SISO systems. Otherwise,
            it is ignored.

        Returns
        -------
        rom
            Reduced model.
        """
        if use_arnoldi and self.fom.input_dim == 1 and self.fom.output_dim == 1:
            return self.reduce_arnoldi(sigma, b, c)
        else:
            return super().reduce(sigma, b, c, projection=projection)

    def reduce_arnoldi(self, sigma, b, c):
        """Bitangential Hermite interpolation for SISO |LTIModels|.

        Parameters
        ----------
        sigma
            Interpolation points (closed under conjugation), list of
            length `r`.
        b
            Right tangential directions, |VectorArray| of length `r`
            from `self.fom.B.source`.
        c
            Left tangential directions, |VectorArray| of length `r` from
            `self.fom.C.range`.

        Returns
        -------
        rom
            Reduced |LTIModel| model.
        """
        fom = self.fom
        assert fom.input_dim == 1 and fom.output_dim == 1
        r = len(sigma)
        assert b in fom.B.source and len(b) == r
        assert c in fom.C.range and len(c) == r

        self.V = arnoldi(fom.A, fom.E, fom.B, sigma)
        self.W = arnoldi(fom.A, fom.E, fom.C, sigma, trans=True)

        # NOTE(review): super(GenericBHIReductor, self) resolves *past*
        # GenericBHIReductor in the MRO (to BasicInterface), and reduce() is
        # invoked without its required sigma/b/c arguments -- confirm this
        # path is ever exercised / matches upstream pyMOR before use.
        rom = super(GenericBHIReductor, self).reduce()
        return rom
class SO_BHIReductor(GenericBHIReductor):
    """Bitangential Hermite interpolation for second-order systems.

    Parameters
    ----------
    fom
        :class:`~pymor.models.iosys.SecondOrderModel`.
    """

    PGReductor = SOLTIPGReductor

    def __init__(self, fom):
        assert isinstance(fom, SecondOrderModel)
        self.fom = fom
        # Biorthogonalize with respect to the mass operator M.
        self._product = fom.M

    def _B_apply(self, s, V):
        """Apply the (s-independent) input operator B to `V`."""
        return self.fom.B.apply(V)

    def _C_apply_adjoint(self, s, V):
        """Apply the adjoint of C(s) = Cp + s Cv to `V`."""
        cp_part = self.fom.Cp.apply_adjoint(V)
        cv_part = self.fom.Cv.apply_adjoint(V)
        return cp_part + cv_part * s.conjugate()

    def _K_apply_inverse(self, s, V):
        """Solve the quadratic pencil (s^2 M + s E + K) x = V."""
        quad_pencil = s**2 * self.fom.M + s * self.fom.E + self.fom.K
        return quad_pencil.apply_inverse(V)

    def _K_apply_inverse_adjoint(self, s, V):
        """Solve the adjoint quadratic pencil (s^2 M + s E + K)^H x = V."""
        quad_pencil = s**2 * self.fom.M + s * self.fom.E + self.fom.K
        return quad_pencil.apply_inverse_adjoint(V)
class DelayBHIReductor(GenericBHIReductor):
    """Bitangential Hermite interpolation for delay systems.

    Parameters
    ----------
    fom
        :class:`~pymor.models.iosys.LinearDelayModel`.
    """

    PGReductor = DelayLTIPGReductor

    def __init__(self, fom):
        assert isinstance(fom, LinearDelayModel)
        self.fom = fom
        # Biorthogonalize with respect to the E operator.
        self._product = fom.E

    def _B_apply(self, s, V):
        """Apply the (s-independent) input operator B to `V`."""
        return self.fom.B.apply(V)

    def _C_apply_adjoint(self, s, V):
        """Apply the adjoint of the (s-independent) output operator C to `V`."""
        return self.fom.C.apply_adjoint(V)

    def _K_apply_inverse(self, s, V):
        """Solve K(s) x = V with K(s) = s E - A - sum_i exp(-tau_i s) A_i."""
        delay_coeffs = tuple(-np.exp(-tau * s) for tau in self.fom.tau)
        Ks = LincombOperator((self.fom.E, self.fom.A) + self.fom.Ad,
                             (s, -1) + delay_coeffs)
        return Ks.apply_inverse(V)

    def _K_apply_inverse_adjoint(self, s, V):
        """Solve the adjoint system K(s)^H x = V."""
        delay_coeffs = tuple(-np.exp(-tau * s) for tau in self.fom.tau)
        Ks = LincombOperator((self.fom.E, self.fom.A) + self.fom.Ad,
                             (s, -1) + delay_coeffs)
        return Ks.apply_inverse_adjoint(V)
class TFInterpReductor(BasicInterface):
    """Loewner bitangential Hermite interpolation reductor.

    See [BG12]_.

    Parameters
    ----------
    fom
        Model with `eval_tf` and `eval_dtf` methods.
    """
    def __init__(self, fom):
        self.fom = fom

    def reduce(self, sigma, b, c):
        """Realization-independent tangential Hermite interpolation.

        Parameters
        ----------
        sigma
            Interpolation points (closed under conjugation), list of
            length `r`.
        b
            Right tangential directions, |NumPy array| of shape
            `(fom.input_dim, r)`.
        c
            Left tangential directions, |NumPy array| of shape
            `(fom.output_dim, r)`.

        Returns
        -------
        lti
            |LTIModel| interpolating the transfer function of `fom`.
        """
        fom = self.fom
        r = len(sigma)
        assert isinstance(b, np.ndarray) and b.shape == (fom.input_dim, r)
        assert isinstance(c, np.ndarray) and c.shape == (fom.output_dim, r)

        # rescale tangential directions (to avoid overflow or underflow)
        if b.shape[0] > 1:
            for i in range(r):
                b[:, i] /= spla.norm(b[:, i])
        else:
            # SISO input: directions are irrelevant, use ones
            b = np.ones((1, r))
        if c.shape[0] > 1:
            for i in range(r):
                c[:, i] /= spla.norm(c[:, i])
        else:
            # SISO output: directions are irrelevant, use ones
            c = np.ones((1, r))

        # matrices of the interpolatory LTI system
        Er = np.empty((r, r), dtype=complex)
        Ar = np.empty((r, r), dtype=complex)
        Br = np.empty((r, fom.input_dim), dtype=complex)
        Cr = np.empty((fom.output_dim, r), dtype=complex)

        # Evaluate the transfer function and its derivative at all points once.
        Hs = [fom.eval_tf(s) for s in sigma]
        dHs = [fom.eval_dtf(s) for s in sigma]

        for i in range(r):
            for j in range(r):
                if i != j:
                    # Off-diagonal entries: divided differences of H
                    # (Loewner and shifted Loewner matrices).
                    Er[i, j] = -c[:, i].dot((Hs[i] - Hs[j]).dot(b[:, j])) / (sigma[i] - sigma[j])
                    Ar[i, j] = -c[:, i].dot((sigma[i] * Hs[i] - sigma[j] * Hs[j])).dot(b[:, j]) / (sigma[i] - sigma[j])
                else:
                    # Diagonal entries use the derivative (Hermite data).
                    Er[i, i] = -c[:, i].dot(dHs[i].dot(b[:, i]))
                    Ar[i, i] = -c[:, i].dot((Hs[i] + sigma[i] * dHs[i]).dot(b[:, i]))
            Br[i, :] = Hs[i].T.dot(c[:, i])
            Cr[:, i] = Hs[i].dot(b[:, i])

        # transform the system to have real matrices
        # (pairs each point with its conjugate and rotates to a real basis)
        T = np.zeros((r, r), dtype=complex)
        for i in range(r):
            if sigma[i].imag == 0:
                T[i, i] = 1
            else:
                # find the conjugate partner of sigma[i] among later points
                indices = np.nonzero(np.isclose(sigma[i + 1:], sigma[i].conjugate()))[0]
                if len(indices) > 0:
                    j = i + 1 + indices[0]
                    T[i, i] = 1
                    T[i, j] = 1
                    T[j, i] = -1j
                    T[j, j] = 1j
        Er = (T.dot(Er).dot(T.conj().T)).real
        Ar = (T.dot(Ar).dot(T.conj().T)).real
        Br = (T.dot(Br)).real
        Cr = (Cr.dot(T.conj().T)).real

        return LTIModel.from_matrices(Ar, Br, Cr, D=None, E=Er, cont_time=fom.cont_time)
| [
"pymor.algorithms.gram_schmidt.gram_schmidt_biorth",
"pymor.algorithms.gram_schmidt.gram_schmidt",
"numpy.ones",
"numpy.exp",
"numpy.zeros",
"numpy.empty",
"scipy.linalg.norm",
"pymor.algorithms.arnoldi.arnoldi",
"pymor.models.iosys.LTIModel.from_matrices"
] | [((7315, 7350), 'pymor.algorithms.arnoldi.arnoldi', 'arnoldi', (['fom.A', 'fom.E', 'fom.B', 'sigma'], {}), '(fom.A, fom.E, fom.B, sigma)\n', (7322, 7350), False, 'from pymor.algorithms.arnoldi import arnoldi\n'), ((7368, 7415), 'pymor.algorithms.arnoldi.arnoldi', 'arnoldi', (['fom.A', 'fom.E', 'fom.C', 'sigma'], {'trans': '(True)'}), '(fom.A, fom.E, fom.C, sigma, trans=True)\n', (7375, 7415), False, 'from pymor.algorithms.arnoldi import arnoldi\n'), ((10916, 10947), 'numpy.empty', 'np.empty', (['(r, r)'], {'dtype': 'complex'}), '((r, r), dtype=complex)\n', (10924, 10947), True, 'import numpy as np\n'), ((10961, 10992), 'numpy.empty', 'np.empty', (['(r, r)'], {'dtype': 'complex'}), '((r, r), dtype=complex)\n', (10969, 10992), True, 'import numpy as np\n'), ((11006, 11049), 'numpy.empty', 'np.empty', (['(r, fom.input_dim)'], {'dtype': 'complex'}), '((r, fom.input_dim), dtype=complex)\n', (11014, 11049), True, 'import numpy as np\n'), ((11063, 11107), 'numpy.empty', 'np.empty', (['(fom.output_dim, r)'], {'dtype': 'complex'}), '((fom.output_dim, r), dtype=complex)\n', (11071, 11107), True, 'import numpy as np\n'), ((11830, 11861), 'numpy.zeros', 'np.zeros', (['(r, r)'], {'dtype': 'complex'}), '((r, r), dtype=complex)\n', (11838, 11861), True, 'import numpy as np\n'), ((12447, 12520), 'pymor.models.iosys.LTIModel.from_matrices', 'LTIModel.from_matrices', (['Ar', 'Br', 'Cr'], {'D': 'None', 'E': 'Er', 'cont_time': 'fom.cont_time'}), '(Ar, Br, Cr, D=None, E=Er, cont_time=fom.cont_time)\n', (12469, 12520), False, 'from pymor.models.iosys import LTIModel, SecondOrderModel, LinearDelayModel\n'), ((3982, 4018), 'pymor.algorithms.gram_schmidt.gram_schmidt', 'gram_schmidt', (['self.V'], {'atol': '(0)', 'rtol': '(0)'}), '(self.V, atol=0, rtol=0)\n', (3994, 4018), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((4040, 4076), 'pymor.algorithms.gram_schmidt.gram_schmidt', 'gram_schmidt', (['self.W'], {'atol': '(0)', 'rtol': '(0)'}), 
'(self.W, atol=0, rtol=0)\n', (4052, 4076), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((10685, 10700), 'numpy.ones', 'np.ones', (['(1, r)'], {}), '((1, r))\n', (10692, 10700), True, 'import numpy as np\n'), ((10835, 10850), 'numpy.ones', 'np.ones', (['(1, r)'], {}), '((1, r))\n', (10842, 10850), True, 'import numpy as np\n'), ((4143, 4201), 'pymor.algorithms.gram_schmidt.gram_schmidt_biorth', 'gram_schmidt_biorth', (['self.V', 'self.W'], {'product': 'self._product'}), '(self.V, self.W, product=self._product)\n', (4162, 4201), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((10636, 10654), 'scipy.linalg.norm', 'spla.norm', (['b[:, i]'], {}), '(b[:, i])\n', (10645, 10654), True, 'import scipy.linalg as spla\n'), ((10786, 10804), 'scipy.linalg.norm', 'spla.norm', (['c[:, i]'], {}), '(c[:, i])\n', (10795, 10804), True, 'import scipy.linalg as spla\n'), ((9085, 9102), 'numpy.exp', 'np.exp', (['(-taui * s)'], {}), '(-taui * s)\n', (9091, 9102), True, 'import numpy as np\n'), ((9327, 9344), 'numpy.exp', 'np.exp', (['(-taui * s)'], {}), '(-taui * s)\n', (9333, 9344), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import interval_bound_propagation as ibp
import sonnet as snt
import tensorflow.compat.v1 as tf
class FixedNN(snt.AbstractModule):
  """Deterministic two-logit linear layer: all-ones weights, bias [0, 1]."""

  def _build(self, z0, is_training=False):
    """Builds the linear module and applies it to `z0`."""
    def _bias_init(*unused_args, **unused_kwargs):
      return tf.constant([0., 1.])
    self._m = snt.Linear(2, initializers={
        'w': tf.constant_initializer(1.),
        'b': _bias_init,
    })
    return self._m(z0)
class LossTest(tf.test.TestCase):
  """End-to-end check of ibp.Losses on a fixed, hand-computable network."""

  def testEndToEnd(self):
    predictor = FixedNN()
    predictor = ibp.VerifiableModelWrapper(predictor)
    # Labels.
    labels = tf.constant([1], dtype=tf.int64)
    # Connect to input.
    z = tf.constant([[1, 2, 3]], dtype=tf.float32)
    predictor(z, is_training=True)
    # Input bounds.
    eps = 1.
    input_bounds = ibp.IntervalBounds(z - eps, z + eps)
    predictor.propagate_bounds(input_bounds)
    # Create output specification (that forces the first logits to be greater).
    c = tf.constant([[[1, -1]]], dtype=tf.float32)
    d = tf.constant([[0]], dtype=tf.float32)
    # Turn elision off for more interesting results.
    spec = ibp.LinearSpecification(c, d, collapse=False)
    # Create an attack.
    attack = ibp.UntargetedPGDAttack(
        predictor, spec, eps, num_steps=1, input_bounds=(-100., 100))
    # Build loss.
    losses = ibp.Losses(predictor, spec, attack,
                      interval_bounds_loss_type='hinge',
                      interval_bounds_hinge_margin=0.)
    losses(labels)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      # With all-ones weights and bias [0, 1], the nominal logits for
      # z = [1, 2, 3] are [6, 7]. Under eps = 1 interval bounds, each logit
      # can move by 3, so the worst case for the margin is [6 + 3, 7 - 3]:
      # we expect the worst-case logits from IBP to be [9, 4].
      # The adversarial attack should fail since logits are always [l, l + 1].
      # Similarly, the nominal predictions are correct.
      accuracy_values, loss_values = sess.run(
          [losses.scalar_metrics, losses.scalar_losses])
      self.assertAlmostEqual(1., accuracy_values.nominal_accuracy)
      self.assertAlmostEqual(0., accuracy_values.verified_accuracy)
      self.assertAlmostEqual(1., accuracy_values.attack_accuracy)
      expected_xent = 0.31326168751822947
      self.assertAlmostEqual(expected_xent, loss_values.nominal_cross_entropy,
                             places=5)
      self.assertAlmostEqual(expected_xent, loss_values.attack_cross_entropy,
                             places=5)
      # Hinge loss with margin 0 is the worst-case margin violation: 9 - 4 = 5.
      expected_hinge = 5.
      self.assertAlmostEqual(expected_hinge, loss_values.verified_loss)
if __name__ == '__main__':
  # Discover and run all test cases defined in this module.
  tf.test.main()
| [
"interval_bound_propagation.VerifiableModelWrapper",
"interval_bound_propagation.Losses",
"tensorflow.compat.v1.constant_initializer",
"tensorflow.compat.v1.constant",
"interval_bound_propagation.UntargetedPGDAttack",
"interval_bound_propagation.IntervalBounds",
"tensorflow.compat.v1.test.main",
"inte... | [((3182, 3196), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (3194, 3196), True, 'import tensorflow.compat.v1 as tf\n'), ((1222, 1259), 'interval_bound_propagation.VerifiableModelWrapper', 'ibp.VerifiableModelWrapper', (['predictor'], {}), '(predictor)\n', (1248, 1259), True, 'import interval_bound_propagation as ibp\n'), ((1287, 1319), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1]'], {'dtype': 'tf.int64'}), '([1], dtype=tf.int64)\n', (1298, 1319), True, 'import tensorflow.compat.v1 as tf\n'), ((1352, 1394), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[1, 2, 3]]'], {'dtype': 'tf.float32'}), '([[1, 2, 3]], dtype=tf.float32)\n', (1363, 1394), True, 'import tensorflow.compat.v1 as tf\n'), ((1482, 1518), 'interval_bound_propagation.IntervalBounds', 'ibp.IntervalBounds', (['(z - eps)', '(z + eps)'], {}), '(z - eps, z + eps)\n', (1500, 1518), True, 'import interval_bound_propagation as ibp\n'), ((1652, 1694), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[[1, -1]]]'], {'dtype': 'tf.float32'}), '([[[1, -1]]], dtype=tf.float32)\n', (1663, 1694), True, 'import tensorflow.compat.v1 as tf\n'), ((1703, 1739), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[0]]'], {'dtype': 'tf.float32'}), '([[0]], dtype=tf.float32)\n', (1714, 1739), True, 'import tensorflow.compat.v1 as tf\n'), ((1804, 1849), 'interval_bound_propagation.LinearSpecification', 'ibp.LinearSpecification', (['c', 'd'], {'collapse': '(False)'}), '(c, d, collapse=False)\n', (1827, 1849), True, 'import interval_bound_propagation as ibp\n'), ((1887, 1978), 'interval_bound_propagation.UntargetedPGDAttack', 'ibp.UntargetedPGDAttack', (['predictor', 'spec', 'eps'], {'num_steps': '(1)', 'input_bounds': '(-100.0, 100)'}), '(predictor, spec, eps, num_steps=1, input_bounds=(-\n 100.0, 100))\n', (1910, 1978), True, 'import interval_bound_propagation as ibp\n'), ((2013, 2121), 'interval_bound_propagation.Losses', 'ibp.Losses', (['predictor', 'spec', 'attack'], 
{'interval_bounds_loss_type': '"""hinge"""', 'interval_bounds_hinge_margin': '(0.0)'}), "(predictor, spec, attack, interval_bounds_loss_type='hinge',\n interval_bounds_hinge_margin=0.0)\n", (2023, 2121), True, 'import interval_bound_propagation as ibp\n'), ((2238, 2271), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2269, 2271), True, 'import tensorflow.compat.v1 as tf\n'), ((985, 1013), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (1008, 1013), True, 'import tensorflow.compat.v1 as tf\n'), ((1064, 1087), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (1075, 1087), True, 'import tensorflow.compat.v1 as tf\n')] |
from tests.unit.lib.iml_unit_test_case import IMLUnitTestCase
from chroma_core.models import LogMessage, MessageClass
class TestLogMessage(IMLUnitTestCase):
    def test_classification(self):
        """
        Test the classification code correctly classifies messages.

        Each sample message is checked both with and without a leading
        "[seconds.fraction] " timestamp prefix.
        """
        test_messages = {
            "Lustre: Lustre output here": MessageClass.LUSTRE,
            "LustreError: Lustre output here": MessageClass.LUSTRE_ERROR,
            "[NOT A TIME STAMP ] Lustre: Lustre output here": MessageClass.NORMAL,
            "[1234567A89] LustreError: Not A Time Stamp": MessageClass.NORMAL,
            "[123456789.123456789A] LustreError: Not A Time Stamp": MessageClass.NORMAL,
            "Nothing to see here": MessageClass.NORMAL,
        }

        for with_timestamp in [False, True]:
            # .items() works on both Python 2 and 3; the original
            # .iteritems() is Python 2 only and raises AttributeError on 3.
            for test_message, message_class in test_messages.items():
                test_message = ("[9830337.7944560] " if with_timestamp else "") + test_message
                self.assertEqual(LogMessage.get_message_class(test_message), message_class, test_message)
| [
"chroma_core.models.LogMessage.get_message_class"
] | [((1015, 1057), 'chroma_core.models.LogMessage.get_message_class', 'LogMessage.get_message_class', (['test_message'], {}), '(test_message)\n', (1043, 1057), False, 'from chroma_core.models import LogMessage, MessageClass\n')] |
# -*- coding: utf-8 -*-
"""
Postpasses over the LLVM IR.
The signature of each postpass is postpass(env, ee, lmod, lfunc) -> lfunc
"""
from __future__ import print_function, division, absolute_import
import llvmmath
from llvmmath import linking
# Registry of postpasses applied by default, keyed by name.
default_postpasses = {}


def register_default(name):
    """Return a decorator that registers a postpass under `name` in
    ``default_postpasses`` and returns it unchanged."""
    def decorator(postpass):
        default_postpasses[name] = postpass
        return postpass
    return decorator
# ______________________________________________________________________
# Postpasses
@register_default('math')
def postpass_link_math(env, ee, lmod, lfunc):
"numba.math.* -> llvmmath.*"
replacements = {}
for lf in lmod.functions:
if lf.name.startswith('numba.math.'):
_, _, name = lf.name.rpartition('.')
replacements[lf.name] = name
del lf # this is dead after linking below
default_math_lib = llvmmath.get_default_math_lib()
linker = linking.get_linker(default_math_lib)
linking.link_llvm_math_intrinsics(ee, lmod, default_math_lib,
linker, replacements)
return lfunc | [
"llvmmath.linking.link_llvm_math_intrinsics",
"llvmmath.get_default_math_lib",
"llvmmath.linking.get_linker"
] | [((838, 869), 'llvmmath.get_default_math_lib', 'llvmmath.get_default_math_lib', ([], {}), '()\n', (867, 869), False, 'import llvmmath\n'), ((883, 919), 'llvmmath.linking.get_linker', 'linking.get_linker', (['default_math_lib'], {}), '(default_math_lib)\n', (901, 919), False, 'from llvmmath import linking\n'), ((924, 1011), 'llvmmath.linking.link_llvm_math_intrinsics', 'linking.link_llvm_math_intrinsics', (['ee', 'lmod', 'default_math_lib', 'linker', 'replacements'], {}), '(ee, lmod, default_math_lib, linker,\n replacements)\n', (957, 1011), False, 'from llvmmath import linking\n')] |
#!/usr/bin/env python
import random
import numpy as np
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
# Seed every RNG in play (Python, NumPy, TensorFlow) so the augmented
# batches below are reproducible across runs.
seed = 0
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)

# MNIST images are (28, 28); add a trailing channel axis for Keras.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train[..., None]
x_test = x_test[..., None]

# Random rotation/shift/zoom augmentation applied on the fly per batch.
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=0.1)
    #validation_split=0.2

#flow = datagen.flow(x_train, y_train, batch_size=16, subset="training")
#flow = datagen.flow(x_train, y_train, batch_size=16, subset="validation")
flow = datagen.flow(x_train, y_train, batch_size=16)

# Show a 16x16 grid: 16 batches of 16 augmented digits, labels on the left.
plt.figure(figsize=(19.2, 10.8))
for i in range(16):
    x, y = flow.next()
    for j in range(16):
        plt.subplot(16, 16, i*16+j+1)
        plt.imshow(x[j, ..., 0])
        plt.xticks([]), plt.yticks([]), plt.title(y[j], x=-0.2, y=0.6)
plt.show()
| [
"matplotlib.pyplot.imshow",
"tensorflow.random.set_seed",
"matplotlib.pyplot.xticks",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"random.seed",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"numpy.random.seed",
"matplotlib.pypl... | [((135, 152), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (146, 152), False, 'import random\n'), ((153, 173), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (167, 173), True, 'import numpy as np\n'), ((174, 198), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (192, 198), True, 'import tensorflow as tf\n'), ((239, 274), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (272, 274), True, 'import tensorflow as tf\n'), ((342, 475), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'tf.keras.preprocessing.image.ImageDataGenerator', ([], {'rotation_range': '(15)', 'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'zoom_range': '(0.1)'}), '(rotation_range=15,\n width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.1)\n', (389, 475), True, 'import tensorflow as tf\n'), ((718, 750), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(19.2, 10.8)'}), '(figsize=(19.2, 10.8))\n', (728, 750), True, 'import matplotlib.pyplot as plt\n'), ((960, 970), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (968, 970), True, 'import matplotlib.pyplot as plt\n'), ((826, 861), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(16)', '(16)', '(i * 16 + j + 1)'], {}), '(16, 16, i * 16 + j + 1)\n', (837, 861), True, 'import matplotlib.pyplot as plt\n'), ((864, 888), 'matplotlib.pyplot.imshow', 'plt.imshow', (['x[j, ..., 0]'], {}), '(x[j, ..., 0])\n', (874, 888), True, 'import matplotlib.pyplot as plt\n'), ((897, 911), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (907, 911), True, 'import matplotlib.pyplot as plt\n'), ((913, 927), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (923, 927), True, 'import matplotlib.pyplot as plt\n'), ((929, 959), 'matplotlib.pyplot.title', 'plt.title', (['y[j]'], {'x': '(-0.2)', 'y': '(0.6)'}), '(y[j], x=-0.2, y=0.6)\n', (938, 959), True, 'import 
matplotlib.pyplot as plt\n')] |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample that demonstrates how to use Stackdriver Monitoring metrics to
programmatically scale a Google Cloud Bigtable cluster."""
import argparse
import time
from google.cloud import bigtable
from google.cloud import monitoring
def get_cpu_load():
    """Returns the most recent Cloud Bigtable CPU load measurement.

    Returns:
        float: The most recent Cloud Bigtable CPU usage metric
    """
    # [START bigtable_cpu]
    client = monitoring.Client()
    # Query the last five minutes of cluster CPU load time series.
    cpu_query = client.query('bigtable.googleapis.com/cluster/cpu_load',
                             minutes=5)
    latest_series = list(cpu_query)[0]
    return latest_series.points[0].value
    # [END bigtable_cpu]
# [END bigtable_cpu]
def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
    """Scales the number of Cloud Bigtable nodes up or down.

    Edits the number of nodes in the Cloud Bigtable cluster to be increased
    or decreased, depending on the `scale_up` boolean argument. Currently
    the `incremental` strategy from `strategies.py` is used.

    Args:
        bigtable_instance (str): Cloud Bigtable instance ID to scale
        bigtable_cluster (str): Cloud Bigtable cluster ID to scale
        scale_up (bool): If true, scale up, otherwise scale down
    """
    _MIN_NODE_COUNT = 3
    """
    The minimum number of nodes to use. The default minimum is 3. If you have a
    lot of data, the rule of thumb is to not go below 2.5 TB per node for SSD
    clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used
    metric is useful in figuring out the minimum number of nodes.
    """

    _MAX_NODE_COUNT = 30
    """
    The maximum number of nodes to use. The default maximum is 30 nodes per zone.
    If you need more quota, you can request more by following the instructions
    <a href="https://cloud.google.com/bigtable/quota">here</a>.
    """

    _SIZE_CHANGE_STEP = 3
    """The number of nodes to change the cluster by."""
    # [START bigtable_scale]
    bigtable_client = bigtable.Client(admin=True)
    instance = bigtable_client.instance(bigtable_instance)
    instance.reload()

    cluster = instance.cluster(bigtable_cluster)
    cluster.reload()

    current_node_count = cluster.serve_nodes

    if scale_up:
        if current_node_count < _MAX_NODE_COUNT:
            # Use _SIZE_CHANGE_STEP here (the original hard-coded 3, which
            # silently diverged from the scale-down path below).
            new_node_count = min(
                current_node_count + _SIZE_CHANGE_STEP, _MAX_NODE_COUNT)
            cluster.serve_nodes = new_node_count
            cluster.update()
            print('Scaled up from {} to {} nodes.'.format(
                current_node_count, new_node_count))
    else:
        if current_node_count > _MIN_NODE_COUNT:
            new_node_count = max(
                current_node_count - _SIZE_CHANGE_STEP, _MIN_NODE_COUNT)
            cluster.serve_nodes = new_node_count
            cluster.update()
            print('Scaled down from {} to {} nodes.'.format(
                current_node_count, new_node_count))
    # [END bigtable_scale]
def main(
        bigtable_instance,
        bigtable_cluster,
        high_cpu_threshold,
        low_cpu_threshold,
        short_sleep,
        long_sleep):
    """Main loop runner that autoscales Cloud Bigtable.

    Args:
        bigtable_instance (str): Cloud Bigtable instance ID to autoscale
        bigtable_cluster (str): Cloud Bigtable cluster ID to autoscale
        high_cpu_threshold (float): If CPU is higher than this, scale up.
        low_cpu_threshold (float): If CPU is lower than this, scale down.
        short_sleep (int): How long to sleep after no operation
        long_sleep (int): How long to sleep after the number of nodes is
            changed
    """
    cluster_cpu = get_cpu_load()
    print('Detected cpu of {}'.format(cluster_cpu))
    if cluster_cpu > high_cpu_threshold:
        scale_bigtable(bigtable_instance, bigtable_cluster, True)
        # Sleep longer after a resize to let the cluster settle.
        time.sleep(long_sleep)
    elif cluster_cpu < low_cpu_threshold:
        scale_bigtable(bigtable_instance, bigtable_cluster, False)
        time.sleep(long_sleep)
    else:
        print('CPU within threshold, sleeping.')
        time.sleep(short_sleep)
if __name__ == '__main__':
    # Parse CLI options, then poll the CPU metric forever, rescaling as needed.
    parser = argparse.ArgumentParser(
        description='Scales Cloud Bigtable clusters based on CPU usage.')
    parser.add_argument(
        'bigtable_instance',
        help='ID of the Cloud Bigtable instance to connect to.')
    parser.add_argument(
        'bigtable_cluster',
        help='ID of the Cloud Bigtable cluster to connect to.')
    parser.add_argument(
        '--high_cpu_threshold',
        help='If Cloud Bigtable CPU usage is above this threshold, scale up',
        default=0.6)
    parser.add_argument(
        '--low_cpu_threshold',
        help='If Cloud Bigtable CPU usage is below this threshold, scale down',
        default=0.2)
    parser.add_argument(
        '--short_sleep',
        help='How long to sleep in seconds between checking metrics after no '
        'scale operation',
        default=60)
    parser.add_argument(
        '--long_sleep',
        help='How long to sleep in seconds between checking metrics after a '
        'scaling operation',
        default=60 * 10)
    args = parser.parse_args()

    while True:
        # main() sleeps internally, so this loop does not spin.
        main(
            args.bigtable_instance,
            args.bigtable_cluster,
            float(args.high_cpu_threshold),
            float(args.low_cpu_threshold),
            int(args.short_sleep),
            int(args.long_sleep))
| [
"google.cloud.monitoring.Client",
"time.sleep",
"argparse.ArgumentParser",
"google.cloud.bigtable.Client"
] | [((1026, 1045), 'google.cloud.monitoring.Client', 'monitoring.Client', ([], {}), '()\n', (1043, 1045), False, 'from google.cloud import monitoring\n'), ((2580, 2607), 'google.cloud.bigtable.Client', 'bigtable.Client', ([], {'admin': '(True)'}), '(admin=True)\n', (2595, 2607), False, 'from google.cloud import bigtable\n'), ((4659, 4753), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Scales Cloud Bigtable clusters based on CPU usage."""'}), "(description=\n 'Scales Cloud Bigtable clusters based on CPU usage.')\n", (4682, 4753), False, 'import argparse\n'), ((4363, 4385), 'time.sleep', 'time.sleep', (['long_sleep'], {}), '(long_sleep)\n', (4373, 4385), False, 'import time\n'), ((4503, 4525), 'time.sleep', 'time.sleep', (['long_sleep'], {}), '(long_sleep)\n', (4513, 4525), False, 'import time\n'), ((4593, 4616), 'time.sleep', 'time.sleep', (['short_sleep'], {}), '(short_sleep)\n', (4603, 4616), False, 'import time\n')] |
import requests
from PIL import Image
from io import BytesIO
import numpy as np
import cv2 as cv
from piquery.piq_feature import ImFeature, CropImFeature, ResizeImFeature, ImSim
from piquery.piq_hash import imhash_dct
from piquery.piq_error import DownloadError, ImageFormatError
class ImgDownloader:
    """Helpers for fetching remote images over HTTP."""

    @staticmethod
    def download(url):
        """Fetch `url` and return the `requests` response object.

        Raises:
            DownloadError: on timeout/connection failure or non-200 status.
        """
        # A browser-like user agent, since some hosts reject bare clients.
        # (The original had a duplicated `headers = headers = {...}` assignment.)
        headers = {'user-agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36'}
        try:
            res = requests.get(url, timeout=3.0, headers=headers)
        except requests.RequestException:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; all requests failures still map here.
            raise DownloadError('image downloading timeout!')
        if res.status_code != 200:
            raise DownloadError("image doesn't exist!")
        return res

    @staticmethod
    def download_numpy(url):
        """Fetch `url` and decode it into an H x W x C numpy array.

        Raises:
            DownloadError: if the download fails.
            ImageFormatError: if the decoded image has no channel axis
                (e.g. single-channel/grayscale input).
        """
        res = ImgDownloader.download(url)
        img_data = np.asarray(Image.open(BytesIO(res.content)))
        if len(img_data.shape) < 3:
            raise ImageFormatError("image format not supported!")
        return img_data
class ImgTransformer:
    """Stateless color-space conversion helpers."""

    @staticmethod
    def rgb2bgr(img_data):
        """Swap the red and blue channels (RGB <-> BGR); returns a copy."""
        return img_data.take([2, 1, 0], axis=2)

    @staticmethod
    def bgr2gray(img_data):
        """Convert a BGR image to single-channel grayscale via OpenCV."""
        return cv.cvtColor(img_data, cv.COLOR_BGR2GRAY)
class ImgFeature:
    """Feature extraction, perceptual hashing and similarity helpers."""

    def __init__(self):
        self.imf = ImFeature(k=50)
        self.imf_crop = CropImFeature(k=50)
        self.imf_resize = ResizeImFeature(k=50)
        self.imsim = ImSim(k=50, crop=False)

    def gray2hash(self, gray):
        """DCT perceptual hash of the central crop of `gray` resized to 4x4."""
        return imhash_dct(self.imf_resize.resize(self.imf_crop.crop(gray), size=4))

    def gray2des(self, gray):
        """Keypoint descriptors of the grayscale image `gray`."""
        return self.imf.feature(gray)[1]

    def des2fp(self, des):
        """Hex fingerprint string of descriptor array `des`."""
        return self.imf.fingerprint(des)

    def fp2des(self, fp):
        """Decode a hex fingerprint string back into a (num_keypoints, 32)
        uint8 descriptor array.

        Each keypoint descriptor is 32 bytes, i.e. 64 hex characters.
        """
        kp_num = len(fp) // 64  # integer division; was int(len(fp) / 64)
        ut8arr = np.array([int(fp[i:i + 2], 16) for i in range(0, len(fp), 2)],
                          dtype=np.uint8)
        return ut8arr.reshape(kp_num, 32)

    def sim(self, a, b):
        """Similarity score between two descriptor sets `a` and `b`."""
        # (Restored: the original last line was corrupted by trailing junk.)
        return self.imsim.calcSim(a, b)
"piquery.piq_error.ImageFormatError",
"piquery.piq_feature.ImFeature",
"piquery.piq_feature.ResizeImFeature",
"piquery.piq_error.DownloadError",
"piquery.piq_feature.ImSim",
"io.BytesIO",
"requests.get",
"cv2.cvtColor",
"piquery.piq_feature.CropImFeature"
] | [((1255, 1295), 'cv2.cvtColor', 'cv.cvtColor', (['img_data', 'cv.COLOR_BGR2GRAY'], {}), '(img_data, cv.COLOR_BGR2GRAY)\n', (1266, 1295), True, 'import cv2 as cv\n'), ((1358, 1373), 'piquery.piq_feature.ImFeature', 'ImFeature', ([], {'k': '(50)'}), '(k=50)\n', (1367, 1373), False, 'from piquery.piq_feature import ImFeature, CropImFeature, ResizeImFeature, ImSim\n'), ((1398, 1417), 'piquery.piq_feature.CropImFeature', 'CropImFeature', ([], {'k': '(50)'}), '(k=50)\n', (1411, 1417), False, 'from piquery.piq_feature import ImFeature, CropImFeature, ResizeImFeature, ImSim\n'), ((1444, 1465), 'piquery.piq_feature.ResizeImFeature', 'ResizeImFeature', ([], {'k': '(50)'}), '(k=50)\n', (1459, 1465), False, 'from piquery.piq_feature import ImFeature, CropImFeature, ResizeImFeature, ImSim\n'), ((1487, 1510), 'piquery.piq_feature.ImSim', 'ImSim', ([], {'k': '(50)', 'crop': '(False)'}), '(k=50, crop=False)\n', (1492, 1510), False, 'from piquery.piq_feature import ImFeature, CropImFeature, ResizeImFeature, ImSim\n'), ((569, 616), 'requests.get', 'requests.get', (['url'], {'timeout': '(3.0)', 'headers': 'headers'}), '(url, timeout=3.0, headers=headers)\n', (581, 616), False, 'import requests\n'), ((748, 785), 'piquery.piq_error.DownloadError', 'DownloadError', (['"""image doesn\'t exist!"""'], {}), '("image doesn\'t exist!")\n', (761, 785), False, 'from piquery.piq_error import DownloadError, ImageFormatError\n'), ((1013, 1060), 'piquery.piq_error.ImageFormatError', 'ImageFormatError', (['"""image format not supported!"""'], {}), "('image format not supported!')\n", (1029, 1060), False, 'from piquery.piq_error import DownloadError, ImageFormatError\n'), ((651, 694), 'piquery.piq_error.DownloadError', 'DownloadError', (['"""image downloading timeout!"""'], {}), "('image downloading timeout!')\n", (664, 694), False, 'from piquery.piq_error import DownloadError, ImageFormatError\n'), ((936, 956), 'io.BytesIO', 'BytesIO', (['res.content'], {}), '(res.content)\n', (943, 956), False, 
'from io import BytesIO\n')] |
"""Support for Sonarr sensors."""
from datetime import timedelta
import logging
from typing import Any, Callable, Dict, List, Optional
from sonarr import Sonarr, SonarrConnectionError, SonarrError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import DATA_GIGABYTES
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
import homeassistant.util.dt as dt_util
from . import SonarrEntity
from .const import CONF_UPCOMING_DAYS, CONF_WANTED_MAX_ITEMS, DATA_SONARR, DOMAIN
_LOGGER = logging.getLogger(__name__)


async def async_setup_entry(
    hass: HomeAssistantType,
    entry: ConfigEntry,
    async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
    """Set up Sonarr sensors based on a config entry."""
    options = entry.options
    sonarr = hass.data[DOMAIN][entry.entry_id][DATA_SONARR]

    # One sensor entity per Sonarr data feed; the upcoming/wanted sensors
    # take their tuning knobs from the config entry options.
    sensors: List[Entity] = [
        SonarrCommandsSensor(sonarr, entry.entry_id),
        SonarrDiskspaceSensor(sonarr, entry.entry_id),
        SonarrQueueSensor(sonarr, entry.entry_id),
        SonarrSeriesSensor(sonarr, entry.entry_id),
        SonarrUpcomingSensor(sonarr, entry.entry_id, days=options[CONF_UPCOMING_DAYS]),
        SonarrWantedSensor(
            sonarr, entry.entry_id, max_items=options[CONF_WANTED_MAX_ITEMS]
        ),
    ]

    async_add_entities(sensors, True)
def sonarr_exception_handler(func):
    """Decorate Sonarr calls to handle Sonarr exceptions.

    A decorator that wraps the passed in function, catches Sonarr errors,
    and handles the availability of the entity.

    On success ``last_update_success`` is set to True; on a Sonarr error it
    is set to False and the error is logged once (only while the entity is
    still marked available, to avoid log spam on repeated failures).
    """
    # Local import so the module-level import block stays untouched.
    from functools import wraps

    # functools.wraps preserves the wrapped coroutine's __name__/__doc__;
    # without it every decorated method reports as "handler" in debugging.
    @wraps(func)
    async def handler(self, *args, **kwargs):
        try:
            await func(self, *args, **kwargs)
            self.last_update_success = True
        except SonarrConnectionError as error:
            if self.available:
                _LOGGER.error("Error communicating with API: %s", error)
            self.last_update_success = False
        except SonarrError as error:
            if self.available:
                _LOGGER.error("Invalid response from API: %s", error)
            self.last_update_success = False

    return handler
class SonarrSensor(SonarrEntity):
    """Base class shared by all Sonarr sensor entities.

    Stores the unit of measurement and a per-entry unique ID, and tracks
    whether the most recent update succeeded (exposed via ``available``).
    """

    def __init__(
        self,
        *,
        sonarr: Sonarr,
        entry_id: str,
        enabled_default: bool = True,
        icon: str,
        key: str,
        name: str,
        unit_of_measurement: Optional[str] = None,
    ) -> None:
        """Initialize Sonarr sensor."""
        self._key = key
        self._unique_id = f"{entry_id}_{key}"
        self._unit_of_measurement = unit_of_measurement
        # Flipped by the sonarr_exception_handler decorator on each update.
        self.last_update_success = False

        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            device_id=entry_id,
            name=name,
            icon=icon,
            enabled_default=enabled_default,
        )

    @property
    def unique_id(self) -> str:
        """Return the unique ID for this sensor."""
        return self._unique_id

    @property
    def available(self) -> bool:
        """Return True when the last Sonarr update succeeded."""
        return self.last_update_success

    @property
    def unit_of_measurement(self) -> str:
        """Return the unit this state is expressed in."""
        return self._unit_of_measurement
class SonarrCommandsSensor(SonarrSensor):
    """Sensor reporting the commands currently known to Sonarr."""

    def __init__(self, sonarr: Sonarr, entry_id: str) -> None:
        """Initialize Sonarr Commands sensor."""
        self._commands = []

        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            icon="mdi:code-braces",
            key="commands",
            name=f"{sonarr.app.info.app_name} Commands",
            unit_of_measurement="Commands",
            enabled_default=False,
        )

    @sonarr_exception_handler
    async def async_update(self) -> None:
        """Update entity."""
        self._commands = await self.sonarr.commands()

    @property
    def device_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return the state attributes of the entity."""
        # One attribute per command: name -> current state.
        return {command.name: command.state for command in self._commands}

    @property
    def state(self) -> int:
        """Return the state of the sensor."""
        return len(self._commands)
class SonarrDiskspaceSensor(SonarrSensor):
    """Defines a Sonarr Disk Space sensor.

    State is the total free space (in gibibytes) across all disks that
    Sonarr reports; per-disk details are exposed as state attributes.
    """

    # Bytes per gibibyte; Sonarr reports sizes in bytes.
    _GIB = 1024 ** 3

    def __init__(self, sonarr: Sonarr, entry_id: str) -> None:
        """Initialize Sonarr Disk Space sensor."""
        self._disks = []
        self._total_free = 0

        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            icon="mdi:harddisk",
            key="diskspace",
            name=f"{sonarr.app.info.app_name} Disk Space",
            unit_of_measurement=DATA_GIGABYTES,
            enabled_default=False,
        )

    @sonarr_exception_handler
    async def async_update(self) -> None:
        """Update entity."""
        app = await self.sonarr.update()
        self._disks = app.disks
        # Generator expression: no need to materialize an intermediate list.
        self._total_free = sum(disk.free for disk in self._disks)

    @property
    def device_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return the state attributes of the entity."""
        attrs = {}

        for disk in self._disks:
            free = disk.free / self._GIB
            total = disk.total / self._GIB
            # Guard against a zero-sized disk reported by the API, which
            # previously raised ZeroDivisionError. Note this percentage is
            # the fraction *free*, matching the "free/total" text before it.
            usage = free / total * 100 if total else 0

            attrs[
                disk.path
            ] = f"{free:.2f}/{total:.2f}{self._unit_of_measurement} ({usage:.2f}%)"

        return attrs

    @property
    def state(self) -> str:
        """Return the state of the sensor."""
        free = self._total_free / self._GIB
        return f"{free:.2f}"
class SonarrQueueSensor(SonarrSensor):
    """Sensor exposing the Sonarr download queue."""

    def __init__(self, sonarr: Sonarr, entry_id: str) -> None:
        """Initialize Sonarr Queue sensor."""
        self._queue = []

        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            icon="mdi:download",
            key="queue",
            name=f"{sonarr.app.info.app_name} Queue",
            unit_of_measurement="Episodes",
            enabled_default=False,
        )

    @sonarr_exception_handler
    async def async_update(self) -> None:
        """Update entity."""
        self._queue = await self.sonarr.queue()

    @property
    def device_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return the state attributes of the entity."""
        attrs = {}

        for item in self._queue:
            # A zero-byte item is treated as fully remaining (0% done).
            if item.size == 0:
                remaining = 1
            else:
                remaining = item.size_remaining / item.size
            remaining_pct = 100 * (1 - remaining)
            identifier = f"{item.episode.series.title} {item.episode.identifier}"
            attrs[identifier] = f"{remaining_pct:.2f}%"

        return attrs

    @property
    def state(self) -> int:
        """Return the state of the sensor."""
        return len(self._queue)
class SonarrSeriesSensor(SonarrSensor):
    """Sensor reporting the shows tracked by Sonarr."""

    def __init__(self, sonarr: Sonarr, entry_id: str) -> None:
        """Initialize Sonarr Series sensor."""
        self._items = []

        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            icon="mdi:television",
            key="series",
            name=f"{sonarr.app.info.app_name} Shows",
            unit_of_measurement="Series",
            enabled_default=False,
        )

    @sonarr_exception_handler
    async def async_update(self) -> None:
        """Update entity."""
        self._items = await self.sonarr.series()

    @property
    def device_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return the state attributes of the entity."""
        # Per show: "downloaded/total Episodes".
        return {
            item.series.title: f"{item.downloaded}/{item.episodes} Episodes"
            for item in self._items
        }

    @property
    def state(self) -> int:
        """Return the state of the sensor."""
        return len(self._items)
class SonarrUpcomingSensor(SonarrSensor):
    """Sensor listing episodes airing within the configured window."""

    def __init__(self, sonarr: Sonarr, entry_id: str, days: int = 1) -> None:
        """Initialize Sonarr Upcoming sensor."""
        self._days = days
        self._upcoming = []

        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            icon="mdi:television",
            key="upcoming",
            name=f"{sonarr.app.info.app_name} Upcoming",
            unit_of_measurement="Episodes",
        )

    @sonarr_exception_handler
    async def async_update(self) -> None:
        """Update entity."""
        # Window starts at local midnight (as UTC) and spans self._days days.
        window_start = dt_util.as_utc(
            dt_util.start_of_local_day().replace(microsecond=0)
        )
        window_end = window_start + timedelta(days=self._days)
        self._upcoming = await self.sonarr.calendar(
            start=window_start.isoformat(), end=window_end.isoformat()
        )

    @property
    def device_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return the state attributes of the entity."""
        return {
            episode.series.title: episode.identifier for episode in self._upcoming
        }

    @property
    def state(self) -> int:
        """Return the state of the sensor."""
        return len(self._upcoming)
class SonarrWantedSensor(SonarrSensor):
    """Sensor reporting monitored episodes that are still missing."""

    def __init__(self, sonarr: Sonarr, entry_id: str, max_items: int = 10) -> None:
        """Initialize Sonarr Wanted sensor."""
        self._max_items = max_items
        self._results = None
        self._total: Optional[int] = None

        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            icon="mdi:television",
            key="wanted",
            name=f"{sonarr.app.info.app_name} Wanted",
            unit_of_measurement="Episodes",
            enabled_default=False,
        )

    @sonarr_exception_handler
    async def async_update(self) -> None:
        """Update entity."""
        self._results = await self.sonarr.wanted(page_size=self._max_items)
        self._total = self._results.total

    @property
    def device_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return the state attributes of the entity."""
        # Guard clause: nothing fetched yet.
        if self._results is None:
            return {}

        return {
            f"{episode.series.title} {episode.identifier}": episode.airdate
            for episode in self._results.episodes
        }

    @property
    def state(self) -> Optional[int]:
        """Return the state of the sensor."""
        return self._total
| [
"logging.getLogger",
"homeassistant.util.dt.as_utc",
"homeassistant.util.dt.start_of_local_day",
"datetime.timedelta"
] | [((567, 594), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (584, 594), False, 'import logging\n'), ((8873, 8894), 'homeassistant.util.dt.as_utc', 'dt_util.as_utc', (['local'], {}), '(local)\n', (8887, 8894), True, 'import homeassistant.util.dt as dt_util\n'), ((8917, 8943), 'datetime.timedelta', 'timedelta', ([], {'days': 'self._days'}), '(days=self._days)\n', (8926, 8943), False, 'from datetime import timedelta\n'), ((8805, 8833), 'homeassistant.util.dt.start_of_local_day', 'dt_util.start_of_local_day', ([], {}), '()\n', (8831, 8833), True, 'import homeassistant.util.dt as dt_util\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import instance_list
from nova import context
from nova.db.main import api as db
from nova import exception
from nova import objects
from nova import test
class InstanceListTestCase(test.TestCase):
    """Functional tests for instance_list.get_instances_sorted.

    setUp populates three instances in each real cell (the first cell is
    deliberately left empty), with deterministic created_at/launched_at
    timestamps and faults on every even-numbered instance, so sorting,
    pagination and fault-joining behavior can be asserted exactly.
    """
    NUMBER_OF_CELLS = 3

    def setUp(self):
        super(InstanceListTestCase, self).setUp()

        self.context = context.RequestContext('fake', 'fake')
        self.num_instances = 3
        self.instances = []
        start = datetime.datetime(1985, 10, 25, 1, 21, 0)
        dt = start
        spread = datetime.timedelta(minutes=10)
        self.cells = objects.CellMappingList.get_all(self.context)
        # Create three instances in each of the real cells. Leave the
        # first cell empty to make sure we don't break with an empty
        # one.
        for cell in self.cells[1:]:
            for i in range(0, self.num_instances):
                with context.target_cell(self.context, cell) as cctx:
                    inst = objects.Instance(
                        context=cctx,
                        project_id=self.context.project_id,
                        user_id=self.context.user_id,
                        created_at=start,
                        launched_at=dt,
                        instance_type_id=i,
                        hostname='%s-inst%i' % (cell.name, i))
                    inst.create()
                    if i % 2 == 0:
                        # Make some faults for this instance
                        for n in range(0, i + 1):
                            msg = 'fault%i-%s' % (n, inst.hostname)
                            f = objects.InstanceFault(context=cctx,
                                                  instance_uuid=inst.uuid,
                                                  code=i,
                                                  message=msg,
                                                  details='fake',
                                                  host='fakehost')
                            f.create()
                self.instances.append(inst)
                im = objects.InstanceMapping(context=self.context,
                                        project_id=inst.project_id,
                                        user_id=inst.user_id,
                                        instance_uuid=inst.uuid,
                                        cell_mapping=cell)
                im.create()
                dt += spread

    def test_get_sorted(self):
        filters = {}
        limit = None
        marker = None
        columns = []
        sort_keys = ['uuid']
        sort_dirs = ['asc']
        obj, insts = instance_list.get_instances_sorted(self.context, filters,
                                                        limit, marker, columns,
                                                        sort_keys, sort_dirs)
        # NOTE(review): local "uuids" shadows the module-level uuidsentinel
        # import within this method.
        uuids = [inst['uuid'] for inst in insts]
        self.assertEqual(sorted(uuids), uuids)
        self.assertEqual(len(self.instances), len(uuids))

    def test_get_sorted_descending(self):
        filters = {}
        limit = None
        marker = None
        columns = []
        sort_keys = ['uuid']
        sort_dirs = ['desc']
        obj, insts = instance_list.get_instances_sorted(self.context, filters,
                                                        limit, marker, columns,
                                                        sort_keys, sort_dirs)
        uuids = [inst['uuid'] for inst in insts]
        self.assertEqual(list(reversed(sorted(uuids))), uuids)
        self.assertEqual(len(self.instances), len(uuids))

    def test_get_sorted_with_filter(self):
        filters = {'instance_type_id': 1}
        limit = None
        marker = None
        columns = []
        sort_keys = ['uuid']
        sort_dirs = ['asc']
        obj, insts = instance_list.get_instances_sorted(self.context, filters,
                                                        limit, marker, columns,
                                                        sort_keys, sort_dirs)
        uuids = [inst['uuid'] for inst in insts]
        expected = [inst['uuid'] for inst in self.instances
                    if inst['instance_type_id'] == 1]
        self.assertEqual(list(sorted(expected)), uuids)

    def test_get_sorted_by_defaults(self):
        filters = {}
        limit = None
        marker = None
        columns = []
        sort_keys = None
        sort_dirs = None
        obj, insts = instance_list.get_instances_sorted(self.context, filters,
                                                        limit, marker, columns,
                                                        sort_keys, sort_dirs)
        uuids = set([inst['uuid'] for inst in insts])
        expected = set([inst['uuid'] for inst in self.instances])
        self.assertEqual(expected, uuids)

    def test_get_sorted_with_limit(self):
        obj, insts = instance_list.get_instances_sorted(self.context, {},
                                                        5, None,
                                                        [], ['uuid'], ['asc'])
        uuids = [inst['uuid'] for inst in insts]
        had_uuids = [inst.uuid for inst in self.instances]
        self.assertEqual(sorted(had_uuids)[:5], uuids)
        self.assertEqual(5, len(uuids))

    def test_get_sorted_with_large_limit(self):
        obj, insts = instance_list.get_instances_sorted(self.context, {},
                                                        5000, None,
                                                        [], ['uuid'], ['asc'])
        uuids = [inst['uuid'] for inst in insts]
        self.assertEqual(sorted(uuids), uuids)
        self.assertEqual(len(self.instances), len(uuids))

    def test_get_sorted_with_large_limit_batched(self):
        obj, insts = instance_list.get_instances_sorted(self.context, {},
                                                        5000, None,
                                                        [], ['uuid'], ['asc'],
                                                        batch_size=2)
        uuids = [inst['uuid'] for inst in insts]
        self.assertEqual(sorted(uuids), uuids)
        self.assertEqual(len(self.instances), len(uuids))

    def _test_get_sorted_with_limit_marker(self, sort_by, pages=2, pagesize=2,
                                           sort_dir='asc'):
        """Get multiple pages by a sort key and validate the results.

        This requests $pages of $pagesize, followed by a final page with
        no limit, and a final-final page which should be empty. It validates
        that we got a consistent set of results no patter where the page
        boundary is, that we got all the results after the unlimited query,
        and that the final page comes back empty when we use the last
        instance as a marker.
        """
        insts = []
        page = 0
        while True:
            if page >= pages:
                # We've requested the specified number of limited (by pagesize)
                # pages, so request a penultimate page with no limit which
                # should always finish out the result.
                limit = None
            else:
                # Request a limited-size page for the first $pages pages.
                limit = pagesize
            if insts:
                # If we're not on the first page, use the last instance we
                # received as the marker
                marker = insts[-1]['uuid']
            else:
                # No marker for the first page
                marker = None
            batch = list(
                instance_list.get_instances_sorted(self.context, {},
                                                   limit, marker,
                                                   [], [sort_by],
                                                   [sort_dir])[1])
            if not batch:
                # This should only happen when we've pulled the last empty
                # page because we used the marker of the last instance. If
                # we end up with a non-deterministic ordering, we'd loop
                # forever.
                break
            insts.extend(batch)
            page += 1
            if page > len(self.instances) * 2:
                # Do this sanity check in case we introduce (or find) another
                # repeating page bug like #1721791. Without this we loop
                # until timeout, which is less obvious.
                raise Exception('Infinite paging loop')

        # We should have requested exactly (or one more unlimited) pages
        self.assertIn(page, (pages, pages + 1))

        # Make sure the full set matches what we know to be true
        found = [x[sort_by] for x in insts]
        had = [x[sort_by] for x in self.instances]
        if sort_by in ('launched_at', 'created_at'):
            # We're comparing objects and database entries, so we need to
            # squash the tzinfo of the object ones so we can compare
            had = [x.replace(tzinfo=None) for x in had]
        self.assertEqual(len(had), len(found))
        if sort_dir == 'asc':
            self.assertEqual(sorted(had), found)
        else:
            self.assertEqual(list(reversed(sorted(had))), found)

    def test_get_sorted_with_limit_marker_stable(self):
        """Test sorted by hostname.

        This will be a stable sort that won't change on each run.
        """
        self._test_get_sorted_with_limit_marker(sort_by='hostname')

    def test_get_sorted_with_limit_marker_stable_reverse(self):
        """Test sorted by hostname.

        This will be a stable sort that won't change on each run.
        """
        self._test_get_sorted_with_limit_marker(sort_by='hostname',
                                                sort_dir='desc')

    def test_get_sorted_with_limit_marker_stable_different_pages(self):
        """Test sorted by hostname with different page sizes.

        Just do the above with page seams in different places.
        """
        self._test_get_sorted_with_limit_marker(sort_by='hostname',
                                                pages=3, pagesize=1)

    def test_get_sorted_with_limit_marker_stable_different_pages_reverse(self):
        """Test sorted by hostname with different page sizes.

        Just do the above with page seams in different places.
        """
        self._test_get_sorted_with_limit_marker(sort_by='hostname',
                                                pages=3, pagesize=1,
                                                sort_dir='desc')

    def test_get_sorted_with_limit_marker_random(self):
        """Test sorted by uuid.

        This will not be stable and the actual ordering will depend on
        uuid generation and thus be different on each run. Do this in
        addition to the stable sort above to keep us honest.
        """
        self._test_get_sorted_with_limit_marker(sort_by='uuid')

    def test_get_sorted_with_limit_marker_random_different_pages(self):
        """Test sorted by uuid with different page sizes.

        Just do the above with page seams in different places.
        """
        self._test_get_sorted_with_limit_marker(sort_by='uuid',
                                                pages=3, pagesize=2)

    def test_get_sorted_with_limit_marker_datetime(self):
        """Test sorted by launched_at.

        This tests that we can do all of this, but with datetime
        fields.
        """
        self._test_get_sorted_with_limit_marker(sort_by='launched_at')

    def test_get_sorted_with_limit_marker_datetime_same(self):
        """Test sorted by created_at.

        This tests that we can do all of this, but with datetime
        fields that are identical.
        """
        self._test_get_sorted_with_limit_marker(sort_by='created_at')

    def test_get_sorted_with_deleted_marker(self):
        marker = self.instances[1]['uuid']
        before = list(
            instance_list.get_instances_sorted(self.context, {},
                                               None, marker,
                                               [], None, None)[1])
        db.instance_destroy(self.context, marker)
        after = list(
            instance_list.get_instances_sorted(self.context, {},
                                               None, marker,
                                               [], None, None)[1])
        self.assertEqual(before, after)

    def test_get_sorted_with_invalid_marker(self):
        self.assertRaises(exception.MarkerNotFound,
                          list, instance_list.get_instances_sorted(
                              self.context, {}, None, 'not-a-marker',
                              [], None, None)[1])

    def test_get_sorted_with_purged_instance(self):
        """Test that we handle a mapped but purged instance."""
        # NOTE(review): other InstanceMapping call sites in this file pass
        # cell_mapping=...; the "cell=" keyword here looks suspicious --
        # verify against the InstanceMapping object's field names.
        im = objects.InstanceMapping(self.context,
                                     instance_uuid=uuids.missing,
                                     project_id=self.context.project_id,
                                     user_id=self.context.user_id,
                                     cell=self.cells[0])
        im.create()
        self.assertRaises(exception.MarkerNotFound,
                          list, instance_list.get_instances_sorted(
                              self.context, {}, None, uuids.missing,
                              [], None, None)[1])

    def _test_get_paginated_with_filter(self, filters):

        found_uuids = []
        marker = None
        while True:
            # Query for those instances, sorted by a different key in
            # pages of one until we've consumed them all
            batch = list(
                instance_list.get_instances_sorted(self.context,
                                                   filters,
                                                   1, marker, [],
                                                   ['hostname'],
                                                   ['asc'])[1])
            if not batch:
                break
            found_uuids.extend([x['uuid'] for x in batch])
            marker = found_uuids[-1]

        return found_uuids

    def test_get_paginated_with_uuid_filter(self):
        """Test getting pages with uuid filters.

        This runs through the results of a uuid-filtered query in pages of
        length one to ensure that we land on markers that are filtered out
        of the query and are not accidentally returned.
        """
        # Pick a set of the instances by uuid, when sorted by uuid
        all_uuids = [x['uuid'] for x in self.instances]
        filters = {'uuid': sorted(all_uuids)[:7]}

        found_uuids = self._test_get_paginated_with_filter(filters)

        # Make sure we found all (and only) the instances we asked for
        self.assertEqual(set(found_uuids), set(filters['uuid']))
        self.assertEqual(7, len(found_uuids))

    def test_get_paginated_with_other_filter(self):
        """Test getting pages with another filter.

        This runs through the results of a filtered query in pages of
        length one to ensure we land on markers that are filtered out
        of the query and are not accidentally returned.
        """
        expected = [inst['uuid'] for inst in self.instances
                    if inst['instance_type_id'] == 1]
        filters = {'instance_type_id': 1}

        found_uuids = self._test_get_paginated_with_filter(filters)

        self.assertEqual(set(expected), set(found_uuids))

    def test_get_paginated_with_uuid_and_other_filter(self):
        """Test getting pages with a uuid and other type of filter.

        We do this to make sure that we still find (but exclude) the
        marker even if one of the other filters would have included
        it.
        """
        # Pick a set of the instances by uuid, when sorted by uuid
        all_uuids = [x['uuid'] for x in self.instances]
        filters = {'uuid': sorted(all_uuids)[:7],
                   'user_id': 'fake'}

        found_uuids = self._test_get_paginated_with_filter(filters)

        # Make sure we found all (and only) the instances we asked for
        self.assertEqual(set(found_uuids), set(filters['uuid']))
        self.assertEqual(7, len(found_uuids))

    def test_get_sorted_with_faults(self):
        """Make sure we get faults when we ask for them."""
        insts = list(
            instance_list.get_instances_sorted(self.context, {},
                                               None, None,
                                               ['fault'],
                                               ['hostname'], ['asc'])[1])

        # Two of the instances in each cell have faults (0th and 2nd)
        expected_faults = self.NUMBER_OF_CELLS * 2
        expected_no_fault = len(self.instances) - expected_faults
        faults = [inst['fault'] for inst in insts]
        self.assertEqual(expected_no_fault, faults.count(None))

    def test_get_sorted_paginated_with_faults(self):
        """Get pages of one with faults.

        Do this specifically so we make sure we land on faulted marker
        instances to ensure we don't omit theirs.
        """
        insts = []
        while True:
            if insts:
                marker = insts[-1]['uuid']
            else:
                marker = None
            batch = list(
                instance_list.get_instances_sorted(self.context, {},
                                                   1, marker,
                                                   ['fault'],
                                                   ['hostname'], ['asc'])[1])
            if not batch:
                break
            insts.extend(batch)

        self.assertEqual(len(self.instances), len(insts))
        # Two of the instances in each cell have faults (0th and 2nd)
        expected_faults = self.NUMBER_OF_CELLS * 2
        expected_no_fault = len(self.instances) - expected_faults
        faults = [inst['fault'] for inst in insts]
        self.assertEqual(expected_no_fault, faults.count(None))

    def test_instance_list_minimal_cells(self):
        """Get a list of instances with a subset of cell mappings."""
        last_cell = self.cells[-1]
        with context.target_cell(self.context, last_cell) as cctxt:
            last_cell_instances = db.instance_get_all(cctxt)
            last_cell_uuids = [inst['uuid'] for inst in last_cell_instances]

        instances = list(
            instance_list.get_instances_sorted(self.context, {},
                                               None, None, [],
                                               ['uuid'], ['asc'],
                                               cell_mappings=self.cells[:-1])
            [1])
        found_uuids = [inst['hostname'] for inst in instances]
        had_uuids = [inst['hostname'] for inst in self.instances
                     if inst['uuid'] not in last_cell_uuids]
        self.assertEqual(sorted(had_uuids), sorted(found_uuids))
class TestInstanceListObjects(test.TestCase):
    """Functional tests for instance_list.get_instance_objects_sorted.

    Same fixture layout as InstanceListTestCase (three instances per real
    cell, first cell empty, faults on even-numbered instances), but
    exercising the object-returning API including expected_attrs handling.
    """
    def setUp(self):
        super(TestInstanceListObjects, self).setUp()

        self.context = context.RequestContext('fake', 'fake')
        self.num_instances = 3
        self.instances = []
        start = datetime.datetime(1985, 10, 25, 1, 21, 0)
        dt = start
        spread = datetime.timedelta(minutes=10)
        cells = objects.CellMappingList.get_all(self.context)
        # Create three instances in each of the real cells. Leave the
        # first cell empty to make sure we don't break with an empty
        # one
        for cell in cells[1:]:
            for i in range(0, self.num_instances):
                with context.target_cell(self.context, cell) as cctx:
                    inst = objects.Instance(
                        context=cctx,
                        project_id=self.context.project_id,
                        user_id=self.context.user_id,
                        created_at=start,
                        launched_at=dt,
                        instance_type_id=i,
                        hostname='%s-inst%i' % (cell.name, i))
                    inst.create()
                    if i % 2 == 0:
                        # Make some faults for this instance
                        for n in range(0, i + 1):
                            msg = 'fault%i-%s' % (n, inst.hostname)
                            f = objects.InstanceFault(context=cctx,
                                                  instance_uuid=inst.uuid,
                                                  code=i,
                                                  message=msg,
                                                  details='fake',
                                                  host='fakehost')
                            f.create()
                self.instances.append(inst)
                im = objects.InstanceMapping(context=self.context,
                                        project_id=inst.project_id,
                                        user_id=inst.user_id,
                                        instance_uuid=inst.uuid,
                                        cell_mapping=cell)
                im.create()
                dt += spread

    def test_get_instance_objects_sorted(self):
        filters = {}
        limit = None
        marker = None
        expected_attrs = []
        sort_keys = ['uuid']
        sort_dirs = ['asc']
        insts, down_cell_uuids = instance_list.get_instance_objects_sorted(
            self.context, filters, limit, marker, expected_attrs,
            sort_keys, sort_dirs)
        found_uuids = [x.uuid for x in insts]
        had_uuids = sorted([x['uuid'] for x in self.instances])
        self.assertEqual(had_uuids, found_uuids)

        # Make sure none of the instances have fault set
        self.assertEqual(0, len([inst for inst in insts
                                 if 'fault' in inst]))

    def test_get_instance_objects_sorted_with_fault(self):
        filters = {}
        limit = None
        marker = None
        expected_attrs = ['fault']
        sort_keys = ['uuid']
        sort_dirs = ['asc']
        insts, down_cell_uuids = instance_list.get_instance_objects_sorted(
            self.context, filters, limit, marker, expected_attrs,
            sort_keys, sort_dirs)
        found_uuids = [x.uuid for x in insts]
        had_uuids = sorted([x['uuid'] for x in self.instances])
        self.assertEqual(had_uuids, found_uuids)
        # They should all have fault set, but only some have
        # actual faults
        self.assertEqual(2, len([inst for inst in insts
                                 if inst.fault]))

    def test_get_instance_objects_sorted_paged(self):
        """Query a full first page and ensure an empty second one.

        This uses created_at which is enforced to be the same across
        each instance by setUp(). This will help make sure we still
        have a stable ordering, even when we only claim to care about
        created_at.
        """
        instp1, down_cell_uuids = instance_list.get_instance_objects_sorted(
            self.context, {}, None, None, [],
            ['created_at'], ['asc'])
        self.assertEqual(len(self.instances), len(instp1))
        instp2, down_cell_uuids = instance_list.get_instance_objects_sorted(
            self.context, {}, None, instp1[-1]['uuid'], [],
            ['created_at'], ['asc'])
        self.assertEqual(0, len(instp2))
| [
"datetime.datetime",
"nova.db.main.api.instance_destroy",
"nova.compute.instance_list.get_instances_sorted",
"nova.db.main.api.instance_get_all",
"nova.objects.CellMappingList.get_all",
"nova.context.RequestContext",
"nova.objects.InstanceFault",
"nova.objects.Instance",
"nova.compute.instance_list.... | [((982, 1020), 'nova.context.RequestContext', 'context.RequestContext', (['"""fake"""', '"""fake"""'], {}), "('fake', 'fake')\n", (1004, 1020), False, 'from nova import context\n'), ((1097, 1138), 'datetime.datetime', 'datetime.datetime', (['(1985)', '(10)', '(25)', '(1)', '(21)', '(0)'], {}), '(1985, 10, 25, 1, 21, 0)\n', (1114, 1138), False, 'import datetime\n'), ((1175, 1205), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(10)'}), '(minutes=10)\n', (1193, 1205), False, 'import datetime\n'), ((1228, 1273), 'nova.objects.CellMappingList.get_all', 'objects.CellMappingList.get_all', (['self.context'], {}), '(self.context)\n', (1259, 1273), False, 'from nova import objects\n'), ((3313, 3420), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', 'filters', 'limit', 'marker', 'columns', 'sort_keys', 'sort_dirs'], {}), '(self.context, filters, limit, marker,\n columns, sort_keys, sort_dirs)\n', (3347, 3420), False, 'from nova.compute import instance_list\n'), ((3890, 3997), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', 'filters', 'limit', 'marker', 'columns', 'sort_keys', 'sort_dirs'], {}), '(self.context, filters, limit, marker,\n columns, sort_keys, sort_dirs)\n', (3924, 3997), False, 'from nova.compute import instance_list\n'), ((4504, 4611), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', 'filters', 'limit', 'marker', 'columns', 'sort_keys', 'sort_dirs'], {}), '(self.context, filters, limit, marker,\n columns, sort_keys, sort_dirs)\n', (4538, 4611), False, 'from nova.compute import instance_list\n'), ((5139, 5246), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', 'filters', 'limit', 'marker', 'columns', 'sort_keys', 'sort_dirs'], {}), '(self.context, filters, limit, marker,\n columns, 
sort_keys, sort_dirs)\n', (5173, 5246), False, 'from nova.compute import instance_list\n'), ((5581, 5669), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', '{}', '(5)', 'None', '[]', "['uuid']", "['asc']"], {}), "(self.context, {}, 5, None, [], ['uuid'],\n ['asc'])\n", (5615, 5669), False, 'from nova.compute import instance_list\n'), ((6051, 6143), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', '{}', '(5000)', 'None', '[]', "['uuid']", "['asc']"], {}), "(self.context, {}, 5000, None, [], [\n 'uuid'], ['asc'])\n", (6085, 6143), False, 'from nova.compute import instance_list\n'), ((6483, 6589), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', '{}', '(5000)', 'None', '[]', "['uuid']", "['asc']"], {'batch_size': '(2)'}), "(self.context, {}, 5000, None, [], [\n 'uuid'], ['asc'], batch_size=2)\n", (6517, 6589), False, 'from nova.compute import instance_list\n'), ((12855, 12896), 'nova.db.main.api.instance_destroy', 'db.instance_destroy', (['self.context', 'marker'], {}), '(self.context, marker)\n', (12874, 12896), True, 'from nova.db.main import api as db\n'), ((13576, 13737), 'nova.objects.InstanceMapping', 'objects.InstanceMapping', (['self.context'], {'instance_uuid': 'uuids.missing', 'project_id': 'self.context.project_id', 'user_id': 'self.context.user_id', 'cell': 'self.cells[0]'}), '(self.context, instance_uuid=uuids.missing,\n project_id=self.context.project_id, user_id=self.context.user_id, cell=\n self.cells[0])\n', (13599, 13737), False, 'from nova import objects\n'), ((19918, 19956), 'nova.context.RequestContext', 'context.RequestContext', (['"""fake"""', '"""fake"""'], {}), "('fake', 'fake')\n", (19940, 19956), False, 'from nova import context\n'), ((20033, 20074), 'datetime.datetime', 'datetime.datetime', (['(1985)', '(10)', '(25)', '(1)', '(21)', '(0)'], {}), '(1985, 10, 25, 1, 21, 
0)\n', (20050, 20074), False, 'import datetime\n'), ((20111, 20141), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(10)'}), '(minutes=10)\n', (20129, 20141), False, 'import datetime\n'), ((20159, 20204), 'nova.objects.CellMappingList.get_all', 'objects.CellMappingList.get_all', (['self.context'], {}), '(self.context)\n', (20190, 20204), False, 'from nova import objects\n'), ((22274, 22395), 'nova.compute.instance_list.get_instance_objects_sorted', 'instance_list.get_instance_objects_sorted', (['self.context', 'filters', 'limit', 'marker', 'expected_attrs', 'sort_keys', 'sort_dirs'], {}), '(self.context, filters, limit,\n marker, expected_attrs, sort_keys, sort_dirs)\n', (22315, 22395), False, 'from nova.compute import instance_list\n'), ((22994, 23115), 'nova.compute.instance_list.get_instance_objects_sorted', 'instance_list.get_instance_objects_sorted', (['self.context', 'filters', 'limit', 'marker', 'expected_attrs', 'sort_keys', 'sort_dirs'], {}), '(self.context, filters, limit,\n marker, expected_attrs, sort_keys, sort_dirs)\n', (23035, 23115), False, 'from nova.compute import instance_list\n'), ((23884, 23988), 'nova.compute.instance_list.get_instance_objects_sorted', 'instance_list.get_instance_objects_sorted', (['self.context', '{}', 'None', 'None', '[]', "['created_at']", "['asc']"], {}), "(self.context, {}, None, None, [],\n ['created_at'], ['asc'])\n", (23925, 23988), False, 'from nova.compute import instance_list\n'), ((24103, 24222), 'nova.compute.instance_list.get_instance_objects_sorted', 'instance_list.get_instance_objects_sorted', (['self.context', '{}', 'None', "instp1[-1]['uuid']", '[]', "['created_at']", "['asc']"], {}), "(self.context, {}, None, instp1[-1\n ]['uuid'], [], ['created_at'], ['asc'])\n", (24144, 24222), False, 'from nova.compute import instance_list\n'), ((18974, 19018), 'nova.context.target_cell', 'context.target_cell', (['self.context', 'last_cell'], {}), '(self.context, last_cell)\n', (18993, 19018), False, 'from 
nova import context\n'), ((19063, 19089), 'nova.db.main.api.instance_get_all', 'db.instance_get_all', (['cctxt'], {}), '(cctxt)\n', (19082, 19089), True, 'from nova.db.main import api as db\n'), ((2741, 2884), 'nova.objects.InstanceMapping', 'objects.InstanceMapping', ([], {'context': 'self.context', 'project_id': 'inst.project_id', 'user_id': 'inst.user_id', 'instance_uuid': 'inst.uuid', 'cell_mapping': 'cell'}), '(context=self.context, project_id=inst.project_id,\n user_id=inst.user_id, instance_uuid=inst.uuid, cell_mapping=cell)\n', (2764, 2884), False, 'from nova import objects\n'), ((12665, 12751), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', '{}', 'None', 'marker', '[]', 'None', 'None'], {}), '(self.context, {}, None, marker, [], None,\n None)\n', (12699, 12751), False, 'from nova.compute import instance_list\n'), ((12932, 13018), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', '{}', 'None', 'marker', '[]', 'None', 'None'], {}), '(self.context, {}, None, marker, [], None,\n None)\n', (12966, 13018), False, 'from nova.compute import instance_list\n'), ((13290, 13384), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', '{}', 'None', '"""not-a-marker"""', '[]', 'None', 'None'], {}), "(self.context, {}, None, 'not-a-marker',\n [], None, None)\n", (13324, 13384), False, 'from nova.compute import instance_list\n'), ((13981, 14075), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', '{}', 'None', 'uuids.missing', '[]', 'None', 'None'], {}), '(self.context, {}, None, uuids.missing, [\n ], None, None)\n', (14015, 14075), False, 'from nova.compute import instance_list\n'), ((17141, 17243), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', '{}', 'None', 'None', "['fault']", 
"['hostname']", "['asc']"], {}), "(self.context, {}, None, None, ['fault'],\n ['hostname'], ['asc'])\n", (17175, 17243), False, 'from nova.compute import instance_list\n'), ((19206, 19329), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', '{}', 'None', 'None', '[]', "['uuid']", "['asc']"], {'cell_mappings': 'self.cells[:-1]'}), "(self.context, {}, None, None, [], [\n 'uuid'], ['asc'], cell_mappings=self.cells[:-1])\n", (19240, 19329), False, 'from nova.compute import instance_list\n'), ((21666, 21809), 'nova.objects.InstanceMapping', 'objects.InstanceMapping', ([], {'context': 'self.context', 'project_id': 'inst.project_id', 'user_id': 'inst.user_id', 'instance_uuid': 'inst.uuid', 'cell_mapping': 'cell'}), '(context=self.context, project_id=inst.project_id,\n user_id=inst.user_id, instance_uuid=inst.uuid, cell_mapping=cell)\n', (21689, 21809), False, 'from nova import objects\n'), ((1536, 1575), 'nova.context.target_cell', 'context.target_cell', (['self.context', 'cell'], {}), '(self.context, cell)\n', (1555, 1575), False, 'from nova import context\n'), ((1612, 1810), 'nova.objects.Instance', 'objects.Instance', ([], {'context': 'cctx', 'project_id': 'self.context.project_id', 'user_id': 'self.context.user_id', 'created_at': 'start', 'launched_at': 'dt', 'instance_type_id': 'i', 'hostname': "('%s-inst%i' % (cell.name, i))"}), "(context=cctx, project_id=self.context.project_id, user_id=\n self.context.user_id, created_at=start, launched_at=dt,\n instance_type_id=i, hostname='%s-inst%i' % (cell.name, i))\n", (1628, 1810), False, 'from nova import objects\n'), ((8300, 8399), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', '{}', 'limit', 'marker', '[]', '[sort_by]', '[sort_dir]'], {}), '(self.context, {}, limit, marker, [], [\n sort_by], [sort_dir])\n', (8334, 8399), False, 'from nova.compute import instance_list\n'), ((14430, 14530), 
'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', 'filters', '(1)', 'marker', '[]', "['hostname']", "['asc']"], {}), "(self.context, filters, 1, marker, [], [\n 'hostname'], ['asc'])\n", (14464, 14530), False, 'from nova.compute import instance_list\n'), ((18111, 18212), 'nova.compute.instance_list.get_instances_sorted', 'instance_list.get_instances_sorted', (['self.context', '{}', '(1)', 'marker', "['fault']", "['hostname']", "['asc']"], {}), "(self.context, {}, 1, marker, ['fault'],\n ['hostname'], ['asc'])\n", (18145, 18212), False, 'from nova.compute import instance_list\n'), ((20461, 20500), 'nova.context.target_cell', 'context.target_cell', (['self.context', 'cell'], {}), '(self.context, cell)\n', (20480, 20500), False, 'from nova import context\n'), ((20537, 20735), 'nova.objects.Instance', 'objects.Instance', ([], {'context': 'cctx', 'project_id': 'self.context.project_id', 'user_id': 'self.context.user_id', 'created_at': 'start', 'launched_at': 'dt', 'instance_type_id': 'i', 'hostname': "('%s-inst%i' % (cell.name, i))"}), "(context=cctx, project_id=self.context.project_id, user_id=\n self.context.user_id, created_at=start, launched_at=dt,\n instance_type_id=i, hostname='%s-inst%i' % (cell.name, i))\n", (20553, 20735), False, 'from nova import objects\n'), ((2251, 2369), 'nova.objects.InstanceFault', 'objects.InstanceFault', ([], {'context': 'cctx', 'instance_uuid': 'inst.uuid', 'code': 'i', 'message': 'msg', 'details': '"""fake"""', 'host': '"""fakehost"""'}), "(context=cctx, instance_uuid=inst.uuid, code=i,\n message=msg, details='fake', host='fakehost')\n", (2272, 2369), False, 'from nova import objects\n'), ((21176, 21294), 'nova.objects.InstanceFault', 'objects.InstanceFault', ([], {'context': 'cctx', 'instance_uuid': 'inst.uuid', 'code': 'i', 'message': 'msg', 'details': '"""fake"""', 'host': '"""fakehost"""'}), "(context=cctx, instance_uuid=inst.uuid, code=i,\n message=msg, details='fake', 
host='fakehost')\n", (21197, 21294), False, 'from nova import objects\n')] |
# -*- coding: utf-8 -*-
from flask.ext.assets import Bundle, Environment
css = Bundle(
"libs/bootstrap/dist/css/bootstrap.css",
"libs/dataTables/dataTables.bootstrap.css",
"libs/dataTables/dataTables.tableTools.css",
"libs/font-awesome4/css/font-awesome.css",
"libs/bootstrap-datepicker/css/datepicker3.css",
"libs/bootstrap-tagsinput/dist/bootstrap-tagsinput.css",
"css/style.css",
filters="cssmin",
output="public/css/common.css"
)
js = Bundle(
"libs/jQuery/dist/jquery.js",
"libs/bootstrap/dist/js/bootstrap.js",
"libs/dataTables/jquery.dataTables.js",
"libs/dataTables/dataTables.bootstrap.js",
"libs/dataTables/dataTables.tableTools.js",
"libs/bootstrap-datepicker/js/bootstrap-datepicker.js",
"libs/bootstrap-tagsinput/dist/bootstrap-tagsinput.js",
"libs/typeahead.js/dist/typeahead.bundle.js",
"libs/pagedown/Markdown.Converter.js",
"libs/pagedown/Markdown.Sanitizer.js",
"js/plugins.js",
"js/script.js",
# filters='jsmin',
output="public/js/common.js"
)
# Warning: for fonts, you need to copy over everything manually for now to static/fonts
assets = Environment()
assets.register("js_all", js)
assets.register("css_all", css)
| [
"flask.ext.assets.Bundle",
"flask.ext.assets.Environment"
] | [((80, 456), 'flask.ext.assets.Bundle', 'Bundle', (['"""libs/bootstrap/dist/css/bootstrap.css"""', '"""libs/dataTables/dataTables.bootstrap.css"""', '"""libs/dataTables/dataTables.tableTools.css"""', '"""libs/font-awesome4/css/font-awesome.css"""', '"""libs/bootstrap-datepicker/css/datepicker3.css"""', '"""libs/bootstrap-tagsinput/dist/bootstrap-tagsinput.css"""', '"""css/style.css"""'], {'filters': '"""cssmin"""', 'output': '"""public/css/common.css"""'}), "('libs/bootstrap/dist/css/bootstrap.css',\n 'libs/dataTables/dataTables.bootstrap.css',\n 'libs/dataTables/dataTables.tableTools.css',\n 'libs/font-awesome4/css/font-awesome.css',\n 'libs/bootstrap-datepicker/css/datepicker3.css',\n 'libs/bootstrap-tagsinput/dist/bootstrap-tagsinput.css',\n 'css/style.css', filters='cssmin', output='public/css/common.css')\n", (86, 456), False, 'from flask.ext.assets import Bundle, Environment\n'), ((459, 996), 'flask.ext.assets.Bundle', 'Bundle', (['"""libs/jQuery/dist/jquery.js"""', '"""libs/bootstrap/dist/js/bootstrap.js"""', '"""libs/dataTables/jquery.dataTables.js"""', '"""libs/dataTables/dataTables.bootstrap.js"""', '"""libs/dataTables/dataTables.tableTools.js"""', '"""libs/bootstrap-datepicker/js/bootstrap-datepicker.js"""', '"""libs/bootstrap-tagsinput/dist/bootstrap-tagsinput.js"""', '"""libs/typeahead.js/dist/typeahead.bundle.js"""', '"""libs/pagedown/Markdown.Converter.js"""', '"""libs/pagedown/Markdown.Sanitizer.js"""', '"""js/plugins.js"""', '"""js/script.js"""'], {'output': '"""public/js/common.js"""'}), "('libs/jQuery/dist/jquery.js', 'libs/bootstrap/dist/js/bootstrap.js',\n 'libs/dataTables/jquery.dataTables.js',\n 'libs/dataTables/dataTables.bootstrap.js',\n 'libs/dataTables/dataTables.tableTools.js',\n 'libs/bootstrap-datepicker/js/bootstrap-datepicker.js',\n 'libs/bootstrap-tagsinput/dist/bootstrap-tagsinput.js',\n 'libs/typeahead.js/dist/typeahead.bundle.js',\n 'libs/pagedown/Markdown.Converter.js',\n 'libs/pagedown/Markdown.Sanitizer.js', 
'js/plugins.js', 'js/script.js',\n output='public/js/common.js')\n", (465, 996), False, 'from flask.ext.assets import Bundle, Environment\n'), ((1109, 1122), 'flask.ext.assets.Environment', 'Environment', ([], {}), '()\n', (1120, 1122), False, 'from flask.ext.assets import Bundle, Environment\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Description
# -----------
# Dump abstract informations in a JSON format
# see: abstract_reader.py
import argparse
import sys
import lief
import json
def main():
parser = argparse.ArgumentParser()
parser.add_argument('binary', help = 'A binary')
args = parser.parse_args()
binary = lief.parse(args.binary)
json_data = json.loads(lief.to_json_from_abstract(binary))
print(json.dumps(json_data, sort_keys = True, indent = 4))
if __name__ == "__main__":
sys.exit(main())
| [
"lief.parse",
"json.dumps",
"argparse.ArgumentParser",
"lief.to_json_from_abstract"
] | [((225, 250), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (248, 250), False, 'import argparse\n'), ((352, 375), 'lief.parse', 'lief.parse', (['args.binary'], {}), '(args.binary)\n', (362, 375), False, 'import lief\n'), ((403, 437), 'lief.to_json_from_abstract', 'lief.to_json_from_abstract', (['binary'], {}), '(binary)\n', (429, 437), False, 'import lief\n'), ((449, 496), 'json.dumps', 'json.dumps', (['json_data'], {'sort_keys': '(True)', 'indent': '(4)'}), '(json_data, sort_keys=True, indent=4)\n', (459, 496), False, 'import json\n')] |
from piroq.service import Manager
def main():
Manager().run()
| [
"piroq.service.Manager"
] | [((49, 58), 'piroq.service.Manager', 'Manager', ([], {}), '()\n', (56, 58), False, 'from piroq.service import Manager\n')] |
# users/forms.py
# Django modules
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
class RegisterForm(UserCreationForm):
username = forms.CharField(max_length=50)
email = forms.EmailField(max_length=50)
password1 = forms.CharField()
password2 = forms.CharField()
class Meta(UserCreationForm):
model = User
fields = ('username','email','password1','<PASSWORD>') | [
"django.forms.EmailField",
"django.forms.CharField"
] | [((213, 243), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (228, 243), False, 'from django import forms\n'), ((256, 287), 'django.forms.EmailField', 'forms.EmailField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (272, 287), False, 'from django import forms\n'), ((304, 321), 'django.forms.CharField', 'forms.CharField', ([], {}), '()\n', (319, 321), False, 'from django import forms\n'), ((338, 355), 'django.forms.CharField', 'forms.CharField', ([], {}), '()\n', (353, 355), False, 'from django import forms\n')] |
from __future__ import unicode_literals
import collections
import itertools
import operator
import time
import functools
from cytoolz import (
dissoc,
assoc
)
from cytoolz.itertoolz import (
remove,
)
from cytoolz.functoolz import (
compose,
excepts,
partial,
)
from eth_utils import (
is_integer,
is_same_address,
to_dict,
to_list,
to_tuple,
to_int
)
from eth_tester.backends import (
get_chain_backend,
)
from eth_tester.exceptions import (
AccountLocked,
BlockNotFound,
FilterNotFound,
SnapshotNotFound,
TransactionNotFound,
ValidationError,
)
from eth_tester.normalization import (
get_normalizer_backend,
)
from eth_tester.utils.accounts import (
private_key_to_address,
)
from eth_tester.utils.filters import (
Filter,
check_if_log_matches,
)
from eth_tester.utils.transactions import (
extract_valid_transaction_params,
remove_matching_transaction_from_list,
)
from eth_tester.validation import (
get_validator,
)
def backend_proxy_method(backend_method_name):
    """Build an instance method that forwards its call to ``self.backend``.

    The returned function looks up *backend_method_name* on ``self.backend``
    at call time and delegates all positional and keyword arguments to it.
    """
    def proxy_method(self, *args, **kwargs):
        delegate = getattr(self.backend, backend_method_name)
        return delegate(*args, **kwargs)
    return proxy_method
@to_dict
def get_default_fork_blocks(supported_forks):
    """Yield ``(fork_name, None)`` pairs; ``@to_dict`` collects them into a
    dict mapping every supported fork to an unset (``None``) activation block.
    """
    for name in supported_forks:
        yield name, None
def handle_auto_mining(func):
    """Decorator for transaction-sending methods on ``EthereumTester``.

    When ``auto_mine_transactions`` is enabled, the transaction is sent and a
    block is mined immediately.  When disabled, the transaction is applied on
    a throwaway snapshot just to obtain its hash and full representation, the
    resulting pending transaction is recorded locally (replacing any queued
    transaction with the same nonce), and the chain state is rolled back.
    """
    @functools.wraps(func)
    def func_wrapper(self, *args, **kwargs):
        if self.auto_mine_transactions:
            transaction_hash = func(self, *args, **kwargs)
            self.mine_block()
        else:
            # Apply the transaction on a snapshot only to capture its hash
            # and pending representation, then revert.
            snapshot = self.take_snapshot()
            try:
                transaction_hash = func(self, *args, **kwargs)
                pending_transaction = self.get_transaction_by_hash(transaction_hash)
                # Remove any pending transactions with the same nonce
                self._pending_transactions = remove_matching_transaction_from_list(
                    self._pending_transactions, pending_transaction)
                self._pending_transactions.append(pending_transaction)
            finally:
                # Always roll back, even if hash lookup / bookkeeping failed.
                self.revert_to_snapshot(snapshot)
        return transaction_hash
    return func_wrapper
class EthereumTester(object):
backend = None
validator = None
normalizer = None
fork_blocks = None
auto_mine_transactions = None
def __init__(self,
backend=None,
validator=None,
normalizer=None,
auto_mine_transactions=True):
if backend is None:
backend = get_chain_backend()
if validator is None:
validator = get_validator()
if normalizer is None:
normalizer = get_normalizer_backend()
self.backend = backend
self.validator = validator
self.normalizer = normalizer
self.auto_mine_transactions = auto_mine_transactions
self._reset_local_state()
#
# Private API
#
_filter_counter = None
_log_filters = None
_block_filters = None
_pending_transaction_filters = None
_pending_transactions = []
_snapshot_counter = None
_snapshots = None
_account_passwords = None
_account_unlock = None
def _reset_local_state(self):
# filter tracking
self._filter_counter = itertools.count()
self._log_filters = {}
self._block_filters = {}
self._pending_transaction_filters = {}
# snapshot tracking
self._snapshot_counter = itertools.count()
self._snapshots = {}
# raw accounts
self._account_passwords = {}
self._account_unlock = collections.defaultdict(lambda: False)
#
# Time Traveling
#
def time_travel(self, to_timestamp):
self.validator.validate_inbound_timestamp(to_timestamp)
# make sure we are not traveling back in time as this is not possible.
current_timestamp = self.get_block_by_number('pending')['timestamp']
if to_timestamp == current_timestamp:
# no change, return immediately
return
elif to_timestamp < current_timestamp:
raise ValidationError(
"Space time continuum distortion detected. Traveling backwards "
"in time violates interdimensional ordinance 31415-926."
)
else:
self.backend.time_travel(to_timestamp)
#
# Accounts
#
def get_accounts(self):
raw_accounts = self.backend.get_accounts()
self.validator.validate_outbound_accounts(raw_accounts)
accounts = self.normalizer.normalize_outbound_account_list(raw_accounts)
return accounts
def add_account(self, private_key, password=None):
# TODO: validation
self.validator.validate_inbound_private_key(private_key)
raw_private_key = self.normalizer.normalize_inbound_private_key(private_key)
raw_account = private_key_to_address(raw_private_key)
account = self.normalizer.normalize_outbound_account(raw_account)
if any((is_same_address(account, value) for value in self.get_accounts())):
raise ValidationError("Account already present in account list")
self.backend.add_account(raw_private_key)
self._account_passwords[raw_account] = password
# TODO: outbound normalization
return account
def unlock_account(self, account, password, unlock_seconds=None):
self.validator.validate_inbound_account(account)
raw_account = self.normalizer.normalize_inbound_account(account)
try:
account_password = self._account_passwords[raw_account]
except KeyError:
raise ValidationError("Unknown account")
if account_password is None:
raise ValidationError("Account does not have a password")
if account_password != password:
raise ValidationError("Wrong password")
if unlock_seconds is None:
unlock_until = None
else:
unlock_until = time.time() + unlock_seconds
self._account_unlock[raw_account] = unlock_until
def lock_account(self, account):
self.validator.validate_inbound_account(account)
raw_account = self.normalizer.normalize_inbound_account(account)
if raw_account not in self._account_passwords:
raise ValidationError("Unknown account")
elif self._account_passwords[raw_account] is None:
raise ValidationError("Account does not have a password")
self._account_unlock[raw_account] = False
def get_balance(self, account, block_number="latest"):
self.validator.validate_inbound_account(account)
self.validator.validate_inbound_block_number(block_number)
raw_account = self.normalizer.normalize_inbound_account(account)
raw_block_number = self.normalizer.normalize_inbound_block_number(block_number)
raw_balance = self.backend.get_balance(raw_account, raw_block_number)
self.validator.validate_outbound_balance(raw_balance)
balance = self.normalizer.normalize_outbound_balance(raw_balance)
return balance
def get_code(self, account, block_number="latest"):
self.validator.validate_inbound_account(account)
self.validator.validate_inbound_block_number(block_number)
raw_account = self.normalizer.normalize_inbound_account(account)
raw_block_number = self.normalizer.normalize_inbound_block_number(block_number)
raw_code = self.backend.get_code(raw_account, raw_block_number)
self.validator.validate_outbound_code(raw_code)
code = self.normalizer.normalize_outbound_code(raw_code)
return code
def get_nonce(self, account, block_number="latest"):
self.validator.validate_inbound_account(account)
self.validator.validate_inbound_block_number(block_number)
raw_account = self.normalizer.normalize_inbound_account(account)
raw_block_number = self.normalizer.normalize_inbound_block_number(block_number)
raw_nonce = self.backend.get_nonce(raw_account, raw_block_number)
self.validator.validate_outbound_nonce(raw_nonce)
nonce = self.normalizer.normalize_outbound_nonce(raw_nonce)
return nonce
#
# Blocks, Transactions, Receipts
#
def _get_pending_transaction_by_hash(self, transaction_hash):
for transaction in self._pending_transactions:
if transaction['hash'] == transaction_hash:
return transaction
raise TransactionNotFound(
"No transaction found for transaction hash: {0}".format(transaction_hash)
)
def get_transaction_by_hash(self, transaction_hash):
self.validator.validate_inbound_transaction_hash(transaction_hash)
try:
return self._get_pending_transaction_by_hash(transaction_hash)
except TransactionNotFound:
raw_transaction_hash = self.normalizer.normalize_inbound_transaction_hash(
transaction_hash,
)
raw_transaction = self.backend.get_transaction_by_hash(raw_transaction_hash)
self.validator.validate_outbound_transaction(raw_transaction)
transaction = self.normalizer.normalize_outbound_transaction(raw_transaction)
return transaction
def get_block_by_number(self, block_number="latest", full_transactions=False):
self.validator.validate_inbound_block_number(block_number)
raw_block_number = self.normalizer.normalize_inbound_block_number(block_number)
raw_block = self.backend.get_block_by_number(raw_block_number, full_transactions)
self.validator.validate_outbound_block(raw_block)
block = self.normalizer.normalize_outbound_block(raw_block)
return block
def get_block_by_hash(self, block_hash, full_transactions=False):
self.validator.validate_inbound_block_hash(block_hash)
raw_block_hash = self.normalizer.normalize_inbound_block_hash(block_hash)
raw_block = self.backend.get_block_by_hash(raw_block_hash, full_transactions)
self.validator.validate_outbound_block(raw_block)
block = self.normalizer.normalize_outbound_block(raw_block)
return block
def get_transaction_receipt(self, transaction_hash):
self.validator.validate_inbound_transaction_hash(transaction_hash)
raw_transaction_hash = self.normalizer.normalize_inbound_transaction_hash(
transaction_hash,
)
raw_receipt = self.backend.get_transaction_receipt(raw_transaction_hash)
self.validator.validate_outbound_receipt(raw_receipt)
receipt = self.normalizer.normalize_outbound_receipt(raw_receipt)
# Assume backend supports Byzantium
status = to_int(receipt.pop('state_root'))
if status > 1:
raise ValidationError('Invalid status value: only 0 or 1 are valid')
return assoc(receipt, 'status', status)
#
# Mining
#
def enable_auto_mine_transactions(self):
self.auto_mine_transactions = True
sent_transaction_hashes = self._pop_pending_transactions_to_pending_block()
self.mine_block()
return sent_transaction_hashes
    def disable_auto_mine_transactions(self):
        """Stop mining a block after every sent transaction; subsequently sent
        transactions queue locally until ``mine_block``/``mine_blocks`` or
        ``enable_auto_mine_transactions`` is called."""
        self.auto_mine_transactions = False
    def mine_blocks(self, num_blocks=1, coinbase=None):
        """Mine *num_blocks* blocks (optionally crediting *coinbase*) and
        return their normalized hashes.

        Any locally queued transactions are flushed into the pending block
        first when auto-mining is disabled.  Newly mined block hashes are fed
        to block filters, and their logs to log filters.
        """
        if coinbase is None:
            raw_coinbase = None
        else:
            self.validator.validate_inbound_account(coinbase)
            raw_coinbase = self.normalizer.normalize_inbound_account(coinbase)
        if not self.auto_mine_transactions:
            # move queued transactions into the pending block before mining
            self._pop_pending_transactions_to_pending_block()
        raw_block_hashes = self.backend.mine_blocks(num_blocks, raw_coinbase)
        # sanity check: the backend must produce exactly the requested count
        if len(raw_block_hashes) != num_blocks:
            raise ValidationError(
                "Invariant: tried to mine {0} blocks. Got {1} mined block hashes.".format(
                    num_blocks,
                    len(raw_block_hashes),
                )
            )
        for raw_block_hash in raw_block_hashes:
            self.validator.validate_outbound_block_hash(raw_block_hash)
        block_hashes = [
            self.normalizer.normalize_outbound_block_hash(raw_block_hash)
            for raw_block_hash
            in raw_block_hashes
        ]
        # feed the block hashes to any block filters
        for block_hash in block_hashes:
            block = self.get_block_by_hash(block_hash)
            for _, block_filter in self._block_filters.items():
                raw_block_hash = self.normalizer.normalize_inbound_block_hash(block_hash)
                block_filter.add(raw_block_hash)
            self._process_block_logs(block)
        return block_hashes
def mine_block(self, coinbase=None):
block_hash = self.mine_blocks(1, coinbase=coinbase)[0]
return block_hash
#
# Private mining API
#
def _process_block_logs(self, block):
for fid, filter in self._log_filters.items():
self._add_log_entries_to_filter(block, filter)
def _add_log_entries_to_filter(self, block, filter_):
for transaction_hash in block['transactions']:
receipt = self.get_transaction_receipt(transaction_hash)
for log_entry in receipt['logs']:
raw_log_entry = self.normalizer.normalize_inbound_log_entry(log_entry)
filter_.add(raw_log_entry)
def _pop_pending_transactions_to_pending_block(self):
sent_transaction_hashes = self._add_all_to_pending_block(self._pending_transactions)
self._pending_transactions.clear()
return sent_transaction_hashes
@to_list
def _add_all_to_pending_block(self, pending_transactions):
for pending in pending_transactions:
txn = extract_valid_transaction_params(pending)
yield self._add_transaction_to_pending_block(txn, txn_type='send_signed')
#
# Transaction Sending
#
def _handle_filtering_for_transaction(self, transaction_hash):
# feed the transaction hash to any pending transaction filters.
for _, filter in self._pending_transaction_filters.items():
raw_transaction_hash = self.normalizer.normalize_inbound_transaction_hash(
transaction_hash,
)
filter.add(raw_transaction_hash)
if self._log_filters:
receipt = self.get_transaction_receipt(transaction_hash)
for log_entry in receipt['logs']:
for _, filter in self._log_filters.items():
raw_log_entry = self.normalizer.normalize_inbound_log_entry(log_entry)
filter.add(raw_log_entry)
@handle_auto_mining
def send_raw_transaction(self, raw_transaction_hex):
self.validator.validate_inbound_raw_transaction(raw_transaction_hex)
raw_transaction = self.normalizer.normalize_inbound_raw_transaction(raw_transaction_hex)
raw_transaction_hash = self.backend.send_raw_transaction(raw_transaction)
self.validator.validate_outbound_transaction_hash(raw_transaction_hash)
transaction_hash = self.normalizer.normalize_outbound_transaction_hash(
raw_transaction_hash,
)
self._handle_filtering_for_transaction(transaction_hash)
return transaction_hash
    @handle_auto_mining
    def send_transaction(self, transaction):
        """Send *transaction* and return its hash.

        Subject to ``handle_auto_mining``: mined immediately when auto-mining
        is enabled, otherwise queued locally.
        """
        return self._add_transaction_to_pending_block(transaction)
def call(self, transaction, block_number="latest"):
self.validator.validate_inbound_transaction(transaction, txn_type='call')
raw_transaction = self.normalizer.normalize_inbound_transaction(transaction)
self.validator.validate_inbound_block_number(block_number)
raw_block_number = self.normalizer.normalize_inbound_block_number(block_number)
raw_result = self.backend.call(raw_transaction, raw_block_number)
self.validator.validate_outbound_return_data(raw_result)
result = self.normalizer.normalize_outbound_return_data(raw_result)
return result
def estimate_gas(self, transaction):
self.validator.validate_inbound_transaction(transaction, txn_type='estimate')
raw_transaction = self.normalizer.normalize_inbound_transaction(transaction)
raw_gas_estimate = self.backend.estimate_gas(raw_transaction)
self.validator.validate_outbound_gas_estimate(raw_gas_estimate)
gas_estimate = self.normalizer.normalize_outbound_gas_estimate(raw_gas_estimate)
return gas_estimate
#
# Private Transaction API
#
    def _add_transaction_to_pending_block(self, transaction, txn_type='send'):
        """Validate, lock-check, and send *transaction*; return its hash.

        Password-protected senders must be unlocked (``AccountLocked``
        otherwise).  A transaction carrying an r/s/v signature is sent via
        ``send_signed_transaction`` when the backend supports it, falling back
        to stripping the signature and using plain ``send_transaction``.
        """
        self.validator.validate_inbound_transaction(transaction, txn_type=txn_type)
        raw_transaction = self.normalizer.normalize_inbound_transaction(transaction)
        if raw_transaction['from'] in self._account_passwords:
            unlocked_until = self._account_unlock[raw_transaction['from']]
            account_password = self._account_passwords[raw_transaction['from']]
            # locked when a password is set and the unlock entry is either the
            # default False or an expired deadline; None means unlocked forever
            is_locked = account_password is not None and unlocked_until is not None and (
                unlocked_until is False or time.time() > unlocked_until
            )
            if is_locked:
                raise AccountLocked("The account is currently locked")
        if {'r', 's', 'v'}.issubset(transaction.keys()):
            try:
                raw_transaction_hash = self.backend.send_signed_transaction(raw_transaction)
            except NotImplementedError:
                # backend cannot send signed txns: strip the signature fields
                unsigned_transaction = dissoc(raw_transaction, 'r', 's', 'v')
                raw_transaction_hash = self.backend.send_transaction(unsigned_transaction)
        else:
            raw_transaction_hash = self.backend.send_transaction(raw_transaction)
        self.validator.validate_outbound_transaction_hash(raw_transaction_hash)
        transaction_hash = self.normalizer.normalize_outbound_transaction_hash(
            raw_transaction_hash,
        )
        self._handle_filtering_for_transaction(transaction_hash)
        return transaction_hash
#
# Snapshot and Revert
#
def take_snapshot(self):
snapshot = self.backend.take_snapshot()
snapshot_id = next(self._snapshot_counter)
self._snapshots[snapshot_id] = snapshot
return snapshot_id
    def revert_to_snapshot(self, snapshot_id):
        """Roll the backend back to *snapshot_id* and prune filter state.

        Raises ``SnapshotNotFound`` for an unknown id.  After reverting, every
        filter is scrubbed of entries that no longer exist on the chain.
        """
        try:
            snapshot = self._snapshots[snapshot_id]
        except KeyError:
            raise SnapshotNotFound("No snapshot found for id: {0}".format(snapshot_id))
        else:
            self.backend.revert_to_snapshot(snapshot)
        # drop filter entries that refer to now-nonexistent blocks/transactions
        for block_filter in self._block_filters.values():
            self._revert_block_filter(block_filter)
        for pending_transaction_filter in self._pending_transaction_filters.values():
            self._revert_pending_transaction_filter(pending_transaction_filter)
        for log_filter in self._log_filters.values():
            self._revert_log_filter(log_filter)
    def reset_to_genesis(self):
        """Reset the backend to its genesis state and clear all local
        bookkeeping (filters, snapshots, account passwords/unlocks)."""
        self.backend.reset_to_genesis()
        self._reset_local_state()
#
# Private filter API
#
    def _revert_block_filter(self, filter):
        """Remove block hashes from *filter* that no longer exist on-chain.

        A hash is kept only if ``get_block_by_hash`` still resolves it; a
        ``BlockNotFound`` from the lookup marks it invalid.
        """
        is_valid_block_hash = excepts(
            (BlockNotFound,),
            compose(
                bool,
                self.get_block_by_hash,
                self.normalizer.normalize_outbound_block_hash,
            ),
            lambda v: False,
        )
        values_to_remove = tuple(remove(is_valid_block_hash, filter.get_all()))
        filter.remove(*values_to_remove)
    def _revert_pending_transaction_filter(self, filter):
        """Remove transaction hashes from *filter* that no longer resolve.

        A hash is kept only if ``get_transaction_by_hash`` still finds it; a
        ``TransactionNotFound`` from the lookup marks it invalid.
        """
        is_valid_transaction_hash = excepts(
            (TransactionNotFound,),
            compose(
                bool,
                self.get_transaction_by_hash,
                self.normalizer.normalize_outbound_transaction_hash,
            ),
            lambda v: False,
        )
        values_to_remove = remove(is_valid_transaction_hash, filter.get_all())
        filter.remove(*values_to_remove)
    def _revert_log_filter(self, filter):
        """Remove log entries from *filter* whose transactions no longer exist.

        Each entry's ``transaction_hash`` is looked up via
        ``get_transaction_by_hash``; a ``TransactionNotFound`` marks the entry
        invalid.
        """
        is_valid_transaction_hash = excepts(
            (TransactionNotFound,),
            compose(
                bool,
                self.get_transaction_by_hash,
                self.normalizer.normalize_outbound_transaction_hash,
                operator.itemgetter('transaction_hash'),
            ),
            lambda v: False,
        )
        values_to_remove = remove(is_valid_transaction_hash, filter.get_all())
        filter.remove(*values_to_remove)
#
# Filters
#
def create_block_filter(self):
raw_filter_id = next(self._filter_counter)
self._block_filters[raw_filter_id] = Filter(filter_params=None)
filter_id = self.normalizer.normalize_outbound_filter_id(raw_filter_id)
return filter_id
def create_pending_transaction_filter(self):
raw_filter_id = next(self._filter_counter)
self._pending_transaction_filters[raw_filter_id] = Filter(filter_params=None)
filter_id = self.normalizer.normalize_outbound_filter_id(raw_filter_id)
return filter_id
    def create_log_filter(self, from_block=None, to_block=None, address=None, topics=None):
        """Create a log filter over the given block range/address/topics.

        If *from_block* is an integer, existing blocks from *from_block* up to
        (but excluding) the upper bound are back-filled into the filter.
        Returns the normalized filter id.
        """
        self.validator.validate_inbound_filter_params(
            from_block=from_block,
            to_block=to_block,
            address=address,
            topics=topics,
        )
        (
            raw_from_block,
            raw_to_block,
            raw_address,
            raw_topics,
        ) = self.normalizer.normalize_inbound_filter_params(
            from_block=from_block,
            to_block=to_block,
            address=address,
            topics=topics,
        )
        raw_filter_id = next(self._filter_counter)
        raw_filter_params = {
            'from_block': raw_from_block,
            'to_block': raw_to_block,
            'addresses': raw_address,
            'topics': raw_topics,
        }
        filter_fn = partial(
            check_if_log_matches,
            **raw_filter_params
        )
        new_filter = Filter(
            filter_params=raw_filter_params,
            filter_fn=filter_fn,
        )
        self._log_filters[raw_filter_id] = new_filter
        if is_integer(raw_from_block):
            if is_integer(raw_to_block):
                upper_bound = raw_to_block
            else:
                upper_bound = self.get_block_by_number('pending')['number']
            # NOTE(review): range() excludes upper_bound here, while get_logs
            # iterates through upper_bound + 1 -- confirm whether an integer
            # to_block is meant to be inclusive for back-filling.
            for block_number in range(raw_from_block, upper_bound):
                block = self.get_block_by_number(block_number)
                self._add_log_entries_to_filter(block, new_filter)
        filter_id = self.normalizer.normalize_outbound_filter_id(raw_filter_id)
        return filter_id
def delete_filter(self, filter_id):
self.validator.validate_inbound_filter_id(filter_id)
raw_filter_id = self.normalizer.normalize_inbound_filter_id(filter_id)
if raw_filter_id in self._block_filters:
del self._block_filters[raw_filter_id]
elif raw_filter_id in self._pending_transaction_filters:
del self._pending_transaction_filters[raw_filter_id]
elif raw_filter_id in self._log_filters:
del self._log_filters[raw_filter_id]
else:
raise FilterNotFound("Unknown filter id")
@to_tuple
def get_only_filter_changes(self, filter_id):
self.validator.validate_inbound_filter_id(filter_id)
raw_filter_id = self.normalizer.normalize_inbound_filter_id(filter_id)
if raw_filter_id in self._block_filters:
filter = self._block_filters[raw_filter_id]
normalize_fn = self.normalizer.normalize_outbound_block_hash
elif raw_filter_id in self._pending_transaction_filters:
filter = self._pending_transaction_filters[raw_filter_id]
normalize_fn = self.normalizer.normalize_outbound_transaction_hash
elif raw_filter_id in self._log_filters:
filter = self._log_filters[raw_filter_id]
normalize_fn = self.normalizer.normalize_outbound_log_entry
else:
raise FilterNotFound("Unknown filter id")
for item in filter.get_changes():
yield normalize_fn(item)
@to_tuple
def get_all_filter_logs(self, filter_id):
self.validator.validate_inbound_filter_id(filter_id)
raw_filter_id = self.normalizer.normalize_inbound_filter_id(filter_id)
if raw_filter_id in self._block_filters:
filter = self._block_filters[raw_filter_id]
normalize_fn = self.normalizer.normalize_outbound_block_hash
elif raw_filter_id in self._pending_transaction_filters:
filter = self._pending_transaction_filters[raw_filter_id]
normalize_fn = self.normalizer.normalize_outbound_transaction_hash
elif raw_filter_id in self._log_filters:
filter = self._log_filters[raw_filter_id]
normalize_fn = self.normalizer.normalize_outbound_log_entry
else:
raise FilterNotFound("Unknown filter id")
for item in filter.get_all():
yield normalize_fn(item)
    @to_tuple
    def get_logs(self, from_block=None, to_block=None, address=None, topics=None):
        """Scan a block range and yield every matching, normalized log entry.

        ``from_block`` and ``to_block`` default to 'latest' and the scan is
        inclusive of both ends.
        """
        self.validator.validate_inbound_filter_params(
            from_block=from_block,
            to_block=to_block,
            address=address,
            topics=topics,
        )
        (
            raw_from_block,
            raw_to_block,
            raw_address,
            raw_topics,
        ) = self.normalizer.normalize_inbound_filter_params(
            from_block=from_block,
            to_block=to_block,
            address=address,
            topics=topics,
        )
        # Setup the filter object
        raw_filter_params = {
            'from_block': raw_from_block,
            'to_block': raw_to_block,
            'addresses': raw_address,
            'topics': raw_topics,
        }
        filter_fn = partial(
            check_if_log_matches,
            **raw_filter_params,
        )
        log_filter = Filter(
            filter_params=raw_filter_params,
            filter_fn=filter_fn,
        )
        # Set from/to block defaults
        # NOTE: the defaults are applied only after the filter params were
        # captured above, so the filter itself still sees the raw None
        # values; only the range scan below uses the 'latest' fallback.
        if raw_from_block is None:
            raw_from_block = 'latest'
        if raw_to_block is None:
            raw_to_block = 'latest'
        # Determine lower bound for block range.
        if isinstance(raw_from_block, int):
            lower_bound = raw_from_block
        else:
            lower_bound = self.get_block_by_number(raw_from_block)['number']
        # Determine upper bound for block range.
        if isinstance(raw_to_block, int):
            upper_bound = raw_to_block
        else:
            upper_bound = self.get_block_by_number(raw_to_block)['number']
        # Enumerate the blocks in the block range to find all log entries which match.
        # The +1 makes the upper bound inclusive.
        for block_number in range(lower_bound, upper_bound + 1):
            block = self.get_block_by_number(block_number)
            for transaction_hash in block['transactions']:
                receipt = self.get_transaction_receipt(transaction_hash)
                for log_entry in receipt['logs']:
                    raw_log_entry = self.normalizer.normalize_inbound_log_entry(log_entry)
                    log_filter.add(raw_log_entry)
        # Return the matching log entries
        for item in log_filter.get_all():
            yield self.normalizer.normalize_outbound_log_entry(item)
| [
"cytoolz.functoolz.compose",
"eth_tester.backends.get_chain_backend",
"eth_utils.is_same_address",
"operator.itemgetter",
"eth_tester.exceptions.FilterNotFound",
"cytoolz.assoc",
"eth_tester.utils.transactions.extract_valid_transaction_params",
"functools.wraps",
"eth_tester.utils.transactions.remov... | [((1426, 1447), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1441, 1447), False, 'import functools\n'), ((3395, 3412), 'itertools.count', 'itertools.count', ([], {}), '()\n', (3410, 3412), False, 'import itertools\n'), ((3586, 3603), 'itertools.count', 'itertools.count', ([], {}), '()\n', (3601, 3603), False, 'import itertools\n'), ((3725, 3764), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : False)'], {}), '(lambda : False)\n', (3748, 3764), False, 'import collections\n'), ((5015, 5054), 'eth_tester.utils.accounts.private_key_to_address', 'private_key_to_address', (['raw_private_key'], {}), '(raw_private_key)\n', (5037, 5054), False, 'from eth_tester.utils.accounts import private_key_to_address\n'), ((11058, 11090), 'cytoolz.assoc', 'assoc', (['receipt', '"""status"""', 'status'], {}), "(receipt, 'status', status)\n", (11063, 11090), False, 'from cytoolz import dissoc, assoc\n'), ((20922, 20948), 'eth_tester.utils.filters.Filter', 'Filter', ([], {'filter_params': 'None'}), '(filter_params=None)\n', (20928, 20948), False, 'from eth_tester.utils.filters import Filter, check_if_log_matches\n'), ((21214, 21240), 'eth_tester.utils.filters.Filter', 'Filter', ([], {'filter_params': 'None'}), '(filter_params=None)\n', (21220, 21240), False, 'from eth_tester.utils.filters import Filter, check_if_log_matches\n'), ((22196, 22246), 'cytoolz.functoolz.partial', 'partial', (['check_if_log_matches'], {}), '(check_if_log_matches, **raw_filter_params)\n', (22203, 22246), False, 'from cytoolz.functoolz import compose, excepts, partial\n'), ((22302, 22362), 'eth_tester.utils.filters.Filter', 'Filter', ([], {'filter_params': 'raw_filter_params', 'filter_fn': 'filter_fn'}), '(filter_params=raw_filter_params, filter_fn=filter_fn)\n', (22308, 22362), False, 'from eth_tester.utils.filters import Filter, check_if_log_matches\n'), ((22464, 22490), 'eth_utils.is_integer', 'is_integer', (['raw_from_block'], 
{}), '(raw_from_block)\n', (22474, 22490), False, 'from eth_utils import is_integer, is_same_address, to_dict, to_list, to_tuple, to_int\n'), ((26224, 26274), 'cytoolz.functoolz.partial', 'partial', (['check_if_log_matches'], {}), '(check_if_log_matches, **raw_filter_params)\n', (26231, 26274), False, 'from cytoolz.functoolz import compose, excepts, partial\n'), ((26331, 26391), 'eth_tester.utils.filters.Filter', 'Filter', ([], {'filter_params': 'raw_filter_params', 'filter_fn': 'filter_fn'}), '(filter_params=raw_filter_params, filter_fn=filter_fn)\n', (26337, 26391), False, 'from eth_tester.utils.filters import Filter, check_if_log_matches\n'), ((2639, 2658), 'eth_tester.backends.get_chain_backend', 'get_chain_backend', ([], {}), '()\n', (2656, 2658), False, 'from eth_tester.backends import get_chain_backend\n'), ((2714, 2729), 'eth_tester.validation.get_validator', 'get_validator', ([], {}), '()\n', (2727, 2729), False, 'from eth_tester.validation import get_validator\n'), ((2787, 2811), 'eth_tester.normalization.get_normalizer_backend', 'get_normalizer_backend', ([], {}), '()\n', (2809, 2811), False, 'from eth_tester.normalization import get_normalizer_backend\n'), ((5231, 5289), 'eth_tester.exceptions.ValidationError', 'ValidationError', (['"""Account already present in account list"""'], {}), "('Account already present in account list')\n", (5246, 5289), False, 'from eth_tester.exceptions import AccountLocked, BlockNotFound, FilterNotFound, SnapshotNotFound, TransactionNotFound, ValidationError\n'), ((5875, 5926), 'eth_tester.exceptions.ValidationError', 'ValidationError', (['"""Account does not have a password"""'], {}), "('Account does not have a password')\n", (5890, 5926), False, 'from eth_tester.exceptions import AccountLocked, BlockNotFound, FilterNotFound, SnapshotNotFound, TransactionNotFound, ValidationError\n'), ((5987, 6020), 'eth_tester.exceptions.ValidationError', 'ValidationError', (['"""Wrong password"""'], {}), "('Wrong password')\n", (6002, 
6020), False, 'from eth_tester.exceptions import AccountLocked, BlockNotFound, FilterNotFound, SnapshotNotFound, TransactionNotFound, ValidationError\n'), ((6459, 6493), 'eth_tester.exceptions.ValidationError', 'ValidationError', (['"""Unknown account"""'], {}), "('Unknown account')\n", (6474, 6493), False, 'from eth_tester.exceptions import AccountLocked, BlockNotFound, FilterNotFound, SnapshotNotFound, TransactionNotFound, ValidationError\n'), ((10980, 11042), 'eth_tester.exceptions.ValidationError', 'ValidationError', (['"""Invalid status value: only 0 or 1 are valid"""'], {}), "('Invalid status value: only 0 or 1 are valid')\n", (10995, 11042), False, 'from eth_tester.exceptions import AccountLocked, BlockNotFound, FilterNotFound, SnapshotNotFound, TransactionNotFound, ValidationError\n'), ((13964, 14005), 'eth_tester.utils.transactions.extract_valid_transaction_params', 'extract_valid_transaction_params', (['pending'], {}), '(pending)\n', (13996, 14005), False, 'from eth_tester.utils.transactions import extract_valid_transaction_params, remove_matching_transaction_from_list\n'), ((19470, 19559), 'cytoolz.functoolz.compose', 'compose', (['bool', 'self.get_block_by_hash', 'self.normalizer.normalize_outbound_block_hash'], {}), '(bool, self.get_block_by_hash, self.normalizer.\n normalize_outbound_block_hash)\n', (19477, 19559), False, 'from cytoolz.functoolz import compose, excepts, partial\n'), ((19931, 20032), 'cytoolz.functoolz.compose', 'compose', (['bool', 'self.get_transaction_by_hash', 'self.normalizer.normalize_outbound_transaction_hash'], {}), '(bool, self.get_transaction_by_hash, self.normalizer.\n normalize_outbound_transaction_hash)\n', (19938, 20032), False, 'from cytoolz.functoolz import compose, excepts, partial\n'), ((22507, 22531), 'eth_utils.is_integer', 'is_integer', (['raw_to_block'], {}), '(raw_to_block)\n', (22517, 22531), False, 'from eth_utils import is_integer, is_same_address, to_dict, to_list, to_tuple, to_int\n'), ((1960, 2050), 
'eth_tester.utils.transactions.remove_matching_transaction_from_list', 'remove_matching_transaction_from_list', (['self._pending_transactions', 'pending_transaction'], {}), '(self._pending_transactions,\n pending_transaction)\n', (1997, 2050), False, 'from eth_tester.utils.transactions import extract_valid_transaction_params, remove_matching_transaction_from_list\n'), ((4233, 4379), 'eth_tester.exceptions.ValidationError', 'ValidationError', (['"""Space time continuum distortion detected. Traveling backwards in time violates interdimensional ordinance 31415-926."""'], {}), "(\n 'Space time continuum distortion detected. Traveling backwards in time violates interdimensional ordinance 31415-926.'\n )\n", (4248, 4379), False, 'from eth_tester.exceptions import AccountLocked, BlockNotFound, FilterNotFound, SnapshotNotFound, TransactionNotFound, ValidationError\n'), ((5145, 5176), 'eth_utils.is_same_address', 'is_same_address', (['account', 'value'], {}), '(account, value)\n', (5160, 5176), False, 'from eth_utils import is_integer, is_same_address, to_dict, to_list, to_tuple, to_int\n'), ((5784, 5818), 'eth_tester.exceptions.ValidationError', 'ValidationError', (['"""Unknown account"""'], {}), "('Unknown account')\n", (5799, 5818), False, 'from eth_tester.exceptions import AccountLocked, BlockNotFound, FilterNotFound, SnapshotNotFound, TransactionNotFound, ValidationError\n'), ((6130, 6141), 'time.time', 'time.time', ([], {}), '()\n', (6139, 6141), False, 'import time\n'), ((6571, 6622), 'eth_tester.exceptions.ValidationError', 'ValidationError', (['"""Account does not have a password"""'], {}), "('Account does not have a password')\n", (6586, 6622), False, 'from eth_tester.exceptions import AccountLocked, BlockNotFound, FilterNotFound, SnapshotNotFound, TransactionNotFound, ValidationError\n'), ((17459, 17507), 'eth_tester.exceptions.AccountLocked', 'AccountLocked', (['"""The account is currently locked"""'], {}), "('The account is currently locked')\n", (17472, 
17507), False, 'from eth_tester.exceptions import AccountLocked, BlockNotFound, FilterNotFound, SnapshotNotFound, TransactionNotFound, ValidationError\n'), ((20549, 20588), 'operator.itemgetter', 'operator.itemgetter', (['"""transaction_hash"""'], {}), "('transaction_hash')\n", (20568, 20588), False, 'import operator\n'), ((17755, 17793), 'cytoolz.dissoc', 'dissoc', (['raw_transaction', '"""r"""', '"""s"""', '"""v"""'], {}), "(raw_transaction, 'r', 's', 'v')\n", (17761, 17793), False, 'from cytoolz import dissoc, assoc\n'), ((23516, 23551), 'eth_tester.exceptions.FilterNotFound', 'FilterNotFound', (['"""Unknown filter id"""'], {}), "('Unknown filter id')\n", (23530, 23551), False, 'from eth_tester.exceptions import AccountLocked, BlockNotFound, FilterNotFound, SnapshotNotFound, TransactionNotFound, ValidationError\n'), ((24357, 24392), 'eth_tester.exceptions.FilterNotFound', 'FilterNotFound', (['"""Unknown filter id"""'], {}), "('Unknown filter id')\n", (24371, 24392), False, 'from eth_tester.exceptions import AccountLocked, BlockNotFound, FilterNotFound, SnapshotNotFound, TransactionNotFound, ValidationError\n'), ((25274, 25309), 'eth_tester.exceptions.FilterNotFound', 'FilterNotFound', (['"""Unknown filter id"""'], {}), "('Unknown filter id')\n", (25288, 25309), False, 'from eth_tester.exceptions import AccountLocked, BlockNotFound, FilterNotFound, SnapshotNotFound, TransactionNotFound, ValidationError\n'), ((17368, 17379), 'time.time', 'time.time', ([], {}), '()\n', (17377, 17379), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""planning poker blueprint."""
# from app.models import model
from flask import Blueprint, redirect, render_template, request, url_for, session, escape, jsonify, make_response
from app.models import model
import json
# Blueprint for the main application pages; templates are resolved
# relative to this package's ./templates directory.
main_blueprint = Blueprint('main', __name__, template_folder='templates')
@main_blueprint.route("/main", methods=['GET', 'POST'])
def main():
    """Render the landing page with a welcome message."""
    return render_template(
        "index.html",
        message="Welcome to the app")
@main_blueprint.route("/healthcheck")
def healthcheck():
    """Liveness probe endpoint.

    Bug fix: the view previously returned the bool ``True``, which is
    not a valid Flask response type and raised ``TypeError`` at request
    time. Return a plain body with an explicit 200 status instead.
    """
    return "OK", 200
| [
"flask.render_template",
"flask.Blueprint"
] | [((283, 339), 'flask.Blueprint', 'Blueprint', (['"""main"""', '__name__'], {'template_folder': '"""templates"""'}), "('main', __name__, template_folder='templates')\n", (292, 339), False, 'from flask import Blueprint, redirect, render_template, request, url_for, session, escape, jsonify, make_response\n'), ((456, 502), 'flask.render_template', 'render_template', (['"""index.html"""'], {'message': 'message'}), "('index.html', message=message)\n", (471, 502), False, 'from flask import Blueprint, redirect, render_template, request, url_for, session, escape, jsonify, make_response\n')] |
import pytest
from release_often import flit
class TestVersionFilePath:
    """Tests for release_often.flit.version_file_path()."""

    def test_pyproject_does_not_exist(self, data_path):
        """A directory without a pyproject.toml raises TypeError."""
        with pytest.raises(TypeError):
            flit.version_file_path(data_path)

    def test_pyproject_missing_data(self, poetry_example_path):
        """A pyproject.toml without flit metadata raises ValueError."""
        with pytest.raises(ValueError):
            flit.version_file_path(poetry_example_path)

    @pytest.mark.parametrize(
        "example_name, expected_path",
        [
            ("src_module", "src/pkg.py"),
            ("src_pkg", "src/pkg/__init__.py"),
            ("top_module", "pkg.py"),
            ("top_pkg", "pkg/__init__.py"),
        ],
    )
    def test_found_path(self, data_path, example_name, expected_path):
        """Each supported flit layout resolves to its version file."""
        example_path = data_path / "flit" / example_name
        # Debug print removed; pytest's parametrized ids identify failures.
        assert flit.version_file_path(example_path) == example_path / expected_path

    def test_version_file_not_found(self):
        """Placeholder for the missing-version-file case (not yet written)."""
        pass
class TestReadVersion:
    """Tests for release_often.flit.read_version()."""

    @pytest.mark.parametrize(
        "source",
        [
            "__version__='1.2.3'",
            '__version__="1.2.3"',
            '__version__ = "1.2.3"',
            "__version__ = '1.2.3' # A comment!",
        ],
    )
    def test_success(self, source):
        """Every assignment style yields the bare version string."""
        version = flit.read_version(source)
        assert version == "1.2.3"
class TestChangeVersion:
    """Tests for release_often.flit.change_version()."""

    @pytest.mark.parametrize(
        "source,expect",
        [
            ("__version__='1.2.3'", "__version__='2.0.0'"),
            ('__version__="1.2.3"', '__version__="2.0.0"'),
            ('__version__ = "1.2.3"', '__version__ = "2.0.0"'),
            (
                "__version__ = '1.2.3' # A comment!",
                "__version__ = '2.0.0' # A comment!",
            ),
        ],
    )
    def test_success(self, source, expect):
        """The old version is swapped for the new one, quoting preserved."""
        result = flit.change_version(source, "1.2.3", "2.0.0")
        assert result == expect
| [
"release_often.flit.change_version",
"release_often.flit.read_version",
"pytest.mark.parametrize",
"pytest.raises",
"release_often.flit.version_file_path"
] | [((445, 633), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""example_name, expected_path"""', "[('src_module', 'src/pkg.py'), ('src_pkg', 'src/pkg/__init__.py'), (\n 'top_module', 'pkg.py'), ('top_pkg', 'pkg/__init__.py')]"], {}), "('example_name, expected_path', [('src_module',\n 'src/pkg.py'), ('src_pkg', 'src/pkg/__init__.py'), ('top_module',\n 'pkg.py'), ('top_pkg', 'pkg/__init__.py')])\n", (468, 633), False, 'import pytest\n'), ((1092, 1245), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""source"""', '["__version__=\'1.2.3\'", \'__version__="1.2.3"\', \'__version__ = "1.2.3"\',\n "__version__ = \'1.2.3\' # A comment!"]'], {}), '(\'source\', ["__version__=\'1.2.3\'",\n \'__version__="1.2.3"\', \'__version__ = "1.2.3"\',\n "__version__ = \'1.2.3\' # A comment!"])\n', (1115, 1245), False, 'import pytest\n'), ((1515, 1802), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""source,expect"""', '[("__version__=\'1.2.3\'", "__version__=\'2.0.0\'"), (\'__version__="1.2.3"\',\n \'__version__="2.0.0"\'), (\'__version__ = "1.2.3"\',\n \'__version__ = "2.0.0"\'), ("__version__ = \'1.2.3\' # A comment!",\n "__version__ = \'2.0.0\' # A comment!")]'], {}), '(\'source,expect\', [("__version__=\'1.2.3\'",\n "__version__=\'2.0.0\'"), (\'__version__="1.2.3"\', \'__version__="2.0.0"\'),\n (\'__version__ = "1.2.3"\', \'__version__ = "2.0.0"\'), (\n "__version__ = \'1.2.3\' # A comment!",\n "__version__ = \'2.0.0\' # A comment!")])\n', (1538, 1802), False, 'import pytest\n'), ((206, 230), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (219, 230), False, 'import pytest\n'), ((244, 277), 'release_often.flit.version_file_path', 'flit.version_file_path', (['data_path'], {}), '(data_path)\n', (266, 277), False, 'from release_often import flit\n'), ((356, 381), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (369, 381), False, 'import pytest\n'), ((395, 438), 'release_often.flit.version_file_path', 
'flit.version_file_path', (['poetry_example_path'], {}), '(poetry_example_path)\n', (417, 438), False, 'from release_often import flit\n'), ((879, 915), 'release_often.flit.version_file_path', 'flit.version_file_path', (['example_path'], {}), '(example_path)\n', (901, 915), False, 'from release_often import flit\n'), ((1387, 1412), 'release_often.flit.read_version', 'flit.read_version', (['source'], {}), '(source)\n', (1404, 1412), False, 'from release_often import flit\n'), ((2006, 2051), 'release_often.flit.change_version', 'flit.change_version', (['source', '"""1.2.3"""', '"""2.0.0"""'], {}), "(source, '1.2.3', '2.0.0')\n", (2025, 2051), False, 'from release_often import flit\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import shutil
import warnings
import botocore.exceptions
from zope.interface import implementer
from warehouse.packaging.interfaces import IFileStorage, IDocsStorage
@implementer(IFileStorage)
class LocalFileStorage:

    def __init__(self, base):
        """Store files beneath *base* on the local filesystem.

        This class should not be used in production, it's trivial for it
        to be used to read arbitrary files from the disk. It is intended
        ONLY for local development with trusted users. To make this
        clear, we raise a warning.
        """
        warnings.warn(
            "LocalFileStorage is intended only for use in development, you "
            "should not use it in production due to the lack of safe guards "
            "for safely locating files on disk.",
            RuntimeWarning,
        )
        self.base = base

    @classmethod
    def create_service(cls, context, request):
        """Build the service from the 'files.path' registry setting."""
        # Fixed: the setting key was a pointless f-string (no placeholders).
        return cls(request.registry.settings["files.path"])

    def get(self, path):
        """Return an open binary file object for *path* under the base."""
        return open(os.path.join(self.base, path), "rb")

    def store(self, path, file_path, *, meta=None):
        """Copy *file_path* to *path* under the base directory.

        *meta* is accepted for interface compatibility with other
        IFileStorage backends but is ignored locally.
        """
        destination = os.path.join(self.base, path)
        os.makedirs(os.path.dirname(destination), exist_ok=True)
        # Stream the copy instead of reading the whole file into memory.
        with open(destination, "wb") as dest_fp:
            with open(file_path, "rb") as src_fp:
                shutil.copyfileobj(src_fp, dest_fp)
@implementer(IDocsStorage)
class LocalDocsStorage:

    def __init__(self, base):
        """Store documentation beneath *base* on the local filesystem.

        This class should not be used in production, it's trivial for it
        to be used to read arbitrary files from the disk. It is intended
        ONLY for local development with trusted users. To make this
        clear, we raise a warning.
        """
        warnings.warn(
            "LocalDocsStorage is intended only for use in development, you "
            "should not use it in production due to the lack of safe guards "
            "for safely locating files on disk.",
            RuntimeWarning,
        )
        self.base = base

    @classmethod
    def create_service(cls, context, request):
        """Build the service from the 'docs.path' registry setting."""
        # Fixed: the setting key was a pointless f-string (no placeholders).
        return cls(request.registry.settings["docs.path"])

    def remove_by_prefix(self, prefix):
        """Delete the directory tree at *prefix*; a missing tree is a no-op."""
        directory = os.path.join(self.base, prefix)
        try:
            shutil.rmtree(directory)
        except FileNotFoundError:
            pass
@implementer(IFileStorage)
class S3FileStorage:
    """IFileStorage backend that reads and writes package files in S3."""

    def __init__(self, bucket, *, prefix=None):
        self.bucket = bucket
        self.prefix = prefix

    @classmethod
    def create_service(cls, context, request):
        """Build the service from the request's AWS session and settings."""
        settings = request.registry.settings
        aws_session = request.find_service(name="aws.session")
        s3 = aws_session.resource("s3")
        bucket = s3.Bucket(settings["files.bucket"])
        return cls(bucket, prefix=settings.get("files.prefix"))

    def _get_path(self, path):
        """Return the bucket key to use for *path*."""
        first_segment = path.split("/")[0]
        # Legacy paths start with a segment like "2.7"; pass them through
        # unchanged so existing objects keep resolving.
        if len(first_segment) > 2:
            return path
        # Otherwise prepend the configured prefix (if any) so objects live
        # inside a sub directory without exposing that to end users.
        if not self.prefix:
            return path
        return self.prefix + path

    def get(self, path):
        """Return the object's body stream; FileNotFoundError if absent."""
        try:
            return self.bucket.Object(self._get_path(path)).get()["Body"]
        except botocore.exceptions.ClientError as exc:
            if exc.response["Error"]["Code"] == "NoSuchKey":
                raise FileNotFoundError("No such key: {!r}".format(path)) from None
            raise

    def store(self, path, file_path, *, meta=None):
        """Upload *file_path* to *path*, attaching *meta* as S3 metadata."""
        extra_args = {} if meta is None else {"Metadata": meta}
        self.bucket.upload_file(file_path, self._get_path(path), ExtraArgs=extra_args)
@implementer(IDocsStorage)
class S3DocsStorage:

    def __init__(self, s3_client, bucket_name, *, prefix=None):
        """Store documentation objects in the given S3 bucket."""
        self.s3_client = s3_client
        self.bucket_name = bucket_name
        self.prefix = prefix

    @classmethod
    def create_service(cls, context, request):
        """Build the service from the request's AWS session and settings."""
        session = request.find_service(name="aws.session")
        s3_client = session.client("s3")
        bucket_name = request.registry.settings["docs.bucket"]
        prefix = request.registry.settings.get("docs.prefix")
        return cls(s3_client, bucket_name, prefix=prefix)

    def remove_by_prefix(self, prefix):
        """Delete every object whose key starts with *prefix*.

        Bug fix: ``list_objects_v2`` returns at most 1,000 keys per call
        and the previous code never followed the continuation token, so
        larger documentation trees were only partially deleted. Using the
        paginator walks the complete listing.
        """
        if self.prefix:
            prefix = os.path.join(self.prefix, prefix)
        keys_to_delete = []
        paginator = self.s3_client.get_paginator('list_objects_v2')
        for page in paginator.paginate(Bucket=self.bucket_name, Prefix=prefix):
            for key in page.get('Contents', []):
                keys_to_delete.append({'Key': key['Key']})
                # Flush in batches to stay well under the S3 per-request
                # limit of 1,000 keys.
                if len(keys_to_delete) > 99:
                    self.s3_client.delete_objects(
                        Bucket=self.bucket_name,
                        Delete={'Objects': keys_to_delete}
                    )
                    keys_to_delete = []
        if len(keys_to_delete) > 0:
            self.s3_client.delete_objects(
                Bucket=self.bucket_name,
                Delete={'Objects': keys_to_delete}
            )
| [
"warnings.warn",
"zope.interface.implementer",
"shutil.rmtree"
] | [((728, 753), 'zope.interface.implementer', 'implementer', (['IFileStorage'], {}), '(IFileStorage)\n', (739, 753), False, 'from zope.interface import implementer\n'), ((1887, 1912), 'zope.interface.implementer', 'implementer', (['IDocsStorage'], {}), '(IDocsStorage)\n', (1898, 1912), False, 'from zope.interface import implementer\n'), ((2842, 2867), 'zope.interface.implementer', 'implementer', (['IFileStorage'], {}), '(IFileStorage)\n', (2853, 2867), False, 'from zope.interface import implementer\n'), ((4407, 4432), 'zope.interface.implementer', 'implementer', (['IDocsStorage'], {}), '(IDocsStorage)\n', (4418, 4432), False, 'from zope.interface import implementer\n'), ((1077, 1279), 'warnings.warn', 'warnings.warn', (['"""LocalFileStorage is intended only for use in development, you should not use it in production due to the lack of safe guards for safely locating files on disk."""', 'RuntimeWarning'], {}), "(\n 'LocalFileStorage is intended only for use in development, you should not use it in production due to the lack of safe guards for safely locating files on disk.'\n , RuntimeWarning)\n", (1090, 1279), False, 'import warnings\n'), ((2236, 2438), 'warnings.warn', 'warnings.warn', (['"""LocalDocsStorage is intended only for use in development, you should not use it in production due to the lack of safe guards for safely locating files on disk."""', 'RuntimeWarning'], {}), "(\n 'LocalDocsStorage is intended only for use in development, you should not use it in production due to the lack of safe guards for safely locating files on disk.'\n , RuntimeWarning)\n", (2249, 2438), False, 'import warnings\n'), ((2763, 2787), 'shutil.rmtree', 'shutil.rmtree', (['directory'], {}), '(directory)\n', (2776, 2787), False, 'import shutil\n')] |
# This script shows how to connect to a JIRA instance with a
# username and password over HTTP BASIC authentication.
from collections import Counter
from jira import JIRA
# By default, the client will connect to a JIRA instance started from the Atlassian Plugin SDK.
# See
# https://developer.atlassian.com/display/DOCS/Installing+the+Atlassian+Plugin+SDK
# for details.
# Connect to a local JIRA instance (as started from the Atlassian
# Plugin SDK) over HTTP BASIC authentication.
jira = JIRA(basic_auth=('admin', 'admin'))  # a username/password tuple

# Mutable application properties for this server (requires the
# jira-system-administrators permission).
props = jira.application_properties()

# All issues reported by the admin.
issues = jira.search_issues('assignee=admin')

# The top three projects containing issues reported by admin.
project_keys = [issue.fields.project.key for issue in issues]
top_three = Counter(project_keys).most_common(3)
| [
"collections.Counter",
"jira.JIRA"
] | [((380, 415), 'jira.JIRA', 'JIRA', ([], {'basic_auth': "('admin', 'admin')"}), "(basic_auth=('admin', 'admin'))\n", (384, 415), False, 'from jira import JIRA\n'), ((760, 815), 'collections.Counter', 'Counter', (['[issue.fields.project.key for issue in issues]'], {}), '([issue.fields.project.key for issue in issues])\n', (767, 815), False, 'from collections import Counter\n')] |
"""Main application class and user interface helper routines for
'git-cvs'."""
import os.path
import re
from cvsgit.cmd import Cmd
from cvsgit.error import Error
from cvsgit.git import Git
from cvsgit.cvs import CVS
from cvsgit.meta import MetaDb
from cvsgit.i18n import _
from cvsgit.term import Progress
class Command(Cmd):
    """Base class for conduit commands.

    Provides helpers for registering the command line options shared by
    several git-cvs subcommands.
    """

    def add_authors_option(self):
        """Register the --authors option (path to a login->fullname map)."""
        self.add_option('--authors', metavar='AUTHORS', help=\
            _("Map CVS committer login names to fullnames."))

    def finalize_authors_option(self):
        """Replace options.authors (a file path) with a parsed dict.

        Reading stops at the first empty line; malformed lines produce a
        warning and are skipped.
        """
        if not self.options.authors:
            return

        authors = {}
        with open(self.options.authors, 'r') as file:
            while True:
                line = file.readline().strip()
                if not line:
                    break
                # Fixed: the pattern is now a raw string; '\s' in a plain
                # literal is an invalid escape sequence on modern Python.
                match = re.match(r'([^\s]+)\s+(.+)', line)
                if match:
                    login, fullname = match.groups()
                    authors[login] = fullname
                else:
                    self.warn('invalid line in authors map: %s' % line)

        self.options.authors = authors

    def add_stop_on_unknown_author_option(self):
        """Register the --stop-on-unknown-author flag."""
        self.add_option('--stop-on-unknown-author', action='store_true', help=\
            _("Abort the operation if any author mapping is missing."))

    def add_quiet_option(self):
        """Register the --quiet flag."""
        self.add_option('--quiet', action='store_true', help=\
            _("Only report error and warning messages."))

    def add_verbose_option(self):
        """Register the --verbose flag."""
        self.add_option('--verbose', action='store_true', help=\
            _("Display each changeset as it is imported."))
class ConduitError(Error):
    """Base exception for all errors raised by the cvsgit.main module."""
class NoSourceError(ConduitError):
    """Raised when no CVS source has been configured.

    This error may occur if you run git-cvs commands in a directory
    that has not been initialized with "git-cvs init".
    """

    def __init__(self):
        message = _("'cvs.source' is unset; not a git-cvs repository?")
        super(NoSourceError, self).__init__(message)
class UnknownAuthorFullnames(ConduitError):
    """Raised when there is no known fullname for an author's login
    name and --stop-on-missing-author was given on the command-line.
    """

    def __init__(self, authors):
        msg = 'unknown authors: %s' % ', '.join(authors)
        # Bug fix: super() was previously called with ConduitError as its
        # first argument, which skips ConduitError itself in the MRO.
        super(UnknownAuthorFullnames, self).__init__(msg)
class Conduit(object):
    """CVS-to-Git conduit logic.

    Ties together the Git working repository, the CVS source repository
    and the metadata database used to track imported changesets.
    """

    def __init__(self, directory=None):
        self.git = Git(directory)
        self.branch = 'refs/heads/cvs/HEAD'
        self._cvs = None
        self._config = {}

    def config_get(self, varname):
        """Get a Git variable from the 'cvs' section (cached)."""
        # 'in' replaces the Python-2-only dict.has_key(); it behaves
        # identically and also works on Python 3.
        if varname in self._config:
            return self._config[varname]
        value = self.git.config_get('cvs.' + varname)
        self._config[varname] = value
        return value

    def config_set(self, varname, value):
        """Set a Git variable in the 'cvs' section."""
        self.git.config_set('cvs.' + varname, value)
        self._config[varname] = value

    def get_source(self):
        """Get the CVS repository source path."""
        source = self.config_get('source')
        if source is None:
            raise NoSourceError
        return source

    def set_source(self, directory):
        """Set the CVS repository source path."""
        self.config_set('source', directory)

    source = property(get_source, set_source)

    def get_domain(self):
        """Get the configured e-mail domain for committer addresses."""
        return self.config_get('domain')

    def set_domain(self, directory):
        """Set the e-mail domain for committer addresses."""
        self.config_set('domain', directory)

    domain = property(get_domain, set_domain)

    def get_cvs(self):
        """Lazily create and return the CVS access object."""
        if self._cvs is None:
            filename = os.path.join(self.git.git_dir, 'cvsgit.db')
            metadb = MetaDb(filename)
            self._cvs = CVS(self.source, metadb)
        return self._cvs

    cvs = property(get_cvs)

    def init(self, repository, domain=None, bare=False, quiet=True):
        """Initialize the Git repository and record the CVS source."""
        self.git.init(bare=bare, quiet=quiet)
        # 'is None' replaces the unidiomatic '== None' comparison.
        if not self.git.is_bare() and \
           self.git.config_get('branch.master.remote') is None:
            self.git.config_set('branch.master.remote', '.')
            self.git.config_set('branch.master.merge', self.branch)
            self.git.config_set('branch.master.rebase', 'true')
        self.source = repository
        if domain:
            self.domain = domain

    def fetch(self, limit=None, quiet=True, verbose=False,
              authors=None, stop_on_unknown_author=False):
        """Fetch new changesets into the CVS tracking branch.
        """
        if quiet or verbose:
            progress = None
        else:
            progress = Progress()
        self.cvs.fetch(progress=progress, limit=limit)

        # XXX: Should not access private self.cvs.metadb.
        if authors and stop_on_unknown_author:
            unknown = [author for author in self.cvs.metadb.all_authors()
                       if author not in authors]
            if unknown:
                raise UnknownAuthorFullnames(unknown)

        self.git.import_changesets(self.cvs.changesets(), self.branch,
                                   domain=self.domain,
                                   limit=limit,
                                   verbose=verbose,
                                   progress=progress,
                                   total=self.cvs.count_changesets(),
                                   authors=authors,
                                   stop_on_unknown_author=\
                                   stop_on_unknown_author)

    def pull(self, limit=None, quiet=True, verbose=False, authors=None,
             stop_on_unknown_author=False):
        """Fetch and then integrate the tracking branch via 'git pull'."""
        self.fetch(limit=limit, quiet=quiet, verbose=verbose,
                   authors=authors, stop_on_unknown_author=\
                   stop_on_unknown_author)
        args = []
        if quiet:
            args.append('--quiet')
        # XXX: --quiet is not enough if branch.<branch>.rebase is true
        #self.git.pull(*args)
        import subprocess
        self.git.check_command('pull', *args, stdout=subprocess.PIPE)
| [
"cvsgit.meta.MetaDb",
"cvsgit.term.Progress",
"re.match",
"cvsgit.i18n._",
"cvsgit.cvs.CVS",
"cvsgit.git.Git"
] | [((2575, 2589), 'cvsgit.git.Git', 'Git', (['directory'], {}), '(directory)\n', (2578, 2589), False, 'from cvsgit.git import Git\n'), ((2067, 2120), 'cvsgit.i18n._', '_', (['"""\'cvs.source\' is unset; not a git-cvs repository?"""'], {}), '("\'cvs.source\' is unset; not a git-cvs repository?")\n', (2068, 2120), False, 'from cvsgit.i18n import _\n'), ((3942, 3958), 'cvsgit.meta.MetaDb', 'MetaDb', (['filename'], {}), '(filename)\n', (3948, 3958), False, 'from cvsgit.meta import MetaDb\n'), ((3983, 4007), 'cvsgit.cvs.CVS', 'CVS', (['self.source', 'metadb'], {}), '(self.source, metadb)\n', (3986, 4007), False, 'from cvsgit.cvs import CVS\n'), ((4855, 4865), 'cvsgit.term.Progress', 'Progress', ([], {}), '()\n', (4863, 4865), False, 'from cvsgit.term import Progress\n'), ((486, 534), 'cvsgit.i18n._', '_', (['"""Map CVS committer login names to fullnames."""'], {}), "('Map CVS committer login names to fullnames.')\n", (487, 534), False, 'from cvsgit.i18n import _\n'), ((858, 893), 're.match', 're.match', (['"""([^\\\\s]+)\\\\s+(.+)"""', 'line'], {}), "('([^\\\\s]+)\\\\s+(.+)', line)\n", (866, 893), False, 'import re\n'), ((1296, 1354), 'cvsgit.i18n._', '_', (['"""Abort the operation if any author mapping is missing."""'], {}), "('Abort the operation if any author mapping is missing.')\n", (1297, 1354), False, 'from cvsgit.i18n import _\n'), ((1464, 1508), 'cvsgit.i18n._', '_', (['"""Only report error and warning messages."""'], {}), "('Only report error and warning messages.')\n", (1465, 1508), False, 'from cvsgit.i18n import _\n'), ((1622, 1668), 'cvsgit.i18n._', '_', (['"""Display each changeset as it is imported."""'], {}), "('Display each changeset as it is imported.')\n", (1623, 1668), False, 'from cvsgit.i18n import _\n')] |
"""dsl.py unit tests."""
from copy import deepcopy
from io import StringIO
import logging
import pytest
from unittest.mock import call, patch, MagicMock
from tests.common.utils import DeepCopyMagicMock, patch_logger
import ruamel.yaml as yamler
from ruamel.yaml.comments import CommentedMap, CommentedSeq, TaggedScalar
import pypyr.cache.stepcache as stepcache
from pypyr.context import Context
from pypyr.dsl import (Jsonify,
PyString,
SicString,
SpecialTagDirective,
Step,
RetryDecorator,
WhileDecorator)
from pypyr.errors import (Call,
HandledError,
LoopMaxExhaustedError,
PipelineDefinitionError)
def arb_step_mock(context):
    """Stand-in run_step so module-loader mocks have a known callable.

    Ignores context; always returns the same sentinel string.
    """
    sentinel = 'from arb step mock'
    return sentinel
# region custom yaml tags
# region SpecialTagDirective base
def test_special_tag_directive_base_no_get_value():
    """Base class SpecialTagDirective raises on get_value."""
    instance = SpecialTagDirective(None)
    with pytest.raises(NotImplementedError):
        instance.get_value()


def test_special_tag_directive_base_eq():
    """Equality and inequality both delegate to repr."""
    assert SpecialTagDirective(None) == SpecialTagDirective(None)
    assert SpecialTagDirective('none') != SpecialTagDirective('some')


def test_special_tag_directive_repr_roundtrip():
    """Repr string evals back to an equivalent instance."""
    original = SpecialTagDirective('arb')
    as_repr = repr(original)
    assert as_repr == "SpecialTagDirective('arb')"
    rehydrated = eval(as_repr)
    assert isinstance(rehydrated, SpecialTagDirective)
    assert str(rehydrated) == 'arb'


def test_special_tag_directive_truthy():
    """Empty/None values are falsy; anything else is truthy."""
    assert SpecialTagDirective('blah')
    for falsy_value in (None, ''):
        assert not SpecialTagDirective(falsy_value)
# endregion SpecialTagDirective base
# region jsonify custom tag
def test_jsonify_behaves():
    """Jsonify exposes tag, str, repr, equality and json rendering."""
    assert Jsonify.yaml_tag == '!jsonify'
    jsonify = Jsonify({'a': 'string here', 'b': 123, 'c': False})
    # equality compares payloads, so a fresh equal payload matches.
    assert jsonify == Jsonify({'a': 'string here', 'b': 123, 'c': False})
    assert jsonify
    assert str(jsonify) == "{'a': 'string here', 'b': 123, 'c': False}"
    expected_repr = "Jsonify({'a': 'string here', 'b': 123, 'c': False})"
    assert repr(jsonify) == expected_repr
    # get_value renders json; the context key 'a' does not interfere.
    expected_json = '{"a": "string here", "b": 123, "c": false}'
    assert jsonify.get_value(Context({'a': 'BBB'})) == expected_json
def get_yaml_jsonify_parser():
    """Create ruamel round-trip yaml parser with the !jsonify tag handler."""
    parser = yamler.YAML(typ='rt', pure=True)
    parser.register_class(Jsonify)
    return parser


def get_yaml_with_jsonify(input_string):
    """Deserialize input_string with a jsonify-aware parser."""
    parser = get_yaml_jsonify_parser()
    return parser.load(input_string)


def get_string_from_yaml_with_jsonify(yaml):
    """Serialize yaml object back to a string."""
    with StringIO() as stream:
        get_yaml_jsonify_parser().dump(yaml, stream)
        return stream.getvalue()
def test_jsonify_roundtrip_mapping():
    """Jsonify serializes and deserializes from yaml mapping."""
    # embedded yaml is flush-left on purpose: triple-quoted literals keep
    # indentation verbatim.
    yaml_string = """\
a: 1
b: '1'
c: !jsonify
    c1: v1
    c2: 22
    c3: 123.45
d: False
"""
    yaml = get_yaml_with_jsonify(yaml_string)
    # the !jsonify tag wraps the mapping in a Jsonify whose payload is
    # ruamel's round-trip CommentedMap.
    assert type(yaml['c']) is Jsonify
    assert type(yaml['c'].value) is CommentedMap
    assert repr(yaml['c']) == f"Jsonify({yaml['c'].value!r})"
    assert yaml['c'].value == {'c1': 'v1', 'c2': 22, 'c3': 123.45}
    # get_value renders the payload as a JSON string.
    assert yaml['c'].get_value(Context()) == (
        '{"c1": "v1", "c2": 22, "c3": 123.45}')
    roundtripped_string = get_string_from_yaml_with_jsonify(yaml)
    expected = (
        "a: 1\n"
        "b: '1'\n"
        "c: !jsonify\n"
        "  c1: v1\n"
        "  c2: 22\n"
        "  c3: 123.45\n"
        "d: false\n")
    assert roundtripped_string == expected
def test_jsonify_roundtrip_sequence():
    """Jsonify serializes and de-serializes from yaml sequence."""
    yaml_string = """\
a: 1
b: '1'
c: !jsonify
    - v1
    - 22
    - 123.45
    - a: a value
      b: 123
d: False
"""
    yaml = get_yaml_with_jsonify(yaml_string)
    # a tagged sequence arrives as a Jsonify wrapping a CommentedSeq.
    assert type(yaml['c']) is Jsonify
    assert type(yaml['c'].value) is CommentedSeq
    assert repr(yaml['c']) == f"Jsonify({yaml['c'].value!r})"
    assert yaml['c'].value == ['v1',
                               22,
                               123.45,
                               {'a': 'a value',
                                'b': 123}]
    assert yaml['c'].get_value(Context()) == (
        '["v1", 22, 123.45, {"a": "a value", "b": 123}]')
    roundtripped_string = get_string_from_yaml_with_jsonify(yaml)
    expected = (
        "a: 1\n"
        "b: '1'\n"
        "c: !jsonify\n"
        "- v1\n"
        "- 22\n"
        "- 123.45\n"
        "- a: a value\n"
        "  b: 123\n"
        "d: false\n")
    assert roundtripped_string == expected
def test_jsonify_roundtrip_scalar():
    """Jsonify serializes and de-serializes from yaml scalar."""
    yaml_string = """\
a: 1
b: '1'
c: !jsonify my scalar
d: !jsonify False
e: !jsonify 123
f: !jsonify '123'
"""
    yaml = get_yaml_with_jsonify(yaml_string)
    # tagged scalars keep a ruamel TaggedScalar so the original quoting
    # style survives the round trip.
    assert type(yaml['c']) is Jsonify
    assert yaml['c'].value == 'my scalar'
    assert type(yaml['c'].scalar) is TaggedScalar
    assert repr(yaml['c']) == f"Jsonify('my scalar', {yaml['c'].scalar!r})"
    assert yaml['d'].value is False
    assert repr(yaml['d']) == f"Jsonify(False, {yaml['d'].scalar!r})"
    assert yaml['e'].value == 123
    assert repr(yaml['e']) == f"Jsonify(123, {yaml['e'].scalar!r})"
    assert yaml['f'].value == '123'
    assert repr(yaml['f']) == f"Jsonify('123', {yaml['f'].scalar!r})"
    # json output type tracks the yaml scalar type, not just its text:
    # bool -> false, int -> 123, quoted '123' stays a string.
    assert yaml['c'].get_value(Context()) == '"my scalar"'
    assert yaml['d'].get_value(Context()) == 'false'
    assert yaml['e'].get_value(Context()) == '123'
    assert yaml['f'].get_value(Context()) == '"123"'
    roundtripped_string = get_string_from_yaml_with_jsonify(yaml)
    expected = (
        "a: 1\n"
        "b: '1'\n"
        "c: !jsonify my scalar\n"
        "d: !jsonify False\n"
        "e: !jsonify 123\n"
        "f: !jsonify '123'\n")
    assert roundtripped_string == expected
def test_jsonify_roundtrip_mapping_substitutions():
    """Jsonify serializes & deserializes yaml mapping with substitutions."""
    yaml_string = """\
a: 1
b: '1'
c: !jsonify
    c1: 'v{k3}'
    c2: 22
    c3: '{k2}'
    c4: "{k1} b"
    c5: '{k4}'
d: False
"""
    yaml = get_yaml_with_jsonify(yaml_string)
    context = Context({'k1': 'string {here}',
                       'k2': 123.45,
                       'k3': 1,
                       'k4': '{k2}'})
    assert type(yaml['c']) is Jsonify
    assert type(yaml['c'].value) is CommentedMap
    assert repr(yaml['c']) == f"Jsonify({yaml['c'].value!r})"
    assert yaml['c'].value == {'c1': 'v{k3}',
                               'c2': 22,
                               'c3': '{k2}',
                               'c4': '{k1} b',
                               'c5': '{k4}'}
    # {k1}'s substituted value contains {here}, which stays literal, while
    # {k4} resolves through to k2's value (see expected output below).
    expected_json = (
        '{"c1": "v1", "c2": 22, "c3": 123.45, "c4": "string {here} b", '
        '"c5": 123.45}')
    assert yaml['c'].get_value(context) == expected_json
    roundtripped_string = get_string_from_yaml_with_jsonify(yaml)
    expected = (
        "a: 1\n"
        "b: '1'\n"
        "c: !jsonify\n"
        "  c1: v{k3}\n"
        "  c2: 22\n"
        "  c3: '{k2}'\n"
        "  c4: '{k1} b'\n"
        "  c5: '{k4}'\n"
        "d: false\n")
    assert roundtripped_string == expected
def test_jsonify_roundtrip_sequence_substitutions():
    """Jsonify serializes & de-serializes yaml sequence with substitutions."""
    yaml_string = """\
a: 1
b: '1'
c: !jsonify
    - v{k3}
    - 22
    - "{k2}"
    - a: a value
      b: '{k4}'
d: False
"""
    yaml = get_yaml_with_jsonify(yaml_string)
    context = Context({'k1': 'string {here}',
                       'k2': 123.45,
                       'k3': 1,
                       'k4': '{k2}'})
    assert type(yaml['c']) is Jsonify
    assert type(yaml['c'].value) is CommentedSeq
    assert repr(yaml['c']) == f"Jsonify({yaml['c'].value!r})"
    assert yaml['c'].value == ['v{k3}',
                               22,
                               '{k2}',
                               {'a': 'a value',
                                'b': '{k4}'}]
    # substitutions happen before json rendering, so the quoted '{k2}'
    # renders as the number 123.45 rather than a string.
    assert yaml['c'].get_value(context) == (
        '["v1", 22, 123.45, {"a": "a value", "b": 123.45}]')
    roundtripped_string = get_string_from_yaml_with_jsonify(yaml)
    expected = (
        "a: 1\n"
        "b: '1'\n"
        "c: !jsonify\n"
        "- v{k3}\n"
        "- 22\n"
        "- '{k2}'\n"
        "- a: a value\n"
        "  b: '{k4}'\n"
        "d: false\n")
    assert roundtripped_string == expected
def test_jsonify_roundtrip_scalar_substitutions():
    """Jsonify serializes & de-serializes yaml scalar with substitutions."""
    yaml_string = """\
a: 1
b: '1'
c: !jsonify '{k1}'
d: !jsonify '{k2}'
e: !jsonify '{k3}'
f: !jsonify b {k4}
"""
    yaml = get_yaml_with_jsonify(yaml_string)
    context = Context({'k1': 'my scalar',
                       'k2': False,
                       'k3': 123,
                       'k4': 'a {k1}'})
    assert type(yaml['c']) is Jsonify
    assert yaml['c'].value == '{k1}'
    assert type(yaml['c'].scalar) is TaggedScalar
    assert repr(yaml['c']) == f"Jsonify('{{k1}}', {yaml['c'].scalar!r})"
    assert yaml['d'].value == '{k2}'
    assert yaml['e'].value == '{k3}'
    assert yaml['f'].value == 'b {k4}'
    # formatting applies one pass only: {k4} -> 'a {k1}' with the nested
    # {k1} left literal in the rendered json (see last assert).
    assert yaml['c'].get_value(context) == '"my scalar"'
    assert yaml['d'].get_value(context) == 'false'
    assert yaml['e'].get_value(context) == '123'
    assert yaml['f'].get_value(context) == '"b a {k1}"'
    roundtripped_string = get_string_from_yaml_with_jsonify(yaml)
    expected = (
        "a: 1\n"
        "b: '1'\n"
        "c: !jsonify '{k1}'\n"
        "d: !jsonify '{k2}'\n"
        "e: !jsonify '{k3}'\n"
        "f: !jsonify b {k4}\n")
    assert roundtripped_string == expected
# endregion jsonify custom tag
# region py string custom tag
def test_py_string_behaves():
    """Py string exposes tag, str, repr and evaluates its expression."""
    assert PyString.yaml_tag == '!py'
    expression = PyString('1+1')
    assert str(expression) == '1+1'
    assert repr(expression) == "PyString('1+1')"
    # get_value evaluates the expression against the context namespace.
    assert expression.get_value(Context()) == 2
def test_py_string_class_methods():
    """Py string yaml class methods serialize and deserialize class."""
    # from_yaml receives a yaml node; only .value is read here.
    mock_node = MagicMock()
    mock_node.value = 'False and False'
    new_instance = PyString.from_yaml(None, mock_node)
    assert isinstance(new_instance, PyString)
    assert str(new_instance) == 'False and False'
    assert repr(new_instance) == "PyString('False and False')"
    # the expression evaluates falsy against an empty context.
    assert not new_instance.get_value(Context())
    mock_representer = MagicMock()
    PyString.to_yaml(mock_representer, mock_node)
    # to_yaml must emit the tagged scalar through the representer.
    mock_representer.represent_scalar.assert_called_once_with('!py',
                                                              'False and False'
                                                              )
def test_py_string_with_context():
    """Py string evaluates against Context key/values."""
    result = PyString('len(a)').get_value(Context({'a': '123'}))
    assert result == 3


def test_py_string_with_imports():
    """Py string can use imported global namespace."""
    from math import sqrt
    ctx = Context({'a': -3, 'b': 4})
    ctx.pystring_globals_update({'squareroot': sqrt})
    assert PyString('abs(a) + squareroot(b)').get_value(ctx) == 5
    # the eval globals don't leak into the context itself...
    assert ctx == {'a': -3, 'b': 4}
    # ...and the globals hold only what was explicitly registered.
    assert ctx._pystring_globals == {'squareroot': sqrt}
def test_py_string_with_closure_scope():
    """Free variables resolve inside nested comprehension scopes."""
    # NameError b is not defined if not a single global scope.
    # Just 'a' will work, it's the nested scope that's the prob
    expr = "[f'{x}{y}' for x in a for y in b]"
    ctx = Context({'a': '12', 'b': 'ab'})
    outcome = PyString(expr).get_value(ctx)
    assert outcome == ['1a', '1b', '2a', '2b']
    # nothing added to the pystring globals as a side-effect of eval.
    assert ctx._pystring_globals == {}
    # the context itself stays untouched too.
    assert ctx == {'a': '12', 'b': 'ab'}
def test_py_string_eq_and_neq():
    """Py string equivalence passes on repr."""
    assert PyString('arb') == PyString('arb')
    assert PyString('blah') != PyString('arb')


def test_py_string_repr_roundtrip():
    """Py string repr evals back to a working instance."""
    source_repr = repr(PyString('len("three")'))
    assert source_repr == 'PyString(\'len("three")\')'
    rehydrated = eval(source_repr)
    assert isinstance(rehydrated, PyString)
    # the rehydrated instance still evaluates its expression.
    assert rehydrated.get_value(Context()) == 5
def test_py_string_empty():
    """Empty py string raises error.

    Both None and the empty string are invalid expressions; each must
    raise ValueError with the same explanatory message.
    """
    expected_msg = ('!py string expression is empty. It must be a '
                    'valid python expression instead.')
    with pytest.raises(ValueError) as err:
        PyString(None).get_value({})
    assert str(err.value) == expected_msg
    with pytest.raises(ValueError) as err:
        PyString('').get_value(Context())
    # fix: the empty-string case previously never checked the message.
    assert str(err.value) == expected_msg
def test_py_string_truthy():
    """Empty Py String works as falsy, else truthy."""
    assert PyString('blah')
    for empty in (None, ''):
        assert not PyString(empty)
# endregion py string custom tag
# region sic string custom tag
def test_sic_string_behaves():
    """Sic string exposes tag, str, repr and returns its text verbatim."""
    assert SicString.yaml_tag == '!sic'
    literal = SicString('1+1')
    assert str(literal) == '1+1'
    assert repr(literal) == "SicString('1+1')"
    # unlike !py, the text is never evaluated.
    assert literal.get_value({}) == '1+1'
def test_sic_string_class_methods():
    """Sic string yaml class methods serialize and deserialize class."""
    # from_yaml receives a yaml node; only .value is read here.
    mock_node = MagicMock()
    mock_node.value = 'False {and} False'
    new_instance = SicString.from_yaml(None, mock_node)
    assert isinstance(new_instance, SicString)
    assert str(new_instance) == 'False {and} False'
    assert repr(new_instance) == "SicString('False {and} False')"
    # sic means verbatim: the {and} braces must NOT format.
    assert new_instance.get_value({}) == 'False {and} False'
    mock_representer = MagicMock()
    SicString.to_yaml(mock_representer, mock_node)
    # to_yaml must emit the tagged scalar through the representer.
    mock_representer.represent_scalar.assert_called_once_with(
        '!sic',
        'False {and} False'
    )
def test_sic_string_with_context():
    """Sic string ignores Context and returns its text verbatim."""
    result = SicString('len(a)').get_value(Context({'a': '123'}))
    assert result == 'len(a)'


def test_sic_string_eq_and_neq():
    """Sic string equivalence passes on repr."""
    assert SicString('arb') == SicString('arb')
    assert SicString('blah') != SicString('arb')


def test_sic_string_repr_roundtrip():
    """Sic string repr evals back to a working instance."""
    as_repr = repr(SicString('arb'))
    assert as_repr == "SicString('arb')"
    rehydrated = eval(as_repr)
    assert isinstance(rehydrated, SicString)
    assert rehydrated.get_value() == 'arb'


def test_sic_string_truthy():
    """Empty Sic String works as falsy, else truthy."""
    assert SicString('blah')
    for empty in (None, ''):
        assert not SicString(empty)
# endregion sic string custom tag
# endregion custom yaml tags
# region test setup & fixtures
# region test context
def get_test_context():
    """Return a fresh pypyr Context with standard test fixture values."""
    # built fresh on every call so tests can mutate it freely.
    payload = {
        'key1': 'value1',
        'key2': 'value2',
        'key3': 'value3',
        'key4': [
            {'k4lk1': 'value4', 'k4lk2': 'value5'},
            {'k4lk1': 'value6', 'k4lk2': 'value7'},
        ],
        'key5': False,
        'key6': True,
        'key7': 77,
    }
    return Context(payload)
# endregion test context
# region step mocks
def mock_run_step(context):
    """Stand-in run_step that marks the context to prove it executed."""
    context['test_run_step'] = 'this was set in step'


def mock_run_step_empty_context(context):
    """Stand-in run_step that empties the context in place."""
    context.clear()


def mock_run_step_none_context(context):
    """Stand-in run_step that rebinds the context local to None."""
    # rebinds only the local name; caller's mapping is unaffected.
    context = None  # noqa: F841
# endregion step mocks
# endregion test setup & fixtures
# region Step
# region Step: init
@patch('pypyr.moduleloader.get_module')
def test_simple_step_init_defaults(mocked_moduleloader):
    """Simple step initializes with defaults as expected."""
    # fix: clear the step cache so this test doesn't depend on execution
    # order - a cached 'blah' entry would bypass the module loader and
    # break assert_called_once_with below. (matches the complex variant.)
    stepcache.step_cache.clear()
    mocked_moduleloader.return_value.run_step = arb_step_mock
    with patch_logger('pypyr.dsl') as mock_logger_debug:
        step = Step('blah', 'stepsrunner')
    mock_logger_debug.assert_any_call("blah is a simple string.")
    assert step.name == 'blah'
    assert step.run_step_function('blahblah') == 'from arb step mock'
    assert step.foreach_items is None
    # for_counter only exists once a foreach decorator is configured.
    assert not hasattr(step, 'for_counter')
    assert step.in_parameters is None
    assert not step.retry_decorator
    assert step.run_me
    assert not step.skip_me
    assert step.steps_runner == 'stepsrunner'
    assert not step.swallow_me
    assert not step.while_decorator
    # simple string steps carry no yaml source location.
    assert step.line_no is None
    assert step.line_col is None
    mocked_moduleloader.assert_called_once_with('blah')
@patch('pypyr.moduleloader.get_module')
def test_complex_step_init_defaults(mocked_moduleloader):
    """Complex step initializes with defaults as expected."""
    # clear cache so the module loader mock is actually invoked.
    stepcache.step_cache.clear()
    mocked_moduleloader.return_value.run_step = arb_step_mock
    with patch_logger('pypyr.dsl') as mock_logger_debug:
        step = Step({'name': 'blah'}, 'stepsrunner')
    # complex (dict) steps log a different trace than simple strings.
    assert mock_logger_debug.call_args_list == [
        call("starting"),
        call("blah is complex."),
        call("step name: blah"),
        call("done"),
    ]
    assert step.name == 'blah'
    assert step.run_step_function('blahblah') == 'from arb step mock'
    assert step.foreach_items is None
    assert not hasattr(step, 'for_counter')
    assert step.in_parameters is None
    assert not step.retry_decorator
    assert step.run_me
    assert not step.skip_me
    assert step.steps_runner == 'stepsrunner'
    assert not step.swallow_me
    assert not step.while_decorator
    assert step.line_col is None
    assert step.line_no is None
    mocked_moduleloader.assert_called_once_with('blah')
def test_complex_step_init_with_missing_name_round_trip():
    """Step can't get step name from the yaml pipeline."""
    with pytest.raises(PipelineDefinitionError) as err_info:
        with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
            # ruamel line/col are 0-based; the error reports 1-based.
            step_info = CommentedMap({})
            step_info._yaml_set_line_col(6, 7)
            Step(step_info, None)
    assert mock_logger_error.call_count == 1
    assert mock_logger_error.mock_calls == [
        call('Error at pipeline step yaml line: 7, col: 8'),
    ]
    assert str(err_info.value) == "step must have a name."
@patch('pypyr.moduleloader.get_module', return_value=3)
def test_step_cant_get_run_step_dynamically(mocked_moduleloader):
    """Step can't get run_step method on the dynamically imported module."""
    stepcache.step_cache.clear()
    # get_module returns 3 (an int), which has no run_step attribute.
    with pytest.raises(AttributeError) as err_info:
        with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
            with patch_logger('pypyr.cache.stepcache',
                              logging.ERROR) as mock_cache_logger_error:
                Step('mocked.step', None)
    mocked_moduleloader.assert_called_once_with('mocked.step')
    mock_logger_error.assert_called_once_with(
        'Error at pipeline step mocked.step')
    mock_cache_logger_error.assert_called_once_with(
        "The step mocked.step in module 3 doesn't have a "
        "run_step(context) function.")
    assert str(err_info.value) == "'int' object has no attribute 'run_step'"
@patch('pypyr.moduleloader.get_module', return_value=3)
def test_step_cant_get_run_step_dynamically_round_trip(mocked_moduleloader):
    """Step can't get run_step method on the dynamically imported module.
    With round trip yaml loaded context.
    """
    stepcache.step_cache.clear()
    with pytest.raises(AttributeError) as err_info:
        with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
            with patch_logger('pypyr.cache.stepcache',
                              logging.ERROR) as mock_cache_logger_error:
                # ruamel 0-based (1, 2) reports as 1-based line 2, col 3.
                commented_context = CommentedMap({'name': 'mocked.step'})
                commented_context._yaml_set_line_col(1, 2)
                Step(commented_context, None)
    mocked_moduleloader.assert_called_once_with('mocked.step')
    mock_logger_error.assert_called_once_with(
        "Error at pipeline step mocked.step yaml line: 2, col: 3")
    mock_cache_logger_error.assert_called_once_with(
        "The step mocked.step in module 3 doesn't have a "
        "run_step(context) function.")
    assert str(err_info.value) == "'int' object has no attribute 'run_step'"
@patch('pypyr.moduleloader.get_module')
def test_complex_step_init_with_decorators(mocked_moduleloader):
    """Complex step initializes with decorators set."""
    stepcache.step_cache.clear()
    mocked_moduleloader.return_value.run_step = arb_step_mock
    step = Step({'name': 'blah',
                 'in': {'k1': 'v1', 'k2': 'v2'},
                 'foreach': [0],
                 'retry': {'max': 5, 'sleep': 7},
                 'run': False,
                 'skip': True,
                 'swallow': True,
                 'while': {'stop': 'stop condition',
                           'errorOnMax': True,
                           'sleep': 3,
                           'max': 4}
                 },
                'stepsrunner')
    assert step.name == 'blah'
    assert step.run_step_function('blah') == 'from arb step mock'
    assert step.foreach_items == [0]
    # fix: the foreach_items assert was accidentally duplicated here;
    # check for_counter instead, mirroring the roundtrip variant.
    assert step.for_counter is None
    assert step.in_parameters == {'k1': 'v1', 'k2': 'v2'}
    assert step.retry_decorator.max == 5
    assert step.retry_decorator.sleep == 7
    assert step.retry_decorator.retry_counter is None
    assert not step.run_me
    assert step.skip_me
    assert step.steps_runner == 'stepsrunner'
    assert step.swallow_me
    assert step.while_decorator.stop == 'stop condition'
    assert step.while_decorator.error_on_max
    assert step.while_decorator.sleep == 3
    assert step.while_decorator.max == 4
    assert step.while_decorator.while_counter is None
    mocked_moduleloader.assert_called_once_with('blah')
@patch('pypyr.moduleloader.get_module')
def test_complex_step_init_with_decorators_roundtrip(mocked_moduleloader):
    """Complex step initializes with decorators.
    Set with round trip yaml loaded context.
    """
    stepcache.step_cache.clear()
    mocked_moduleloader.return_value.run_step = arb_step_mock
    context = CommentedMap({
        'name': 'blah',
        'in': {'k1': 'v1', 'k2': 'v2'},
        'foreach': [0],
        'retry': {'max': 5, 'sleep': 7},
        'run': False,
        'skip': True,
        'swallow': True,
        'while': {
            'stop': 'stop condition',
            'errorOnMax': True,
            'sleep': 3,
            'max': 4
        }
    }
    )
    # ruamel line/col are 0-based; Step reports them 1-based below.
    context._yaml_set_line_col(8, 9)
    step = Step(context, None)
    assert step.name == 'blah'
    assert step.run_step_function('blah') == 'from arb step mock'
    assert step.foreach_items == [0]
    assert step.for_counter is None
    assert step.in_parameters == {'k1': 'v1', 'k2': 'v2'}
    assert step.retry_decorator.max == 5
    assert step.retry_decorator.sleep == 7
    assert step.retry_decorator.retry_counter is None
    assert not step.run_me
    assert step.skip_me
    assert step.swallow_me
    assert step.while_decorator.stop == 'stop condition'
    assert step.while_decorator.error_on_max
    assert step.while_decorator.sleep == 3
    assert step.while_decorator.max == 4
    assert step.while_decorator.while_counter is None
    assert step.line_no == 9
    assert step.line_col == 10
    mocked_moduleloader.assert_called_once_with('blah')
# endregion Step: init
# region Step: description
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_description(mock_invoke_step,
                                                     mock_get_module):
    """Complex step with run decorator outputs notify description."""
    # fix: removed the stray space in '@ patch.object' so the decorator
    # style matches the rest of this module.
    step = Step({'name': 'step1',
                 'description': 'test {key1} description'},
                None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.NOTIFY) as mock_logger_notify:
        step.run_step(context)
    # the {key1} placeholder formats from context before notify output.
    mock_logger_notify.assert_called_once_with('test value1 description')
    mock_invoke_step.assert_called_once()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_description_not_run(mock_invoke_step,
                                                             mock_get_module):
    """Complex step with run decorator set false doesn't run step."""
    # {key5} is False in the test context, so run evaluates falsy.
    step = Step({'name': 'step1',
                 'description': 'test description',
                 'run': '{key5}'},
                None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        with patch_logger('pypyr.dsl', logging.NOTIFY) as mock_logger_notify:
            step.run_step(context)
    # the description still notifies, but flagged as skipping.
    mock_logger_notify.assert_called_once_with('(skipping): test description')
    mock_logger_info.assert_any_call("step1 not running because run is False.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_description_skip(mock_invoke_step,
                                                          mock_get_module):
    """Complex step with skip decorator set true doesn't run step."""
    step = Step({'name': 'step1',
                 'description': 'test {key5} description',
                 'skip': True},
                None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        with patch_logger('pypyr.dsl', logging.NOTIFY) as mock_logger_notify:
            step.run_step(context)
    # the description formats substitutions even when skipping:
    # {key5} -> False.
    mock_logger_notify.assert_called_once_with(
        '(skipping): test False description')
    mock_logger_info.assert_any_call("step1 not running because skip is True.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
# endregion Step: description
# region Step: run_step: foreach
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'run_conditional_decorators')
@patch.object(Step, 'foreach_loop')
def test_foreach_none(mock_foreach, mock_run, mock_moduleloader):
    """Simple step with None foreach decorator doesn't loop."""
    step = Step('step1', None)
    context = get_test_context()
    original_len = len(context)
    step.run_step(context)
    # no foreach configured: the loop machinery must not engage.
    mock_foreach.assert_not_called()
    mock_run.assert_called_once_with(get_test_context())
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'run_conditional_decorators')
@patch.object(Step, 'foreach_loop')
def test_foreach_empty(mock_foreach, mock_run, mock_moduleloader):
    """Complex step with empty foreach decorator doesn't loop."""
    step = Step({'name': 'step1',
                 'foreach': []},
                None)
    context = get_test_context()
    original_len = len(context)
    step.run_step(context)
    # an empty foreach list behaves the same as no foreach at all.
    mock_foreach.assert_not_called()
    mock_run.assert_called_once_with(get_test_context())
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'run_conditional_decorators')
def test_foreach_once(mock_run, mock_moduleloader):
    """The foreach loops once."""
    step = Step({'name': 'step1',
                 'foreach': ['one']},
                None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    assert mock_logger_info.mock_calls == [
        call('foreach: running step one'),
        call('foreach decorator looped 1 times.')]
    assert mock_run.call_count == 1
    mutated_context = get_test_context()
    mutated_context['i'] = 'one'
    mock_run.assert_called_once_with(mutated_context)
    # validate all the in params ended up in context as intended, plus i
    assert len(context) == original_len + 1
    assert context['i'] == 'one'
    # fix: this assert was accidentally duplicated before.
    assert step.for_counter == 'one'
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'run_conditional_decorators')
@patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_foreach_twice(mock_run, mock_moduleloader):
    """The foreach loops twice."""
    # NOTE(review): DeepCopyMagicMock (tests.common.utils) presumably
    # deep-copies call args so per-iteration context values can be
    # asserted - verify against that helper.
    step = Step({'name': 'step1',
                 'foreach': ['one', 'two']},
                None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    assert mock_logger_info.mock_calls == [
        call('foreach: running step one'),
        call('foreach: running step two'),
        call('foreach decorator looped 2 times.')]
    assert mock_run.call_count == 2
    mutated_context = get_test_context()
    mutated_context['i'] = 'one'
    mock_run.assert_any_call(mutated_context)
    mutated_context['i'] = 'two'
    mock_run.assert_any_call(mutated_context)
    # validate all the in params ended up in context as intended, plus i
    assert len(context) == original_len + 1
    # after the looping's done, the i value will be the last iterator value
    assert context['i'] == 'two'
    assert step.for_counter == 'two'
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'run_conditional_decorators')
@patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_foreach_thrice_with_substitutions(mock_run, mock_moduleloader):
    """The foreach loops thrice with substitutions inside a list."""
    # {key1}/{key2} format from context; bare key3 passes through as-is.
    step = Step({'name': 'step1',
                 'foreach': ['{key1}', '{key2}', 'key3']},
                None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    assert mock_logger_info.mock_calls == [
        call('foreach: running step value1'),
        call('foreach: running step value2'),
        call('foreach: running step key3'),
        call('foreach decorator looped 3 times.')]
    assert mock_run.call_count == 3
    mutated_context = get_test_context()
    mutated_context['i'] = 'value1'
    mock_run.assert_any_call(mutated_context)
    mutated_context['i'] = 'value2'
    mock_run.assert_any_call(mutated_context)
    mutated_context['i'] = 'key3'
    mock_run.assert_any_call(mutated_context)
    # validate all the in params ended up in context as intended, plus i
    assert len(context) == original_len + 1
    # after the looping's done, the i value will be the last iterator value
    assert context['i'] == 'key3'
    assert step.for_counter == 'key3'
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'run_conditional_decorators')
@patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_foreach_with_single_key_substitution(mock_run, mock_moduleloader):
    """The foreach gets list from string format expression."""
    # '{list}' is a single substitution resolving to a whole list, not a
    # string - every element then formats individually.
    step = Step({'name': 'step1',
                 'foreach': '{list}'},
                None)
    context = get_test_context()
    context['list'] = [99, True, 'string here', 'formatted {key1}']
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    assert mock_logger_info.mock_calls == [
        call('foreach: running step 99'),
        call('foreach: running step True'),
        call('foreach: running step string here'),
        call('foreach: running step formatted value1'),
        call('foreach decorator looped 4 times.')]
    assert mock_run.call_count == 4
    mutated_context = get_test_context()
    mutated_context['list'] = [99, True, 'string here', 'formatted {key1}']
    mutated_context['i'] = 99
    mock_run.assert_any_call(mutated_context)
    mutated_context['i'] = True
    mock_run.assert_any_call(mutated_context)
    mutated_context['i'] = 'string here'
    mock_run.assert_any_call(mutated_context)
    mutated_context['i'] = 'formatted value1'
    mock_run.assert_any_call(mutated_context)
    # validate all the in params ended up in context as intended, plus i
    assert len(context) == original_len + 1
    # after the looping's done, the i value will be the last iterator value
    assert context['i'] == 'formatted value1'
    assert step.for_counter == 'formatted value1'
def mock_step_mutating_run(context):
    """Stand-in run_step flipping the run expression off for later loops."""
    context['dynamic_run_expression'] = False
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step', side_effect=mock_step_mutating_run)
def test_foreach_evaluates_run_decorator(mock_invoke, mock_moduleloader):
    """The foreach evaluates run_me expression on each loop iteration."""
    step = Step({'name': 'step1',
                 'run': '{dynamic_run_expression}',
                 'foreach': ['{key1}', '{key2}', 'key3']},
                None)
    context = get_test_context()
    context['dynamic_run_expression'] = True
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    # the first invocation sets dynamic_run_expression False, so the
    # remaining two iterations log 'not running' instead of invoking.
    assert mock_logger_info.mock_calls == [
        call('foreach: running step value1'),
        call('foreach: running step value2'),
        call('step1 not running because run is False.'),
        call('foreach: running step key3'),
        call('step1 not running because run is False.'),
        call('foreach decorator looped 3 times.')]
    assert mock_invoke.call_count == 1
    # validate all the in params ended up in context as intended, plus i
    assert len(context) == original_len + 1
    # after the looping's done, the i value will be the last iterator value
    assert context['i'] == 'key3'
    assert step.for_counter == 'key3'
def mock_step_mutating_skip(context):
    """Stand-in run_step flipping the skip expression on for later loops."""
    context['dynamic_skip_expression'] = True
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step', side_effect=mock_step_mutating_skip)
def test_foreach_evaluates_skip_decorator(mock_invoke, mock_moduleloader):
    """The foreach evaluates skip expression on each loop iteration."""
    step = Step({'name': 'step1',
                 'skip': '{dynamic_skip_expression}',
                 'foreach': ['{key1}', '{key2}', 'key3']},
                None)
    context = get_test_context()
    context['dynamic_skip_expression'] = False
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    # the first invocation flips skip to True, so iterations 2 & 3 skip.
    assert mock_logger_info.mock_calls == [
        call('foreach: running step value1'),
        call('foreach: running step value2'),
        call('step1 not running because skip is True.'),
        call('foreach: running step key3'),
        call('step1 not running because skip is True.'),
        call('foreach decorator looped 3 times.')]
    assert mock_invoke.call_count == 1
    # validate all the in params ended up in context as intended, plus i
    assert len(context) == original_len + 1
    # after the looping's done, the i value will be the last iterator value
    assert context['i'] == 'key3'
    assert step.for_counter == 'key3'
@patch('pypyr.moduleloader.get_module')
def test_foreach_evaluates_swallow_decorator(mock_moduleloader):
    """The foreach evaluates swallow expression on each loop iteration."""
    step = Step({'name': 'step1',
                 'swallow': '{dynamic_swallow_expression}',
                 'foreach': ['{key1}', '{key2}', 'key3']},
                None)
    context = get_test_context()
    context['dynamic_swallow_expression'] = False
    original_len = len(context)
    arb_error = ValueError('arb error')
    def mock_step_deliberate_error(context):
        """Mock step's run_step by setting swallow False and raising err."""
        if context['i'] == 'value2':
            context['dynamic_swallow_expression'] = True
        elif context['i'] == 'key3':
            raise arb_error
    with patch.object(Step, 'invoke_step',
                      side_effect=mock_step_deliberate_error) as mock_invoke:
        with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
            with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
                step.run_step(context)
    assert mock_logger_info.mock_calls == [
        call('foreach: running step value1'),
        call('foreach: running step value2'),
        call('foreach: running step key3'),
        call('foreach decorator looped 3 times.')]
    assert mock_invoke.call_count == 3
    assert mock_logger_error.call_count == 1
    # iteration 2 flipped swallow True, so the error raised in iteration 3
    # is logged rather than propagated.
    mock_logger_error.assert_called_once_with(
        'step1 Ignoring error '
        'because swallow is True for this step.\nValueError: arb error')
    # validate all the in params ended up in context as intended, plus i,
    # plus runErrors
    assert len(context) == original_len + 2
    # after the looping's done, the i value will be the last iterator value
    assert context['i'] == 'key3'
    assert step.for_counter == 'key3'
    # the swallowed error is recorded in full on context.
    assert context['runErrors'] == [{
        'col': None,
        'customError': {},
        'description': 'arb error',
        'exception': arb_error,
        'line': None,
        'name': 'ValueError',
        'step': step.name,
        'swallowed': True,
    }]
def test_foreach_with_iterator():
    """Run foreach over a lazily-evaluated iterator built by a PyString."""
    from itertools import product
    ctx = Context({'lst': []})
    ctx.pystring_globals_update({'product': product})
    step_def = {'name': 'pypyr.steps.py',
                'foreach': PyString('product([1, 2], ["A", "B"])'),
                'in': {'py': 'lst.append(i)'}}
    Step(step_def, None).run_step(ctx)
    # i holds the final iterator value once the loop completes
    expected_pairs = [(1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
    assert ctx == {'lst': expected_pairs, 'i': (2, 'B')}
def test_foreach_with_inline_iterator():
    """Run foreach over a generator object stored in context."""
    def arb_gen():
        yield from ['one', 'two', 'three']
    ctx = Context({'lst': [],
                   'test_iterator': arb_gen()})
    step_def = {'name': 'pypyr.steps.py',
                'foreach': PyString('test_iterator'),
                'in': {'py': 'lst.append(i)'}}
    Step(step_def, None).run_step(ctx)
    assert len(ctx) == 3
    assert ctx['lst'] == ['one', 'two', 'three']
    # i keeps the last yielded value after the loop
    assert ctx['i'] == 'three'
# endregion Step: run_step
# region Step: run_step: while
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_while_max(mock_invoke, mock_moduleloader):
    """The while runs to max when no stop condition is specified."""
    step = Step({'name': 'step1',
                 'while': {'max': 3}},
                None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    assert mock_logger_info.mock_calls == [
        call('while decorator will loop 3 times at 0.0s intervals.'),
        call('while: running step with counter 1'),
        call('while: running step with counter 2'),
        call('while: running step with counter 3')]
    assert mock_invoke.call_count == 3
    # validate all the in params ended up in context as intended, plus counter
    assert len(context) == original_len + 1
    # after the looping's done, the counter value will be the last iterator
    assert context['whileCounter'] == 3
    assert step.while_decorator.while_counter == 3
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step', side_effect=mock_step_mutating_run)
def test_while_evaluates_run_decorator(mock_invoke, mock_moduleloader):
    """The while evaluates run_me expression on each loop iteration.

    The mocked invoke_step switches dynamic_run_expression off after the
    1st iteration, so only 1 of 3 while iterations invokes the step.
    """
    step = Step({'name': 'step1',
                 'run': '{dynamic_run_expression}',
                 'while': {'max': '{whileMax}', 'stop': '{key5}'}},
                None)
    context = get_test_context()
    context['dynamic_run_expression'] = True
    context['whileMax'] = 3
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    assert mock_logger_info.mock_calls == [
        call('while decorator will loop 3 times, or until {key5} evaluates to '
             'True at 0.0s intervals.'),
        call('while: running step with counter 1'),
        call('while: running step with counter 2'),
        call('step1 not running because run is False.'),
        call('while: running step with counter 3'),
        call('step1 not running because run is False.'),
        call('while decorator looped 3 times, and {key5} never evaluated to '
             'True.')]
    assert mock_invoke.call_count == 1
    # validate all the in params ended up in context as intended, plus i
    assert len(context) == original_len + 1
    # after the looping's done, the i value will be the last iterator value
    assert context['whileCounter'] == 3
    assert step.while_decorator.while_counter == 3
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step', side_effect=[None, ValueError('whoops')])
def test_while_error_kicks_loop(mock_invoke, mock_moduleloader):
    """Error during while kicks loop.

    The mocked invoke_step raises on the 2nd call, so the while loop
    aborts after 2 of its 3 possible iterations and the error propagates.
    """
    step = Step({'name': 'step1',
                 'while': {'max': 3}},
                None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        with pytest.raises(ValueError) as err_info:
            step.run_step(context)
        assert str(err_info.value) == "whoops"
    assert mock_logger_info.mock_calls == [
        call('while decorator will loop 3 times at 0.0s intervals.'),
        call('while: running step with counter 1'),
        call('while: running step with counter 2')]
    assert mock_invoke.call_count == 2
    # validate all the in params ended up in context as intended, plus i
    # plus runErrors
    assert len(context) == original_len + 2
    # after the looping's done, the counter will be the last iterator value
    assert context['whileCounter'] == 2
    assert step.while_decorator.while_counter == 2
    assert context['runErrors'] == [{
        'col': None,
        'customError': {},
        'description': 'whoops',
        'exception': err_info.value,
        'line': None,
        'name': 'ValueError',
        'step': step.name,
        'swallowed': False,
    }]
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_while_exhausts(mock_invoke, mock_moduleloader):
    """While exhausts throws error on errorOnMax.

    stop ({key5}) stays False, so the loop runs all 3 iterations and then
    raises LoopMaxExhaustedError because errorOnMax ({key6}) is True.
    """
    step = Step({'name': 'step1',
                 'while': {'max': '{whileMax}',
                           'stop': '{key5}',
                           'errorOnMax': '{key6}'}},
                None)
    context = get_test_context()
    context['whileMax'] = 3
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        with pytest.raises(LoopMaxExhaustedError) as err_info:
            step.run_step(context)
        assert str(err_info.value) == ("while loop reached "
                                       "3 and {key5} never evaluated to True.")
    assert mock_logger_info.mock_calls == [
        call('while decorator will loop 3 times, or until {key5} evaluates to '
             'True at 0.0s intervals.'),
        call('while: running step with counter 1'),
        call('while: running step with counter 2'),
        call('while: running step with counter 3')]
    assert mock_invoke.call_count == 3
    # validate all the in params ended up in context as intended, plus i
    assert len(context) == original_len + 1
    # after the looping's done, the i value will be the last iterator value
    assert context['whileCounter'] == 3
    assert step.while_decorator.while_counter == 3
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_while_exhausts_hard_true(mock_invoke, mock_moduleloader):
    """While exhausts with literal (non-formatted) stop & errorOnMax bools.

    stop is hard-coded False, so all 3 iterations run, then errorOnMax
    True raises LoopMaxExhaustedError with the shorter message variant.
    """
    step = Step({'name': 'step1',
                 'while': {'max': '{whileMax}',
                           'stop': False,
                           'errorOnMax': True}},
                None)
    context = get_test_context()
    context['whileMax'] = 3
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        with pytest.raises(LoopMaxExhaustedError) as err_info:
            step.run_step(context)
        assert str(err_info.value) == "while loop reached 3."
    assert mock_logger_info.mock_calls == [
        call('while decorator will loop 3 times, or until False evaluates to '
             'True at 0.0s intervals.'),
        call('while: running step with counter 1'),
        call('while: running step with counter 2'),
        call('while: running step with counter 3')]
    assert mock_invoke.call_count == 3
    # validate all the in params ended up in context as intended, plus i
    assert len(context) == original_len + 1
    # after the looping's done, the i value will be the last iterator value
    assert context['whileCounter'] == 3
    assert step.while_decorator.while_counter == 3
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'run_conditional_decorators')
@ patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_while_nests_foreach_with_substitutions(mock_run, mock_moduleloader):
    """While loops twice, foreach thrice with substitutions inside a list.

    DeepCopyMagicMock snapshots the context at each call so the per-call
    assertions below see the counter values as they were at call time.
    """
    step = Step({'name': 'step1',
                 'foreach': ['{key1}', '{key2}', 'key3'],
                 'while': {'max': 2}
                 },
                None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    assert mock_logger_info.mock_calls == [
        call('while decorator will loop 2 times at 0.0s intervals.'),
        call('while: running step with counter 1'),
        call('foreach: running step value1'),
        call('foreach: running step value2'),
        call('foreach: running step key3'),
        call('foreach decorator looped 3 times.'),
        call('while: running step with counter 2'),
        call('foreach: running step value1'),
        call('foreach: running step value2'),
        call('foreach: running step key3'),
        call('foreach decorator looped 3 times.')]
    assert mock_run.call_count == 6
    mutated_context = get_test_context()
    mutated_context['whileCounter'] = 1
    mutated_context['i'] = 'value1'
    mock_run.assert_any_call(mutated_context)
    mutated_context['i'] = 'value2'
    mock_run.assert_any_call(mutated_context)
    mutated_context['i'] = 'key3'
    mock_run.assert_any_call(mutated_context)
    mutated_context['whileCounter'] = 2
    mutated_context['i'] = 'value1'
    mock_run.assert_any_call(mutated_context)
    mutated_context['i'] = 'value2'
    mock_run.assert_any_call(mutated_context)
    mutated_context['i'] = 'key3'
    mock_run.assert_any_call(mutated_context)
    # validate all the in params ended up in context as intended, plus i
    assert len(context) == original_len + 2
    # after the looping's done, the i value will be the last iterator value
    assert context['i'] == 'key3'
    assert step.for_counter == 'key3'
    assert context['whileCounter'] == 2
    assert step.while_decorator.while_counter == 2
# endregion Step: run_step: while
# region Step: invoke_step
@ patch('pypyr.moduleloader.get_module')
def test_invoke_step_pass(mocked_moduleloader):
    """invoke_step loads the step module & calls its run_step with context."""
    # clear module-level cache so the mocked loader is actually consulted
    stepcache.step_cache.clear()
    step = Step('mocked.step', None)
    step.invoke_step(get_test_context())
    mocked_moduleloader.assert_called_once_with('mocked.step')
    mocked_moduleloader.return_value.run_step.assert_called_once_with(
        {'key1': 'value1',
         'key2': 'value2',
         'key3': 'value3',
         'key4': [
             {'k4lk1': 'value4', 'k4lk2': 'value5'},
             {'k4lk1': 'value6', 'k4lk2': 'value7'}],
         'key5': False,
         'key6': True,
         'key7': 77})
@ patch('pypyr.cache.stepcache.step_cache.get_step')
def test_invoke_step_context_abides(mocked_stepcache):
    """Step mutates context & mutation abides after run_pipeline_step."""
    # mock_run_step adds 'test_run_step' to the context it receives
    mocked_stepcache.return_value = mock_run_step
    context = get_test_context()
    step = Step('mocked.step', None)
    step.invoke_step(context)
    mocked_stepcache.assert_called_once_with('mocked.step')
    assert context['test_run_step'] == 'this was set in step'
@ patch('pypyr.cache.stepcache.step_cache.get_step')
def test_invoke_step_empty_context(mocked_stepcache):
    """Empty context in step (i.e count == 0, but not is None)."""
    # mock_run_step_empty_context clears the context it receives in-place
    mocked_stepcache.return_value = mock_run_step_empty_context
    context = get_test_context()
    step = Step('mocked.step', None)
    step.invoke_step(context)
    mocked_stepcache.assert_called_once_with('mocked.step')
    assert len(context) == 0
    assert context is not None
@ patch('pypyr.cache.stepcache.step_cache.get_step')
def test_invoke_step_none_context(mocked_stepcache):
    """Step rebinding context to None doesn't affect the caller Context.

    The mocked step rebinds its local context name to None; the caller's
    context object must remain untouched afterwards.
    """
    mocked_stepcache.return_value = mock_run_step_none_context
    context = get_test_context()
    step = Step('mocked.step', None)
    # pass the real context so the mocked step's rebinding-to-None is
    # actually exercised against it. (previously this passed False, which
    # made the unchanged-context assertion below vacuously true.)
    step.invoke_step(context)
    mocked_stepcache.assert_called_once_with('mocked.step')
    assert context == {'key1': 'value1',
                       'key2': 'value2',
                       'key3': 'value3',
                       'key4': [
                           {'k4lk1': 'value4', 'k4lk2': 'value5'},
                           {'k4lk1': 'value6', 'k4lk2': 'value7'}],
                       'key5': False,
                       'key6': True,
                       'key7': 77}
# endregion Step: invoke_step
# region Step: reset_context_counters
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_reset_context_counters(mock_step_cache):
    """Reset writes live counter values & saved originals back to context."""
    ctx = {'a': 'b',
           'c': 'd',
           'whileCounter': 99,
           'retryCounter': 999,
           'i': '9999'}
    # saved original for 'a' differs from what context currently holds
    call_ref = Call(['one', 'two'], 'sg', 'fg', ('a', 'changed'))
    step = Step({'name': 'blah',
                 'while': {'max': 4},
                 'foreach': ['one', 'two'],
                 'retry': {'max': 5}},
                None)
    step.while_decorator.while_counter = 6
    step.for_counter = 'seven'
    step.retry_decorator.retry_counter = 8
    step.reset_context_counters(ctx, call_ref)
    expected = {'a': 'changed',
                'c': 'd',
                'whileCounter': 6,
                'i': 'seven',
                'retryCounter': 8}
    assert ctx == expected
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_reset_context_counters_dont_need_updating(mock_step_cache):
    """Reset is a no-op when counters & originals already match context."""
    ctx = {'a': 'b',
           'c': 'd',
           'whileCounter': 99,
           'retryCounter': 999,
           'i': '9999'}
    # saved original for 'a' equals the current context value
    call_ref = Call(['one', 'two'], 'sg', 'fg', ('a', 'b'))
    step = Step({'name': 'blah',
                 'while': {'max': 4},
                 'foreach': ['one', 'two'],
                 'retry': {'max': 5}},
                None)
    step.while_decorator.while_counter = 99
    step.for_counter = '9999'
    step.retry_decorator.retry_counter = 999
    step.reset_context_counters(ctx, call_ref)
    expected = {'a': 'b',
                'c': 'd',
                'whileCounter': 99,
                'i': '9999',
                'retryCounter': 999}
    assert ctx == expected
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_reset_context_counters_none(mock_step_cache):
    """Reset with no counters set adds a missing saved key to context."""
    ctx = {'a': 'b', 'c': 'd'}
    call_ref = Call(['one', 'two'], 'sg', 'fg', ('x', 'z'))
    step = Step({'name': 'blah'}, None)
    step.reset_context_counters(ctx, call_ref)
    # the saved original ('x', 'z') did not exist in context; reset adds it
    assert ctx == {'a': 'b',
                   'c': 'd',
                   'x': 'z'}
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_reset_context_counters_none_none(mock_step_cache):
    """Resetting a saved original of None raises an AssertionError."""
    ctx = {'a': 'b', 'c': 'd'}
    call_ref = Call(['one', 'two'], 'sg', 'fg', ('x', None))
    step = Step({'name': 'blah'}, None)
    with pytest.raises(AssertionError):
        step.reset_context_counters(ctx, call_ref)
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_reset_context_counters_mutable(mock_step_cache):
    """Reset restores a mutable saved original correctly."""
    shared_list = ['b']
    ctx = {'a': shared_list, 'c': 'd'}
    # saved original is the same mutable object context holds
    call_ref = Call(['one', 'two'], 'sg', 'fg', ('a', shared_list))
    step = Step({'name': 'blah'}, None)
    step.reset_context_counters(ctx, call_ref)
    assert ctx == {'a': ['b'],
                   'c': 'd'}
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_reset_context_counters_mutate(mock_step_cache):
    """A mutable saved original is restored at its current (mutated) value."""
    shared_list = ['b']
    ctx = {'a': shared_list, 'c': 'd'}
    call_ref = Call(['one', 'two'], 'sg', 'fg', ('a', shared_list))
    step = Step({'name': 'blah'}, None)
    # mutate after Call captured the reference; reset sees the new value
    shared_list[0] = 'changed'
    step.reset_context_counters(ctx, call_ref)
    assert ctx == {'a': ['changed'],
                   'c': 'd'}
# endregion Step: reset_context_counters
# region Step: run_step: run
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_true(mock_invoke_step,
                                                  mock_get_module):
    """Complex step with run decorator set literal bool True will run step."""
    step = Step({'name': 'step1',
                 'run': True},
                None)
    context = get_test_context()
    original_len = len(context)
    step.run_step(context)
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_false(mock_invoke_step,
                                                   mock_get_module):
    """Complex step with run decorator set literal bool False skips step."""
    step = Step({'name': 'step1',
                 'run': False},
                None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call("step1 not running because run is False.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_str_formatting_false(
        mock_invoke_step,
        mock_get_module):
    """Complex step with run formatting expression false doesn't run step.

    {key1} formats to 'value1', which is not 'True', so run is falsy.
    """
    step = Step({
        'name': 'step1',
        # name will evaluate False because it's a string and it's not 'True'.
        'run': '{key1}'},
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call("step1 not running because run is False.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_str_false(mock_invoke_step,
                                                       mock_get_module):
    """Complex step with run set to string 'False' doesn't run step."""
    step = Step({
        'name': 'step1',
        # name will evaluate False because it's a string and it's not 'True'.
        'run': 'False'},
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call(
        "step1 not running because run is False.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_str_lower_false(mock_invoke_step,
                                                             mock_get_module):
    """Complex step with run set to lowercase string 'false' skips step."""
    step = Step({
        'name': 'step1',
        # name will evaluate False because it's a string and it's not 'True'.
        'run': 'false'},
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call(
        "step1 not running because run is False.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_bool_formatting_false(
        mock_invoke_step,
        mock_get_module):
    """Complex step with run formatting expression false doesn't run step.

    {key5} formats to bool False, so the run decorator skips the step.
    """
    step = Step({
        'name': 'step1',
        # key5 will evaluate False because it's a bool and it's False
        'run': '{key5}'},
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call(
        "step1 not running because run is False.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_bool_formatting_true(
        mock_invoke_step,
        mock_get_module):
    """Complex step with run formatting expression true runs step.

    {key6} formats to bool True, so the run decorator invokes the step.
    """
    step = Step({
        'name': 'step1',
        # key6 will evaluate True because it's a bool and it's True
        'run': '{key6}'},
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_string_true(mock_invoke_step,
                                                         mock_get_module):
    """Complex step with run set to string 'True' runs step."""
    step = Step({
        'name': 'step1',
        # 'True' will evaluate bool True
        'run': 'True'},
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_1_true(mock_invoke_step,
                                                    mock_get_module):
    """Complex step with run set to int 1 runs step."""
    step = Step({
        'name': 'step1',
        # 1 will evaluate True because it's an int and 1
        'run': 1},
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_99_true(mock_invoke_step,
                                                     mock_get_module):
    """Complex step with run set to int 99 runs step."""
    step = Step({
        'name': 'step1',
        # 99 will evaluate True because it's an int and > 0
        'run': 99
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_neg1_true(mock_invoke_step,
                                                       mock_get_module):
    """Complex step with run set to int -1 runs step."""
    step = Step({
        'name': 'step1',
        # -1 will evaluate True because it's an int and != 0
        'run': -1
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_with_single_retry(mock_invoke_step,
                                              mock_get_module):
    """Complex step with retry invokes step once when it succeeds 1st try."""
    step = Step({
        'name': 'step1',
        # step succeeds on 1st invoke, so retryCounter stays at 1
        'retry': {'max': 10}
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    mock_invoke_step.assert_called_once_with(
        {'key1': 'value1',
         'key2': 'value2',
         'key3': 'value3',
         'key4': [
             {'k4lk1': 'value4',
              'k4lk2': 'value5'},
             {'k4lk1': 'value6',
              'k4lk2': 'value7'}
         ],
         'key5': False,
         'key6': True,
         'key7': 77,
         'retryCounter': 1})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len + 1
    assert context['retryCounter'] == 1
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_with_retries(mock_invoke_step,
                                         mock_get_module):
    """Complex step with retry invokes again after a failed 1st attempt."""
    step = Step({
        'name': 'step1',
        # max 0 means retry indefinitely until success
        'retry': {'max': 0}
    },
        None)
    context = get_test_context()
    original_len = len(context)
    # 1st invoke raises, 2nd succeeds; so 2 calls and retryCounter == 2
    mock_invoke_step.side_effect = [ValueError('arb'), None]
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    assert mock_invoke_step.call_count == 2
    mock_invoke_step.assert_called_with(
        {'key1': 'value1',
         'key2': 'value2',
         'key3': 'value3',
         'key4': [
             {'k4lk1': 'value4',
              'k4lk2': 'value5'},
             {'k4lk1': 'value6',
              'k4lk2': 'value7'}
         ],
         'key5': False,
         'key6': True,
         'key7': 77,
         'retryCounter': 2})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len + 1
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step', side_effect=ValueError('arb error here'))
def test_run_on_error(mock_invoke_step,
                      mock_get_module):
    """Complex step with swallow false raises error & records onError.

    CommentedMap line/col info must surface in the error log message.
    """
    complex_step_info = CommentedMap({
        'name': 'step1',
        'swallow': 0,
        'onError': {'arb': 'value'}
    })
    # set yaml source position (0-based) so the log reports line 6, col 7
    complex_step_info._yaml_set_line_col(5, 6)
    step = Step(complex_step_info, None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with pytest.raises(ValueError) as err_info:
            step.run_step(context)
        assert str(err_info.value) == "arb error here"
    mock_logger_error.assert_called_once_with(
        "Error while running step step1 at pipeline yaml line: 6, col: 7")
    # validate all the in params ended up in context as intended,
    # plus runErrors
    assert len(context) == original_len + 1
    assert context['runErrors'] == [{
        'col': 7,
        'customError': {'arb': 'value'},
        'description': 'arb error here',
        'exception': err_info.value,
        'line': 6,
        'name': 'ValueError',
        'step': step.name,
        'swallowed': False,
    }]
# endregion Step: run_step: run
# region Step: run_step: skip
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_false(mock_invoke_step,
                                                    mock_get_module):
    """Complex step with skip decorator set false will run step."""
    step = Step({
        'name': 'step1',
        'skip': False
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_true(mock_invoke_step,
                                                   mock_get_module):
    """Complex step with skip decorator set true doesn't run step."""
    step = Step({
        'name': 'step1',
        'skip': True
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call(
        "step1 not running because skip is True.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_str_formatting_false(
        mock_invoke_step,
        mock_get_module):
    """Complex step with skip formatting expression true doesn't run step.

    {key6} formats to bool True, so the skip decorator skips the step.
    """
    step = Step({
        'name': 'step1',
        # name will evaluate True
        'skip': '{key6}'
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call(
        "step1 not running because skip is True.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_str_true(mock_invoke_step,
                                                       mock_get_module):
    """Complex step with skip set to string 'TRUE' doesn't run step."""
    step = Step({
        'name': 'step1',
        # skip evaluates True because it's a string and TRUE parses to True.
        'skip': 'TRUE'
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call(
        "step1 not running because skip is True.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_str_lower_true(mock_invoke_step,
                                                             mock_get_module):
    """Complex step with skip set to string 'true' doesn't run step."""
    step = Step({
        'name': 'step1',
        # skip will evaluate true because it's a string and true is True.
        'skip': 'true'
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call(
        "step1 not running because skip is True.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_and_skip_bool_formatting_false(
        mock_invoke_step,
        mock_get_module):
    """Complex step with run False doesn't run step; run evals before skip.

    The log asserts the run message (not the skip message), proving run
    is evaluated first.
    """
    step = Step({
        'name': 'step1',
        # key5 will evaluate False because it's a bool and it's False
        'run': '{key5}',
        'skip': True
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call(
        "step1 not running because run is False.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_bool_formatting_false(
        mock_invoke_step,
        mock_get_module):
    """Complex step with skip formatting expression false runs step.

    {key5} formats to bool False, so the step is not skipped.
    """
    step = Step({
        'name': 'step1',
        # key5 will evaluate False because it's a bool and it's False
        'skip': '{key5}'
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_string_false(
        mock_invoke_step,
        mock_get_module):
    """Complex step with skip set to string 'False' runs step."""
    step = Step({
        'name': 'step1',
        # 'False' will evaluate bool False
        'skip': 'False'
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_0_true(
        mock_invoke_step,
        mock_get_module):
    """Complex step with skip 0 runs step."""
    step = Step({
        'name': 'step1',
        # 0 will evaluate False because it's an int and 0
        'skip': 0
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    # skip 0 is falsy, so the step executes.
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_99_true(
        mock_invoke_step,
        mock_get_module):
    """Complex step with skip 99 doesn't run step."""
    step = Step({
        'name': 'step1',
        # 99 will evaluate True because it's an int and > 0
        'skip': 99
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    # any truthy int counts as skip=True.
    mock_logger_info.assert_any_call(
        "step1 not running because skip is True.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_neg1_true(mock_invoke_step,
                                                        mock_get_module):
    """Complex step with skip -1 doesn't run step."""
    step = Step({
        'name': 'step1',
        # -1 will evaluate True because it's an int and != 0
        'skip': -1
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    # negative ints are truthy too, so the step is skipped.
    mock_logger_info.assert_any_call("step1 not running because skip is True.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
# endregion Step: run_step: skip
# region Step: run_step: swallow
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_swallow_true(mock_invoke_step,
                                                 mock_get_module):
    """Complex step with swallow true runs normally even without error."""
    step = Step({
        'name': 'step1',
        'swallow': True
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    # swallow only matters when an error raises; here nothing raised.
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_swallow_false(mock_invoke_step,
                                                  mock_get_module):
    """Complex step with swallow false runs normally even without error."""
    step = Step({
        'name': 'step1',
        'swallow': False
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    # no error raised, so swallow False makes no difference to a clean run.
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
# NOTE(review): DeepCopyMagicMock presumably snapshots call args by deep copy
# so the assertion sees the context values as they were at call time — confirm.
@ patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_run_pipeline_steps_complex_swallow_true_error(mock_get_module):
    """Complex step with swallow true swallows error."""
    step = Step({
        'name': 'step1',
        # truthy int counts as swallow=True
        'swallow': 1
    },
        None)
    context = get_test_context()
    original_len = len(context)
    arb_error = ValueError('arb error here')
    with patch.object(
            Step, 'invoke_step', side_effect=arb_error) as mock_invoke_step:
        with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
            with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
                step.run_step(context)
    # the error is logged, not raised.
    mock_logger_debug.assert_any_call("done")
    mock_logger_error.assert_called_once_with(
        "step1 Ignoring error because swallow is True "
        "for this step.\n"
        "ValueError: arb error here")
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended,
    # plus runErrors
    assert len(context) == original_len + 1
    assert context['runErrors'] == [{
        'col': None,
        'customError': {},
        'description': 'arb error here',
        'exception': arb_error,
        'line': None,
        'name': 'ValueError',
        'step': step.name,
        'swallowed': True,
    }]
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step', side_effect=ValueError('arb error here'))
def test_run_pipeline_steps_complex_swallow_false_error(mock_invoke_step,
                                                        mock_get_module):
    """Complex step with swallow false raises error."""
    step = Step({
        'name': 'step1',
        # falsy int counts as swallow=False
        'swallow': 0
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with pytest.raises(ValueError) as err_info:
        step.run_step(context)
    assert str(err_info.value) == "arb error here"
    # validate all the in params ended up in context as intended,
    # plus runErrors
    assert len(context) == original_len + 1
    # even though the error propagated, it was still recorded in runErrors.
    assert context['runErrors'] == [{
        'col': None,
        'customError': {},
        'description': 'arb error here',
        'exception': err_info.value,
        'line': None,
        'name': 'ValueError',
        'step': step.name,
        'swallowed': False,
    }]
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step', side_effect=ValueError('arb error here'))
def test_run_pipeline_steps_complex_round_trip(mock_invoke_step,
                                               mock_get_module):
    """Step with yaml line/col info reports position in log and runErrors."""
    complex_step_info = CommentedMap({
        'name': 'step1',
        'swallow': 0
    })
    # ruamel yaml marks are 0-based; reporting below is 1-based.
    complex_step_info._yaml_set_line_col(5, 6)
    step = Step(complex_step_info, None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with pytest.raises(ValueError) as err_info:
            step.run_step(context)
    assert str(err_info.value) == "arb error here"
    mock_logger_error.assert_called_once_with(
        "Error while running step step1 at pipeline yaml line: 6, col: 7")
    # validate all the in params ended up in context as intended,
    # plus runErrors
    assert len(context) == original_len + 1
    assert context['runErrors'] == [{
        'col': 7,
        'customError': {},
        'description': 'arb error here',
        'exception': err_info.value,
        'line': 6,
        'name': 'ValueError',
        'step': step.name,
        'swallowed': False,
    }]
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step', side_effect=ValueError('arb error here'))
def test_run_pipeline_steps_complex_swallow_defaults_false_error(
        mock_invoke_step,
        mock_get_module):
    """Complex step with swallow not specified still raises error."""
    # no 'swallow' key: the decorator defaults to not swallowing.
    step = Step({
        'name': 'step1'
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with pytest.raises(ValueError) as err_info:
        step.run_step(context)
    assert str(err_info.value) == "arb error here"
    # validate all the in params ended up in context as intended,
    # plus runErrors
    assert len(context) == original_len + 1
    assert context['runErrors'] == [{
        'col': None,
        'customError': {},
        'description': 'arb error here',
        'exception': err_info.value,
        'line': None,
        'name': 'ValueError',
        'step': step.name,
        'swallowed': False,
    }]
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step', side_effect=ValueError('arb error here'))
@ patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_run_pipeline_steps_simple_with_error(mock_invoke_step,
                                              mock_get_module):
    """Simple step run with error should not swallow."""
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        # a bare string step has no decorators, so the error propagates.
        step = Step('step1', None)
        with pytest.raises(ValueError) as err_info:
            step.run_step(Context({'k1': 'v1'}))
        assert str(err_info.value) == "arb error here"
    mock_logger_debug.assert_any_call('step1 is a simple string.')
    mock_invoke_step.assert_called_once_with(
        context={'k1': 'v1'})
# endregion Step: run_step: swallow
# region Step: run_step: input context
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
@ patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_run_step_in_with_clean(mock_invoke_step, mock_get_module):
    """Step sets 'in' arguments in context, unset from context when done."""
    step = Step({
        'name': 'step1',
        'in': {
            'key1': 'updated1',
            'key2': 'updated2',
            'keyadded': 'added3'
        }
    },
        None)
    context = get_test_context()
    step.run_step(context)
    # step called with context updated with 'in' arguments
    assert mock_invoke_step.call_count == 1
    assert mock_invoke_step.call_args_list[0] == call(context={
        'key1': 'updated1',
        'key2': 'updated2',
        'key3': 'value3',
        'key4': [
            {'k4lk1': 'value4',
             'k4lk2': 'value5'},
            {'k4lk1': 'value6',
             'k4lk2': 'value7'}
        ],
        'key5': False,
        'key6': True,
        'key7': 77,
        'keyadded': 'added3'})
    # context when done has 'in' args removed.
    # note key1/key2 existed before the step but are removed anyway because
    # they were named in 'in'.
    assert context == {'key3': 'value3',
                       'key4': [
                           {'k4lk1': 'value4',
                            'k4lk2': 'value5'},
                           {'k4lk1': 'value6',
                            'k4lk2': 'value7'}
                       ],
                       'key5': False,
                       'key6': True,
                       'key7': 77}
# endregion Step: run_step: input context
# region Step: set_step_input_context
@ patch('pypyr.moduleloader.get_module')
def test_set_step_input_context_no_in_simple(mocked_moduleloader):
    """Set step context does nothing if no in key found in simple step."""
    simple_step = Step('blah', None)

    ctx = get_test_context()
    simple_step.set_step_input_context(ctx)

    # a simple string step carries no 'in' args, so context is untouched.
    assert ctx == get_test_context()
@ patch('pypyr.moduleloader.get_module')
def test_set_step_input_context_no_in_complex(mocked_moduleloader):
    """Set step context does nothing if no in key found in complex step."""
    complex_step = Step({'name': 'blah'}, None)

    ctx = get_test_context()
    complex_step.set_step_input_context(ctx)

    # no 'in' key on the step dict, so nothing changed.
    assert ctx == get_test_context()
@ patch('pypyr.moduleloader.get_module')
def test_set_step_input_context_in_empty(mocked_moduleloader):
    """Set step context does nothing if in key found but it's empty."""
    step_with_empty_in = Step({'name': 'blah', 'in': {}}, None)

    ctx = get_test_context()
    step_with_empty_in.set_step_input_context(ctx)

    # an empty 'in' map adds and changes nothing.
    assert ctx == get_test_context()
@ patch('pypyr.moduleloader.get_module')
def test_set_step_input_context_with_in(mocked_moduleloader):
    """Set step context adds in to context."""
    context = get_test_context()
    original_len = len(context)

    step_in = {'newkey1': 'v1',
               'newkey2': 'v2',
               'key3': 'updated in',
               'key4': [0, 1, 2, 3],
               'key5': True,
               'key6': False,
               'key7': 88}
    step = Step({'name': 'blah', 'in': step_in}, None)
    step.set_step_input_context(context)

    # exactly two brand new keys arrived on top of the originals.
    assert len(context) == original_len + 2
    # untouched keys survive as-is.
    assert context['key1'] == 'value1'
    assert context['key2'] == 'value2'
    # new keys added from 'in'.
    assert context['newkey1'] == 'v1'
    assert context['newkey2'] == 'v2'
    # overlapping keys take the 'in' values.
    assert context['key3'] == 'updated in'
    assert context['key4'] == [0, 1, 2, 3]
    assert context['key5']
    assert not context['key6']
    assert context['key7'] == 88
# endregion Step: set_step_input_context
# region Step: unset_step_input_context
def test_unset_step_input_context_in_none():
    """Unset works when in parameters None."""
    ctx = get_test_context()

    step = Step({'name': 'blah', 'in': None}, None)
    step.unset_step_input_context(ctx)

    # 'in' was None, so there was nothing to remove.
    assert ctx == get_test_context()
def test_unset_step_input_context_in_empty():
    """Unset works when in parameters exists but is empty."""
    ctx = get_test_context()

    step = Step({'name': 'blah', 'in': {}}, None)
    step.unset_step_input_context(ctx)

    # an empty 'in' map names no keys, so nothing is removed.
    assert ctx == get_test_context()
def test_unset_step_input_context():
    """Unset works when in parameters specified."""
    ctx = get_test_context()

    step_in = {'newkey1': 'v1',
               'newkey2': 'v2',
               'key3': 'updated in',
               'key4': [0, 1, 2, 3],
               'key5': True,
               'key6': False,
               'key7': 88}
    step = Step({'name': 'blah', 'in': step_in}, None)
    step.unset_step_input_context(ctx)

    # every key named in 'in' is removed, whether it pre-existed in context
    # (key3..key7) or never existed there at all (newkey1/newkey2).
    assert ctx == {'key1': 'value1',
                   'key2': 'value2'}
# endregion Step: unset_step_input_context
# region Step: save_error
@ patch('pypyr.moduleloader.get_module')
def test_save_error_with_no_previous_errors_in_context(mocked_moduleloader):
    """Save error creates runErrors list with a single entry."""
    step = Step({'name': 'blah'}, None)
    context = get_test_context()
    original_len = len(context)
    arb_error = ValueError("arb error")
    step.save_error(context, exception=arb_error, swallowed=False)
    assert len(context) == original_len + 1
    # validate all except runErrors
    assert get_test_context().items() <= context.items()
    # plain dict step has no yaml position info, hence line/col None.
    assert context['runErrors'] == [{
        'col': None,
        'customError': {},
        'description': 'arb error',
        'exception': arb_error,
        'line': None,
        'name': 'ValueError',
        'step': step.name,
        'swallowed': False,
    }]
@ patch('pypyr.moduleloader.get_module')
def test_save_error_round_trip(mocked_moduleloader):
    """Save error with CommentedMap."""
    context = get_test_context()
    step_info = CommentedMap({'name': 'arb step'})
    # yaml marks are 0-based; runErrors reports them 1-based (7 -> 8, 6 -> 7).
    step_info._yaml_set_line_col(6, 7)
    step = Step(step_info, None)
    original_len = len(context)
    arb_error = ValueError("arb error")
    step.save_error(context, exception=arb_error, swallowed=True)
    assert len(context) == original_len + 1
    assert get_test_context().items() <= context.items()
    assert context['runErrors'] == [{
        'col': 8,
        'customError': {},
        'description': 'arb error',
        'exception': arb_error,
        'line': 7,
        'name': 'ValueError',
        'step': step.name,
        'swallowed': True,
    }]
@ patch('pypyr.moduleloader.get_module')
def test_save_error_formatted(mocked_moduleloader):
    """Save error with formatting expression."""
    step = Step({'name': 'blah', 'onError': {'key': '{key1}'}}, None)
    context = get_test_context()
    original_len = len(context)
    arb_error = ValueError("arb error")
    step.save_error(context, exception=arb_error, swallowed=False)
    assert len(context) == original_len + 1
    assert get_test_context().items() <= context.items()
    # onError value '{key1}' formats against context into customError.
    assert context['runErrors'] == [{
        'col': None,
        'customError': {'key': 'value1'},
        'description': 'arb error',
        'exception': arb_error,
        'line': None,
        'name': 'ValueError',
        'step': step.name,
        'swallowed': False,
    }]
@ patch('pypyr.moduleloader.get_module')
def test_save_error_multiple_call(mocked_moduleloader):
    """Save multiple errors."""
    step = Step({'name': 'blah'}, None)
    context = get_test_context()
    original_len = len(context)
    first_arb_error = ValueError("arb error first")
    step.save_error(context, exception=first_arb_error, swallowed=True)
    second_arb_error = RuntimeError("arb error second")
    step.save_error(context, exception=second_arb_error, swallowed=False)
    assert len(context) == original_len + 1
    assert get_test_context().items() <= context.items()
    # runErrors appends, preserving the order errors were saved.
    assert len(context['runErrors']) == 2
    assert context['runErrors'][0] == {
        'col': None,
        'customError': {},
        'description': 'arb error first',
        'exception': first_arb_error,
        'line': None,
        'name': 'ValueError',
        'step': step.name,
        'swallowed': True,
    }
    assert context['runErrors'][1] == {
        'col': None,
        'customError': {},
        'description': 'arb error second',
        'exception': second_arb_error,
        'line': None,
        'name': 'RuntimeError',
        'step': step.name,
        'swallowed': False,
    }
# endregion Step: save_error
# endregion Step
# region RetryDecorator
# region RetryDecorator: init
def test_retry_init_defaults_all():
    """The RetryDecorator ctor sets defaults with nothing set."""
    retry_dec = RetryDecorator({})

    # nothing specified: every attribute falls back to its default.
    assert retry_dec.backoff is None
    assert retry_dec.backoff_args is None
    assert retry_dec.max is None
    assert retry_dec.sleep == 0
    assert retry_dec.sleep_max is None
    assert retry_dec.jrc == 0
    assert retry_dec.stop_on is None
    assert retry_dec.retry_on is None
    # counter only materializes once the retry loop runs.
    assert retry_dec.retry_counter is None
def test_retry_init_defaults_max():
    """The RetryDecorator ctor sets defaults with only max set."""
    retry_dec = RetryDecorator({'max': 3})

    # max is picked up from input...
    assert retry_dec.max == 3
    # ...and everything else falls back to its default.
    assert retry_dec.backoff is None
    assert retry_dec.backoff_args is None
    assert retry_dec.sleep == 0
    assert retry_dec.sleep_max is None
    assert retry_dec.jrc == 0
    assert retry_dec.stop_on is None
    assert retry_dec.retry_on is None
    assert retry_dec.retry_counter is None
def test_retry_init_all_attributes():
    """The RetryDecorator ctor with all props set."""
    retry_dec = RetryDecorator({'max': 3,
                                'sleep': 4.4,
                                'retryOn': [1, 2, 3],
                                'stopOn': [4, 5, 6],
                                'backoff': 'arb',
                                'sleepMax': 5.5,
                                'jrc': 6.6,
                                'backoffArgs': {'a': 'b'}})

    # each yaml input maps onto its matching attribute.
    assert retry_dec.max == 3
    assert retry_dec.sleep == 4.4
    assert retry_dec.retry_on == [1, 2, 3]
    assert retry_dec.stop_on == [4, 5, 6]
    assert retry_dec.backoff == 'arb'
    assert retry_dec.sleep_max == 5.5
    assert retry_dec.jrc == 6.6
    assert retry_dec.backoff_args == {'a': 'b'}
    # counter only materializes once the retry loop runs.
    assert retry_dec.retry_counter is None
def test_retry_init_not_a_dict():
    """The RetryDecorator raises PipelineDefinitionError on bad ctor input."""
    # a scalar instead of a map is a pipeline authoring error.
    with pytest.raises(PipelineDefinitionError) as err_info:
        RetryDecorator('arb')
    assert str(err_info.value) == (
        "retry decorator must be a dict (i.e a map) type.")
# endregion RetryDecorator: init
# region RetryDecorator: exec_iteration
def test_retry_exec_iteration_returns_true_on_success():
    """exec_iteration returns True when no error on step method."""
    retry_dec = RetryDecorator({'max': 3})
    context = Context({})
    step_method = MagicMock()

    assert retry_dec.exec_iteration(2, context, step_method, 3)

    # the counter the iteration ran with survives in context & the decorator.
    assert context == {'retryCounter': 2}
    assert retry_dec.retry_counter == 2
    # step invoked exactly once, with the counter already injected.
    step_method.assert_called_once_with({'retryCounter': 2})
def test_retry_exec_iteration_returns_true_on_max_success():
    """exec_iteration returns True when no error on step method on max."""
    retry_dec = RetryDecorator({'max': 3})
    context = Context({})
    step_method = MagicMock()

    # success on the very last permitted attempt still reports True.
    assert retry_dec.exec_iteration(3, context, step_method, 3)

    assert context == {'retryCounter': 3}
    assert retry_dec.retry_counter == 3
    step_method.assert_called_once_with({'retryCounter': 3})
def test_retry_exec_iteration_returns_false_on_error():
    """exec_iteration returns False when step errors and counter < max."""
    rd = RetryDecorator({'max': 3})
    context = Context({})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    # counter 2 of max 3: the error is swallowed so the caller retries.
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        assert not rd.exec_iteration(2, context, mock, 3)
    # context endures
    assert context['retryCounter'] == 2
    assert rd.retry_counter == 2
    assert len(context) == 1
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'retryCounter': 2})
    mock_logger_error.assert_called_once_with('retry: ignoring error because '
                                              'retryCounter < max.\n'
                                              'ValueError: arb')
def test_retry_exec_iteration_returns_false_on_error_with_retryon():
    """exec_iteration returns False when error specified in retryOn."""
    # ValueError is in retryOn, so it is swallowed and retry continues.
    rd = RetryDecorator({'max': 3,
                         'retryOn': ['KeyError', 'ValueError']})
    context = Context({})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        assert not rd.exec_iteration(2, context, mock, 3)
    # context endures
    assert context['retryCounter'] == 2
    assert rd.retry_counter == 2
    assert len(context) == 1
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'retryCounter': 2})
    mock_logger_error.assert_called_once_with('retry: ignoring error because '
                                              'retryCounter < max.\n'
                                              'ValueError: arb')
def test_retry_exec_iteration_returns_false_on_error_with_retryon_format():
    """exec_iteration returns False when error in retryOn with format."""
    # '{k1}' formats from context to 'ValueError' before matching.
    rd = RetryDecorator({'max': 3,
                         'retryOn': ['KeyError', '{k1}']})
    context = Context({'k1': 'ValueError'})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
            assert not rd.exec_iteration(2, context, mock, 3)
    # context endures
    assert context['retryCounter'] == 2
    assert rd.retry_counter == 2
    assert len(context) == 2
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'k1': 'ValueError', 'retryCounter': 2})
    mock_logger_error.assert_called_once_with('retry: ignoring error because '
                                              'retryCounter < max.\n'
                                              'ValueError: arb')
    mock_logger_debug.assert_any_call('ValueError in retryOn. Retry again.')
def test_retry_exec_iteration_raises_on_error_not_in_retryon():
    """exec_iteration raises when error not in retryon."""
    # retryOn acts as a whitelist: errors outside it abort the retry loop.
    rd = RetryDecorator({'max': 3,
                         'retryOn': ['KeyError', 'BlahError']})
    context = Context({})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with pytest.raises(ValueError) as err_info:
            rd.exec_iteration(2, context, mock, 3)
    assert str(err_info.value) == 'arb'
    # context endures
    assert context['retryCounter'] == 2
    assert rd.retry_counter == 2
    assert len(context) == 1
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'retryCounter': 2})
    mock_logger_error.assert_called_once_with(
        'ValueError not in retryOn. Raising error and exiting retry.')
def test_retry_exec_iteration_raises_on_error_in_stopon():
    """exec_iteration raises when error in stopOn."""
    # stopOn acts as a blacklist: listed errors abort the retry immediately.
    rd = RetryDecorator({'max': 3,
                         'stopOn': ['KeyError', 'ValueError']})
    context = Context({})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with pytest.raises(ValueError) as err_info:
            rd.exec_iteration(2, context, mock, 3)
    assert str(err_info.value) == 'arb'
    # context endures
    assert context['retryCounter'] == 2
    assert rd.retry_counter == 2
    assert len(context) == 1
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'retryCounter': 2})
    mock_logger_error.assert_called_once_with(
        'ValueError in stopOn. Raising error and exiting retry.')
def test_retry_exec_iteration_raises_on_error_in_stopon_format():
    """exec_iteration raises when error in stopOn with formatting."""
    # '{k1}' formats from context to the full stopOn list before matching.
    rd = RetryDecorator({'max': 3,
                         'stopOn': '{k1}'})
    context = Context({'k1': ['KeyError', 'ValueError']})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with pytest.raises(ValueError) as err_info:
            rd.exec_iteration(2, context, mock, 3)
    assert str(err_info.value) == 'arb'
    # context endures
    assert context['retryCounter'] == 2
    assert rd.retry_counter == 2
    assert len(context) == 2
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'k1': ['KeyError', 'ValueError'],
                                  'retryCounter': 2})
    mock_logger_error.assert_called_once_with(
        'ValueError in stopOn. Raising error and exiting retry.')
def test_retry_exec_iteration_returns_false_on_error_not_in_stopon():
    """exec_iteration returns False when error not in stopOn."""
    # ValueError is not listed in stopOn, so the retry carries on.
    rd = RetryDecorator({'max': 3,
                         'stopOn': ['KeyError', 'ArbError']})
    context = Context({})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        assert not rd.exec_iteration(2, context, mock, 3)
    # context endures
    assert context['retryCounter'] == 2
    assert rd.retry_counter == 2
    assert len(context) == 1
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'retryCounter': 2})
    mock_logger_error.assert_called_once_with('retry: ignoring error because '
                                              'retryCounter < max.\n'
                                              'ValueError: arb')
def test_retry_exec_iteration_returns_false_on_error_not_in_stopon_format():
    """exec_iteration returns False when error not in stopOn with format."""
    # stopOn formats from '{k1}'; the raised ValueError isn't in that list.
    rd = RetryDecorator({'max': 3,
                         'stopOn': '{k1}'})
    context = Context({'k1': ['KeyError', 'ArbError']})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
            assert not rd.exec_iteration(2, context, mock, 3)
    # context endures
    assert context['retryCounter'] == 2
    assert rd.retry_counter == 2
    assert len(context) == 2
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'k1': ['KeyError', 'ArbError'],
                                  'retryCounter': 2})
    mock_logger_error.assert_called_once_with('retry: ignoring error because '
                                              'retryCounter < max.\n'
                                              'ValueError: arb')
    mock_logger_debug.assert_any_call('ValueError not in stopOn. Continue.')
def test_retry_exec_iteration_raises_on_error_in_stopon_with_retryon():
    """exec_iteration stopOn supersedes retryOn."""
    # the error is in both lists; stopOn wins, so the error raises.
    rd = RetryDecorator({'max': 3,
                         'stopOn': ['KeyError', 'ValueError'],
                         'retryOn': ['KeyError', 'ValueError']})
    context = Context({})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with pytest.raises(ValueError) as err_info:
            rd.exec_iteration(2, context, mock, 3)
    assert str(err_info.value) == 'arb'
    # context endures
    assert context['retryCounter'] == 2
    assert rd.retry_counter == 2
    assert len(context) == 1
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'retryCounter': 2})
    mock_logger_error.assert_called_once_with(
        'ValueError in stopOn. Raising error and exiting retry.')
def test_retry_exec_iteration_raises_on_max_exhaust():
    """exec_iteration raises error if counter is max."""
    rd = RetryDecorator({'max': 3})
    context = Context({})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    # counter == max: the final failure re-raises instead of swallowing.
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        with pytest.raises(ValueError) as err_info:
            rd.exec_iteration(3, context, mock, 3)
    assert str(err_info.value) == 'arb'
    # context endures
    assert context['retryCounter'] == 3
    assert rd.retry_counter == 3
    assert len(context) == 1
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'retryCounter': 3})
    mock_logger_debug.assert_called_with('retry: max 3 retries '
                                         'exhausted. raising error.')
def test_retry_exec_iteration_raises_on_max_exhaust_with_retryon():
    """exec_iteration raises error if counter is max and supersedes retryOn."""
    # even though ValueError is in retryOn, max exhaustion wins and raises.
    rd = RetryDecorator({'max': 3,
                         'retryOn': ['KeyError', 'ValueError']})
    context = Context({})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        with pytest.raises(ValueError) as err_info:
            rd.exec_iteration(3, context, mock, 3)
    assert str(err_info.value) == 'arb'
    # context endures
    assert context['retryCounter'] == 3
    assert rd.retry_counter == 3
    assert len(context) == 1
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'retryCounter': 3})
    mock_logger_debug.assert_called_with('retry: max 3 retries '
                                         'exhausted. raising error.')
def test_retry_exec_iteration_handlederror():
    """Use inner exception when error type is HandledError."""
    rd = RetryDecorator({'max': 3,
                         'stopOn': ['KeyError', 'ArbError']})
    context = Context({})
    mock = MagicMock()
    # HandledError wraps the real error; its __cause__ is what gets
    # evaluated against stopOn and logged.
    err = HandledError()
    err.__cause__ = ValueError('arb')
    mock.side_effect = err
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        assert not rd.exec_iteration(2, context, mock, 3)
    # context endures
    assert context['retryCounter'] == 2
    assert rd.retry_counter == 2
    assert len(context) == 1
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'retryCounter': 2})
    mock_logger_error.assert_called_once_with('retry: ignoring error because '
                                              'retryCounter < max.\n'
                                              'ValueError: arb')
def test_retry_exec_iteration_handlederror_with_stopon():
    """exec_iteration evals inner error against stopon list."""
    rd = RetryDecorator({'max': 3,
                         'stopOn': '{k1}'})
    context = Context({'k1': ['KeyError', 'ArbError']})
    mock = MagicMock()
    # the wrapped ValueError (not HandledError) is matched against stopOn.
    err = HandledError()
    err.__cause__ = ValueError('arb')
    mock.side_effect = err
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
            assert not rd.exec_iteration(2, context, mock, 3)
    # context endures
    assert context['retryCounter'] == 2
    assert rd.retry_counter == 2
    assert len(context) == 2
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'k1': ['KeyError', 'ArbError'],
                                  'retryCounter': 2})
    mock_logger_error.assert_called_once_with('retry: ignoring error because '
                                              'retryCounter < max.\n'
                                              'ValueError: arb')
    mock_logger_debug.assert_any_call('ValueError not in stopOn. Continue.')
def test_retry_exec_iteration_handlederror_stopon_raises():
    """exec_iteration raises HandledError on stopOn."""
    rd = RetryDecorator({'max': 3,
                         'stopOn': ['ValueError']})
    context = Context({})
    mock = MagicMock()
    err = HandledError()
    err.__cause__ = ValueError('arb')
    mock.side_effect = err
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with pytest.raises(HandledError) as err_info:
            rd.exec_iteration(2, context, mock, 3)
    # the inner ValueError matched stopOn, but the raised type is the
    # HandledError wrapper with the original as its cause.
    assert isinstance(err_info.value.__cause__, ValueError)
    assert str(err_info.value.__cause__) == 'arb'
    # context endures
    assert context['retryCounter'] == 2
    assert rd.retry_counter == 2
    assert len(context) == 1
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'retryCounter': 2})
    mock_logger_error.assert_called_once_with(
        'ValueError in stopOn. Raising error and exiting retry.')
def test_retry_exec_iteration_handlederror_retryon_raises():
    """exec_iteration raises HandledError on retryOn."""
    rd = RetryDecorator({'max': 3,
                         'retryOn': ['KeyError', 'BlahError']})
    context = Context({})
    mock = MagicMock()
    err = HandledError()
    err.__cause__ = ValueError('arb')
    mock.side_effect = err
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with pytest.raises(HandledError) as err_info:
            rd.exec_iteration(2, context, mock, 3)
    # the inner ValueError isn't in the retryOn whitelist, so the
    # HandledError wrapper raises with the original as its cause.
    assert isinstance(err_info.value.__cause__, ValueError)
    assert str(err_info.value.__cause__) == 'arb'
    # context endures
    assert context['retryCounter'] == 2
    assert rd.retry_counter == 2
    assert len(context) == 1
    # step_method called once and only once with updated context
    mock.assert_called_once_with({'retryCounter': 2})
    mock_logger_error.assert_called_once_with(
        'ValueError not in retryOn. Raising error and exiting retry.')
# endregion RetryDecorator: exec_iteration
# region RetryDecorator: retry_loop
@patch('time.sleep')
def test_retry_loop_max_end_on_error(mock_time_sleep):
    """Retry loops until max and ends with error at end."""
    rd = RetryDecorator({'max': 3})
    context = Context({'k1': 'v1'})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        with pytest.raises(ValueError) as err_info:
            rd.retry_loop(context, mock)
    assert str(err_info.value) == 'arb'
    assert context['retryCounter'] == 3
    assert rd.retry_counter == 3
    assert mock.call_count == 3
    mock.assert_called_with({'k1': 'v1', 'retryCounter': 3})
    # sleeps happen between tries only: 3 tries -> 2 sleeps of 0s each.
    assert mock_time_sleep.call_count == 2
    mock_time_sleep.assert_called_with(0)
    assert mock_logger_info.mock_calls == [
        call('retry decorator will try 3 times with fixed backoff starting at '
             '0s intervals.'),
        call('retry: running step with counter 1'),
        call('retry: running step with counter 2'),
        call('retry: running step with counter 3')]
@patch('time.sleep')
def test_retry_loop_max_end_on_error_substitution(mock_time_sleep):
    """retry_loop with a PyString max exhausts and re-raises the error."""
    retry = RetryDecorator({'max': PyString('3')})
    ctx = Context({'k1': 'v1'})
    step = MagicMock(side_effect=ValueError('arb'))

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        with pytest.raises(ValueError) as excinfo:
            retry.retry_loop(ctx, step)

    assert str(excinfo.value) == 'arb'
    assert ctx['retryCounter'] == 3
    assert retry.retry_counter == 3
    assert step.call_count == 3
    step.assert_called_with({'k1': 'v1', 'retryCounter': 3})

    # sleeps only happen between attempts: 2 sleeps for 3 tries
    assert mock_time_sleep.call_count == 2
    mock_time_sleep.assert_called_with(0)

    assert logger_info.mock_calls == [
        call('retry decorator will try 3 times with fixed backoff starting '
             'at 0s intervals.'),
        call('retry: running step with counter 1'),
        call('retry: running step with counter 2'),
        call('retry: running step with counter 3')]
@patch('time.sleep')
def test_retry_loop_max_continue_on_success(mock_time_sleep):
    """retry_loop exits early once the step succeeds."""
    retry = RetryDecorator({'max': 3, 'sleep': 10.1})
    ctx = Context({'k1': 'v1'})
    # fail once, then succeed on the second try
    step = MagicMock(side_effect=[ValueError('arb'), None])

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        with patch_logger('pypyr.dsl', logging.DEBUG) as logger_debug:
            retry.retry_loop(ctx, step)

    assert ctx['retryCounter'] == 2
    assert retry.retry_counter == 2
    assert step.call_count == 2
    step.assert_called_with({'k1': 'v1', 'retryCounter': 2})

    # a single sleep, between try 1 and try 2
    assert mock_time_sleep.call_count == 1
    mock_time_sleep.assert_called_with(10.1)

    logger_debug.assert_any_call(
        'retry loop complete, reporting success.')
    assert logger_info.mock_calls == [
        call('retry decorator will try 3 times with fixed backoff starting at '
             '10.1s intervals.'),
        call('retry: running step with counter 1'),
        call('retry: running step with counter 2')]
@patch('time.sleep')
def test_retry_loop_max_continue_on_success_fixed_list(mock_time_sleep):
    """Retry loops breaks out of loop on success with list input to fixed.

    When sleep is a list, intervals are consumed in order and the last
    element repeats once the list is exhausted.
    """
    rd = RetryDecorator({'max': 5, 'sleep': [10.1, 10.2]})
    context = Context({'k1': 'v1'})
    mock = MagicMock()
    # fail three times, succeed on the fourth attempt
    mock.side_effect = [ValueError('arb'),
                        ValueError('arb'),
                        ValueError('arb'),
                        None]
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
            rd.retry_loop(context, mock)
    assert context['retryCounter'] == 4
    assert rd.retry_counter == 4
    assert mock.call_count == 4
    mock.assert_called_with({'k1': 'v1', 'retryCounter': 4})
    assert mock_time_sleep.call_count == 3
    # bug fix: this comparison previously had no `assert`, so it was a
    # no-op expression statement that never verified anything.
    # list cycles over last element
    assert mock_time_sleep.mock_calls == [call(10.1), call(10.2), call(10.2)]
    mock_logger_debug.assert_any_call(
        'retry loop complete, reporting success.')
    assert mock_logger_info.mock_calls == [
        call('retry decorator will try 5 times with fixed backoff starting at '
             '[10.1, 10.2]s intervals.'),
        call('retry: running step with counter 1'),
        call('retry: running step with counter 2'),
        call('retry: running step with counter 3'),
        call('retry: running step with counter 4')]
@patch('time.sleep')
def test_retry_loop_indefinite_continue_on_success(mock_time_sleep):
    """With no max, retry loops indefinitely until the step succeeds."""
    retry = RetryDecorator({'sleep': 10.1})
    ctx = Context({'k1': 'v1'})
    step = MagicMock(side_effect=[ValueError('arb1'), ValueError('arb2'), None])

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        retry.retry_loop(ctx, step)

    assert ctx['retryCounter'] == 3
    assert retry.retry_counter == 3
    assert step.call_count == 3
    step.assert_called_with({'k1': 'v1', 'retryCounter': 3})

    # sleeps only happen between attempts: 2 sleeps for 3 tries
    assert mock_time_sleep.call_count == 2
    mock_time_sleep.assert_called_with(10.1)

    assert logger_info.mock_calls == [
        call('retry decorator will try indefinitely with fixed backoff '
             'starting at 10.1s intervals.'),
        call('retry: running step with counter 1'),
        call('retry: running step with counter 2'),
        call('retry: running step with counter 3')]
@patch('time.sleep')
def test_retry_all_substitutions(mock_time_sleep):
    """Every retry decorator param accepts a formatting expression."""
    retry = RetryDecorator({'max': '{k3[1][k031]}',
                            'sleep': '{k2}'})
    ctx = Context({'k1': False,
                   'k2': 0.3,
                   'k3': [
                       0,
                       {'k031': 1, 'k032': False}
                   ]})
    invocations = []

    def fake_step(context):
        # record each invocation instead of mutating a nonlocal counter
        invocations.append(True)

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        retry.retry_loop(ctx, fake_step)

    assert ctx['retryCounter'] == 1
    assert retry.retry_counter == 1
    assert len(invocations) == 1
    # max resolved to 1, so a single try and no sleeps at all
    assert mock_time_sleep.call_count == 0

    assert logger_info.mock_calls == [
        call('retry decorator will try 1 times with fixed backoff starting at '
             '0.3s intervals.'),
        call('retry: running step with counter 1')]
@patch('pypyr.retries.random.uniform', side_effect=[11, 12, 13])
@patch('time.sleep')
def test_retry_all_substitutions_backoff(mock_sleep, mock_random):
    """Non-default backoff with every param supplied as a format expr."""
    retry = RetryDecorator({'max': '{k3[1][k031]}',
                            'sleep': '{k2}',
                            'backoff': '{k6}',
                            'jrc': '{k4}',
                            'sleepMax': '{k5}',
                            'backoffArgs': {'base': '{k7}', 'arb': '{k8}'}})
    ctx = Context({'k1': False,
                   'k2': 3,
                   'k3': [
                       0,
                       {'k031': 4, 'k032': False}
                   ],
                   'k4': 0.5,
                   'k5': 30,
                   'k6': 'exponentialjitter',
                   'k7': 3,
                   'k8': 'a value',
                   'step_count': 0})

    def fake_step(context):
        # fail on the first three attempts, succeed on the fourth
        context['step_count'] += 1
        if context['step_count'] != 4:
            raise ValueError()

    retry.retry_loop(ctx, fake_step)

    assert ctx['retryCounter'] == 4
    assert retry.retry_counter == 4
    assert ctx['step_count'] == 4
    # actual sleep durations are whatever the patched uniform returned
    assert mock_sleep.mock_calls == [call(11), call(12), call(13)]
    # presumably exponential growth capped by sleepMax on the last bound
    assert mock_random.mock_calls == [call(4.5, 9),
                                      call(13.5, 27),
                                      call(15, 30)]
@patch('pypyr.retries.random.uniform', side_effect=[11, 12, 13])
@patch('time.sleep')
def test_retry_all_substitutions_backoff_jitter_list(mock_sleep, mock_random):
    """Jitter backoff with sleep supplied as a list of intervals."""
    retry = RetryDecorator({'max': '{k3[1][k031]}',
                            'sleep': '{k2}',
                            'backoff': '{k6}',
                            'jrc': '{k4}',
                            'sleepMax': '{k5}'})
    ctx = Context({'k1': False,
                   'k2': [0.3, 0.2, 0.1],
                   'k3': [
                       0,
                       {'k031': 4, 'k032': False}
                   ],
                   'k4': 2,
                   'k5': 0.25,
                   'k6': 'jitter',
                   'step_count': 0})

    def fake_step(context):
        # fail on the first three attempts, succeed on the fourth
        context['step_count'] += 1
        if context['step_count'] != 4:
            raise ValueError()

    retry.retry_loop(ctx, fake_step)

    assert ctx['retryCounter'] == 4
    assert retry.retry_counter == 4
    assert ctx['step_count'] == 4
    # actual sleep durations are whatever the patched uniform returned
    assert mock_sleep.mock_calls == [call(11), call(12), call(13)]
    assert mock_random.mock_calls == [call(0.5, 0.25),
                                      call(0.4, 0.2),
                                      call(0.2, 0.1)]
# endregion RetryDecorator: retry_loop
# endregion RetryDecorator
# region WhileDecorator
# region WhileDecorator: init
def test_while_init_defaults_stop():
    """Ctor defaults apply when only stop is supplied."""
    decorator = WhileDecorator({'stop': 'arb'})
    assert decorator.stop == 'arb'
    assert decorator.sleep == 0
    assert decorator.max is None
    assert not decorator.error_on_max
    assert decorator.while_counter is None
def test_while_init_defaults_max():
    """Ctor defaults apply when only max is supplied."""
    decorator = WhileDecorator({'max': 3})
    assert decorator.stop is None
    assert decorator.sleep == 0
    assert decorator.max == 3
    assert not decorator.error_on_max
    assert decorator.while_counter is None
def test_while_init_all_attributes():
    """Ctor maps every supported input key onto its attribute."""
    decorator = WhileDecorator(
        {'errorOnMax': True, 'max': 3, 'sleep': 4.4, 'stop': '5'})
    assert decorator.stop == '5'
    assert decorator.sleep == 4.4
    assert decorator.max == 3
    assert decorator.error_on_max
    assert decorator.while_counter is None
def test_while_init_not_a_dict():
    """Non-dict ctor input raises PipelineDefinitionError."""
    with pytest.raises(PipelineDefinitionError) as excinfo:
        WhileDecorator('arb')

    assert str(excinfo.value) == (
        "while decorator must be a dict (i.e a map) type.")
def test_while_init_no_max_no_stop():
    """Ctor input with neither max nor stop raises PipelineDefinitionError."""
    with pytest.raises(PipelineDefinitionError) as excinfo:
        WhileDecorator({'arb': 'arbv'})

    assert str(excinfo.value) == (
        "the while decorator must have either max or "
        "stop, or both. But not neither. Note that setting stop: False with "
        "no max is an infinite loop. If an infinite loop is really what you "
        "want, set stop: False")
# endregion WhileDecorator: init
# region WhileDecorator: exec_iteration
def test_while_exec_iteration_no_stop():
    """exec_iteration returns False when no stop condition is given."""
    decorator = WhileDecorator({'max': 3})
    ctx = Context({})
    step = MagicMock()

    assert not decorator.exec_iteration(2, ctx, step)

    # counter written into context and onto the decorator itself
    assert ctx['whileCounter'] == 2
    assert decorator.while_counter == 2
    assert len(ctx) == 1
    # step invoked exactly once, with the counter injected
    step.assert_called_once_with({'whileCounter': 2})
def test_while_exec_iteration_stop_true():
    """exec_iteration returns True when stop is literal bool True."""
    decorator = WhileDecorator({'stop': True})
    ctx = Context({})
    step = MagicMock()

    assert decorator.exec_iteration(2, ctx, step)

    # counter written into context and onto the decorator itself
    assert ctx['whileCounter'] == 2
    assert decorator.while_counter == 2
    assert len(ctx) == 1
    # step invoked exactly once, with the counter injected
    step.assert_called_once_with({'whileCounter': 2})
def test_while_exec_iteration_stop_evals_true():
    """exec_iteration returns True when stop formats to True."""
    decorator = WhileDecorator({'stop': '{stop}'})
    ctx = Context({'stop': True})
    step = MagicMock()

    assert decorator.exec_iteration(2, ctx, step)

    # counter written into context and onto the decorator itself
    assert ctx['whileCounter'] == 2
    assert decorator.while_counter == 2
    assert len(ctx) == 2
    # step invoked exactly once, with the counter injected
    step.assert_called_once_with({'stop': True, 'whileCounter': 2})
def test_while_exec_iteration_stop_false():
    """exec_iteration returns False when stop is literal bool False."""
    decorator = WhileDecorator({'max': 1, 'stop': False})
    ctx = Context()
    step = MagicMock()

    assert not decorator.exec_iteration(2, ctx, step)

    # counter written into context and onto the decorator itself
    assert ctx['whileCounter'] == 2
    assert decorator.while_counter == 2
    assert len(ctx) == 1
    # step invoked exactly once, with the counter injected
    step.assert_called_once_with({'whileCounter': 2})
def test_while_exec_iteration_stop_evals_false():
    """exec_iteration returns False when stop formats to False."""
    decorator = WhileDecorator({'stop': '{stop}'})
    ctx = Context({'stop': False})
    step = MagicMock()

    assert not decorator.exec_iteration(2, ctx, step)

    # counter written into context and onto the decorator itself
    assert ctx['whileCounter'] == 2
    assert decorator.while_counter == 2
    assert len(ctx) == 2
    # step invoked exactly once, with the counter injected
    step.assert_called_once_with({'stop': False, 'whileCounter': 2})
# endregion WhileDecorator: exec_iteration
# region WhileDecorator: while_loop
def test_while_loop_stop_true():
    """Stop True still runs once: stop evaluates only after each pass."""
    decorator = WhileDecorator({'stop': True})
    step = MagicMock()

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        decorator.while_loop(Context(), step)

    step.assert_called_once()
    assert logger_info.mock_calls == [
        call('while decorator will loop until True evaluates to True '
             'at 0.0s intervals.'),
        call('while: running step with counter 1'),
        call('while loop done, stop condition True evaluated True.')]
    assert decorator.while_counter == 1
def test_while_loop_max_0():
    """Max of 0 means the loop body never runs."""
    decorator = WhileDecorator({'max': 0})
    step = MagicMock()

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        decorator.while_loop(Context(), step)

    step.assert_not_called()
    assert logger_info.mock_calls == [
        call('max 0 is 0. while only runs when max > 0.')]
    assert decorator.while_counter == 0
def test_while_loop_max_0_with_formatting():
    """Formatted max resolving to <= 0 never runs the loop body."""
    decorator = WhileDecorator({'max': '{x}'})
    step = MagicMock()

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        decorator.while_loop(Context({'x': -3}), step)

    step.assert_not_called()
    assert logger_info.mock_calls == [
        call('max {x} is -3. while only runs when max > 0.')]
    assert decorator.while_counter == 0
def test_while_loop_stop_evals_true():
    """Formatted stop evaluating True exits after a single iteration."""
    decorator = WhileDecorator({'stop': '{thisistrue}'})
    step = MagicMock()

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        decorator.while_loop(Context({'thisistrue': True}), step)

    step.assert_called_once()
    assert decorator.while_counter == 1
    assert logger_info.mock_calls == [
        call('while decorator will loop until {thisistrue} evaluates to True '
             'at 0.0s intervals.'),
        call('while: running step with counter 1'),
        call('while loop done, stop condition {thisistrue} evaluated True.')]
def test_while_loop_no_stop_no_max():
    """while_loop with neither max nor stop raises PipelineDefinitionError."""
    decorator = WhileDecorator({'stop': True})
    # force the invalid state the ctor would normally have rejected
    decorator.max = None
    decorator.stop = None
    step = MagicMock()

    with pytest.raises(PipelineDefinitionError) as excinfo:
        decorator.while_loop(Context(), step)

    step.assert_not_called()
    assert str(excinfo.value) == (
        "the while decorator must have either max or "
        "stop, or both. But not neither.")
@patch('time.sleep')
def test_while_loop_max_no_stop(mock_time_sleep):
    """while_loop runs exactly max iterations when no stop is set."""
    decorator = WhileDecorator({'max': 3})
    ctx = Context({'k1': 'v1'})
    step = MagicMock()

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        decorator.while_loop(ctx, step)

    assert ctx['whileCounter'] == 3
    assert decorator.while_counter == 3
    assert step.call_count == 3
    step.assert_called_with({'k1': 'v1', 'whileCounter': 3})

    # sleeps only between iterations: 2 sleeps for 3 passes
    assert mock_time_sleep.call_count == 2
    mock_time_sleep.assert_called_with(0)

    assert logger_info.mock_calls == [
        call('while decorator will loop 3 times at 0.0s intervals.'),
        call('while: running step with counter 1'),
        call('while: running step with counter 2'),
        call('while: running step with counter 3')]
@patch('time.sleep')
def test_while_loop_stop_no_max(mock_time_sleep):
    """while_loop with stop only loops until stop evaluates True."""
    decorator = WhileDecorator({'stop': '{k1}', 'sleep': '{k2}'})
    context = Context({'k1': False, 'k2': 0.3})
    snapshots = []

    def fake_step(context):
        # deep copy so later mutations don't alias earlier snapshots
        snapshots.append(deepcopy(context))
        if context['whileCounter'] == 3:
            context['k1'] = True

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        decorator.while_loop(context, fake_step)

    assert context['whileCounter'] == 3
    assert decorator.while_counter == 3
    assert len(snapshots) == 3
    assert snapshots == [{'k1': False, 'k2': 0.3, 'whileCounter': 1},
                         {'k1': False, 'k2': 0.3, 'whileCounter': 2},
                         {'k1': False, 'k2': 0.3, 'whileCounter': 3}]

    assert mock_time_sleep.call_count == 2
    mock_time_sleep.assert_called_with(0.3)

    assert logger_info.mock_calls == [
        call('while decorator will loop until {k1} evaluates to True at 0.3s '
             'intervals.'),
        call('while: running step with counter 1'),
        call('while: running step with counter 2'),
        call('while: running step with counter 3'),
        call('while loop done, stop condition {k1} evaluated True.')]
@patch('time.sleep')
def test_while_loop_stop_and_max_stop_before_max(mock_time_sleep):
    """With both stop and max set, stop exits the loop before max."""
    decorator = WhileDecorator({'max': 5, 'stop': '{k1}', 'sleep': '{k2}'})
    context = Context({'k1': False, 'k2': 0.3})
    snapshots = []

    def fake_step(context):
        # deep copy so later mutations don't alias earlier snapshots
        snapshots.append(deepcopy(context))
        if context['whileCounter'] == 3:
            context['k1'] = True

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        decorator.while_loop(context, fake_step)

    assert context['whileCounter'] == 3
    assert decorator.while_counter == 3
    assert len(snapshots) == 3
    assert snapshots == [{'k1': False, 'k2': 0.3, 'whileCounter': 1},
                         {'k1': False, 'k2': 0.3, 'whileCounter': 2},
                         {'k1': False, 'k2': 0.3, 'whileCounter': 3}]

    assert mock_time_sleep.call_count == 2
    mock_time_sleep.assert_called_with(0.3)

    assert logger_info.mock_calls == [
        call('while decorator will loop 5 times, or until {k1} evaluates to '
             'True at 0.3s intervals.'),
        call('while: running step with counter 1'),
        call('while: running step with counter 2'),
        call('while: running step with counter 3'),
        call('while loop done, stop condition {k1} evaluated True.')]
@patch('time.sleep')
def test_while_loop_stop_and_max_exhaust_max(mock_time_sleep):
    """With stop and max set, the loop exhausts max if stop stays False."""
    decorator = WhileDecorator({'max': 3, 'stop': '{k1}', 'sleep': '{k2}'})
    context = Context({'k1': False, 'k2': 0.3})
    snapshots = []

    def fake_step(context):
        # deep copy so later mutations don't alias earlier snapshots
        snapshots.append(deepcopy(context))

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        decorator.while_loop(context, fake_step)

    assert context['whileCounter'] == 3
    assert decorator.while_counter == 3
    assert len(snapshots) == 3
    assert snapshots == [{'k1': False, 'k2': 0.3, 'whileCounter': 1},
                         {'k1': False, 'k2': 0.3, 'whileCounter': 2},
                         {'k1': False, 'k2': 0.3, 'whileCounter': 3}]

    assert mock_time_sleep.call_count == 2
    mock_time_sleep.assert_called_with(0.3)

    assert logger_info.mock_calls == [
        call('while decorator will loop 3 times, or until {k1} evaluates to '
             'True at 0.3s intervals.'),
        call('while: running step with counter 1'),
        call('while: running step with counter 2'),
        call('while: running step with counter 3'),
        call('while decorator looped 3 times, and {k1} never evaluated to '
             'True.')]
@patch('time.sleep')
def test_while_loop_stop_and_max_exhaust_error(mock_time_sleep):
    """Exhausting max with errorOnMax True raises LoopMaxExhaustedError."""
    decorator = WhileDecorator({'max': 3,
                    'stop': '{k1}',
                    'sleep': '{k2}',
                    'errorOnMax': '{k3}'})
    context = Context({'k1': False, 'k2': 0.3, 'k3': True})
    snapshots = []

    def fake_step(context):
        # deep copy so later mutations don't alias earlier snapshots
        snapshots.append(deepcopy(context))

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        with patch_logger('pypyr.dsl', logging.ERROR) as logger_error:
            with pytest.raises(LoopMaxExhaustedError) as excinfo:
                decorator.while_loop(context, fake_step)

    assert str(excinfo.value) == (
        "while loop reached 3 and {k1} never evaluated to True.")
    assert context['whileCounter'] == 3
    assert decorator.while_counter == 3
    assert len(snapshots) == 3
    assert snapshots == [
        {'k1': False, 'k2': 0.3, 'k3': True, 'whileCounter': n}
        for n in (1, 2, 3)]

    assert mock_time_sleep.call_count == 2
    mock_time_sleep.assert_called_with(0.3)

    assert logger_info.mock_calls == [
        call('while decorator will loop 3 times, or until {k1} evaluates to '
             'True at 0.3s intervals.'),
        call('while: running step with counter 1'),
        call('while: running step with counter 2'),
        call('while: running step with counter 3')]
    assert logger_error.mock_calls == [
        call('exhausted 3 iterations of while loop, and errorOnMax is True.')
    ]
@patch('time.sleep')
def test_while_loop_max_exhaust_error(mock_time_sleep):
    """Exhausting a max-only loop with errorOnMax True raises."""
    decorator = WhileDecorator({'max': 3,
                    'sleep': '{k2}',
                    'errorOnMax': True})
    context = Context({'k1': False, 'k2': 0.3, 'k3': True})
    snapshots = []

    def fake_step(context):
        # deep copy so later mutations don't alias earlier snapshots
        snapshots.append(deepcopy(context))

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        with patch_logger('pypyr.dsl', logging.ERROR) as logger_error:
            with pytest.raises(LoopMaxExhaustedError) as excinfo:
                decorator.while_loop(context, fake_step)

    assert str(excinfo.value) == "while loop reached 3."
    assert context['whileCounter'] == 3
    assert decorator.while_counter == 3
    assert len(snapshots) == 3
    assert snapshots == [
        {'k1': False, 'k2': 0.3, 'k3': True, 'whileCounter': n}
        for n in (1, 2, 3)]

    assert mock_time_sleep.call_count == 2
    mock_time_sleep.assert_called_with(0.3)

    assert logger_info.mock_calls == [
        call('while decorator will loop 3 times at 0.3s intervals.'),
        call('while: running step with counter 1'),
        call('while: running step with counter 2'),
        call('while: running step with counter 3')]
    assert logger_error.mock_calls == [
        call('exhausted 3 iterations of while loop, and errorOnMax is True.')
    ]
@patch('time.sleep')
def test_while_loop_all_substitutions(mock_time_sleep):
    """Every while decorator param accepts a formatting expression."""
    decorator = WhileDecorator({'max': '{k3[1][k031]}',
                    'stop': '{k1}',
                    'sleep': '{k2}',
                    'errorOnMax': '{k3[1][k032]}'})
    context = Context({'k1': False,
                       'k2': 0.3,
                       'k3': [
                           0,
                           {'k031': 1, 'k032': False}
                       ]})
    invocations = []

    def fake_step(context):
        # record each invocation instead of mutating a nonlocal counter
        invocations.append(True)

    with patch_logger('pypyr.dsl', logging.INFO) as logger_info:
        decorator.while_loop(context, fake_step)

    assert context['whileCounter'] == 1
    assert decorator.while_counter == 1
    assert len(invocations) == 1
    # max resolved to 1, so a single pass and no sleeps at all
    assert mock_time_sleep.call_count == 0

    assert logger_info.mock_calls == [
        call('while decorator will loop 1 times, or until {k1} evaluates to '
             'True at 0.3s intervals.'),
        call('while: running step with counter 1'),
        call('while decorator looped 1 times, and {k1} never evaluated to '
             'True.')]
# endregion WhileDecorator: while_loop
# endregion WhileDecorator
| [
"pypyr.dsl.PyString.from_yaml",
"ruamel.yaml.YAML",
"pypyr.dsl.PyString",
"copy.deepcopy",
"unittest.mock.patch",
"pypyr.dsl.Step",
"pypyr.dsl.Jsonify",
"ruamel.yaml.comments.CommentedMap",
"unittest.mock.call",
"pypyr.dsl.PyString.to_yaml",
"pypyr.dsl.RetryDecorator",
"io.StringIO",
"unitte... | [((17151, 17189), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (17156, 17189), False, 'from unittest.mock import call, patch, MagicMock\n'), ((18085, 18123), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (18090, 18123), False, 'from unittest.mock import call, patch, MagicMock\n'), ((19764, 19818), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {'return_value': '(3)'}), "('pypyr.moduleloader.get_module', return_value=3)\n", (19769, 19818), False, 'from unittest.mock import call, patch, MagicMock\n'), ((20684, 20738), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {'return_value': '(3)'}), "('pypyr.moduleloader.get_module', return_value=3)\n", (20689, 20738), False, 'from unittest.mock import call, patch, MagicMock\n'), ((21818, 21856), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (21823, 21856), False, 'from unittest.mock import call, patch, MagicMock\n'), ((23352, 23390), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (23357, 23390), False, 'from unittest.mock import call, patch, MagicMock\n'), ((24973, 25011), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (24978, 25011), False, 'from unittest.mock import call, patch, MagicMock\n'), ((25014, 25047), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (25026, 25047), False, 'from unittest.mock import call, patch, MagicMock\n'), ((25774, 25812), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (25779, 25812), False, 'from unittest.mock import call, patch, MagicMock\n'), 
((25814, 25847), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (25826, 25847), False, 'from unittest.mock import call, patch, MagicMock\n'), ((26779, 26817), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (26784, 26817), False, 'from unittest.mock import call, patch, MagicMock\n'), ((26819, 26852), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (26831, 26852), False, 'from unittest.mock import call, patch, MagicMock\n'), ((27861, 27899), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (27866, 27899), False, 'from unittest.mock import call, patch, MagicMock\n'), ((27901, 27949), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""run_conditional_decorators"""'], {}), "(Step, 'run_conditional_decorators')\n", (27913, 27949), False, 'from unittest.mock import call, patch, MagicMock\n'), ((27951, 27985), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""foreach_loop"""'], {}), "(Step, 'foreach_loop')\n", (27963, 27985), False, 'from unittest.mock import call, patch, MagicMock\n'), ((28446, 28484), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (28451, 28484), False, 'from unittest.mock import call, patch, MagicMock\n'), ((28486, 28534), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""run_conditional_decorators"""'], {}), "(Step, 'run_conditional_decorators')\n", (28498, 28534), False, 'from unittest.mock import call, patch, MagicMock\n'), ((28536, 28570), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""foreach_loop"""'], {}), "(Step, 'foreach_loop')\n", (28548, 28570), False, 'from unittest.mock import call, patch, MagicMock\n'), ((29091, 29129), 'unittest.mock.patch', 'patch', 
(['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (29096, 29129), False, 'from unittest.mock import call, patch, MagicMock\n'), ((29131, 29179), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""run_conditional_decorators"""'], {}), "(Step, 'run_conditional_decorators')\n", (29143, 29179), False, 'from unittest.mock import call, patch, MagicMock\n'), ((30060, 30098), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (30065, 30098), False, 'from unittest.mock import call, patch, MagicMock\n'), ((30100, 30148), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""run_conditional_decorators"""'], {}), "(Step, 'run_conditional_decorators')\n", (30112, 30148), False, 'from unittest.mock import call, patch, MagicMock\n'), ((30150, 30205), 'unittest.mock.patch', 'patch', (['"""unittest.mock.MagicMock"""'], {'new': 'DeepCopyMagicMock'}), "('unittest.mock.MagicMock', new=DeepCopyMagicMock)\n", (30155, 30205), False, 'from unittest.mock import call, patch, MagicMock\n'), ((31250, 31288), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (31255, 31288), False, 'from unittest.mock import call, patch, MagicMock\n'), ((31290, 31338), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""run_conditional_decorators"""'], {}), "(Step, 'run_conditional_decorators')\n", (31302, 31338), False, 'from unittest.mock import call, patch, MagicMock\n'), ((31340, 31395), 'unittest.mock.patch', 'patch', (['"""unittest.mock.MagicMock"""'], {'new': 'DeepCopyMagicMock'}), "('unittest.mock.MagicMock', new=DeepCopyMagicMock)\n", (31345, 31395), False, 'from unittest.mock import call, patch, MagicMock\n'), ((32647, 32685), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (32652, 32685), False, 'from unittest.mock import call, patch, 
MagicMock\n'), ((32687, 32735), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""run_conditional_decorators"""'], {}), "(Step, 'run_conditional_decorators')\n", (32699, 32735), False, 'from unittest.mock import call, patch, MagicMock\n'), ((32737, 32792), 'unittest.mock.patch', 'patch', (['"""unittest.mock.MagicMock"""'], {'new': 'DeepCopyMagicMock'}), "('unittest.mock.MagicMock', new=DeepCopyMagicMock)\n", (32742, 32792), False, 'from unittest.mock import call, patch, MagicMock\n'), ((34488, 34526), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (34493, 34526), False, 'from unittest.mock import call, patch, MagicMock\n'), ((34528, 34597), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {'side_effect': 'mock_step_mutating_run'}), "(Step, 'invoke_step', side_effect=mock_step_mutating_run)\n", (34540, 34597), False, 'from unittest.mock import call, patch, MagicMock\n'), ((35934, 35972), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (35939, 35972), False, 'from unittest.mock import call, patch, MagicMock\n'), ((35974, 36044), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {'side_effect': 'mock_step_mutating_skip'}), "(Step, 'invoke_step', side_effect=mock_step_mutating_skip)\n", (35986, 36044), False, 'from unittest.mock import call, patch, MagicMock\n'), ((37231, 37269), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (37236, 37269), False, 'from unittest.mock import call, patch, MagicMock\n'), ((40535, 40573), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (40540, 40573), False, 'from unittest.mock import call, patch, MagicMock\n'), ((40576, 40609), 'unittest.mock.patch.object', 'patch.object', (['Step', 
'"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (40588, 40609), False, 'from unittest.mock import call, patch, MagicMock\n'), ((41564, 41602), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (41569, 41602), False, 'from unittest.mock import call, patch, MagicMock\n'), ((41605, 41674), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {'side_effect': 'mock_step_mutating_run'}), "(Step, 'invoke_step', side_effect=mock_step_mutating_run)\n", (41617, 41674), False, 'from unittest.mock import call, patch, MagicMock\n'), ((43102, 43140), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (43107, 43140), False, 'from unittest.mock import call, patch, MagicMock\n'), ((44527, 44565), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (44532, 44565), False, 'from unittest.mock import call, patch, MagicMock\n'), ((44568, 44601), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (44580, 44601), False, 'from unittest.mock import call, patch, MagicMock\n'), ((45962, 46000), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (45967, 46000), False, 'from unittest.mock import call, patch, MagicMock\n'), ((46003, 46036), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (46015, 46036), False, 'from unittest.mock import call, patch, MagicMock\n'), ((47350, 47388), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (47355, 47388), False, 'from unittest.mock import call, patch, MagicMock\n'), ((47391, 47439), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""run_conditional_decorators"""'], 
{}), "(Step, 'run_conditional_decorators')\n", (47403, 47439), False, 'from unittest.mock import call, patch, MagicMock\n'), ((47442, 47497), 'unittest.mock.patch', 'patch', (['"""unittest.mock.MagicMock"""'], {'new': 'DeepCopyMagicMock'}), "('unittest.mock.MagicMock', new=DeepCopyMagicMock)\n", (47447, 47497), False, 'from unittest.mock import call, patch, MagicMock\n'), ((49657, 49695), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (49662, 49695), False, 'from unittest.mock import call, patch, MagicMock\n'), ((50309, 50359), 'unittest.mock.patch', 'patch', (['"""pypyr.cache.stepcache.step_cache.get_step"""'], {}), "('pypyr.cache.stepcache.step_cache.get_step')\n", (50314, 50359), False, 'from unittest.mock import call, patch, MagicMock\n'), ((50767, 50817), 'unittest.mock.patch', 'patch', (['"""pypyr.cache.stepcache.step_cache.get_step"""'], {}), "('pypyr.cache.stepcache.step_cache.get_step')\n", (50772, 50817), False, 'from unittest.mock import call, patch, MagicMock\n'), ((51229, 51279), 'unittest.mock.patch', 'patch', (['"""pypyr.cache.stepcache.step_cache.get_step"""'], {}), "('pypyr.cache.stepcache.step_cache.get_step')\n", (51234, 51279), False, 'from unittest.mock import call, patch, MagicMock\n'), ((52107, 52157), 'unittest.mock.patch', 'patch', (['"""pypyr.cache.stepcache.step_cache.get_step"""'], {}), "('pypyr.cache.stepcache.step_cache.get_step')\n", (52112, 52157), False, 'from unittest.mock import call, patch, MagicMock\n'), ((53130, 53180), 'unittest.mock.patch', 'patch', (['"""pypyr.cache.stepcache.step_cache.get_step"""'], {}), "('pypyr.cache.stepcache.step_cache.get_step')\n", (53135, 53180), False, 'from unittest.mock import call, patch, MagicMock\n'), ((54195, 54245), 'unittest.mock.patch', 'patch', (['"""pypyr.cache.stepcache.step_cache.get_step"""'], {}), "('pypyr.cache.stepcache.step_cache.get_step')\n", (54200, 54245), False, 'from unittest.mock import call, patch, 
MagicMock\n'), ((54757, 54807), 'unittest.mock.patch', 'patch', (['"""pypyr.cache.stepcache.step_cache.get_step"""'], {}), "('pypyr.cache.stepcache.step_cache.get_step')\n", (54762, 54807), False, 'from unittest.mock import call, patch, MagicMock\n'), ((55196, 55246), 'unittest.mock.patch', 'patch', (['"""pypyr.cache.stepcache.step_cache.get_step"""'], {}), "('pypyr.cache.stepcache.step_cache.get_step')\n", (55201, 55246), False, 'from unittest.mock import call, patch, MagicMock\n'), ((55682, 55732), 'unittest.mock.patch', 'patch', (['"""pypyr.cache.stepcache.step_cache.get_step"""'], {}), "('pypyr.cache.stepcache.step_cache.get_step')\n", (55687, 55732), False, 'from unittest.mock import call, patch, MagicMock\n'), ((56278, 56316), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (56283, 56316), False, 'from unittest.mock import call, patch, MagicMock\n'), ((56319, 56352), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (56331, 56352), False, 'from unittest.mock import call, patch, MagicMock\n'), ((57303, 57341), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (57308, 57341), False, 'from unittest.mock import call, patch, MagicMock\n'), ((57344, 57377), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (57356, 57377), False, 'from unittest.mock import call, patch, MagicMock\n'), ((58074, 58112), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (58079, 58112), False, 'from unittest.mock import call, patch, MagicMock\n'), ((58115, 58148), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (58127, 58148), False, 'from unittest.mock import call, patch, MagicMock\n'), ((58907, 58945), 
'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (58912, 58945), False, 'from unittest.mock import call, patch, MagicMock\n'), ((58948, 58981), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (58960, 58981), False, 'from unittest.mock import call, patch, MagicMock\n'), ((59767, 59805), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (59772, 59805), False, 'from unittest.mock import call, patch, MagicMock\n'), ((59808, 59841), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (59820, 59841), False, 'from unittest.mock import call, patch, MagicMock\n'), ((60639, 60677), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (60644, 60677), False, 'from unittest.mock import call, patch, MagicMock\n'), ((60680, 60713), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (60692, 60713), False, 'from unittest.mock import call, patch, MagicMock\n'), ((61498, 61536), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (61503, 61536), False, 'from unittest.mock import call, patch, MagicMock\n'), ((61539, 61572), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (61551, 61572), False, 'from unittest.mock import call, patch, MagicMock\n'), ((62696, 62734), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (62701, 62734), False, 'from unittest.mock import call, patch, MagicMock\n'), ((62737, 62770), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", 
(62749, 62770), False, 'from unittest.mock import call, patch, MagicMock\n'), ((63896, 63934), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (63901, 63934), False, 'from unittest.mock import call, patch, MagicMock\n'), ((63937, 63970), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (63949, 63970), False, 'from unittest.mock import call, patch, MagicMock\n'), ((65072, 65110), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (65077, 65110), False, 'from unittest.mock import call, patch, MagicMock\n'), ((65113, 65146), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (65125, 65146), False, 'from unittest.mock import call, patch, MagicMock\n'), ((66260, 66298), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (66265, 66298), False, 'from unittest.mock import call, patch, MagicMock\n'), ((66301, 66334), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (66313, 66334), False, 'from unittest.mock import call, patch, MagicMock\n'), ((67453, 67491), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (67458, 67491), False, 'from unittest.mock import call, patch, MagicMock\n'), ((67494, 67527), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (67506, 67527), False, 'from unittest.mock import call, patch, MagicMock\n'), ((68678, 68716), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (68683, 68716), False, 'from unittest.mock import call, patch, MagicMock\n'), ((68719, 68752), 
'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (68731, 68752), False, 'from unittest.mock import call, patch, MagicMock\n'), ((69953, 69991), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (69958, 69991), False, 'from unittest.mock import call, patch, MagicMock\n'), ((71318, 71356), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (71323, 71356), False, 'from unittest.mock import call, patch, MagicMock\n'), ((71359, 71392), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (71371, 71392), False, 'from unittest.mock import call, patch, MagicMock\n'), ((72470, 72508), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (72475, 72508), False, 'from unittest.mock import call, patch, MagicMock\n'), ((72511, 72544), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (72523, 72544), False, 'from unittest.mock import call, patch, MagicMock\n'), ((73240, 73278), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (73245, 73278), False, 'from unittest.mock import call, patch, MagicMock\n'), ((73281, 73314), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (73293, 73314), False, 'from unittest.mock import call, patch, MagicMock\n'), ((74046, 74084), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (74051, 74084), False, 'from unittest.mock import call, patch, MagicMock\n'), ((74087, 74120), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", 
(74099, 74120), False, 'from unittest.mock import call, patch, MagicMock\n'), ((74911, 74949), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (74916, 74949), False, 'from unittest.mock import call, patch, MagicMock\n'), ((74952, 74985), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (74964, 74985), False, 'from unittest.mock import call, patch, MagicMock\n'), ((75783, 75821), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (75788, 75821), False, 'from unittest.mock import call, patch, MagicMock\n'), ((75824, 75857), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (75836, 75857), False, 'from unittest.mock import call, patch, MagicMock\n'), ((76645, 76683), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (76650, 76683), False, 'from unittest.mock import call, patch, MagicMock\n'), ((76686, 76719), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (76698, 76719), False, 'from unittest.mock import call, patch, MagicMock\n'), ((77854, 77892), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (77859, 77892), False, 'from unittest.mock import call, patch, MagicMock\n'), ((77895, 77928), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (77907, 77928), False, 'from unittest.mock import call, patch, MagicMock\n'), ((79027, 79065), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (79032, 79065), False, 'from unittest.mock import call, patch, MagicMock\n'), ((79068, 79101), 
'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (79080, 79101), False, 'from unittest.mock import call, patch, MagicMock\n'), ((80176, 80214), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (80181, 80214), False, 'from unittest.mock import call, patch, MagicMock\n'), ((80217, 80250), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (80229, 80250), False, 'from unittest.mock import call, patch, MagicMock\n'), ((80964, 81002), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (80969, 81002), False, 'from unittest.mock import call, patch, MagicMock\n'), ((81005, 81038), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (81017, 81038), False, 'from unittest.mock import call, patch, MagicMock\n'), ((81844, 81882), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (81849, 81882), False, 'from unittest.mock import call, patch, MagicMock\n'), ((81885, 81918), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (81897, 81918), False, 'from unittest.mock import call, patch, MagicMock\n'), ((82999, 83037), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (83004, 83037), False, 'from unittest.mock import call, patch, MagicMock\n'), ((83040, 83073), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (83052, 83073), False, 'from unittest.mock import call, patch, MagicMock\n'), ((84158, 84196), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", 
(84163, 84196), False, 'from unittest.mock import call, patch, MagicMock\n'), ((84199, 84254), 'unittest.mock.patch', 'patch', (['"""unittest.mock.MagicMock"""'], {'new': 'DeepCopyMagicMock'}), "('unittest.mock.MagicMock', new=DeepCopyMagicMock)\n", (84204, 84254), False, 'from unittest.mock import call, patch, MagicMock\n'), ((85956, 85994), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (85961, 85994), False, 'from unittest.mock import call, patch, MagicMock\n'), ((86978, 87016), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (86983, 87016), False, 'from unittest.mock import call, patch, MagicMock\n'), ((88280, 88318), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (88285, 88318), False, 'from unittest.mock import call, patch, MagicMock\n'), ((89260, 89298), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (89265, 89298), False, 'from unittest.mock import call, patch, MagicMock\n'), ((89379, 89434), 'unittest.mock.patch', 'patch', (['"""unittest.mock.MagicMock"""'], {'new': 'DeepCopyMagicMock'}), "('unittest.mock.MagicMock', new=DeepCopyMagicMock)\n", (89384, 89434), False, 'from unittest.mock import call, patch, MagicMock\n'), ((90113, 90151), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (90118, 90151), False, 'from unittest.mock import call, patch, MagicMock\n'), ((90154, 90187), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {}), "(Step, 'invoke_step')\n", (90166, 90187), False, 'from unittest.mock import call, patch, MagicMock\n'), ((90190, 90245), 'unittest.mock.patch', 'patch', (['"""unittest.mock.MagicMock"""'], {'new': 'DeepCopyMagicMock'}), "('unittest.mock.MagicMock', 
new=DeepCopyMagicMock)\n", (90195, 90245), False, 'from unittest.mock import call, patch, MagicMock\n'), ((91692, 91730), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (91697, 91730), False, 'from unittest.mock import call, patch, MagicMock\n'), ((92023, 92061), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (92028, 92061), False, 'from unittest.mock import call, patch, MagicMock\n'), ((92366, 92404), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (92371, 92404), False, 'from unittest.mock import call, patch, MagicMock\n'), ((92710, 92748), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (92715, 92748), False, 'from unittest.mock import call, patch, MagicMock\n'), ((95020, 95058), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (95025, 95058), False, 'from unittest.mock import call, patch, MagicMock\n'), ((95782, 95820), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (95787, 95820), False, 'from unittest.mock import call, patch, MagicMock\n'), ((96576, 96614), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (96581, 96614), False, 'from unittest.mock import call, patch, MagicMock\n'), ((97348, 97386), 'unittest.mock.patch', 'patch', (['"""pypyr.moduleloader.get_module"""'], {}), "('pypyr.moduleloader.get_module')\n", (97353, 97386), False, 'from unittest.mock import call, patch, MagicMock\n'), ((116295, 116314), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (116300, 116314), False, 'from unittest.mock import call, patch, MagicMock\n'), ((117343, 
117362), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (117348, 117362), False, 'from unittest.mock import call, patch, MagicMock\n'), ((118434, 118453), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (118439, 118453), False, 'from unittest.mock import call, patch, MagicMock\n'), ((119529, 119548), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (119534, 119548), False, 'from unittest.mock import call, patch, MagicMock\n'), ((120953, 120972), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (120958, 120972), False, 'from unittest.mock import call, patch, MagicMock\n'), ((121964, 121983), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (121969, 121983), False, 'from unittest.mock import call, patch, MagicMock\n'), ((122963, 123026), 'unittest.mock.patch', 'patch', (['"""pypyr.retries.random.uniform"""'], {'side_effect': '[11, 12, 13]'}), "('pypyr.retries.random.uniform', side_effect=[11, 12, 13])\n", (122968, 123026), False, 'from unittest.mock import call, patch, MagicMock\n'), ((123029, 123048), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (123034, 123048), False, 'from unittest.mock import call, patch, MagicMock\n'), ((124440, 124503), 'unittest.mock.patch', 'patch', (['"""pypyr.retries.random.uniform"""'], {'side_effect': '[11, 12, 13]'}), "('pypyr.retries.random.uniform', side_effect=[11, 12, 13])\n", (124445, 124503), False, 'from unittest.mock import call, patch, MagicMock\n'), ((124506, 124525), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (124511, 124525), False, 'from unittest.mock import call, patch, MagicMock\n'), ((132889, 132908), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (132894, 132908), False, 'from unittest.mock import call, patch, MagicMock\n'), ((133738, 133757), 
'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (133743, 133757), False, 'from unittest.mock import call, patch, MagicMock\n'), ((135124, 135143), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (135129, 135143), False, 'from unittest.mock import call, patch, MagicMock\n'), ((136563, 136582), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (136568, 136582), False, 'from unittest.mock import call, patch, MagicMock\n'), ((137949, 137968), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (137954, 137968), False, 'from unittest.mock import call, patch, MagicMock\n'), ((140028, 140047), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (140033, 140047), False, 'from unittest.mock import call, patch, MagicMock\n'), ((141958, 141977), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (141963, 141977), False, 'from unittest.mock import call, patch, MagicMock\n'), ((1143, 1168), 'pypyr.dsl.SpecialTagDirective', 'SpecialTagDirective', (['None'], {}), '(None)\n', (1162, 1168), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((1576, 1602), 'pypyr.dsl.SpecialTagDirective', 'SpecialTagDirective', (['"""arb"""'], {}), "('arb')\n", (1595, 1602), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((1932, 1959), 'pypyr.dsl.SpecialTagDirective', 'SpecialTagDirective', (['"""blah"""'], {}), "('blah')\n", (1951, 1959), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((2232, 2283), 'pypyr.dsl.Jsonify', 'Jsonify', (["{'a': 'string here', 'b': 123, 'c': False}"], {}), "({'a': 'string here', 'b': 123, 'c': False})\n", (2239, 2283), False, 'from pypyr.dsl import Jsonify, PyString, 
SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((2766, 2798), 'ruamel.yaml.YAML', 'yamler.YAML', ([], {'typ': '"""rt"""', 'pure': '(True)'}), "(typ='rt', pure=True)\n", (2777, 2798), True, 'import ruamel.yaml as yamler\n'), ((3118, 3128), 'io.StringIO', 'StringIO', ([], {}), '()\n', (3126, 3128), False, 'from io import StringIO\n'), ((7207, 7276), 'pypyr.context.Context', 'Context', (["{'k1': 'string {here}', 'k2': 123.45, 'k3': 1, 'k4': '{k2}'}"], {}), "({'k1': 'string {here}', 'k2': 123.45, 'k3': 1, 'k4': '{k2}'})\n", (7214, 7276), False, 'from pypyr.context import Context\n'), ((8682, 8751), 'pypyr.context.Context', 'Context', (["{'k1': 'string {here}', 'k2': 123.45, 'k3': 1, 'k4': '{k2}'}"], {}), "({'k1': 'string {here}', 'k2': 123.45, 'k3': 1, 'k4': '{k2}'})\n", (8689, 8751), False, 'from pypyr.context import Context\n'), ((10008, 10076), 'pypyr.context.Context', 'Context', (["{'k1': 'my scalar', 'k2': False, 'k3': 123, 'k4': 'a {k1}'}"], {}), "({'k1': 'my scalar', 'k2': False, 'k3': 123, 'k4': 'a {k1}'})\n", (10015, 10076), False, 'from pypyr.context import Context\n'), ((11144, 11159), 'pypyr.dsl.PyString', 'PyString', (['"""1+1"""'], {}), "('1+1')\n", (11152, 11159), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((11395, 11406), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (11404, 11406), False, 'from unittest.mock import call, patch, MagicMock\n'), ((11467, 11502), 'pypyr.dsl.PyString.from_yaml', 'PyString.from_yaml', (['None', 'mock_node'], {}), '(None, mock_node)\n', (11485, 11502), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((11735, 11746), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (11744, 11746), False, 'from unittest.mock import call, patch, MagicMock\n'), ((11751, 11796), 'pypyr.dsl.PyString.to_yaml', 'PyString.to_yaml', 
(['mock_representer', 'mock_node'], {}), '(mock_representer, mock_node)\n', (11767, 11796), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((12261, 12287), 'pypyr.context.Context', 'Context', (["{'a': -3, 'b': 4}"], {}), "({'a': -3, 'b': 4})\n", (12268, 12287), False, 'from pypyr.context import Context\n'), ((12885, 12916), 'pypyr.context.Context', 'Context', (["{'a': '12', 'b': 'ab'}"], {}), "({'a': '12', 'b': 'ab'})\n", (12892, 12916), False, 'from pypyr.context import Context\n'), ((13458, 13482), 'pypyr.dsl.PyString', 'PyString', (['"""len("three")"""'], {}), '(\'len("three")\')\n', (13466, 13482), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((14177, 14193), 'pypyr.dsl.PyString', 'PyString', (['"""blah"""'], {}), "('blah')\n", (14185, 14193), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((14443, 14459), 'pypyr.dsl.SicString', 'SicString', (['"""1+1"""'], {}), "('1+1')\n", (14452, 14459), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((14698, 14709), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (14707, 14709), False, 'from unittest.mock import call, patch, MagicMock\n'), ((14772, 14808), 'pypyr.dsl.SicString.from_yaml', 'SicString.from_yaml', (['None', 'mock_node'], {}), '(None, mock_node)\n', (14791, 14808), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((15059, 15070), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (15068, 15070), False, 'from unittest.mock import call, patch, MagicMock\n'), ((15075, 15121), 'pypyr.dsl.SicString.to_yaml', 'SicString.to_yaml', (['mock_representer', 'mock_node'], {}), '(mock_representer, mock_node)\n', (15092, 15121), False, 
'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((15670, 15686), 'pypyr.dsl.SicString', 'SicString', (['"""arb"""'], {}), "('arb')\n", (15679, 15686), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((15988, 16005), 'pypyr.dsl.SicString', 'SicString', (['"""blah"""'], {}), "('blah')\n", (15997, 16005), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((16268, 16471), 'pypyr.context.Context', 'Context', (["{'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'key4': [{'k4lk1':\n 'value4', 'k4lk2': 'value5'}, {'k4lk1': 'value6', 'k4lk2': 'value7'}],\n 'key5': False, 'key6': True, 'key7': 77}"], {}), "({'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'key4': [{\n 'k4lk1': 'value4', 'k4lk2': 'value5'}, {'k4lk1': 'value6', 'k4lk2':\n 'value7'}], 'key5': False, 'key6': True, 'key7': 77})\n", (16275, 16471), False, 'from pypyr.context import Context\n'), ((18248, 18276), 'pypyr.cache.stepcache.step_cache.clear', 'stepcache.step_cache.clear', ([], {}), '()\n', (18274, 18276), True, 'import pypyr.cache.stepcache as stepcache\n'), ((19966, 19994), 'pypyr.cache.stepcache.step_cache.clear', 'stepcache.step_cache.clear', ([], {}), '()\n', (19992, 19994), True, 'import pypyr.cache.stepcache as stepcache\n'), ((20944, 20972), 'pypyr.cache.stepcache.step_cache.clear', 'stepcache.step_cache.clear', ([], {}), '()\n', (20970, 20972), True, 'import pypyr.cache.stepcache as stepcache\n'), ((21982, 22010), 'pypyr.cache.stepcache.step_cache.clear', 'stepcache.step_cache.clear', ([], {}), '()\n', (22008, 22010), True, 'import pypyr.cache.stepcache as stepcache\n'), ((22084, 22339), 'pypyr.dsl.Step', 'Step', (["{'name': 'blah', 'in': {'k1': 'v1', 'k2': 'v2'}, 'foreach': [0], 'retry': {\n 'max': 5, 'sleep': 7}, 'run': False, 'skip': True, 'swallow': True,\n 'while': 
{'stop': 'stop condition', 'errorOnMax': True, 'sleep': 3,\n 'max': 4}}", '"""stepsrunner"""'], {}), "({'name': 'blah', 'in': {'k1': 'v1', 'k2': 'v2'}, 'foreach': [0],\n 'retry': {'max': 5, 'sleep': 7}, 'run': False, 'skip': True, 'swallow':\n True, 'while': {'stop': 'stop condition', 'errorOnMax': True, 'sleep': \n 3, 'max': 4}}, 'stepsrunner')\n", (22088, 22339), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((23573, 23601), 'pypyr.cache.stepcache.step_cache.clear', 'stepcache.step_cache.clear', ([], {}), '()\n', (23599, 23601), True, 'import pypyr.cache.stepcache as stepcache\n'), ((23679, 23927), 'ruamel.yaml.comments.CommentedMap', 'CommentedMap', (["{'name': 'blah', 'in': {'k1': 'v1', 'k2': 'v2'}, 'foreach': [0], 'retry': {\n 'max': 5, 'sleep': 7}, 'run': False, 'skip': True, 'swallow': True,\n 'while': {'stop': 'stop condition', 'errorOnMax': True, 'sleep': 3,\n 'max': 4}}"], {}), "({'name': 'blah', 'in': {'k1': 'v1', 'k2': 'v2'}, 'foreach': [0\n ], 'retry': {'max': 5, 'sleep': 7}, 'run': False, 'skip': True,\n 'swallow': True, 'while': {'stop': 'stop condition', 'errorOnMax': True,\n 'sleep': 3, 'max': 4}})\n", (23691, 23927), False, 'from ruamel.yaml.comments import CommentedMap, CommentedSeq, TaggedScalar\n'), ((24098, 24117), 'pypyr.dsl.Step', 'Step', (['context', 'None'], {}), '(context, None)\n', (24102, 24117), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((25271, 25342), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'description': 'test {key1} description'}", 'None'], {}), "({'name': 'step1', 'description': 'test {key1} description'}, None)\n", (25275, 25342), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((26087, 26172), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'description': 'test description', 'run': '{key5}'}", 
'None'], {}), "({'name': 'step1', 'description': 'test description', 'run': '{key5}'},\n None)\n", (26091, 26172), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((27086, 27176), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'description': 'test {key5} description', 'skip': True}", 'None'], {}), "({'name': 'step1', 'description': 'test {key5} description', 'skip': \n True}, None)\n", (27090, 27176), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((28127, 28146), 'pypyr.dsl.Step', 'Step', (['"""step1"""', 'None'], {}), "('step1', None)\n", (28131, 28146), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((28715, 28759), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'foreach': []}", 'None'], {}), "({'name': 'step1', 'foreach': []}, None)\n", (28719, 28759), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((29277, 29326), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'foreach': ['one']}", 'None'], {}), "({'name': 'step1', 'foreach': ['one']}, None)\n", (29281, 29326), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((30305, 30361), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'foreach': ['one', 'two']}", 'None'], {}), "({'name': 'step1', 'foreach': ['one', 'two']}, None)\n", (30309, 30361), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((31549, 31619), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'foreach': ['{key1}', '{key2}', 'key3']}", 'None'], {}), "({'name': 'step1', 'foreach': ['{key1}', '{key2}', 'key3']}, None)\n", (31553, 31619), False, 'from pypyr.dsl import Jsonify, PyString, SicString, 
SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((32943, 32993), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'foreach': '{list}'}", 'None'], {}), "({'name': 'step1', 'foreach': '{list}'}, None)\n", (32947, 32993), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((34757, 34867), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'run': '{dynamic_run_expression}', 'foreach': ['{key1}',\n '{key2}', 'key3']}", 'None'], {}), "({'name': 'step1', 'run': '{dynamic_run_expression}', 'foreach': [\n '{key1}', '{key2}', 'key3']}, None)\n", (34761, 34867), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((36203, 36315), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'skip': '{dynamic_skip_expression}', 'foreach': ['{key1}',\n '{key2}', 'key3']}", 'None'], {}), "({'name': 'step1', 'skip': '{dynamic_skip_expression}', 'foreach': [\n '{key1}', '{key2}', 'key3']}, None)\n", (36207, 36315), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((37418, 37535), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'swallow': '{dynamic_swallow_expression}', 'foreach': [\n '{key1}', '{key2}', 'key3']}", 'None'], {}), "({'name': 'step1', 'swallow': '{dynamic_swallow_expression}', 'foreach':\n ['{key1}', '{key2}', 'key3']}, None)\n", (37422, 37535), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((39452, 39472), 'pypyr.context.Context', 'Context', (["{'lst': []}"], {}), "({'lst': []})\n", (39459, 39472), False, 'from pypyr.context import Context\n'), ((40706, 40756), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'while': {'max': 3}}", 'None'], {}), "({'name': 'step1', 'while': {'max': 3}}, None)\n", (40710, 40756), False, 'from pypyr.dsl import Jsonify, PyString, SicString, 
SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((41830, 41948), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'run': '{dynamic_run_expression}', 'while': {'max':\n '{whileMax}', 'stop': '{key5}'}}", 'None'], {}), "({'name': 'step1', 'run': '{dynamic_run_expression}', 'while': {'max':\n '{whileMax}', 'stop': '{key5}'}}, None)\n", (41834, 41948), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((43336, 43386), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'while': {'max': 3}}", 'None'], {}), "({'name': 'step1', 'while': {'max': 3}}, None)\n", (43340, 43386), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((44723, 44830), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'while': {'max': '{whileMax}', 'stop': '{key5}',\n 'errorOnMax': '{key6}'}}", 'None'], {}), "({'name': 'step1', 'while': {'max': '{whileMax}', 'stop': '{key5}',\n 'errorOnMax': '{key6}'}}, None)\n", (44727, 44830), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((46194, 46294), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'while': {'max': '{whileMax}', 'stop': False,\n 'errorOnMax': True}}", 'None'], {}), "({'name': 'step1', 'while': {'max': '{whileMax}', 'stop': False,\n 'errorOnMax': True}}, None)\n", (46198, 46294), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((47665, 47761), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'foreach': ['{key1}', '{key2}', 'key3'], 'while': {'max': 2}}", 'None'], {}), "({'name': 'step1', 'foreach': ['{key1}', '{key2}', 'key3'], 'while': {\n 'max': 2}}, None)\n", (47669, 47761), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((49787, 49815), 
'pypyr.cache.stepcache.step_cache.clear', 'stepcache.step_cache.clear', ([], {}), '()\n', (49813, 49815), True, 'import pypyr.cache.stepcache as stepcache\n'), ((49827, 49852), 'pypyr.dsl.Step', 'Step', (['"""mocked.step"""', 'None'], {}), "('mocked.step', None)\n", (49831, 49852), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((50584, 50609), 'pypyr.dsl.Step', 'Step', (['"""mocked.step"""', 'None'], {}), "('mocked.step', None)\n", (50588, 50609), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((51048, 51073), 'pypyr.dsl.Step', 'Step', (['"""mocked.step"""', 'None'], {}), "('mocked.step', None)\n", (51052, 51073), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((51517, 51542), 'pypyr.dsl.Step', 'Step', (['"""mocked.step"""', 'None'], {}), "('mocked.step', None)\n", (51521, 51542), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((52410, 52460), 'pypyr.errors.Call', 'Call', (["['one', 'two']", '"""sg"""', '"""fg"""', "('a', 'changed')"], {}), "(['one', 'two'], 'sg', 'fg', ('a', 'changed'))\n", (52414, 52460), False, 'from pypyr.errors import Call, HandledError, LoopMaxExhaustedError, PipelineDefinitionError\n'), ((52743, 52766), 'pypyr.dsl.Step', 'Step', (['step_config', 'None'], {}), '(step_config, None)\n', (52747, 52766), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((53483, 53527), 'pypyr.errors.Call', 'Call', (["['one', 'two']", '"""sg"""', '"""fg"""', "('a', 'b')"], {}), "(['one', 'two'], 'sg', 'fg', ('a', 'b'))\n", (53487, 53527), False, 'from pypyr.errors import Call, HandledError, LoopMaxExhaustedError, PipelineDefinitionError\n'), ((53810, 53833), 'pypyr.dsl.Step', 'Step', 
(['step_config', 'None'], {}), '(step_config, None)\n', (53814, 53833), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((54433, 54477), 'pypyr.errors.Call', 'Call', (["['one', 'two']", '"""sg"""', '"""fg"""', "('x', 'z')"], {}), "(['one', 'two'], 'sg', 'fg', ('x', 'z'))\n", (54437, 54477), False, 'from pypyr.errors import Call, HandledError, LoopMaxExhaustedError, PipelineDefinitionError\n'), ((54526, 54549), 'pypyr.dsl.Step', 'Step', (['step_config', 'None'], {}), '(step_config, None)\n', (54530, 54549), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((54982, 55027), 'pypyr.errors.Call', 'Call', (["['one', 'two']", '"""sg"""', '"""fg"""', "('x', None)"], {}), "(['one', 'two'], 'sg', 'fg', ('x', None))\n", (54986, 55027), False, 'from pypyr.errors import Call, HandledError, LoopMaxExhaustedError, PipelineDefinitionError\n'), ((55076, 55099), 'pypyr.dsl.Step', 'Step', (['step_config', 'None'], {}), '(step_config, None)\n', (55080, 55099), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((55436, 55488), 'pypyr.errors.Call', 'Call', (["['one', 'two']", '"""sg"""', '"""fg"""', "('a', arb_mutable)"], {}), "(['one', 'two'], 'sg', 'fg', ('a', arb_mutable))\n", (55440, 55488), False, 'from pypyr.errors import Call, HandledError, LoopMaxExhaustedError, PipelineDefinitionError\n'), ((55537, 55560), 'pypyr.dsl.Step', 'Step', (['step_config', 'None'], {}), '(step_config, None)\n', (55541, 55560), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((55923, 55975), 'pypyr.errors.Call', 'Call', (["['one', 'two']", '"""sg"""', '"""fg"""', "('a', arb_mutable)"], {}), "(['one', 'two'], 'sg', 'fg', ('a', arb_mutable))\n", (55927, 55975), False, 'from pypyr.errors import Call, HandledError, 
LoopMaxExhaustedError, PipelineDefinitionError\n'), ((56024, 56047), 'pypyr.dsl.Step', 'Step', (['step_config', 'None'], {}), '(step_config, None)\n', (56028, 56047), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((56566, 56608), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'run': True}", 'None'], {}), "({'name': 'step1', 'run': True}, None)\n", (56570, 56608), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((57597, 57640), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'run': False}", 'None'], {}), "({'name': 'step1', 'run': False}, None)\n", (57601, 57640), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((58357, 58403), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'run': '{key1}'}", 'None'], {}), "({'name': 'step1', 'run': '{key1}'}, None)\n", (58361, 58403), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((59209, 59254), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'run': 'False'}", 'None'], {}), "({'name': 'step1', 'run': 'False'}, None)\n", (59213, 59254), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((60081, 60126), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'run': 'false'}", 'None'], {}), "({'name': 'step1', 'run': 'false'}, None)\n", (60085, 60126), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((60923, 60969), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'run': '{key5}'}", 'None'], {}), "({'name': 'step1', 'run': '{key5}'}, None)\n", (60927, 60969), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((61773, 61819), 
'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'run': '{key6}'}", 'None'], {}), "({'name': 'step1', 'run': '{key6}'}, None)\n", (61777, 61819), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((63002, 63046), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'run': 'True'}", 'None'], {}), "({'name': 'step1', 'run': 'True'}, None)\n", (63006, 63046), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((64167, 64206), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'run': 1}", 'None'], {}), "({'name': 'step1', 'run': 1}, None)\n", (64171, 64206), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((65346, 65386), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'run': 99}", 'None'], {}), "({'name': 'step1', 'run': 99}, None)\n", (65350, 65386), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((66538, 66578), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'run': -1}", 'None'], {}), "({'name': 'step1', 'run': -1}, None)\n", (66542, 66578), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((67712, 67763), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'retry': {'max': 10}}", 'None'], {}), "({'name': 'step1', 'retry': {'max': 10}}, None)\n", (67716, 67763), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((68927, 68977), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'retry': {'max': 0}}", 'None'], {}), "({'name': 'step1', 'retry': {'max': 0}}, None)\n", (68931, 68977), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((70230, 70304), 
'ruamel.yaml.comments.CommentedMap', 'CommentedMap', (["{'name': 'step1', 'swallow': 0, 'onError': {'arb': 'value'}}"], {}), "({'name': 'step1', 'swallow': 0, 'onError': {'arb': 'value'}})\n", (70242, 70304), False, 'from ruamel.yaml.comments import CommentedMap, CommentedSeq, TaggedScalar\n'), ((70395, 70424), 'pypyr.dsl.Step', 'Step', (['complex_step_info', 'None'], {}), '(complex_step_info, None)\n', (70399, 70424), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((71612, 71656), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'skip': False}", 'None'], {}), "({'name': 'step1', 'skip': False}, None)\n", (71616, 71656), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((72757, 72800), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'skip': True}", 'None'], {}), "({'name': 'step1', 'skip': True}, None)\n", (72761, 72800), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((73525, 73572), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'skip': '{key6}'}", 'None'], {}), "({'name': 'step1', 'skip': '{key6}'}, None)\n", (73529, 73572), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((74349, 74394), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'skip': 'TRUE'}", 'None'], {}), "({'name': 'step1', 'skip': 'TRUE'}, None)\n", (74353, 74394), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((75224, 75269), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'skip': 'true'}", 'None'], {}), "({'name': 'step1', 'skip': 'true'}, None)\n", (75228, 75269), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((76067, 76127), 'pypyr.dsl.Step', 'Step', 
(["{'name': 'step1', 'run': '{key5}', 'skip': True}", 'None'], {}), "({'name': 'step1', 'run': '{key5}', 'skip': True}, None)\n", (76071, 76127), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((76923, 76970), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'skip': '{key5}'}", 'None'], {}), "({'name': 'step1', 'skip': '{key5}'}, None)\n", (76927, 76970), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((78124, 78170), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'skip': 'False'}", 'None'], {}), "({'name': 'step1', 'skip': 'False'}, None)\n", (78128, 78170), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((79264, 79304), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'skip': 0}", 'None'], {}), "({'name': 'step1', 'skip': 0}, None)\n", (79268, 79304), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((80423, 80464), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'skip': 99}", 'None'], {}), "({'name': 'step1', 'skip': 99}, None)\n", (80427, 80464), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((81244, 81285), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'skip': -1}", 'None'], {}), "({'name': 'step1', 'skip': -1}, None)\n", (81248, 81285), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((82139, 82185), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'swallow': True}", 'None'], {}), "({'name': 'step1', 'swallow': True}, None)\n", (82143, 82185), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((83297, 83344), 'pypyr.dsl.Step', 'Step', (["{'name': 
'step1', 'swallow': False}", 'None'], {}), "({'name': 'step1', 'swallow': False}, None)\n", (83301, 83344), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((84396, 84439), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'swallow': 1}", 'None'], {}), "({'name': 'step1', 'swallow': 1}, None)\n", (84400, 84439), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((86288, 86331), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'swallow': 0}", 'None'], {}), "({'name': 'step1', 'swallow': 0}, None)\n", (86292, 86331), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((87305, 87350), 'ruamel.yaml.comments.CommentedMap', 'CommentedMap', (["{'name': 'step1', 'swallow': 0}"], {}), "({'name': 'step1', 'swallow': 0})\n", (87317, 87350), False, 'from ruamel.yaml.comments import CommentedMap, CommentedSeq, TaggedScalar\n'), ((87433, 87462), 'pypyr.dsl.Step', 'Step', (['complex_step_info', 'None'], {}), '(complex_step_info, None)\n', (87437, 87462), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((88596, 88625), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1'}", 'None'], {}), "({'name': 'step1'}, None)\n", (88600, 88625), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((90402, 90505), 'pypyr.dsl.Step', 'Step', (["{'name': 'step1', 'in': {'key1': 'updated1', 'key2': 'updated2', 'keyadded':\n 'added3'}}", 'None'], {}), "({'name': 'step1', 'in': {'key1': 'updated1', 'key2': 'updated2',\n 'keyadded': 'added3'}}, None)\n", (90406, 90505), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((91884, 91902), 'pypyr.dsl.Step', 'Step', (['"""blah"""', 
'None'], {}), "('blah', None)\n", (91888, 91902), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((92217, 92245), 'pypyr.dsl.Step', 'Step', (["{'name': 'blah'}", 'None'], {}), "({'name': 'blah'}, None)\n", (92221, 92245), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((92551, 92589), 'pypyr.dsl.Step', 'Step', (["{'name': 'blah', 'in': {}}", 'None'], {}), "({'name': 'blah', 'in': {}}, None)\n", (92555, 92589), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((93158, 93201), 'pypyr.dsl.Step', 'Step', (["{'name': 'blah', 'in': in_args}", 'None'], {}), "({'name': 'blah', 'in': in_args}, None)\n", (93162, 93201), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((93840, 93880), 'pypyr.dsl.Step', 'Step', (["{'name': 'blah', 'in': None}", 'None'], {}), "({'name': 'blah', 'in': None}, None)\n", (93844, 93880), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((94164, 94202), 'pypyr.dsl.Step', 'Step', (["{'name': 'blah', 'in': {}}", 'None'], {}), "({'name': 'blah', 'in': {}}, None)\n", (94168, 94202), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((94697, 94740), 'pypyr.dsl.Step', 'Step', (["{'name': 'blah', 'in': in_args}", 'None'], {}), "({'name': 'blah', 'in': in_args}, None)\n", (94701, 94740), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((95169, 95197), 'pypyr.dsl.Step', 'Step', (["{'name': 'blah'}", 'None'], {}), "({'name': 'blah'}, None)\n", (95173, 95197), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, 
RetryDecorator, WhileDecorator\n'), ((95963, 95997), 'ruamel.yaml.comments.CommentedMap', 'CommentedMap', (["{'name': 'arb step'}"], {}), "({'name': 'arb step'})\n", (95975, 95997), False, 'from ruamel.yaml.comments import CommentedMap, CommentedSeq, TaggedScalar\n'), ((96048, 96069), 'pypyr.dsl.Step', 'Step', (['step_info', 'None'], {}), '(step_info, None)\n', (96052, 96069), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((96727, 96785), 'pypyr.dsl.Step', 'Step', (["{'name': 'blah', 'onError': {'key': '{key1}'}}", 'None'], {}), "({'name': 'blah', 'onError': {'key': '{key1}'}}, None)\n", (96731, 96785), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((97486, 97514), 'pypyr.dsl.Step', 'Step', (["{'name': 'blah'}", 'None'], {}), "({'name': 'blah'}, None)\n", (97490, 97514), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((98763, 98781), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (['{}'], {}), '({})\n', (98777, 98781), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((99164, 99190), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3}"], {}), "({'max': 3})\n", (99178, 99190), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((99559, 99725), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'sleep': 4.4, 'retryOn': [1, 2, 3], 'stopOn': [4, 5, 6],\n 'backoff': 'arb', 'sleepMax': 5.5, 'jrc': 6.6, 'backoffArgs': {'a': 'b'}}"], {}), "({'max': 3, 'sleep': 4.4, 'retryOn': [1, 2, 3], 'stopOn': [4,\n 5, 6], 'backoff': 'arb', 'sleepMax': 5.5, 'jrc': 6.6, 'backoffArgs': {\n 'a': 'b'}})\n", (99573, 99725), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, 
Step, RetryDecorator, WhileDecorator\n'), ((100715, 100741), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3}"], {}), "({'max': 3})\n", (100729, 100741), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((100757, 100768), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (100764, 100768), False, 'from pypyr.context import Context\n'), ((100780, 100791), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (100789, 100791), False, 'from unittest.mock import call, patch, MagicMock\n'), ((101234, 101260), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3}"], {}), "({'max': 3})\n", (101248, 101260), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((101276, 101287), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (101283, 101287), False, 'from pypyr.context import Context\n'), ((101299, 101310), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (101308, 101310), False, 'from unittest.mock import call, patch, MagicMock\n'), ((101739, 101765), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3}"], {}), "({'max': 3})\n", (101753, 101765), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((101781, 101792), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (101788, 101792), False, 'from pypyr.context import Context\n'), ((101804, 101815), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (101813, 101815), False, 'from unittest.mock import call, patch, MagicMock\n'), ((102597, 102662), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'retryOn': ['KeyError', 'ValueError']}"], {}), "({'max': 3, 'retryOn': ['KeyError', 'ValueError']})\n", (102611, 102662), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, 
WhileDecorator\n'), ((102703, 102714), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (102710, 102714), False, 'from pypyr.context import Context\n'), ((102726, 102737), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (102735, 102737), False, 'from unittest.mock import call, patch, MagicMock\n'), ((103528, 103587), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'retryOn': ['KeyError', '{k1}']}"], {}), "({'max': 3, 'retryOn': ['KeyError', '{k1}']})\n", (103542, 103587), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((103628, 103657), 'pypyr.context.Context', 'Context', (["{'k1': 'ValueError'}"], {}), "({'k1': 'ValueError'})\n", (103635, 103657), False, 'from pypyr.context import Context\n'), ((103669, 103680), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (103678, 103680), False, 'from unittest.mock import call, patch, MagicMock\n'), ((104622, 104686), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'retryOn': ['KeyError', 'BlahError']}"], {}), "({'max': 3, 'retryOn': ['KeyError', 'BlahError']})\n", (104636, 104686), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((104727, 104738), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (104734, 104738), False, 'from pypyr.context import Context\n'), ((104750, 104761), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (104759, 104761), False, 'from unittest.mock import call, patch, MagicMock\n'), ((105510, 105574), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'stopOn': ['KeyError', 'ValueError']}"], {}), "({'max': 3, 'stopOn': ['KeyError', 'ValueError']})\n", (105524, 105574), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((105615, 105626), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', 
(105622, 105626), False, 'from pypyr.context import Context\n'), ((105638, 105649), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (105647, 105649), False, 'from unittest.mock import call, patch, MagicMock\n'), ((106416, 106460), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'stopOn': '{k1}'}"], {}), "({'max': 3, 'stopOn': '{k1}'})\n", (106430, 106460), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((106501, 106544), 'pypyr.context.Context', 'Context', (["{'k1': ['KeyError', 'ValueError']}"], {}), "({'k1': ['KeyError', 'ValueError']})\n", (106508, 106544), False, 'from pypyr.context import Context\n'), ((106556, 106567), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (106565, 106567), False, 'from unittest.mock import call, patch, MagicMock\n'), ((107407, 107469), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'stopOn': ['KeyError', 'ArbError']}"], {}), "({'max': 3, 'stopOn': ['KeyError', 'ArbError']})\n", (107421, 107469), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((107510, 107521), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (107517, 107521), False, 'from pypyr.context import Context\n'), ((107533, 107544), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (107542, 107544), False, 'from unittest.mock import call, patch, MagicMock\n'), ((108333, 108377), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'stopOn': '{k1}'}"], {}), "({'max': 3, 'stopOn': '{k1}'})\n", (108347, 108377), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((108418, 108459), 'pypyr.context.Context', 'Context', (["{'k1': ['KeyError', 'ArbError']}"], {}), "({'k1': ['KeyError', 'ArbError']})\n", (108425, 108459), False, 'from pypyr.context import Context\n'), ((108471, 108482), 
'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (108480, 108482), False, 'from unittest.mock import call, patch, MagicMock\n'), ((109470, 109577), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'stopOn': ['KeyError', 'ValueError'], 'retryOn': ['KeyError',\n 'ValueError']}"], {}), "({'max': 3, 'stopOn': ['KeyError', 'ValueError'], 'retryOn':\n ['KeyError', 'ValueError']})\n", (109484, 109577), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((109639, 109650), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (109646, 109650), False, 'from pypyr.context import Context\n'), ((109662, 109673), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (109671, 109673), False, 'from unittest.mock import call, patch, MagicMock\n'), ((110416, 110442), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3}"], {}), "({'max': 3})\n", (110430, 110442), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((110458, 110469), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (110465, 110469), False, 'from pypyr.context import Context\n'), ((110481, 110492), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (110490, 110492), False, 'from unittest.mock import call, patch, MagicMock\n'), ((111293, 111358), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'retryOn': ['KeyError', 'ValueError']}"], {}), "({'max': 3, 'retryOn': ['KeyError', 'ValueError']})\n", (111307, 111358), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((111399, 111410), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (111406, 111410), False, 'from pypyr.context import Context\n'), ((111422, 111433), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (111431, 111433), False, 'from unittest.mock import call, patch, 
MagicMock\n'), ((112195, 112257), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'stopOn': ['KeyError', 'ArbError']}"], {}), "({'max': 3, 'stopOn': ['KeyError', 'ArbError']})\n", (112209, 112257), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((112298, 112309), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (112305, 112309), False, 'from pypyr.context import Context\n'), ((112321, 112332), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (112330, 112332), False, 'from unittest.mock import call, patch, MagicMock\n'), ((112343, 112357), 'pypyr.errors.HandledError', 'HandledError', ([], {}), '()\n', (112355, 112357), False, 'from pypyr.errors import Call, HandledError, LoopMaxExhaustedError, PipelineDefinitionError\n'), ((113144, 113188), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'stopOn': '{k1}'}"], {}), "({'max': 3, 'stopOn': '{k1}'})\n", (113158, 113188), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((113229, 113270), 'pypyr.context.Context', 'Context', (["{'k1': ['KeyError', 'ArbError']}"], {}), "({'k1': ['KeyError', 'ArbError']})\n", (113236, 113270), False, 'from pypyr.context import Context\n'), ((113282, 113293), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (113291, 113293), False, 'from unittest.mock import call, patch, MagicMock\n'), ((113304, 113318), 'pypyr.errors.HandledError', 'HandledError', ([], {}), '()\n', (113316, 113318), False, 'from pypyr.errors import Call, HandledError, LoopMaxExhaustedError, PipelineDefinitionError\n'), ((114322, 114374), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'stopOn': ['ValueError']}"], {}), "({'max': 3, 'stopOn': ['ValueError']})\n", (114336, 114374), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((114415, 
114426), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (114422, 114426), False, 'from pypyr.context import Context\n'), ((114438, 114449), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (114447, 114449), False, 'from unittest.mock import call, patch, MagicMock\n'), ((114460, 114474), 'pypyr.errors.HandledError', 'HandledError', ([], {}), '()\n', (114472, 114474), False, 'from pypyr.errors import Call, HandledError, LoopMaxExhaustedError, PipelineDefinitionError\n'), ((115323, 115387), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'retryOn': ['KeyError', 'BlahError']}"], {}), "({'max': 3, 'retryOn': ['KeyError', 'BlahError']})\n", (115337, 115387), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((115428, 115439), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (115435, 115439), False, 'from pypyr.context import Context\n'), ((115451, 115462), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (115460, 115462), False, 'from unittest.mock import call, patch, MagicMock\n'), ((115473, 115487), 'pypyr.errors.HandledError', 'HandledError', ([], {}), '()\n', (115485, 115487), False, 'from pypyr.errors import Call, HandledError, LoopMaxExhaustedError, PipelineDefinitionError\n'), ((116439, 116465), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3}"], {}), "({'max': 3})\n", (116453, 116465), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((116480, 116501), 'pypyr.context.Context', 'Context', (["{'k1': 'v1'}"], {}), "({'k1': 'v1'})\n", (116487, 116501), False, 'from pypyr.context import Context\n'), ((116513, 116524), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (116522, 116524), False, 'from unittest.mock import call, patch, MagicMock\n'), ((117571, 117592), 'pypyr.context.Context', 'Context', (["{'k1': 'v1'}"], {}), "({'k1': 'v1'})\n", (117578, 
117592), False, 'from pypyr.context import Context\n'), ((117604, 117615), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (117613, 117615), False, 'from unittest.mock import call, patch, MagicMock\n'), ((118578, 118619), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 3, 'sleep': 10.1}"], {}), "({'max': 3, 'sleep': 10.1})\n", (118592, 118619), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((118634, 118655), 'pypyr.context.Context', 'Context', (["{'k1': 'v1'}"], {}), "({'k1': 'v1'})\n", (118641, 118655), False, 'from pypyr.context import Context\n'), ((118667, 118678), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (118676, 118678), False, 'from unittest.mock import call, patch, MagicMock\n'), ((119709, 119758), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': 5, 'sleep': [10.1, 10.2]}"], {}), "({'max': 5, 'sleep': [10.1, 10.2]})\n", (119723, 119758), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((119773, 119794), 'pypyr.context.Context', 'Context', (["{'k1': 'v1'}"], {}), "({'k1': 'v1'})\n", (119780, 119794), False, 'from pypyr.context import Context\n'), ((119806, 119817), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (119815, 119817), False, 'from unittest.mock import call, patch, MagicMock\n'), ((121115, 121146), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'sleep': 10.1}"], {}), "({'sleep': 10.1})\n", (121129, 121146), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((121161, 121182), 'pypyr.context.Context', 'Context', (["{'k1': 'v1'}"], {}), "({'k1': 'v1'})\n", (121168, 121182), False, 'from pypyr.context import Context\n'), ((121194, 121205), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (121203, 121205), False, 'from unittest.mock import call, patch, MagicMock\n'), 
((122095, 122152), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': '{k3[1][k031]}', 'sleep': '{k2}'}"], {}), "({'max': '{k3[1][k031]}', 'sleep': '{k2}'})\n", (122109, 122152), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((122192, 122264), 'pypyr.context.Context', 'Context', (["{'k1': False, 'k2': 0.3, 'k3': [0, {'k031': 1, 'k032': False}]}"], {}), "({'k1': False, 'k2': 0.3, 'k3': [0, {'k031': 1, 'k032': False}]})\n", (122199, 122264), False, 'from pypyr.context import Context\n'), ((123201, 123368), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': '{k3[1][k031]}', 'sleep': '{k2}', 'backoff': '{k6}', 'jrc': '{k4}',\n 'sleepMax': '{k5}', 'backoffArgs': {'base': '{k7}', 'arb': '{k8}'}}"], {}), "({'max': '{k3[1][k031]}', 'sleep': '{k2}', 'backoff': '{k6}',\n 'jrc': '{k4}', 'sleepMax': '{k5}', 'backoffArgs': {'base': '{k7}',\n 'arb': '{k8}'}})\n", (123215, 123368), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((123500, 123669), 'pypyr.context.Context', 'Context', (["{'k1': False, 'k2': 3, 'k3': [0, {'k031': 4, 'k032': False}], 'k4': 0.5,\n 'k5': 30, 'k6': 'exponentialjitter', 'k7': 3, 'k8': 'a value',\n 'step_count': 0}"], {}), "({'k1': False, 'k2': 3, 'k3': [0, {'k031': 4, 'k032': False}], 'k4':\n 0.5, 'k5': 30, 'k6': 'exponentialjitter', 'k7': 3, 'k8': 'a value',\n 'step_count': 0})\n", (123507, 123669), False, 'from pypyr.context import Context\n'), ((124664, 124779), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (["{'max': '{k3[1][k031]}', 'sleep': '{k2}', 'backoff': '{k6}', 'jrc': '{k4}',\n 'sleepMax': '{k5}'}"], {}), "({'max': '{k3[1][k031]}', 'sleep': '{k2}', 'backoff': '{k6}',\n 'jrc': '{k4}', 'sleepMax': '{k5}'})\n", (124678, 124779), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((124890, 125033), 
'pypyr.context.Context', 'Context', (["{'k1': False, 'k2': [0.3, 0.2, 0.1], 'k3': [0, {'k031': 4, 'k032': False}],\n 'k4': 2, 'k5': 0.25, 'k6': 'jitter', 'step_count': 0}"], {}), "({'k1': False, 'k2': [0.3, 0.2, 0.1], 'k3': [0, {'k031': 4, 'k032': \n False}], 'k4': 2, 'k5': 0.25, 'k6': 'jitter', 'step_count': 0})\n", (124897, 125033), False, 'from pypyr.context import Context\n'), ((126002, 126033), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'stop': 'arb'}"], {}), "({'stop': 'arb'})\n", (126016, 126033), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((126294, 126320), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'max': 3}"], {}), "({'max': 3})\n", (126308, 126320), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((126566, 126639), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'errorOnMax': True, 'max': 3, 'sleep': 4.4, 'stop': '5'}"], {}), "({'errorOnMax': True, 'max': 3, 'sleep': 4.4, 'stop': '5'})\n", (126580, 126639), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((127788, 127814), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'max': 3}"], {}), "({'max': 3})\n", (127802, 127814), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((127830, 127841), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (127837, 127841), False, 'from pypyr.context import Context\n'), ((127853, 127864), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (127862, 127864), False, 'from unittest.mock import call, patch, MagicMock\n'), ((128275, 128305), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'stop': True}"], {}), "({'stop': True})\n", (128289, 128305), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, 
RetryDecorator, WhileDecorator\n'), ((128321, 128332), 'pypyr.context.Context', 'Context', (['{}'], {}), '({})\n', (128328, 128332), False, 'from pypyr.context import Context\n'), ((128344, 128355), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (128353, 128355), False, 'from unittest.mock import call, patch, MagicMock\n'), ((128779, 128813), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'stop': '{stop}'}"], {}), "({'stop': '{stop}'})\n", (128793, 128813), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((128829, 128852), 'pypyr.context.Context', 'Context', (["{'stop': True}"], {}), "({'stop': True})\n", (128836, 128852), False, 'from pypyr.context import Context\n'), ((128864, 128875), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (128873, 128875), False, 'from unittest.mock import call, patch, MagicMock\n'), ((129286, 129327), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'max': 1, 'stop': False}"], {}), "({'max': 1, 'stop': False})\n", (129300, 129327), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((129343, 129352), 'pypyr.context.Context', 'Context', ([], {}), '()\n', (129350, 129352), False, 'from pypyr.context import Context\n'), ((129364, 129375), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (129373, 129375), False, 'from unittest.mock import call, patch, MagicMock\n'), ((129782, 129816), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'stop': '{stop}'}"], {}), "({'stop': '{stop}'})\n", (129796, 129816), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((129832, 129856), 'pypyr.context.Context', 'Context', (["{'stop': False}"], {}), "({'stop': False})\n", (129839, 129856), False, 'from pypyr.context import Context\n'), ((129868, 129879), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), 
'()\n', (129877, 129879), False, 'from unittest.mock import call, patch, MagicMock\n'), ((130393, 130423), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'stop': True}"], {}), "({'stop': True})\n", (130407, 130423), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((130436, 130447), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (130445, 130447), False, 'from unittest.mock import call, patch, MagicMock\n'), ((130976, 131002), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'max': 0}"], {}), "({'max': 0})\n", (130990, 131002), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((131015, 131026), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (131024, 131026), False, 'from unittest.mock import call, patch, MagicMock\n'), ((131427, 131457), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'max': '{x}'}"], {}), "({'max': '{x}'})\n", (131441, 131457), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((131470, 131481), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (131479, 131481), False, 'from unittest.mock import call, patch, MagicMock\n'), ((131884, 131924), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'stop': '{thisistrue}'}"], {}), "({'stop': '{thisistrue}'})\n", (131898, 131924), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((131937, 131948), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (131946, 131948), False, 'from unittest.mock import call, patch, MagicMock\n'), ((132529, 132559), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'stop': True}"], {}), "({'stop': True})\n", (132543, 132559), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), 
((132609, 132620), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (132618, 132620), False, 'from unittest.mock import call, patch, MagicMock\n'), ((133016, 133042), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'max': 3}"], {}), "({'max': 3})\n", (133030, 133042), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((133057, 133078), 'pypyr.context.Context', 'Context', (["{'k1': 'v1'}"], {}), "({'k1': 'v1'})\n", (133064, 133078), False, 'from pypyr.context import Context\n'), ((133090, 133101), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (133099, 133101), False, 'from unittest.mock import call, patch, MagicMock\n'), ((133865, 133914), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'stop': '{k1}', 'sleep': '{k2}'}"], {}), "({'stop': '{k1}', 'sleep': '{k2}'})\n", (133879, 133914), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((133929, 133962), 'pypyr.context.Context', 'Context', (["{'k1': False, 'k2': 0.3}"], {}), "({'k1': False, 'k2': 0.3})\n", (133936, 133962), False, 'from pypyr.context import Context\n'), ((135282, 135341), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'max': 5, 'stop': '{k1}', 'sleep': '{k2}'}"], {}), "({'max': 5, 'stop': '{k1}', 'sleep': '{k2}'})\n", (135296, 135341), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((135356, 135389), 'pypyr.context.Context', 'Context', (["{'k1': False, 'k2': 0.3}"], {}), "({'k1': False, 'k2': 0.3})\n", (135363, 135389), False, 'from pypyr.context import Context\n'), ((136713, 136772), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'max': 3, 'stop': '{k1}', 'sleep': '{k2}'}"], {}), "({'max': 3, 'stop': '{k1}', 'sleep': '{k2}'})\n", (136727, 136772), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, 
WhileDecorator\n'), ((136787, 136820), 'pypyr.context.Context', 'Context', (["{'k1': False, 'k2': 0.3}"], {}), "({'k1': False, 'k2': 0.3})\n", (136794, 136820), False, 'from pypyr.context import Context\n'), ((138101, 138186), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'max': 3, 'stop': '{k1}', 'sleep': '{k2}', 'errorOnMax': '{k3}'}"], {}), "({'max': 3, 'stop': '{k1}', 'sleep': '{k2}', 'errorOnMax':\n '{k3}'})\n", (138115, 138186), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((138272, 138317), 'pypyr.context.Context', 'Context', (["{'k1': False, 'k2': 0.3, 'k3': True}"], {}), "({'k1': False, 'k2': 0.3, 'k3': True})\n", (138279, 138317), False, 'from pypyr.context import Context\n'), ((140167, 140230), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'max': 3, 'sleep': '{k2}', 'errorOnMax': True}"], {}), "({'max': 3, 'sleep': '{k2}', 'errorOnMax': True})\n", (140181, 140230), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((140295, 140340), 'pypyr.context.Context', 'Context', (["{'k1': False, 'k2': 0.3, 'k3': True}"], {}), "({'k1': False, 'k2': 0.3, 'k3': True})\n", (140302, 140340), False, 'from pypyr.context import Context\n'), ((142094, 142202), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'max': '{k3[1][k031]}', 'stop': '{k1}', 'sleep': '{k2}', 'errorOnMax':\n '{k3[1][k032]}'}"], {}), "({'max': '{k3[1][k031]}', 'stop': '{k1}', 'sleep': '{k2}',\n 'errorOnMax': '{k3[1][k032]}'})\n", (142108, 142202), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((142288, 142360), 'pypyr.context.Context', 'Context', (["{'k1': False, 'k2': 0.3, 'k3': [0, {'k031': 1, 'k032': False}]}"], {}), "({'k1': False, 'k2': 0.3, 'k3': [0, {'k031': 1, 'k032': False}]})\n", (142295, 142360), False, 'from pypyr.context import Context\n'), ((1179, 1213), 
'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (1192, 1213), False, 'import pytest\n'), ((1341, 1366), 'pypyr.dsl.SpecialTagDirective', 'SpecialTagDirective', (['None'], {}), '(None)\n', (1360, 1366), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((1370, 1395), 'pypyr.dsl.SpecialTagDirective', 'SpecialTagDirective', (['None'], {}), '(None)\n', (1389, 1395), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((1407, 1434), 'pypyr.dsl.SpecialTagDirective', 'SpecialTagDirective', (['"""none"""'], {}), "('none')\n", (1426, 1434), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((1438, 1465), 'pypyr.dsl.SpecialTagDirective', 'SpecialTagDirective', (['"""some"""'], {}), "('some')\n", (1457, 1465), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((1975, 2000), 'pypyr.dsl.SpecialTagDirective', 'SpecialTagDirective', (['None'], {}), '(None)\n', (1994, 2000), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((2016, 2039), 'pypyr.dsl.SpecialTagDirective', 'SpecialTagDirective', (['""""""'], {}), "('')\n", (2035, 2039), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((2306, 2357), 'pypyr.dsl.Jsonify', 'Jsonify', (["{'a': 'string here', 'b': 123, 'c': False}"], {}), "({'a': 'string here', 'b': 123, 'c': False})\n", (2313, 2357), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((13280, 13295), 'pypyr.dsl.PyString', 'PyString', (['"""arb"""'], {}), "('arb')\n", (13288, 13295), False, 'from pypyr.dsl import Jsonify, 
PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((13299, 13314), 'pypyr.dsl.PyString', 'PyString', (['"""arb"""'], {}), "('arb')\n", (13307, 13314), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((13326, 13342), 'pypyr.dsl.PyString', 'PyString', (['"""blah"""'], {}), "('blah')\n", (13334, 13342), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((13346, 13361), 'pypyr.dsl.PyString', 'PyString', (['"""arb"""'], {}), "('arb')\n", (13354, 13361), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((13779, 13804), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13792, 13804), False, 'import pytest\n'), ((14005, 14030), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14018, 14030), False, 'import pytest\n'), ((14209, 14223), 'pypyr.dsl.PyString', 'PyString', (['None'], {}), '(None)\n', (14217, 14223), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((14239, 14251), 'pypyr.dsl.PyString', 'PyString', (['""""""'], {}), "('')\n", (14247, 14251), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((15486, 15502), 'pypyr.dsl.SicString', 'SicString', (['"""arb"""'], {}), "('arb')\n", (15495, 15502), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((15506, 15522), 'pypyr.dsl.SicString', 'SicString', (['"""arb"""'], {}), "('arb')\n", (15515, 15522), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((15534, 15551), 'pypyr.dsl.SicString', 'SicString', (['"""blah"""'], {}), 
"('blah')\n", (15543, 15551), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((15555, 15571), 'pypyr.dsl.SicString', 'SicString', (['"""arb"""'], {}), "('arb')\n", (15564, 15571), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((16021, 16036), 'pypyr.dsl.SicString', 'SicString', (['None'], {}), '(None)\n', (16030, 16036), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((16052, 16065), 'pypyr.dsl.SicString', 'SicString', (['""""""'], {}), "('')\n", (16061, 16065), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((17380, 17405), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""'], {}), "('pypyr.dsl')\n", (17392, 17405), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((17443, 17470), 'pypyr.dsl.Step', 'Step', (['"""blah"""', '"""stepsrunner"""'], {}), "('blah', 'stepsrunner')\n", (17447, 17470), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((18348, 18373), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""'], {}), "('pypyr.dsl')\n", (18360, 18373), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((18411, 18448), 'pypyr.dsl.Step', 'Step', (["{'name': 'blah'}", '"""stepsrunner"""'], {}), "({'name': 'blah'}, 'stepsrunner')\n", (18415, 18448), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((19293, 19331), 'pytest.raises', 'pytest.raises', (['PipelineDefinitionError'], {}), '(PipelineDefinitionError)\n', (19306, 19331), False, 'import pytest\n'), ((20004, 20033), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), 
'(AttributeError)\n', (20017, 20033), False, 'import pytest\n'), ((20982, 21011), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (20995, 21011), False, 'import pytest\n'), ((25452, 25493), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.NOTIFY'], {}), "('pypyr.dsl', logging.NOTIFY)\n", (25464, 25493), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((26295, 26334), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (26307, 26334), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((27298, 27337), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (27310, 27337), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((29436, 29475), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (29448, 29475), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((30471, 30510), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (30483, 30510), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((31729, 31768), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (31741, 31768), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((33171, 33210), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (33183, 33210), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((35034, 35073), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', 
logging.INFO)\n", (35046, 35073), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((36484, 36523), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (36496, 36523), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((38031, 38104), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {'side_effect': 'mock_step_deliberate_error'}), "(Step, 'invoke_step', side_effect=mock_step_deliberate_error)\n", (38043, 38104), False, 'from unittest.mock import call, patch, MagicMock\n'), ((40866, 40905), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (40878, 40905), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((42144, 42183), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (42156, 42183), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((43496, 43535), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (43508, 43535), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((45018, 45057), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (45030, 45057), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((46482, 46521), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (46494, 46521), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((47901, 47940), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (47913, 47940), False, 'from 
tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((55110, 55139), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (55123, 55139), False, 'import pytest\n'), ((57750, 57789), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (57762, 57789), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((58583, 58622), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (58595, 58622), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((59434, 59473), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (59446, 59473), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((60306, 60345), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (60318, 60345), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((61165, 61204), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (61177, 61204), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((61989, 62029), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (62001, 62029), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((63189, 63229), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (63201, 63229), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((64365, 64405), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', 
logging.DEBUG)\n", (64377, 64405), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((65553, 65593), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (65565, 65593), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((66746, 66786), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (66758, 66786), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((67931, 67971), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (67943, 67971), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((69207, 69247), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (69219, 69247), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((70501, 70541), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (70513, 70541), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((71763, 71803), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (71775, 71803), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((72907, 72946), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (72919, 72946), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((73713, 73752), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (73725, 73752), False, 'from tests.common.utils import DeepCopyMagicMock, 
patch_logger\n'), ((74578, 74617), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (74590, 74617), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((75450, 75489), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (75462, 75489), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((76312, 76351), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (76324, 76351), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((77147, 77187), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (77159, 77187), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((78320, 78360), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (78332, 78360), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((79469, 79509), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (79481, 79509), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((80631, 80670), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (80643, 80670), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((81453, 81492), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (81465, 81492), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((82292, 82332), 'tests.common.utils.patch_logger', 'patch_logger', 
(['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (82304, 82332), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((83451, 83491), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (83463, 83491), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((84591, 84647), 'unittest.mock.patch.object', 'patch.object', (['Step', '"""invoke_step"""'], {'side_effect': 'arb_error'}), "(Step, 'invoke_step', side_effect=arb_error)\n", (84603, 84647), False, 'from unittest.mock import call, patch, MagicMock\n'), ((86438, 86463), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (86451, 86463), False, 'import pytest\n'), ((87539, 87579), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (87551, 87579), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((88724, 88749), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (88737, 88749), False, 'import pytest\n'), ((89629, 89669), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (89641, 89669), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((89707, 89726), 'pypyr.dsl.Step', 'Step', (['"""step1"""', 'None'], {}), "('step1', None)\n", (89711, 89726), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((90792, 91029), 'unittest.mock.call', 'call', ([], {'context': "{'key1': 'updated1', 'key2': 'updated2', 'key3': 'value3', 'key4': [{\n 'k4lk1': 'value4', 'k4lk2': 'value5'}, {'k4lk1': 'value6', 'k4lk2':\n 'value7'}], 'key5': False, 'key6': True, 'key7': 77, 'keyadded': 'added3'}"}), "(context={'key1': 'updated1', 'key2': 'updated2', 'key3': 'value3',\n 
'key4': [{'k4lk1': 'value4', 'k4lk2': 'value5'}, {'k4lk1': 'value6',\n 'k4lk2': 'value7'}], 'key5': False, 'key6': True, 'key7': 77,\n 'keyadded': 'added3'})\n", (90796, 91029), False, 'from unittest.mock import call, patch, MagicMock\n'), ((100326, 100364), 'pytest.raises', 'pytest.raises', (['PipelineDefinitionError'], {}), '(PipelineDefinitionError)\n', (100339, 100364), False, 'import pytest\n'), ((100386, 100407), 'pypyr.dsl.RetryDecorator', 'RetryDecorator', (['"""arb"""'], {}), "('arb')\n", (100400, 100407), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((101867, 101907), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (101879, 101907), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((102789, 102829), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (102801, 102829), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((103732, 103772), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (103744, 103772), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((104813, 104853), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (104825, 104853), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((105701, 105741), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (105713, 105741), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((106619, 106659), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', 
logging.ERROR)\n", (106631, 106659), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((107596, 107636), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (107608, 107636), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((108534, 108574), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (108546, 108574), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((109725, 109765), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (109737, 109765), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((110544, 110584), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (110556, 110584), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((111485, 111525), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (111497, 111525), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((112433, 112473), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (112445, 112473), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((113394, 113434), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (113406, 113434), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((114550, 114590), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (114562, 114590), False, 'from 
tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((115563, 115603), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (115575, 115603), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((116576, 116615), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (116588, 116615), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((117667, 117706), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (117679, 117706), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((118738, 118777), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (118750, 118777), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((119987, 120026), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (119999, 120026), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((121286, 121325), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (121298, 121325), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((122501, 122540), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (122513, 122540), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((126912, 126950), 'pytest.raises', 'pytest.raises', (['PipelineDefinitionError'], {}), '(PipelineDefinitionError)\n', (126925, 126950), False, 'import pytest\n'), ((126972, 126993), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (['"""arb"""'], {}), 
"('arb')\n", (126986, 126993), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((127220, 127258), 'pytest.raises', 'pytest.raises', (['PipelineDefinitionError'], {}), '(PipelineDefinitionError)\n', (127233, 127258), False, 'import pytest\n'), ((127280, 127311), 'pypyr.dsl.WhileDecorator', 'WhileDecorator', (["{'arb': 'arbv'}"], {}), "({'arb': 'arbv'})\n", (127294, 127311), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((130458, 130497), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (130470, 130497), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((131037, 131076), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (131049, 131076), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((131492, 131531), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (131504, 131531), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((131959, 131998), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (131971, 131998), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((132630, 132668), 'pytest.raises', 'pytest.raises', (['PipelineDefinitionError'], {}), '(PipelineDefinitionError)\n', (132643, 132668), False, 'import pytest\n'), ((133112, 133151), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (133124, 133151), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((134231, 134270), 'tests.common.utils.patch_logger', 
'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (134243, 134270), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((135658, 135697), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (135670, 135697), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((137015, 137054), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (137027, 137054), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((138512, 138551), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (138524, 138551), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((140535, 140574), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (140547, 140574), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((142597, 142636), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (142609, 142636), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((2571, 2592), 'pypyr.context.Context', 'Context', (["{'a': 'BBB'}"], {}), "({'a': 'BBB'})\n", (2578, 2592), False, 'from pypyr.context import Context\n'), ((3849, 3858), 'pypyr.context.Context', 'Context', ([], {}), '()\n', (3856, 3858), False, 'from pypyr.context import Context\n'), ((4980, 4989), 'pypyr.context.Context', 'Context', ([], {}), '()\n', (4987, 4989), False, 'from pypyr.context import Context\n'), ((6279, 6288), 'pypyr.context.Context', 'Context', ([], {}), '()\n', (6286, 6288), False, 'from pypyr.context import Context\n'), ((6338, 6347), 'pypyr.context.Context', 'Context', ([], {}), 
'()\n', (6345, 6347), False, 'from pypyr.context import Context\n'), ((6391, 6400), 'pypyr.context.Context', 'Context', ([], {}), '()\n', (6398, 6400), False, 'from pypyr.context import Context\n'), ((6442, 6451), 'pypyr.context.Context', 'Context', ([], {}), '()\n', (6449, 6451), False, 'from pypyr.context import Context\n'), ((11253, 11262), 'pypyr.context.Context', 'Context', ([], {}), '()\n', (11260, 11262), False, 'from pypyr.context import Context\n'), ((11700, 11709), 'pypyr.context.Context', 'Context', ([], {}), '()\n', (11707, 11709), False, 'from pypyr.context import Context\n'), ((12127, 12148), 'pypyr.context.Context', 'Context', (["{'a': '123'}"], {}), "({'a': '123'})\n", (12134, 12148), False, 'from pypyr.context import Context\n'), ((13684, 13693), 'pypyr.context.Context', 'Context', ([], {}), '()\n', (13691, 13693), False, 'from pypyr.context import Context\n'), ((14070, 14079), 'pypyr.context.Context', 'Context', ([], {}), '()\n', (14077, 14079), False, 'from pypyr.context import Context\n'), ((15355, 15376), 'pypyr.context.Context', 'Context', (["{'a': '123'}"], {}), "({'a': '123'})\n", (15362, 15376), False, 'from pypyr.context import Context\n'), ((18507, 18523), 'unittest.mock.call', 'call', (['"""starting"""'], {}), "('starting')\n", (18511, 18523), False, 'from unittest.mock import call, patch, MagicMock\n'), ((18533, 18557), 'unittest.mock.call', 'call', (['"""blah is complex."""'], {}), "('blah is complex.')\n", (18537, 18557), False, 'from unittest.mock import call, patch, MagicMock\n'), ((18567, 18590), 'unittest.mock.call', 'call', (['"""step name: blah"""'], {}), "('step name: blah')\n", (18571, 18590), False, 'from unittest.mock import call, patch, MagicMock\n'), ((18600, 18612), 'unittest.mock.call', 'call', (['"""done"""'], {}), "('done')\n", (18604, 18612), False, 'from unittest.mock import call, patch, MagicMock\n'), ((19358, 19398), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), 
"('pypyr.dsl', logging.ERROR)\n", (19370, 19398), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((19445, 19461), 'ruamel.yaml.comments.CommentedMap', 'CommentedMap', (['{}'], {}), '({})\n', (19457, 19461), False, 'from ruamel.yaml.comments import CommentedMap, CommentedSeq, TaggedScalar\n'), ((19521, 19542), 'pypyr.dsl.Step', 'Step', (['step_info', 'None'], {}), '(step_info, None)\n', (19525, 19542), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((19642, 19693), 'unittest.mock.call', 'call', (['"""Error at pipeline step yaml line: 7, col: 8"""'], {}), "('Error at pipeline step yaml line: 7, col: 8')\n", (19646, 19693), False, 'from unittest.mock import call, patch, MagicMock\n'), ((20060, 20100), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (20072, 20100), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((21038, 21078), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (21050, 21078), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((26369, 26410), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.NOTIFY'], {}), "('pypyr.dsl', logging.NOTIFY)\n", (26381, 26410), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((27372, 27413), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.NOTIFY'], {}), "('pypyr.dsl', logging.NOTIFY)\n", (27384, 27413), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((29581, 29614), 'unittest.mock.call', 'call', (['"""foreach: running step one"""'], {}), "('foreach: running step one')\n", (29585, 29614), False, 'from unittest.mock import call, patch, MagicMock\n'), ((29624, 29665), 'unittest.mock.call', 
'call', (['"""foreach decorator looped 1 times."""'], {}), "('foreach decorator looped 1 times.')\n", (29628, 29665), False, 'from unittest.mock import call, patch, MagicMock\n'), ((30616, 30649), 'unittest.mock.call', 'call', (['"""foreach: running step one"""'], {}), "('foreach: running step one')\n", (30620, 30649), False, 'from unittest.mock import call, patch, MagicMock\n'), ((30659, 30692), 'unittest.mock.call', 'call', (['"""foreach: running step two"""'], {}), "('foreach: running step two')\n", (30663, 30692), False, 'from unittest.mock import call, patch, MagicMock\n'), ((30702, 30743), 'unittest.mock.call', 'call', (['"""foreach decorator looped 2 times."""'], {}), "('foreach decorator looped 2 times.')\n", (30706, 30743), False, 'from unittest.mock import call, patch, MagicMock\n'), ((31874, 31910), 'unittest.mock.call', 'call', (['"""foreach: running step value1"""'], {}), "('foreach: running step value1')\n", (31878, 31910), False, 'from unittest.mock import call, patch, MagicMock\n'), ((31920, 31956), 'unittest.mock.call', 'call', (['"""foreach: running step value2"""'], {}), "('foreach: running step value2')\n", (31924, 31956), False, 'from unittest.mock import call, patch, MagicMock\n'), ((31966, 32000), 'unittest.mock.call', 'call', (['"""foreach: running step key3"""'], {}), "('foreach: running step key3')\n", (31970, 32000), False, 'from unittest.mock import call, patch, MagicMock\n'), ((32010, 32051), 'unittest.mock.call', 'call', (['"""foreach decorator looped 3 times."""'], {}), "('foreach decorator looped 3 times.')\n", (32014, 32051), False, 'from unittest.mock import call, patch, MagicMock\n'), ((33316, 33348), 'unittest.mock.call', 'call', (['"""foreach: running step 99"""'], {}), "('foreach: running step 99')\n", (33320, 33348), False, 'from unittest.mock import call, patch, MagicMock\n'), ((33358, 33392), 'unittest.mock.call', 'call', (['"""foreach: running step True"""'], {}), "('foreach: running step True')\n", (33362, 33392), False, 
'from unittest.mock import call, patch, MagicMock\n'), ((33402, 33443), 'unittest.mock.call', 'call', (['"""foreach: running step string here"""'], {}), "('foreach: running step string here')\n", (33406, 33443), False, 'from unittest.mock import call, patch, MagicMock\n'), ((33453, 33499), 'unittest.mock.call', 'call', (['"""foreach: running step formatted value1"""'], {}), "('foreach: running step formatted value1')\n", (33457, 33499), False, 'from unittest.mock import call, patch, MagicMock\n'), ((33509, 33550), 'unittest.mock.call', 'call', (['"""foreach decorator looped 4 times."""'], {}), "('foreach decorator looped 4 times.')\n", (33513, 33550), False, 'from unittest.mock import call, patch, MagicMock\n'), ((35179, 35215), 'unittest.mock.call', 'call', (['"""foreach: running step value1"""'], {}), "('foreach: running step value1')\n", (35183, 35215), False, 'from unittest.mock import call, patch, MagicMock\n'), ((35225, 35261), 'unittest.mock.call', 'call', (['"""foreach: running step value2"""'], {}), "('foreach: running step value2')\n", (35229, 35261), False, 'from unittest.mock import call, patch, MagicMock\n'), ((35271, 35318), 'unittest.mock.call', 'call', (['"""step1 not running because run is False."""'], {}), "('step1 not running because run is False.')\n", (35275, 35318), False, 'from unittest.mock import call, patch, MagicMock\n'), ((35328, 35362), 'unittest.mock.call', 'call', (['"""foreach: running step key3"""'], {}), "('foreach: running step key3')\n", (35332, 35362), False, 'from unittest.mock import call, patch, MagicMock\n'), ((35372, 35419), 'unittest.mock.call', 'call', (['"""step1 not running because run is False."""'], {}), "('step1 not running because run is False.')\n", (35376, 35419), False, 'from unittest.mock import call, patch, MagicMock\n'), ((35429, 35470), 'unittest.mock.call', 'call', (['"""foreach decorator looped 3 times."""'], {}), "('foreach decorator looped 3 times.')\n", (35433, 35470), False, 'from unittest.mock import 
call, patch, MagicMock\n'), ((36629, 36665), 'unittest.mock.call', 'call', (['"""foreach: running step value1"""'], {}), "('foreach: running step value1')\n", (36633, 36665), False, 'from unittest.mock import call, patch, MagicMock\n'), ((36675, 36711), 'unittest.mock.call', 'call', (['"""foreach: running step value2"""'], {}), "('foreach: running step value2')\n", (36679, 36711), False, 'from unittest.mock import call, patch, MagicMock\n'), ((36721, 36768), 'unittest.mock.call', 'call', (['"""step1 not running because skip is True."""'], {}), "('step1 not running because skip is True.')\n", (36725, 36768), False, 'from unittest.mock import call, patch, MagicMock\n'), ((36778, 36812), 'unittest.mock.call', 'call', (['"""foreach: running step key3"""'], {}), "('foreach: running step key3')\n", (36782, 36812), False, 'from unittest.mock import call, patch, MagicMock\n'), ((36822, 36869), 'unittest.mock.call', 'call', (['"""step1 not running because skip is True."""'], {}), "('step1 not running because skip is True.')\n", (36826, 36869), False, 'from unittest.mock import call, patch, MagicMock\n'), ((36879, 36920), 'unittest.mock.call', 'call', (['"""foreach decorator looped 3 times."""'], {}), "('foreach decorator looped 3 times.')\n", (36883, 36920), False, 'from unittest.mock import call, patch, MagicMock\n'), ((38156, 38195), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.INFO'], {}), "('pypyr.dsl', logging.INFO)\n", (38168, 38195), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((38389, 38425), 'unittest.mock.call', 'call', (['"""foreach: running step value1"""'], {}), "('foreach: running step value1')\n", (38393, 38425), False, 'from unittest.mock import call, patch, MagicMock\n'), ((38435, 38471), 'unittest.mock.call', 'call', (['"""foreach: running step value2"""'], {}), "('foreach: running step value2')\n", (38439, 38471), False, 'from unittest.mock import call, patch, MagicMock\n'), ((38481, 
38515), 'unittest.mock.call', 'call', (['"""foreach: running step key3"""'], {}), "('foreach: running step key3')\n", (38485, 38515), False, 'from unittest.mock import call, patch, MagicMock\n'), ((38525, 38566), 'unittest.mock.call', 'call', (['"""foreach decorator looped 3 times."""'], {}), "('foreach decorator looped 3 times.')\n", (38529, 38566), False, 'from unittest.mock import call, patch, MagicMock\n'), ((39637, 39676), 'pypyr.dsl.PyString', 'PyString', (['"""product([1, 2], ["A", "B"])"""'], {}), '(\'product([1, 2], ["A", "B"])\')\n', (39645, 39676), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((40209, 40234), 'pypyr.dsl.PyString', 'PyString', (['"""test_iterator"""'], {}), "('test_iterator')\n", (40217, 40234), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((41011, 41071), 'unittest.mock.call', 'call', (['"""while decorator will loop 3 times at 0.0s intervals."""'], {}), "('while decorator will loop 3 times at 0.0s intervals.')\n", (41015, 41071), False, 'from unittest.mock import call, patch, MagicMock\n'), ((41081, 41123), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with counter 1')\n", (41085, 41123), False, 'from unittest.mock import call, patch, MagicMock\n'), ((41133, 41175), 'unittest.mock.call', 'call', (['"""while: running step with counter 2"""'], {}), "('while: running step with counter 2')\n", (41137, 41175), False, 'from unittest.mock import call, patch, MagicMock\n'), ((41185, 41227), 'unittest.mock.call', 'call', (['"""while: running step with counter 3"""'], {}), "('while: running step with counter 3')\n", (41189, 41227), False, 'from unittest.mock import call, patch, MagicMock\n'), ((42289, 42394), 'unittest.mock.call', 'call', (['"""while decorator will loop 3 times, or until {key5} evaluates to True at 0.0s intervals."""'], 
{}), "(\n 'while decorator will loop 3 times, or until {key5} evaluates to True at 0.0s intervals.'\n )\n", (42293, 42394), False, 'from unittest.mock import call, patch, MagicMock\n'), ((42410, 42452), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with counter 1')\n", (42414, 42452), False, 'from unittest.mock import call, patch, MagicMock\n'), ((42462, 42504), 'unittest.mock.call', 'call', (['"""while: running step with counter 2"""'], {}), "('while: running step with counter 2')\n", (42466, 42504), False, 'from unittest.mock import call, patch, MagicMock\n'), ((42514, 42561), 'unittest.mock.call', 'call', (['"""step1 not running because run is False."""'], {}), "('step1 not running because run is False.')\n", (42518, 42561), False, 'from unittest.mock import call, patch, MagicMock\n'), ((42571, 42613), 'unittest.mock.call', 'call', (['"""while: running step with counter 3"""'], {}), "('while: running step with counter 3')\n", (42575, 42613), False, 'from unittest.mock import call, patch, MagicMock\n'), ((42623, 42670), 'unittest.mock.call', 'call', (['"""step1 not running because run is False."""'], {}), "('step1 not running because run is False.')\n", (42627, 42670), False, 'from unittest.mock import call, patch, MagicMock\n'), ((42680, 42755), 'unittest.mock.call', 'call', (['"""while decorator looped 3 times, and {key5} never evaluated to True."""'], {}), "('while decorator looped 3 times, and {key5} never evaluated to True.')\n", (42684, 42755), False, 'from unittest.mock import call, patch, MagicMock\n'), ((43570, 43595), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (43583, 43595), False, 'import pytest\n'), ((43741, 43801), 'unittest.mock.call', 'call', (['"""while decorator will loop 3 times at 0.0s intervals."""'], {}), "('while decorator will loop 3 times at 0.0s intervals.')\n", (43745, 43801), False, 'from unittest.mock import call, patch, MagicMock\n'), ((43811, 
43853), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with counter 1')\n", (43815, 43853), False, 'from unittest.mock import call, patch, MagicMock\n'), ((43863, 43905), 'unittest.mock.call', 'call', (['"""while: running step with counter 2"""'], {}), "('while: running step with counter 2')\n", (43867, 43905), False, 'from unittest.mock import call, patch, MagicMock\n'), ((45092, 45128), 'pytest.raises', 'pytest.raises', (['LoopMaxExhaustedError'], {}), '(LoopMaxExhaustedError)\n', (45105, 45128), False, 'import pytest\n'), ((45364, 45469), 'unittest.mock.call', 'call', (['"""while decorator will loop 3 times, or until {key5} evaluates to True at 0.0s intervals."""'], {}), "(\n 'while decorator will loop 3 times, or until {key5} evaluates to True at 0.0s intervals.'\n )\n", (45368, 45469), False, 'from unittest.mock import call, patch, MagicMock\n'), ((45485, 45527), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with counter 1')\n", (45489, 45527), False, 'from unittest.mock import call, patch, MagicMock\n'), ((45537, 45579), 'unittest.mock.call', 'call', (['"""while: running step with counter 2"""'], {}), "('while: running step with counter 2')\n", (45541, 45579), False, 'from unittest.mock import call, patch, MagicMock\n'), ((45589, 45631), 'unittest.mock.call', 'call', (['"""while: running step with counter 3"""'], {}), "('while: running step with counter 3')\n", (45593, 45631), False, 'from unittest.mock import call, patch, MagicMock\n'), ((46556, 46592), 'pytest.raises', 'pytest.raises', (['LoopMaxExhaustedError'], {}), '(LoopMaxExhaustedError)\n', (46569, 46592), False, 'import pytest\n'), ((46753, 46857), 'unittest.mock.call', 'call', (['"""while decorator will loop 3 times, or until False evaluates to True at 0.0s intervals."""'], {}), "(\n 'while decorator will loop 3 times, or until False evaluates to True at 0.0s intervals.'\n )\n", 
(46757, 46857), False, 'from unittest.mock import call, patch, MagicMock\n'), ((46873, 46915), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with counter 1')\n", (46877, 46915), False, 'from unittest.mock import call, patch, MagicMock\n'), ((46925, 46967), 'unittest.mock.call', 'call', (['"""while: running step with counter 2"""'], {}), "('while: running step with counter 2')\n", (46929, 46967), False, 'from unittest.mock import call, patch, MagicMock\n'), ((46977, 47019), 'unittest.mock.call', 'call', (['"""while: running step with counter 3"""'], {}), "('while: running step with counter 3')\n", (46981, 47019), False, 'from unittest.mock import call, patch, MagicMock\n'), ((48046, 48106), 'unittest.mock.call', 'call', (['"""while decorator will loop 2 times at 0.0s intervals."""'], {}), "('while decorator will loop 2 times at 0.0s intervals.')\n", (48050, 48106), False, 'from unittest.mock import call, patch, MagicMock\n'), ((48116, 48158), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with counter 1')\n", (48120, 48158), False, 'from unittest.mock import call, patch, MagicMock\n'), ((48168, 48204), 'unittest.mock.call', 'call', (['"""foreach: running step value1"""'], {}), "('foreach: running step value1')\n", (48172, 48204), False, 'from unittest.mock import call, patch, MagicMock\n'), ((48214, 48250), 'unittest.mock.call', 'call', (['"""foreach: running step value2"""'], {}), "('foreach: running step value2')\n", (48218, 48250), False, 'from unittest.mock import call, patch, MagicMock\n'), ((48260, 48294), 'unittest.mock.call', 'call', (['"""foreach: running step key3"""'], {}), "('foreach: running step key3')\n", (48264, 48294), False, 'from unittest.mock import call, patch, MagicMock\n'), ((48304, 48345), 'unittest.mock.call', 'call', (['"""foreach decorator looped 3 times."""'], {}), "('foreach decorator looped 3 times.')\n", (48308, 48345), 
False, 'from unittest.mock import call, patch, MagicMock\n'), ((48355, 48397), 'unittest.mock.call', 'call', (['"""while: running step with counter 2"""'], {}), "('while: running step with counter 2')\n", (48359, 48397), False, 'from unittest.mock import call, patch, MagicMock\n'), ((48407, 48443), 'unittest.mock.call', 'call', (['"""foreach: running step value1"""'], {}), "('foreach: running step value1')\n", (48411, 48443), False, 'from unittest.mock import call, patch, MagicMock\n'), ((48453, 48489), 'unittest.mock.call', 'call', (['"""foreach: running step value2"""'], {}), "('foreach: running step value2')\n", (48457, 48489), False, 'from unittest.mock import call, patch, MagicMock\n'), ((48499, 48533), 'unittest.mock.call', 'call', (['"""foreach: running step key3"""'], {}), "('foreach: running step key3')\n", (48503, 48533), False, 'from unittest.mock import call, patch, MagicMock\n'), ((48543, 48584), 'unittest.mock.call', 'call', (['"""foreach decorator looped 3 times."""'], {}), "('foreach decorator looped 3 times.')\n", (48547, 48584), False, 'from unittest.mock import call, patch, MagicMock\n'), ((70577, 70602), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (70590, 70602), False, 'import pytest\n'), ((84695, 84735), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (84707, 84735), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((87615, 87640), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (87628, 87640), False, 'import pytest\n'), ((89740, 89765), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (89753, 89765), False, 'import pytest\n'), ((103808, 103848), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (103820, 103848), False, 'from tests.common.utils import DeepCopyMagicMock, 
patch_logger\n'), ((104889, 104914), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (104902, 104914), False, 'import pytest\n'), ((105777, 105802), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (105790, 105802), False, 'import pytest\n'), ((106695, 106720), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (106708, 106720), False, 'import pytest\n'), ((108610, 108650), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (108622, 108650), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((109801, 109826), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (109814, 109826), False, 'import pytest\n'), ((110620, 110645), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (110633, 110645), False, 'import pytest\n'), ((111561, 111586), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (111574, 111586), False, 'import pytest\n'), ((113470, 113510), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (113482, 113510), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((114626, 114653), 'pytest.raises', 'pytest.raises', (['HandledError'], {}), '(HandledError)\n', (114639, 114653), False, 'import pytest\n'), ((115639, 115666), 'pytest.raises', 'pytest.raises', (['HandledError'], {}), '(HandledError)\n', (115652, 115666), False, 'import pytest\n'), ((116650, 116675), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (116663, 116675), False, 'import pytest\n'), ((117081, 117176), 'unittest.mock.call', 'call', (['"""retry decorator will try 3 times with fixed backoff starting at 0s intervals."""'], {}), "(\n 'retry decorator will try 3 times with fixed backoff starting at 0s intervals.'\n )\n", (117085, 
117176), False, 'from unittest.mock import call, patch, MagicMock\n'), ((117192, 117234), 'unittest.mock.call', 'call', (['"""retry: running step with counter 1"""'], {}), "('retry: running step with counter 1')\n", (117196, 117234), False, 'from unittest.mock import call, patch, MagicMock\n'), ((117244, 117286), 'unittest.mock.call', 'call', (['"""retry: running step with counter 2"""'], {}), "('retry: running step with counter 2')\n", (117248, 117286), False, 'from unittest.mock import call, patch, MagicMock\n'), ((117296, 117338), 'unittest.mock.call', 'call', (['"""retry: running step with counter 3"""'], {}), "('retry: running step with counter 3')\n", (117300, 117338), False, 'from unittest.mock import call, patch, MagicMock\n'), ((117541, 117554), 'pypyr.dsl.PyString', 'PyString', (['"""3"""'], {}), "('3')\n", (117549, 117554), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((117741, 117766), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (117754, 117766), False, 'import pytest\n'), ((118172, 118267), 'unittest.mock.call', 'call', (['"""retry decorator will try 3 times with fixed backoff starting at 0s intervals."""'], {}), "(\n 'retry decorator will try 3 times with fixed backoff starting at 0s intervals.'\n )\n", (118176, 118267), False, 'from unittest.mock import call, patch, MagicMock\n'), ((118283, 118325), 'unittest.mock.call', 'call', (['"""retry: running step with counter 1"""'], {}), "('retry: running step with counter 1')\n", (118287, 118325), False, 'from unittest.mock import call, patch, MagicMock\n'), ((118335, 118377), 'unittest.mock.call', 'call', (['"""retry: running step with counter 2"""'], {}), "('retry: running step with counter 2')\n", (118339, 118377), False, 'from unittest.mock import call, patch, MagicMock\n'), ((118387, 118429), 'unittest.mock.call', 'call', (['"""retry: running step with counter 3"""'], {}), "('retry: running step 
with counter 3')\n", (118391, 118429), False, 'from unittest.mock import call, patch, MagicMock\n'), ((118812, 118852), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (118824, 118852), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((119316, 119414), 'unittest.mock.call', 'call', (['"""retry decorator will try 3 times with fixed backoff starting at 10.1s intervals."""'], {}), "(\n 'retry decorator will try 3 times with fixed backoff starting at 10.1s intervals.'\n )\n", (119320, 119414), False, 'from unittest.mock import call, patch, MagicMock\n'), ((119430, 119472), 'unittest.mock.call', 'call', (['"""retry: running step with counter 1"""'], {}), "('retry: running step with counter 1')\n", (119434, 119472), False, 'from unittest.mock import call, patch, MagicMock\n'), ((119482, 119524), 'unittest.mock.call', 'call', (['"""retry: running step with counter 2"""'], {}), "('retry: running step with counter 2')\n", (119486, 119524), False, 'from unittest.mock import call, patch, MagicMock\n'), ((120061, 120101), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.DEBUG'], {}), "('pypyr.dsl', logging.DEBUG)\n", (120073, 120101), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((120447, 120457), 'unittest.mock.call', 'call', (['(10.1)'], {}), '(10.1)\n', (120451, 120457), False, 'from unittest.mock import call, patch, MagicMock\n'), ((120459, 120469), 'unittest.mock.call', 'call', (['(10.2)'], {}), '(10.2)\n', (120463, 120469), False, 'from unittest.mock import call, patch, MagicMock\n'), ((120471, 120481), 'unittest.mock.call', 'call', (['(10.2)'], {}), '(10.2)\n', (120475, 120481), False, 'from unittest.mock import call, patch, MagicMock\n'), ((120627, 120733), 'unittest.mock.call', 'call', (['"""retry decorator will try 5 times with fixed backoff starting at [10.1, 10.2]s intervals."""'], {}), "(\n 
'retry decorator will try 5 times with fixed backoff starting at [10.1, 10.2]s intervals.'\n )\n", (120631, 120733), False, 'from unittest.mock import call, patch, MagicMock\n'), ((120749, 120791), 'unittest.mock.call', 'call', (['"""retry: running step with counter 1"""'], {}), "('retry: running step with counter 1')\n", (120753, 120791), False, 'from unittest.mock import call, patch, MagicMock\n'), ((120801, 120843), 'unittest.mock.call', 'call', (['"""retry: running step with counter 2"""'], {}), "('retry: running step with counter 2')\n", (120805, 120843), False, 'from unittest.mock import call, patch, MagicMock\n'), ((120853, 120895), 'unittest.mock.call', 'call', (['"""retry: running step with counter 3"""'], {}), "('retry: running step with counter 3')\n", (120857, 120895), False, 'from unittest.mock import call, patch, MagicMock\n'), ((120905, 120947), 'unittest.mock.call', 'call', (['"""retry: running step with counter 4"""'], {}), "('retry: running step with counter 4')\n", (120909, 120947), False, 'from unittest.mock import call, patch, MagicMock\n'), ((121693, 121796), 'unittest.mock.call', 'call', (['"""retry decorator will try indefinitely with fixed backoff starting at 10.1s intervals."""'], {}), "(\n 'retry decorator will try indefinitely with fixed backoff starting at 10.1s intervals.'\n )\n", (121697, 121796), False, 'from unittest.mock import call, patch, MagicMock\n'), ((121812, 121854), 'unittest.mock.call', 'call', (['"""retry: running step with counter 1"""'], {}), "('retry: running step with counter 1')\n", (121816, 121854), False, 'from unittest.mock import call, patch, MagicMock\n'), ((121864, 121906), 'unittest.mock.call', 'call', (['"""retry: running step with counter 2"""'], {}), "('retry: running step with counter 2')\n", (121868, 121906), False, 'from unittest.mock import call, patch, MagicMock\n'), ((121916, 121958), 'unittest.mock.call', 'call', (['"""retry: running step with counter 3"""'], {}), "('retry: running step with counter 
3')\n", (121920, 121958), False, 'from unittest.mock import call, patch, MagicMock\n'), ((122802, 122899), 'unittest.mock.call', 'call', (['"""retry decorator will try 1 times with fixed backoff starting at 0.3s intervals."""'], {}), "(\n 'retry decorator will try 1 times with fixed backoff starting at 0.3s intervals.'\n )\n", (122806, 122899), False, 'from unittest.mock import call, patch, MagicMock\n'), ((122915, 122957), 'unittest.mock.call', 'call', (['"""retry: running step with counter 1"""'], {}), "('retry: running step with counter 1')\n", (122919, 122957), False, 'from unittest.mock import call, patch, MagicMock\n'), ((124248, 124256), 'unittest.mock.call', 'call', (['(11)'], {}), '(11)\n', (124252, 124256), False, 'from unittest.mock import call, patch, MagicMock\n'), ((124258, 124266), 'unittest.mock.call', 'call', (['(12)'], {}), '(12)\n', (124262, 124266), False, 'from unittest.mock import call, patch, MagicMock\n'), ((124268, 124276), 'unittest.mock.call', 'call', (['(13)'], {}), '(13)\n', (124272, 124276), False, 'from unittest.mock import call, patch, MagicMock\n'), ((124316, 124328), 'unittest.mock.call', 'call', (['(4.5)', '(9)'], {}), '(4.5, 9)\n', (124320, 124328), False, 'from unittest.mock import call, patch, MagicMock\n'), ((124368, 124382), 'unittest.mock.call', 'call', (['(13.5)', '(27)'], {}), '(13.5, 27)\n', (124372, 124382), False, 'from unittest.mock import call, patch, MagicMock\n'), ((124422, 124434), 'unittest.mock.call', 'call', (['(15)', '(30)'], {}), '(15, 30)\n', (124426, 124434), False, 'from unittest.mock import call, patch, MagicMock\n'), ((125569, 125577), 'unittest.mock.call', 'call', (['(11)'], {}), '(11)\n', (125573, 125577), False, 'from unittest.mock import call, patch, MagicMock\n'), ((125579, 125587), 'unittest.mock.call', 'call', (['(12)'], {}), '(12)\n', (125583, 125587), False, 'from unittest.mock import call, patch, MagicMock\n'), ((125589, 125597), 'unittest.mock.call', 'call', (['(13)'], {}), '(13)\n', (125593, 
125597), False, 'from unittest.mock import call, patch, MagicMock\n'), ((125637, 125652), 'unittest.mock.call', 'call', (['(0.5)', '(0.25)'], {}), '(0.5, 0.25)\n', (125641, 125652), False, 'from unittest.mock import call, patch, MagicMock\n'), ((125692, 125706), 'unittest.mock.call', 'call', (['(0.4)', '(0.2)'], {}), '(0.4, 0.2)\n', (125696, 125706), False, 'from unittest.mock import call, patch, MagicMock\n'), ((125746, 125760), 'unittest.mock.call', 'call', (['(0.2)', '(0.1)'], {}), '(0.2, 0.1)\n', (125750, 125760), False, 'from unittest.mock import call, patch, MagicMock\n'), ((130541, 130550), 'pypyr.context.Context', 'Context', ([], {}), '()\n', (130548, 130550), False, 'from pypyr.context import Context\n'), ((130642, 130733), 'unittest.mock.call', 'call', (['"""while decorator will loop until True evaluates to True at 0.0s intervals."""'], {}), "(\n 'while decorator will loop until True evaluates to True at 0.0s intervals.'\n )\n", (130646, 130733), False, 'from unittest.mock import call, patch, MagicMock\n'), ((130749, 130791), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with counter 1')\n", (130753, 130791), False, 'from unittest.mock import call, patch, MagicMock\n'), ((130801, 130861), 'unittest.mock.call', 'call', (['"""while loop done, stop condition True evaluated True."""'], {}), "('while loop done, stop condition True evaluated True.')\n", (130805, 130861), False, 'from unittest.mock import call, patch, MagicMock\n'), ((131120, 131129), 'pypyr.context.Context', 'Context', ([], {}), '()\n', (131127, 131129), False, 'from pypyr.context import Context\n'), ((131220, 131269), 'unittest.mock.call', 'call', (['"""max 0 is 0. while only runs when max > 0."""'], {}), "('max 0 is 0. 
while only runs when max > 0.')\n", (131224, 131269), False, 'from unittest.mock import call, patch, MagicMock\n'), ((131575, 131593), 'pypyr.context.Context', 'Context', (["{'x': -3}"], {}), "({'x': -3})\n", (131582, 131593), False, 'from pypyr.context import Context\n'), ((131684, 131736), 'unittest.mock.call', 'call', (['"""max {x} is -3. while only runs when max > 0."""'], {}), "('max {x} is -3. while only runs when max > 0.')\n", (131688, 131736), False, 'from unittest.mock import call, patch, MagicMock\n'), ((132042, 132071), 'pypyr.context.Context', 'Context', (["{'thisistrue': True}"], {}), "({'thisistrue': True})\n", (132049, 132071), False, 'from pypyr.context import Context\n'), ((132197, 132296), 'unittest.mock.call', 'call', (['"""while decorator will loop until {thisistrue} evaluates to True at 0.0s intervals."""'], {}), "(\n 'while decorator will loop until {thisistrue} evaluates to True at 0.0s intervals.'\n )\n", (132201, 132296), False, 'from unittest.mock import call, patch, MagicMock\n'), ((132312, 132354), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with counter 1')\n", (132316, 132354), False, 'from unittest.mock import call, patch, MagicMock\n'), ((132364, 132432), 'unittest.mock.call', 'call', (['"""while loop done, stop condition {thisistrue} evaluated True."""'], {}), "('while loop done, stop condition {thisistrue} evaluated True.')\n", (132368, 132432), False, 'from unittest.mock import call, patch, MagicMock\n'), ((132704, 132713), 'pypyr.context.Context', 'Context', ([], {}), '()\n', (132711, 132713), False, 'from pypyr.context import Context\n'), ((133516, 133576), 'unittest.mock.call', 'call', (['"""while decorator will loop 3 times at 0.0s intervals."""'], {}), "('while decorator will loop 3 times at 0.0s intervals.')\n", (133520, 133576), False, 'from unittest.mock import call, patch, MagicMock\n'), ((133586, 133628), 'unittest.mock.call', 'call', (['"""while: running step 
with counter 1"""'], {}), "('while: running step with counter 1')\n", (133590, 133628), False, 'from unittest.mock import call, patch, MagicMock\n'), ((133638, 133680), 'unittest.mock.call', 'call', (['"""while: running step with counter 2"""'], {}), "('while: running step with counter 2')\n", (133642, 133680), False, 'from unittest.mock import call, patch, MagicMock\n'), ((133690, 133732), 'unittest.mock.call', 'call', (['"""while: running step with counter 3"""'], {}), "('while: running step with counter 3')\n", (133694, 133732), False, 'from unittest.mock import call, patch, MagicMock\n'), ((134128, 134145), 'copy.deepcopy', 'deepcopy', (['context'], {}), '(context)\n', (134136, 134145), False, 'from copy import deepcopy\n'), ((134795, 134886), 'unittest.mock.call', 'call', (['"""while decorator will loop until {k1} evaluates to True at 0.3s intervals."""'], {}), "(\n 'while decorator will loop until {k1} evaluates to True at 0.3s intervals.'\n )\n", (134799, 134886), False, 'from unittest.mock import call, patch, MagicMock\n'), ((134902, 134944), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with counter 1')\n", (134906, 134944), False, 'from unittest.mock import call, patch, MagicMock\n'), ((134954, 134996), 'unittest.mock.call', 'call', (['"""while: running step with counter 2"""'], {}), "('while: running step with counter 2')\n", (134958, 134996), False, 'from unittest.mock import call, patch, MagicMock\n'), ((135006, 135048), 'unittest.mock.call', 'call', (['"""while: running step with counter 3"""'], {}), "('while: running step with counter 3')\n", (135010, 135048), False, 'from unittest.mock import call, patch, MagicMock\n'), ((135058, 135118), 'unittest.mock.call', 'call', (['"""while loop done, stop condition {k1} evaluated True."""'], {}), "('while loop done, stop condition {k1} evaluated True.')\n", (135062, 135118), False, 'from unittest.mock import call, patch, MagicMock\n'), ((135555, 
135572), 'copy.deepcopy', 'deepcopy', (['context'], {}), '(context)\n', (135563, 135572), False, 'from copy import deepcopy\n'), ((136222, 136325), 'unittest.mock.call', 'call', (['"""while decorator will loop 5 times, or until {k1} evaluates to True at 0.3s intervals."""'], {}), "(\n 'while decorator will loop 5 times, or until {k1} evaluates to True at 0.3s intervals.'\n )\n", (136226, 136325), False, 'from unittest.mock import call, patch, MagicMock\n'), ((136341, 136383), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with counter 1')\n", (136345, 136383), False, 'from unittest.mock import call, patch, MagicMock\n'), ((136393, 136435), 'unittest.mock.call', 'call', (['"""while: running step with counter 2"""'], {}), "('while: running step with counter 2')\n", (136397, 136435), False, 'from unittest.mock import call, patch, MagicMock\n'), ((136445, 136487), 'unittest.mock.call', 'call', (['"""while: running step with counter 3"""'], {}), "('while: running step with counter 3')\n", (136449, 136487), False, 'from unittest.mock import call, patch, MagicMock\n'), ((136497, 136557), 'unittest.mock.call', 'call', (['"""while loop done, stop condition {k1} evaluated True."""'], {}), "('while loop done, stop condition {k1} evaluated True.')\n", (136501, 136557), False, 'from unittest.mock import call, patch, MagicMock\n'), ((136986, 137003), 'copy.deepcopy', 'deepcopy', (['context'], {}), '(context)\n', (136994, 137003), False, 'from copy import deepcopy\n'), ((137579, 137682), 'unittest.mock.call', 'call', (['"""while decorator will loop 3 times, or until {k1} evaluates to True at 0.3s intervals."""'], {}), "(\n 'while decorator will loop 3 times, or until {k1} evaluates to True at 0.3s intervals.'\n )\n", (137583, 137682), False, 'from unittest.mock import call, patch, MagicMock\n'), ((137698, 137740), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with 
counter 1')\n", (137702, 137740), False, 'from unittest.mock import call, patch, MagicMock\n'), ((137750, 137792), 'unittest.mock.call', 'call', (['"""while: running step with counter 2"""'], {}), "('while: running step with counter 2')\n", (137754, 137792), False, 'from unittest.mock import call, patch, MagicMock\n'), ((137802, 137844), 'unittest.mock.call', 'call', (['"""while: running step with counter 3"""'], {}), "('while: running step with counter 3')\n", (137806, 137844), False, 'from unittest.mock import call, patch, MagicMock\n'), ((137854, 137927), 'unittest.mock.call', 'call', (['"""while decorator looped 3 times, and {k1} never evaluated to True."""'], {}), "('while decorator looped 3 times, and {k1} never evaluated to True.')\n", (137858, 137927), False, 'from unittest.mock import call, patch, MagicMock\n'), ((138483, 138500), 'copy.deepcopy', 'deepcopy', (['context'], {}), '(context)\n', (138491, 138500), False, 'from copy import deepcopy\n'), ((138586, 138626), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (138598, 138626), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((139627, 139730), 'unittest.mock.call', 'call', (['"""while decorator will loop 3 times, or until {k1} evaluates to True at 0.3s intervals."""'], {}), "(\n 'while decorator will loop 3 times, or until {k1} evaluates to True at 0.3s intervals.'\n )\n", (139631, 139730), False, 'from unittest.mock import call, patch, MagicMock\n'), ((139746, 139788), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with counter 1')\n", (139750, 139788), False, 'from unittest.mock import call, patch, MagicMock\n'), ((139798, 139840), 'unittest.mock.call', 'call', (['"""while: running step with counter 2"""'], {}), "('while: running step with counter 2')\n", (139802, 139840), False, 'from unittest.mock import call, patch, MagicMock\n'), 
((139850, 139892), 'unittest.mock.call', 'call', (['"""while: running step with counter 3"""'], {}), "('while: running step with counter 3')\n", (139854, 139892), False, 'from unittest.mock import call, patch, MagicMock\n'), ((139948, 140017), 'unittest.mock.call', 'call', (['"""exhausted 3 iterations of while loop, and errorOnMax is True."""'], {}), "('exhausted 3 iterations of while loop, and errorOnMax is True.')\n", (139952, 140017), False, 'from unittest.mock import call, patch, MagicMock\n'), ((140506, 140523), 'copy.deepcopy', 'deepcopy', (['context'], {}), '(context)\n', (140514, 140523), False, 'from copy import deepcopy\n'), ((140609, 140649), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (140621, 140649), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((141606, 141666), 'unittest.mock.call', 'call', (['"""while decorator will loop 3 times at 0.3s intervals."""'], {}), "('while decorator will loop 3 times at 0.3s intervals.')\n", (141610, 141666), False, 'from unittest.mock import call, patch, MagicMock\n'), ((141676, 141718), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with counter 1')\n", (141680, 141718), False, 'from unittest.mock import call, patch, MagicMock\n'), ((141728, 141770), 'unittest.mock.call', 'call', (['"""while: running step with counter 2"""'], {}), "('while: running step with counter 2')\n", (141732, 141770), False, 'from unittest.mock import call, patch, MagicMock\n'), ((141780, 141822), 'unittest.mock.call', 'call', (['"""while: running step with counter 3"""'], {}), "('while: running step with counter 3')\n", (141784, 141822), False, 'from unittest.mock import call, patch, MagicMock\n'), ((141878, 141947), 'unittest.mock.call', 'call', (['"""exhausted 3 iterations of while loop, and errorOnMax is True."""'], {}), "('exhausted 3 iterations of while loop, and 
errorOnMax is True.')\n", (141882, 141947), False, 'from unittest.mock import call, patch, MagicMock\n'), ((142898, 143001), 'unittest.mock.call', 'call', (['"""while decorator will loop 1 times, or until {k1} evaluates to True at 0.3s intervals."""'], {}), "(\n 'while decorator will loop 1 times, or until {k1} evaluates to True at 0.3s intervals.'\n )\n", (142902, 143001), False, 'from unittest.mock import call, patch, MagicMock\n'), ((143017, 143059), 'unittest.mock.call', 'call', (['"""while: running step with counter 1"""'], {}), "('while: running step with counter 1')\n", (143021, 143059), False, 'from unittest.mock import call, patch, MagicMock\n'), ((143069, 143142), 'unittest.mock.call', 'call', (['"""while decorator looped 1 times, and {k1} never evaluated to True."""'], {}), "('while decorator looped 1 times, and {k1} never evaluated to True.')\n", (143073, 143142), False, 'from unittest.mock import call, patch, MagicMock\n'), ((12098, 12116), 'pypyr.dsl.PyString', 'PyString', (['"""len(a)"""'], {}), "('len(a)')\n", (12106, 12116), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((12383, 12417), 'pypyr.dsl.PyString', 'PyString', (['"""abs(a) + squareroot(b)"""'], {}), "('abs(a) + squareroot(b)')\n", (12391, 12417), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((12928, 12944), 'pypyr.dsl.PyString', 'PyString', (['source'], {}), '(source)\n', (12936, 12944), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((13821, 13835), 'pypyr.dsl.PyString', 'PyString', (['None'], {}), '(None)\n', (13829, 13835), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((14047, 14059), 'pypyr.dsl.PyString', 'PyString', (['""""""'], {}), "('')\n", (14055, 14059), False, 'from 
pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((15325, 15344), 'pypyr.dsl.SicString', 'SicString', (['"""len(a)"""'], {}), "('len(a)')\n", (15334, 15344), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((20140, 20192), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.cache.stepcache"""', 'logging.ERROR'], {}), "('pypyr.cache.stepcache', logging.ERROR)\n", (20152, 20192), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((20267, 20292), 'pypyr.dsl.Step', 'Step', (['"""mocked.step"""', 'None'], {}), "('mocked.step', None)\n", (20271, 20292), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((21118, 21170), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.cache.stepcache"""', 'logging.ERROR'], {}), "('pypyr.cache.stepcache', logging.ERROR)\n", (21130, 21170), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((21265, 21302), 'ruamel.yaml.comments.CommentedMap', 'CommentedMap', (["{'name': 'mocked.step'}"], {}), "({'name': 'mocked.step'})\n", (21277, 21302), False, 'from ruamel.yaml.comments import CommentedMap, CommentedSeq, TaggedScalar\n'), ((21378, 21407), 'pypyr.dsl.Step', 'Step', (['commented_context', 'None'], {}), '(commented_context, None)\n', (21382, 21407), False, 'from pypyr.dsl import Jsonify, PyString, SicString, SpecialTagDirective, Step, RetryDecorator, WhileDecorator\n'), ((38234, 38274), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (38246, 38274), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((84775, 84815), 'tests.common.utils.patch_logger', 'patch_logger', (['"""pypyr.dsl"""', 'logging.ERROR'], {}), "('pypyr.dsl', logging.ERROR)\n", (84787, 
84815), False, 'from tests.common.utils import DeepCopyMagicMock, patch_logger\n'), ((89805, 89826), 'pypyr.context.Context', 'Context', (["{'k1': 'v1'}"], {}), "({'k1': 'v1'})\n", (89812, 89826), False, 'from pypyr.context import Context\n'), ((138666, 138702), 'pytest.raises', 'pytest.raises', (['LoopMaxExhaustedError'], {}), '(LoopMaxExhaustedError)\n', (138679, 138702), False, 'import pytest\n'), ((140689, 140725), 'pytest.raises', 'pytest.raises', (['LoopMaxExhaustedError'], {}), '(LoopMaxExhaustedError)\n', (140702, 140725), False, 'import pytest\n')] |
import os, sys
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
# Noise fractions whose trained SOM weight files we want to inspect.
percentages = [0.01, 0.1, 0.2, 0.4, 0.5, 0.6]


def _show_cluster_centers(percentage, n_clusters=10, img_side=28,
                          grid_rows=2, grid_cols=5):
    """Cluster SOM weight vectors with k-means and display the centres.

    Loads the SOM weights trained on MNIST corrupted with the given noise
    fraction, flattens each unit's weight vector to ``img_side * img_side``
    values, fits k-means with ``n_clusters`` clusters, and shows the cluster
    centres as images on a ``grid_rows`` x ``grid_cols`` subplot grid.

    :param percentage: noise fraction used during training (selects the file)
    :param n_clusters: number of k-means clusters to fit
    :param img_side: side length of each (square) weight image
    :param grid_rows: rows in the subplot grid
    :param grid_cols: columns in the subplot grid
    """
    save_path = '../logs/SOM_weights_MNIST_noise_{}.npy'.format(percentage)
    weights = np.load(save_path).reshape(-1, img_side * img_side)
    print("============{}============".format(weights.shape))
    kmeans = KMeans(n_clusters=n_clusters).fit(weights)
    centers = kmeans.cluster_centers_
    # Show at most grid_rows * grid_cols centres, one subplot each.
    for idx in range(min(grid_rows * grid_cols, len(centers))):
        plt.subplot(grid_rows, grid_cols, idx + 1)
        # Transposed so the image orientation matches how the weights were
        # stored -- presumably column-major; TODO confirm against training.
        plt.imshow(centers[idx].reshape(img_side, img_side).T)
        if idx == 0:
            plt.title("MNIST Noise {}".format(percentage))
    plt.show()


for percentage in percentages:
    _show_cluster_centers(percentage)
| [
"sklearn.cluster.KMeans",
"numpy.load",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((647, 657), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (655, 657), True, 'import matplotlib.pyplot as plt\n'), ((272, 290), 'numpy.load', 'np.load', (['save_path'], {}), '(save_path)\n', (279, 290), True, 'import numpy as np\n'), ((374, 395), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(10)'}), '(n_clusters=10)\n', (380, 395), False, 'from sklearn.cluster import KMeans\n'), ((489, 521), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(5)', '(i * 5 + j + 1)'], {}), '(2, 5, i * 5 + j + 1)\n', (500, 521), True, 'import matplotlib.pyplot as plt\n')] |
from spinnman.messages.scp.abstract_messages.abstract_scp_request \
import AbstractSCPRequest
from spinnman.messages.sdp.sdp_header import SDPHeader
from spinnman.messages.sdp.sdp_flag import SDPFlag
from spinnman.messages.scp.scp_request_header import SCPRequestHeader
from spinnman.messages.scp.scp_command import SCPCommand
from spinnman.messages.scp.impl.scp_check_ok_response import SCPCheckOKResponse
_NNP_FORWARD_RETRY = (1 << 31) | (0x3f << 8) | 0x18
_NNP_FLOOD_FILL_START = 6
class SCPFloodFillStartRequest(AbstractSCPRequest):
""" A request to start a flood fill of data
"""
def __init__(self, nearest_neighbour_id, n_blocks, x=None, y=None):
"""
:param nearest_neighbour_id: The id of the packet, between 0 and 127
:type nearest_neighbour_id: int
:param n_blocks: The number of blocks of data that will be sent,\
between 0 and 255
:type n_blocks: int
:param x: The x-coordindate of the chip to load the data on to. If\
not specified, the data will be loaded on to all chips
:type x: int
:param y: The y-coordinate of the chip to load the data on to. If\
not specified, the data will be loaded on to all chips
:type y: int
"""
key = ((_NNP_FLOOD_FILL_START << 24) | (nearest_neighbour_id << 16) |
(n_blocks << 8))
data = 0xFFFF
if x is not None and y is not None:
m = ((y & 3) * 4) + (x & 3)
data = (((x & 0xfc) << 24) + ((y & 0xfc) << 16) +
(3 << 16) + (1 << m))
super(SCPFloodFillStartRequest, self).__init__(
SDPHeader(flags=SDPFlag.REPLY_EXPECTED, destination_port=0,
destination_cpu=0, destination_chip_x=0,
destination_chip_y=0),
SCPRequestHeader(command=SCPCommand.CMD_NNP),
argument_1=key, argument_2=data, argument_3=_NNP_FORWARD_RETRY)
def get_scp_response(self):
return SCPCheckOKResponse("Flood Fill", "CMD_NNP:NNP_FFS")
| [
"spinnman.messages.scp.scp_request_header.SCPRequestHeader",
"spinnman.messages.scp.impl.scp_check_ok_response.SCPCheckOKResponse",
"spinnman.messages.sdp.sdp_header.SDPHeader"
] | [((2039, 2090), 'spinnman.messages.scp.impl.scp_check_ok_response.SCPCheckOKResponse', 'SCPCheckOKResponse', (['"""Flood Fill"""', '"""CMD_NNP:NNP_FFS"""'], {}), "('Flood Fill', 'CMD_NNP:NNP_FFS')\n", (2057, 2090), False, 'from spinnman.messages.scp.impl.scp_check_ok_response import SCPCheckOKResponse\n'), ((1689, 1816), 'spinnman.messages.sdp.sdp_header.SDPHeader', 'SDPHeader', ([], {'flags': 'SDPFlag.REPLY_EXPECTED', 'destination_port': '(0)', 'destination_cpu': '(0)', 'destination_chip_x': '(0)', 'destination_chip_y': '(0)'}), '(flags=SDPFlag.REPLY_EXPECTED, destination_port=0, destination_cpu\n =0, destination_chip_x=0, destination_chip_y=0)\n', (1698, 1816), False, 'from spinnman.messages.sdp.sdp_header import SDPHeader\n'), ((1869, 1913), 'spinnman.messages.scp.scp_request_header.SCPRequestHeader', 'SCPRequestHeader', ([], {'command': 'SCPCommand.CMD_NNP'}), '(command=SCPCommand.CMD_NNP)\n', (1885, 1913), False, 'from spinnman.messages.scp.scp_request_header import SCPRequestHeader\n')] |
import threading
from flask import Flask, jsonify
from src import consts
from src.cv_recogniser import run_cv_recogniser
app = Flask(__name__)
@app.route('/get', methods=['GET'])
def get_counter():
return jsonify({
'total': consts.total,
'out': consts.ppl_out,
'in': consts.ppl_in
})
th = threading.Thread(target=run_cv_recogniser)
th.start()
app.run(debug=True, use_reloader=False)
| [
"flask.jsonify",
"threading.Thread",
"flask.Flask"
] | [((128, 143), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (133, 143), False, 'from flask import Flask, jsonify\n'), ((326, 368), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_cv_recogniser'}), '(target=run_cv_recogniser)\n', (342, 368), False, 'import threading\n'), ((212, 288), 'flask.jsonify', 'jsonify', (["{'total': consts.total, 'out': consts.ppl_out, 'in': consts.ppl_in}"], {}), "({'total': consts.total, 'out': consts.ppl_out, 'in': consts.ppl_in})\n", (219, 288), False, 'from flask import Flask, jsonify\n')] |
#! /usr/env/bin python
import os
import linecache
import multiprocessing
import numpy as np
from collections import OrderedDict
from CP2K_kit.tools import call
from CP2K_kit.tools import log_info
from CP2K_kit.tools import data_op
from CP2K_kit.tools import traj_info
from CP2K_kit.tools import file_tools
from CP2K_kit.deepff import load_data
def check_deepmd_model(deepmd_dic):
'''
check_deepmd_model: check the input file in the deepmd_model subsection
Args:
deepmd_dic: dictionary
deepmd_dic contains keywords used in deepmd.
Returns:
deepmd_dic: dictionary
deepmd_dic is the revised deepmd_dic.
'''
deepmd_valid_key = ['model', 'learning_rate', 'loss', 'training']
model_valid_key = ['type_map', 'descriptor']
descr_valid_key = ['type', 'sel', 'rcut_smth', 'rcut', 'neuron', 'axis_neuron']
lr_valid_key = ['type', 'start_lr', 'decay_steps', 'stop_lr']
loss_valid_key = ['start_pref_e', 'limit_pref_e', 'start_pref_f', 'limit_pref_f', 'start_pref_v', 'limit_pref_v']
training_valid_key = ['train_stress', 'shuffle_data', 'use_prev_model', 'fix_stop_batch', \
'lr_scale', 'epoch_num', 'set_data_dir', 'model_type', 'neuron', \
'stop_batch', 'batch_size', 'disp_freq', 'numb_test', 'save_freq']
for key in deepmd_dic.keys():
if key not in deepmd_valid_key:
log_info.log_error('Input error: %s is invalid key, please check or reset deepff/deepmd_model' %(key))
exit()
if ( 'model' not in deepmd_dic.keys() ):
log_info.log_error('Input error: no model, please set deepff/deepmd_model/model')
exit()
else:
for key in deepmd_dic['model'].keys():
if key not in model_valid_key:
log_info.log_error('Input error: %s is invalid key, please check or reset deepff/deepmd_model/model' %(key))
exit()
if ( 'type_map' in deepmd_dic['model'].keys() ):
if ( all(data_op.eval_str(i) == 0 for i in deepmd_dic['model']['type_map']) ):
pass
else:
log_info.log_error('Input error: type_map should be string, please check or reset deepff/deepmd_model/model/type_map')
exit()
else:
log_info.log_error('Input error: no type_map, please set deepff/deepmd_model/model/type_map')
exit()
if ( 'descriptor' not in deepmd_dic['model'].keys() ):
log_info.log_error('Input error: no descriptor, please set deepff/deepmd_model/model/descriptor')
exit()
else:
for key in deepmd_dic['model']['descriptor'].keys():
if key not in descr_valid_key:
log_info.log_error('Input error: %s is invalid key, please check or reset deepff/deepmd_model/model/descriptor' %(key))
exit()
valid_type = ['se_a', 'se_r', 'se_ar']
if ( 'type' in deepmd_dic['model']['descriptor'].keys() ):
descr_type = deepmd_dic['model']['descriptor']['type']
if ( descr_type in valid_type ):
pass
else:
log_info.log_error('Input error: %s is not supported for deepff/deepmd_model/model/descriptor/type')
exit()
if ( 'sel' in deepmd_dic['model']['descriptor'].keys() ):
sel = deepmd_dic['model']['descriptor']['sel']
if ( all(data_op.eval_str(i) == 1 for i in sel) ):
deepmd_dic['model']['descriptor']['sel'] = [int(x) for x in sel]
else:
log_info.log_error('Input error: sel shoule be list of integer, please check or reset deepff/deepmd_model/model/descriptor/sel')
exit()
else:
log_info.log_error('Input error: no sel, please set deepff/deepmd_model/model/descriptor/sel')
exit()
if ( 'rcut_smth' in deepmd_dic['model']['descriptor'].keys() ):
rcut_smth = deepmd_dic['model']['descriptor']['rcut_smth']
if ( data_op.eval_str(rcut_smth) == 1 or data_op.eval_str(rcut_smth) == 2 ):
deepmd_dic['model']['descriptor']['rcut_smth'] = float(rcut_smth)
else:
log_info.log_error('Input error: rcut_smth shoule be float, please check or reset deepff/deepmd_model/model/descriptor/rcut_smth')
exit()
else:
deepmd_dic['model']['descriptor']['rcut_smth'] = 5.0
if ( 'rcut' in deepmd_dic['model']['descriptor'].keys() ):
rcut = deepmd_dic['model']['descriptor']['rcut']
if ( data_op.eval_str(rcut) == 1 or data_op.eval_str(rcut) == 2 ):
deepmd_dic['model']['descriptor']['rcut'] = float(rcut)
else:
log_info.log_error('Input error: rcut should be float, please check or reset deepff/deepmd_model/model/descriptor/rcut')
exit()
else:
deepmd_dic['model']['descriptor']['rcut'] = 6.0
if ( 'neuron' in deepmd_dic['model']['descriptor'].keys() ):
neuron_encode = deepmd_dic['model']['descriptor']['neuron']
if ( all(data_op.eval_str(i) == 1 for i in neuron_encode) ):
deepmd_dic['model']['descriptor']['neuron'] = [int(x) for x in neuron_encode]
else:
log_info.log_error('Input error: neuron error, please check deepff/deepmd_model/model/descriptor/neuron')
exit()
else:
deepmd_dic['model']['descriptor']['neuron'] = [25, 50, 100]
deepmd_dic['model']['descriptor']['resnet_dt'] = False
if ( 'axis_neuron' in deepmd_dic['model']['descriptor'].keys() ):
axis_neuron = deepmd_dic['model']['descriptor']['axis_neuron']
if ( data_op.eval_str(axis_neuron) == 1 ):
deepmd_dic['model']['descriptor']['axis_neuron'] = int(axis_neuron)
else:
log_info.log_error('Input error: axis_neuron should be list of integer, please check deepff/deepmd_model/model/descriptor/axis_neuron')
exit()
else:
deepmd_dic['model']['descriptor']['axis_neuron'] = 16
deepmd_dic['model']['fitting_net'] = OrderedDict()
deepmd_dic['model']['fitting_net']['resnet_dt'] = True
if ( 'learning_rate' not in deepmd_dic.keys() ):
log_info.log_error('Input error: no learning_rate, please set deepff/deepmd_model/learning_rate')
exit()
else:
for key in deepmd_dic['learning_rate'].keys():
if key not in lr_valid_key:
log_info.log_error('Input error: %s is invalid key, please check or reset deepff/deepmd_model/learning_rate' %(key))
exit()
if ( 'type' in deepmd_dic['learning_rate'].keys() ):
decay_type = deepmd_dic['learning_rate']['type']
if ( data_op.eval_str(decay_type) == 0 ):
pass
else:
log_info.log_error('Input error: type in learning_rate should be string, please check or reset deepff/deepmd_model/learning_rate/type')
exit()
else:
deepmd_dic['learning_rate']['type'] = 'exp'
if ( 'start_lr' in deepmd_dic['learning_rate'].keys() ):
start_lr = deepmd_dic['learning_rate']['start_lr']
if ( data_op.eval_str(start_lr) == 2 ):
deepmd_dic['learning_rate']['start_lr'] = float(start_lr)
else:
log_info.log_error('Input error: start_lr should be float, please check deepff/deepmd_model/learning_rate/start_lr')
exit()
else:
deepmd_dic['learning_rate']['start_lr'] = 0.001
if ( 'stop_lr' in deepmd_dic['learning_rate'].keys() ):
stop_lr = deepmd_dic['learning_rate']['stop_lr']
if ( data_op.eval_str(stop_lr) == 2 ):
deepmd_dic['learning_rate']['stop_lr'] = float(stop_lr)
else:
log_info.log_error('Input error: stop_lr should be float, please check deepff/deepmd_model/learning_rate/stop_lr')
exit()
else:
deepmd_dic['learning_rate']['stop_lr'] = 1e-8
if ( 'loss' not in deepmd_dic.keys() ):
log_info.log_error('Input error: no loss, please check or set deepff/deepmd_model/loss')
exit()
else:
for key in deepmd_dic['loss'].keys():
if key not in loss_valid_key:
log_info.log_error('Input error: %s is invalid key, please check or reset deepff/deepmd_model/loss' %(key))
exit()
if ( 'start_pref_e' in deepmd_dic['loss'].keys() ):
start_pref_e = deepmd_dic['loss']['start_pref_e']
if ( data_op.eval_str(start_pref_e) == 1 or data_op.eval_str(start_pref_e) == 2 ):
deepmd_dic['loss']['start_pref_e'] = float(start_pref_e)
else:
log_info.log_error('Input error: start_pref_e should be float, please check deepff/deepmd_model/loss/start_pref_e')
exit()
else:
deepmd_dic['loss']['start_pref_e'] = 0.02
if ( 'limit_pref_e' in deepmd_dic['loss'].keys() ):
limit_pref_e = deepmd_dic['loss']['limit_pref_e']
if ( data_op.eval_str(limit_pref_e) == 1 or data_op.eval_str(limit_pref_e) == 2 ):
deepmd_dic['loss']['limit_pref_e'] = float(limit_pref_e)
else:
log_info.log_error('Input error: limit_pref_e should be float, please check deepff/deepmd_model/loss/limit_pref_e')
exit()
else:
deepmd_dic['loss']['limit_pref_e'] = 1.0
if ( 'start_pref_f' in deepmd_dic['loss'].keys() ):
start_pref_f = deepmd_dic['loss']['start_pref_f']
if ( data_op.eval_str(start_pref_f) == 1 or data_op.eval_str(start_pref_f) == 2 ):
deepmd_dic['loss']['start_pref_f'] = float(start_pref_f)
else:
log_info.log_error('Input error: start_pref_f should be float, please check deepff/deepmd_model/loss/start_pref_f')
exit()
else:
deepmd_dic['loss']['start_pref_f'] = 1000.0
if ( 'limit_pref_f' in deepmd_dic['loss'].keys() ):
limit_pref_f = deepmd_dic['loss']['limit_pref_f']
if ( data_op.eval_str(limit_pref_f) == 1 or data_op.eval_str(limit_pref_f) == 2 ):
deepmd_dic['loss']['limit_pref_f'] = float(limit_pref_f)
else:
log_info.log_error('Input error: limit_pref_f should be float, please check deepff/deepmd_model/loss/limit_pref_f')
exit()
else:
deepmd_dic['loss']['limit_pref_f'] = 1.0
if ( 'start_pref_v' in deepmd_dic['loss'].keys() ):
start_pref_v = deepmd_dic['loss']['start_pref_v']
if ( data_op.eval_str(start_pref_v) == 1 or data_op.eval_str(start_pref_v) == 2 ):
deepmd_dic['loss']['start_pref_v'] = float(start_pref_v)
else:
log_info.log_error('Input error: start_pref_v should be float, please check deepff/deepmd_model/loss/start_pref_v')
exit()
else:
deepmd_dic['loss']['start_pref_v'] = 0.0
if ( 'limit_pref_v' in deepmd_dic['loss'].keys() ):
limit_pref_v = deepmd_dic['loss']['limit_pref_v']
if ( data_op.eval_str(limit_pref_v) == 1 or data_op.eval_str(limit_pref_v) == 2 ):
deepmd_dic['loss']['limit_pref_v'] = float(limit_pref_v)
else:
log_info.log_error('Input error: limit_pref_v should, please check deepff/deepmd_model/loss/limit_pref_v')
exit()
else:
deepmd_dic['loss']['limit_pref_v'] = 0.0
if ( 'training' not in deepmd_dic.keys() ):
log_info.log_error('Input error: no training found, please check or set deepff/deepmd_model/training')
exit()
else:
for i in deepmd_dic['training'].keys():
if 'system' not in i and i not in training_valid_key:
log_info.log_error('Input error: %s is invalid key, please check or reset deepff/deepmd_model/training' %(i))
exit()
if ( 'system' in i ):
if ( 'traj_type' in deepmd_dic['training'][i] ):
traj_type = deepmd_dic['training'][i]['traj_type']
if ( traj_type == 'md' or traj_type == 'mtd' ):
pass
else:
log_info.log_error('Input error: traj_type should be md or mtd, please check and reset deepff/deepmd_model/training/system/traj_type')
else:
deepmd_dic['training'][i]['traj_type'] = 'md'
if ( 'traj_coord_file' in deepmd_dic['training'][i] ):
traj_coord_file = deepmd_dic['training'][i]['traj_coord_file']
if ( os.path.exists(os.path.abspath(traj_coord_file)) ):
deepmd_dic['training'][i]['traj_coord_file'] = os.path.abspath(traj_coord_file)
else:
log_info.log_error('Input error: %s does not exist, please check deepff/deepmd_model/training/system/traj_coord_file' %(traj_coord_file))
exit()
else:
log_info.log_error('Input error: no coordination trajectory file, please check deepff/deepmd_model/training/system/traj_coord_file')
exit()
if ( deepmd_dic['training'][i]['traj_type'] == 'md' ):
if ( 'traj_frc_file' in deepmd_dic['training'][i] ):
traj_frc_file = deepmd_dic['training'][i]['traj_frc_file']
if ( os.path.exists(os.path.abspath(traj_frc_file)) ):
deepmd_dic['training'][i]['traj_frc_file'] = os.path.abspath(traj_frc_file)
else:
log_info.log_error('Input error: %s does not exist, please check deepff/deepmd_model/training/system/traj_frc_file' %(traj_frc_file))
exit()
else:
log_info.log_error('Input error: no force trajectory file, please check deepff/deepmd_model/training/system/traj_frc_file')
exit()
line_num = file_tools.grep_line_num("'PDB file'", traj_coord_file, os.getcwd())
if ( line_num == 0 ):
coord_file_type = 'coord_xyz'
else:
coord_file_type = 'coord_pdb'
if ( coord_file_type == 'coord_xyz' ):
if ( 'traj_cell_file' in deepmd_dic['training'][i] ):
traj_cell_file = deepmd_dic['training'][i]['traj_cell_file']
if ( os.path.exists(os.path.abspath(traj_cell_file)) ):
deepmd_dic['training'][i]['traj_cell_file'] = os.path.abspath(traj_cell_file)
else:
log_info.log_error('Input error: %s does not exist, please check deepff/deepmd_model/training/system/traj_cell_file' %(traj_cell_file))
exit()
else:
log_info.log_error('Input error: no cell trajectory file, please check deepff/deepmd_model/training/system/traj_cell_file')
exit()
else:
deepmd_dic['training'][i]['traj_cell_file'] = 'none'
if ( 'traj_stress_file' in deepmd_dic['training'][i] ):
traj_stress_file = deepmd_dic['training'][i]['traj_stress_file']
if ( os.path.exists(os.path.abspath(traj_stress_file)) ):
deepmd_dic['training'][i]['traj_stress_file'] = os.path.abspath(traj_stress_file)
else:
log_info.log_error('Input error: %s does not exist, please check deepff/deepmd_model/training/system/traj_stress_file' %(traj_stress_file))
exit()
else:
deepmd_dic['training'][i]['traj_stress_file'] = 'none'
atoms_num, pre_base_block, end_base_block, pre_base, frames_num, each, start_id, end_id, time_step = \
traj_info.get_traj_info(traj_coord_file, coord_file_type)
elif ( deepmd_dic['training'][i]['traj_type'] == 'mtd' ):
if ( 'data_dir' in deepmd_dic['training'][i] ):
data_dir = deepmd_dic['training'][i]['data_dir']
if ( os.path.exists(os.path.abspath(data_dir)) ):
deepmd_dic['training'][i]['data_dir'] = os.path.abspath(data_dir)
else:
log_info.log_error('Input error: %s does not exist, please check deepff/deepmd_model/training/system/data_dir' %(data_dir))
exit()
else:
log_info.log_error('Input error: no data_dir, please set deepff/deepmd_model/training/system/data_dir')
exit()
if ( not 'task_dir_prefix' in deepmd_dic['training'][i] ):
log_info.log_error('Input error: no task_dir_prefix, please set deepff/deepmd_model/training/system/task_dir_prefix')
exit()
if ( not 'proj_name' in deepmd_dic['training'][i] ):
log_info.log_error('Input error: no proj_name, please set deepff/deepmd_model/training/system/proj_name')
exit()
if ( not 'out_file_name' in deepmd_dic['training'][i] ):
log_info.log_error('Input error: no out_file_name, please set deepff/deepmd_model/training/system/out_file_name')
exit()
if ( 'start_frame' in deepmd_dic['training'][i] ):
start_frame = deepmd_dic['training'][i]['start_frame']
if ( data_op.eval_str(start_frame) == 1 ):
deepmd_dic['training'][i]['start_frame'] = int(start_frame)
else:
log_info.log_error('Input error: start_frame should be integer, please check deepff/deepmd_model/training/system/start_frame')
exit()
else:
if ( deepmd_dic['training'][i]['traj_type'] == 'md' ):
deepmd_dic['training'][i]['start_frame'] = start_id
if ( 'end_frame' in deepmd_dic['training'][i] ):
end_frame = deepmd_dic['training'][i]['end_frame']
if ( data_op.eval_str(end_frame) == 1 ):
deepmd_dic['training'][i]['end_frame'] = int(end_frame)
else:
log_info.log_error('Input error: end_frame should be integer, please check deepff/deepmd_model/training/system/end_frame')
exit()
else:
if ( deepmd_dic['training'][i]['traj_type'] == 'md' ):
deepmd_dic['training'][i]['end_frame'] = end_id
if ( 'choosed_frame_num' in deepmd_dic['training'][i] ):
choosed_frame_num = deepmd_dic['training'][i]['choosed_frame_num']
if ( data_op.eval_str(choosed_frame_num) == 1 ):
deepmd_dic['training'][i]['choosed_frame_num'] = int(choosed_frame_num)
else:
log_info.log_error('Input error: choosed_frame_num should be integer, please check deepff/deepmd_model/training/system/choosed_frame_num')
exit()
else:
if ( deepmd_dic['training'][i]['traj_type'] == 'md' ):
deepmd_dic['training'][i]['choosed_frame_num'] = int((end_id-start_id)/each)+1
if ( 'set_parts' in deepmd_dic['training'][i] ):
set_parts = deepmd_dic['training'][i]['set_parts']
if ( data_op.eval_str(set_parts) == 1 ):
deepmd_dic['training'][i]['set_parts'] = int(set_parts)
else:
log_info.log_error('Input error: set_parts should be integer, please check deepff/deepmd_model/training/system/set_parts')
exit()
else:
deepmd_dic['training'][i]['set_parts'] = 1
if ( 'use_prev_model' in deepmd_dic['training'].keys() ):
use_prev_model = deepmd_dic['training']['use_prev_model']
use_prev_model_bool = data_op.str_to_bool(use_prev_model)
if ( isinstance(use_prev_model_bool, bool) ):
deepmd_dic['training']['use_prev_model'] = use_prev_model_bool
else:
log_info.log_error('Input error: use_prev_model should be bool, please check or reset deepff/deepmd_model/training/use_prev_model')
exit()
else:
deepmd_dic['training']['use_prev_model'] = False
if ( 'lr_scale' in deepmd_dic['training'].keys() ):
lr_scale = deepmd_dic['training']['lr_scale']
if ( data_op.eval_str(lr_scale) == 1 or data_op.eval_str(lr_scale) == 2 ):
deepmd_dic['training']['lr_scale'] = float(lr_scale)
else:
log_info.log_error('Input error: the lr_scale should be float, please check or reset deepff/deepmd_model/training/lr_scale')
exit()
else:
deepmd_dic['training']['lr_scale'] = 2
if ( 'shuffle_data' in deepmd_dic['training'].keys() ):
shuffle_data = deepmd_dic['training']['shuffle_data']
shuffle_data_bool = data_op.str_to_bool(shuffle_data)
if ( isinstance(shuffle_data_bool, bool) ):
deepmd_dic['training']['shuffle_data'] = shuffle_data_bool
else:
log_info.log_error('Input error: shuffle_data should be bool, please check or reset deepff/deepmd_model/training/shuffle_data')
exit()
else:
deepmd_dic['training']['shuffle_data'] = False
if ( 'train_stress' in deepmd_dic['training'].keys() ):
train_stress = deepmd_dic['training']['train_stress']
train_stress_bool = data_op.str_to_bool(train_stress)
if ( isinstance(train_stress_bool, bool) ):
deepmd_dic['training']['train_stress'] = train_stress_bool
else:
log_info.log_error('Input error: train_stress should be bool, please check or reset deepff/deepmd_model/training/train_stress')
exit()
else:
deepmd_dic['training']['train_stress'] = False
if ( 'set_data_dir' in deepmd_dic['training'].keys() ):
set_data_dir = deepmd_dic['training']['set_data_dir']
if ( os.path.exists(os.path.abspath(set_data_dir)) ):
deepmd_dic['training']['set_data_dir'] = os.path.abspath(set_data_dir)
else:
log_info.log_error('Input error: %s directory does not exist, please check or reset deepff/deepmd_model/training/set_data_dir')
exit()
if ( 'model_type' in deepmd_dic['training'].keys() ):
model_type = deepmd_dic['training']['model_type']
if ( model_type == 'use_seed' or model_type == 'use_node' ):
pass
else:
log_info.log_error('Input error: only use_seed and use_node are supported for model_type, please check deepff/deepmd_model/training/model_type')
exit()
else:
deepmd_dic['training']['model_type'] = 'use_seed'
if ( deepmd_dic['training']['model_type'] == 'use_seed' ):
if ( 'seed_num' in deepmd_dic['training'].keys() ):
seed_num = deepmd_dic['training']['seed_num']
if ( data_op.eval_str(seed_num) == 1 ):
deepmd_dic['training']['seed_num'] = int(seed_num)
else:
log_info.log_error('Input error: seed_num should be integer, please check or reset deepff/deepmd_model/training/seed_num')
exit()
else:
deepmd_dic['training']['seed_num'] = 2
if ( 'neuron' in deepmd_dic['training'].keys() ):
neuron_list = deepmd_dic['training']['neuron']
if ( deepmd_dic['training']['model_type'] == 'use_node' ):
neuron = []
tmp_str = data_op.comb_list_2_str(neuron_list, ' ')
tmp_list = data_op.split_str(tmp_str, '...')
for i in range(len(tmp_list)):
neuron_i = data_op.split_str(tmp_list[i], ' ')
if ( all(data_op.eval_str(j) == 1 for j in neuron_i) ):
neuron.append([int(x) for x in neuron_i])
else:
log_info.log_error('Input error: neuron should be list of integer, please check or reset deepff/deepmd_model/training/neuron')
exit()
elif ( deepmd_dic['training']['model_type'] == 'use_seed' ):
if ( all(data_op.eval_str(j) == 1 for j in neuron_list) ):
neuron = [int(x) for x in neuron_list]
else:
log_info.log_error('Input error: neuron should be list of integer, please check or reset deepff/deepmd_model/training/neuron')
exit()
deepmd_dic['training']['neuron'] = neuron
else:
log_info.log_error('Input error: no neuron, please set deepff/deepmd_model/training/neuron')
exit()
if ( 'fix_stop_batch' in deepmd_dic['training'].keys() ):
fix_stop_batch = deepmd_dic['training']['fix_stop_batch']
fix_stop_batch_bool = data_op.str_to_bool(fix_stop_batch)
if ( isinstance(fix_stop_batch_bool, bool) ):
deepmd_dic['training']['fix_stop_batch'] = fix_stop_batch_bool
else:
log_info.log_error('Input error: fix_stop_batch should be bool, please check or reset deepff/deepmd_model/training/fix_stop_batch')
exit()
else:
deepmd_dic['training']['fix_stop_batch'] = False
fix_stop_batch = deepmd_dic['training']['fix_stop_batch']
if fix_stop_batch:
if ( 'decay_steps' in deepmd_dic['learning_rate'].keys() ):
decay_steps = deepmd_dic['learning_rate']['decay_steps']
if ( data_op.eval_str(decay_steps) == 1 ):
deepmd_dic['learning_rate']['decay_steps'] = int(decay_steps)
else:
log_info.log_error('Input error: the decay_steps should be integer, please check or reset deepff/deepmd_model/learning_rate/decay_steps')
exit()
else:
deepmd_dic['learning_rate']['decay_steps'] = 5000
if ( 'stop_batch' in deepmd_dic['training'].keys() ):
stop_batch = deepmd_dic['training']['stop_batch']
if ( data_op.eval_str(stop_batch) == 1 ):
deepmd_dic['training']['stop_batch'] = int(stop_batch)
else:
log_info.log_error('Input error: the stop_batch should be integer, please check or reset deepff/deepmd_model/training/stop_batch')
exit()
else:
deepmd_dic['training']['stop_batch'] = 1000000
else:
if ( 'epoch_num' in deepmd_dic['training'].keys() ):
epoch_num = deepmd_dic['training']['epoch_num']
if ( data_op.eval_str(epoch_num) == 1 ):
deepmd_dic['training']['epoch_num'] = int(epoch_num)
else:
log_info.log_error('Input error: the number of epoch should be integer, please check or reset deepff/deepmd_model/training/epoch_num')
exit()
else:
deepmd_dic['training']['epoch_num'] = 200
if ( 'batch_size' in deepmd_dic['training'].keys() ):
batch_size = deepmd_dic['training']['batch_size']
if ( data_op.eval_str(batch_size) == 1 ):
deepmd_dic['training']['batch_size'] = int(batch_size)
else:
log_info.log_error('Input error: batch_size shoule be integer, please check or reset deepff/deepmd_model/training/batch_size')
exit()
else:
deepmd_dic['training']['batch_size'] = 1
if ( 'disp_freq' in deepmd_dic['training'].keys() ):
disp_freq = deepmd_dic['training']['disp_freq']
if ( data_op.eval_str(disp_freq) == 1 ):
deepmd_dic['training']['disp_freq'] = int(disp_freq)
else:
log_info.log_error('Input error: disp_freq should be integer, please check or reset deepff/deepmd_model/training/disp_freq')
exit()
else:
deepmd_dic['training']['disp_freq'] = 100
if ( 'numb_test' in deepmd_dic['training'].keys() ):
numb_test = deepmd_dic['training']['numb_test']
if ( data_op.eval_str(numb_test) == 1 ):
deepmd_dic['training']['numb_test'] = int(numb_test)
else:
log_info.log_error('Input error: numb_test should be integer, please check or reset deepff/deepmd_model/training/numb_test')
exit()
else:
deepmd_dic['training']['numb_test'] = 10
if ( 'save_freq' in deepmd_dic['training'].keys() ):
save_freq = deepmd_dic['training']['save_freq']
if ( data_op.eval_str(save_freq) == 1 ):
deepmd_dic['training']['save_freq'] = int(save_freq)
else:
log_info.log_error('Input error: save_freq should be integer, please check or reset deepff/deepmd_model/training/save_freq')
exit()
else:
deepmd_dic['training']['save_freq'] = 1000
if ( 'disp_training' in deepmd_dic['training'].keys() ):
disp_training = deepmd_dic['training']['disp_training']
disp_training_bool = data_op.str_to_bool(disp_training)
if ( isinstance(disp_training_bool, bool) ):
deepmd_dic['training']['disp_training'] = disp_training_bool
else:
log_info.log_error('Input error: disp_training should be bool, please check or reset deepff/deepmd_model/training/disp_training')
exit()
else:
deepmd_dic['training']['disp_training'] = True
deepmd_dic['training']['set_prefix'] = 'set'
deepmd_dic['training']['disp_file'] = 'lcurve.out'
deepmd_dic['training']['load_ckpt'] = 'model.ckpt'
deepmd_dic['training']['save_ckpt'] = 'model.ckpt'
deepmd_dic['training']['time_training'] = True
deepmd_dic['training']['profiling'] = False
deepmd_dic['training']['profiling_file'] = 'timeline.json'
return deepmd_dic
def check_deepmd_test(deepmd_dic):
'''
check_deepmd_model: check the input file in the deepmd_model subsection
Args:
deepmd_dic: dictionary
deepmd_dic contains keywords used in deepmd.
Returns:
deepmd_dic: dictionary
deepmd_dic is the revised deepmd_dic.
'''
deepmd_valid_key = ['init_dpff_dir', 'start_lr', 'lr_scale', 'fix_stop_batch', \
'use_prev_model', 'train_stress', 'shuffle_data', 'epoch_num']
for key in deepmd_dic.keys():
if key not in deepmd_valid_key:
log_info.log_error('Input error: %s is invalid key, please check or reset deepff/deepmd_test' %(key))
exit()
if ( 'init_dpff_dir' in deepmd_dic.keys() ):
init_dpff_dir = deepmd_dic['init_dpff_dir']
if ( os.path.exists(os.path.abspath(init_dpff_dir)) ):
deepmd_dic['init_dpff_dir'] = os.path.abspath(init_dpff_dir)
else:
log_info.log_error('Input error: %s file does not exist, please check or reset deepff/deepmd_test/init_dpff_dir' %(init_dpff_dir))
exit()
else:
log_info.log_error('Input error: no init_dpff_dir, please set deepff/deepmd_test/init_dpff_dir')
exit()
if ( 'start_lr' in deepmd_dic.keys() ):
start_lr = deepmd_dic['start_lr']
if ( data_op.eval_str(start_lr) == 2 ):
deepmd_dic['start_lr'] = float(start_lr)
else:
log_info.log_error('Input error: the start_lr should be float, please check or reset deepff/deepmd_test/start_lr')
exit()
else:
deepmd_dic['start_lr'] = 2.0
if ( 'lr_scale' in deepmd_dic.keys() ):
lr_scale = deepmd_dic['lr_scale']
if ( data_op.eval_str(lr_scale) == 1 or data_op.eval_str(lr_scale) == 2 ):
deepmd_dic['lr_scale'] = float(lr_scale)
else:
log_info.log_error('Input error: the lr_scale should be float, please check or reset deepff/deepmd_test/lr_scale')
exit()
else:
deepmd_dic['lr_scale'] = 2.0
if ( 'fix_stop_batch' in deepmd_dic.keys() ):
fix_stop_batch = deepmd_dic['fix_stop_batch']
fix_stop_batch_bool = data_op.str_to_bool(fix_stop_batch)
if ( isinstance(fix_stop_batch_bool, bool) ):
deepmd_dic['fix_stop_batch'] = fix_stop_batch_bool
else:
log_info.log_error('Input error: fix_stop_batch should be bool, please check or reset deepff/deepmd_test/fix_stop_batch')
exit()
else:
deepmd_dic['fix_stop_batch'] = False
if ( not deepmd_dic['fix_stop_batch'] ):
if ( 'epoch_num' in deepmd_dic.keys() ):
epoch_num = deepmd_dic['epoch_num']
if ( data_op.eval_str(epoch_num) == 1 ):
deepmd_dic['epoch_num'] = int(epoch_num)
else:
log_info.log_error('Input error: the number of epoch should be integer, please check or reset deepff/deepmd_test/epoch_num')
exit()
else:
deepmd_dic['epoch_num'] = 200
if ( 'use_prev_model' in deepmd_dic.keys() ):
use_prev_model = deepmd_dic['use_prev_model']
use_prev_model_bool = data_op.str_to_bool(use_prev_model)
if ( isinstance(use_prev_model_bool, bool) ):
deepmd_dic['use_prev_model'] = use_prev_model_bool
else:
log_info.log_error('Input error: use_prev_model should be bool, please check or reset deepff/deepmd_test/use_prev_model')
exit()
else:
deepmd_dic['use_prev_model'] = False
if ( 'shuffle_data' in deepmd_dic.keys() ):
shuffle_data = deepmd_dic['shuffle_data']
shuffle_data_bool = data_op.str_to_bool(shuffle_data)
if ( isinstance(shuffle_data_bool, bool) ):
deepmd_dic['shuffle_data'] = shuffle_data_bool
else:
log_info.log_error('Input error: shuffle_data should be bool, please check or reset deepff/deepmd_test/shuffle_data')
exit()
else:
deepmd_dic['shuffle_data'] = False
if ( 'train_stress' in deepmd_dic.keys() ):
train_stress = deepmd_dic['train_stress']
train_stress_bool = data_op.str_to_bool(train_stress)
if ( isinstance(train_stress_bool, bool) ):
deepmd_dic['train_stress'] = train_stress_bool
else:
log_info.log_error('Input error: train_stress should be bool, please check or reset deepff/deepmd_test/train_stress')
exit()
else:
deepmd_dic['train_stress'] = False
return deepmd_dic
def check_lammps(lmp_dic, active_learn_dic):
  '''
  check_lammps: check the input file in lammps subsection

  Args:
    lmp_dic: dictionary
      lmp_dic contains keywords used in lammps.
    active_learn_dic: dictionary
      active_learn_dic contains keywords used in active_learn.
  Returns:
    lmp_dic: dictionary
      lmp_dic is the revised lammps_dic.
  '''
  lammps_valid_key = ['nsteps', 'write_restart_freq', 'time_step', 'temp', 'pres', 'tau_t', 'tau_p', 'change_init_str']
  #Per-system settings live in keys containing 'system'; only other keys are validated here.
  for key in lmp_dic.keys():
    if 'system' not in key and key not in lammps_valid_key:
      log_info.log_error('Input error: %s is invalid key, please check or reset deepff/lammps' %(key))
      exit()
  if ( 'nsteps' in lmp_dic.keys() ):
    nsteps = lmp_dic['nsteps']
    if ( data_op.eval_str(nsteps) == 1 ):
      #Enforce a minimum of 1000 md steps.
      if ( int(nsteps) < 1000 ):
        lmp_dic['nsteps'] = '1000'
    else:
      log_info.log_error('Input error: nsteps should be integer, please check or reset deepff/lammps/nsteps')
      exit()
  else:
    lmp_dic['nsteps'] = '10000'
  if ( 'write_restart_freq' in lmp_dic.keys() ):
    write_restart_freq = lmp_dic['write_restart_freq']
    if ( data_op.eval_str(write_restart_freq) == 1 ):
      pass
    else:
      log_info.log_error('Input error: write_restart_freq should be integer, please check or reset deepff/lammps/write_restart_freq')
      exit()
  else:
    lmp_dic['write_restart_freq'] = '1000'
  #Thermo and dump frequencies follow the active-learning judge frequency.
  judge_freq = active_learn_dic['judge_freq']
  lmp_dic['thermo_freq'] = judge_freq
  lmp_dic['dump_freq'] = judge_freq
  if ( 'time_step' in lmp_dic.keys() ):
    time_step = lmp_dic['time_step']
    if ( data_op.eval_str(time_step) == 1 or data_op.eval_str(time_step) == 2 ):
      pass
    else:
      log_info.log_error('Input error: time_step should be integer or float, please check or reset deepff/lammps/time_step')
      #Bug fix: exit on invalid time_step, consistent with every other validation branch.
      exit()
  else:
    lmp_dic['time_step'] = '0.0005'
  if ( 'tau_t' in lmp_dic.keys() ):
    tau_t = lmp_dic['tau_t']
    if ( data_op.eval_str(tau_t) == 1 or data_op.eval_str(tau_t) == 2 ):
      pass
    else:
      log_info.log_error('Input error: tau_t should be integer or float, please check or reset deepff/lammps/tau_t')
      exit()
  else:
    #Default thermostat damping: 200 time steps.
    lmp_dic['tau_t'] = '%f' %(float(lmp_dic['time_step'])*200)
  if ( 'tau_p' in lmp_dic.keys() ):
    tau_p = lmp_dic['tau_p']
    #Bug fix: accept integer as well as float, matching the error message and the tau_t check.
    if ( data_op.eval_str(tau_p) == 1 or data_op.eval_str(tau_p) == 2 ):
      pass
    else:
      log_info.log_error('Input error: tau_p should be integer or float, please check or reset deepff/lammps/tau_p')
      exit()
  else:
    #Default barostat damping: 200 time steps.
    lmp_dic['tau_p'] = '%f' %(float(lmp_dic['time_step'])*200)
  if ( 'temp' in lmp_dic.keys() ):
    temp = lmp_dic['temp']
    if ( isinstance(temp, list) ):
      if ( all(data_op.eval_str(i) == 1 or data_op.eval_str(i) == 2 for i in temp)):
        lmp_dic['temp'] = [float(i) for i in temp]
      else:
        log_info.log_error('Input error: multipole temperature should be list of float, please check or reset deepff/lammps/temp')
        exit()
    else:
      if ( data_op.eval_str(temp) == 1 or data_op.eval_str(temp) == 2 ):
        lmp_dic['temp'] = float(temp)
      else:
        log_info.log_error('Input error: temp should be float or list of float, please check or reset deepff/lammps/temp')
        exit()
  else:
    lmp_dic['temp'] = '300.0'
  if ( 'pres' in lmp_dic.keys() ):
    pres = lmp_dic['pres']
    if ( isinstance(pres, list) ):
      if ( all(data_op.eval_str(i) == 1 or data_op.eval_str(i) == 2 for i in pres)):
        lmp_dic['pres'] = [float(i) for i in pres]
      else:
        log_info.log_error('Input error: multipole pressure should be list of float, please check or reset deepff/lammps/pres')
        exit()
    else:
      if ( data_op.eval_str(pres) == 1 or data_op.eval_str(pres) == 2 ):
        lmp_dic['pres'] = float(pres)
      else:
        log_info.log_error('Input error: pres shoule be float or list of float, please check deepff/lammps/pres')
        exit()
  else:
    lmp_dic['pres'] = '1.0'
  if ( 'change_init_str' in lmp_dic.keys() ):
    change_init_str = lmp_dic['change_init_str']
    change_init_str_bool = data_op.str_to_bool(change_init_str)
    if ( isinstance(change_init_str_bool, bool) ):
      lmp_dic['change_init_str'] = change_init_str_bool
    else:
      log_info.log_out('Input error: change_init_str should be bool, please check or set deepff/lammps/change_init_str')
      exit()
  else:
    lmp_dic['change_init_str'] = False
  #Validate every per-system sub-dictionary and count systems.
  sys_num = 0
  for key in lmp_dic.keys():
    if ( 'system' in key ):
      sys_num = sys_num+1
      if ( 'box' in lmp_dic[key].keys() ):
        box_file = lmp_dic[key]['box']
        if ( os.path.exists(os.path.abspath(box_file)) ):
          lmp_dic[key]['box'] = os.path.abspath(box_file)
        else:
          log_info.log_error('Input error: %s file does not exist' %(box_file))
          exit()
      if ( 'coord' in lmp_dic[key].keys() ):
        coord_file = lmp_dic[key]['coord']
        if ( os.path.exists(os.path.abspath(coord_file)) ):
          lmp_dic[key]['coord'] = os.path.abspath(coord_file)
        else:
          log_info.log_error('Input error: %s file does not exist' %(coord_file))
          exit()
      if ( 'mass' in lmp_dic[key].keys() ):
        for element in lmp_dic[key]['mass']:
          element_mass = lmp_dic[key]['mass'][element]
          #Bug fix: validate the mass value that is converted below, not the element key.
          if ( data_op.eval_str(element_mass) == 1 or data_op.eval_str(element_mass) == 2 ):
            lmp_dic[key]['mass'][element] = float(element_mass)
          else:
            log_info.log_error('Input error: mass of element should be int or float, please check or reset deepff/lammps/system/mass')
            exit()
      valid_md_type = ['nve', 'nvt', 'npt']
      if ( 'md_type' in lmp_dic[key].keys() ):
        md_type = lmp_dic[key]['md_type']
        if ( md_type in valid_md_type ):
          pass
        else:
          log_info.log_error('Input error: only nve, nvt and npt are supportted for md_type, please check or reset deepff/lammps/system/md_type')
          exit()
      else:
        lmp_dic[key]['md_type'] = 'nvt'
      if ( 'use_mtd' in lmp_dic[key].keys() ):
        use_mtd = lmp_dic[key]['use_mtd']
        use_mtd_bool = data_op.str_to_bool(use_mtd)
        if ( isinstance(use_mtd_bool, bool) ):
          lmp_dic[key]['use_mtd'] = use_mtd_bool
        else:
          log_info.log_out('Input error: use_mtd should be bool, please check or set deepff/lammps/system/use_mtd')
          exit()
      else:
        lmp_dic[key]['use_mtd'] = False
      #Metadynamics needs a plumed input file.
      if lmp_dic[key]['use_mtd'] :
        if ( 'plumed_file' in lmp_dic[key].keys() ):
          plumed_file = lmp_dic[key]['plumed_file']
          if ( os.path.exists(os.path.abspath(plumed_file)) ):
            lmp_dic[key]['plumed_file'] = os.path.abspath(plumed_file)
          else:
            log_info.log_error('Input error: %s file does not exist, please check deepff/lammps/system/plumed_file' %(plumed_file))
            exit()
        else:
          log_info.log_error('Input error: as user want to use plumed, but no plumed_file, please check deepff/lammps/system/plumed_file')
          exit()
  if ( sys_num == 0 ):
    log_info.log_error('Input error: no system for lammps calculation, please set deepff/lammps/system')
    exit()
  return lmp_dic
def check_active_learn(active_learn_dic):
  '''
  check_active_learn: check the input file in active learn subsection

  Args:
    active_learn_dic: dictionary
      active_learn_dic contains keywords used in active_learn.
  Returns:
    active_learn_dic: dictionary
      active_learn_dic is the revised active_learn_dic.
  '''
  #Bug fix: 'energy_conv' is parsed below, so it must be accepted as a valid key.
  active_valid_key = ['choose_new_data_num_limit', 'judge_freq', 'force_conv', 'energy_conv', 'max_iter', \
                      'restart_iter', 'restart_index', 'data_num', 'restart_stage']
  for key in active_learn_dic.keys():
    if key not in active_valid_key:
      log_info.log_error('Input error: %s is invalid key, please check or reset deepff/active_learn' %(key))
      exit()
  if ( 'choose_new_data_num_limit' in active_learn_dic.keys() ):
    choose_new_data_num_limit = active_learn_dic['choose_new_data_num_limit']
    if ( data_op.eval_str(choose_new_data_num_limit) == 1 ):
      active_learn_dic['choose_new_data_num_limit'] = int(choose_new_data_num_limit)
    else:
      log_info.log_error('Input error: choose_new_data_num_limit should be integer, please check or reset deepff/active_learn/choose_new_data_num_limit')
      exit()
  else:
    active_learn_dic['choose_new_data_num_limit'] = 100
  if ( 'judge_freq' in active_learn_dic.keys() ):
    judge_freq = active_learn_dic['judge_freq']
    if ( data_op.eval_str(judge_freq) == 1 ):
      pass
    else:
      log_info.log_error('Input error: judge_freq should be integer, please check or reset deepff/lammps/judge_freq')
      exit()
  else:
    active_learn_dic['judge_freq'] = '10'
  if ( 'force_conv' in active_learn_dic.keys() ):
    force_conv = active_learn_dic['force_conv']
    if ( data_op.eval_str(force_conv) == 1 or data_op.eval_str(force_conv) == 2 ):
      active_learn_dic['force_conv'] = float(force_conv)
    else:
      log_info.log_error('Input error: force_conv should be integer or float, please check or set deepff/model_devi/force_conv')
      exit()
  else:
    active_learn_dic['force_conv'] = 0.05
  if ( 'energy_conv' in active_learn_dic.keys() ):
    energy_conv = active_learn_dic['energy_conv']
    if ( data_op.eval_str(energy_conv) == 1 or data_op.eval_str(energy_conv) == 2 ):
      active_learn_dic['energy_conv'] = float(energy_conv)
    else:
      log_info.log_error('Input error: energy_conv should be integer or float, please check or set deepff/model_devi/energy_conv')
      exit()
  else:
    active_learn_dic['energy_conv'] = 0.005
  if ( 'max_iter' in active_learn_dic.keys() ):
    max_iter = active_learn_dic['max_iter']
    if ( data_op.eval_str(max_iter) == 1 ):
      active_learn_dic['max_iter'] = int(max_iter)
    else:
      log_info.log_error('Input error: max_iter should be integer, please check or reset deepff/model_devi/max_iter')
      exit()
  else:
    active_learn_dic['max_iter'] = 100
  if ( 'restart_iter' in active_learn_dic.keys() ):
    restart_iter = active_learn_dic['restart_iter']
    if ( data_op.eval_str(restart_iter) == 1 ):
      active_learn_dic['restart_iter'] = int(restart_iter)
    else:
      log_info.log_error('Input error: restart_iter should be integer, please check or reset deepff/model_devi/restart_iter')
      exit()
  else:
    active_learn_dic['restart_iter'] = 0
  #data_num may be a single integer or a list of integers; normalize to a list of int.
  if ( 'data_num' in active_learn_dic.keys() ):
    data_num = active_learn_dic['data_num']
    if ( isinstance(data_num, str) and data_op.eval_str(data_num) == 1 ):
      active_learn_dic['data_num'] = [int(data_num)]
    else:
      if ( isinstance(data_num, list) and all(data_op.eval_str(i) == 1 for i in data_num) ):
        active_learn_dic['data_num'] = [int(i) for i in data_num]
      else:
        log_info.log_error('Input error: data_num should be integer or integer list, please check or reset deepff/model_devi/data_num')
        exit()
  else:
    active_learn_dic['data_num'] = [0]
  if ( 'restart_stage' in active_learn_dic.keys() ):
    restart_stage = active_learn_dic['restart_stage']
    if ( data_op.eval_str(restart_stage) == 1 ):
      active_learn_dic['restart_stage'] = int(restart_stage)
    else:
      log_info.log_error('Input error: restart_stage should be integer, please check or reset deepff/model_devi/restart_stage')
      exit()
  else:
    active_learn_dic['restart_stage'] = 0
  return active_learn_dic
def check_cp2k(cp2k_dic, sys_num=1):
  '''
  check_cp2k: check the input file in cp2k subsection

  Args:
    cp2k_dic: dictionary
      cp2k_dic contains keywords used in cp2k.
    sys_num: int
      sys_num is the number of systems (defaults to 1). Bug fix: the function
      body referenced sys_num without defining it, which raised NameError.
  Returns:
    cp2k_dic: dictionary
      cp2k_dic is the revised cp2k_dic.
  '''
  #For multi-system, we need multi cp2k input files.
  cp2k_inp_file_tot = []
  if ( 'cp2k_inp_file' in cp2k_dic.keys() ):
    cp2k_inp_file = cp2k_dic['cp2k_inp_file']
    if ( isinstance(cp2k_inp_file, list) ):
      if ( len(cp2k_inp_file) != sys_num ):
        log_info.log_error('Input error: the number of cp2k input file should be equal to the number of systems, please check or reset deepff/cp2k/cp2k_inp_file')
        exit()
      else:
        for inp_file in cp2k_inp_file:
          if ( os.path.exists(os.path.abspath(inp_file)) ):
            cp2k_inp_file_tot.append(os.path.abspath(inp_file))
          else:
            log_info.log_error('%s file does not exist' %(inp_file))
            exit()
    elif ( isinstance(cp2k_inp_file, str) ):
      if ( os.path.exists(os.path.abspath(cp2k_inp_file)) ):
        cp2k_inp_file_tot.append(os.path.abspath(cp2k_inp_file))
      else:
        log_info.log_error('%s file does not exist' %(cp2k_inp_file))
        exit()
    cp2k_dic['cp2k_inp_file'] = cp2k_inp_file_tot
  else:
    for i in range(sys_num):
      cp2k_inp_file_tot.append('none')
    cp2k_dic['cp2k_inp_file'] = cp2k_inp_file_tot
  if ( 'basis_set_file_name' in cp2k_dic.keys() ):
    basis_set_file_name = cp2k_dic['basis_set_file_name']
    if ( os.path.exists(os.path.abspath(basis_set_file_name)) ):
      cp2k_dic['basis_set_file_name'] = os.path.abspath(basis_set_file_name)
    else:
      log_info.log_error('%s file does not exist' %(basis_set_file_name))
      exit()
  else:
    log_info.log_error('Input error: no basis_set_file_name, please set deepff/cp2k/basis_set_file_name')
    exit()
  if ( 'potential_file_name' in cp2k_dic.keys() ):
    potential_file_name = cp2k_dic['potential_file_name']
    if ( os.path.exists(os.path.abspath(potential_file_name)) ):
      cp2k_dic['potential_file_name'] = os.path.abspath(potential_file_name)
    else:
      log_info.log_error('%s file does not exist' %(potential_file_name))
      exit()
  else:
    log_info.log_error('Input error: no potential_file_name, please set deepff/cp2k/potential_file_name')
    exit()
  if ( 'use_sr_basis' in cp2k_dic.keys() ):
    use_sr_basis = cp2k_dic['use_sr_basis']
    #Bug fix: the converted value was bound to a misspelled name (use_sr_basis__bool),
    #so the isinstance check below raised NameError.
    use_sr_basis_bool = data_op.str_to_bool(use_sr_basis)
    if ( isinstance(use_sr_basis_bool, bool) ):
      cp2k_dic['use_sr_basis'] = use_sr_basis_bool
    else:
      log_info.log_error('Input error: use_sr_basis should be bool, please check or reset deepff/cp2k/use_sr_basis')
      exit()
  else:
    cp2k_dic['use_sr_basis'] = False
  if ( 'basis_level' in cp2k_dic.keys() ):
    basis_level = cp2k_dic['basis_level']
    if ( basis_level == 'svp' or basis_level == 'dzvp' or basis_level == 'tzvp' or basis_level == 'tzv2p'):
      pass
    else:
      log_info.log_error('Input error: %s basis set is not surported!' %(basis_level))
      exit()
  else:
    cp2k_dic['basis_level'] = 'dzvp'
  periodic_valid = ['NONE', 'X', 'XY', 'XYZ', 'XZ', 'Y', 'YZ', 'Z']
  if ( 'poisson_periodic' in cp2k_dic.keys() ):
    poisson_periodic = cp2k_dic['poisson_periodic']
    if ( poisson_periodic.upper() in periodic_valid ):
      cp2k_dic['poisson_periodic'] = poisson_periodic.upper()
    else:
      log_info.log_error('Input error: poisson_periodic %s is not supported, please check' %(poisson_periodic))
  else:
    cp2k_dic['poisson_periodic'] = 'XYZ'
  if ( 'cell_periodic' in cp2k_dic.keys() ):
    cell_periodic = cp2k_dic['cell_periodic']
    if ( cell_periodic.upper() in periodic_valid ):
      cp2k_dic['cell_periodic'] = cell_periodic.upper()
    else:
      log_info.log_error('Input error: cell_periodic %s is not supported, please check' %(cell_periodic))
  else:
    cp2k_dic['cell_periodic'] = 'XYZ'
  if ( 'charge' in cp2k_dic.keys() ):
    charge = cp2k_dic['charge']
    if ( data_op.eval_str(charge) == 1 ):
      pass
    else:
      log_info.log_error('Input error: charge should be integer, please check or reset deepff/cp2k/charge')
      exit()
  else:
    log_info.log_error('Input error: no charge, please set deepff/cp2k/charge')
    exit()
  if ( 'multiplicity' in cp2k_dic.keys() ):
    multiplicity = cp2k_dic['multiplicity']
    if ( data_op.eval_str(multiplicity) == 1 ):
      pass
    else:
      log_info.log_error('Input error: multiplicity wrong should be integer, please check or reset deepff/cp2k/multiplicity')
      exit()
  else:
    log_info.log_error('Input error: no multiplicity, please set deepff/cp2k/multiplicity')
    exit()
  if ( 'cutoff' in cp2k_dic.keys() ):
    cutoff = cp2k_dic['cutoff']
    if ( data_op.eval_str(cutoff) == 1 or data_op.eval_str(cutoff) == 2 ):
      pass
    else:
      log_info.log_error('Input error: cutoff should be float or integer, please check deepff/cp2k/cutoff')
      exit()
  else:
    cp2k_dic['cutoff'] = '400'
  functional_lib = ['PBE', 'B3LYP', 'TPSS']
  if ( 'xc_functional' in cp2k_dic.keys() ):
    xc_functional = cp2k_dic['xc_functional']
    if ( xc_functional in functional_lib ):
      pass
    else:
      log_info.log_error('Input error: %s functional is not suported for xc functional' %(xc_functional))
      exit()
  else:
    cp2k_dic['xc_functional'] = 'PBE'
  if ( 'dftd3' in cp2k_dic.keys() ):
    dftd3 = cp2k_dic['dftd3']
    dftd3_bool = data_op.str_to_bool(dftd3)
    if ( isinstance(dftd3_bool, bool) ):
      cp2k_dic['dftd3'] = dftd3_bool
    else:
      log_info.log_error('Input error: dftd3 should be bool, please check or check or reset deepff/cp2k/dftd3')
      exit()
  else:
    cp2k_dic['dftd3'] = False
  #DFT-D3 dispersion correction needs its parameter file.
  if cp2k_dic['dftd3']:
    if ( 'dftd3_file' in cp2k_dic.keys() ):
      dftd3_file = cp2k_dic['dftd3_file']
      if ( os.path.exists(os.path.abspath(dftd3_file)) ):
        cp2k_dic['dftd3_file'] = os.path.abspath(dftd3_file)
      else:
        log_info.log_error('%s file does not exist' %(os.path.abspath(dftd3_file)))
        exit()
    else:
      log_info.log_error('Input error: no dftd3 file, please set deepff/cp2k/dftd3_file')
      #Bug fix: exit when the mandatory dftd3 file is missing, consistent with sibling branches.
      exit()
  if ( 'use_prev_wfn' in cp2k_dic.keys() ):
    use_prev_wfn = cp2k_dic['use_prev_wfn']
    use_prev_wfn_bool = data_op.str_to_bool(use_prev_wfn)
    if ( isinstance(use_prev_wfn_bool, bool) ):
      cp2k_dic['use_prev_wfn'] = use_prev_wfn_bool
    else:
      log_info.log_error('Input error: use_prev_wfn should be bool, please check or reset deepff/cp2k/use_prev_wfn')
      exit()
  else:
    cp2k_dic['use_prev_wfn'] = False
  return cp2k_dic
def check_environ(environ_dic, proc_num_one_node):
  '''
  check_environ: check the input file in environ subsection

  Args:
    environ_dic: dictionary
      environ_dic contains keywords used in environment.
    proc_num_one_node: int
      proc_num_one_node is the number of processors in one node.
  Returns:
    environ_dic: dictionary
      environ_dic is the revised environ_dic.
  '''
  #Bug fix: the valid-key list contained corrupted placeholders; 'cp2k_exe' and
  #'cp2k_env_file' are validated below, so they must be accepted here.
  environ_valid_key = ['cp2k_exe', 'cp2k_env_file', 'parallel_exe', 'cuda_dir', 'lmp_job_per_node', \
                       'lmp_mpi_num_per_job', 'lmp_omp_num_per_job', 'cp2k_job_per_node', 'dp_version', 'dp_job_per_node']
  for key in environ_dic.keys():
    if key not in environ_valid_key:
      log_info.log_error('Input error: %s is invalid key, please check or reset deepff/environ' %(key))
      exit()
  if ( 'cp2k_exe' in environ_dic.keys() ):
    cp2k_exe = environ_dic['cp2k_exe']
    if ( os.path.exists(os.path.abspath(cp2k_exe)) ):
      environ_dic['cp2k_exe'] = os.path.abspath(cp2k_exe)
    else:
      log_info.log_error('Input error: cp2k executable file does not exist, please check or set deepff/environ/cp2k_exe')
      exit()
  else:
    log_info.log_error('Input error: no cp2k executable file, please set deepff/environ/cp2k_exe')
    exit()
  if ( 'cp2k_env_file' in environ_dic.keys() ):
    cp2k_env_file = environ_dic['cp2k_env_file']
    if ( os.path.exists(os.path.abspath(cp2k_env_file)) ):
      environ_dic['cp2k_env_file'] = os.path.abspath(cp2k_env_file)
    else:
      log_info.log_error('Input error: cp2k environment file does not exist, please check or set deepff/environ/cp2k_env_file')
      exit()
  else:
    log_info.log_error('Input error: no cp2k environment file, please set deepff/environ/cp2k_env_file')
    exit()
  if ( 'cuda_dir' in environ_dic.keys() ):
    cuda_dir = environ_dic['cuda_dir']
  else:
    environ_dic['cuda_dir'] = 'none'
  if ( 'dp_version' in environ_dic.keys() ):
    dp_version = environ_dic['dp_version']
    dp_version_sup = ['1.3.3', '2.0.0']
    if ( dp_version not in dp_version_sup ):
      log_info.log_error('Input error: current deepmd-kit version is not supported, please check or reset deepff/environ/dp_version')
      exit()
  else:
    environ_dic['dp_version'] = '1.3.3'
  #cuda_dir is only resolved when it was explicitly supplied (otherwise it is 'none').
  if ( environ_dic['cuda_dir'] != 'none' ):
    if ( os.path.exists(os.path.abspath(cuda_dir)) ):
      environ_dic['cuda_dir'] = os.path.abspath(cuda_dir)
    else:
      log_info.log_error('Input error: cuda directory does not exist, please check or set deepff/environ/cuda_dir')
      exit()
  if ( 'parallel_exe' in environ_dic.keys() ):
    parallel_exe = environ_dic['parallel_exe']
    if ( os.path.exists(os.path.abspath(parallel_exe)) ):
      environ_dic['parallel_exe'] = os.path.abspath(parallel_exe)
    else:
      log_info.log_error('Input error: parallel executable file does not exist, please check or set deepff/environ/parallel_exe')
      exit()
  else:
    log_info.log_error('Input error: no cp2k parallel file, please set deepff/environ/parallel_exe')
    exit()
  if ( 'cp2k_job_per_node' in environ_dic.keys() ):
    cp2k_job_per_node = environ_dic['cp2k_job_per_node']
    if ( data_op.eval_str(cp2k_job_per_node) == 1 ):
      environ_dic['cp2k_job_per_node'] = int(cp2k_job_per_node)
    else:
      log_info.log_error('Input error: cp2k_job_per_node should be integer, please check or reset deepff/environ/cp2k_job_per_node')
      exit()
  else:
    environ_dic['cp2k_job_per_node'] = 1
  if ( 'lmp_job_per_node' in environ_dic.keys() ):
    lmp_job_per_node = environ_dic['lmp_job_per_node']
    if ( data_op.eval_str(lmp_job_per_node) == 1 ):
      environ_dic['lmp_job_per_node'] = int(lmp_job_per_node)
    else:
      log_info.log_error('Input error: lmp_job_per_node should be integer, please check or reset deepff/environ/lmp_job_per_node')
      exit()
  else:
    environ_dic['lmp_job_per_node'] = 1
  if ( 'dp_job_per_node' in environ_dic.keys() ):
    dp_job_per_node = environ_dic['dp_job_per_node']
    if ( data_op.eval_str(dp_job_per_node) == 1 ):
      environ_dic['dp_job_per_node'] = int(dp_job_per_node)
    else:
      log_info.log_error('Input error: dp_job_per_node should be integer, please check or reset deepff/environ/dp_job_per_node')
      exit()
  else:
    environ_dic['dp_job_per_node'] = 1
  if ( 'lmp_omp_num_per_job' in environ_dic.keys() ):
    lmp_omp_num_per_job = environ_dic['lmp_omp_num_per_job']
    if ( data_op.eval_str(lmp_omp_num_per_job) == 1 ):
      environ_dic['lmp_omp_num_per_job'] = int(lmp_omp_num_per_job)
    else:
      log_info.log_error('Input error: lmp_omp_num_per_job should be integer, please check or reset deepff/environ/lmp_omp_num_per_job')
      exit()
  else:
    environ_dic['lmp_omp_num_per_job'] = 1
  if ( 'lmp_mpi_num_per_job' in environ_dic.keys() ):
    lmp_mpi_num_per_job = environ_dic['lmp_mpi_num_per_job']
    if ( data_op.eval_str(lmp_mpi_num_per_job) == 1 ):
      environ_dic['lmp_mpi_num_per_job'] = int(lmp_mpi_num_per_job)
    else:
      log_info.log_error('Input error: lmp_mpi_num_per_job should be integer, please check or reset deepff/environ/lmp_mpi_num_per_job')
      exit()
  else:
    #Default: split the node processors among the lammps jobs, rounded down to an even count.
    lmp_mpi_num_per_job = int(proc_num_one_node/environ_dic['lmp_job_per_node'])
    if ( lmp_mpi_num_per_job%2 != 0 ):
      lmp_mpi_num_per_job = lmp_mpi_num_per_job-1
    environ_dic['lmp_mpi_num_per_job'] = lmp_mpi_num_per_job
  return environ_dic
def check_dp_test(dp_test_dic):
  '''
  check_dp_test: check the input of dp_test.

  Args:
    dp_test_dic: dictionary
      dp_test_dic contains parameters for dp_test.
  Returns:
    dp_test_dic: dictionary
      dp_test_dic is the revised dp_test_dic.
  '''
  if ( 'cp2k_frc_file' in dp_test_dic.keys() ):
    cp2k_frc_file = dp_test_dic['cp2k_frc_file']
    if ( os.path.exists(os.path.abspath(cp2k_frc_file)) ):
      dp_test_dic['cp2k_frc_file'] = os.path.abspath(cp2k_frc_file)
    else:
      log_info.log_error('Input error: %s file does not exist' %(cp2k_frc_file))
      exit()
  else:
    log_info.log_error('Input error: no cp2k_frc_file, please set analyze/dp_test/cp2k_frc_file')
    exit()
  if ( 'cp2k_pos_file' in dp_test_dic.keys() ):
    cp2k_pos_file = dp_test_dic['cp2k_pos_file']
    if ( os.path.exists(os.path.abspath(cp2k_pos_file)) ):
      dp_test_dic['cp2k_pos_file'] = os.path.abspath(cp2k_pos_file)
    else:
      log_info.log_error('Input error: %s file does not exist' %(cp2k_pos_file))
      exit()
  else:
    log_info.log_error('Input error: no cp2k_pos_file, please set analyze/dp_test/cp2k_pos_file')
    exit()
  if ( 'cp2k_cell_file' in dp_test_dic.keys() ):
    cp2k_cell_file = dp_test_dic['cp2k_cell_file']
    if ( os.path.exists(os.path.abspath(cp2k_cell_file)) ):
      dp_test_dic['cp2k_cell_file'] = os.path.abspath(cp2k_cell_file)
    else:
      log_info.log_error('Input error: %s file does not exist' %(cp2k_cell_file))
      exit()
  else:
    log_info.log_error('Input error: no cp2k_cell_file, please set analyze/dp_test/cp2k_cell_file')
    exit()
  if ( 'dpff_file' in dp_test_dic.keys() ):
    dpff_file = dp_test_dic['dpff_file']
    if ( os.path.exists(os.path.abspath(dpff_file)) ):
      dp_test_dic['dpff_file'] = os.path.abspath(dpff_file)
    else:
      log_info.log_error('Input error: %s file does not exist' %(dpff_file))
      exit()
  else:
    log_info.log_error('Input error: no dpff_file, please set analyze/dp_test/dpff_file')
    exit()
  #Bug fix: this block was duplicated; the first copy built atom_label_dic and then
  #discarded it by assigning the raw list. One pass storing the ordered mapping
  #(atom index -> element label) gives the same final result.
  if ( 'atom_label' in dp_test_dic.keys() ):
    atom_label = dp_test_dic['atom_label']
    atom_label_dic = OrderedDict()
    for i in range (len(atom_label)):
      #Each entry looks like 'index:element', e.g. '1:O'.
      label_split = data_op.split_str(atom_label[i], ':')
      atom_label_dic[int(label_split[0])] = label_split[1]
    dp_test_dic['atom_label'] = atom_label_dic
  else:
    log_info.log_error('Input error: no atom_label, please set analyze/dp_test/atom_label')
    exit()
  return dp_test_dic
| [
"CP2K_kit.tools.log_info.log_out",
"collections.OrderedDict",
"CP2K_kit.tools.data_op.eval_str",
"CP2K_kit.tools.data_op.str_to_bool",
"CP2K_kit.tools.traj_info.get_traj_info",
"CP2K_kit.tools.data_op.split_str",
"os.getcwd",
"CP2K_kit.tools.log_info.log_error",
"CP2K_kit.tools.data_op.comb_list_2_s... | [((1531, 1617), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no model, please set deepff/deepmd_model/model"""'], {}), "(\n 'Input error: no model, please set deepff/deepmd_model/model')\n", (1549, 1617), False, 'from CP2K_kit.tools import log_info\n'), ((5814, 5827), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5825, 5827), False, 'from collections import OrderedDict\n'), ((5943, 6050), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no learning_rate, please set deepff/deepmd_model/learning_rate"""'], {}), "(\n 'Input error: no learning_rate, please set deepff/deepmd_model/learning_rate'\n )\n", (5961, 6050), False, 'from CP2K_kit.tools import log_info\n'), ((7620, 7713), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no loss, please check or set deepff/deepmd_model/loss"""'], {}), "(\n 'Input error: no loss, please check or set deepff/deepmd_model/loss')\n", (7638, 7713), False, 'from CP2K_kit.tools import log_info\n'), ((10832, 10944), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no training found, please check or set deepff/deepmd_model/training"""'], {}), "(\n 'Input error: no training found, please check or set deepff/deepmd_model/training'\n )\n", (10850, 10944), False, 'from CP2K_kit.tools import log_info\n'), ((28785, 28891), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no init_dpff_dir, please set deepff/deepmd_test/init_dpff_dir"""'], {}), "(\n 'Input error: no init_dpff_dir, please set deepff/deepmd_test/init_dpff_dir'\n )\n", (28803, 28891), False, 'from CP2K_kit.tools import log_info\n'), ((29767, 29802), 'CP2K_kit.tools.data_op.str_to_bool', 'data_op.str_to_bool', (['fix_stop_batch'], {}), '(fix_stop_batch)\n', (29786, 29802), False, 'from CP2K_kit.tools import data_op\n'), ((30668, 30703), 'CP2K_kit.tools.data_op.str_to_bool', 
'data_op.str_to_bool', (['use_prev_model'], {}), '(use_prev_model)\n', (30687, 30703), False, 'from CP2K_kit.tools import data_op\n'), ((31128, 31161), 'CP2K_kit.tools.data_op.str_to_bool', 'data_op.str_to_bool', (['shuffle_data'], {}), '(shuffle_data)\n', (31147, 31161), False, 'from CP2K_kit.tools import data_op\n'), ((31574, 31607), 'CP2K_kit.tools.data_op.str_to_bool', 'data_op.str_to_bool', (['train_stress'], {}), '(train_stress)\n', (31593, 31607), False, 'from CP2K_kit.tools import data_op\n'), ((36022, 36058), 'CP2K_kit.tools.data_op.str_to_bool', 'data_op.str_to_bool', (['change_init_str'], {}), '(change_init_str)\n', (36041, 36058), False, 'from CP2K_kit.tools import data_op\n'), ((39047, 39157), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no system for lammps calculation, please set deepff/lammps/system"""'], {}), "(\n 'Input error: no system for lammps calculation, please set deepff/lammps/system'\n )\n", (39065, 39157), False, 'from CP2K_kit.tools import log_info\n'), ((50055, 50088), 'CP2K_kit.tools.data_op.str_to_bool', 'data_op.str_to_bool', (['use_prev_wfn'], {}), '(use_prev_wfn)\n', (50074, 50088), False, 'from CP2K_kit.tools import data_op\n'), ((51542, 51641), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no cp2k executable file, please set deepff/environ/cp2k_exe"""'], {}), "(\n 'Input error: no cp2k executable file, please set deepff/environ/cp2k_exe')\n", (51560, 51641), False, 'from CP2K_kit.tools import log_info\n'), ((52036, 52146), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no cp2k environment file, please set deepff/environ/cp2k_env_file"""'], {}), "(\n 'Input error: no cp2k environment file, please set deepff/environ/cp2k_env_file'\n )\n", (52054, 52146), False, 'from CP2K_kit.tools import log_info\n'), ((53325, 53431), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no cp2k parallel file, please set 
deepff/environ/parallel_exe"""'], {}), "(\n 'Input error: no cp2k parallel file, please set deepff/environ/parallel_exe'\n )\n", (53343, 53431), False, 'from CP2K_kit.tools import log_info\n'), ((56417, 56515), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no cp2k_frc_file, please set analyze/dp_test/cp2k_frc_file"""'], {}), "(\n 'Input error: no cp2k_frc_file, please set analyze/dp_test/cp2k_frc_file')\n", (56435, 56515), False, 'from CP2K_kit.tools import log_info\n'), ((56863, 56961), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no cp2k_pos_file, please set analyze/dp_test/cp2k_pos_file"""'], {}), "(\n 'Input error: no cp2k_pos_file, please set analyze/dp_test/cp2k_pos_file')\n", (56881, 56961), False, 'from CP2K_kit.tools import log_info\n'), ((57316, 57421), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no cp2k_cell_file, please set analyze/dp_test/cp2k_cell_file"""'], {}), "(\n 'Input error: no cp2k_cell_file, please set analyze/dp_test/cp2k_cell_file'\n )\n", (57334, 57421), False, 'from CP2K_kit.tools import log_info\n'), ((57736, 57826), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no dpff_file, please set analyze/dp_test/dpff_file"""'], {}), "(\n 'Input error: no dpff_file, please set analyze/dp_test/dpff_file')\n", (57754, 57826), False, 'from CP2K_kit.tools import log_info\n'), ((57943, 57956), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (57954, 57956), False, 'from collections import OrderedDict\n'), ((58167, 58259), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no atom label, please set analyze/dp_test/atom_label"""'], {}), "(\n 'Input error: no atom label, please set analyze/dp_test/atom_label')\n", (58185, 58259), False, 'from CP2K_kit.tools import log_info\n'), ((58376, 58389), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (58387, 58389), False, 'from 
collections import OrderedDict\n'), ((58604, 58696), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no atom_label, please set analyze/dp_test/atom_label"""'], {}), "(\n 'Input error: no atom_label, please set analyze/dp_test/atom_label')\n", (58622, 58696), False, 'from CP2K_kit.tools import log_info\n'), ((1367, 1478), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s is invalid key, please check or reset deepff/deepmd_model' %\n key)"], {}), "(\n 'Input error: %s is invalid key, please check or reset deepff/deepmd_model'\n % key)\n", (1385, 1478), False, 'from CP2K_kit.tools import log_info\n'), ((2165, 2263), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no type_map, please set deepff/deepmd_model/model/type_map"""'], {}), "(\n 'Input error: no type_map, please set deepff/deepmd_model/model/type_map')\n", (2183, 2263), False, 'from CP2K_kit.tools import log_info\n'), ((2337, 2444), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no descriptor, please set deepff/deepmd_model/model/descriptor"""'], {}), "(\n 'Input error: no descriptor, please set deepff/deepmd_model/model/descriptor'\n )\n", (2355, 2444), False, 'from CP2K_kit.tools import log_info\n'), ((18473, 18508), 'CP2K_kit.tools.data_op.str_to_bool', 'data_op.str_to_bool', (['use_prev_model'], {}), '(use_prev_model)\n', (18492, 18508), False, 'from CP2K_kit.tools import data_op\n'), ((19477, 19510), 'CP2K_kit.tools.data_op.str_to_bool', 'data_op.str_to_bool', (['shuffle_data'], {}), '(shuffle_data)\n', (19496, 19510), False, 'from CP2K_kit.tools import data_op\n'), ((20001, 20034), 'CP2K_kit.tools.data_op.str_to_bool', 'data_op.str_to_bool', (['train_stress'], {}), '(train_stress)\n', (20020, 20034), False, 'from CP2K_kit.tools import data_op\n'), ((22862, 22959), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no neuron, please set 
deepff/deepmd_model/training/neuron"""'], {}), "(\n 'Input error: no neuron, please set deepff/deepmd_model/training/neuron')\n", (22880, 22959), False, 'from CP2K_kit.tools import log_info\n'), ((23123, 23158), 'CP2K_kit.tools.data_op.str_to_bool', 'data_op.str_to_bool', (['fix_stop_batch'], {}), '(fix_stop_batch)\n', (23142, 23158), False, 'from CP2K_kit.tools import data_op\n'), ((26960, 26994), 'CP2K_kit.tools.data_op.str_to_bool', 'data_op.str_to_bool', (['disp_training'], {}), '(disp_training)\n', (26979, 26994), False, 'from CP2K_kit.tools import data_op\n'), ((28276, 28386), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s is invalid key, please check or reset deepff/deepmd_test' %\n key)"], {}), "(\n 'Input error: %s is invalid key, please check or reset deepff/deepmd_test'\n % key)\n", (28294, 28386), False, 'from CP2K_kit.tools import log_info\n'), ((28511, 28541), 'os.path.abspath', 'os.path.abspath', (['init_dpff_dir'], {}), '(init_dpff_dir)\n', (28526, 28541), False, 'import os\n'), ((28582, 28612), 'os.path.abspath', 'os.path.abspath', (['init_dpff_dir'], {}), '(init_dpff_dir)\n', (28597, 28612), False, 'import os\n'), ((28629, 28768), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s file does not exist, please check or reset deepff/deepmd_test/init_dpff_dir'\n % init_dpff_dir)"], {}), "(\n 'Input error: %s file does not exist, please check or reset deepff/deepmd_test/init_dpff_dir'\n % init_dpff_dir)\n", (28647, 28768), False, 'from CP2K_kit.tools import log_info\n'), ((28983, 29009), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['start_lr'], {}), '(start_lr)\n', (28999, 29009), False, 'from CP2K_kit.tools import data_op\n'), ((29081, 29205), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: the start_lr should be float, please check or reset deepff/deepmd_test/start_lr"""'], {}), "(\n 'Input error: the start_lr should be float, please check or reset 
deepff/deepmd_test/start_lr'\n )\n", (29099, 29205), False, 'from CP2K_kit.tools import log_info\n'), ((29473, 29597), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: the lr_scale should be float, please check or reset deepff/deepmd_test/lr_scale"""'], {}), "(\n 'Input error: the lr_scale should be float, please check or reset deepff/deepmd_test/lr_scale'\n )\n", (29491, 29597), False, 'from CP2K_kit.tools import log_info\n'), ((29926, 30057), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: fix_stop_batch should be bool, please check or reset deepff/deepmd_test/fix_stop_batch"""'], {}), "(\n 'Input error: fix_stop_batch should be bool, please check or reset deepff/deepmd_test/fix_stop_batch'\n )\n", (29944, 30057), False, 'from CP2K_kit.tools import log_info\n'), ((30827, 30958), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: use_prev_model should be bool, please check or reset deepff/deepmd_test/use_prev_model"""'], {}), "(\n 'Input error: use_prev_model should be bool, please check or reset deepff/deepmd_test/use_prev_model'\n )\n", (30845, 30958), False, 'from CP2K_kit.tools import log_info\n'), ((31279, 31406), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: shuffle_data should be bool, please check or reset deepff/deepmd_test/shuffle_data"""'], {}), "(\n 'Input error: shuffle_data should be bool, please check or reset deepff/deepmd_test/shuffle_data'\n )\n", (31297, 31406), False, 'from CP2K_kit.tools import log_info\n'), ((31725, 31852), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: train_stress should be bool, please check or reset deepff/deepmd_test/train_stress"""'], {}), "(\n 'Input error: train_stress should be bool, please check or reset deepff/deepmd_test/train_stress'\n )\n", (31743, 31852), False, 'from CP2K_kit.tools import log_info\n'), ((32510, 32615), 'CP2K_kit.tools.log_info.log_error', 
'log_info.log_error', (["('Input error: %s is invalid key, please check or reset deepff/lammps' % key)"], {}), "(\n 'Input error: %s is invalid key, please check or reset deepff/lammps' % key\n )\n", (32528, 32615), False, 'from CP2K_kit.tools import log_info\n'), ((32698, 32722), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['nsteps'], {}), '(nsteps)\n', (32714, 32722), False, 'from CP2K_kit.tools import data_op\n'), ((32815, 32928), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: nsteps should be integer, please check or reset deepff/lammps/nsteps"""'], {}), "(\n 'Input error: nsteps should be integer, please check or reset deepff/lammps/nsteps'\n )\n", (32833, 32928), False, 'from CP2K_kit.tools import log_info\n'), ((33086, 33122), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['write_restart_freq'], {}), '(write_restart_freq)\n', (33102, 33122), False, 'from CP2K_kit.tools import data_op\n'), ((33158, 33295), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: write_restart_freq should be integer, please check or reset deepff/lammps/write_restart_freq"""'], {}), "(\n 'Input error: write_restart_freq should be integer, please check or reset deepff/lammps/write_restart_freq'\n )\n", (33176, 33295), False, 'from CP2K_kit.tools import log_info\n'), ((33658, 33786), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: time_step should be integer or float, please check or reset deepff/lammps/time_step"""'], {}), "(\n 'Input error: time_step should be integer or float, please check or reset deepff/lammps/time_step'\n )\n", (33676, 33786), False, 'from CP2K_kit.tools import log_info\n'), ((33987, 34107), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: tau_t should be integer or float, please check or reset deepff/lammps/tau_t"""'], {}), "(\n 'Input error: tau_t should be integer or float, please check or reset deepff/lammps/tau_t'\n )\n", 
(34005, 34107), False, 'from CP2K_kit.tools import log_info\n'), ((34257, 34280), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['tau_p'], {}), '(tau_p)\n', (34273, 34280), False, 'from CP2K_kit.tools import data_op\n'), ((34316, 34436), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: tau_p should be integer or float, please check or reset deepff/lammps/tau_p"""'], {}), "(\n 'Input error: tau_p should be integer or float, please check or reset deepff/lammps/tau_p'\n )\n", (34334, 34436), False, 'from CP2K_kit.tools import log_info\n'), ((36182, 36306), 'CP2K_kit.tools.log_info.log_out', 'log_info.log_out', (['"""Input error: change_init_str should be bool, please check or set deepff/lammps/change_init_str"""'], {}), "(\n 'Input error: change_init_str should be bool, please check or set deepff/lammps/change_init_str'\n )\n", (36198, 36306), False, 'from CP2K_kit.tools import log_info\n'), ((39767, 39878), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s is invalid key, please check or reset deepff/active_learn' %\n key)"], {}), "(\n 'Input error: %s is invalid key, please check or reset deepff/active_learn'\n % key)\n", (39785, 39878), False, 'from CP2K_kit.tools import log_info\n'), ((40036, 40079), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['choose_new_data_num_limit'], {}), '(choose_new_data_num_limit)\n', (40052, 40079), False, 'from CP2K_kit.tools import data_op\n'), ((40189, 40346), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: choose_new_data_num_limit should be integer, please check or reset deepff/active_learn/choose_new_data_num_limit"""'], {}), "(\n 'Input error: choose_new_data_num_limit should be integer, please check or reset deepff/active_learn/choose_new_data_num_limit'\n )\n", (40207, 40346), False, 'from CP2K_kit.tools import log_info\n'), ((40522, 40550), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['judge_freq'], {}), 
'(judge_freq)\n', (40538, 40550), False, 'from CP2K_kit.tools import data_op\n'), ((40586, 40707), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: judge_freq should be integer, please check or reset deepff/lammps/judge_freq"""'], {}), "(\n 'Input error: judge_freq should be integer, please check or reset deepff/lammps/judge_freq'\n )\n", (40604, 40707), False, 'from CP2K_kit.tools import log_info\n'), ((41016, 41148), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: force_conv should be integer or float, please check or set deepff/model_devi/force_conv"""'], {}), "(\n 'Input error: force_conv should be integer or float, please check or set deepff/model_devi/force_conv'\n )\n", (41034, 41148), False, 'from CP2K_kit.tools import log_info\n'), ((41464, 41598), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: energy_conv should be integer or float, please check or set deepff/model_devi/energy_conv"""'], {}), "(\n 'Input error: energy_conv should be integer or float, please check or set deepff/model_devi/energy_conv'\n )\n", (41482, 41598), False, 'from CP2K_kit.tools import log_info\n'), ((41756, 41782), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['max_iter'], {}), '(max_iter)\n', (41772, 41782), False, 'from CP2K_kit.tools import data_op\n'), ((41858, 41979), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: max_iter should be integer, please check or reset deepff/model_devi/max_iter"""'], {}), "(\n 'Input error: max_iter should be integer, please check or reset deepff/model_devi/max_iter'\n )\n", (41876, 41979), False, 'from CP2K_kit.tools import log_info\n'), ((42144, 42174), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['restart_iter'], {}), '(restart_iter)\n', (42160, 42174), False, 'from CP2K_kit.tools import data_op\n'), ((42258, 42387), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: 
restart_iter should be integer, please check or reset deepff/model_devi/restart_iter"""'], {}), "(\n 'Input error: restart_iter should be integer, please check or reset deepff/model_devi/restart_iter'\n )\n", (42276, 42387), False, 'from CP2K_kit.tools import log_info\n'), ((43156, 43187), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['restart_stage'], {}), '(restart_stage)\n', (43172, 43187), False, 'from CP2K_kit.tools import data_op\n'), ((43273, 43404), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: restart_stage should be integer, please check or reset deepff/model_devi/restart_stage"""'], {}), "(\n 'Input error: restart_stage should be integer, please check or reset deepff/model_devi/restart_stage'\n )\n", (43291, 43404), False, 'from CP2K_kit.tools import log_info\n'), ((45273, 45384), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no basis_set_file_name, please set deepff/cp2k/basis_set_file_name"""'], {}), "(\n 'Input error: no basis_set_file_name, please set deepff/cp2k/basis_set_file_name'\n )\n", (45291, 45384), False, 'from CP2K_kit.tools import log_info\n'), ((45767, 45878), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no potential_file_name, please set deepff/cp2k/potential_file_name"""'], {}), "(\n 'Input error: no potential_file_name, please set deepff/cp2k/potential_file_name'\n )\n", (45785, 45878), False, 'from CP2K_kit.tools import log_info\n'), ((46002, 46035), 'CP2K_kit.tools.data_op.str_to_bool', 'data_op.str_to_bool', (['use_sr_basis'], {}), '(use_sr_basis)\n', (46021, 46035), False, 'from CP2K_kit.tools import data_op\n'), ((47850, 47925), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no charge, please set deepff/cp2k/charge"""'], {}), "('Input error: no charge, please set deepff/cp2k/charge')\n", (47868, 47925), False, 'from CP2K_kit.tools import log_info\n'), ((48266, 48358), 
'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no multiplicity, please set deepff/cp2k/multiplicity"""'], {}), "(\n 'Input error: no multiplicity, please set deepff/cp2k/multiplicity')\n", (48284, 48358), False, 'from CP2K_kit.tools import log_info\n'), ((49189, 49215), 'CP2K_kit.tools.data_op.str_to_bool', 'data_op.str_to_bool', (['dftd3'], {}), '(dftd3)\n', (49208, 49215), False, 'from CP2K_kit.tools import data_op\n'), ((50204, 50324), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: use_prev_wfn should be bool, please check or reset deepff/cp2k/use_prev_wfn"""'], {}), "(\n 'Input error: use_prev_wfn should be bool, please check or reset deepff/cp2k/use_prev_wfn'\n )\n", (50222, 50324), False, 'from CP2K_kit.tools import log_info\n'), ((51079, 51184), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s is invalid key, please check or reset deepff/environ' % key)"], {}), "(\n 'Input error: %s is invalid key, please check or reset deepff/environ' %\n key)\n", (51097, 51184), False, 'from CP2K_kit.tools import log_info\n'), ((51297, 51322), 'os.path.abspath', 'os.path.abspath', (['cp2k_exe'], {}), '(cp2k_exe)\n', (51312, 51322), False, 'import os\n'), ((51359, 51384), 'os.path.abspath', 'os.path.abspath', (['cp2k_exe'], {}), '(cp2k_exe)\n', (51374, 51384), False, 'import os\n'), ((51401, 51526), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: cp2k executable file does not exist, please check or set deepff/environ/cp2k_exe"""'], {}), "(\n 'Input error: cp2k executable file does not exist, please check or set deepff/environ/cp2k_exe'\n )\n", (51419, 51526), False, 'from CP2K_kit.tools import log_info\n'), ((51770, 51800), 'os.path.abspath', 'os.path.abspath', (['cp2k_env_file'], {}), '(cp2k_env_file)\n', (51785, 51800), False, 'import os\n'), ((51842, 51872), 'os.path.abspath', 'os.path.abspath', (['cp2k_env_file'], {}), '(cp2k_env_file)\n', (51857, 
51872), False, 'import os\n'), ((51889, 52020), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: cp2k environment file does not exist, please check or set deepff/environ/cp2k_env_file"""'], {}), "(\n 'Input error: cp2k environment file does not exist, please check or set deepff/environ/cp2k_env_file'\n )\n", (51907, 52020), False, 'from CP2K_kit.tools import log_info\n'), ((52456, 52593), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: current deepmd-kit version is not supported, please check or reset deepff/environ/dp_version"""'], {}), "(\n 'Input error: current deepmd-kit version is not supported, please check or reset deepff/environ/dp_version'\n )\n", (52474, 52593), False, 'from CP2K_kit.tools import log_info\n'), ((52714, 52739), 'os.path.abspath', 'os.path.abspath', (['cuda_dir'], {}), '(cuda_dir)\n', (52729, 52739), False, 'import os\n'), ((52776, 52801), 'os.path.abspath', 'os.path.abspath', (['cuda_dir'], {}), '(cuda_dir)\n', (52791, 52801), False, 'import os\n'), ((52818, 52937), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: cuda directory does not exist, please check or set deepff/environ/cuda_dir"""'], {}), "(\n 'Input error: cuda directory does not exist, please check or set deepff/environ/cuda_dir'\n )\n", (52836, 52937), False, 'from CP2K_kit.tools import log_info\n'), ((53060, 53089), 'os.path.abspath', 'os.path.abspath', (['parallel_exe'], {}), '(parallel_exe)\n', (53075, 53089), False, 'import os\n'), ((53130, 53159), 'os.path.abspath', 'os.path.abspath', (['parallel_exe'], {}), '(parallel_exe)\n', (53145, 53159), False, 'import os\n'), ((53176, 53309), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: parallel executable file does not exist, please check or set deepff/environ/parallel_exe"""'], {}), "(\n 'Input error: parallel executable file does not exist, please check or set deepff/environ/parallel_exe'\n )\n", (53194, 
53309), False, 'from CP2K_kit.tools import log_info\n'), ((53552, 53587), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['cp2k_job_per_node'], {}), '(cp2k_job_per_node)\n', (53568, 53587), False, 'from CP2K_kit.tools import data_op\n'), ((53676, 53812), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: cp2k_job_per_node should be integer, please check or reset deepff/environ/cp2k_job_per_node"""'], {}), "(\n 'Input error: cp2k_job_per_node should be integer, please check or reset deepff/environ/cp2k_job_per_node'\n )\n", (53694, 53812), False, 'from CP2K_kit.tools import log_info\n'), ((53981, 54015), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['lmp_job_per_node'], {}), '(lmp_job_per_node)\n', (53997, 54015), False, 'from CP2K_kit.tools import data_op\n'), ((54102, 54236), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: lmp_job_per_node should be integer, please check or reset deepff/environ/lmp_job_per_node"""'], {}), "(\n 'Input error: lmp_job_per_node should be integer, please check or reset deepff/environ/lmp_job_per_node'\n )\n", (54120, 54236), False, 'from CP2K_kit.tools import log_info\n'), ((54401, 54434), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['dp_job_per_node'], {}), '(dp_job_per_node)\n', (54417, 54434), False, 'from CP2K_kit.tools import data_op\n'), ((54519, 54651), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: dp_job_per_node should be integer, please check or reset deepff/environ/dp_job_per_node"""'], {}), "(\n 'Input error: dp_job_per_node should be integer, please check or reset deepff/environ/dp_job_per_node'\n )\n", (54537, 54651), False, 'from CP2K_kit.tools import log_info\n'), ((54827, 54864), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['lmp_omp_num_per_job'], {}), '(lmp_omp_num_per_job)\n', (54843, 54864), False, 'from CP2K_kit.tools import data_op\n'), ((54957, 55097), 
'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: lmp_omp_num_per_job should be integer, please check or reset deepff/environ/lmp_omp_num_per_job"""'], {}), "(\n 'Input error: lmp_omp_num_per_job should be integer, please check or reset deepff/environ/lmp_omp_num_per_job'\n )\n", (54975, 55097), False, 'from CP2K_kit.tools import log_info\n'), ((55277, 55314), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['lmp_mpi_num_per_job'], {}), '(lmp_mpi_num_per_job)\n', (55293, 55314), False, 'from CP2K_kit.tools import data_op\n'), ((55407, 55547), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: lmp_mpi_num_per_job should be integer, please check or reset deepff/environ/lmp_mpi_num_per_job"""'], {}), "(\n 'Input error: lmp_mpi_num_per_job should be integer, please check or reset deepff/environ/lmp_mpi_num_per_job'\n )\n", (55425, 55547), False, 'from CP2K_kit.tools import log_info\n'), ((56198, 56228), 'os.path.abspath', 'os.path.abspath', (['cp2k_frc_file'], {}), '(cp2k_frc_file)\n', (56213, 56228), False, 'import os\n'), ((56270, 56300), 'os.path.abspath', 'os.path.abspath', (['cp2k_frc_file'], {}), '(cp2k_frc_file)\n', (56285, 56300), False, 'import os\n'), ((56317, 56390), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s file does not exist' % cp2k_frc_file)"], {}), "('Input error: %s file does not exist' % cp2k_frc_file)\n", (56335, 56390), False, 'from CP2K_kit.tools import log_info\n'), ((56644, 56674), 'os.path.abspath', 'os.path.abspath', (['cp2k_pos_file'], {}), '(cp2k_pos_file)\n', (56659, 56674), False, 'import os\n'), ((56716, 56746), 'os.path.abspath', 'os.path.abspath', (['cp2k_pos_file'], {}), '(cp2k_pos_file)\n', (56731, 56746), False, 'import os\n'), ((56763, 56836), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s file does not exist' % cp2k_pos_file)"], {}), "('Input error: %s file does not exist' % cp2k_pos_file)\n", (56781, 
56836), False, 'from CP2K_kit.tools import log_info\n'), ((57093, 57124), 'os.path.abspath', 'os.path.abspath', (['cp2k_cell_file'], {}), '(cp2k_cell_file)\n', (57108, 57124), False, 'import os\n'), ((57167, 57198), 'os.path.abspath', 'os.path.abspath', (['cp2k_cell_file'], {}), '(cp2k_cell_file)\n', (57182, 57198), False, 'import os\n'), ((57215, 57289), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s file does not exist' % cp2k_cell_file)"], {}), "('Input error: %s file does not exist' % cp2k_cell_file)\n", (57233, 57289), False, 'from CP2K_kit.tools import log_info\n'), ((57533, 57559), 'os.path.abspath', 'os.path.abspath', (['dpff_file'], {}), '(dpff_file)\n', (57548, 57559), False, 'import os\n'), ((57597, 57623), 'os.path.abspath', 'os.path.abspath', (['dpff_file'], {}), '(dpff_file)\n', (57612, 57623), False, 'import os\n'), ((57640, 57709), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s file does not exist' % dpff_file)"], {}), "('Input error: %s file does not exist' % dpff_file)\n", (57658, 57709), False, 'from CP2K_kit.tools import log_info\n'), ((58015, 58052), 'CP2K_kit.tools.data_op.split_str', 'data_op.split_str', (['atom_label[i]', '""":"""'], {}), "(atom_label[i], ':')\n", (58032, 58052), False, 'from CP2K_kit.tools import data_op\n'), ((58448, 58485), 'CP2K_kit.tools.data_op.split_str', 'data_op.split_str', (['atom_label[i]', '""":"""'], {}), "(atom_label[i], ':')\n", (58465, 58485), False, 'from CP2K_kit.tools import data_op\n'), ((1720, 1837), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s is invalid key, please check or reset deepff/deepmd_model/model'\n % key)"], {}), "(\n 'Input error: %s is invalid key, please check or reset deepff/deepmd_model/model'\n % key)\n", (1738, 1837), False, 'from CP2K_kit.tools import log_info\n'), ((2015, 2143), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: type_map should be string, 
please check or reset deepff/deepmd_model/model/type_map"""'], {}), "(\n 'Input error: type_map should be string, please check or reset deepff/deepmd_model/model/type_map'\n )\n", (2033, 2143), False, 'from CP2K_kit.tools import log_info\n'), ((3518, 3617), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no sel, please set deepff/deepmd_model/model/descriptor/sel"""'], {}), "(\n 'Input error: no sel, please set deepff/deepmd_model/model/descriptor/sel')\n", (3536, 3617), False, 'from CP2K_kit.tools import log_info\n'), ((6153, 6278), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s is invalid key, please check or reset deepff/deepmd_model/learning_rate'\n % key)"], {}), "(\n 'Input error: %s is invalid key, please check or reset deepff/deepmd_model/learning_rate'\n % key)\n", (6171, 6278), False, 'from CP2K_kit.tools import log_info\n'), ((6408, 6436), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['decay_type'], {}), '(decay_type)\n', (6424, 6436), False, 'from CP2K_kit.tools import data_op\n'), ((6478, 6623), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: type in learning_rate should be string, please check or reset deepff/deepmd_model/learning_rate/type"""'], {}), "(\n 'Input error: type in learning_rate should be string, please check or reset deepff/deepmd_model/learning_rate/type'\n )\n", (6496, 6623), False, 'from CP2K_kit.tools import log_info\n'), ((6819, 6845), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['start_lr'], {}), '(start_lr)\n', (6835, 6845), False, 'from CP2K_kit.tools import data_op\n'), ((6940, 7066), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: start_lr should be float, please check deepff/deepmd_model/learning_rate/start_lr"""'], {}), "(\n 'Input error: start_lr should be float, please check deepff/deepmd_model/learning_rate/start_lr'\n )\n", (6958, 7066), False, 'from CP2K_kit.tools import 
log_info\n'), ((7263, 7288), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['stop_lr'], {}), '(stop_lr)\n', (7279, 7288), False, 'from CP2K_kit.tools import data_op\n'), ((7381, 7505), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: stop_lr should be float, please check deepff/deepmd_model/learning_rate/stop_lr"""'], {}), "(\n 'Input error: stop_lr should be float, please check deepff/deepmd_model/learning_rate/stop_lr'\n )\n", (7399, 7505), False, 'from CP2K_kit.tools import log_info\n'), ((7814, 7930), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s is invalid key, please check or reset deepff/deepmd_model/loss'\n % key)"], {}), "(\n 'Input error: %s is invalid key, please check or reset deepff/deepmd_model/loss'\n % key)\n", (7832, 7930), False, 'from CP2K_kit.tools import log_info\n'), ((8223, 8348), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: start_pref_e should be float, please check deepff/deepmd_model/loss/start_pref_e"""'], {}), "(\n 'Input error: start_pref_e should be float, please check deepff/deepmd_model/loss/start_pref_e'\n )\n", (8241, 8348), False, 'from CP2K_kit.tools import log_info\n'), ((8699, 8824), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: limit_pref_e should be float, please check deepff/deepmd_model/loss/limit_pref_e"""'], {}), "(\n 'Input error: limit_pref_e should be float, please check deepff/deepmd_model/loss/limit_pref_e'\n )\n", (8717, 8824), False, 'from CP2K_kit.tools import log_info\n'), ((9174, 9299), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: start_pref_f should be float, please check deepff/deepmd_model/loss/start_pref_f"""'], {}), "(\n 'Input error: start_pref_f should be float, please check deepff/deepmd_model/loss/start_pref_f'\n )\n", (9192, 9299), False, 'from CP2K_kit.tools import log_info\n'), ((9652, 9777), 'CP2K_kit.tools.log_info.log_error', 
'log_info.log_error', (['"""Input error: limit_pref_f should be float, please check deepff/deepmd_model/loss/limit_pref_f"""'], {}), "(\n 'Input error: limit_pref_f should be float, please check deepff/deepmd_model/loss/limit_pref_f'\n )\n", (9670, 9777), False, 'from CP2K_kit.tools import log_info\n'), ((10127, 10252), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: start_pref_v should be float, please check deepff/deepmd_model/loss/start_pref_v"""'], {}), "(\n 'Input error: start_pref_v should be float, please check deepff/deepmd_model/loss/start_pref_v'\n )\n", (10145, 10252), False, 'from CP2K_kit.tools import log_info\n'), ((10602, 10718), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: limit_pref_v should, please check deepff/deepmd_model/loss/limit_pref_v"""'], {}), "(\n 'Input error: limit_pref_v should, please check deepff/deepmd_model/loss/limit_pref_v'\n )\n", (10620, 10718), False, 'from CP2K_kit.tools import log_info\n'), ((11066, 11184), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s is invalid key, please check or reset deepff/deepmd_model/training'\n % i)"], {}), "(\n 'Input error: %s is invalid key, please check or reset deepff/deepmd_model/training'\n % i)\n", (11084, 11184), False, 'from CP2K_kit.tools import log_info\n'), ((18652, 18793), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: use_prev_model should be bool, please check or reset deepff/deepmd_model/training/use_prev_model"""'], {}), "(\n 'Input error: use_prev_model should be bool, please check or reset deepff/deepmd_model/training/use_prev_model'\n )\n", (18670, 18793), False, 'from CP2K_kit.tools import log_info\n'), ((19135, 19269), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: the lr_scale should be float, please check or reset deepff/deepmd_model/training/lr_scale"""'], {}), "(\n 'Input error: the lr_scale should be float, please 
check or reset deepff/deepmd_model/training/lr_scale'\n )\n", (19153, 19269), False, 'from CP2K_kit.tools import log_info\n'), ((19648, 19785), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: shuffle_data should be bool, please check or reset deepff/deepmd_model/training/shuffle_data"""'], {}), "(\n 'Input error: shuffle_data should be bool, please check or reset deepff/deepmd_model/training/shuffle_data'\n )\n", (19666, 19785), False, 'from CP2K_kit.tools import log_info\n'), ((20172, 20309), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: train_stress should be bool, please check or reset deepff/deepmd_model/training/train_stress"""'], {}), "(\n 'Input error: train_stress should be bool, please check or reset deepff/deepmd_model/training/train_stress'\n )\n", (20190, 20309), False, 'from CP2K_kit.tools import log_info\n'), ((20525, 20554), 'os.path.abspath', 'os.path.abspath', (['set_data_dir'], {}), '(set_data_dir)\n', (20540, 20554), False, 'import os\n'), ((20608, 20637), 'os.path.abspath', 'os.path.abspath', (['set_data_dir'], {}), '(set_data_dir)\n', (20623, 20637), False, 'import os\n'), ((20658, 20795), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: %s directory does not exist, please check or reset deepff/deepmd_model/training/set_data_dir"""'], {}), "(\n 'Input error: %s directory does not exist, please check or reset deepff/deepmd_model/training/set_data_dir'\n )\n", (20676, 20795), False, 'from CP2K_kit.tools import log_info\n'), ((21016, 21170), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: only use_seed and use_node are supported for model_type, please check deepff/deepmd_model/training/model_type"""'], {}), "(\n 'Input error: only use_seed and use_node are supported for model_type, please check deepff/deepmd_model/training/model_type'\n )\n", (21034, 21170), False, 'from CP2K_kit.tools import log_info\n'), ((21961, 22002), 
'CP2K_kit.tools.data_op.comb_list_2_str', 'data_op.comb_list_2_str', (['neuron_list', '""" """'], {}), "(neuron_list, ' ')\n", (21984, 22002), False, 'from CP2K_kit.tools import data_op\n'), ((22022, 22055), 'CP2K_kit.tools.data_op.split_str', 'data_op.split_str', (['tmp_str', '"""..."""'], {}), "(tmp_str, '...')\n", (22039, 22055), False, 'from CP2K_kit.tools import data_op\n'), ((23302, 23443), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: fix_stop_batch should be bool, please check or reset deepff/deepmd_model/training/fix_stop_batch"""'], {}), "(\n 'Input error: fix_stop_batch should be bool, please check or reset deepff/deepmd_model/training/fix_stop_batch'\n )\n", (23320, 23443), False, 'from CP2K_kit.tools import log_info\n'), ((25176, 25204), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['batch_size'], {}), '(batch_size)\n', (25192, 25204), False, 'from CP2K_kit.tools import data_op\n'), ((25296, 25432), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: batch_size shoule be integer, please check or reset deepff/deepmd_model/training/batch_size"""'], {}), "(\n 'Input error: batch_size shoule be integer, please check or reset deepff/deepmd_model/training/batch_size'\n )\n", (25314, 25432), False, 'from CP2K_kit.tools import log_info\n'), ((25618, 25645), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['disp_freq'], {}), '(disp_freq)\n', (25634, 25645), False, 'from CP2K_kit.tools import data_op\n'), ((25735, 25869), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: disp_freq should be integer, please check or reset deepff/deepmd_model/training/disp_freq"""'], {}), "(\n 'Input error: disp_freq should be integer, please check or reset deepff/deepmd_model/training/disp_freq'\n )\n", (25753, 25869), False, 'from CP2K_kit.tools import log_info\n'), ((26056, 26083), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['numb_test'], {}), '(numb_test)\n', 
(26072, 26083), False, 'from CP2K_kit.tools import data_op\n'), ((26173, 26307), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: numb_test should be integer, please check or reset deepff/deepmd_model/training/numb_test"""'], {}), "(\n 'Input error: numb_test should be integer, please check or reset deepff/deepmd_model/training/numb_test'\n )\n", (26191, 26307), False, 'from CP2K_kit.tools import log_info\n'), ((26493, 26520), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['save_freq'], {}), '(save_freq)\n', (26509, 26520), False, 'from CP2K_kit.tools import data_op\n'), ((26610, 26744), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: save_freq should be integer, please check or reset deepff/deepmd_model/training/save_freq"""'], {}), "(\n 'Input error: save_freq should be integer, please check or reset deepff/deepmd_model/training/save_freq'\n )\n", (26628, 26744), False, 'from CP2K_kit.tools import log_info\n'), ((27135, 27274), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: disp_training should be bool, please check or reset deepff/deepmd_model/training/disp_training"""'], {}), "(\n 'Input error: disp_training should be bool, please check or reset deepff/deepmd_model/training/disp_training'\n )\n", (27153, 27274), False, 'from CP2K_kit.tools import log_info\n'), ((29340, 29366), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['lr_scale'], {}), '(lr_scale)\n', (29356, 29366), False, 'from CP2K_kit.tools import data_op\n'), ((29375, 29401), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['lr_scale'], {}), '(lr_scale)\n', (29391, 29401), False, 'from CP2K_kit.tools import data_op\n'), ((30252, 30279), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['epoch_num'], {}), '(epoch_num)\n', (30268, 30279), False, 'from CP2K_kit.tools import data_op\n'), ((30357, 30491), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: 
the number of epoch should be integer, please check or reset deepff/deepmd_test/epoch_num"""'], {}), "(\n 'Input error: the number of epoch should be integer, please check or reset deepff/deepmd_test/epoch_num'\n )\n", (30375, 30491), False, 'from CP2K_kit.tools import log_info\n'), ((33559, 33586), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['time_step'], {}), '(time_step)\n', (33575, 33586), False, 'from CP2K_kit.tools import data_op\n'), ((33595, 33622), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['time_step'], {}), '(time_step)\n', (33611, 33622), False, 'from CP2K_kit.tools import data_op\n'), ((33896, 33919), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['tau_t'], {}), '(tau_t)\n', (33912, 33919), False, 'from CP2K_kit.tools import data_op\n'), ((33928, 33951), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['tau_t'], {}), '(tau_t)\n', (33944, 33951), False, 'from CP2K_kit.tools import data_op\n'), ((34765, 34897), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: multipole temperature should be list of float, please check or reset deepff/lammps/temp"""'], {}), "(\n 'Input error: multipole temperature should be list of float, please check or reset deepff/lammps/temp'\n )\n", (34783, 34897), False, 'from CP2K_kit.tools import log_info\n'), ((35044, 35168), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: temp should be float or list of float, please check or reset deepff/lammps/temp"""'], {}), "(\n 'Input error: temp should be float or list of float, please check or reset deepff/lammps/temp'\n )\n", (35062, 35168), False, 'from CP2K_kit.tools import log_info\n'), ((35466, 35595), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: multipole pressure should be list of float, please check or reset deepff/lammps/pres"""'], {}), "(\n 'Input error: multipole pressure should be list of float, please check or reset deepff/lammps/pres'\n 
)\n", (35484, 35595), False, 'from CP2K_kit.tools import log_info\n'), ((35742, 35857), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: pres shoule be float or list of float, please check deepff/lammps/pres"""'], {}), "(\n 'Input error: pres shoule be float or list of float, please check deepff/lammps/pres'\n )\n", (35760, 35857), False, 'from CP2K_kit.tools import log_info\n'), ((38083, 38111), 'CP2K_kit.tools.data_op.str_to_bool', 'data_op.str_to_bool', (['use_mtd'], {}), '(use_mtd)\n', (38102, 38111), False, 'from CP2K_kit.tools import data_op\n'), ((40869, 40897), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['force_conv'], {}), '(force_conv)\n', (40885, 40897), False, 'from CP2K_kit.tools import data_op\n'), ((40906, 40934), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['force_conv'], {}), '(force_conv)\n', (40922, 40934), False, 'from CP2K_kit.tools import data_op\n'), ((41313, 41342), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['energy_conv'], {}), '(energy_conv)\n', (41329, 41342), False, 'from CP2K_kit.tools import data_op\n'), ((41351, 41380), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['energy_conv'], {}), '(energy_conv)\n', (41367, 41380), False, 'from CP2K_kit.tools import data_op\n'), ((42572, 42598), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['data_num'], {}), '(data_num)\n', (42588, 42598), False, 'from CP2K_kit.tools import data_op\n'), ((42849, 42986), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: data_num should be integer or integer list, please check or reset deepff/model_devi/data_num"""'], {}), "(\n 'Input error: data_num should be integer or integer list, please check or reset deepff/model_devi/data_num'\n )\n", (42867, 42986), False, 'from CP2K_kit.tools import log_info\n'), ((44002, 44166), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: the number of cp2k input file should be equal to 
the number of systems, please check or reset deepff/cp2k/cp2k_inp_file"""'], {}), "(\n 'Input error: the number of cp2k input file should be equal to the number of systems, please check or reset deepff/cp2k/cp2k_inp_file'\n )\n", (44020, 44166), False, 'from CP2K_kit.tools import log_info\n'), ((45034, 45070), 'os.path.abspath', 'os.path.abspath', (['basis_set_file_name'], {}), '(basis_set_file_name)\n', (45049, 45070), False, 'import os\n'), ((45117, 45153), 'os.path.abspath', 'os.path.abspath', (['basis_set_file_name'], {}), '(basis_set_file_name)\n', (45132, 45153), False, 'import os\n'), ((45174, 45240), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('%s file does not exist' % basis_set_file_name)"], {}), "('%s file does not exist' % basis_set_file_name)\n", (45192, 45240), False, 'from CP2K_kit.tools import log_info\n'), ((45528, 45564), 'os.path.abspath', 'os.path.abspath', (['potential_file_name'], {}), '(potential_file_name)\n', (45543, 45564), False, 'import os\n'), ((45611, 45647), 'os.path.abspath', 'os.path.abspath', (['potential_file_name'], {}), '(potential_file_name)\n', (45626, 45647), False, 'import os\n'), ((45668, 45734), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('%s file does not exist' % potential_file_name)"], {}), "('%s file does not exist' % potential_file_name)\n", (45686, 45734), False, 'from CP2K_kit.tools import log_info\n'), ((46159, 46279), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: use_sr_basis should be bool, please check or reset deepff/cp2k/use_sr_basis"""'], {}), "(\n 'Input error: use_sr_basis should be bool, please check or reset deepff/cp2k/use_sr_basis'\n )\n", (46177, 46279), False, 'from CP2K_kit.tools import log_info\n'), ((46567, 46646), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s basis set is not surported!' % basis_level)"], {}), "('Input error: %s basis set is not surported!' 
% basis_level)\n", (46585, 46646), False, 'from CP2K_kit.tools import log_info\n'), ((47028, 47141), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: poisson_periodic %s is not supported, please check' %\n poisson_periodic)"], {}), "(\n 'Input error: poisson_periodic %s is not supported, please check' %\n poisson_periodic)\n", (47046, 47141), False, 'from CP2K_kit.tools import log_info\n'), ((47415, 47522), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: cell_periodic %s is not supported, please check' % cell_periodic\n )"], {}), "(\n 'Input error: cell_periodic %s is not supported, please check' %\n cell_periodic)\n", (47433, 47522), False, 'from CP2K_kit.tools import log_info\n'), ((47651, 47675), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['charge'], {}), '(charge)\n', (47667, 47675), False, 'from CP2K_kit.tools import data_op\n'), ((47717, 47828), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: charge should be integer, please check or reset deepff/cp2k/charge"""'], {}), "(\n 'Input error: charge should be integer, please check or reset deepff/cp2k/charge'\n )\n", (47735, 47828), False, 'from CP2K_kit.tools import log_info\n'), ((48043, 48073), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['multiplicity'], {}), '(multiplicity)\n', (48059, 48073), False, 'from CP2K_kit.tools import data_op\n'), ((48115, 48244), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: multiplicity wrong should be integer, please check or reset deepff/cp2k/multiplicity"""'], {}), "(\n 'Input error: multiplicity wrong should be integer, please check or reset deepff/cp2k/multiplicity'\n )\n", (48133, 48244), False, 'from CP2K_kit.tools import log_info\n'), ((48552, 48663), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: cutoff should be float or integer, please check deepff/cp2k/cutoff"""'], {}), "(\n 'Input error: cutoff 
should be float or integer, please check deepff/cp2k/cutoff'\n )\n", (48570, 48663), False, 'from CP2K_kit.tools import log_info\n'), ((48933, 49040), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s functional is not suported for xc functional' % xc_functional\n )"], {}), "(\n 'Input error: %s functional is not suported for xc functional' %\n xc_functional)\n", (48951, 49040), False, 'from CP2K_kit.tools import log_info\n'), ((49318, 49433), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: dftd3 should be bool, please check or check or reset deepff/cp2k/dftd3"""'], {}), "(\n 'Input error: dftd3 should be bool, please check or check or reset deepff/cp2k/dftd3'\n )\n", (49336, 49433), False, 'from CP2K_kit.tools import log_info\n'), ((49858, 49946), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no dftd3 file, please set deepff/cp2k/dftd3_file"""'], {}), "(\n 'Input error: no dftd3 file, please set deepff/cp2k/dftd3_file')\n", (49876, 49946), False, 'from CP2K_kit.tools import log_info\n'), ((2566, 2694), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s is invalid key, please check or reset deepff/deepmd_model/model/descriptor'\n % key)"], {}), "(\n 'Input error: %s is invalid key, please check or reset deepff/deepmd_model/model/descriptor'\n % key)\n", (2584, 2694), False, 'from CP2K_kit.tools import log_info\n'), ((2956, 3066), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: %s is not supported for deepff/deepmd_model/model/descriptor/type"""'], {}), "(\n 'Input error: %s is not supported for deepff/deepmd_model/model/descriptor/type'\n )\n", (2974, 3066), False, 'from CP2K_kit.tools import log_info\n'), ((3352, 3490), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: sel shoule be list of integer, please check or reset deepff/deepmd_model/model/descriptor/sel"""'], {}), "(\n 'Input 
error: sel shoule be list of integer, please check or reset deepff/deepmd_model/model/descriptor/sel'\n )\n", (3370, 3490), False, 'from CP2K_kit.tools import log_info\n'), ((3951, 4091), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: rcut_smth shoule be float, please check or reset deepff/deepmd_model/model/descriptor/rcut_smth"""'], {}), "(\n 'Input error: rcut_smth shoule be float, please check or reset deepff/deepmd_model/model/descriptor/rcut_smth'\n )\n", (3969, 4091), False, 'from CP2K_kit.tools import log_info\n'), ((4460, 4590), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: rcut should be float, please check or reset deepff/deepmd_model/model/descriptor/rcut"""'], {}), "(\n 'Input error: rcut should be float, please check or reset deepff/deepmd_model/model/descriptor/rcut'\n )\n", (4478, 4590), False, 'from CP2K_kit.tools import log_info\n'), ((4983, 5098), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: neuron error, please check deepff/deepmd_model/model/descriptor/neuron"""'], {}), "(\n 'Input error: neuron error, please check deepff/deepmd_model/model/descriptor/neuron'\n )\n", (5001, 5098), False, 'from CP2K_kit.tools import log_info\n'), ((5405, 5434), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['axis_neuron'], {}), '(axis_neuron)\n', (5421, 5434), False, 'from CP2K_kit.tools import data_op\n'), ((5545, 5690), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: axis_neuron should be list of integer, please check deepff/deepmd_model/model/descriptor/axis_neuron"""'], {}), "(\n 'Input error: axis_neuron should be list of integer, please check deepff/deepmd_model/model/descriptor/axis_neuron'\n )\n", (5563, 5690), False, 'from CP2K_kit.tools import log_info\n'), ((8060, 8090), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['start_pref_e'], {}), '(start_pref_e)\n', (8076, 8090), False, 'from CP2K_kit.tools import 
data_op\n'), ((8099, 8129), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['start_pref_e'], {}), '(start_pref_e)\n', (8115, 8129), False, 'from CP2K_kit.tools import data_op\n'), ((8536, 8566), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['limit_pref_e'], {}), '(limit_pref_e)\n', (8552, 8566), False, 'from CP2K_kit.tools import data_op\n'), ((8575, 8605), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['limit_pref_e'], {}), '(limit_pref_e)\n', (8591, 8605), False, 'from CP2K_kit.tools import data_op\n'), ((9011, 9041), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['start_pref_f'], {}), '(start_pref_f)\n', (9027, 9041), False, 'from CP2K_kit.tools import data_op\n'), ((9050, 9080), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['start_pref_f'], {}), '(start_pref_f)\n', (9066, 9080), False, 'from CP2K_kit.tools import data_op\n'), ((9489, 9519), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['limit_pref_f'], {}), '(limit_pref_f)\n', (9505, 9519), False, 'from CP2K_kit.tools import data_op\n'), ((9528, 9558), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['limit_pref_f'], {}), '(limit_pref_f)\n', (9544, 9558), False, 'from CP2K_kit.tools import data_op\n'), ((9964, 9994), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['start_pref_v'], {}), '(start_pref_v)\n', (9980, 9994), False, 'from CP2K_kit.tools import data_op\n'), ((10003, 10033), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['start_pref_v'], {}), '(start_pref_v)\n', (10019, 10033), False, 'from CP2K_kit.tools import data_op\n'), ((10439, 10469), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['limit_pref_v'], {}), '(limit_pref_v)\n', (10455, 10469), False, 'from CP2K_kit.tools import data_op\n'), ((10478, 10508), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['limit_pref_v'], {}), '(limit_pref_v)\n', (10494, 10508), False, 'from CP2K_kit.tools import data_op\n'), ((12149, 12291), 
'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no coordination trajectory file, please check deepff/deepmd_model/training/system/traj_coord_file"""'], {}), "(\n 'Input error: no coordination trajectory file, please check deepff/deepmd_model/training/system/traj_coord_file'\n )\n", (12167, 12291), False, 'from CP2K_kit.tools import log_info\n'), ((14748, 14805), 'CP2K_kit.tools.traj_info.get_traj_info', 'traj_info.get_traj_info', (['traj_coord_file', 'coord_file_type'], {}), '(traj_coord_file, coord_file_type)\n', (14771, 14805), False, 'from CP2K_kit.tools import traj_info\n'), ((18984, 19010), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['lr_scale'], {}), '(lr_scale)\n', (19000, 19010), False, 'from CP2K_kit.tools import data_op\n'), ((19019, 19045), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['lr_scale'], {}), '(lr_scale)\n', (19035, 19045), False, 'from CP2K_kit.tools import data_op\n'), ((21431, 21457), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['seed_num'], {}), '(seed_num)\n', (21447, 21457), False, 'from CP2K_kit.tools import data_op\n'), ((21551, 21683), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: seed_num should be integer, please check or reset deepff/deepmd_model/training/seed_num"""'], {}), "(\n 'Input error: seed_num should be integer, please check or reset deepff/deepmd_model/training/seed_num'\n )\n", (21569, 21683), False, 'from CP2K_kit.tools import log_info\n'), ((22117, 22152), 'CP2K_kit.tools.data_op.split_str', 'data_op.split_str', (['tmp_list[i]', '""" """'], {}), "(tmp_list[i], ' ')\n", (22134, 22152), False, 'from CP2K_kit.tools import data_op\n'), ((23744, 23773), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['decay_steps'], {}), '(decay_steps)\n', (23760, 23773), False, 'from CP2K_kit.tools import data_op\n'), ((23878, 24025), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: the decay_steps should 
be integer, please check or reset deepff/deepmd_model/learning_rate/decay_steps"""'], {}), "(\n 'Input error: the decay_steps should be integer, please check or reset deepff/deepmd_model/learning_rate/decay_steps'\n )\n", (23896, 24025), False, 'from CP2K_kit.tools import log_info\n'), ((24234, 24262), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['stop_batch'], {}), '(stop_batch)\n', (24250, 24262), False, 'from CP2K_kit.tools import data_op\n'), ((24360, 24500), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: the stop_batch should be integer, please check or reset deepff/deepmd_model/training/stop_batch"""'], {}), "(\n 'Input error: the stop_batch should be integer, please check or reset deepff/deepmd_model/training/stop_batch'\n )\n", (24378, 24500), False, 'from CP2K_kit.tools import log_info\n'), ((24713, 24740), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['epoch_num'], {}), '(epoch_num)\n', (24729, 24740), False, 'from CP2K_kit.tools import data_op\n'), ((24836, 24980), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: the number of epoch should be integer, please check or reset deepff/deepmd_model/training/epoch_num"""'], {}), "(\n 'Input error: the number of epoch should be integer, please check or reset deepff/deepmd_model/training/epoch_num'\n )\n", (24854, 24980), False, 'from CP2K_kit.tools import log_info\n'), ((34924, 34946), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['temp'], {}), '(temp)\n', (34940, 34946), False, 'from CP2K_kit.tools import data_op\n'), ((34955, 34977), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['temp'], {}), '(temp)\n', (34971, 34977), False, 'from CP2K_kit.tools import data_op\n'), ((35622, 35644), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['pres'], {}), '(pres)\n', (35638, 35644), False, 'from CP2K_kit.tools import data_op\n'), ((35653, 35675), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', 
(['pres'], {}), '(pres)\n', (35669, 35675), False, 'from CP2K_kit.tools import data_op\n'), ((36566, 36591), 'os.path.abspath', 'os.path.abspath', (['box_file'], {}), '(box_file)\n', (36581, 36591), False, 'import os\n'), ((36628, 36653), 'os.path.abspath', 'os.path.abspath', (['box_file'], {}), '(box_file)\n', (36643, 36653), False, 'import os\n'), ((36678, 36746), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s file does not exist' % box_file)"], {}), "('Input error: %s file does not exist' % box_file)\n", (36696, 36746), False, 'from CP2K_kit.tools import log_info\n'), ((36882, 36909), 'os.path.abspath', 'os.path.abspath', (['coord_file'], {}), '(coord_file)\n', (36897, 36909), False, 'import os\n'), ((36948, 36975), 'os.path.abspath', 'os.path.abspath', (['coord_file'], {}), '(coord_file)\n', (36963, 36975), False, 'import os\n'), ((37000, 37070), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s file does not exist' % coord_file)"], {}), "('Input error: %s file does not exist' % coord_file)\n", (37018, 37070), False, 'from CP2K_kit.tools import log_info\n'), ((37765, 37910), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: only nve, nvt and npt are supportted for md_type, please check or reset deepff/lammps/system/md_type"""'], {}), "(\n 'Input error: only nve, nvt and npt are supportted for md_type, please check or reset deepff/lammps/system/md_type'\n )\n", (37783, 37910), False, 'from CP2K_kit.tools import log_info\n'), ((38232, 38347), 'CP2K_kit.tools.log_info.log_out', 'log_info.log_out', (['"""Input error: use_mtd should be bool, please check or set deepff/lammps/system/use_mtd"""'], {}), "(\n 'Input error: use_mtd should be bool, please check or set deepff/lammps/system/use_mtd'\n )\n", (38248, 38347), False, 'from CP2K_kit.tools import log_info\n'), ((38873, 39011), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: as user want to use 
plumed, but no plumed_file, please check deepff/lammps/system/plumed_file"""'], {}), "(\n 'Input error: as user want to use plumed, but no plumed_file, please check deepff/lammps/system/plumed_file'\n )\n", (38891, 39011), False, 'from CP2K_kit.tools import log_info\n'), ((44522, 44552), 'os.path.abspath', 'os.path.abspath', (['cp2k_inp_file'], {}), '(cp2k_inp_file)\n', (44537, 44552), False, 'import os\n'), ((44642, 44702), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('%s file does not exist' % cp2k_inp_file)"], {}), "('%s file does not exist' % cp2k_inp_file)\n", (44660, 44702), False, 'from CP2K_kit.tools import log_info\n'), ((48453, 48477), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['cutoff'], {}), '(cutoff)\n', (48469, 48477), False, 'from CP2K_kit.tools import data_op\n'), ((48486, 48510), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['cutoff'], {}), '(cutoff)\n', (48502, 48510), False, 'from CP2K_kit.tools import data_op\n'), ((49626, 49653), 'os.path.abspath', 'os.path.abspath', (['dftd3_file'], {}), '(dftd3_file)\n', (49641, 49653), False, 'import os\n'), ((49693, 49720), 'os.path.abspath', 'os.path.abspath', (['dftd3_file'], {}), '(dftd3_file)\n', (49708, 49720), False, 'import os\n'), ((1912, 1931), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['i'], {}), '(i)\n', (1928, 1931), False, 'from CP2K_kit.tools import data_op\n'), ((3779, 3806), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['rcut_smth'], {}), '(rcut_smth)\n', (3795, 3806), False, 'from CP2K_kit.tools import data_op\n'), ((3815, 3842), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['rcut_smth'], {}), '(rcut_smth)\n', (3831, 3842), False, 'from CP2K_kit.tools import data_op\n'), ((4308, 4330), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['rcut'], {}), '(rcut)\n', (4324, 4330), False, 'from CP2K_kit.tools import data_op\n'), ((4339, 4361), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['rcut'], {}), 
'(rcut)\n', (4355, 4361), False, 'from CP2K_kit.tools import data_op\n'), ((11440, 11584), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: traj_type should be md or mtd, please check and reset deepff/deepmd_model/training/system/traj_type"""'], {}), "(\n 'Input error: traj_type should be md or mtd, please check and reset deepff/deepmd_model/training/system/traj_type'\n )\n", (11458, 11584), False, 'from CP2K_kit.tools import log_info\n'), ((11811, 11843), 'os.path.abspath', 'os.path.abspath', (['traj_coord_file'], {}), '(traj_coord_file)\n', (11826, 11843), False, 'import os\n'), ((11907, 11939), 'os.path.abspath', 'os.path.abspath', (['traj_coord_file'], {}), '(traj_coord_file)\n', (11922, 11939), False, 'import os\n'), ((11968, 12114), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s does not exist, please check deepff/deepmd_model/training/system/traj_coord_file'\n % traj_coord_file)"], {}), "(\n 'Input error: %s does not exist, please check deepff/deepmd_model/training/system/traj_coord_file'\n % traj_coord_file)\n", (11986, 12114), False, 'from CP2K_kit.tools import log_info\n'), ((12868, 13001), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no force trajectory file, please check deepff/deepmd_model/training/system/traj_frc_file"""'], {}), "(\n 'Input error: no force trajectory file, please check deepff/deepmd_model/training/system/traj_frc_file'\n )\n", (12886, 13001), False, 'from CP2K_kit.tools import log_info\n'), ((13088, 13099), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13097, 13099), False, 'import os\n'), ((16230, 16259), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['start_frame'], {}), '(start_frame)\n', (16246, 16259), False, 'from CP2K_kit.tools import data_op\n'), ((16368, 16504), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: start_frame should be integer, please check 
deepff/deepmd_model/training/system/start_frame"""'], {}), "(\n 'Input error: start_frame should be integer, please check deepff/deepmd_model/training/system/start_frame'\n )\n", (16386, 16504), False, 'from CP2K_kit.tools import log_info\n'), ((16790, 16817), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['end_frame'], {}), '(end_frame)\n', (16806, 16817), False, 'from CP2K_kit.tools import data_op\n'), ((16922, 17054), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: end_frame should be integer, please check deepff/deepmd_model/training/system/end_frame"""'], {}), "(\n 'Input error: end_frame should be integer, please check deepff/deepmd_model/training/system/end_frame'\n )\n", (16940, 17054), False, 'from CP2K_kit.tools import log_info\n'), ((17360, 17395), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['choosed_frame_num'], {}), '(choosed_frame_num)\n', (17376, 17395), False, 'from CP2K_kit.tools import data_op\n'), ((17516, 17664), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: choosed_frame_num should be integer, please check deepff/deepmd_model/training/system/choosed_frame_num"""'], {}), "(\n 'Input error: choosed_frame_num should be integer, please check deepff/deepmd_model/training/system/choosed_frame_num'\n )\n", (17534, 17664), False, 'from CP2K_kit.tools import log_info\n'), ((17977, 18004), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['set_parts'], {}), '(set_parts)\n', (17993, 18004), False, 'from CP2K_kit.tools import data_op\n'), ((18109, 18241), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: set_parts should be integer, please check deepff/deepmd_model/training/system/set_parts"""'], {}), "(\n 'Input error: set_parts should be integer, please check deepff/deepmd_model/training/system/set_parts'\n )\n", (18127, 18241), False, 'from CP2K_kit.tools import log_info\n'), ((22301, 22437), 'CP2K_kit.tools.log_info.log_error', 
'log_info.log_error', (['"""Input error: neuron should be list of integer, please check or reset deepff/deepmd_model/training/neuron"""'], {}), "(\n 'Input error: neuron should be list of integer, please check or reset deepff/deepmd_model/training/neuron'\n )\n", (22319, 22437), False, 'from CP2K_kit.tools import log_info\n'), ((22654, 22790), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: neuron should be list of integer, please check or reset deepff/deepmd_model/training/neuron"""'], {}), "(\n 'Input error: neuron should be list of integer, please check or reset deepff/deepmd_model/training/neuron'\n )\n", (22672, 22790), False, 'from CP2K_kit.tools import log_info\n'), ((37409, 37541), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: mass of element should be int or float, please check or reset deepff/lammps/system/mass"""'], {}), "(\n 'Input error: mass of element should be int or float, please check or reset deepff/lammps/system/mass'\n )\n", (37427, 37541), False, 'from CP2K_kit.tools import log_info\n'), ((38578, 38606), 'os.path.abspath', 'os.path.abspath', (['plumed_file'], {}), '(plumed_file)\n', (38593, 38606), False, 'import os\n'), ((38653, 38681), 'os.path.abspath', 'os.path.abspath', (['plumed_file'], {}), '(plumed_file)\n', (38668, 38681), False, 'import os\n'), ((38710, 38838), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s file does not exist, please check deepff/lammps/system/plumed_file'\n % plumed_file)"], {}), "(\n 'Input error: %s file does not exist, please check deepff/lammps/system/plumed_file'\n % plumed_file)\n", (38728, 38838), False, 'from CP2K_kit.tools import log_info\n'), ((44253, 44278), 'os.path.abspath', 'os.path.abspath', (['inp_file'], {}), '(inp_file)\n', (44268, 44278), False, 'import os\n'), ((44375, 44430), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('%s file does not exist' % inp_file)"], {}), "('%s file does not 
exist' % inp_file)\n", (44393, 44430), False, 'from CP2K_kit.tools import log_info\n'), ((44590, 44620), 'os.path.abspath', 'os.path.abspath', (['cp2k_inp_file'], {}), '(cp2k_inp_file)\n', (44605, 44620), False, 'import os\n'), ((3211, 3230), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['i'], {}), '(i)\n', (3227, 3230), False, 'from CP2K_kit.tools import data_op\n'), ((4819, 4838), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['i'], {}), '(i)\n', (4835, 4838), False, 'from CP2K_kit.tools import data_op\n'), ((12528, 12558), 'os.path.abspath', 'os.path.abspath', (['traj_frc_file'], {}), '(traj_frc_file)\n', (12543, 12558), False, 'import os\n'), ((12622, 12652), 'os.path.abspath', 'os.path.abspath', (['traj_frc_file'], {}), '(traj_frc_file)\n', (12637, 12652), False, 'import os\n'), ((12685, 12827), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s does not exist, please check deepff/deepmd_model/training/system/traj_frc_file'\n % traj_frc_file)"], {}), "(\n 'Input error: %s does not exist, please check deepff/deepmd_model/training/system/traj_frc_file'\n % traj_frc_file)\n", (12703, 12827), False, 'from CP2K_kit.tools import log_info\n'), ((13814, 13947), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no cell trajectory file, please check deepff/deepmd_model/training/system/traj_cell_file"""'], {}), "(\n 'Input error: no cell trajectory file, please check deepff/deepmd_model/training/system/traj_cell_file'\n )\n", (13832, 13947), False, 'from CP2K_kit.tools import log_info\n'), ((14215, 14248), 'os.path.abspath', 'os.path.abspath', (['traj_stress_file'], {}), '(traj_stress_file)\n', (14230, 14248), False, 'import os\n'), ((14315, 14348), 'os.path.abspath', 'os.path.abspath', (['traj_stress_file'], {}), '(traj_stress_file)\n', (14330, 14348), False, 'import os\n'), ((14381, 14529), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s does not exist, please 
check deepff/deepmd_model/training/system/traj_stress_file'\n % traj_stress_file)"], {}), "(\n 'Input error: %s does not exist, please check deepff/deepmd_model/training/system/traj_stress_file'\n % traj_stress_file)\n", (14399, 14529), False, 'from CP2K_kit.tools import log_info\n'), ((15338, 15451), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no data_dir, please set deepff/deepmd_model/training/system/data_dir"""'], {}), "(\n 'Input error: no data_dir, please set deepff/deepmd_model/training/system/data_dir'\n )\n", (15356, 15451), False, 'from CP2K_kit.tools import log_info\n'), ((15542, 15669), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no task_dir_prefix, please set deepff/deepmd_model/training/system/task_dir_prefix"""'], {}), "(\n 'Input error: no task_dir_prefix, please set deepff/deepmd_model/training/system/task_dir_prefix'\n )\n", (15560, 15669), False, 'from CP2K_kit.tools import log_info\n'), ((15754, 15869), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no proj_name, please set deepff/deepmd_model/training/system/proj_name"""'], {}), "(\n 'Input error: no proj_name, please set deepff/deepmd_model/training/system/proj_name'\n )\n", (15772, 15869), False, 'from CP2K_kit.tools import log_info\n'), ((15958, 16081), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (['"""Input error: no out_file_name, please set deepff/deepmd_model/training/system/out_file_name"""'], {}), "(\n 'Input error: no out_file_name, please set deepff/deepmd_model/training/system/out_file_name'\n )\n", (15976, 16081), False, 'from CP2K_kit.tools import log_info\n'), ((34624, 34643), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['i'], {}), '(i)\n', (34640, 34643), False, 'from CP2K_kit.tools import data_op\n'), ((34652, 34671), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['i'], {}), '(i)\n', (34668, 34671), False, 'from CP2K_kit.tools import 
data_op\n'), ((35325, 35344), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['i'], {}), '(i)\n', (35341, 35344), False, 'from CP2K_kit.tools import data_op\n'), ((35353, 35372), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['i'], {}), '(i)\n', (35369, 35372), False, 'from CP2K_kit.tools import data_op\n'), ((37249, 37274), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['element'], {}), '(element)\n', (37265, 37274), False, 'from CP2K_kit.tools import data_op\n'), ((37283, 37308), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['element'], {}), '(element)\n', (37299, 37308), False, 'from CP2K_kit.tools import data_op\n'), ((42716, 42735), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['i'], {}), '(i)\n', (42732, 42735), False, 'from CP2K_kit.tools import data_op\n'), ((44320, 44345), 'os.path.abspath', 'os.path.abspath', (['inp_file'], {}), '(inp_file)\n', (44335, 44345), False, 'import os\n'), ((49791, 49818), 'os.path.abspath', 'os.path.abspath', (['dftd3_file'], {}), '(dftd3_file)\n', (49806, 49818), False, 'import os\n'), ((13457, 13488), 'os.path.abspath', 'os.path.abspath', (['traj_cell_file'], {}), '(traj_cell_file)\n', (13472, 13488), False, 'import os\n'), ((13555, 13586), 'os.path.abspath', 'os.path.abspath', (['traj_cell_file'], {}), '(traj_cell_file)\n', (13570, 13586), False, 'import os\n'), ((13623, 13767), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s does not exist, please check deepff/deepmd_model/training/system/traj_cell_file'\n % traj_cell_file)"], {}), "(\n 'Input error: %s does not exist, please check deepff/deepmd_model/training/system/traj_cell_file'\n % traj_cell_file)\n", (13641, 13767), False, 'from CP2K_kit.tools import log_info\n'), ((15023, 15048), 'os.path.abspath', 'os.path.abspath', (['data_dir'], {}), '(data_dir)\n', (15038, 15048), False, 'import os\n'), ((15107, 15132), 'os.path.abspath', 'os.path.abspath', (['data_dir'], {}), '(data_dir)\n', 
(15122, 15132), False, 'import os\n'), ((15165, 15297), 'CP2K_kit.tools.log_info.log_error', 'log_info.log_error', (["('Input error: %s does not exist, please check deepff/deepmd_model/training/system/data_dir'\n % data_dir)"], {}), "(\n 'Input error: %s does not exist, please check deepff/deepmd_model/training/system/data_dir'\n % data_dir)\n", (15183, 15297), False, 'from CP2K_kit.tools import log_info\n'), ((22172, 22191), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['j'], {}), '(j)\n', (22188, 22191), False, 'from CP2K_kit.tools import data_op\n'), ((22531, 22550), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['j'], {}), '(j)\n', (22547, 22550), False, 'from CP2K_kit.tools import data_op\n')] |
import matplotlib
matplotlib.use('Agg')
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.pyplot import gcf
from flask import Flask, render_template, request, flash, redirect
import pandas as pd
import librosa
import ffmpeg
import librosa.display
import numpy as np
import matplotlib.pyplot as plt
import io
import os
import base64
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Flatten, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import MobileNetV2
from keras.preprocessing.image import img_to_array
from PIL import Image
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
BIRD_DATA = os.path.join(THIS_DIR, 'bird_data.xlsx')
def fig2img(fig):
''' Transforms matplotlib figure to image '''
fig.canvas.draw()
w,h = fig.canvas.get_width_height()
buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
buf.shape = (w,h,4)
buf = np.roll(buf,3,axis = 2)
w, h, d = buf.shape
return Image.frombytes("RGB",(w,h),buf.tostring())
def create_spectrogram(file):
''' loads audio file and creates spectrogram '''
signal, sr = librosa.load(file,duration=10)
fig = gcf()
DPI = fig.get_dpi()
fig = plt.figure()
fig.set_size_inches(224/float(DPI),224/float(DPI))
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
S = librosa.feature.melspectrogram(y=signal,sr=sr,
n_fft=1024,
hop_length=1024,
n_mels=128,
htk=True,
fmin=1400,
fmax=sr/2)
librosa.display.specshow(librosa.power_to_db(S**2,ref=np.max), fmin=1400,y_axis='linear')
image = fig2img(fig)
image = img_to_array(image)
image = np.array([image])
return image, fig
def predict(model, image):
''' makes prediction out of the spectrogram '''
net = MobileNetV2(include_top=False,
weights='imagenet',
input_tensor=None,
input_shape=(224,224,3))
x = net.output
x = Flatten()(x)
x = Dropout(0.5)(x)
output_layer = Dense(5, activation='softmax')(x)
loaded_model = Model(inputs=net.input, outputs=output_layer)
loaded_model.load_weights(model)
loaded_model.compile(optimizer=Adam(),
loss='categorical_crossentropy', metrics=['accuracy'])
pred = loaded_model.predict(image)
return pred
def get_bird_data(bird):
df = pd.read_excel(BIRD_DATA)
df = df[df['species']==bird].reset_index(drop=True)
name = df['name'][0]
en_name = df['en_name'][0]
desc = df['desc'][0]
return name, en_name, desc
def create_bird_path(bird):
img_path = '/static/images/'
bird = bird.lower()
img_file = bird + '.jpg'
bird_path = img_path + img_file
return bird_path
def create_result(pred, classes):
''' creates results (bird class and probability) '''
top = np.argsort(pred[0])[:-2:-1]
result = {'bird': '', 'probability': ''}
result['bird'] = classes[top[0]]
result['probability'] = int(round(pred[0][top[0]],2)*100)
return result
| [
"keras.preprocessing.image.img_to_array",
"numpy.argsort",
"numpy.array",
"tensorflow.keras.layers.Dense",
"pandas.read_excel",
"librosa.load",
"tensorflow.keras.models.Model",
"tensorflow.keras.applications.MobileNetV2",
"matplotlib.use",
"matplotlib.pyplot.gcf",
"tensorflow.keras.layers.Dropou... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((767, 807), 'os.path.join', 'os.path.join', (['THIS_DIR', '"""bird_data.xlsx"""'], {}), "(THIS_DIR, 'bird_data.xlsx')\n", (779, 807), False, 'import os\n'), ((727, 753), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (743, 753), False, 'import os\n'), ((1043, 1066), 'numpy.roll', 'np.roll', (['buf', '(3)'], {'axis': '(2)'}), '(buf, 3, axis=2)\n', (1050, 1066), True, 'import numpy as np\n'), ((1247, 1278), 'librosa.load', 'librosa.load', (['file'], {'duration': '(10)'}), '(file, duration=10)\n', (1259, 1278), False, 'import librosa\n'), ((1291, 1296), 'matplotlib.pyplot.gcf', 'gcf', ([], {}), '()\n', (1294, 1296), False, 'from matplotlib.pyplot import gcf\n'), ((1331, 1343), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1341, 1343), True, 'import matplotlib.pyplot as plt\n'), ((1413, 1448), 'matplotlib.pyplot.Axes', 'plt.Axes', (['fig', '[0.0, 0.0, 1.0, 1.0]'], {}), '(fig, [0.0, 0.0, 1.0, 1.0])\n', (1421, 1448), True, 'import matplotlib.pyplot as plt\n'), ((1497, 1623), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', ([], {'y': 'signal', 'sr': 'sr', 'n_fft': '(1024)', 'hop_length': '(1024)', 'n_mels': '(128)', 'htk': '(True)', 'fmin': '(1400)', 'fmax': '(sr / 2)'}), '(y=signal, sr=sr, n_fft=1024, hop_length=1024,\n n_mels=128, htk=True, fmin=1400, fmax=sr / 2)\n', (1527, 1623), False, 'import librosa\n'), ((2022, 2041), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (2034, 2041), False, 'from keras.preprocessing.image import img_to_array\n'), ((2054, 2071), 'numpy.array', 'np.array', (['[image]'], {}), '([image])\n', (2062, 2071), True, 'import numpy as np\n'), ((2185, 2285), 'tensorflow.keras.applications.MobileNetV2', 'MobileNetV2', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 
'input_tensor': 'None', 'input_shape': '(224, 224, 3)'}), "(include_top=False, weights='imagenet', input_tensor=None,\n input_shape=(224, 224, 3))\n", (2196, 2285), False, 'from tensorflow.keras.applications import MobileNetV2\n'), ((2500, 2545), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'net.input', 'outputs': 'output_layer'}), '(inputs=net.input, outputs=output_layer)\n', (2505, 2545), False, 'from tensorflow.keras.models import Model, load_model\n'), ((2793, 2817), 'pandas.read_excel', 'pd.read_excel', (['BIRD_DATA'], {}), '(BIRD_DATA)\n', (2806, 2817), True, 'import pandas as pd\n'), ((1915, 1954), 'librosa.power_to_db', 'librosa.power_to_db', (['(S ** 2)'], {'ref': 'np.max'}), '(S ** 2, ref=np.max)\n', (1934, 1954), False, 'import librosa\n'), ((2391, 2400), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2398, 2400), False, 'from tensorflow.keras.layers import Flatten, Dense, Dropout\n'), ((2412, 2424), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2419, 2424), False, 'from tensorflow.keras.layers import Flatten, Dense, Dropout\n'), ((2447, 2477), 'tensorflow.keras.layers.Dense', 'Dense', (['(5)'], {'activation': '"""softmax"""'}), "(5, activation='softmax')\n", (2452, 2477), False, 'from tensorflow.keras.layers import Flatten, Dense, Dropout\n'), ((3263, 3282), 'numpy.argsort', 'np.argsort', (['pred[0]'], {}), '(pred[0])\n', (3273, 3282), True, 'import numpy as np\n'), ((2618, 2624), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (2622, 2624), False, 'from tensorflow.keras.optimizers import Adam\n')] |
import os
import random
import time
from contextlib import contextmanager
MOCK_LOUCSTFILE_CONTENT = '''
"""This is a mock locust file for unit testing"""
from locust import HttpUser, TaskSet, task, between
def index(l):
l.client.get("/")
def stats(l):
l.client.get("/stats/requests")
class UserTasks(TaskSet):
# one can specify tasks like this
tasks = [index, stats]
class UserSubclass(HttpUser):
host = "http://127.0.0.1:8089"
wait_time = between(2, 5)
tasks = [UserTasks]
class NotUserSubclass():
host = "http://localhost:8000"
'''
class MockedLocustfile:
__slots__ = ["filename", "directory", "file_path"]
@contextmanager
def mock_locustfile(filename_prefix="mock_locustfile", content=MOCK_LOUCSTFILE_CONTENT):
mocked = MockedLocustfile()
mocked.directory = os.path.dirname(os.path.abspath(__file__))
mocked.filename = "%s_%s_%i.py" % (
filename_prefix,
str(time.time()).replace(".", "_"),
random.randint(0,100000),
)
mocked.file_path = os.path.join(mocked.directory, mocked.filename)
with open(mocked.file_path, 'w') as file:
file.write(content)
try:
yield mocked
finally:
os.remove(mocked.file_path)
| [
"os.path.join",
"os.path.abspath",
"time.time",
"random.randint",
"os.remove"
] | [((1039, 1086), 'os.path.join', 'os.path.join', (['mocked.directory', 'mocked.filename'], {}), '(mocked.directory, mocked.filename)\n', (1051, 1086), False, 'import os\n'), ((838, 863), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (853, 863), False, 'import os\n'), ((1217, 1244), 'os.remove', 'os.remove', (['mocked.file_path'], {}), '(mocked.file_path)\n', (1226, 1244), False, 'import os\n'), ((984, 1009), 'random.randint', 'random.randint', (['(0)', '(100000)'], {}), '(0, 100000)\n', (998, 1009), False, 'import random\n'), ((943, 954), 'time.time', 'time.time', ([], {}), '()\n', (952, 954), False, 'import time\n')] |
# coding=utf-8
"""
This Storage service helps persistent room information given by SearchService.py
Employ a sqlite3 to implement the detail for now.
"""
import sqlite3
from Models import RoomInfo
from LogHelper import Log
from Utils import *
class StorageService:
_DEFAULT_PATH = "./rooms.db"
_TAG = "StorageService"
def __init__(self, path):
dbPath = path if path else self._DEFAULT_PATH
self._conn = self.prepareDB(dbPath)
pass
def prepareDB(self, path):
conn = sqlite3.connect(path)
cursor = conn.cursor()
try:
# table for room
cursor.execute("CREATE TABLE IF NOT EXISTS room_info (_id INTEGER PRIMARY KEY AUTOINCREMENT,\
room_id INTEGER UNIQUE, person_capacity INTEGER, city VARCHAR(20), beds INTEGER, \
localized_neighborhood VARCHAR(50), price INTEGER, pic VARCHAR(200), update_time INTEGER,\
query_str VARCHAR(100))")
# table for reservation
cursor.execute("CREATE TABLE IF NOT EXISTS reservation_info (_id INTEGER PRIMARY KEY AUTOINCREMENT,\
room_id INTEGER, date VARCHAR(20) )")
except BaseException as e:
Log.e(StorageService._TAG, "prepareDB() failed", e)
finally:
cursor.close()
conn.commit()
return conn
def saveOrUpdateSingleRoom(self, singleRoom):
"""
like saveOrUpdateRoomBatch() but only save or update one Room at a time
:param singleRoom:
:return:
"""
rooms = []
rooms.append(singleRoom)
return self.saveOrUpdateRoomBatch(rooms)
def saveOrUpdateRoomBatch(self, roomInfos):
"""
save or update information for a given room
:param roomInfos: a list of RoomInfo
:return:
"""
cursor = self._conn.cursor()
success = True
try:
for roomInfo in roomInfos:
cursor.execute("SELECT room_id FROM room_info WHERE room_id=?", (roomInfo.roomId,))
if len(cursor.fetchall()) > 0:
# exist
cursor.execute("UPDATE room_info SET person_capacity=?, city=?, beds=?,\
localized_neighborhood=?, price=?, pic=?, update_time=?, query_str=? WHERE room_id=?", (
roomInfo.personCapacity, roomInfo.city, roomInfo.beds, roomInfo.neighbourhood,
int(roomInfo.price), roomInfo.pic, getNowTimeStamp(), roomInfo.query, roomInfo.roomId))
else:
# new
cursor.execute("INSERT INTO room_info (room_id, person_capacity, city, beds, localized_neighborhood, \
price, pic, update_time, query_str) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", (
roomInfo.roomId, roomInfo.personCapacity, roomInfo.city, roomInfo.beds, roomInfo.neighbourhood,
roomInfo.price, roomInfo.pic, getNowTimeStamp(), roomInfo.query))
success = cursor.rowcount > 0 and success
except BaseException as e:
success = False
Log.w(StorageService._TAG, "saveOrUpdateRoomBatch() failed", e)
finally:
cursor.close()
self._conn.commit()
return success
def countRoomsForRecentDays(self, query, city, days):
"""
get the number of available individual rooms for recent days
:param query: the query string
:param city: the city name specified in config.json, this will take effect only when query is None
:param days: the number of days before now
:return:
"""
ret = 0
cursor = self._conn.cursor()
try:
if query:
cursor.execute("SELECT DISTINCT(room_id) FROM room_info WHERE update_time>? AND query_str=?", (getDeltaTimeStamp(datetime.datetime.now(), -days), query))
elif city:
cursor.execute("SELECT DISTINCT(room_id) FROM room_info WHERE update_time>? AND city=?",
(getDeltaTimeStamp(datetime.datetime.now(), -days), city))
else:
Log.w(StorageService._TAG, "query and city all empty!!")
return 0
ret = len(cursor.fetchall())
except BaseException as e:
Log.w(StorageService._TAG, "countRoomsForRecentDays() failed", e)
finally:
cursor.close()
self._conn.commit()
return ret
def saveReservationInfo(self, roomId):
"""
mark a room as available for today
:param roomId:
:return:
"""
cursor = self._conn.cursor()
success = False
try:
now_date = getDateStr()
cursor.execute("INSERT INTO reservation_info (room_id, date) VALUES(?, ?)", (roomId, now_date))
success = cursor.rowcount > 0
except BaseException as e:
Log.w(StorageService._TAG, "saveReservationInfo() failed", e)
finally:
cursor.close()
self._conn.commit()
return success
def getRoomById(self, roomId):
"""
query full information for a given roomId
:param roomId:
:return: a RoomInfo with given roomId if found, None if not
"""
cursor = self._conn.cursor()
ret = None
try:
cursor.execute("SELECT room_id, person_capacity, city, beds, localized_neighborhood, \
price, pic, query_str FROM room_info WHERE room_id=?", (roomId,))
values = cursor.fetchall()
if len(values) > 0:
ret = RoomInfo(values[0][0], values[0][1], values[0][2], values[0][3], values[0][4], values[0][5], values[0][6], values[0][7])
except BaseException as e:
Log.w(StorageService._TAG, "getRoomById() failed", e)
finally:
cursor.close()
self._conn.commit()
return ret
def isAvailable(self, roomId, dateStr):
"""
query whether a room specified by roomId is available on given date
:param roomId:
:param dateStr: like 2019-05-30, be sure to use '{:%Y-%m-%d}'.format(datetime)
:return: True for available that day, False otherwise
"""
cursor = self._conn.cursor()
ret = False
try:
cursor.execute("SELECT * FROM reservation_info WHERE room_id=? AND date=?", (roomId, dateStr))
ret = len(cursor.fetchall()) > 0
except BaseException as e:
Log.w(StorageService._TAG, "hasReservation() failed", e)
finally:
cursor.close()
self._conn.commit()
return ret
def close(self):
if self._conn:
self._conn.close()
| [
"LogHelper.Log.w",
"sqlite3.connect",
"LogHelper.Log.e",
"Models.RoomInfo"
] | [((519, 540), 'sqlite3.connect', 'sqlite3.connect', (['path'], {}), '(path)\n', (534, 540), False, 'import sqlite3\n'), ((1239, 1290), 'LogHelper.Log.e', 'Log.e', (['StorageService._TAG', '"""prepareDB() failed"""', 'e'], {}), "(StorageService._TAG, 'prepareDB() failed', e)\n", (1244, 1290), False, 'from LogHelper import Log\n'), ((3222, 3285), 'LogHelper.Log.w', 'Log.w', (['StorageService._TAG', '"""saveOrUpdateRoomBatch() failed"""', 'e'], {}), "(StorageService._TAG, 'saveOrUpdateRoomBatch() failed', e)\n", (3227, 3285), False, 'from LogHelper import Log\n'), ((4431, 4496), 'LogHelper.Log.w', 'Log.w', (['StorageService._TAG', '"""countRoomsForRecentDays() failed"""', 'e'], {}), "(StorageService._TAG, 'countRoomsForRecentDays() failed', e)\n", (4436, 4496), False, 'from LogHelper import Log\n'), ((5051, 5112), 'LogHelper.Log.w', 'Log.w', (['StorageService._TAG', '"""saveReservationInfo() failed"""', 'e'], {}), "(StorageService._TAG, 'saveReservationInfo() failed', e)\n", (5056, 5112), False, 'from LogHelper import Log\n'), ((5764, 5889), 'Models.RoomInfo', 'RoomInfo', (['values[0][0]', 'values[0][1]', 'values[0][2]', 'values[0][3]', 'values[0][4]', 'values[0][5]', 'values[0][6]', 'values[0][7]'], {}), '(values[0][0], values[0][1], values[0][2], values[0][3], values[0][\n 4], values[0][5], values[0][6], values[0][7])\n', (5772, 5889), False, 'from Models import RoomInfo\n'), ((5932, 5985), 'LogHelper.Log.w', 'Log.w', (['StorageService._TAG', '"""getRoomById() failed"""', 'e'], {}), "(StorageService._TAG, 'getRoomById() failed', e)\n", (5937, 5985), False, 'from LogHelper import Log\n'), ((6667, 6723), 'LogHelper.Log.w', 'Log.w', (['StorageService._TAG', '"""hasReservation() failed"""', 'e'], {}), "(StorageService._TAG, 'hasReservation() failed', e)\n", (6672, 6723), False, 'from LogHelper import Log\n'), ((4261, 4317), 'LogHelper.Log.w', 'Log.w', (['StorageService._TAG', '"""query and city all empty!!"""'], {}), "(StorageService._TAG, 'query and city all 
empty!!')\n", (4266, 4317), False, 'from LogHelper import Log\n')] |
# @Author : bamtercelboo
# @Datetime : 2018/7/24 10:26
# @File : HierachicalAtten.py
# @Last Modify Time : 2018/7/24 10:26
# @Contact : <EMAIL>, 163.com}
"""
FILE : HierachicalAtten.py
FUNCTION : None
"""
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import numpy as np
import random
import time
from DataUtils.Common import *
from models.models_AL.initialize import *
torch.manual_seed(seed_num)
random.seed(seed_num)
class HierachicalAtten(nn.Module):
"""
HierachicalAtten
"""
def __init__(self, in_size, attention_size, config):
"""
:Function Hierachical Attention initial
:param in_size: Linear input size
:param attention_size: attention size
:param config: config file
"""
super(HierachicalAtten, self).__init__()
self.config = config
self.in_size = in_size
self.attention_size = attention_size
self.MLP_linear = nn.Linear(in_features=self.in_size, out_features=self.attention_size, bias=True)
init_linear_weight_bias(self.MLP_linear)
self.context_vector = nn.Linear(self.attention_size, 1, bias=False)
init_linear_weight_bias(self.context_vector)
self.dropout = nn.Dropout(p=0.0)
self.SM = nn.Softmax(dim=1)
def forward(self, input):
mlp_out = torch.tanh(self.MLP_linear(input)) ## B * T * H
score = self.SM(self.context_vector(mlp_out)) ## B * T * 1
input_atten = torch.mul(input, score) ## B * T * H
att_out = torch.sum(input_atten, dim=1, keepdim=False) ## B * H
return att_out
| [
"torch.manual_seed",
"torch.mul",
"torch.nn.Dropout",
"torch.nn.Softmax",
"random.seed",
"torch.sum",
"torch.nn.Linear"
] | [((450, 477), 'torch.manual_seed', 'torch.manual_seed', (['seed_num'], {}), '(seed_num)\n', (467, 477), False, 'import torch\n'), ((478, 499), 'random.seed', 'random.seed', (['seed_num'], {}), '(seed_num)\n', (489, 499), False, 'import random\n'), ((1011, 1096), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.in_size', 'out_features': 'self.attention_size', 'bias': '(True)'}), '(in_features=self.in_size, out_features=self.attention_size, bias=True\n )\n', (1020, 1096), True, 'import torch.nn as nn\n'), ((1172, 1217), 'torch.nn.Linear', 'nn.Linear', (['self.attention_size', '(1)'], {'bias': '(False)'}), '(self.attention_size, 1, bias=False)\n', (1181, 1217), True, 'import torch.nn as nn\n'), ((1295, 1312), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.0)'}), '(p=0.0)\n', (1305, 1312), True, 'import torch.nn as nn\n'), ((1332, 1349), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (1342, 1349), True, 'import torch.nn as nn\n'), ((1543, 1566), 'torch.mul', 'torch.mul', (['input', 'score'], {}), '(input, score)\n', (1552, 1566), False, 'import torch\n'), ((1600, 1644), 'torch.sum', 'torch.sum', (['input_atten'], {'dim': '(1)', 'keepdim': '(False)'}), '(input_atten, dim=1, keepdim=False)\n', (1609, 1644), False, 'import torch\n')] |
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
class MobileNetSSD:
def __init__(self, img, num_classes, img_shape):
self.img = img
self.num_classes = num_classes
self.img_shape = img_shape
def ssd_net(self, scale=1.0):
# 300x300
tmp = self.conv_bn(self.img, 3, int(32 * scale), 2, 1, 3)
# 150x150
tmp = self.depthwise_separable(tmp, 32, 64, 32, 1, scale)
tmp = self.depthwise_separable(tmp, 64, 128, 64, 2, scale)
# 75x75
tmp = self.depthwise_separable(tmp, 128, 128, 128, 1, scale)
tmp = self.depthwise_separable(tmp, 128, 256, 128, 2, scale)
# 38x38
tmp = self.depthwise_separable(tmp, 256, 256, 256, 1, scale)
tmp = self.depthwise_separable(tmp, 256, 512, 256, 2, scale)
# 19x19
for i in range(5):
tmp = self.depthwise_separable(tmp, 512, 512, 512, 1, scale)
module11 = tmp
tmp = self.depthwise_separable(tmp, 512, 1024, 512, 2, scale)
# 10x10
module13 = self.depthwise_separable(tmp, 1024, 1024, 1024, 1, scale)
module14 = self.extra_block(module13, 256, 512, 1, 2, scale)
# 5x5
module15 = self.extra_block(module14, 128, 256, 1, 2, scale)
# 3x3
module16 = self.extra_block(module15, 128, 256, 1, 2, scale)
# 2x2
module17 = self.extra_block(module16, 64, 128, 1, 2, scale)
mbox_locs, mbox_confs, box, box_var = fluid.layers.multi_box_head(
inputs=[
module11, module13, module14, module15, module16, module17
],
image=self.img,
num_classes=self.num_classes,
min_ratio=20,
max_ratio=90,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2., 3.],
[2., 3.]],
base_size=self.img_shape[2],
offset=0.5,
flip=True)
return mbox_locs, mbox_confs, box, box_var
def conv_bn(self,
input,
filter_size,
num_filters,
stride,
padding,
channels=None,
num_groups=1,
act='relu',
use_cudnn=True):
parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
act=None,
use_cudnn=use_cudnn,
param_attr=parameter_attr,
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
def depthwise_separable(self, input, num_filters1, num_filters2, num_groups,
stride, scale):
depthwise_conv = self.conv_bn(
input=input,
filter_size=3,
num_filters=int(num_filters1 * scale),
stride=stride,
padding=1,
num_groups=int(num_groups * scale),
use_cudnn=False)
pointwise_conv = self.conv_bn(
input=depthwise_conv,
filter_size=1,
num_filters=int(num_filters2 * scale),
stride=1,
padding=0)
return pointwise_conv
def extra_block(self, input, num_filters1, num_filters2, num_groups, stride,
scale):
# 1x1 conv
pointwise_conv = self.conv_bn(
input=input,
filter_size=1,
num_filters=int(num_filters1 * scale),
stride=1,
num_groups=int(num_groups * scale),
padding=0)
# 3x3 conv
normal_conv = self.conv_bn(
input=pointwise_conv,
filter_size=3,
num_filters=int(num_filters2 * scale),
stride=2,
num_groups=int(num_groups * scale),
padding=1)
return normal_conv
def build_mobilenet_ssd(img, num_classes, img_shape):
ssd_model = MobileNetSSD(img, num_classes, img_shape)
return ssd_model.ssd_net()
| [
"paddle.fluid.layers.batch_norm",
"paddle.fluid.layers.multi_box_head",
"paddle.fluid.layers.conv2d",
"paddle.fluid.initializer.MSRA"
] | [((1535, 1965), 'paddle.fluid.layers.multi_box_head', 'fluid.layers.multi_box_head', ([], {'inputs': '[module11, module13, module14, module15, module16, module17]', 'image': 'self.img', 'num_classes': 'self.num_classes', 'min_ratio': '(20)', 'max_ratio': '(90)', 'min_sizes': '[60.0, 105.0, 150.0, 195.0, 240.0, 285.0]', 'max_sizes': '[[], 150.0, 195.0, 240.0, 285.0, 300.0]', 'aspect_ratios': '[[2.0], [2.0, 3.0], [2.0, 3.0], [2.0, 3.0], [2.0, 3.0], [2.0, 3.0]]', 'base_size': 'self.img_shape[2]', 'offset': '(0.5)', 'flip': '(True)'}), '(inputs=[module11, module13, module14, module15,\n module16, module17], image=self.img, num_classes=self.num_classes,\n min_ratio=20, max_ratio=90, min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0,\n 285.0], max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],\n aspect_ratios=[[2.0], [2.0, 3.0], [2.0, 3.0], [2.0, 3.0], [2.0, 3.0], [\n 2.0, 3.0]], base_size=self.img_shape[2], offset=0.5, flip=True)\n', (1562, 1965), True, 'import paddle.fluid as fluid\n'), ((2540, 2758), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'input', 'num_filters': 'num_filters', 'filter_size': 'filter_size', 'stride': 'stride', 'padding': 'padding', 'groups': 'num_groups', 'act': 'None', 'use_cudnn': 'use_cudnn', 'param_attr': 'parameter_attr', 'bias_attr': '(False)'}), '(input=input, num_filters=num_filters, filter_size=\n filter_size, stride=stride, padding=padding, groups=num_groups, act=\n None, use_cudnn=use_cudnn, param_attr=parameter_attr, bias_attr=False)\n', (2559, 2758), True, 'import paddle.fluid as fluid\n'), ((2885, 2929), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', ([], {'input': 'conv', 'act': 'act'}), '(input=conv, act=act)\n', (2908, 2929), True, 'import paddle.fluid as fluid\n'), ((2517, 2523), 'paddle.fluid.initializer.MSRA', 'MSRA', ([], {}), '()\n', (2521, 2523), False, 'from paddle.fluid.initializer import MSRA\n')] |
import requests
import json
import time
def is_number(s):
try:
int(s)
return True
except ValueError:
return False
def console_print(text):
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
# print(text)
r = requests.post('http://localhost:5000/api/pending_client_actions', json={'response': str(text), 'response_required': 'n'})
# print(r.status_code)
def input(prompt):
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
# print(prompt)
r = requests.post('http://localhost:5000/api/pending_client_actions', json={'response': str(prompt), 'response_required': 'y'})
# print(r.status_code)
r = requests.get('http://localhost:5000/api/pending_server_actions')
while r.status_code != 200:
r = requests.get('http://localhost:5000/api/pending_server_actions')
# print(r.status_code)
time.sleep(1)
# print("Input Response: ")
# print(r.content)
j = json.loads(r.content)
return j[0]['response']
def get_num_options(num_options):
J = None
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post('http://localhost:5000/api/pending_client_actions', json={'response': "Please Enter a Number", 'response_required': 'y'})
# print(r.status_code)
r = requests.get('http://localhost:5000/api/pending_server_actions')
while True:
if r.status_code == 200:
j = json.loads((r.content))
if is_number(j[0]['response']):
if 0 <= int(j[0]['response']) < num_options:
return int(j[0]['response'])
r = requests.get('http://localhost:5000/api/pending_server_actions')
# print(r.status_code)
time.sleep(1)
| [
"json.loads",
"requests.post",
"time.sleep",
"requests.get"
] | [((705, 769), 'requests.get', 'requests.get', (['"""http://localhost:5000/api/pending_server_actions"""'], {}), "('http://localhost:5000/api/pending_server_actions')\n", (717, 769), False, 'import requests\n'), ((1003, 1024), 'json.loads', 'json.loads', (['r.content'], {}), '(r.content)\n', (1013, 1024), False, 'import json\n'), ((1184, 1324), 'requests.post', 'requests.post', (['"""http://localhost:5000/api/pending_client_actions"""'], {'json': "{'response': 'Please Enter a Number', 'response_required': 'y'}"}), "('http://localhost:5000/api/pending_client_actions', json={\n 'response': 'Please Enter a Number', 'response_required': 'y'})\n", (1197, 1324), False, 'import requests\n'), ((1355, 1419), 'requests.get', 'requests.get', (['"""http://localhost:5000/api/pending_server_actions"""'], {}), "('http://localhost:5000/api/pending_server_actions')\n", (1367, 1419), False, 'import requests\n'), ((814, 878), 'requests.get', 'requests.get', (['"""http://localhost:5000/api/pending_server_actions"""'], {}), "('http://localhost:5000/api/pending_server_actions')\n", (826, 878), False, 'import requests\n'), ((918, 931), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (928, 931), False, 'import time\n'), ((1676, 1740), 'requests.get', 'requests.get', (['"""http://localhost:5000/api/pending_server_actions"""'], {}), "('http://localhost:5000/api/pending_server_actions')\n", (1688, 1740), False, 'import requests\n'), ((1780, 1793), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1790, 1793), False, 'import time\n'), ((1485, 1506), 'json.loads', 'json.loads', (['r.content'], {}), '(r.content)\n', (1495, 1506), False, 'import json\n')] |
import os
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
from pandas import DataFrame
from sklearn.model_selection import train_test_split
def create_info_csv(mvtec_dir: Path) -> DataFrame:
df = pd.DataFrame({})
for data_type in ["train", "test"]:
for p in mvtec_dir.glob(f"*/{data_type}/*/*.png"):
raw_stem = p.stem
defect = p.parents[0].name
data_type = p.parents[1].name
category = p.parents[2].name
df = df.append(
{
"raw_img_path": str(p),
"raw_stem": raw_stem,
"defect": defect,
"data_type": data_type,
"category": category,
},
ignore_index=True,
)
for category in df["category"].unique():
category_df = df.query("data_type=='train' & category==@category")
_, val_index = train_test_split(
category_df.index.tolist(),
train_size=0.8,
test_size=0.2,
random_state=5,
shuffle=True,
)
df.loc[val_index, "data_type"] = "val"
df["stem"] = df.apply(
lambda x: f"{x.category}_{x.data_type}_{x.defect}_{x.raw_stem}",
axis=1,
)
df["raw_mask_path"] = df.apply(
lambda x: f"{mvtec_dir}/{x.category}/ground_truth/{x.defect}/{x.raw_stem}_mask.png",
axis=1,
)
return df
def move_images_and_masks(df: DataFrame) -> None:
os.makedirs("/data/images", exist_ok=True)
os.makedirs("/data/masks", exist_ok=True)
for i in df.index:
raw_img_path, raw_mask_path, stem = df.loc[i, ["raw_img_path", "raw_mask_path", "stem"]]
if os.path.exists(raw_mask_path):
os.rename(raw_mask_path, f"/data/masks/{stem}.png")
else:
# create masks for train images
img = cv2.imread(raw_img_path)
mask = np.zeros(img.shape)
cv2.imwrite(f"/data/masks/{stem}.png", mask)
os.rename(raw_img_path, f"/data/images/{stem}.png")
df.drop(columns=["raw_stem", "raw_img_path", "raw_mask_path"])
df.to_csv("/data/info.csv", index=False)
if __name__ == "__main__":
mvtec_dir = Path("/data/MVTec")
df = create_info_csv(mvtec_dir)
move_images_and_masks(df)
| [
"os.path.exists",
"cv2.imwrite",
"os.makedirs",
"pathlib.Path",
"os.rename",
"numpy.zeros",
"pandas.DataFrame",
"cv2.imread"
] | [((231, 247), 'pandas.DataFrame', 'pd.DataFrame', (['{}'], {}), '({})\n', (243, 247), True, 'import pandas as pd\n'), ((1542, 1584), 'os.makedirs', 'os.makedirs', (['"""/data/images"""'], {'exist_ok': '(True)'}), "('/data/images', exist_ok=True)\n", (1553, 1584), False, 'import os\n'), ((1589, 1630), 'os.makedirs', 'os.makedirs', (['"""/data/masks"""'], {'exist_ok': '(True)'}), "('/data/masks', exist_ok=True)\n", (1600, 1630), False, 'import os\n'), ((2276, 2295), 'pathlib.Path', 'Path', (['"""/data/MVTec"""'], {}), "('/data/MVTec')\n", (2280, 2295), False, 'from pathlib import Path\n'), ((1764, 1793), 'os.path.exists', 'os.path.exists', (['raw_mask_path'], {}), '(raw_mask_path)\n', (1778, 1793), False, 'import os\n'), ((2065, 2116), 'os.rename', 'os.rename', (['raw_img_path', 'f"""/data/images/{stem}.png"""'], {}), "(raw_img_path, f'/data/images/{stem}.png')\n", (2074, 2116), False, 'import os\n'), ((1807, 1858), 'os.rename', 'os.rename', (['raw_mask_path', 'f"""/data/masks/{stem}.png"""'], {}), "(raw_mask_path, f'/data/masks/{stem}.png')\n", (1816, 1858), False, 'import os\n'), ((1935, 1959), 'cv2.imread', 'cv2.imread', (['raw_img_path'], {}), '(raw_img_path)\n', (1945, 1959), False, 'import cv2\n'), ((1979, 1998), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (1987, 1998), True, 'import numpy as np\n'), ((2011, 2055), 'cv2.imwrite', 'cv2.imwrite', (['f"""/data/masks/{stem}.png"""', 'mask'], {}), "(f'/data/masks/{stem}.png', mask)\n", (2022, 2055), False, 'import cv2\n')] |
"""Compute integral flux in an energy band for the Fermi diffuse model.
"""
from astropy.units import Quantity
from gammapy.datasets import FermiGalacticCenter
cube = FermiGalacticCenter.diffuse_model()
print(cube)
energy_band = Quantity([10, 50], 'GeV')
image = cube.integral_flux_image(energy_band, energy_bins=100)
image.writeto('fermi_diffuse_integral_flux_image.fits', clobber=True)
# Some checks
surface_brightness = Quantity(image.data.mean(), 'cm^-2 s^-1 sr^-1')
print('Mean surface brightness in image: {0}'.format(surface_brightness))
| [
"gammapy.datasets.FermiGalacticCenter.diffuse_model",
"astropy.units.Quantity"
] | [((168, 203), 'gammapy.datasets.FermiGalacticCenter.diffuse_model', 'FermiGalacticCenter.diffuse_model', ([], {}), '()\n', (201, 203), False, 'from gammapy.datasets import FermiGalacticCenter\n'), ((231, 256), 'astropy.units.Quantity', 'Quantity', (['[10, 50]', '"""GeV"""'], {}), "([10, 50], 'GeV')\n", (239, 256), False, 'from astropy.units import Quantity\n')] |
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as ss
import numpy as np
import itertools
def cramers_corrected_stat(confusion_matrix):
    """Bias-corrected Cramér's V for a categorical-categorical association.

    Uses the correction from Bergsma and Wicher,
    Journal of the Korean Statistical Society 42 (2013): 323-328.

    Parameters
    ----------
    confusion_matrix : pandas.DataFrame or numpy.ndarray
        2-D contingency table (e.g. the output of ``pd.crosstab``).

    Returns
    -------
    float
        Corrected Cramér's V in [0, 1].  Returns 0.0 for degenerate
        (single-row or single-column) tables, where the original formula
        divided by zero.
    """
    chi2 = ss.chi2_contingency(confusion_matrix)[0]
    n = confusion_matrix.sum().sum()
    phi2 = chi2 / n
    r, k = confusion_matrix.shape
    # Bias-corrected phi^2 and effective table dimensions.
    phi2corr = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1))
    rcorr = r - ((r - 1) ** 2) / (n - 1)
    kcorr = k - ((k - 1) ** 2) / (n - 1)
    denom = min(kcorr - 1, rcorr - 1)
    if denom <= 0:
        # 1xN / Nx1 tables: association is undefined; avoid ZeroDivisionError.
        return 0.0
    return np.sqrt(phi2corr / denom)
# Page layout: must be the first Streamlit call in the script.
# 'wide' makes the page expand to the full browser width.
st.set_page_config(page_title='Data Science App',
    layout='wide')
# Model building
def build_model(data):
    """Render the dataset-overview section and dispatch the EDA plots.

    Parameters
    ----------
    data : pandas.DataFrame
        The uploaded dataset.  The module-level ``target_variable``
        selects the column used as the target in the EDA sections.
    """
    sns.set_style('darkgrid')
    st.markdown('**1.2- Dataset general info**')
    st.text('Dataset shape:')
    # Fixed: the original read the module-level `df` instead of the `data`
    # argument (same object in the current flow, but wrong for any other caller).
    st.text(data.shape)
    categorical_attributes = list(data.select_dtypes(include=['object']).columns)
    st.text("Categorical Variables:")
    st.text(categorical_attributes)
    numerical_attributes = list(data.select_dtypes(include=['float64', 'int64']).columns)
    st.text("Numerical Variables:")
    st.text(numerical_attributes)
    st.markdown('**1.3- Duplicated values**')
    st.text(data.duplicated().sum())
    st.markdown('**1.4- Missing values**')
    st.text(data.isnull().sum())
    st.markdown('**1.5- Unique values in the Categorical Variables**')
    for col_name in data.columns:
        if data[col_name].dtypes == 'object':
            unique_cat = len(data[col_name].unique())
            st.text("Feature '{col_name}' has {unique_cat} unique categories".format(col_name=col_name, unique_cat=unique_cat))
    st.subheader('2- Exploratory Data Analysis (EDA)')
    st.markdown('**2.1- Descriptive Statistics**')
    st.text(data.describe())
    st.markdown('**2.2- Outlier detection by Boxplot**')
    if not numerical_attributes:
        st.text('There is no numerical variable')
    else:
        for a in numerical_attributes:
            st.text(a)
            fig = plt.figure(figsize=(20, 10))
            sns.boxplot(data[a])
            st.pyplot(fig)
    # Object targets and low-cardinality numeric targets (<= 5 distinct
    # values) are treated as categorical; everything else as continuous.
    if data[target_variable].dtypes == 'O':
        catplots(data)
    elif len(data[target_variable].unique()) > 5:
        numplots(data)
    else:
        catplots(data)
def catplots(data):
    """EDA section used when the target variable is categorical.

    Renders, via streamlit: a count plot of the target, a numeric
    correlation heatmap, per-feature histograms split by the target,
    per-category count plots faceted by the target, and a Cramér's V
    heatmap between the categorical columns.  Reads the module-level
    ``target_variable``.
    """
    sns.set_style('darkgrid')
    hue = target_variable
    categorical_attributes = list(data.select_dtypes(include=['object']).columns)
    numerical_attributes = list(data.select_dtypes(include=['float64', 'int64']).columns)
    st.markdown('**2.3- Target Variable plot**')
    st.text("Target variable:" + hue)
    fig = plt.figure(figsize=(20, 10))
    ax = sns.countplot(data[hue])
    # Annotate each bar with its count.
    for p in ax.patches:
        height = p.get_height()
        ax.text(x=p.get_x() + (p.get_width() / 2), y=height * 1.01, s='{:.0f}'.format(height), ha='center')
    st.pyplot(fig)
    st.markdown('**2.4- Numerical Variables**')
    st.markdown('***2.4.1- Correlation***')
    try:
        fig = plt.figure(figsize=(20, 10))
        sns.heatmap(data.corr(), cmap='Blues', annot=True)
        st.pyplot(fig)
    except Exception:  # was a bare except; corr() fails without numeric columns
        st.text('There is no numerical variable')
    st.markdown('***2.4.2- Distributions***')
    for a in numerical_attributes:
        st.text(a)
        fig = plt.figure(figsize=(20, 10))
        sns.histplot(data=data, x=a, kde=True, hue=hue)
        st.pyplot(fig)
    st.markdown('**2.5- Categorical Variables**')
    if not categorical_attributes:
        st.text('There is no categorical variable')
    else:
        for a in categorical_attributes:
            # Skip the target itself and high-cardinality columns.
            if a == hue or len(data[a].unique()) >= 13:
                continue
            st.text(a)
            # sns.catplot builds its own figure; the stray plt.figure() the
            # original opened here leaked an empty figure per column.
            g = sns.catplot(data=data, x=a, kind='count', col=hue, sharey=False)
            for i in range(data[hue].nunique()):
                ax = g.facet_axis(0, i)
                for p in ax.patches:
                    height = p.get_height()
                    ax.text(x=p.get_x() + (p.get_width() / 2), y=height * 1.01, s='{:.0f}'.format(height), ha='center')
            g.set_xticklabels(rotation=90)
            st.pyplot(g)
    st.markdown('***2.5.1 - Correlation between categorical***')
    corrM = np.zeros((len(categorical_attributes), len(categorical_attributes)))
    for col1, col2 in itertools.combinations(categorical_attributes, 2):
        idx1, idx2 = categorical_attributes.index(col1), categorical_attributes.index(col2)
        corrM[idx1, idx2] = cramers_corrected_stat(pd.crosstab(data[col1], data[col2]))
        corrM[idx2, idx1] = corrM[idx1, idx2]
    # NOTE: the diagonal stays 0 (self-association is not computed), as before.
    corr = pd.DataFrame(corrM, index=categorical_attributes, columns=categorical_attributes)
    fig = plt.figure(figsize=(20, 10))
    sns.heatmap(corr, annot=True, cmap='Blues')
    plt.title("Cramer V Correlation between Variables")
    st.pyplot(fig)
def numplots(data):
    """EDA section used when the target variable is continuous.

    Renders, via streamlit: the target's distribution, a regression plot
    of every other numerical column against the target, a KDE of the
    target split by each low-cardinality categorical column, and a
    Cramér's V heatmap between the categorical columns.  Reads the
    module-level ``target_variable``.
    """
    sns.set_style('darkgrid')
    hue = target_variable
    categorical_attributes = list(data.select_dtypes(include=['object']).columns)
    numerical_attributes = list(data.select_dtypes(include=['float64', 'int64']).columns)
    st.markdown('**2.3- Target Variable plot**')
    st.text("Target variable:" + hue)
    fig = plt.figure(figsize=(20, 10))
    sns.histplot(data=data, x=hue, kde=True)
    st.pyplot(fig)
    st.markdown('**2.4- Numerical Variables**')
    if not numerical_attributes:
        # Fixed: the original printed "categorical" in the numerical section.
        st.text('There is no numerical variable')
    else:
        for a in numerical_attributes:
            if a == hue:
                continue
            st.text(a)
            # sns.lmplot builds its own figure; the stray plt.figure() the
            # original opened (and immediately discarded) here leaked an
            # empty figure per column.
            fig = sns.lmplot(data=data, x=a, y=hue)
            st.pyplot(fig)
    st.markdown('**2.5- Categorical Variables**')
    if not categorical_attributes:
        st.text('There is no categorical variable')
    else:
        for a in categorical_attributes:
            # Skip the target itself and high-cardinality columns.
            if a == hue or len(data[a].unique()) >= 13:
                continue
            st.text(a)
            fig = plt.figure(figsize=(20, 10))
            sns.kdeplot(data=data, x=hue, hue=a)
            st.pyplot(fig)
    st.markdown('***2.5.1 - Correlation between categorical***')
    corrM = np.zeros((len(categorical_attributes), len(categorical_attributes)))
    for col1, col2 in itertools.combinations(categorical_attributes, 2):
        idx1, idx2 = categorical_attributes.index(col1), categorical_attributes.index(col2)
        corrM[idx1, idx2] = cramers_corrected_stat(pd.crosstab(data[col1], data[col2]))
        corrM[idx2, idx1] = corrM[idx1, idx2]
    # NOTE: the diagonal stays 0 (self-association is not computed), as before.
    corr = pd.DataFrame(corrM, index=categorical_attributes, columns=categorical_attributes)
    fig = plt.figure(figsize=(20, 10))
    sns.heatmap(corr, annot=True, cmap='Blues')
    plt.title("Cramer V Correlation between Variables")
    st.pyplot(fig)
st.write("""
# Data Science App
""")
st.image('data.jpg')
st.write("""
In this implementation, you can do the EDA of our dataset to speed-up your analysis! \n
To use this app, follow the steps: \n
1º - Import your dateset in the sidebar on the left. \n
2º - Choose the target variable on sidebar. \n
3º - Click on the confirmation button to run the app and just wait for the results.
""")
# In[ ]:
# Sidebar - Collects user input features into dataframe
with st.sidebar.header('1. Upload your CSV data'):
uploaded_file = st.sidebar.file_uploader("Upload your input CSV file", type=["csv"])
st.sidebar.markdown("""
[Example CSV input file](https://raw.githubusercontent.com/dataprofessor/data/master/delaney_solubility_with_descriptors.csv)
""")
# Main panel: preview the uploaded dataset, let the user pick the target
# column, and run the EDA once the confirmation button is pressed.
st.subheader('1- Dataset')
if uploaded_file is not None:
    df = pd.read_csv(uploaded_file)
    st.markdown('**1.1. Dataset info:**')
    st.write('First 5 rows of the dataset:')
    st.write(df.head())
    st.write('Last 5 rows of the dataset:')
    st.write(df.tail())
    # `checker` gates the (potentially slow) EDA behind the confirm button.
    checker = False
    with st.sidebar.header('2. Select the target variable'):
        # Module-level: read by build_model/catplots/numplots.
        target_variable = st.sidebar.selectbox('Select the target variable', list(df.columns))
        if st.sidebar.button("Click to confirm the target variable"):
            checker = True
    if checker == True:
        build_model(df)
else:
    st.write("Please, input a dataset")
| [
"streamlit.image",
"pandas.read_csv",
"seaborn.histplot",
"seaborn.catplot",
"seaborn.set_style",
"streamlit.sidebar.header",
"streamlit.sidebar.markdown",
"streamlit.set_page_config",
"pandas.DataFrame",
"streamlit.sidebar.button",
"streamlit.markdown",
"streamlit.write",
"pandas.crosstab",... | [((729, 793), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Data Science App"""', 'layout': '"""wide"""'}), "(page_title='Data Science App', layout='wide')\n", (747, 793), True, 'import streamlit as st\n'), ((6631, 6669), 'streamlit.write', 'st.write', (['"""\n\t# Data Science App\n\t"""'], {}), '("""\n\t# Data Science App\n\t""")\n', (6639, 6669), True, 'import streamlit as st\n'), ((6671, 6691), 'streamlit.image', 'st.image', (['"""data.jpg"""'], {}), "('data.jpg')\n", (6679, 6691), True, 'import streamlit as st\n'), ((6693, 7030), 'streamlit.write', 'st.write', (['"""\nIn this implementation, you can do the EDA of our dataset to speed-up your analysis! \n\nTo use this app, follow the steps: \n \n1º - Import your dateset in the sidebar on the left. \n\n2º - Choose the target variable on sidebar. \n\n3º - Click on the confirmation button to run the app and just wait for the results.\n"""'], {}), '(\n """\nIn this implementation, you can do the EDA of our dataset to speed-up your analysis! \n\nTo use this app, follow the steps: \n \n1º - Import your dateset in the sidebar on the left. \n\n2º - Choose the target variable on sidebar. 
\n\n3º - Click on the confirmation button to run the app and just wait for the results.\n"""\n )\n', (6701, 7030), True, 'import streamlit as st\n'), ((7434, 7460), 'streamlit.subheader', 'st.subheader', (['"""1- Dataset"""'], {}), "('1- Dataset')\n", (7446, 7460), True, 'import streamlit as st\n'), ((842, 867), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (855, 867), True, 'import seaborn as sns\n'), ((895, 939), 'streamlit.markdown', 'st.markdown', (['"""**1.2- Dataset general info**"""'], {}), "('**1.2- Dataset general info**')\n", (906, 939), True, 'import streamlit as st\n'), ((941, 966), 'streamlit.text', 'st.text', (['"""Dataset shape:"""'], {}), "('Dataset shape:')\n", (948, 966), True, 'import streamlit as st\n'), ((968, 985), 'streamlit.text', 'st.text', (['df.shape'], {}), '(df.shape)\n', (975, 985), True, 'import streamlit as st\n'), ((1067, 1100), 'streamlit.text', 'st.text', (['"""Categorical Variables:"""'], {}), "('Categorical Variables:')\n", (1074, 1100), True, 'import streamlit as st\n'), ((1102, 1133), 'streamlit.text', 'st.text', (['categorical_attributes'], {}), '(categorical_attributes)\n', (1109, 1133), True, 'import streamlit as st\n'), ((1223, 1254), 'streamlit.text', 'st.text', (['"""Numerical Variables:"""'], {}), "('Numerical Variables:')\n", (1230, 1254), True, 'import streamlit as st\n'), ((1256, 1285), 'streamlit.text', 'st.text', (['numerical_attributes'], {}), '(numerical_attributes)\n', (1263, 1285), True, 'import streamlit as st\n'), ((1289, 1330), 'streamlit.markdown', 'st.markdown', (['"""**1.3- Duplicated values**"""'], {}), "('**1.3- Duplicated values**')\n", (1300, 1330), True, 'import streamlit as st\n'), ((1367, 1405), 'streamlit.markdown', 'st.markdown', (['"""**1.4- Missing values**"""'], {}), "('**1.4- Missing values**')\n", (1378, 1405), True, 'import streamlit as st\n'), ((1438, 1504), 'streamlit.markdown', 'st.markdown', (['"""**1.5- Unique values in the Categorical 
Variables**"""'], {}), "('**1.5- Unique values in the Categorical Variables**')\n", (1449, 1504), True, 'import streamlit as st\n'), ((1747, 1797), 'streamlit.subheader', 'st.subheader', (['"""2- Exploratory Data Analysis (EDA)"""'], {}), "('2- Exploratory Data Analysis (EDA)')\n", (1759, 1797), True, 'import streamlit as st\n'), ((1823, 1869), 'streamlit.markdown', 'st.markdown', (['"""**2.1- Descriptive Statistics**"""'], {}), "('**2.1- Descriptive Statistics**')\n", (1834, 1869), True, 'import streamlit as st\n'), ((1898, 1952), 'streamlit.markdown', 'st.markdown', (['"""**2.2- Outlier detectetion by Boxplot**"""'], {}), "('**2.2- Outlier detectetion by Boxplot**')\n", (1909, 1952), True, 'import streamlit as st\n'), ((2351, 2376), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (2364, 2376), True, 'import seaborn as sns\n'), ((2593, 2637), 'streamlit.markdown', 'st.markdown', (['"""**2.3- Target Variable plot**"""'], {}), "('**2.3- Target Variable plot**')\n", (2604, 2637), True, 'import streamlit as st\n'), ((2639, 2672), 'streamlit.text', 'st.text', (["('Target variable:' + hue)"], {}), "('Target variable:' + hue)\n", (2646, 2672), True, 'import streamlit as st\n'), ((2680, 2708), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2690, 2708), True, 'import matplotlib.pyplot as plt\n'), ((2716, 2740), 'seaborn.countplot', 'sns.countplot', (['data[hue]'], {}), '(data[hue])\n', (2729, 2740), True, 'import seaborn as sns\n'), ((2895, 2909), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (2904, 2909), True, 'import streamlit as st\n'), ((2913, 2956), 'streamlit.markdown', 'st.markdown', (['"""**2.4- Numerical Variables**"""'], {}), "('**2.4- Numerical Variables**')\n", (2924, 2956), True, 'import streamlit as st\n'), ((3044, 3083), 'streamlit.markdown', 'st.markdown', (['"""***2.4.1- Correlation***"""'], {}), "('***2.4.1- Correlation***')\n", (3055, 3083), True, 'import 
streamlit as st\n'), ((3259, 3300), 'streamlit.markdown', 'st.markdown', (['"""***2.4.2- Distributions***"""'], {}), "('***2.4.2- Distributions***')\n", (3270, 3300), True, 'import streamlit as st\n'), ((3465, 3510), 'streamlit.markdown', 'st.markdown', (['"""**2.5- Categorical Variables**"""'], {}), "('**2.5- Categorical Variables**')\n", (3476, 3510), True, 'import streamlit as st\n'), ((4829, 4854), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (4842, 4854), True, 'import seaborn as sns\n'), ((5071, 5115), 'streamlit.markdown', 'st.markdown', (['"""**2.3- Target Variable plot**"""'], {}), "('**2.3- Target Variable plot**')\n", (5082, 5115), True, 'import streamlit as st\n'), ((5117, 5150), 'streamlit.text', 'st.text', (["('Target variable:' + hue)"], {}), "('Target variable:' + hue)\n", (5124, 5150), True, 'import streamlit as st\n'), ((5158, 5186), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (5168, 5186), True, 'import matplotlib.pyplot as plt\n'), ((5189, 5229), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'data', 'x': 'hue', 'kde': '(True)'}), '(data=data, x=hue, kde=True)\n', (5201, 5229), True, 'import seaborn as sns\n'), ((5239, 5253), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (5248, 5253), True, 'import streamlit as st\n'), ((5258, 5301), 'streamlit.markdown', 'st.markdown', (['"""**2.4- Numerical Variables**"""'], {}), "('**2.4- Numerical Variables**')\n", (5269, 5301), True, 'import streamlit as st\n'), ((5588, 5633), 'streamlit.markdown', 'st.markdown', (['"""**2.5- Categorical Variables**"""'], {}), "('**2.5- Categorical Variables**')\n", (5599, 5633), True, 'import streamlit as st\n'), ((7101, 7145), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""1. Upload your CSV data"""'], {}), "('1. 
Upload your CSV data')\n", (7118, 7145), True, 'import streamlit as st\n'), ((7167, 7235), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload your input CSV file"""'], {'type': "['csv']"}), "('Upload your input CSV file', type=['csv'])\n", (7191, 7235), True, 'import streamlit as st\n'), ((7240, 7404), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""\n[Example CSV input file](https://raw.githubusercontent.com/dataprofessor/data/master/delaney_solubility_with_descriptors.csv)\n"""'], {}), '(\n """\n[Example CSV input file](https://raw.githubusercontent.com/dataprofessor/data/master/delaney_solubility_with_descriptors.csv)\n"""\n )\n', (7259, 7404), True, 'import streamlit as st\n'), ((7501, 7527), 'pandas.read_csv', 'pd.read_csv', (['uploaded_file'], {}), '(uploaded_file)\n', (7512, 7527), True, 'import pandas as pd\n'), ((7532, 7569), 'streamlit.markdown', 'st.markdown', (['"""**1.1. Dataset info:**"""'], {}), "('**1.1. Dataset info:**')\n", (7543, 7569), True, 'import streamlit as st\n'), ((7574, 7614), 'streamlit.write', 'st.write', (['"""First 5 rows of the dataset:"""'], {}), "('First 5 rows of the dataset:')\n", (7582, 7614), True, 'import streamlit as st\n'), ((7643, 7682), 'streamlit.write', 'st.write', (['"""Last 5 rows of the dataset:"""'], {}), "('Last 5 rows of the dataset:')\n", (7651, 7682), True, 'import streamlit as st\n'), ((8025, 8060), 'streamlit.write', 'st.write', (['"""Please, input a dataset"""'], {}), "('Please, input a dataset')\n", (8033, 8060), True, 'import streamlit as st\n'), ((400, 437), 'scipy.stats.chi2_contingency', 'ss.chi2_contingency', (['confusion_matrix'], {}), '(confusion_matrix)\n', (419, 437), True, 'import scipy.stats as ss\n'), ((1991, 2032), 'streamlit.text', 'st.text', (['"""There is no numerical variable"""'], {}), "('There is no numerical variable')\n", (1998, 2032), True, 'import streamlit as st\n'), ((3099, 3127), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 
10)'}), '(figsize=(20, 10))\n', (3109, 3127), True, 'import matplotlib.pyplot as plt\n'), ((3188, 3202), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (3197, 3202), True, 'import streamlit as st\n'), ((3335, 3345), 'streamlit.text', 'st.text', (['a'], {}), '(a)\n', (3342, 3345), True, 'import streamlit as st\n'), ((3354, 3382), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (3364, 3382), True, 'import matplotlib.pyplot as plt\n'), ((3386, 3433), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'data', 'x': 'a', 'kde': '(True)', 'hue': 'hue'}), '(data=data, x=a, kde=True, hue=hue)\n', (3398, 3433), True, 'import seaborn as sns\n'), ((3445, 3459), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (3454, 3459), True, 'import streamlit as st\n'), ((3552, 3595), 'streamlit.text', 'st.text', (['"""There is no categorical variable"""'], {}), "('There is no categorical variable')\n", (3559, 3595), True, 'import streamlit as st\n'), ((4137, 4197), 'streamlit.markdown', 'st.markdown', (['"""***2.5.1 - Correlation between categorical***"""'], {}), "('***2.5.1 - Correlation between categorical***')\n", (4148, 4197), True, 'import streamlit as st\n'), ((4296, 4345), 'itertools.combinations', 'itertools.combinations', (['categorical_attributes', '(2)'], {}), '(categorical_attributes, 2)\n', (4318, 4345), False, 'import itertools\n'), ((4568, 4654), 'pandas.DataFrame', 'pd.DataFrame', (['corrM'], {'index': 'categorical_attributes', 'columns': 'categorical_attributes'}), '(corrM, index=categorical_attributes, columns=\n categorical_attributes)\n', (4580, 4654), True, 'import pandas as pd\n'), ((4658, 4686), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (4668, 4686), True, 'import matplotlib.pyplot as plt\n'), ((4689, 4732), 'seaborn.heatmap', 'sns.heatmap', (['corr'], {'annot': '(True)', 'cmap': '"""Blues"""'}), "(corr, annot=True, cmap='Blues')\n", (4700, 
4732), True, 'import seaborn as sns\n'), ((4737, 4788), 'matplotlib.pyplot.title', 'plt.title', (['"""Cramer V Correlation between Variables"""'], {}), "('Cramer V Correlation between Variables')\n", (4746, 4788), True, 'import matplotlib.pyplot as plt\n'), ((4791, 4805), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (4800, 4805), True, 'import streamlit as st\n'), ((5340, 5383), 'streamlit.text', 'st.text', (['"""There is no categorical variable"""'], {}), "('There is no categorical variable')\n", (5347, 5383), True, 'import streamlit as st\n'), ((5675, 5718), 'streamlit.text', 'st.text', (['"""There is no categorical variable"""'], {}), "('There is no categorical variable')\n", (5682, 5718), True, 'import streamlit as st\n'), ((5959, 6019), 'streamlit.markdown', 'st.markdown', (['"""***2.5.1 - Correlation between categorical***"""'], {}), "('***2.5.1 - Correlation between categorical***')\n", (5970, 6019), True, 'import streamlit as st\n'), ((6118, 6167), 'itertools.combinations', 'itertools.combinations', (['categorical_attributes', '(2)'], {}), '(categorical_attributes, 2)\n', (6140, 6167), False, 'import itertools\n'), ((6390, 6476), 'pandas.DataFrame', 'pd.DataFrame', (['corrM'], {'index': 'categorical_attributes', 'columns': 'categorical_attributes'}), '(corrM, index=categorical_attributes, columns=\n categorical_attributes)\n', (6402, 6476), True, 'import pandas as pd\n'), ((6480, 6508), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (6490, 6508), True, 'import matplotlib.pyplot as plt\n'), ((6511, 6554), 'seaborn.heatmap', 'sns.heatmap', (['corr'], {'annot': '(True)', 'cmap': '"""Blues"""'}), "(corr, annot=True, cmap='Blues')\n", (6522, 6554), True, 'import seaborn as sns\n'), ((6559, 6610), 'matplotlib.pyplot.title', 'plt.title', (['"""Cramer V Correlation between Variables"""'], {}), "('Cramer V Correlation between Variables')\n", (6568, 6610), True, 'import matplotlib.pyplot as plt\n'), 
((6613, 6627), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (6622, 6627), True, 'import streamlit as st\n'), ((7738, 7788), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""2. Select the target variable"""'], {}), "('2. Select the target variable')\n", (7755, 7788), True, 'import streamlit as st\n'), ((7891, 7948), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Click to confirm the target variable"""'], {}), "('Click to confirm the target variable')\n", (7908, 7948), True, 'import streamlit as st\n'), ((2077, 2087), 'streamlit.text', 'st.text', (['a'], {}), '(a)\n', (2084, 2087), True, 'import streamlit as st\n'), ((2097, 2125), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2107, 2125), True, 'import matplotlib.pyplot as plt\n'), ((2130, 2150), 'seaborn.boxplot', 'sns.boxplot', (['data[a]'], {}), '(data[a])\n', (2141, 2150), True, 'import seaborn as sns\n'), ((2154, 2168), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (2163, 2168), True, 'import streamlit as st\n'), ((3214, 3255), 'streamlit.text', 'st.text', (['"""There is no numerical variable"""'], {}), "('There is no numerical variable')\n", (3221, 3255), True, 'import streamlit as st\n'), ((4480, 4515), 'pandas.crosstab', 'pd.crosstab', (['data[col1]', 'data[col2]'], {}), '(data[col1], data[col2])\n', (4491, 4515), True, 'import pandas as pd\n'), ((5464, 5474), 'streamlit.text', 'st.text', (['a'], {}), '(a)\n', (5471, 5474), True, 'import streamlit as st\n'), ((5485, 5513), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (5495, 5513), True, 'import matplotlib.pyplot as plt\n'), ((5525, 5558), 'seaborn.lmplot', 'sns.lmplot', ([], {'data': 'data', 'x': 'a', 'y': 'hue'}), '(data=data, x=a, y=hue)\n', (5535, 5558), True, 'import seaborn as sns\n'), ((5569, 5583), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (5578, 5583), True, 'import streamlit as st\n'), 
((6302, 6337), 'pandas.crosstab', 'pd.crosstab', (['data[col1]', 'data[col2]'], {}), '(data[col1], data[col2])\n', (6313, 6337), True, 'import pandas as pd\n'), ((3714, 3724), 'streamlit.text', 'st.text', (['a'], {}), '(a)\n', (3721, 3724), True, 'import streamlit as st\n'), ((3736, 3748), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3746, 3748), True, 'import matplotlib.pyplot as plt\n'), ((3758, 3822), 'seaborn.catplot', 'sns.catplot', ([], {'data': 'data', 'x': 'a', 'kind': '"""count"""', 'col': 'hue', 'sharey': '(False)'}), "(data=data, x=a, kind='count', col=hue, sharey=False)\n", (3769, 3822), True, 'import seaborn as sns\n'), ((4121, 4133), 'streamlit.pyplot', 'st.pyplot', (['g'], {}), '(g)\n', (4130, 4133), True, 'import streamlit as st\n'), ((5836, 5846), 'streamlit.text', 'st.text', (['a'], {}), '(a)\n', (5843, 5846), True, 'import streamlit as st\n'), ((5858, 5886), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (5868, 5886), True, 'import matplotlib.pyplot as plt\n'), ((5893, 5929), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'data': 'data', 'x': 'hue', 'hue': 'a'}), '(data=data, x=hue, hue=a)\n', (5904, 5929), True, 'import seaborn as sns\n'), ((5941, 5955), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (5950, 5955), True, 'import streamlit as st\n')] |
import os,sys
sys.path.insert(0, "/mnt/f/dev/git/miRExplore/python/")
import time
from textdb.MiGenRelDB import MiGenRelDB
from textdb.SentenceDB import SentenceDB
from collections import defaultdict
from natsort import natsorted
# Load the sentence database; returnAll=True keeps all sentences and
# redoPmid2Sent=True rebuilds the pmid->sentence index from ./development/pmid2sent.
# (Semantics of the flags inferred from names — confirm against SentenceDB.)
sentDB, _ = SentenceDB.loadFromFile("./test/", "./development/pmid2sent", returnAll=True, redoPmid2Sent=True)
# miRNA-gene relation databases for mouse (mmu) and human (hsa).
mmuDB = MiGenRelDB.loadFromFile("./aggregated_test/mirna_gene.mmu.pmid", ltype="mirna", rtype="gene")
hsaDB = MiGenRelDB.loadFromFile("./aggregated_test/mirna_gene.hsa.pmid", ltype="mirna", rtype="gene")
referenceSolution = [
('16831872','miR-9','ONECUT2','MIR_GENE'),
('16831872','miR-9','SYTL4','MIR_GENE'),
#('17438130','miR-17-92','MYC','GENE_MIR'), # not a miRNA (miR-17-92 cluster)
('17438130','let-7c','MYC','MIR_GENE'),
('17438130','let-7c','MIR17HG','MIR_GENE'), #The PPARalpha-mediated induction of c-myc via let-7C subsequently increased expression of the oncogenic mir-17-92 cluster; these events did not occur in Pparalpha-null mice.
('18185580','miR-335','SOX4','MIR_GENE'),
('18185580','miR-335','TNC','MIR_GENE'),
('18755897','miR-34','TP53','GENE_MIR'), # wrong: acetylated TP53 // correct: Finally, miR-34a itself is a transcriptional target of p53, suggesting a positive feedback loop between p53 and miR-34a.
('18755897','miR-34a','TP53','GENE_MIR'),
('18755897','miR-34a','SIRT1','MIR_GENE'),
('18755897','miR-34','SIRT1','MIR_GENE'),
('18755897','miR-34','TP53','MIR_GENE'), # wrong: acetylated TP53 // correct: Finally, miR-34a itself is a transcriptional target of p53, suggesting a positive feedback loop between p53 and miR-34a.
('18755897','miR-34','CDKN1A','MIR_GENE'), #p21 #miR-34a
('18755897','miR-34','TPT1','MIR_GENE'), # p21
('18755897','miR-34','NSG1','MIR_GENE'), # p21
('18755897','miR-34','H3F3AP6','MIR_GENE'), # p21
('18755897','miR-34','TCEAL1','MIR_GENE'), # p21
('18755897','miR-34','BBC3','MIR_GENE'), #34a, has syn PUMA
('19059913','miR-223','SPI1','GENE_MIR'),
('19059913','miR-223','NFIA','MIR_GENE'),
('19059913','miR-223','NFIC','MIR_GENE'), # TM error, means NFIA
('19059913','miR-223','CSF1R','MIR_GENE'),
('19073597','miR-133a','MYOD1','GENE_MIR'),
('19073597','miR-133a','UCP2','MIR_GENE'),
('19073597','miR-133a','BMIQ4','MIR_GENE'), # has UCP-2 as syn
('19073597','miR-133a-mediated','UCP2','MIR_GENE'),
('19073597','miR-133a-mediated','BMIQ4','MIR_GENE'), # has UCP-2 as syn
('22066022', 'miR-21', 'GPT', 'MIR_GENE'), # missing mirtex: Serum miR-21 levels correlated with histological activity index (HAI) in the liver, alanine aminotransferase (ALT), aspartate aminotransferase , bilirubin, international normalized ratio and gamma-glutamyltransferase.
('19158092','miR-21','PDCD4','MIR_GENE'),
('19158092','miR-21-mediated','PDCD4','MIR_GENE'),
#('19378336','miR-145','KRT7','MIR_GENE'), #no interaction
('19378336','miR-30','KRT7','MIR_GENE'),
('19378336','miR-133a','KRT7','MIR_GENE'),
#('19378336','miR-133b','KRT7','MIR_GENE'), #no interaction in text
#('19378336','miR-195','KRT7','MIR_GENE'), #no interaction
#('19378336','miR-125b','KRT7','MIR_GENE'), #no interaction in text
('19378336','miR-199a','KRT7','MIR_GENE'),
('19524507','miR-31','RHOA','MIR_GENE'),
('19544458','miR-92b','CORO1A','MIR_GENE'), # was p57
('19625769','miR-101','EZH2','MIR_GENE'),
('19723773','miR-290','CDKN2A','MIR_GENE'), # p16
('19839716','miR-205','ERBB3','MIR_GENE'),
('19839716','miR-205','ZEB1','MIR_GENE'),
('19956414','miR-29b','COL1A1','MIR_GENE'),
('19956414','miR-29b','OI4','MIR_GENE'), #COL1A1
('19956414','miR-29b','COL1A2','MIR_GENE'),
('19956414','miR-29b','COL4A1','MIR_GENE'),
('19956414','miR-29b','COL5A1','MIR_GENE'),
('19956414','miR-29b','COL5A2','MIR_GENE'),
('19956414','miR-29b','COL3A1','MIR_GENE'),
('19956414','miR-29b','EDS4A','MIR_GENE'), #COL3A1
('19956414','miR-29b','LAMC1','MIR_GENE'),
('19956414','miR-29b','FBN1','MIR_GENE'),
('19956414','miR-29b','SPARC','MIR_GENE'),
('19956414','miR-29b','ON','MIR_GENE'), #osteonectin
('19956414','miR-29b','BMP1','MIR_GENE'),
('19956414','miR-29b','PCOLC','MIR_GENE'), #BMP1
('19956414','miR-29b','ADAM12','MIR_GENE'),
('19956414','miR-29b','NKIRAS2','MIR_GENE'),
('20012062','miR-221','PSMD9','MIR_GENE'), #p27
('20012062','miR-222','PSMD9','MIR_GENE'),
('20012062','miR-221','SSSCA1','MIR_GENE'), #p27
('20012062','miR-222','SSSCA1','MIR_GENE'),
('20017139','miR-146a','CNTN2','GENE_MIR'),
('20017139','miR-146a','NFKB1','GENE_MIR'),
#('20046097', 'miR-449', 'CDK', 'MIR_GENE'), # CDK not a gene symbol, not in mirtex
('20046097', 'miR-449', 'E2F1', 'MIR_GENE'), #not in mirtex :miR-449 regulates CDK-Rb-E2F1 through an auto-regulatory feedback circuit.
('20046097', 'miR-449', 'RB1', 'MIR_GENE'), #not in mirtex
('20103675','miR-222','PPP2R2A','MIR_GENE'),
('20143188','miR-21','PDCD4','MIR_GENE'),
('20299489','miR-34a','ERK','GENE_MIR'),
('20299489','miR-34a','EPHB2','GENE_MIR'),# ERK syn
('20299489','miR-34a','MAPK1','GENE_MIR'),# ERK syn
('20299489','miR-34a','MAP2K1','MIR_GENE'),
('20299489','miR-221','FOS','MIR_GENE'),
('20299489','miR-222','FOS','MIR_GENE'),
('20299489','miR-34a','FOSB','GENE_MIR'), #mirtex missing: induced miR-34a expression by transactivation via the activator protein-1 binding site in the upstream region of the miR-34a gene.
('20299489','miR-34a','JUND','GENE_MIR'), # activator protein 1 syn
('20299489','miR-34a','JUN','GENE_MIR'), # induced miR-34a expression by transactivation via the activator protein-1 binding site
('20462046','miR-21','PDCD4','MIR_GENE'),
('20478254','miR-183','SLC1A1','MIR_GENE'),
('20478254','miR-96','SLC1A1','MIR_GENE'),
('20478254','miR-182','SLC1A1','MIR_GENE'),
('20498046','miR-200b','ATP2A2','MIR_GENE'),
('20498046','miR-214','ATP2A2','MIR_GENE'),
('20603081','miR-150','MYB','MIR_GENE'),
('20606648', 'miR-34a', 'BIRC5', 'MIR_GENE'), # missing in mirtex, miRNA-34a (miR-34a) induced apoptosis, inhibited survivin expression, and downregulated MAPK pathway in B16F10 cells.
('20620960','miR-200c','FAP','MIR_GENE'),
('20620960','miR-200','FAP','MIR_GENE'),
('20620960','miR-200c','GLMN','MIR_GENE'), # has FAP as syn
('20620960','miR-200','GLMN','MIR_GENE'), # has FAP as syn
('20620960','miR-200','FAS','MIR_GENE'), # CD95; quite indirect though. miR-200c regulates induction of apoptosis through CD95 by targeting FAP-1.
('20620960','miR-200c','FAS','MIR_GENE'), # CD95; quite indirect though. miR-200c regulates induction of apoptosis through CD95 by targeting FAP-1.
('20620960','miR-200','ZEB1','MIR_GENE'), # 200c
('20620960','miR-200','ZEB2','MIR_GENE'), # 200c
('20620960','miR-200','PPCD3','MIR_GENE'), # ZEB1
('20676061','miR-29c','WNT5A','MIR_GENE'),
('20676061','miR-130b','WNT5A','MIR_GENE'),
('20676061','miR-101','WNT5A','MIR_GENE'),
('20676061','miR-30b','WNT5A','MIR_GENE'),
('20676061','miR-140','WNT5A','MIR_GENE'),
('20676061','miR-29c','ZIC1','MIR_GENE'),
('20676061','miR-130b','ZIC1','MIR_GENE'),
('20676061','miR-101','ZIC1','MIR_GENE'),
('20676061','miR-30b','ZIC1','MIR_GENE'),
('20676061','miR-140','ZIC1','MIR_GENE'),
('20676061','miR-29c','TGFB1','MIR_GENE'),
('20676061','miR-130b','TGFB1','MIR_GENE'),
('20676061','miR-101','TGFB1','MIR_GENE'),
('20676061','miR-30b','TGFB1','MIR_GENE'),
('20676061','miR-140','TGFB1','MIR_GENE'),
('20676061','miR-29c','DPD1','MIR_GENE'), # has TGFB1 as syn
('20676061','miR-130b','DPD1','MIR_GENE'),
('20676061','miR-101','DPD1','MIR_GENE'),
('20676061','miR-30b','DPD1','MIR_GENE'),
('20676061','miR-140','DPD1','MIR_GENE'),
('20736365','miR-196','HOXC8','MIR_GENE'),
('20736365','miR-196','HOX3A','MIR_GENE'), # has syn HOXC8
('20859756', 'miR-126', 'TMEM8B', 'GENE_MIR'), # missing mirtex: In particular, miR-126, miR-142-3p, miR-155, miR-552, and miR-630 were all upregulated, whereas miR-146a, miR-152, miR-205, miR-365, miR-449, miR-518c, miR-584, miR-615, and miR-622 were downregulated after NGX6 transfection.
('20859756', 'miR-142', 'TMEM8B', 'GENE_MIR'),
('20859756', 'miR-155', 'TMEM8B', 'GENE_MIR'),
('20859756', 'miR-552', 'TMEM8B', 'GENE_MIR'),
('20859756', 'miR-630', 'TMEM8B', 'GENE_MIR'),
('20859756', 'miR-146a', 'TMEM8B', 'GENE_MIR'),
('20859756', 'miR-152', 'TMEM8B', 'GENE_MIR'),
('20859756', 'miR-205', 'TMEM8B', 'GENE_MIR'),
('20859756', 'miR-365', 'TMEM8B', 'GENE_MIR'),
('20859756', 'miR-449', 'TMEM8B', 'GENE_MIR'),
('20859756', 'miR-518c', 'TMEM8B', 'GENE_MIR'),
('20859756', 'miR-584', 'TMEM8B', 'GENE_MIR'),
('20859756', 'miR-615', 'TMEM8B', 'GENE_MIR'),
('20859756', 'miR-622', 'TMEM8B', 'GENE_MIR'),
('20945501', 'miR-141', 'AR', 'MIR_GENE'), # missing mirtex, inhibition of miR-141 by anti-miR-141 suppressed the growth of the LNCaP subline overexpressing AR.
('20945501', 'miR-141', 'SBMA', 'MIR_GENE'), # has AR as syn
('20945501', 'miR-141', 'DHTR', 'MIR_GENE'), # has AR as syn
('20945501', 'miR-141', 'AKR1B3', 'MIR_GENE'), # has AR as syn
('20945501', 'miR-141', 'AKR1B7', 'MIR_GENE'), # has AR as syn
('20945501', 'miR-141', 'AKR1B8', 'MIR_GENE'), # has AR as syn
('20945501', 'miR-141', 'SBMA', 'MIR_GENE'), # has AR as syn
('20945501', 'miR-141', 'AREG', 'MIR_GENE'), # has AR as syn
('20945501', 'miR-141', 'FDXR', 'MIR_GENE'), # has AR as syn
('20947507','miR-155','NFKB1','GENE_MIR'),
('20947507','miR-155','CARD11','GENE_MIR'),
('20947507','miR-155','SPI1','MIR_GENE'), # PU.1
#('20947507','miR-155','CD10','MIR_GENE'),
('20947507','miR-155','MME','MIR_GENE'), # CD10
('21088996','miR-21','PDCD4','MIR_GENE'),
('21276775','miR-145','ROBO2','MIR_GENE'),
('21276775','miR-145','SRGAP2','MIR_GENE'),
('21276775','miR-145','SRGAP3','MIR_GENE'), # has SRGAP2 syn
('21276775','miR-214','ROBO2','MIR_GENE'),
('21276775','miR-214','SRGAP2','MIR_GENE'),
('21276775','miR-214','SRGAP3','MIR_GENE'),# has SRGAP2 syn
('21285947','miR-24','INS','MIR_GENE'),
('21285947','miR-26','INS','MIR_GENE'),
('21285947','miR-182','INS','MIR_GENE'),
('21285947','miR-148','INS','MIR_GENE'),
#('21347332','miR-21','serum','GENE_MIR'), # not a gene
('21347332','miR-21','FGF2','GENE_MIR'),
('21347332','miR-21','RHOB','MIR_GENE'),
('21415212','miR-486','OLFM4','MIR_GENE'),
('21454627','mmu-miR-183','mSEL-1L','MIR_GENE'),
('21609717','miR-98-mediated','IL10','MIR_GENE'),
('21609717','miR-98','IL10','MIR_GENE'),
('21609717','miR-98','PTGS2','MIR_GENE'), #COX-2
('21609717','miR-98', 'LPS', 'GENE_MIR'),
('21609717','miR-98', 'IRF6', 'GENE_MIR'), #missing mirtex, MicroRNA-98 negatively regulates IL-10 production and endotoxin tolerance in macrophages after LPS stimulation.
('21666774','miR-21','LH (luteinizing hormone)','GENE_MIR'),
('21666774','miR-132','LH (luteinizing hormone)','GENE_MIR'),
('21666774','miR-212','LH (luteinizing hormone)','GENE_MIR'),
('21685392','miR-143','NOTCH1','MIR_GENE'), #should be N1ICD, TM issue
('21685392','miR-145','NOTCH1','MIR_GENE'),
('21685392','miR-143','TAN1','MIR_GENE'), #should be N1ICD, TM issue
('21685392','miR-145','TAN1','MIR_GENE'),
('21685392','miR-143','RBPJ','GENE_MIR'), #We also identified N1ICD complex binding to CBF1 sites within the endogenous human miR-143/145 promoter.
('21685392','miR-145','RBPJ','GENE_MIR'),
('21685392','miR-143','JAG1','GENE_MIR'), #Using SRF knockdown, we found that Jag-1/Notch induction of miR-143/145 is SRF independent, although full acquisition of contractile markers requires SRF.
('21685392','miR-145','JAG1','GENE_MIR'),
('21685392','miR-143','SRF','GENE_MIR'), #Using SRF knockdown, we found that Jag-1/Notch induction of miR-143/145 is SRF independent, although full acquisition of contractile markers requires SRF.
('21685392','miR-145','SRF','GENE_MIR'),
('21685392','miR-145','MYOCD','GENE_MIR'), #The serum response factor (SRF)/myocardin complex binds to CArG sequences to activate miR-143/145 transcription
('21685392','miR-143','MYOCD','GENE_MIR'),
('21693621','miR-21','MYC','GENE_MIR'),
('21693621','miR-29a','MYC','GENE_MIR'),
('21898400','miR-520c','MTOR','MIR_GENE'),
('21898400','miR-373','MTOR','MIR_GENE'),
('21898400','miR-520c','SIRT1','MIR_GENE'),
('21898400','miR-373','SIRT1','MIR_GENE'),
('21898400','miR-520c','MMP9','MIR_GENE'),
('21898400','miR-373','MMP9','MIR_GENE'),
('21898400','miR-520c','CLG4B','MIR_GENE'), # MMP-9
('21898400','miR-373','CLG4B','MIR_GENE'),
('22123611','miR-195','BCL2','MIR_GENE'),
('22123611','miR-195','CASP3','MIR_GENE'),
('22123611','miR-195','WT1','MIR_GENE'), #missing mirtex: miR-195-treated podocytes underwent actin rearrangement and failed to synthesize sufficient levels of WT-1 and synaptopodin proteins, which suggests that the cells had suffered injuries similar to those observed in diabetic nephropathy in both humans and animal models.
('22123611','miR-195','SYNPO','MIR_GENE'),
('22123611','miR-195','GUD','MIR_GENE'),
('22139444','miR-30c','MTA1','MIR_GENE'),
('22249219','miR-214','ADORA2A','MIR_GENE'),
('22249219','miR-15','ADORA2A','MIR_GENE'),
('22249219','miR-16','ADORA2A','MIR_GENE'),
('22269326','miR-29b','COL1A1','MIR_GENE'),
('22269326','miR-29b','COL3A1','MIR_GENE'),
('22269326','miR-29b','EDS4A','MIR_GENE'), #COL3A1 syn
('22269326','miR-29b','COL5A1','MIR_GENE'),
('22269326','miR-29b','ELN','MIR_GENE'),
('22286762','miR-21','NF-kappaB','GENE_MIR'),
('22286762','miR-10b','NF-kappaB','GENE_MIR'),
('22286762','miR-17','NF-kappaB','GENE_MIR'),
('22286762','miR-9','NF-kappaB','GENE_MIR'),
('22569260','miR-223','FOXO1','MIR_GENE'),
('22569260','miR-223','FKHR','MIR_GENE'), # FOXO1 syn
('22634495','miR-10a','CHL1','MIR_GENE'),
('22634495','miR-10a','DDX11','MIR_GENE'), # CHL1 syn
('22698995','let-7','BACH1','MIR_GENE'),
('22698995','let-7b','BACH1','MIR_GENE'),
('22698995','let-7c','BACH1','MIR_GENE'),
('22698995','miR-98','BACH1','MIR_GENE'),
#same gene symbol
('22698995','let-7','BRIP1','MIR_GENE'),
('22698995','let-7b','BRIP1','MIR_GENE'),
('22698995','let-7c','BRIP1','MIR_GENE'),
('22698995','miR-98','BRIP1','MIR_GENE'),
('22698995','let-7','HMOX1','MIR_GENE'),
('22761336','miR-96','REV1','MIR_GENE'),
('22761336','miR-96','RAD51','MIR_GENE'),
('22761336','miR-96','RECA','MIR_GENE'), #RAD51
('22761336','miR-96','RAD51A','MIR_GENE'),
('22847613','miR-130b','TP53','GENE_MIR'),
('22847613','miR-130b','ZEB1','MIR_GENE'),
('22847613','miR-130b','PPCD3','MIR_GENE'), # zeb1
('22891274','miR-146a','NFKB1','GENE_MIR'),
('22891274','miR-146a','NFKB1','MIR_GENE'),
('22891274','miR-146a','TRAF6','MIR_GENE'),
('22891274','miR-146a','IRAK1','MIR_GENE'),
('22925189','miR-30c','ERBB2','MIR_GENE'), #Her-2
('22925189','miR-30d','ERBB2','MIR_GENE'),
('22925189','miR-30e','ERBB2','MIR_GENE'),
('22925189','miR-532','ERBB2','MIR_GENE'),
('22955854','miR-144','ZFX','MIR_GENE'),
('22956424','miR-21','PTEN','MIR_GENE'),
('22956424','miR-21','MHAM','MIR_GENE'), # has PTEN syn
('22956424','miR-21','BZS','MIR_GENE'), # has PTEN syn
('22982443','miR-200c','BMI1','MIR_GENE'),
('22982443','miR-200c','ABCG2','MIR_GENE'),
('22982443','miR-200c','ABCG5','MIR_GENE'),
('22982443','miR-200c','MDR1','MIR_GENE'),
('22982443','miR-200c','TBC1D9','MIR_GENE'), # has syn MDR1
('22982443','miR-200c','ABCB1','MIR_GENE'), # has syn MDR1
('22982443','miR-200c','CDH1','MIR_GENE'),
('23010597','miR-134','FOXM1','MIR_GENE'),
('23010597','miR-134','FKHL16','MIR_GENE'), # has FOXM1 as syn
('23010597','miR-134','ITK','MIR_GENE'), # has EMT as syn; mirtex missing: Functional assays demonstrated that miR-134 inhibited EMT in NSCLC cells.
('23010597','miR-134','SLC22A3','MIR_GENE'), # has EMT as syn
('23041385','miR-21','CRP','MIR_GENE'),
#('23041385','miR-21','fibrinogen','MIR_GENE'), # not a gene
('23041385','miR-21','TGFB2','MIR_GENE'),
('23097316','miR-34c','RARg','MIR_GENE'),
('23113351','miR-29','TP53','MIR_GENE'), # missing in mirtex: While miRNA-29 members induced apoptosis through p53 gene activation, the effect of miRNA-29a on osteoblastic cells was independent on p53 expression level.
('23113351','miR-29a','TP53','MIR_GENE'),
('23113351','miR-29','BCL2','MIR_GENE'),
('23113351','miR-29','MCL1','MIR_GENE'),
('23113351','miR-29','CLEC4D','MIR_GENE'), # CLEC4D has syn mcl
('23113351','miR-29a','CLEC4D','MIR_GENE'), # CLEC4D has syn mcl
#('23113351','miR-29','E2F1','MIR_GENE'),
#('23113351','miR-29','E2F3','MIR_GENE'),
('23113351','miR-29a','BCL2','MIR_GENE'),
('23113351','miR-29a','MCL1','MIR_GENE'),
('23113351','miR-29a','E2F1','MIR_GENE'),
('23113351','miR-29a','E2F3','MIR_GENE'),
('23113351','miR-29a','E2F1','MIR_GENE'), # possibly too
('23113351','miR-29a','E2F3','MIR_GENE'), # possibly too
('23148210','miR-210','HIF1A','GENE_MIR'), # was actived in ...-dependant
('23169590','miR-451','IL6','GENE_MIR'),
('23169590','miR-451','IFNA1','GENE_MIR'), #type I IFN
('23169590','miR-451','YWHAZ','MIR_GENE'),
('23169590','miR-451','YWHAD','MIR_GENE'), # has syn YWHAZ
#('23169590','miR-451','14-3-3zeta','MIR_GENE'), # is YWHAZ
('23169590','miR-451','ZFP36','MIR_GENE'),
#Three types of primary DCs treated with antisense RNA antagomirs directed against miR-451 secreted elevated levels of IL-6, TNF, CCL5/RANTES, and CCL3/MIP1alpha, and these results were confirmed using miR-451(null) cells.
#this suggests that miR-451 suppresses these genes normalls
('23169590','miR-451','IL6','MIR_GENE'),
('23169590','miR-451','CCL3','MIR_GENE'),
('23169590','miR-451','CCL5','MIR_GENE'),
('23169590','miR-451','IL6','MIR_GENE'),
('23169590','miR-451','IFNB2','MIR_GENE'), #IL6
('23169590','miR-451','TNF','MIR_GENE'),
('23169590','miR-451','TNFA','MIR_GENE'), #TNF
#miR-451 levels are themselves increased by IL-6 and type I IFN, potentially forming a regulatory loop.
('23169590','miR-451','IL6','GENE_MIR'), #IL6
('23169590','miR-451','IFNA1','GENE_MIR'), #type I IFN
('23169590','miR-451','IFNB2','GENE_MIR'), #IL6
('23190607','miR-203','RAN','MIR_GENE'),
('23190607','miR-203','RAPH1','MIR_GENE'),
('23190607','miR-203','ALS2CR18','MIR_GENE'), # synonym
('23190608','miR-29b','SP1','GENE_MIR'),
('23190608','miR-29b','SP1','MIR_GENE'), # mirtex missing: miR-29b sensitizes multiple myeloma cells to bortezomib-induced apoptosis through the activation of a feedback loop with the transcription factor Sp1.
('23190608','miR-29b','DAND5','GENE_MIR'), # DAND5 has SP1 as syn
('23190608','miR-29b','DAND5','MIR_GENE'),
('23206698','miR-7','IRS2','MIR_GENE'),
('23396109','miR-17','PTEN','MIR_GENE'), #miR-17~92
('23396109','miR-17','MHAM','MIR_GENE'), # MHAM syn is PTEN too
('23396109','miR-17','BZS','MIR_GENE'), # BZS syn is PTEN too
#('23396109','miR-17','BIM','MIR_GENE'), # miR-17~92 is pten
('23396109','miR-17','BCL2L11','MIR_GENE'), # BCL2L11 syn is BIM
('23472202','miR-183','TAOK1','MIR_GENE'),
('23516615','miR-143','ERK5','MIR_GENE'),
('23516615','miR-143','PPARg','MIR_GENE'),
('23516615','miR-204','ERK5','MIR_GENE'),
('23516615','miR-204','PPARg','MIR_GENE'),
('23519125','miR-125a','erbB2','MIR_GENE'),
('23519125','miR-125a','erbB3','MIR_GENE'),
('23519125','miR-125b','erbB2','MIR_GENE'),
('23519125','miR-125b','erbB3','MIR_GENE'),
('23519125','miR-205','erbB2','MIR_GENE'),
('23519125','miR-205','erbB3','MIR_GENE'),
# these are no genes!
#('23527070','miR-21','collagen I','MIR_GENE'),
#('23527070','miR-21','collagen III','MIR_GENE'),
('23527070','miR-21','ELN','MIR_GENE'),
('23527070','miR-21','SMAD7','MIR_GENE'),
('23527070','miR-21','SMAD5','MIR_GENE'),
('23527070','miR-21','SMAD2','MIR_GENE'),
#('23534973','miR-152','HLA-G','MIR_GENE'), not a specific miRNA: miR-152 family
('23579289','miR-214','SP7','MIR_GENE'),
('23583389','miR-96','IRS1','MIR_GENE'),
('23592910','miR-146a','IL1B','GENE_MIR'),
('23592910','miR-146a','IFNG','GENE_MIR'),
#('23592910','miR-146a','TNFA','GENE_MIR'), # is TNF
('23592910','miR-146b','IL1B','GENE_MIR'),
('23592910','miR-146b','IFNG','GENE_MIR'),
('23592910','miR-146b','TNF','GENE_MIR'),
('23592910','miR-146a','IRAK','MIR_GENE'),
('23592910','miR-146b','IRAK','MIR_GENE'),
('23611780','miR-106b','FBXW11','MIR_GENE'), #beta-TRCP2
#('23611780','miR-106b','SNAIL','MIR_GENE'), nope. means cluster + indirect: miR-106b-25 cluster may play an important role in the metastasis of human non-small cell lung cancer cells by directly suppressing the beta-TRCP2 gene expression with a consequent increase in the expression of Snail.
('23611780','miR-93','FBXW11','MIR_GENE'),
('23630358','miR-155','MSR1','MIR_GENE'), # SR-AI syn
('23643257','miR-424','FGR','MIR_GENE'),
('23643257','miR-424','MAP2K1','MIR_GENE'),
('23643257','miR-424','MAPK1','MIR_GENE'), #mitogen-activated protein kinase 1
('23667495','miR-224','DPYSL2','MIR_GENE'),
('23667495','miR-224','KRAS','MIR_GENE'),
('23667495','miR-452','DPYSL2','MIR_GENE'),
('23667495','miR-452','KRAS','MIR_GENE'),
('23667495','miR-181c','KRAS','MIR_GENE'),
('23667495','miR-340','MECP2','MIR_GENE'),
('23667495','miR-181c','MECP2','MIR_GENE'),
('23667495','miR-340','KRAS','MIR_GENE'),
('23759586','miR-34a','SIRT1','MIR_GENE'),
('23759586','miR-125b','TP53','MIR_GENE'),
('23759586','miR-125b','SIRT1','MIR_GENE'),
('23797704','miR-21','TIMP3','MIR_GENE'),
('23797704','miR-221','TIMP3','MIR_GENE'),
('23797704','miR-21','SFD','MIR_GENE'),#is TIMP3
('23797704','miR-221','SFD','MIR_GENE'),#is TIMP3
('23797704','miR-217','TIMP3','MIR_GENE'),
('23797704','miR-217','SFD','MIR_GENE'), #is TIMP3
('23797704','miR-217','SIRT1','MIR_GENE'),
('23836497','miR-20','STAT3','MIR_GENE'),
('23836497','miR-20','CCND1','MIR_GENE'),
('23836497','miR-106a','STAT3','MIR_GENE'),
('23836497','miR-106a','CCND1','MIR_GENE'),
# same genesymbols
('23846856','miR-875','PRDX3','MIR_GENE'),
('23846856','miR-875','PRX','MIR_GENE'),
('23851184','miR-200b','WNT1','MIR_GENE'),
('23851184','miR-22','WNT1','MIR_GENE'),
('23895517','mir-494','TNFSF14','MIR_GENE'),
('23895517','mir-197','TNFSF14','MIR_GENE'),
('23968734','miR-133a','PDLIM5','MIR_GENE'), # LIM
#('23968734','miR-133a','SH3 protein 1','MIR_GENE'),
('23968734','miR-133a','LASP1','MIR_GENE'), #SH3 protein 1
('24006456','miR-29b','IGF1','MIR_GENE'),
('24006456','miR-30c','IGF1','MIR_GENE'),
('24006456','miR-29b','LIF','MIR_GENE'),
('24006456','miR-30c','LIF','MIR_GENE'),
('24006456','miR-29b','PTX3','MIR_GENE'),
('24023867','miR-135a','NR3C2','MIR_GENE'),
('24023867','miR-124','NR3C2','MIR_GENE'),
('24145190','miR-203','SNAI1','GENE_MIR'),
('24145190','miR-203','CD44','MIR_GENE'), # new, not in mirtex: we found that the levels of several EMT activators and miR-203 were positively and negatively correlated with those of CD44, respectively.
('24145190','miR-203','MDU3','MIR_GENE'),
('24145190','miR-203','MIC4','MIR_GENE'),
('24145190','miR-203','MDU2','MIR_GENE'),
('24145190','miR-203','SRC','GENE_MIR'), # missing in mirtex: Finally, we discovered that c-Src kinase activity was required for the downregulation of miR-203
('24155920','miR-21','SPRY1','MIR_GENE'),
('24155920','miR-29a','MCL1','MIR_GENE'),
('24155920','miR-29b','MCL1','MIR_GENE'),
#('24219008','miR-21-5p','TGFBR3','MIR_GENE'),
('24219008','miR-21','TGFBR3','MIR_GENE'), #add
('24219008','hsa-miR-21','TGFBR3','MIR_GENE'), #add
#('24219008','miR-21-5p','PDGFD','MIR_GENE'),
('24219008','miR-21','PDGFD','MIR_GENE'),
('24219008','hsa-miR-21','PDGFD','MIR_GENE'), #add
#('24219008','miR-21-5p','PPM1L','MIR_GENE'),
('24219008','miR-21','PPM1L','MIR_GENE'),
('24219008','hsa-miR-21','PPM1L','MIR_GENE'), #add
#('24219008','miR-181a-5p','ROPN1L','MIR_GENE'),
('24219008','miR-181a','ROPN1L','MIR_GENE'),
('24219008','hsa-miR-181a','ROPN1L','MIR_GENE'),
#('24219008','miR-181a-5p','SLC37A3','MIR_GENE'),
('24219008','hsa-miR-181a','SLC37A3','MIR_GENE'),
('24219008','miR-181a','SLC37A3','MIR_GENE'),
#('24219008','miR-24-2-5p','MYC','MIR_GENE'),
('24219008','hsa-miR-24-2','MYC','MIR_GENE'),
('24219008','hsa-miR-24','MYC','MIR_GENE'),
('24219008','miR-24-2','MYC','MIR_GENE'),
('24219008','miR-24','MYC','MIR_GENE'),
#('24219008','miR-24-2-5p','KCNJ2','MIR_GENE'),
('24219008','hsa-miR-24-2','KCNJ2','MIR_GENE'),
('24219008','hsa-miR-24','KCNJ2','MIR_GENE'),
('24219008','miR-24','KCNJ2','MIR_GENE'),
('24219008','miR-24-2','KCNJ2','MIR_GENE'),
('24219349','miR-203','BMI1','MIR_GENE'),
('24220339','miR-490','FOS','MIR_GENE'),
('24223656','miR-31','RASA1','MIR_GENE'),
('24314216','miR-106','TP53','MIR_GENE'),
('24319262','miR-34a','TP53','GENE_MIR'),
('24319262','miR-145','TP53','GENE_MIR'),
('24319262','miR-155','MAF','MIR_GENE'),
('24319262','miR-34a','TWIST2','MIR_GENE'),
('24319262','miR-34a','MAF','MIR_GENE'),
('24319262','miR-145','TWIST2','MIR_GENE'),
('24319262','miR-145','MAF','MIR_GENE'),
('24330780','miR-124','FLOT1','MIR_GENE'),
('24330780','miR-124','FLOT1','MIR_GENE'),
('24376808','miR-146a','CRK','MIR_GENE'),
('24376808','miR-424','CRK','MIR_GENE'),
('24376808','miR-146a','EGFR','MIR_GENE'),
('24376808','miR-424','EGFR','MIR_GENE'),
('24376808','miR-146a','MAPK14','MIR_GENE'), #p38 / ERK
('24376808','miR-424','MAPK14','MIR_GENE'),
('24376808','miR-146a','AIMP2','MIR_GENE'),#p38 / ERK
('24376808','miR-424','AIMP2','MIR_GENE'),
('24376808','miR-146a','AHSA1','MIR_GENE'),#p38 / ERK
('24376808','miR-424','AHSA1','MIR_GENE'),
]
refDict = defaultdict(set)
for x in referenceSolution:
refDict[x[0]].add((x[1], x[2], x[3]))
tmRemoveTMErrors = {
('19956414','miR-29b','MMRN1'), # ECM, extracellular matrix
('21415212','miR-486','GC'), #gastric cancer
('21415212','miR-486','HTC2'), #Array-CGH
('21415212','miR-486','EAF2'), #TRAITS
('21415212','miR-486','NF2'), #SCH cell line
('21703983', 'miR-632', 'PAFAH1B1'), # Notably, hsa-miR-378, hsa-miR-632, and hsa-miR-636 demonstrated particularly high discrimination between MDS and normal controls. MDS here is myelodysplastic syndromes
('21703983', 'hsa-miR-378', 'PAFAH1B1'),
('21703983', 'miR-378', 'PAFAH1B1'),
('21703983', 'hsa-miR-632', 'PAFAH1B1'),
('21703983', 'hsa-miR-636', 'PAFAH1B1'),
('21703983', 'miR-636', 'PAFAH1B1'),
('22066022', 'miR-21', 'FAM126A'), # HCC refers to hepatocellular carcinoma
('22066022', 'miR-21', 'ST14'), # HAI refers to histological activity index (HAI)
('22066022', 'miR-21', 'SPINT1'), # HAI refers to histological activity index (HAI)
('23643257','miR-424','FGR'), # recognizes FGR
('23643257','miR-424','FGFR1'),
('23643257','miR-424','KAL2'),
('23643257','miR-424','FLT2'),
('24330780','miR-124','TENM1'), #tumor node metastasis (TNM)
('24330780','miR-124','TNM'),
('24223656', 'miR-31', "TPT1"), # RAS p21 GTPase activating protein 1 (RASA1) => p21
('24223656', 'miR-31', "CDKN1A"),
('24223656', 'miR-31', "H3F3AP6"),
('24223656', 'miR-31', "TCEAL1"),
('24223656', 'miR-31', "NSG1"),
('21609717','miR-98','MT-CO2'), # accepts COX-2
('21609717','miR-98','COX8A'),
('21609717','miR-98','CPOX'),
('21609717','miR-98','MT-CO2'),
('23527070', 'miR-21', 'SMAD5'), # SMAD2/5
('23190608','miR-29b','SUPT20H'), # SUPT20H has transcription-factor as syn
('23190608','miR-29b','SUPT20H'),
('18185580','miR-335', 'SUPT20H'),
('23113351','miR-29','RB1'), # RB1 syn: osteosarcoma
('23113351','miR-29a','RB1'), # RB1 syn: osteosarcoma
('23113351','miR-29b','RB1'), # RB1 syn: osteosarcoma
('22982443','miR-200c','CDH17'), # CDH17 syn for cadherin, found in E-cadherin ...
('20603081','miR-150','GLI2'), # THP-1 refers to cells
('22139444', 'miR-30c', 'NDC80'), # refers to HEC-1-B cells ...
('23041385', 'miR-21', 'CO'), # centenarian offspring (CO)
('23041385', 'miR-21', 'CALCR'), # CTR control
('19723773','miR-290','MEF'),#mouse embryo fibroblasts (MEF)
('19723773','miR-290','MEFV'),#mouse embryo fibroblasts (MEF)
('19723773','miR-290','ELF4'),#mouse embryo fibroblasts (MEF)
('19547998', 'miR-21', 'CALR'), # SSA
('19547998', 'miR-181b', 'CALR'),
('19547998', 'miR-21', 'HP'), # hyperplastic polyps
('19547998', 'miR-181b', 'HP'),
('20103675', 'miR-222', 'FAM126A'), # HCC cell lines
('24219349','miR-203','SP'), # side population
('24219349','miR-203','TFF2'), # SP
('21088996','miR-21','BLOC1S6'), # PDAC cells (MIA-Pa-Ca-2)
('21088996','miR-21','MIA'), # PDAC cells (MIA-Pa-Ca-2)
('21088996','miR-21','CAR2'), # PDAC cells (MIA-Pa-Ca-2)
('22847613','miR-130b','SLC22A3'), # epithelial-mesenchymal transition (EMT)
('22847613','miR-130b','ITK'), # epithelial-mesenchymal transition (EMT)
('22925189', 'miR-370', 'II'), #stage II <=> gene symbol
('22925189', 'miR-370', 'IV'), #stage IV <=> gene symbol
('22925189', 'miR-30a', 'II'), #stage II <=> gene symbol
('22925189', 'miR-30a', 'IV'), #stage IV <=> gene symbol
('23592910', 'miR-146a', 'IFNA1'), # TM mismatch with interferon in interfon gamma
('23592910', 'miR-146b', 'IFNA1'),
('24006456','miR-29b','INS'), # spurious hit with insulin-like growth factor
('24006456','miR-30c','INS'), # insulin-like
('20945501', 'miR-141', 'PC'), # matches PC / prostate cancer
('20945501', 'miR-141', 'PODXL'), # matches CRPC (castration-resitant prostate cancer)
}
# gene-mir: 36 F1: 0.88 *0.135 = 0,1188
# mir-gene: 230 F1: 0.94 *0.865 = 0,8131
# all: F1: 0.9319
sent2rels = defaultdict(set)
allSents = sentDB.get_all_sentences()
doc2Rels = defaultdict(set)
for mirID in mmuDB.ltype2rel:
for rel in mmuDB.ltype2rel[mirID]:
jel = rel.toJSON()
sent2rels[rel.assocSent].add( (rel.lid, rel.rid, rel.assocInt, rel.assocCat, rel.lPOS, rel.rPOS) )
docID = rel.assocSent.split(".")[0]
doc2Rels[docID].add((rel.lid, rel.rid, rel.assocInt) )
for mirID in hsaDB.ltype2rel:
for rel in hsaDB.ltype2rel[mirID]:
jel = rel.toJSON()
sent2rels[rel.assocSent].add( (rel.lid, rel.rid, rel.assocInt, rel.assocCat, rel.lPOS, rel.rPOS) )
#print(rel.assocSent, rel.lid, rel.rid, rel.assocInt, rel.assocCat, relSent)
docID = rel.assocSent.split(".")[0]
doc2Rels[docID].add((rel.lid, rel.rid, rel.assocInt) )
from collections import Counter
#TM, REF
elemCaseCounter = Counter()
with open("test_list.bydoc.tsv", "w") as fout:
print("doc", "lid", "rid", "assocInt", sep="\t", file=fout)
allDocIDs = set([x.split(".")[0] for x in allSents])
for docID in natsorted(doc2Rels):
for elems in doc2Rels[docID]:
print(docID, *elems, sep="\t", file=fout)
refOnly = refDict[docID].difference(doc2Rels[docID])
tmOnly = doc2Rels[docID].difference(refDict[docID])
tmOnly = [x for x in tmOnly if not (docID, x[0], x[1]) in tmRemoveTMErrors]
correct = refDict[docID].intersection(doc2Rels[docID])
for x in correct:
elemCaseCounter[(True, True)] += 1
for x in refOnly:
elemCaseCounter[(False, True)] += 1
for x in tmOnly:
elemCaseCounter[(True, False)] += 1
if len(doc2Rels[docID]) == 0 and len(refDict[docID]):
continue
if len(refOnly) == 0 and len(tmOnly) == 0:
continue
print(docID, len(correct), "REFONLY", refOnly)
print(docID, len(correct), "TMONLY", tmOnly)
print()
precision = elemCaseCounter[(True, True)] / (elemCaseCounter[(True, True)]+elemCaseCounter[(True, False)])
recall = elemCaseCounter[(True, True)] / (elemCaseCounter[(True, True)]+elemCaseCounter[(False, True)])
f1 = 2* precision * recall / (precision+recall)
#specificity = elemCaseCounter[(False, False)] / (elemCaseCounter[(True, False)] + elemCaseCounter[(False, False)])
print()
print()
print("True, True", elemCaseCounter[(True, True)])
print("TM Only", elemCaseCounter[(True, False)])
print("Ref Only", elemCaseCounter[(False, True)])
print()
print("precision", precision)
print("recall", recall)
#print("specificity", specificity)
print("f1", f1)
with open("test_list.tsv", "w") as fout:
print("lid", "rid", "assocInt", "assocCat", "lpos", "rpos", "int_eval", "cat_eval", "sentID", "sent", sep="\t", file=fout)
for sentID in natsorted([x for x in allSents]):
sent = allSents[sentID]
allElems = sent2rels.get(sentID, None)
if allElems == None:
allElems = [ ("", "", "", "", "", "") ]
for lid, rid, assocInt, assocCat, lpos, rpos in allElems:
print(lid, rid, assocInt, assocCat, lpos, rpos, "FALSE", "FALSE", sentID, sent, sep="\t", file=fout)
| [
"sys.path.insert",
"textdb.SentenceDB.SentenceDB.loadFromFile",
"collections.Counter",
"textdb.MiGenRelDB.MiGenRelDB.loadFromFile",
"collections.defaultdict",
"natsort.natsorted"
] | [((15, 70), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/mnt/f/dev/git/miRExplore/python/"""'], {}), "(0, '/mnt/f/dev/git/miRExplore/python/')\n", (30, 70), False, 'import os, sys\n'), ((247, 349), 'textdb.SentenceDB.SentenceDB.loadFromFile', 'SentenceDB.loadFromFile', (['"""./test/"""', '"""./development/pmid2sent"""'], {'returnAll': '(True)', 'redoPmid2Sent': '(True)'}), "('./test/', './development/pmid2sent', returnAll=\n True, redoPmid2Sent=True)\n", (270, 349), False, 'from textdb.SentenceDB import SentenceDB\n'), ((353, 451), 'textdb.MiGenRelDB.MiGenRelDB.loadFromFile', 'MiGenRelDB.loadFromFile', (['"""./aggregated_test/mirna_gene.mmu.pmid"""'], {'ltype': '"""mirna"""', 'rtype': '"""gene"""'}), "('./aggregated_test/mirna_gene.mmu.pmid', ltype=\n 'mirna', rtype='gene')\n", (376, 451), False, 'from textdb.MiGenRelDB import MiGenRelDB\n'), ((455, 553), 'textdb.MiGenRelDB.MiGenRelDB.loadFromFile', 'MiGenRelDB.loadFromFile', (['"""./aggregated_test/mirna_gene.hsa.pmid"""'], {'ltype': '"""mirna"""', 'rtype': '"""gene"""'}), "('./aggregated_test/mirna_gene.hsa.pmid', ltype=\n 'mirna', rtype='gene')\n", (478, 553), False, 'from textdb.MiGenRelDB import MiGenRelDB\n'), ((24929, 24945), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (24940, 24945), False, 'from collections import defaultdict\n'), ((28972, 28988), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (28983, 28988), False, 'from collections import defaultdict\n'), ((29040, 29056), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (29051, 29056), False, 'from collections import defaultdict\n'), ((29832, 29841), 'collections.Counter', 'Counter', ([], {}), '()\n', (29839, 29841), False, 'from collections import Counter\n'), ((30031, 30050), 'natsort.natsorted', 'natsorted', (['doc2Rels'], {}), '(doc2Rels)\n', (30040, 30050), False, 'from natsort import natsorted\n'), ((31778, 31810), 'natsort.natsorted', 'natsorted', (['[x for x in 
allSents]'], {}), '([x for x in allSents])\n', (31787, 31810), False, 'from natsort import natsorted\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.urls import path
from . import views
app_name = 'api'
urlpatterns = [
path('v1/status/', views.status, name='status'),
# organization
path('v1/organization/staff/', views.list_staff_members, name='list_staff_members'),
# Venues
path('v1/venues/', views.list_venues, name='list_venues'),
path('v1/venues/<slug>/', views.detail_venue, name='detail_venue'),
# Events
path('v1/events/<slug>/speakers/', views.list_speakers, name='list_speakers'),
path('v1/events/<slug>/talks/', views.list_talks, name='list_talks'),
path('v1/events/<slug>/tracks/', views.list_tracks, name='list_tracks'),
path('v1/events/<slug>/sponsors/', views.list_sponsors, name='list_sponsors'),
path('v1/events/<slug>/tags/', views.list_tags, name='list_tags'),
path('v1/events/<slug>/', views.detail_event, name='detail_event'),
path('v1/events/all/', views.all_events, name='all_events'),
path('v1/events/', views.active_events, name='active_events'),
# Quotes
path('v1/quotes/', views.random_quote, name='random_quote'),
]
| [
"django.urls.path"
] | [((136, 183), 'django.urls.path', 'path', (['"""v1/status/"""', 'views.status'], {'name': '"""status"""'}), "('v1/status/', views.status, name='status')\n", (140, 183), False, 'from django.urls import path\n'), ((209, 297), 'django.urls.path', 'path', (['"""v1/organization/staff/"""', 'views.list_staff_members'], {'name': '"""list_staff_members"""'}), "('v1/organization/staff/', views.list_staff_members, name=\n 'list_staff_members')\n", (213, 297), False, 'from django.urls import path\n'), ((311, 368), 'django.urls.path', 'path', (['"""v1/venues/"""', 'views.list_venues'], {'name': '"""list_venues"""'}), "('v1/venues/', views.list_venues, name='list_venues')\n", (315, 368), False, 'from django.urls import path\n'), ((374, 440), 'django.urls.path', 'path', (['"""v1/venues/<slug>/"""', 'views.detail_venue'], {'name': '"""detail_venue"""'}), "('v1/venues/<slug>/', views.detail_venue, name='detail_venue')\n", (378, 440), False, 'from django.urls import path\n'), ((459, 536), 'django.urls.path', 'path', (['"""v1/events/<slug>/speakers/"""', 'views.list_speakers'], {'name': '"""list_speakers"""'}), "('v1/events/<slug>/speakers/', views.list_speakers, name='list_speakers')\n", (463, 536), False, 'from django.urls import path\n'), ((542, 610), 'django.urls.path', 'path', (['"""v1/events/<slug>/talks/"""', 'views.list_talks'], {'name': '"""list_talks"""'}), "('v1/events/<slug>/talks/', views.list_talks, name='list_talks')\n", (546, 610), False, 'from django.urls import path\n'), ((616, 687), 'django.urls.path', 'path', (['"""v1/events/<slug>/tracks/"""', 'views.list_tracks'], {'name': '"""list_tracks"""'}), "('v1/events/<slug>/tracks/', views.list_tracks, name='list_tracks')\n", (620, 687), False, 'from django.urls import path\n'), ((693, 770), 'django.urls.path', 'path', (['"""v1/events/<slug>/sponsors/"""', 'views.list_sponsors'], {'name': '"""list_sponsors"""'}), "('v1/events/<slug>/sponsors/', views.list_sponsors, name='list_sponsors')\n", (697, 770), False, 'from 
django.urls import path\n'), ((776, 841), 'django.urls.path', 'path', (['"""v1/events/<slug>/tags/"""', 'views.list_tags'], {'name': '"""list_tags"""'}), "('v1/events/<slug>/tags/', views.list_tags, name='list_tags')\n", (780, 841), False, 'from django.urls import path\n'), ((847, 913), 'django.urls.path', 'path', (['"""v1/events/<slug>/"""', 'views.detail_event'], {'name': '"""detail_event"""'}), "('v1/events/<slug>/', views.detail_event, name='detail_event')\n", (851, 913), False, 'from django.urls import path\n'), ((919, 978), 'django.urls.path', 'path', (['"""v1/events/all/"""', 'views.all_events'], {'name': '"""all_events"""'}), "('v1/events/all/', views.all_events, name='all_events')\n", (923, 978), False, 'from django.urls import path\n'), ((984, 1045), 'django.urls.path', 'path', (['"""v1/events/"""', 'views.active_events'], {'name': '"""active_events"""'}), "('v1/events/', views.active_events, name='active_events')\n", (988, 1045), False, 'from django.urls import path\n'), ((1064, 1123), 'django.urls.path', 'path', (['"""v1/quotes/"""', 'views.random_quote'], {'name': '"""random_quote"""'}), "('v1/quotes/', views.random_quote, name='random_quote')\n", (1068, 1123), False, 'from django.urls import path\n')] |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import flavor_disabled
from nova.compute import flavors
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
"name": 'flavor 1',
"memory_mb": '256',
"root_gb": '10',
"disabled": False,
},
'flavor 2': {
"flavorid": '2',
"name": 'flavor 2',
"memory_mb": '512',
"root_gb": '20',
"disabled": True,
},
}
def fake_instance_type_get_by_flavor_id(flavorid):
return FAKE_FLAVORS['flavor %s' % flavorid]
def fake_instance_type_get_all(*args, **kwargs):
return FAKE_FLAVORS
class FlavorDisabledTest(test.TestCase):
content_type = 'application/json'
prefix = '%s:' % flavor_disabled.Flavor_disabled.alias
def setUp(self):
super(FlavorDisabledTest, self).setUp()
ext = ('nova.api.openstack.compute.contrib'
'.flavor_disabled.Flavor_disabled')
self.flags(osapi_compute_extension=[ext])
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(flavors, "get_all_types",
fake_instance_type_get_all)
self.stubs.Set(flavors,
"get_instance_type_by_flavor_id",
fake_instance_type_get_by_flavor_id)
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app())
return res
def _get_flavor(self, body):
return jsonutils.loads(body).get('flavor')
def _get_flavors(self, body):
return jsonutils.loads(body).get('flavors')
def assertFlavorDisabled(self, flavor, disabled):
self.assertEqual(str(flavor.get('%sdisabled' % self.prefix)), disabled)
def test_show(self):
url = '/v2/fake/flavors/1'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
self.assertFlavorDisabled(self._get_flavor(res.body), 'False')
def test_detail(self):
url = '/v2/fake/flavors/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
flavors = self._get_flavors(res.body)
self.assertFlavorDisabled(flavors[0], 'False')
self.assertFlavorDisabled(flavors[1], 'True')
class FlavorDisabledXmlTest(FlavorDisabledTest):
content_type = 'application/xml'
prefix = '{%s}' % flavor_disabled.Flavor_disabled.namespace
def _get_flavor(self, body):
return etree.XML(body)
def _get_flavors(self, body):
return etree.XML(body).getchildren()
| [
"nova.tests.api.openstack.fakes.wsgi_app",
"lxml.etree.XML",
"nova.tests.api.openstack.fakes.stub_out_nw_api",
"webob.Request.blank",
"nova.openstack.common.jsonutils.loads"
] | [((1728, 1761), 'nova.tests.api.openstack.fakes.stub_out_nw_api', 'fakes.stub_out_nw_api', (['self.stubs'], {}), '(self.stubs)\n', (1749, 1761), False, 'from nova.tests.api.openstack import fakes\n'), ((2060, 2084), 'webob.Request.blank', 'webob.Request.blank', (['url'], {}), '(url)\n', (2079, 2084), False, 'import webob\n'), ((3236, 3251), 'lxml.etree.XML', 'etree.XML', (['body'], {}), '(body)\n', (3245, 3251), False, 'from lxml import etree\n'), ((2166, 2182), 'nova.tests.api.openstack.fakes.wsgi_app', 'fakes.wsgi_app', ([], {}), '()\n', (2180, 2182), False, 'from nova.tests.api.openstack import fakes\n'), ((2252, 2273), 'nova.openstack.common.jsonutils.loads', 'jsonutils.loads', (['body'], {}), '(body)\n', (2267, 2273), False, 'from nova.openstack.common import jsonutils\n'), ((2338, 2359), 'nova.openstack.common.jsonutils.loads', 'jsonutils.loads', (['body'], {}), '(body)\n', (2353, 2359), False, 'from nova.openstack.common import jsonutils\n'), ((3302, 3317), 'lxml.etree.XML', 'etree.XML', (['body'], {}), '(body)\n', (3311, 3317), False, 'from lxml import etree\n')] |
import datetime
import unittest
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
from conflowgen.domain_models.distribution_models.truck_arrival_distribution import TruckArrivalDistribution
from conflowgen.domain_models.distribution_seeders import truck_arrival_distribution_seeder
from conflowgen.container_flow_data_generation_process.truck_for_export_containers_manager import \
TruckForExportContainersManager
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestTruckForExportContainersManager(unittest.TestCase):
def setUp(self) -> None:
"""Create container database in memory"""
sqlite_db = setup_sqlite_in_memory_db()
sqlite_db.create_tables([
TruckArrivalDistribution
])
truck_arrival_distribution_seeder.seed()
# Enables visualisation, helpful for probability distributions
# However, this blocks the execution of tests.
self.debug = False
self.manager = TruckForExportContainersManager()
self.manager.reload_distribution(
minimum_dwell_time_in_hours=3, # after ship arrival, at least 3h pass
maximum_dwell_time_in_hours=(3 * 24) # 3 days after ship arrival the container must have left the yard
)
def test_delivery_time_in_required_time_range_weekday(self):
container_departure_time = datetime.datetime(
year=2021, month=7, day=30, hour=11, minute=55
)
earliest_container_delivery = datetime.datetime(
year=2021, month=7, day=27, hour=11, minute=55
)
delivery_times = []
for i in range(1000):
delivery_time = self.manager._get_container_delivery_time(container_departure_time)
self.assertGreaterEqual(delivery_time, earliest_container_delivery,
"container must not arrive earlier than three days before export, "
f"but here we had {delivery_time} in round {i + 1}")
self.assertLessEqual(delivery_time, container_departure_time,
"container must not arrive later than their departure time "
f"but here we had {delivery_time} in round {i + 1}")
self.assertTrue(delivery_time.weekday() != 6,
f"containers do not arrive on Sundays, but here we had {delivery_time} in round {i + 1}")
delivery_times.append(delivery_time)
if self.debug:
sns.kdeplot(delivery_times, bw=0.01)
plt.show(block=True)
def test_delivery_time_in_required_time_range_with_sunday(self):
container_departure_time = datetime.datetime(
year=2021, month=8, day=2, hour=11, minute=30 # 11:30 -3h dwell time = 08:30 latest arrival
)
earliest_container_delivery = datetime.datetime(
year=2021, month=7, day=30, hour=11, minute=30
)
delivery_times = []
for i in range(1000):
delivery_time = self.manager._get_container_delivery_time(container_departure_time)
delivery_times.append(delivery_time)
self.assertGreaterEqual(delivery_time, earliest_container_delivery,
"container must not arrive earlier than three days before export, "
f"but here we had {delivery_time} in round {i + 1}")
self.assertLessEqual(delivery_time, container_departure_time,
"container must not arrive later than their departure time "
f"but here we had {delivery_time} in round {i + 1}")
self.assertTrue(delivery_time.weekday() != 6,
f"containers do not arrive on Sundays, but here we had {delivery_time} in round {i + 1}")
weekday_counter = Counter([delivery_time.weekday() for delivery_time in delivery_times])
self.assertIn(4, weekday_counter.keys(), "Probability (out of 1000 repetitions): "
"At least once a Friday must be counted (30.07.2021)")
self.assertIn(5, weekday_counter.keys(), "Probability (out of 1000 repetitions): "
"At least once a Saturday must be counted (31.07.2021)")
self.assertIn(0, weekday_counter.keys(), "Probability (out of 1000 repetitions): "
"At least once a Monday must be counted (02.08.2021)")
if self.debug:
sns.kdeplot(delivery_times, bw=0.01)
plt.show(block=True)
def test_delivery_time_in_required_time_range_with_sunday_and_at_different_day_times(self):
container_departure_time = datetime.datetime(
year=2021, month=8, day=2, hour=11, minute=2
)
earliest_container_delivery = datetime.datetime(
year=2021, month=7, day=30, hour=5, minute=0
)
delivery_times = []
for i in range(1000):
delivery_time = self.manager._get_container_delivery_time(container_departure_time)
delivery_times.append(delivery_time)
self.assertGreaterEqual(delivery_time, earliest_container_delivery,
"container must not arrive earlier than three days before export, "
f"but here we had {delivery_time} in round {i + 1}")
self.assertLessEqual(delivery_time, container_departure_time,
"container must not arrive later than their departure time "
f"but here we had {delivery_time} in round {i + 1}")
self.assertNotEqual(delivery_time.weekday(), 6,
f"containers do not arrive on Sundays, "
f"but here we had {delivery_time} in round {i + 1}")
weekday_counter = Counter([delivery_time.weekday() for delivery_time in delivery_times])
self.assertIn(4, weekday_counter.keys(), "Probability (out of 1000 repetitions): "
"At least once a Friday must be counted (30.07.2021)")
self.assertIn(5, weekday_counter.keys(), "Probability (out of 1000 repetitions): "
"At least once a Saturday must be counted (31.07.2021)")
self.assertIn(0, weekday_counter.keys(), "Probability (out of 1000 repetitions): "
"At least once a Monday must be counted (02.08.2021)")
if self.debug:
sns.kdeplot(delivery_times, bw=0.01)
plt.show(block=True)
| [
"datetime.datetime",
"conflowgen.container_flow_data_generation_process.truck_for_export_containers_manager.TruckForExportContainersManager",
"conflowgen.domain_models.distribution_seeders.truck_arrival_distribution_seeder.seed",
"seaborn.kdeplot",
"conflowgen.tests.substitute_peewee_database.setup_sqlite_i... | [((703, 730), 'conflowgen.tests.substitute_peewee_database.setup_sqlite_in_memory_db', 'setup_sqlite_in_memory_db', ([], {}), '()\n', (728, 730), False, 'from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db\n'), ((821, 861), 'conflowgen.domain_models.distribution_seeders.truck_arrival_distribution_seeder.seed', 'truck_arrival_distribution_seeder.seed', ([], {}), '()\n', (859, 861), False, 'from conflowgen.domain_models.distribution_seeders import truck_arrival_distribution_seeder\n'), ((1040, 1073), 'conflowgen.container_flow_data_generation_process.truck_for_export_containers_manager.TruckForExportContainersManager', 'TruckForExportContainersManager', ([], {}), '()\n', (1071, 1073), False, 'from conflowgen.container_flow_data_generation_process.truck_for_export_containers_manager import TruckForExportContainersManager\n'), ((1427, 1492), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2021)', 'month': '(7)', 'day': '(30)', 'hour': '(11)', 'minute': '(55)'}), '(year=2021, month=7, day=30, hour=11, minute=55)\n', (1444, 1492), False, 'import datetime\n'), ((1553, 1618), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2021)', 'month': '(7)', 'day': '(27)', 'hour': '(11)', 'minute': '(55)'}), '(year=2021, month=7, day=27, hour=11, minute=55)\n', (1570, 1618), False, 'import datetime\n'), ((2758, 2822), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2021)', 'month': '(8)', 'day': '(2)', 'hour': '(11)', 'minute': '(30)'}), '(year=2021, month=8, day=2, hour=11, minute=30)\n', (2775, 2822), False, 'import datetime\n'), ((2930, 2995), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2021)', 'month': '(7)', 'day': '(30)', 'hour': '(11)', 'minute': '(30)'}), '(year=2021, month=7, day=30, hour=11, minute=30)\n', (2947, 2995), False, 'import datetime\n'), ((4847, 4910), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2021)', 'month': 
'(8)', 'day': '(2)', 'hour': '(11)', 'minute': '(2)'}), '(year=2021, month=8, day=2, hour=11, minute=2)\n', (4864, 4910), False, 'import datetime\n'), ((4971, 5034), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2021)', 'month': '(7)', 'day': '(30)', 'hour': '(5)', 'minute': '(0)'}), '(year=2021, month=7, day=30, hour=5, minute=0)\n', (4988, 5034), False, 'import datetime\n'), ((2583, 2619), 'seaborn.kdeplot', 'sns.kdeplot', (['delivery_times'], {'bw': '(0.01)'}), '(delivery_times, bw=0.01)\n', (2594, 2619), True, 'import seaborn as sns\n'), ((2632, 2652), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (2640, 2652), True, 'import matplotlib.pyplot as plt\n'), ((4645, 4681), 'seaborn.kdeplot', 'sns.kdeplot', (['delivery_times'], {'bw': '(0.01)'}), '(delivery_times, bw=0.01)\n', (4656, 4681), True, 'import seaborn as sns\n'), ((4694, 4714), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (4702, 4714), True, 'import matplotlib.pyplot as plt\n'), ((6726, 6762), 'seaborn.kdeplot', 'sns.kdeplot', (['delivery_times'], {'bw': '(0.01)'}), '(delivery_times, bw=0.01)\n', (6737, 6762), True, 'import seaborn as sns\n'), ((6775, 6795), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (6783, 6795), True, 'import matplotlib.pyplot as plt\n')] |
'''
57-send Email with attachment
using gmail smtp,
You may need to pip install yagmail
'''
import yagmail
# Set up your gmail credentials.
# Any problems could be gmail blocking you
# or wrong password etc. I had to allow unsafe apps
# in my gmail security settings to get
# this to work.
YAG_SMTP = yagmail.SMTP(user="<EMAIL>", \
password="<PASSWORD>", host='smtp.gmail.com')
# email subject
SUBJECT = 'Yagmail Test'
# email content with attached file from current dir,
# or state file location.
CONTENTS = ['Hi Dude', 'image attached.', 'some-image.jpg']
# send mail
YAG_SMTP.send('<EMAIL>', SUBJECT, CONTENTS)
| [
"yagmail.SMTP"
] | [((312, 386), 'yagmail.SMTP', 'yagmail.SMTP', ([], {'user': '"""<EMAIL>"""', 'password': '"""<PASSWORD>"""', 'host': '"""smtp.gmail.com"""'}), "(user='<EMAIL>', password='<PASSWORD>', host='smtp.gmail.com')\n", (324, 386), False, 'import yagmail\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-20 12:20
import cms.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('links', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='link',
name='link_url',
field=cms.models.fields.LinkField(help_text='The URL where the user will be redirected.', max_length=1000, verbose_name='link URL'),
),
migrations.AlterField(
model_name='link',
name='new_window',
field=models.BooleanField(default=False, help_text='Open the page in a new window.'),
),
]
| [
"django.db.models.BooleanField"
] | [((621, 699), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Open the page in a new window."""'}), "(default=False, help_text='Open the page in a new window.')\n", (640, 699), False, 'from django.db import migrations, models\n')] |
# Import required libraries:
import json
import random
import pickle
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn import svm
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
# Data:
# Create enum class:
class Sentiment:
NEGATIVE = "NEGATIVE"
NEUTRAL = "NEUTRAL"
POSITIVE = "POSITIVE"
# Create data class to neaten-up code:
class Review:
def __init__(self, text, score):
self.text = text
self.score = score
self.sentiment = self.get_sentiment()
# Define sentiment function:
def get_sentiment(self):
if self.score <= 2:
return Sentiment.NEGATIVE
elif self.score == 3:
return Sentiment.NEUTRAL
else:
return Sentiment.POSITIVE
class ReviewContainer:
def __init__(self, reviews):
self.reviews = reviews
def get_text(self):
return [x.text for x in self.reviews]
def get_sentiment(self):
return [x.sentiment for x in self.reviews]
def even_distribute(self):
negative = list(filter(lambda x: x.sentiment == Sentiment.NEGATIVE, self.reviews))
neutral = list(filter(lambda x: x.sentiment == Sentiment.NEUTRAL, self.reviews))
positive = list(filter(lambda x: x.sentiment == Sentiment.POSITIVE, self.reviews))
positive_shrunk = positive[:len(negative)]
neutral_shrunk = neutral[:len(negative)]
self.reviews = negative + positive_shrunk + neutral_shrunk
random.shuffle(self.reviews)
# print(len(negative))
# print(len(positive_shrunk))
# print(len(neutral_shrunk))
# Check to see if working:
# print(negative[0].text)
# print(len(negative))
# print(neutral[0].text)
# print(len(neutral))
# print(positive[0].text)
# print(len(positive))
# Load Data:
# Save file to variable:
file_name = './data/sentiment/Books_small_10000.json'
# Create empty list for storing data:
reviews = []
# Open and read file:
with open(file_name) as f:
# Read file line by line:
for line in f:
# Print first line to screen:
# print("Line 1:")
# print(line)
# Print only reviewText to screen:
review = json.loads(line)
# print("Line 2:")
# print(review['reviewText'])
# print("Line 3:")
# print(review['overall'])
# Break out of loop to not print all reviews:
# break
# Add reviews and scores to list as an object:
reviews.append(Review(review['reviewText'], review['overall']))
# To check if working:
# print(reviews[5].sentiment)
# Prep Data:
# Define and load initial test data:
training, testing = train_test_split(reviews, test_size = 0.33, random_state = 42)
train_cont = ReviewContainer(training)
train_cont.even_distribute()
test_cont = ReviewContainer(testing)
test_cont.even_distribute()
# print(len(cont.reviews))
# Get amount of train and test data used:
# print(len(training))
# print(len(testing))
# Pass training set into statements:
# But first test the above function:
# print(training[0].text)
# print(training[0].score)
# print(training[0].sentiment)
# Pass data for prediction using list-comprehention:
# train_x = [x.text for x in training]
train_x = train_cont.get_text()
# train_y = [x.sentiment for x in training]
train_y = train_cont.get_sentiment()
# Check above statement:
# print(train_x[0])
# print(train_y[0])
# print(train_y.count(Sentiment.NEGATIVE))
# print(train_y.count(Sentiment.NEUTRAL))
# print(train_y.count(Sentiment.POSITIVE))
# Pass data for prediction using list-comprehention:
# test_x = [x.text for x in testing]
test_x = train_cont.get_text()
# test_y = [x.sentiment for x in testing]
test_y = train_cont.get_sentiment()
# Check above statement:
# print(test_x[0])
# print(test_y[0])
# print(test_y.count(Sentiment.NEGATIVE))
# print(test_y.count(Sentiment.NEUTRAL))
# print(test_y.count(Sentiment.POSITIVE))
# Pass testing set into statements:
# But first test the above function:
# print(testing[0].text)
# print(testing[0].score)
# print(testing[0].sentiment)
# Bags-of-words vectorization:
# Initialize variable:
# vectorizer = CountVectorizer()
vectorizer = TfidfVectorizer()
# Output defined training variable for useage:
# print(vectorizer.fit_transform(train_x))
train_x_vectors = vectorizer.fit_transform(train_x)
# print(train_x_vectors[0])
# print(train_x_vectors[0].toarray())
train_x_vectors_array = train_x_vectors.toarray()
# Output defined testing variable for useage:
# print(vectorizer.fit_transform(test_x))
test_x_vectors = vectorizer.transform(test_x)
# print(test_x_vectors[0])
# print(test_x_vectors[0].toarray())
test_x_vectors_array = test_x_vectors.toarray()
# Classification:
# Linear SVM:
# Define classifier:
clf_svm = svm.SVC(kernel='linear')
# Fit classifier to data:
clf_svm.fit(train_x_vectors, train_y)
# Prediciting classifier:
# print(train_x_vectors[0])
# print(clf_svm.predict(train_x_vectors[0]))
# Decision Tree:
# Define classifier:
clf_dec = DecisionTreeClassifier()
# Fit classifier to data:
clf_dec.fit(train_x_vectors, train_y)
# Prediciting classifier:
# print(train_x_vectors[0])
# print(clf_dec.predict(train_x_vectors[0]))
# Naive Bayes:
# Define classifier:
clf_gnb = GaussianNB()
# Fit classifier to data:
clf_gnb.fit(train_x_vectors_array, train_y)
# Prediciting classifier:
# print(train_x_vectors[0])
# print(clf_gnb.predict(train_x_vectors_array[0].reshape(1, -1)))
# Logistic Regression:
# Define classifier:
clf_lr = LogisticRegression()
# Fit classifier to data:
clf_lr.fit(train_x_vectors, train_y)
# Prediciting classifier:
# print(train_x_vectors[0])
# print(clf_lr.predict(train_x_vectors[0]))
# Evaluation:
# Mean accuracy:
# print(clf_svm.score(test_x_vectors, test_y))
# print(clf_dec.score(test_x_vectors, test_y))
# print(clf_gnb.score(test_x_vectors_array, test_y))
# print(clf_lr.score(test_x_vectors, test_y))
# F1 Score:
# print(f1_score(test_y, clf_svm.predict(test_x_vectors), average=None, labels=[Sentiment.NEGATIVE, Sentiment.NEUTRAL, Sentiment.POSITIVE]))
# print(f1_score(test_y, clf_dec.predict(test_x_vectors), average=None, labels=[Sentiment.NEGATIVE, Sentiment.NEUTRAL, Sentiment.POSITIVE]))
# print(f1_score(test_y, clf_gnb.predict(test_x_vectors_array), average=None, labels=[Sentiment.NEGATIVE, Sentiment.NEUTRAL, Sentiment.POSITIVE]))
# print(f1_score(test_y, clf_lr.predict(test_x_vectors), average=None, labels=[Sentiment.NEGATIVE, Sentiment.NEUTRAL, Sentiment.POSITIVE]))
# Checking ratios of sentiments:
# print(train_y.count(Sentiment.POSITIVE))
# print(train_y.count(Sentiment.NEUTRAL))
# print(train_y.count(Sentiment.NEGATIVE))
# Qualitative Analysis:
# operation_set = ["", "", ""]
# new_set = vectorizer.transform(operation_set)
# print(clf_svm.predict(new_set))
# Tuning the model with grid search:
parameters = {'kernel': ('linear', 'rbf'), 'C': (1, 2, 4, 8, 16, 32)}
svc = svm.SVC()
clf = GridSearchCV(svc, parameters, cv=5)
clf.fit(train_x_vectors, train_y)
# Saving Model:
with open('./models/sentiment_classifier.pkl', 'wb') as f:
pickle.dump(clf, f)
with open('./models/sentiment_classifier.pkl', 'rb') as f:
loaded_clf = pickle.load(f)
print(loaded_clf.predict(test_x_vectors[0]))
| [
"sklearn.model_selection.GridSearchCV",
"json.loads",
"pickle.dump",
"random.shuffle",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"pickle.load",
"sklearn.linear_model.LogisticRegression",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.naive_baye... | [((2918, 2976), 'sklearn.model_selection.train_test_split', 'train_test_split', (['reviews'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(reviews, test_size=0.33, random_state=42)\n', (2934, 2976), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((4432, 4449), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (4447, 4449), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((5020, 5044), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (5027, 5044), False, 'from sklearn import svm\n'), ((5257, 5281), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (5279, 5281), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((5492, 5504), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (5502, 5504), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((5749, 5769), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (5767, 5769), False, 'from sklearn.linear_model import LogisticRegression\n'), ((7151, 7160), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (7158, 7160), False, 'from sklearn import svm\n'), ((7167, 7202), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['svc', 'parameters'], {'cv': '(5)'}), '(svc, parameters, cv=5)\n', (7179, 7202), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((7317, 7336), 'pickle.dump', 'pickle.dump', (['clf', 'f'], {}), '(clf, f)\n', (7328, 7336), False, 'import pickle\n'), ((7414, 7428), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7425, 7428), False, 'import pickle\n'), ((1697, 1725), 'random.shuffle', 'random.shuffle', (['self.reviews'], {}), '(self.reviews)\n', (1711, 1725), False, 'import random\n'), ((2452, 2468), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2462, 2468), False, 'import json\n')] |
import math
import torch
import torch.nn as nn
from onmt.utils.misc import aeq
from onmt.utils.loss import LossComputeBase
def collapse_copy_scores(scores, batch, tgt_vocab, src_vocabs,
batch_dim=1, batch_offset=None):
"""
Given scores from an expanded dictionary
corresponeding to a batch, sums together copies,
with a dictionary word when it is ambiguous.
"""
offset = len(tgt_vocab)
for b in range(scores.size(batch_dim)):
blank = []
fill = []
batch_id = batch_offset[b] if batch_offset is not None else b
index = batch.indices.data[batch_id]
src_vocab = src_vocabs[index]
for i in range(1, len(src_vocab)):
sw = src_vocab.itos[i]
ti = tgt_vocab.stoi[sw]
if ti != 0:
blank.append(offset + i)
fill.append(ti)
if blank:
blank = torch.Tensor(blank).type_as(batch.indices.data)
fill = torch.Tensor(fill).type_as(batch.indices.data)
score = scores[:, b] if batch_dim == 1 else scores[b]
score.index_add_(1, fill, score.index_select(1, blank))
score.index_fill_(1, blank, 1e-10)
return scores
class CopyGenerator(nn.Module):
"""An implementation of pointer-generator networks
:cite:`DBLP:journals/corr/SeeLM17`.
These networks consider copying words
directly from the source sequence.
The copy generator is an extended version of the standard
generator that computes three values.
* :math:`p_{softmax}` the standard softmax over `tgt_dict`
* :math:`p(z)` the probability of copying a word from
the source
* :math:`p_{copy}` the probility of copying a particular word.
taken from the attention distribution directly.
The model returns a distribution over the extend dictionary,
computed as
:math:`p(w) = p(z=1) p_{copy}(w) + p(z=0) p_{softmax}(w)`
.. mermaid::
graph BT
A[input]
S[src_map]
B[softmax]
BB[switch]
C[attn]
D[copy]
O[output]
A --> B
A --> BB
S --> D
C --> D
D --> O
B --> O
BB --> O
Args:
input_size (int): size of input representation
output_size (int): size of output vocabulary
pad_idx (int)
"""
def __init__(self, input_size, output_size, pad_idx, conv_first):
super(CopyGenerator, self).__init__()
self.linear = nn.Linear(input_size, output_size)
self.linear_copy = nn.Linear(input_size, 1)
self.pad_idx = pad_idx
self.conv_first = conv_first
if conv_first:
self.conv_transpose = nn.ConvTranspose1d(1, 1, kernel_size=3, stride=3)
self.conv_transpose_pad1 = nn.ConvTranspose1d(1, 1, kernel_size=3, stride=3, output_padding=1)
self.conv_transpose_pad2 = nn.ConvTranspose1d(1, 1, kernel_size=3, stride=3, output_padding=2)
def forward(self, hidden, attn, src_map):
"""
Compute a distribution over the target dictionary
extended by the dynamic dictionary implied by copying
source words.
Args:
hidden (FloatTensor): hidden outputs ``(batch x tlen, input_size)``
attn (FloatTensor): attn for each ``(batch x tlen, input_size)``
src_map (FloatTensor):
A sparse indicator matrix mapping each source word to
its index in the "extended" vocab containing.
``(src_len, batch, extra_words)``
"""
if self.conv_first:
attn = torch.unsqueeze(attn, 1)
original_seq_len = src_map.shape[0]
if original_seq_len % 3 == 0:
attn = self.conv_transpose(attn)
elif original_seq_len % 3 == 1:
attn = self.conv_transpose_pad1(attn)
else:
attn = self.conv_transpose_pad2(attn)
attn = torch.squeeze(attn, 1)
# CHECKS
batch_by_tlen, _ = hidden.size()
batch_by_tlen_, slen = attn.size()
slen_, batch, cvocab = src_map.size()
aeq(batch_by_tlen, batch_by_tlen_)
aeq(slen, slen_)
# Original probabilities.
logits = self.linear(hidden)
logits[:, self.pad_idx] = -float('inf')
prob = torch.softmax(logits, 1)
# Probability of copying p(z=1) batch.
p_copy = torch.sigmoid(self.linear_copy(hidden))
# Probability of not copying: p_{word}(w) * (1 - p(z))
out_prob = torch.mul(prob, 1 - p_copy)
mul_attn = torch.mul(attn, p_copy)
copy_prob = torch.bmm(
mul_attn.view(-1, batch, slen).transpose(0, 1),
src_map.transpose(0, 1)
).transpose(0, 1)
copy_prob = copy_prob.contiguous().view(-1, cvocab)
return torch.cat([out_prob, copy_prob], 1)
class CopyGeneratorLoss(nn.Module):
"""Copy generator criterion."""
def __init__(self, vocab_size, force_copy, unk_index=0,
ignore_index=-100, eps=1e-20):
super(CopyGeneratorLoss, self).__init__()
self.force_copy = force_copy
self.eps = eps
self.vocab_size = vocab_size
self.ignore_index = ignore_index
self.unk_index = unk_index
def forward(self, scores, align, target):
"""
Args:
scores (FloatTensor): ``(batch_size*tgt_len)`` x dynamic vocab size
whose sum along dim 1 is less than or equal to 1, i.e. cols
softmaxed.
align (LongTensor): ``(batch_size x tgt_len)``
target (LongTensor): ``(batch_size x tgt_len)``
"""
# probabilities assigned by the model to the gold targets
vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze(1)
# probability of tokens copied from source
copy_ix = align.unsqueeze(1) + self.vocab_size
copy_tok_probs = scores.gather(1, copy_ix).squeeze(1)
# Set scores for unk to 0 and add eps
copy_tok_probs[align == self.unk_index] = 0
copy_tok_probs += self.eps # to avoid -inf logs
# find the indices in which you do not use the copy mechanism
non_copy = align == self.unk_index
if not self.force_copy:
non_copy = non_copy | (target != self.unk_index)
probs = torch.where(
non_copy, copy_tok_probs + vocab_probs, copy_tok_probs
)
if math.isnan(probs.log().sum()):
probs = probs - torch.min(probs) + self.eps
loss = -probs.log() # just NLLLoss; can the module be incorporated?
# Drop padding.
loss[target == self.ignore_index] = 0
return loss
class CopyGeneratorLossCompute(LossComputeBase):
"""Copy Generator Loss Computation."""
def __init__(self, criterion, generator, tgt_vocab, normalize_by_length):
super(CopyGeneratorLossCompute, self).__init__(criterion, generator)
self.tgt_vocab = tgt_vocab
self.normalize_by_length = normalize_by_length
def _make_shard_state(self, batch, output, range_, attns):
"""See base class for args description."""
if getattr(batch, "alignment", None) is None:
raise AssertionError("using -copy_attn you need to pass in "
"-dynamic_dict during preprocess stage.")
return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1], :, 0],
"copy_attn": attns.get("copy"),
"align": batch.alignment[range_[0] + 1: range_[1]]
}
def _compute_loss(self, batch, output, target, copy_attn, align):
"""Compute the loss.
The args must match :func:`self._make_shard_state()`.
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
copy_attn: the copy attention value.
align: the align info.
"""
target = target.view(-1)
align = align.view(-1)
scores = self.generator(
self._bottle(output), self._bottle(copy_attn), batch.src_map
)
loss = self.criterion(scores, align, target)
# print ("loss: {}".format(loss))
# this block does not depend on the loss value computed above
# and is used only for stats
scores_data = collapse_copy_scores(
self._unbottle(scores.clone(), batch.batch_size),
batch, self.tgt_vocab, batch.dataset.src_vocabs)
scores_data = self._bottle(scores_data)
# this block does not depend on the loss value computed above
# and is used only for stats
# Correct target copy token instead of <unk>
# tgt[i] = align[i] + len(tgt_vocab)
# for i such that tgt[i] == 0 and align[i] != 0
target_data = target.clone()
unk = self.criterion.unk_index
correct_mask = (target_data == unk) & (align != unk)
offset_align = align[correct_mask] + len(self.tgt_vocab)
target_data[correct_mask] += offset_align
# Compute sum of perplexities for stats
stats = self._stats(loss.sum().clone(), scores_data, target_data)
# this part looks like it belongs in CopyGeneratorLoss
if self.normalize_by_length:
# Compute Loss as NLL divided by seq length
tgt_lens = batch.tgt[:, :, 0].ne(self.padding_idx).sum(0).float()
# Compute Total Loss per sequence in batch
loss = loss.view(-1, batch.batch_size).sum(0)
# Divide by length of each sequence and sum
loss = torch.div(loss, tgt_lens).sum()
else:
loss = loss.sum()
return loss, stats
| [
"torch.mul",
"torch.nn.ConvTranspose1d",
"torch.unsqueeze",
"torch.Tensor",
"onmt.utils.misc.aeq",
"torch.softmax",
"torch.min",
"torch.nn.Linear",
"torch.squeeze",
"torch.div",
"torch.cat",
"torch.where"
] | [((2547, 2581), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'output_size'], {}), '(input_size, output_size)\n', (2556, 2581), True, 'import torch.nn as nn\n'), ((2609, 2633), 'torch.nn.Linear', 'nn.Linear', (['input_size', '(1)'], {}), '(input_size, 1)\n', (2618, 2633), True, 'import torch.nn as nn\n'), ((4213, 4247), 'onmt.utils.misc.aeq', 'aeq', (['batch_by_tlen', 'batch_by_tlen_'], {}), '(batch_by_tlen, batch_by_tlen_)\n', (4216, 4247), False, 'from onmt.utils.misc import aeq\n'), ((4256, 4272), 'onmt.utils.misc.aeq', 'aeq', (['slen', 'slen_'], {}), '(slen, slen_)\n', (4259, 4272), False, 'from onmt.utils.misc import aeq\n'), ((4416, 4440), 'torch.softmax', 'torch.softmax', (['logits', '(1)'], {}), '(logits, 1)\n', (4429, 4440), False, 'import torch\n'), ((4628, 4655), 'torch.mul', 'torch.mul', (['prob', '(1 - p_copy)'], {}), '(prob, 1 - p_copy)\n', (4637, 4655), False, 'import torch\n'), ((4675, 4698), 'torch.mul', 'torch.mul', (['attn', 'p_copy'], {}), '(attn, p_copy)\n', (4684, 4698), False, 'import torch\n'), ((4927, 4962), 'torch.cat', 'torch.cat', (['[out_prob, copy_prob]', '(1)'], {}), '([out_prob, copy_prob], 1)\n', (4936, 4962), False, 'import torch\n'), ((6440, 6507), 'torch.where', 'torch.where', (['non_copy', '(copy_tok_probs + vocab_probs)', 'copy_tok_probs'], {}), '(non_copy, copy_tok_probs + vocab_probs, copy_tok_probs)\n', (6451, 6507), False, 'import torch\n'), ((2759, 2808), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['(1)', '(1)'], {'kernel_size': '(3)', 'stride': '(3)'}), '(1, 1, kernel_size=3, stride=3)\n', (2777, 2808), True, 'import torch.nn as nn\n'), ((2848, 2915), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['(1)', '(1)'], {'kernel_size': '(3)', 'stride': '(3)', 'output_padding': '(1)'}), '(1, 1, kernel_size=3, stride=3, output_padding=1)\n', (2866, 2915), True, 'import torch.nn as nn\n'), ((2955, 3022), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['(1)', '(1)'], {'kernel_size': '(3)', 'stride': '(3)', 
'output_padding': '(2)'}), '(1, 1, kernel_size=3, stride=3, output_padding=2)\n', (2973, 3022), True, 'import torch.nn as nn\n'), ((3667, 3691), 'torch.unsqueeze', 'torch.unsqueeze', (['attn', '(1)'], {}), '(attn, 1)\n', (3682, 3691), False, 'import torch\n'), ((4022, 4044), 'torch.squeeze', 'torch.squeeze', (['attn', '(1)'], {}), '(attn, 1)\n', (4035, 4044), False, 'import torch\n'), ((921, 940), 'torch.Tensor', 'torch.Tensor', (['blank'], {}), '(blank)\n', (933, 940), False, 'import torch\n'), ((988, 1006), 'torch.Tensor', 'torch.Tensor', (['fill'], {}), '(fill)\n', (1000, 1006), False, 'import torch\n'), ((6609, 6625), 'torch.min', 'torch.min', (['probs'], {}), '(probs)\n', (6618, 6625), False, 'import torch\n'), ((9799, 9824), 'torch.div', 'torch.div', (['loss', 'tgt_lens'], {}), '(loss, tgt_lens)\n', (9808, 9824), False, 'import torch\n')] |
# Copyright (c) 2018, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import json
import os
from pathlib import Path
import requests
from django.conf import settings
from jinja2 import Environment, BaseLoader
from requests.exceptions import ConnectionError
from pan_cnc.lib import snippet_utils
class SaltUtil():
    """Minimal REST client for the salt-api 'provisioner' service.

    Provides PAM-based login with token caching, minion discovery, and
    deployment of rendered JSON payloads to the salt-api endpoint.
    """

    username = "saltuser"
    password = "<PASSWORD>"
    base_url = "http://provisioner:9000"
    login_url = '/login'
    # Kept for backward compatibility with any external readers; the cached
    # token actually used by this client lives in _auth_token below.
    auth_token = ''
    # Cached salt-api token; empty string means "not logged in yet".
    _auth_token = ''

    def __get_salt_auth_token(self):
        """Ensure a salt-api auth token is cached on this instance.

        Returns:
            bool: True when a token is available (cached or freshly
            obtained), False on any connection or login failure.
        """
        try:
            print('Here we go')
            # FIXME - check timeout here; the server-side token expires but
            # the cached copy is never invalidated.
            # BUG FIX: this used to test self.auth_token, which is never
            # assigned, so the token cached in _auth_token was never reused
            # and every call re-authenticated.
            if self._auth_token != '':
                print('we have an auth token already')
                return True

            print('No auth token found!')
            # BUG FIX: the password field was missing its opening quote,
            # which made json.loads() below raise on every login attempt.
            _auth_json = """
            {
                "username" : "%s",
                "password" : "%s",
                "eauth": "pam"
            }
            """ % (self.username, self.password)
            print(_auth_json)
            aj = json.loads(_auth_json)
            headers = {"Content-Type": "application/json"}
            url = self.base_url + self.login_url
            print('Using url: %s' % url)
            res = requests.post(url, json=aj, headers=headers)
            print(res)
            if res.status_code != 200:
                print(res.text)
                return False
            json_results = res.json()
            if 'return' not in json_results:
                return False
            # salt-api wraps the token in a one-element 'return' list.
            self._auth_token = json_results['return'][0]['token']
            print(self._auth_token)
            return True
        except ConnectionError as ce:
            print(ce)
            return False
        except Exception as e:
            print(e)
            return False

    def get_minion_list(self):
        """Return the list of minion ids known to the salt master.

        Returns an empty list when login or the /minions query fails.
        """
        print('HERE WE GO')
        minion_list = list()
        if not self.__get_salt_auth_token():
            print('Could not connect to provisioner')
            return minion_list
        url = self.base_url + '/minions'
        headers = {"X-Auth-Token": self._auth_token}
        res = requests.get(url, headers=headers)
        print(res)
        print(res.text)
        # Check the status before decoding: error responses may not carry a
        # JSON body, and res.json() would raise on them.
        if res.status_code != 200:
            print('Invalid return code')
            return minion_list
        minion_list_json = res.json()
        print(minion_list_json)
        if 'return' not in minion_list_json:
            print('Invalid return data')
            return minion_list
        # salt-api wraps results in a one-element 'return' list keyed by
        # minion id.
        return_dict = minion_list_json['return'][0]
        for m in list(return_dict.keys()):
            print(m)
            minion_list.append(m)
        return minion_list

    def deploy_template(self, template):
        """POST a rendered JSON template string to the salt-api root.

        Args:
            template (str): JSON document describing the salt call.

        Returns:
            str: raw response body on success, or an error message.
        """
        if not self.__get_salt_auth_token():
            print('Could not connect to provisioner')
            return 'Could not login to provisioner!'
        url = self.base_url + '/'
        headers = {"X-Auth-Token": self._auth_token}
        payload_json = json.loads(template)
        try:
            res = requests.post(url, json=payload_json, headers=headers)
            print(res.status_code)
            return res.text
        except ConnectionError as ce:
            print(ce)
            return 'Error during deploy'

    # def deploy_service(self, service, context):
    #     if not self.__get_salt_auth_token():
    #         print('Could not connect to provisioner')
    #         return 'No good'
    #
    #     snippets_dir = Path(os.path.join(settings.BASE_DIR, 'mssp', 'snippets'))
    #
    #     template = snippet_utils.render_snippet_template()
    #     try:
    #         for snippet in service['snippets']:
    #             template_name = snippet['file']
    #
    #             template_full_path = os.path.join(snippets_dir, service['name'], template_name)
    #             with open(template_full_path, 'r') as template:
    #                 template_string = template.read()
    #                 template_template = Environment(loader=BaseLoader()).from_string(template_string)
    #                 payload = template_template.render(context)
    #                 print(payload)
    #                 url = self.base_url + '/'
    #                 headers = {"X-Auth-Token": self._auth_token}
    #                 payload_json = json.loads(payload)
    #                 try:
    #                     res = requests.post(url, json=payload_json, headers=headers)
    #                     print(res.status_code)
    #                     return res.text
    #                 except ConnectionError as ce:
    #                     print(ce)
    #                     return 'Error during deploy'
    #
    #     except Exception as e:
    #         print(e)
    #         print('Caught an error deploying service')
| [
"json.loads",
"requests.post",
"requests.get"
] | [((2893, 2927), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (2905, 2927), False, 'import requests\n'), ((3751, 3771), 'json.loads', 'json.loads', (['template'], {}), '(template)\n', (3761, 3771), False, 'import json\n'), ((1731, 1753), 'json.loads', 'json.loads', (['_auth_json'], {}), '(_auth_json)\n', (1741, 1753), False, 'import json\n'), ((1922, 1966), 'requests.post', 'requests.post', (['url'], {'json': 'aj', 'headers': 'headers'}), '(url, json=aj, headers=headers)\n', (1935, 1966), False, 'import requests\n'), ((3803, 3857), 'requests.post', 'requests.post', (['url'], {'json': 'payload_json', 'headers': 'headers'}), '(url, json=payload_json, headers=headers)\n', (3816, 3857), False, 'import requests\n')] |
import sys
import click
from utilities_common.cli import AbbreviationGroup, pass_db
#
# 'feature' group ('config feature ...')
#
@click.group(cls=AbbreviationGroup, name='feature', invoke_without_command=False)
def feature():
    """Configure features"""
    # Container group only; the 'state' and 'autorestart' subcommands below
    # do the actual work.  The docstring doubles as the CLI help text.
    pass
#
# 'state' command ('config feature state ...')
#
@feature.command('state', short_help="Enable/disable a feature")
@click.argument('name', metavar='<feature-name>', required=True)
@click.argument('state', metavar='<state>', required=True, type=click.Choice(["enabled", "disabled"]))
@pass_db
def feature_state(db, name, state):
    """Enable/disable a feature"""
    # Look the feature up in CONFIG_DB; an empty dict means it is unknown.
    feature_cfg = db.cfgdb.get_entry('FEATURE', name)
    if not feature_cfg:
        click.echo("Feature '{}' doesn't exist".format(name))
        sys.exit(1)

    # Features pinned on by the platform cannot be toggled by the user.
    if feature_cfg['state'] == "always_enabled":
        click.echo("Feature '{}' state is always enabled and can not be modified".format(name))
        return

    # Persist the requested state back to CONFIG_DB.
    db.cfgdb.mod_entry('FEATURE', name, {'state': state})
#
# 'autorestart' command ('config feature autorestart ...')
#
@feature.command(name='autorestart', short_help="Enable/disable autosrestart of a feature")
@click.argument('name', metavar='<feature-name>', required=True)
@click.argument('autorestart', metavar='<autorestart>', required=True, type=click.Choice(["enabled", "disabled"]))
@pass_db
def feature_autorestart(db, name, autorestart):
    """Enable/disable autorestart of a feature"""
    # Look the feature up in CONFIG_DB; an empty dict means it is unknown.
    feature_cfg = db.cfgdb.get_entry('FEATURE', name)
    if not feature_cfg:
        click.echo("Feature '{}' doesn't exist".format(name))
        sys.exit(1)

    # Auto-restart pinned on by the platform cannot be toggled by the user.
    if feature_cfg['auto_restart'] == "always_enabled":
        click.echo("Feature '{}' auto-restart is always enabled and can not be modified".format(name))
        return

    # Persist the requested auto-restart policy back to CONFIG_DB.
    db.cfgdb.mod_entry('FEATURE', name, {'auto_restart': autorestart})
| [
"click.group",
"click.Choice",
"click.argument",
"sys.exit"
] | [((132, 217), 'click.group', 'click.group', ([], {'cls': 'AbbreviationGroup', 'name': '"""feature"""', 'invoke_without_command': '(False)'}), "(cls=AbbreviationGroup, name='feature', invoke_without_command=False\n )\n", (143, 217), False, 'import click\n'), ((384, 447), 'click.argument', 'click.argument', (['"""name"""'], {'metavar': '"""<feature-name>"""', 'required': '(True)'}), "('name', metavar='<feature-name>', required=True)\n", (398, 447), False, 'import click\n'), ((1166, 1229), 'click.argument', 'click.argument', (['"""name"""'], {'metavar': '"""<feature-name>"""', 'required': '(True)'}), "('name', metavar='<feature-name>', required=True)\n", (1180, 1229), False, 'import click\n'), ((778, 789), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (786, 789), False, 'import sys\n'), ((512, 549), 'click.Choice', 'click.Choice', (["['enabled', 'disabled']"], {}), "(['enabled', 'disabled'])\n", (524, 549), False, 'import click\n'), ((1599, 1610), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1607, 1610), False, 'import sys\n'), ((1306, 1343), 'click.Choice', 'click.Choice', (["['enabled', 'disabled']"], {}), "(['enabled', 'disabled'])\n", (1318, 1343), False, 'import click\n')] |
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For Capacity Weigher."""
from datetime import datetime
from unittest import mock
import ddt
from cinder.common import constants
from cinder import context
from cinder.scheduler import weights
from cinder.tests.unit.scheduler import fakes
from cinder.tests.unit import test
from cinder.volume import volume_utils
@ddt.ddt
class CapacityWeigherTestCase(test.TestCase):
    """Tests for the capacity weigher used by the filter scheduler.

    The four unknown/infinite-capacity tests differed only in the two
    capacity values injected for host5, so their shared body has been
    factored into ``_check_host5_extreme``.
    """

    def setUp(self):
        super(CapacityWeigherTestCase, self).setUp()
        self.host_manager = fakes.FakeHostManager()
        self.weight_handler = weights.OrderedHostWeightHandler(
            'cinder.scheduler.weights')

    def _get_weighed_hosts(self, hosts, weight_properties=None):
        """Run CapacityWeigher over ``hosts`` and return the weighed list."""
        if weight_properties is None:
            weight_properties = {'size': 1}
        return self.weight_handler.get_weighed_objects(
            [weights.capacity.CapacityWeigher],
            hosts,
            weight_properties)

    @mock.patch('cinder.db.sqlalchemy.api.service_get_all')
    def _get_all_backends(self, _mock_service_get_all, disabled=False):
        """Build backend states from the fake host-manager DB fixtures."""
        ctxt = context.get_admin_context()
        fakes.mock_host_manager_db_calls(_mock_service_get_all,
                                         disabled=disabled)
        backend_states = self.host_manager.get_all_backend_states(ctxt)
        _mock_service_get_all.assert_called_once_with(
            ctxt,
            None,  # backend_match_level
            topic=constants.VOLUME_TOPIC, frozen=False, disabled=disabled)
        return backend_states

    # If thin and thin_provisioning_support are True,
    # use the following formula:
    # free = (total * host_state.max_over_subscription_ratio
    #        - host_state.provisioned_capacity_gb
    #        - math.floor(total * reserved))
    # Otherwise, use the following formula:
    # free = free_space - math.floor(total * reserved)
    @ddt.data(
        {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
         'winner': 'host2'},
        {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}},
         'winner': 'host1'},
        {'volume_type': {'extra_specs': {}},
         'winner': 'host2'},
        {'volume_type': {},
         'winner': 'host2'},
        {'volume_type': None,
         'winner': 'host2'},
    )
    @ddt.unpack
    def test_default_of_spreading_first(self, volume_type, winner):
        """Default multiplier (1.0) spreads: largest free capacity wins."""
        backend_info_list = self._get_all_backends()

        # Results for the 1st test
        # {'provisioning:type': 'thin'}:
        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=1024-math.floor(1024*0.1)=922
        #        Norm=0.837837837838
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=2048*1.5-1748-math.floor(2048*0.1)=1120
        #        Norm=1.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=256-512*0=256
        #        Norm=0.292383292383
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=2048*1.0-2047-math.floor(2048*0.05)=-101
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-1
        #        Norm=0.0819000819001

        # so, host2 should win:
        weight_properties = {
            'size': 1,
            'volume_type': volume_type,
        }
        weighed_host = self._get_weighed_hosts(
            backend_info_list,
            weight_properties=weight_properties)[0]
        self.assertEqual(1.0, weighed_host.weight)
        self.assertEqual(winner,
                         volume_utils.extract_host(weighed_host.obj.host))

    @ddt.data(
        {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
         'winner': 'host4'},
        {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}},
         'winner': 'host2'},
        {'volume_type': {'extra_specs': {}},
         'winner': 'host4'},
        {'volume_type': {},
         'winner': 'host4'},
        {'volume_type': None,
         'winner': 'host4'},
    )
    @ddt.unpack
    def test_capacity_weight_multiplier1(self, volume_type, winner):
        """Negative multiplier (-1.0) stacks: smallest free capacity wins."""
        self.flags(capacity_weight_multiplier=-1.0)
        backend_info_list = self._get_all_backends()

        # Results for the 1st test
        # {'provisioning:type': 'thin'}:
        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=-(1024-math.floor(1024*0.1))=-922
        #        Norm=-0.00829542413701
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-0.00990099009901
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=-(256-512*0)=-256
        #        Norm=--0.002894884083
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=-(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-float('inf')
        #        Norm=-1.0

        # so, host4 should win:
        weight_properties = {
            'size': 1,
            'volume_type': volume_type,
        }
        weighed_host = self._get_weighed_hosts(
            backend_info_list,
            weight_properties=weight_properties)[0]
        self.assertEqual(0.0, weighed_host.weight)
        self.assertEqual(winner,
                         volume_utils.extract_host(weighed_host.obj.host))

    @ddt.data(
        {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
         'winner': 'host2'},
        {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}},
         'winner': 'host1'},
        {'volume_type': {'extra_specs': {}},
         'winner': 'host2'},
        {'volume_type': {},
         'winner': 'host2'},
        {'volume_type': None,
         'winner': 'host2'},
    )
    @ddt.unpack
    def test_capacity_weight_multiplier2(self, volume_type, winner):
        """Multiplier 2.0 doubles every weight; ranking is unchanged."""
        self.flags(capacity_weight_multiplier=2.0)
        backend_info_list = self._get_all_backends()

        # Results for the 1st test
        # {'provisioning:type': 'thin'}:
        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=(1024-math.floor(1024*0.1))*2=1844
        #        Norm=1.67567567568
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240
        #        Norm=2.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=(256-512*0)*2=512
        #        Norm=0.584766584767
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-2
        #        Norm=0.1638001638

        # so, host2 should win:
        weight_properties = {
            'size': 1,
            'volume_type': volume_type,
        }
        weighed_host = self._get_weighed_hosts(
            backend_info_list,
            weight_properties=weight_properties)[0]
        self.assertEqual(1.0 * 2, weighed_host.weight)
        self.assertEqual(winner,
                         volume_utils.extract_host(weighed_host.obj.host))

    def test_capacity_weight_no_unknown_or_infinite(self):
        """With only well-defined capacities, host4 is best, host2 worst."""
        self.flags(capacity_weight_multiplier=-1.0)
        del self.host_manager.service_states['host5']
        backend_info_list = self._get_all_backends()

        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=(1024-math.floor(1024*0.1))=-922
        #        Norm=-0.837837837838
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-1.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=(256-512*0)=-256
        #        Norm=-0.292383292383
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0

        # so, host4 should win:
        weighed_hosts = self._get_weighed_hosts(backend_info_list)
        best_host = weighed_hosts[0]
        self.assertEqual(0.0, best_host.weight)
        self.assertEqual('host4',
                         volume_utils.extract_host(best_host.obj.host))
        # and host2 is the worst:
        worst_host = weighed_hosts[-1]
        self.assertEqual(-1.0, worst_host.weight)
        self.assertEqual('host2',
                         volume_utils.extract_host(worst_host.obj.host))

    def _check_host5_extreme(self, total_capacity_gb, free_capacity_gb):
        """Shared body for the 'unknown'/'infinite' capacity tests.

        Injects host5 with the given total/free capacities and verifies
        that, with a negative multiplier, every host with a well-defined
        capacity ranks ahead of it: host4 must be best (weight 0.0) and
        host5 worst (weight -1.0).
        """
        self.flags(capacity_weight_multiplier=-1.0)
        self.host_manager.service_states['host5'] = {
            'total_capacity_gb': total_capacity_gb,
            'free_capacity_gb': free_capacity_gb,
            'allocated_capacity_gb': 1548,
            'provisioned_capacity_gb': 1548,
            'max_over_subscription_ratio': 1.0,
            'thin_provisioning_support': True,
            'thick_provisioning_support': False,
            'reserved_percentage': 5,
            'timestamp': datetime.utcnow()}
        backend_info_list = self._get_all_backends()

        # host1: free=(1024-math.floor(1024*0.1))=-922
        #        Norm=-0.00829542413701
        # host2: free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-0.00990099009901
        # host3: free=(256-512*0)=-256           Norm=-0.002894884083
        # host4: free=(2048*1.0-2047-math.floor(2048*0.05))=101  Norm=0.0
        # host5: unknown/infinite capacity       Norm=-1.0
        weighed_hosts = self._get_weighed_hosts(backend_info_list)
        best_host = weighed_hosts[0]
        self.assertEqual(0.0, best_host.weight)
        self.assertEqual('host4',
                         volume_utils.extract_host(best_host.obj.host))
        # and host5 is the worst:
        worst_host = weighed_hosts[-1]
        self.assertEqual(-1.0, worst_host.weight)
        self.assertEqual('host5',
                         volume_utils.extract_host(worst_host.obj.host))

    def test_capacity_weight_free_unknown(self):
        self._check_host5_extreme(3000, 'unknown')

    def test_capacity_weight_cap_unknown(self):
        self._check_host5_extreme('unknown', 3000)

    def test_capacity_weight_free_infinite(self):
        self._check_host5_extreme(3000, 'infinite')

    def test_capacity_weight_cap_infinite(self):
        self._check_host5_extreme('infinite', 3000)
| [
"cinder.tests.unit.scheduler.fakes.FakeHostManager",
"cinder.scheduler.weights.OrderedHostWeightHandler",
"cinder.tests.unit.scheduler.fakes.mock_host_manager_db_calls",
"datetime.datetime.utcnow",
"cinder.context.get_admin_context",
"ddt.data",
"unittest.mock.patch",
"cinder.volume.volume_utils.extra... | [((1558, 1612), 'unittest.mock.patch', 'mock.patch', (['"""cinder.db.sqlalchemy.api.service_get_all"""'], {}), "('cinder.db.sqlalchemy.api.service_get_all')\n", (1568, 1612), False, 'from unittest import mock\n'), ((2494, 2828), 'ddt.data', 'ddt.data', (["{'volume_type': {'extra_specs': {'provisioning:type': 'thin'}}, 'winner':\n 'host2'}", "{'volume_type': {'extra_specs': {'provisioning:type': 'thick'}}, 'winner':\n 'host1'}", "{'volume_type': {'extra_specs': {}}, 'winner': 'host2'}", "{'volume_type': {}, 'winner': 'host2'}", "{'volume_type': None, 'winner': 'host2'}"], {}), "({'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},\n 'winner': 'host2'}, {'volume_type': {'extra_specs': {\n 'provisioning:type': 'thick'}}, 'winner': 'host1'}, {'volume_type': {\n 'extra_specs': {}}, 'winner': 'host2'}, {'volume_type': {}, 'winner':\n 'host2'}, {'volume_type': None, 'winner': 'host2'})\n", (2502, 2828), False, 'import ddt\n'), ((4318, 4652), 'ddt.data', 'ddt.data', (["{'volume_type': {'extra_specs': {'provisioning:type': 'thin'}}, 'winner':\n 'host4'}", "{'volume_type': {'extra_specs': {'provisioning:type': 'thick'}}, 'winner':\n 'host2'}", "{'volume_type': {'extra_specs': {}}, 'winner': 'host4'}", "{'volume_type': {}, 'winner': 'host4'}", "{'volume_type': None, 'winner': 'host4'}"], {}), "({'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},\n 'winner': 'host4'}, {'volume_type': {'extra_specs': {\n 'provisioning:type': 'thick'}}, 'winner': 'host2'}, {'volume_type': {\n 'extra_specs': {}}, 'winner': 'host4'}, {'volume_type': {}, 'winner':\n 'host4'}, {'volume_type': None, 'winner': 'host4'})\n", (4326, 4652), False, 'import ddt\n'), ((6264, 6598), 'ddt.data', 'ddt.data', (["{'volume_type': {'extra_specs': {'provisioning:type': 'thin'}}, 'winner':\n 'host2'}", "{'volume_type': {'extra_specs': {'provisioning:type': 'thick'}}, 'winner':\n 'host1'}", "{'volume_type': {'extra_specs': {}}, 'winner': 'host2'}", 
"{'volume_type': {}, 'winner': 'host2'}", "{'volume_type': None, 'winner': 'host2'}"], {}), "({'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},\n 'winner': 'host2'}, {'volume_type': {'extra_specs': {\n 'provisioning:type': 'thick'}}, 'winner': 'host1'}, {'volume_type': {\n 'extra_specs': {}}, 'winner': 'host2'}, {'volume_type': {}, 'winner':\n 'host2'}, {'volume_type': None, 'winner': 'host2'})\n", (6272, 6598), False, 'import ddt\n'), ((1122, 1145), 'cinder.tests.unit.scheduler.fakes.FakeHostManager', 'fakes.FakeHostManager', ([], {}), '()\n', (1143, 1145), False, 'from cinder.tests.unit.scheduler import fakes\n'), ((1176, 1236), 'cinder.scheduler.weights.OrderedHostWeightHandler', 'weights.OrderedHostWeightHandler', (['"""cinder.scheduler.weights"""'], {}), "('cinder.scheduler.weights')\n", (1208, 1236), False, 'from cinder.scheduler import weights\n'), ((1700, 1727), 'cinder.context.get_admin_context', 'context.get_admin_context', ([], {}), '()\n', (1725, 1727), False, 'from cinder import context\n'), ((1736, 1810), 'cinder.tests.unit.scheduler.fakes.mock_host_manager_db_calls', 'fakes.mock_host_manager_db_calls', (['_mock_service_get_all'], {'disabled': 'disabled'}), '(_mock_service_get_all, disabled=disabled)\n', (1768, 1810), False, 'from cinder.tests.unit.scheduler import fakes\n'), ((4262, 4310), 'cinder.volume.volume_utils.extract_host', 'volume_utils.extract_host', (['weighed_host.obj.host'], {}), '(weighed_host.obj.host)\n', (4287, 4310), False, 'from cinder.volume import volume_utils\n'), ((6208, 6256), 'cinder.volume.volume_utils.extract_host', 'volume_utils.extract_host', (['weighed_host.obj.host'], {}), '(weighed_host.obj.host)\n', (6233, 6256), False, 'from cinder.volume import volume_utils\n'), ((8101, 8149), 'cinder.volume.volume_utils.extract_host', 'volume_utils.extract_host', (['weighed_host.obj.host'], {}), '(weighed_host.obj.host)\n', (8126, 8149), False, 'from cinder.volume import volume_utils\n'), ((9309, 9354), 
'cinder.volume.volume_utils.extract_host', 'volume_utils.extract_host', (['best_host.obj.host'], {}), '(best_host.obj.host)\n', (9334, 9354), False, 'from cinder.volume import volume_utils\n'), ((9538, 9584), 'cinder.volume.volume_utils.extract_host', 'volume_utils.extract_host', (['worst_host.obj.host'], {}), '(worst_host.obj.host)\n', (9563, 9584), False, 'from cinder.volume import volume_utils\n'), ((10119, 10136), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (10134, 10136), False, 'from datetime import datetime\n'), ((11225, 11270), 'cinder.volume.volume_utils.extract_host', 'volume_utils.extract_host', (['best_host.obj.host'], {}), '(best_host.obj.host)\n', (11250, 11270), False, 'from cinder.volume import volume_utils\n'), ((11454, 11500), 'cinder.volume.volume_utils.extract_host', 'volume_utils.extract_host', (['worst_host.obj.host'], {}), '(worst_host.obj.host)\n', (11479, 11500), False, 'from cinder.volume import volume_utils\n'), ((12034, 12051), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (12049, 12051), False, 'from datetime import datetime\n'), ((13140, 13185), 'cinder.volume.volume_utils.extract_host', 'volume_utils.extract_host', (['best_host.obj.host'], {}), '(best_host.obj.host)\n', (13165, 13185), False, 'from cinder.volume import volume_utils\n'), ((13369, 13415), 'cinder.volume.volume_utils.extract_host', 'volume_utils.extract_host', (['worst_host.obj.host'], {}), '(worst_host.obj.host)\n', (13394, 13415), False, 'from cinder.volume import volume_utils\n'), ((13952, 13969), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (13967, 13969), False, 'from datetime import datetime\n'), ((15059, 15104), 'cinder.volume.volume_utils.extract_host', 'volume_utils.extract_host', (['best_host.obj.host'], {}), '(best_host.obj.host)\n', (15084, 15104), False, 'from cinder.volume import volume_utils\n'), ((15288, 15334), 'cinder.volume.volume_utils.extract_host', 'volume_utils.extract_host', 
(['worst_host.obj.host'], {}), '(worst_host.obj.host)\n', (15313, 15334), False, 'from cinder.volume import volume_utils\n'), ((15870, 15887), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (15885, 15887), False, 'from datetime import datetime\n'), ((16977, 17022), 'cinder.volume.volume_utils.extract_host', 'volume_utils.extract_host', (['best_host.obj.host'], {}), '(best_host.obj.host)\n', (17002, 17022), False, 'from cinder.volume import volume_utils\n'), ((17206, 17252), 'cinder.volume.volume_utils.extract_host', 'volume_utils.extract_host', (['worst_host.obj.host'], {}), '(worst_host.obj.host)\n', (17231, 17252), False, 'from cinder.volume import volume_utils\n')] |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cupy import (arange, array, asarray, atleast_1d, intc, integer,
isscalar, issubdtype, take, unique, where)
from bisect import bisect_left
def next_fast_len(target):
    """
    Find the next fast size of input data to `fft`, for zero-padding, etc.

    SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this
    returns the next composite of the prime factors 2, 3, and 5 which is
    greater than or equal to `target`.  (These are also known as 5-smooth
    numbers, regular numbers, or Hamming numbers.)

    Parameters
    ----------
    target : int
        Length to start searching from.  Must be a positive integer.

    Returns
    -------
    out : int
        The first 5-smooth number greater than or equal to `target`.

    Examples
    --------
    Zero-padding a prime-length input to the next 5-smooth length makes the
    FFT dramatically cheaper:

    >>> next_fast_len(10007)
    10125
    """
    # Precomputed 5-smooth numbers up to 10000; common FFT sizes are
    # answered with a single table lookup.
    hams = (8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48,
            50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128,
            135, 144, 150, 160, 162, 180, 192, 200, 216, 225, 240, 243, 250,
            256, 270, 288, 300, 320, 324, 360, 375, 384, 400, 405, 432, 450,
            480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, 729,
            750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125,
            1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536,
            1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025, 2048, 2160,
            2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700, 2880, 2916,
            3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840,
            3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000,
            5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400,
            6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776, 8000, 8100,
            8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000)

    target = int(target)

    # Everything up to 6 is already 5-smooth.
    if target <= 6:
        return target

    # Exact powers of two are already optimal sizes.
    if not (target & (target - 1)):
        return target

    # Small sizes: binary-search the precomputed table.
    if target <= hams[-1]:
        return hams[bisect_left(hams, target)]

    # Large sizes: enumerate candidates of the form 2**a * 3**b * 5**c
    # that are >= target and keep the smallest one seen.
    best = float('inf')
    pow5 = 1
    while pow5 < target:
        pow35 = pow5
        while pow35 < target:
            # quotient = ceil(target / pow35), without going through float.
            quotient = -(-target // pow35)
            # Multiply by the smallest power of two >= quotient.
            candidate = pow35 * (1 << (quotient - 1).bit_length())
            if candidate == target:
                return target
            best = min(best, candidate)
            pow35 *= 3
        if pow35 == target:
            return target
        best = min(best, pow35)
        pow5 *= 5
    if pow5 == target:
        return target
    return min(best, pow5)
def _init_nd_shape_and_axes(x, shape, axes):
"""Handle shape and axes arguments for n-dimensional transforms.
Returns the shape and axes in a standard form, taking into account negative
values and checking for various potential errors.
Parameters
----------
x : array_like
The input array.
shape : int or array_like of ints or None
The shape of the result. If both `shape` and `axes` (see below) are
None, `shape` is ``x.shape``; if `shape` is None but `axes` is
not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``.
If `shape` is -1, the size of the corresponding dimension of `x` is
used.
axes : int or array_like of ints or None
Axes along which the calculation is computed.
The default is over all axes.
Negative indices are automatically converted to their positive
counterpart.
Returns
-------
shape : array
The shape of the result. It is a 1D integer array.
axes : array
The shape of the result. It is a 1D integer array.
"""
x = asarray(x)
noshape = shape is None
noaxes = axes is None
if noaxes:
axes = arange(x.ndim, dtype=intc)
else:
axes = atleast_1d(axes)
if axes.size == 0:
axes = axes.astype(intc)
if not axes.ndim == 1:
raise ValueError("when given, axes values must be a scalar or vector")
if not issubdtype(axes.dtype, integer):
raise ValueError("when given, axes values must be integers")
axes = where(axes < 0, axes + x.ndim, axes)
if axes.size != 0 and (axes.max() >= x.ndim or axes.min() < 0):
raise ValueError("axes exceeds dimensionality of input")
if axes.size != 0 and unique(axes).shape != axes.shape:
raise ValueError("all axes must be unique")
if not noshape:
shape = atleast_1d(shape)
elif isscalar(x):
shape = array([], dtype=intc)
elif noaxes:
shape = array(x.shape, dtype=intc)
else:
shape = take(x.shape, axes)
if shape.size == 0:
shape = shape.astype(intc)
if shape.ndim != 1:
raise ValueError("when given, shape values must be a scalar or vector")
if not issubdtype(shape.dtype, integer):
raise ValueError("when given, shape values must be integers")
if axes.shape != shape.shape:
raise ValueError("when given, axes and shape arguments"
" have to be of the same length")
shape = where(shape == -1, array(x.shape)[axes], shape)
if shape.size != 0 and (shape < 1).any():
raise ValueError(
"invalid number of data points ({0}) specified".format(shape))
return shape, axes
def _init_nd_shape_and_axes_sorted(x, shape, axes):
    """Like `_init_nd_shape_and_axes`, but with the axes sorted ascending.

    The shape array is reordered to stay in correspondence with the sorted
    axes. When `axes` is None the default (already ascending) axes are
    returned untouched.

    Parameters
    ----------
    x : array_like
        The input array.
    shape : int or array_like of ints or None
        The shape of the result; see `_init_nd_shape_and_axes`.
    axes : int or array_like of ints or None
        Axes along which the calculation is computed; see
        `_init_nd_shape_and_axes`.

    Returns
    -------
    shape : ndarray
        1-D integer array, reordered to match the sorted axes.
    axes : ndarray
        1-D integer array, sorted ascending.
    """
    x = asarray(x)
    axes_given = axes is not None
    shape, axes = _init_nd_shape_and_axes(x, shape, axes)
    if axes_given:
        order = axes.argsort()
        shape = shape[order]
        axes.sort()
    return shape, axes
| [
"cupy.isscalar",
"cupy.issubdtype",
"cupy.arange",
"cupy.take",
"cupy.unique",
"cupy.atleast_1d",
"cupy.where",
"cupy.array",
"cupy.asarray",
"bisect.bisect_left"
] | [((5331, 5341), 'cupy.asarray', 'asarray', (['x'], {}), '(x)\n', (5338, 5341), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, issubdtype, take, unique, where\n'), ((5785, 5821), 'cupy.where', 'where', (['(axes < 0)', '(axes + x.ndim)', 'axes'], {}), '(axes < 0, axes + x.ndim, axes)\n', (5790, 5821), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, issubdtype, take, unique, where\n'), ((8086, 8096), 'cupy.asarray', 'asarray', (['x'], {}), '(x)\n', (8093, 8096), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, issubdtype, take, unique, where\n'), ((5427, 5453), 'cupy.arange', 'arange', (['x.ndim'], {'dtype': 'intc'}), '(x.ndim, dtype=intc)\n', (5433, 5453), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, issubdtype, take, unique, where\n'), ((5479, 5495), 'cupy.atleast_1d', 'atleast_1d', (['axes'], {}), '(axes)\n', (5489, 5495), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, issubdtype, take, unique, where\n'), ((5671, 5702), 'cupy.issubdtype', 'issubdtype', (['axes.dtype', 'integer'], {}), '(axes.dtype, integer)\n', (5681, 5702), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, issubdtype, take, unique, where\n'), ((6105, 6122), 'cupy.atleast_1d', 'atleast_1d', (['shape'], {}), '(shape)\n', (6115, 6122), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, issubdtype, take, unique, where\n'), ((6132, 6143), 'cupy.isscalar', 'isscalar', (['x'], {}), '(x)\n', (6140, 6143), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, issubdtype, take, unique, where\n'), ((6465, 6497), 'cupy.issubdtype', 'issubdtype', (['shape.dtype', 'integer'], {}), '(shape.dtype, integer)\n', (6475, 6497), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, 
issubdtype, take, unique, where\n'), ((3419, 3444), 'bisect.bisect_left', 'bisect_left', (['hams', 'target'], {}), '(hams, target)\n', (3430, 3444), False, 'from bisect import bisect_left\n'), ((6161, 6182), 'cupy.array', 'array', (['[]'], {'dtype': 'intc'}), '([], dtype=intc)\n', (6166, 6182), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, issubdtype, take, unique, where\n'), ((6758, 6772), 'cupy.array', 'array', (['x.shape'], {}), '(x.shape)\n', (6763, 6772), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, issubdtype, take, unique, where\n'), ((5982, 5994), 'cupy.unique', 'unique', (['axes'], {}), '(axes)\n', (5988, 5994), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, issubdtype, take, unique, where\n'), ((6216, 6242), 'cupy.array', 'array', (['x.shape'], {'dtype': 'intc'}), '(x.shape, dtype=intc)\n', (6221, 6242), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, issubdtype, take, unique, where\n'), ((6269, 6288), 'cupy.take', 'take', (['x.shape', 'axes'], {}), '(x.shape, axes)\n', (6273, 6288), False, 'from cupy import arange, array, asarray, atleast_1d, intc, integer, isscalar, issubdtype, take, unique, where\n')] |
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2014 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Authors: <NAME>, <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Comment: Adapted from InsarProc/runGeocode.py
import logging
import stdproc
from stdproc.rectify.geocode.Geocodable import Geocodable
import isceobj
#from contextlib import nested
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
from iscesys.StdOEL.StdOELPy import create_writer
import os
logger = logging.getLogger('isce.isceProc.runGeocode')
# Position index passed to FormSLC.getMocompPosition() in run() below.
posIndx = 1
def runGeocode(self, prodlist, unwrapflag, bbox):
    """Geocode every product in `prodlist` for all selected pairs/polarizations.

    Parameters
    ----------
    prodlist : str or sequence of str
        Product filename suffixes to geocode; a string is split into a list.
    unwrapflag : bool
        If False, the unwrapped interferogram is dropped from the list.
    bbox : sequence of 4 floats or None
        Geocoding bounding box; when None, the topo SNWE box is used.

    Raises
    ------
    ValueError
        If `bbox` is given but does not contain exactly four values.
    """
    if isinstance(prodlist, str):
        from isceobj.Util.StringUtils import StringUtils as SU
        tobeGeocoded = SU.listify(prodlist)
    else:
        # Copy so the caller's list is never mutated by the remove() below.
        tobeGeocoded = list(prodlist)
    # Remove the unwrapped interferogram if no unwrapping is done
    if not unwrapflag:
        try:
            tobeGeocoded.remove(self._isce.unwrappedIntFilename)
        except ValueError:
            pass
    print('Number of products to geocode: ', len(tobeGeocoded))
    # NOTE(review): this writer is never used or finalized in this function
    # (run() opens its own per-scene writer); kept only because creating it
    # produces "geo.log" as a side effect — confirm whether it can be dropped.
    stdWriter = create_writer("log", "", True, filename="geo.log")
    velocity, height = self._isce.vh()
    if bbox is not None:
        snwe = list(bbox)
        if len(snwe) != 4:
            # Bug fix: was `raise valueError(...)` (lowercase), which would
            # itself raise NameError instead of reporting the bad bbox.
            raise ValueError('Bounding box should be a list/tuple of length 4')
    else:
        snwe = self._isce.topo.snwe
    # Gather the per-run attributes that run() needs into a plain dict.
    infos = {}
    for attribute in ['demCropFilename', 'numberRangeLooks', 'numberAzimuthLooks',
                      'is_mocomp', 'demImage', 'peg', 'dopplerCentroid']:
        infos[attribute] = getattr(self._isce, attribute)
    for sceneid1, sceneid2 in self._isce.selectedPairs:
        pair = (sceneid1, sceneid2)
        for pol in self._isce.selectedPols:
            frame1 = self._isce.frames[sceneid1][pol]
            formSLC1 = self._isce.formSLCs[sceneid1][pol]
            sid = self._isce.formatname(pair, pol)
            infos['outputPath'] = os.path.join(self.getoutputdir(sceneid1, sceneid2), sid)
            catalog = isceobj.Catalog.createCatalog(self._isce.procDoc.name)
            run(tobeGeocoded, frame1, formSLC1, velocity, height, snwe, infos, catalog=catalog, sceneid=sid)
            self._isce.procDoc.addAllFromCatalog(catalog)
def run(tobeGeocoded, frame1, formSLC1, velocity, height, snwe, infos, catalog=None, sceneid='NO_ID'):
    """Geocode each product file of one scene.

    Parameters
    ----------
    tobeGeocoded : list of str
        Product name suffixes; each is resolved to
        ``infos['outputPath'] + '.' + name`` and skipped if missing on disk.
    frame1 : Frame
        Reference frame; supplies the instrument/platform/planet.
    formSLC1 : FormSLC
        Supplies the reference (mocomp) orbit and the reference SLC.
    velocity, height : float
        Body-fixed velocity and spacecraft height.
    snwe : sequence of 4 floats
        Default geocoding bounding box used when the geocoder has none set.
    infos : dict
        Per-scene attributes gathered by runGeocode (peg, dem, looks, ...).
    catalog : isceobj.Catalog, optional
        If given, inputs/outputs of each geocode run are recorded into it.
    sceneid : str
        Label used for logging and catalog keys.
    """
    logger.info("Geocoding Image: %s" % sceneid)
    stdWriter = create_writer("log", "", True, filename=infos['outputPath'] + ".geo.log")
    planet = frame1.getInstrument().getPlatform().getPlanet()
    # First (constant) Doppler centroid coefficient, requested with inHz=False.
    doppler = infos['dopplerCentroid'].getDopplerCoefficients(inHz=False)[0]
    #####Geocode one by one
    for prod in tobeGeocoded:
        prodPath = infos['outputPath'] + '.' + prod
        if not os.path.isfile(prodPath):
            logger.info("File not found. Skipping %s" % prodPath) #KK some prods are only in refScene folder! (tbd)
            continue
        #else:
        # One geocoder instance per product, named after the product type.
        objGeo = stdproc.createGeocode('insarapp_geocode_' + os.path.basename(prod).replace('.',''))
        objGeo.configure()
        objGeo.referenceOrbit = formSLC1.getMocompPosition(posIndx)
        ####IF statements to check for user configuration
        # Only fill values the user has not already configured.
        if objGeo.minimumLatitude is None:
            objGeo.minimumLatitude = snwe[0]
        if objGeo.maximumLatitude is None:
            objGeo.maximumLatitude = snwe[1]
        if objGeo.minimumLongitude is None:
            objGeo.minimumLongitude = snwe[2]
        if objGeo.maximumLongitude is None:
            objGeo.maximumLongitude = snwe[3]
        if objGeo.demCropFilename is None:
            objGeo.demCropFilename = infos['outputPath'] + '.' + infos['demCropFilename']
        if objGeo.dopplerCentroidConstantTerm is None:
            objGeo.dopplerCentroidConstantTerm = doppler
        if objGeo.bodyFixedVelocity is None:
            objGeo.bodyFixedVelocity = velocity
        if objGeo.spacecraftHeight is None:
            objGeo.spacecraftHeight = height
        if objGeo.numberRangeLooks is None:
            objGeo.numberRangeLooks = infos['numberRangeLooks']
        if objGeo.numberAzimuthLooks is None:
            objGeo.numberAzimuthLooks = infos['numberAzimuthLooks']
        if objGeo.isMocomp is None:
            objGeo.isMocomp = infos['is_mocomp']
        objGeo.stdWriter = stdWriter
        #create the instance of the image and return the method is supposed to use
        ge = Geocodable()
        inImage, objGeo.method = ge.create(prodPath)
        if objGeo.method is None:
            # NOTE(review): `method` is undefined in this scope — this line
            # would raise NameError if Geocodable.create() ever returned a
            # None method. Confirm the intended fallback value.
            objGeo.method = method
        if inImage:
            demImage = isceobj.createDemImage()
            IU.copyAttributes(infos['demImage'], demImage)
            objGeo(peg=infos['peg'], frame=frame1,
                   planet=planet, dem=demImage, tobegeocoded=inImage,
                   geoPosting=None, referenceslc=formSLC1)
            if catalog is not None:
                isceobj.Catalog.recordInputsAndOutputs(catalog, objGeo,
                                                       "runGeocode.%s.%s" % (sceneid, prodPath),
                                                       logger,
                                                       "runGeocode.%s.%s" % (sceneid, prodPath))
    # Flush and close the per-scene log writer.
    stdWriter.finalize()
| [
"logging.getLogger",
"isceobj.Catalog.createCatalog",
"isceobj.Util.StringUtils.StringUtils.listify",
"os.path.isfile",
"isceobj.Catalog.recordInputsAndOutputs",
"iscesys.StdOEL.StdOELPy.create_writer",
"os.path.basename",
"isceobj.createDemImage",
"iscesys.ImageUtil.ImageUtil.ImageUtil.copyAttribut... | [((1743, 1788), 'logging.getLogger', 'logging.getLogger', (['"""isce.isceProc.runGeocode"""'], {}), "('isce.isceProc.runGeocode')\n", (1760, 1788), False, 'import logging\n'), ((2410, 2460), 'iscesys.StdOEL.StdOELPy.create_writer', 'create_writer', (['"""log"""', '""""""', '(True)'], {'filename': '"""geo.log"""'}), "('log', '', True, filename='geo.log')\n", (2423, 2460), False, 'from iscesys.StdOEL.StdOELPy import create_writer\n'), ((3754, 3827), 'iscesys.StdOEL.StdOELPy.create_writer', 'create_writer', (['"""log"""', '""""""', '(True)'], {'filename': "(infos['outputPath'] + '.geo.log')"}), "('log', '', True, filename=infos['outputPath'] + '.geo.log')\n", (3767, 3827), False, 'from iscesys.StdOEL.StdOELPy import create_writer\n'), ((2050, 2070), 'isceobj.Util.StringUtils.StringUtils.listify', 'SU.listify', (['prodlist'], {}), '(prodlist)\n', (2060, 2070), True, 'from isceobj.Util.StringUtils import StringUtils as SU\n'), ((5758, 5770), 'stdproc.rectify.geocode.Geocodable.Geocodable', 'Geocodable', ([], {}), '()\n', (5768, 5770), False, 'from stdproc.rectify.geocode.Geocodable import Geocodable\n'), ((3363, 3417), 'isceobj.Catalog.createCatalog', 'isceobj.Catalog.createCatalog', (['self._isce.procDoc.name'], {}), '(self._isce.procDoc.name)\n', (3392, 3417), False, 'import isceobj\n'), ((4094, 4118), 'os.path.isfile', 'os.path.isfile', (['prodPath'], {}), '(prodPath)\n', (4108, 4118), False, 'import os\n'), ((5937, 5961), 'isceobj.createDemImage', 'isceobj.createDemImage', ([], {}), '()\n', (5959, 5961), False, 'import isceobj\n'), ((5974, 6020), 'iscesys.ImageUtil.ImageUtil.ImageUtil.copyAttributes', 'IU.copyAttributes', (["infos['demImage']", 'demImage'], {}), "(infos['demImage'], demImage)\n", (5991, 6020), True, 'from iscesys.ImageUtil.ImageUtil import ImageUtil as IU\n'), ((6270, 6421), 'isceobj.Catalog.recordInputsAndOutputs', 'isceobj.Catalog.recordInputsAndOutputs', (['catalog', 'objGeo', 
"('runGeocode.%s.%s' % (sceneid, prodPath))", 'logger', "('runGeocode.%s.%s' % (sceneid, prodPath))"], {}), "(catalog, objGeo, 'runGeocode.%s.%s' %\n (sceneid, prodPath), logger, 'runGeocode.%s.%s' % (sceneid, prodPath))\n", (6308, 6421), False, 'import isceobj\n'), ((4333, 4355), 'os.path.basename', 'os.path.basename', (['prod'], {}), '(prod)\n', (4349, 4355), False, 'import os\n')] |
import pandas as pd
def apply_pct_change(df: pd.DataFrame, groupby: str,
                     columns: list) -> pd.DataFrame:
    """Apply percent change to `columns` independently within each group.

    Rows are returned grouped together, groups in first-appearance order of
    the values in `groupby` (matching the original concatenation behaviour).

    Args:
        df: input frame; it is not modified.
        groupby: name of the column whose values define the groups.
        columns: list of column names to transform with pct_change.

    Returns:
        A new DataFrame with `columns` replaced by per-group percent changes
        (the first row of each group is NaN).
    """
    frames = []
    for group_id in df[groupby].unique():
        # Copy the slice so we never mutate the caller's frame and never
        # assign into a view (avoids SettingWithCopyWarning).
        group = df.loc[df[groupby] == group_id].copy()
        # ffill() + pct_change(fill_method=None) is the non-deprecated
        # equivalent of pct_change(fill_method='ffill').
        group[columns] = group[columns].ffill().pct_change(fill_method=None)
        frames.append(group)
    # DataFrame.append was removed in pandas 2.0; concat is the replacement.
    return pd.concat(frames) if frames else pd.DataFrame()
| [
"pandas.DataFrame"
] | [((299, 313), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (311, 313), True, 'import pandas as pd\n')] |
import pytest
from pytest_factoryboy import register
from example.factories import BlogFactory, AuthorFactory, AuthorBioFactory, EntryFactory, CommentFactory
# Register each factory as a pytest-factoryboy fixture pair, e.g.
# BlogFactory -> `blog_factory` (the factory) and `blog` (a built instance).
register(BlogFactory)
register(AuthorFactory)
register(AuthorBioFactory)
register(EntryFactory)
register(CommentFactory)
@pytest.fixture
def single_entry(blog, author, entry_factory, comment_factory):
    """One entry on the shared `blog`/`author` fixtures, with one comment attached."""
    entry = entry_factory(blog=blog, authors=(author,))
    comment_factory(entry=entry)
    return entry
@pytest.fixture
def multiple_entries(blog_factory, author_factory, entry_factory, comment_factory):
    """Two independent entries, each with its own blog, author and one comment."""
    entries = [
        entry_factory(blog=blog_factory(), authors=(author_factory(),))
        for _ in range(2)
    ]
    for entry in entries:
        comment_factory(entry=entry)
    return entries
| [
"pytest_factoryboy.register"
] | [((160, 181), 'pytest_factoryboy.register', 'register', (['BlogFactory'], {}), '(BlogFactory)\n', (168, 181), False, 'from pytest_factoryboy import register\n'), ((182, 205), 'pytest_factoryboy.register', 'register', (['AuthorFactory'], {}), '(AuthorFactory)\n', (190, 205), False, 'from pytest_factoryboy import register\n'), ((206, 232), 'pytest_factoryboy.register', 'register', (['AuthorBioFactory'], {}), '(AuthorBioFactory)\n', (214, 232), False, 'from pytest_factoryboy import register\n'), ((233, 255), 'pytest_factoryboy.register', 'register', (['EntryFactory'], {}), '(EntryFactory)\n', (241, 255), False, 'from pytest_factoryboy import register\n'), ((256, 280), 'pytest_factoryboy.register', 'register', (['CommentFactory'], {}), '(CommentFactory)\n', (264, 280), False, 'from pytest_factoryboy import register\n')] |
import platform
import os
import logging.handlers
from lbrynet import build_type, __version__ as lbrynet_version
log = logging.getLogger(__name__)
def get_platform() -> dict:
    """Collect host platform details plus the lbrynet version and build type.

    On Linux, Linux-distribution info and the current desktop environment
    are added as well.
    """
    info = {
        "processor": platform.processor(),
        "python_version": platform.python_version(),
        "platform": platform.platform(),
        "os_release": platform.release(),
        "os_system": platform.system(),
        "lbrynet_version": lbrynet_version,
        "build": build_type.BUILD,  # CI server sets this during build step
    }
    if info["os_system"] == "Linux":
        import distro
        info["distro"] = distro.info()
        info["desktop"] = os.environ.get('XDG_CURRENT_DESKTOP', 'Unknown')
    return info
| [
"platform.platform",
"os.environ.get",
"platform.release",
"distro.info",
"platform.system",
"platform.processor",
"platform.python_version"
] | [((210, 230), 'platform.processor', 'platform.processor', ([], {}), '()\n', (228, 230), False, 'import platform\n'), ((258, 283), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (281, 283), False, 'import platform\n'), ((305, 324), 'platform.platform', 'platform.platform', ([], {}), '()\n', (322, 324), False, 'import platform\n'), ((348, 366), 'platform.release', 'platform.release', ([], {}), '()\n', (364, 366), False, 'import platform\n'), ((389, 406), 'platform.system', 'platform.system', ([], {}), '()\n', (404, 406), False, 'import platform\n'), ((612, 625), 'distro.info', 'distro.info', ([], {}), '()\n', (623, 625), False, 'import distro\n'), ((649, 697), 'os.environ.get', 'os.environ.get', (['"""XDG_CURRENT_DESKTOP"""', '"""Unknown"""'], {}), "('XDG_CURRENT_DESKTOP', 'Unknown')\n", (663, 697), False, 'import os\n')] |
import tensorflow as tf
# numpy is a scientific computing package; RandomState generates the mock data
from numpy.random import RandomState
# Size of one training batch
batch_size = 8
# Network parameters; this reuses the network structure from section 3.4.2
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
# Using None for one shape dimension makes it easy to feed batches of any
# size: training uses small batches, while testing can feed the whole data set
# at once. That is convenient for small data sets, but on a large data set a
# single huge batch would run out of memory.
x = tf.placeholder(tf.float32, shape=(None, 2), name="x-input")
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')
# Forward propagation: x -> w1 -> w2, a two-layer network
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
# Loss function and back-propagation algorithm.
# tf.clip_by_value keeps log() away from 0 (which would produce NaN, e.g. log(-3));
# an alternative is cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv + 1e-10))
cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
# Generate a mock data set from random numbers
rdm = RandomState(1)
X = rdm.rand(128, 2)
# Labeling rule: every sample with x1 + x2 < 1 is a positive sample (e.g. a
# qualified part), everything else is negative (e.g. an unqualified part).
# Unlike the TensorFlow playground notation, 0 marks a negative sample and
# 1 a positive one, which is what most classification networks use.
Y = [[int(x1 + x2) < 1] for (x1, x2) in X]
# Create a session to run the TensorFlow program
with tf.Session() as sess:
    # Initialize the variables
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    # Network parameters before training
    print("w1:", sess.run(w1))
    print("w2:", sess.run(w2))
    print("\n")
    '''
    Parameter values before training:
    w1: [[-0.81131822 1.48459876 0.06532937]
    [-2.44270396 0.0992484 0.59122431]]
    w2: [[-0.81131822]
    [ 1.48459876]
    [ 0.06532937]]
    '''
    # Number of training rounds
    STEPS = 5000
    for i in range(STEPS):
        start = (i * batch_size) % 128
        end = (i * batch_size) % 128 + batch_size
        # Train the network on the selected samples and update the parameters
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            # Periodically compute and print the cross entropy over all data
            total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})
            print("After %d training steps(s), cross entropy on all data is %g" % (i, total_cross_entropy))
        '''
        Output:
        After 0 training steps(s), cross entropy on all data is 0.0674925
        After 1000 training steps(s), cross entropy on all data is 0.0163385
        After 2000 training steps(s), cross entropy on all data is 0.00907547
        After 3000 training steps(s), cross entropy on all data is 0.00714436
        After 4000 training steps(s), cross entropy on all data is 0.00578471
        The cross entropy shrinks as training proceeds, i.e. the gap between
        the predictions and the true labels keeps getting smaller.
        '''
    print("\n")
    print("w1:", sess.run(w1))
    print("w2:", sess.run(w2))
    '''
    w1: [[-1.9618274 2.58235407 1.68203783]
    [-3.4681716 1.06982327 2.11788988]]
    w2: [[-1.8247149 ]
    [ 2.68546653]
    [ 1.41819501]]
    Both parameters have changed: the change is the result of training, and it
    is what makes the network fit the provided training data better.
    '''
    '''
    The three steps of training a neural network:
    1. Define the network structure and the forward-propagation output
    2. Define the loss function and choose the back-propagation optimizer
    3. Create a session (tf.Session) and repeatedly run the optimizer on the training data
    '''
| [
"tensorflow.random_normal",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.matmul",
"tensorflow.clip_by_value",
"tensorflow.train.AdamOptimizer",
"numpy.random.RandomState"
] | [((416, 475), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 2)', 'name': '"""x-input"""'}), "(tf.float32, shape=(None, 2), name='x-input')\n", (430, 475), True, 'import tensorflow as tf\n'), ((481, 540), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 1)', 'name': '"""y-input"""'}), "(tf.float32, shape=(None, 1), name='y-input')\n", (495, 540), True, 'import tensorflow as tf\n'), ((577, 593), 'tensorflow.matmul', 'tf.matmul', (['x', 'w1'], {}), '(x, w1)\n', (586, 593), True, 'import tensorflow as tf\n'), ((598, 614), 'tensorflow.matmul', 'tf.matmul', (['a', 'w2'], {}), '(a, w2)\n', (607, 614), True, 'import tensorflow as tf\n'), ((927, 941), 'numpy.random.RandomState', 'RandomState', (['(1)'], {}), '(1)\n', (938, 941), False, 'from numpy.random import RandomState\n'), ((182, 224), 'tensorflow.random_normal', 'tf.random_normal', (['[2, 3]'], {'stddev': '(1)', 'seed': '(1)'}), '([2, 3], stddev=1, seed=1)\n', (198, 224), True, 'import tensorflow as tf\n'), ((243, 285), 'tensorflow.random_normal', 'tf.random_normal', (['[3, 1]'], {'stddev': '(1)', 'seed': '(1)'}), '([3, 1], stddev=1, seed=1)\n', (259, 285), True, 'import tensorflow as tf\n'), ((1225, 1237), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1235, 1237), True, 'import tensorflow as tf\n'), ((1273, 1306), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1304, 1306), True, 'import tensorflow as tf\n'), ((849, 878), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.001)'], {}), '(0.001)\n', (871, 878), True, 'import tensorflow as tf\n'), ((802, 833), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['y', '(1e-10)', '(1.0)'], {}), '(y, 1e-10, 1.0)\n', (818, 833), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
"""
Copyright (c) 2006-2021 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
from lib.core.data import logger
from plugins.generic.enumeration import Enumeration as GenericEnumeration
class Enumeration(GenericEnumeration):
    """FrontBase enumeration handlers.

    FrontBase does not expose the metadata these features need, so each
    override just logs a warning and returns an empty/None result.
    """

    def getBanner(self):
        logger.warn("on FrontBase it is not possible to get the banner")
        return None

    def getPrivileges(self, *args, **kwargs):
        logger.warn("on FrontBase it is not possible to enumerate the user privileges")
        return {}

    def getHostname(self):
        logger.warn("on FrontBase it is not possible to enumerate the hostname")

    def getStatements(self):
        logger.warn("on FrontBase it is not possible to enumerate the SQL statements")
        return []
| [
"lib.core.data.logger.warn"
] | [((391, 411), 'lib.core.data.logger.warn', 'logger.warn', (['warnMsg'], {}), '(warnMsg)\n', (402, 411), False, 'from lib.core.data import logger\n'), ((573, 593), 'lib.core.data.logger.warn', 'logger.warn', (['warnMsg'], {}), '(warnMsg)\n', (584, 593), False, 'from lib.core.data import logger\n'), ((727, 747), 'lib.core.data.logger.warn', 'logger.warn', (['warnMsg'], {}), '(warnMsg)\n', (738, 747), False, 'from lib.core.data import logger\n'), ((870, 890), 'lib.core.data.logger.warn', 'logger.warn', (['warnMsg'], {}), '(warnMsg)\n', (881, 890), False, 'from lib.core.data import logger\n')] |
"""Retokenization helpers
This module provides helpers for projecting span annotations from one tokenization to another.
Notes:
* Code is ported from https://github.com/nyu-mll/jiant/blob/master/jiant/utils/retokenize.py
* Please keep this code as a standalone utility; don't make this module depend on jiant modules.
"""
from typing import Iterable, Sequence, Tuple, Union
from Levenshtein.StringMatcher import StringMatcher
from nltk.tokenize.util import string_span_tokenize
import numpy as np
# Integer dtype shared by every alignment/incidence matrix in this module.
_DTYPE = np.int32
def _mat_from_blocks_dense(mb, n_chars_src, n_chars_tgt):
M = np.zeros((n_chars_src, n_chars_tgt), dtype=_DTYPE)
for i in range(len(mb)):
b = mb[i] # current block
# Fill in-between this block and last block
if i > 0:
lb = mb[i - 1] # last block
s0 = lb[0] + lb[2] # top
e0 = b[0] # bottom
s1 = lb[1] + lb[2] # left
e1 = b[1] # right
M[s0:e0, s1:e1] = 1
# Fill matching region on diagonal
M[b[0]: b[0] + b[2], b[1]: b[1] + b[2]] = 2 * \
np.identity(b[2], dtype=_DTYPE)
return M
def _mat_from_spans_dense(spans: Sequence[Tuple[int, int]], n_chars: int) -> np.ndarray:
"""Construct a token-to-char matrix from a list of char spans."""
M = np.zeros((len(spans), n_chars), dtype=_DTYPE)
for i, s in enumerate(spans):
M[i, s[0]: s[1]] = 1
return M
def token_to_char(text: str, sep=" ") -> np.ndarray:
    """Map each `sep`-delimited token of `text` to the chars it covers.

    Tokenizes `text` on `sep` and returns an (m tokens) x (M chars) 0/1
    incidence matrix whose entry (i, j) is 1 iff character j of `text`
    lies inside token i. For example, for ``"testing 1, 2, 3"`` the row
    for ``"testing"`` has 1s over columns 0-6 only.

    Args:
        text: string to tokenize and build the token-to-char mapping for.

    Returns:
        np.ndarray mapping from (m) tokens to (M) chars.
    """
    token_spans = tuple(string_span_tokenize(text, sep=sep))
    return _mat_from_spans_dense(token_spans, len(text))
def _mat_from_blocks(
    mb: Sequence[Tuple[int, int, int]], n_chars_src: int, n_chars_tgt: int
) -> np.ndarray:
    """Construct a char-to-char matrix from a list of matching blocks.
    mb is a sequence of (s1, s2, n_char) tuples, where s1 and s2 are the start indices in the
    first and second sequence, and n_char is the length of the block.
    Args:
        mb: list of triples of non-overlapping matching subsequences in source str and target.
        n_chars_src (int): number of chars in the source string.
        n_chars_tgt (int): number of chars in the target string.
    Returns:
        np.ndarray adjacency matrix mapping chars in the source str to chars in the target str.
    """
    # Currently always delegates to the dense implementation.
    return _mat_from_blocks_dense(mb, n_chars_src, n_chars_tgt)
def char_to_char(source: str, target: str) -> np.ndarray:
    """Char-level adjacency matrix between two similar strings.

    Runs Levenshtein's StringMatcher to find the non-overlapping matching
    blocks between `source` and `target` (see difflib's
    ``SequenceMatcher.get_matching_blocks`` for the block format), then
    expands those blocks into a (len(source) x len(target)) adjacency matrix.

    Args:
        source: string of source chars.
        target: string of target chars.

    Returns:
        np.ndarray adjacency matrix mapping source chars to target chars.
    """
    matcher = StringMatcher(seq1=source, seq2=target)
    blocks = matcher.get_matching_blocks()
    return _mat_from_blocks(blocks, len(source), len(target))
class TokenAligner(object):
"""Align two similiar tokenizations.
Args:
source (Union[Iterable[str], str]): Source text tokens or string.
target (Union[Iterable[str], str]): Target text tokens or string.
Usage:
ta = TokenAligner(source_tokens, target_tokens)
target_span = ta.project_span(*source_span)
Uses Levenshtein distance to align two similar tokenizations, and provide facilities to project
indices and spans from the source tokenization to the target.
Let source contain m tokens and M chars, and target contain n tokens and N chars. The token
alignment is treated as a (sparse) m x n adjacency matrix T representing the bipartite graph
between the source and target tokens.
This is constructed by performing a character-level alignment using Levenshtein distance to
obtain a (M x N) character adjacency matrix C. We then construct token-to-character matricies
U (m x M) and V (n x N) and construct T as:
T = (U C V')
where V' denotes the transpose.
Spans of non-aligned bytes are assumed to contain a many-to-many alignment of all chars in that
range. This can lead to unwanted alignments if, for example, two consecutive tokens are mapped
to escape sequences:
source: ["'s", "["]
target: ["'", "s", "["]
In the above case, "'s'" may be spuriously aligned to "'" while "[" has no character match
with "s" or "[", and so is aligned to both. To make a correct alignment more likely, ensure
that the characters in target correspond as closely as possible to those in source. For example,
the following will align correctly:
source: ["'s", "["]
target: ["'", "s", "["]
"""
    def __init__(self, source: Union[Iterable[str], str], target: Union[Iterable[str], str]):
        """Build the token/char alignment matrices between source and target."""
        # Coerce source and target to space-delimited string.
        if not isinstance(source, str):
            source = " ".join(source)
        if not isinstance(target, str):
            target = " ".join(target)
        # (m X M) source token idx to source char idx
        self.U = token_to_char(source)
        # (n x N) target token idx to target char idx
        self.V = token_to_char(target)
        # (M x N) source char idx to target char idx
        self.C = char_to_char(source, target)
        # Token transfer matrix from (m) tokens in source to (n) tokens in the target. Mat value at
        # index i, j measures the character overlap btwn the ith source token and jth target token.
        self.source_token_idx_to_target_token_idx = self.U.dot(
            self.C).dot(self.V.T)
        # (m x N) source token idx to target char idx
        self.source_token_idx_to_target_char_idx = self.U.dot(self.C)
        # (M x n) source char idx to target token idx
        self.source_char_idx_to_target_token_idx = self.C.dot(self.V.T)
def project_token_idxs(self, idxs: Union[int, Sequence[int]]) -> Sequence[int]:
"""Project source token index(s) to target token indices.
Takes a list of token indices in the source token sequence, and returns the corresponding
tokens in the target sequence.
Args:
idxs (Union[int, Sequence[int]]): source token index(s) to get related target indices.
Examples:
>>> source_tokens = ['abc', 'def', 'ghi', 'jkl']
>>> target_tokens = ['abc', 'd', 'ef', 'ghi', 'jkl']
>>> ta = TokenAligner(source_tokens, target_tokens)
>>> print(ta.project_token_idxs([1, 2]))
[1 2 3]
Returns:
List[int] representing the target indices associated with the provided source indices.
"""
if isinstance(idxs, int):
idxs = [idxs]
# column indices
return self.source_token_idx_to_target_token_idx[idxs].nonzero()[1]
@staticmethod
def _project_span(mat, start, end, inclusive):
if inclusive:
end = end + 1
target_matches = mat[start:end].nonzero()[1].tolist()
if len(target_matches) == 0:
raise ValueError(
f"Project {(start, end)} into empty span in target sequence")
output_start, output_end = min(target_matches), max(target_matches)
if not inclusive:
output_end = output_end + 1
return (output_start, output_end)
def project_token_span(self, start, end, inclusive=False) -> Tuple[int, int]:
    """Map a source-token span onto the corresponding target-token span.

    By default *end* is exclusive and the returned span is exclusive as
    well; with ``inclusive=True`` both the input span and the returned
    span are inclusive on both ends.

    Args:
        start: index of the first source token in the span.
        end: index one past (or, if inclusive, of) the last source token.

    Raises:
        ValueError: if the span maps to an empty region of the target.

    Returns:
        (start, end) token indices in the target sequence.

    Examples:
        >>> ta = TokenAligner(['abc', 'def', 'ghi', 'jkl'],
        ...                   ['abc', 'd', 'ef', 'ghi', 'jkl'])
        >>> ta.project_token_span(0, 2)
        (0, 3)
    """
    return self._project_span(
        self.source_token_idx_to_target_token_idx, start, end, inclusive
    )
def project_token_to_char_span(self, start, end, inclusive=False) -> Tuple[int, int]:
    """Map a source-token span onto a character span of the target string.

    By default *end* is exclusive and the returned span is exclusive as
    well; with ``inclusive=True`` both the input span and the returned
    span are inclusive on both ends.

    Args:
        start: index of the first source token in the span.
        end: index one past (or, if inclusive, of) the last source token.

    Raises:
        ValueError: if the span maps to an empty region of the target.

    Returns:
        (start, end) character indices in the target string.

    Examples:
        >>> ta = TokenAligner(['abc', 'def', 'ghi', 'jkl'],
        ...                   'abc d ef ghi jkl')
        >>> ta.project_token_to_char_span(0, 2)
        (0, 8)
    """
    return self._project_span(
        self.source_token_idx_to_target_char_idx, start, end, inclusive
    )
def project_char_to_token_span(self, start, end, inclusive=False) -> Tuple[int, int]:
    """Map a character span of the source string onto a target-token span.

    By default *end* is exclusive and the returned span is exclusive as
    well; with ``inclusive=True`` both the input span and the returned
    span are inclusive on both ends.

    Args:
        start: index of the first source character in the span.
        end: index one past (or, if inclusive, of) the last source character.

    Raises:
        ValueError: if the span maps to an empty region of the target.

    Returns:
        (start, end) token indices in the target sequence.

    Examples:
        >>> ta = TokenAligner('abc def ghi jkl',
        ...                   ['abc', 'd', 'ef', 'ghi', 'jkl'])
        >>> ta.project_char_to_token_span(0, 4)
        (0, 1)
    """
    return self._project_span(
        self.source_char_idx_to_target_token_idx, start, end, inclusive
    )
| [
"numpy.identity",
"numpy.zeros",
"Levenshtein.StringMatcher.StringMatcher",
"nltk.tokenize.util.string_span_tokenize"
] | [((597, 647), 'numpy.zeros', 'np.zeros', (['(n_chars_src, n_chars_tgt)'], {'dtype': '_DTYPE'}), '((n_chars_src, n_chars_tgt), dtype=_DTYPE)\n', (605, 647), True, 'import numpy as np\n'), ((2133, 2168), 'nltk.tokenize.util.string_span_tokenize', 'string_span_tokenize', (['text'], {'sep': 'sep'}), '(text, sep=sep)\n', (2153, 2168), False, 'from nltk.tokenize.util import string_span_tokenize\n'), ((3676, 3715), 'Levenshtein.StringMatcher.StringMatcher', 'StringMatcher', ([], {'seq1': 'source', 'seq2': 'target'}), '(seq1=source, seq2=target)\n', (3689, 3715), False, 'from Levenshtein.StringMatcher import StringMatcher\n'), ((1106, 1137), 'numpy.identity', 'np.identity', (['b[2]'], {'dtype': '_DTYPE'}), '(b[2], dtype=_DTYPE)\n', (1117, 1137), True, 'import numpy as np\n')] |
import requests, hashlib
class Ecommerce:
    """Thin HTTP client for an AfrikPay-style e-commerce payment gateway.

    Wraps the collect (customer payment), payout (disbursement), deposit,
    key-rotation and transaction-status endpoints.  Every request is signed
    with an md5 digest built from the call parameters and the merchant API
    key (this is the gateway's signature scheme, not a security choice made
    here).

    NOTE(review): the default endpoint URLs point at private/test IP
    addresses over plain HTTP — confirm before production use.
    """

    # Providers whose mobile-money reference must carry the '237'
    # (Cameroon) country prefix before being sent to the gateway.
    _PREFIXED_PROVIDERS = frozenset(('express_union_mobilemoney', 'afrikpay'))
    # Providers whose reference is forwarded unchanged.
    _PLAIN_PROVIDERS = frozenset(('mtn_mobilemoney_cm', 'orange_money_cm'))

    def __init__(
            self,
            store,
            apiKey,
            secretCode='',
            collectUrl='http://192.168.127.12:8086/api/ecommerce/collect/',
            payoutUrl='http://192.168.127.12:8086/api/ecommerce/payout/',
            depositUrl='http://192.168.127.12:8086/api/ecommerce/deposit/',
            changeKeyUrl='http://192.168.127.12:8086/api/ecommerce/changekey/',
            transactionStatusUrl='http://35.204.26.22:8086/api/ecommerce/transaction/status/',
            acceptUrl='',
            cancelUrl='',
            declineUrl='',
            notifyUrl=''):
        """Store merchant credentials, endpoint URLs and redirect URLs.

        Args:
            store: merchant/store identifier at the gateway.
            apiKey: API key used to sign every request.
            secretCode: secret used (md5-hashed) as the payout password.
            collectUrl/payoutUrl/depositUrl/changeKeyUrl/transactionStatusUrl:
                gateway endpoint URLs.
            acceptUrl/cancelUrl/declineUrl/notifyUrl: merchant callback URLs
                forwarded with each payment request.
        """
        self.store = store
        self.apiKey = apiKey
        self.secretCode = secretCode
        self.collectUrl = collectUrl
        self.payoutUrl = payoutUrl
        self.depositUrl = depositUrl
        self.changeKeyUrl = changeKeyUrl
        self.transactionStatusUrl = transactionStatusUrl
        self.acceptUrl = acceptUrl
        self.cancelUrl = cancelUrl
        self.declineUrl = declineUrl
        self.notifyUrl = notifyUrl

    @staticmethod
    def _md5(text):
        """Return the hex md5 digest of *text* (the gateway's request signature)."""
        return hashlib.md5(text.encode()).hexdigest()

    def _normalized_reference(self, provider, reference):
        """Return *reference* adjusted for *provider*, or None if unknown.

        Express Union and AfrikPay require the '237' country prefix; MTN and
        Orange take the reference verbatim.
        """
        if provider in self._PLAIN_PROVIDERS:
            return reference
        if provider in self._PREFIXED_PROVIDERS:
            return '237' + reference
        return None

    def collect(
            self,
            provider,
            reference,
            amount,
            code='',
            purchaseRef='',
            description=''):
        """Initiate a customer payment (collection) through *provider*.

        Args:
            provider: one of the supported mobile-money provider codes.
            reference: customer account/phone reference.
            amount: amount to collect.
            code: optional provider confirmation code.
            purchaseRef: merchant-side purchase reference.
            description: free-text description.

        Returns:
            Decoded JSON response from the gateway, or None for an
            unknown provider (a message is printed — historical behaviour
            preserved for callers that check for None).
        """
        ref = self._normalized_reference(provider, reference)
        if ref is None:
            print('Invalid provider')
            return None
        return self.makePayment(provider, ref, amount, code, purchaseRef, description)

    def deposit(self):
        """Query the store deposit endpoint.

        Returns:
            Decoded JSON response from the gateway.
        """
        params = {
            'store': self.store,
            'hash': self._md5(str(self.store) + str(self.apiKey)),
        }
        response = requests.post(url=self.depositUrl, params=params)
        return response.json()

    def payout(
            self,
            provider,
            reference,
            amount,
            purchaseRef='',
            description=''):
        """Send a payout (disbursement) through *provider*.

        Args:
            provider: one of the supported mobile-money provider codes.
            reference: recipient account/phone reference.
            amount: amount to pay out.
            purchaseRef: merchant-side purchase reference.
            description: free-text description.

        Returns:
            Decoded JSON response from the gateway, or None for an
            unknown provider (a message is printed — historical behaviour
            preserved for callers that check for None).
        """
        ref = self._normalized_reference(provider, reference)
        if ref is None:
            print('Invalid provider')
            return None
        return self.makePayout(provider, ref, amount, purchaseRef, description)

    def changeKey(self):
        """Request rotation of the store API key.

        Returns:
            Decoded JSON response from the gateway.
        """
        params = {
            'store': self.store,
            'hash': self._md5(str(self.store) + str(self.apiKey)),
        }
        response = requests.post(url=self.changeKeyUrl, params=params)
        return response.json()

    def transactionStatus(
            self,
            purchaseRef):
        """Look up the status of a transaction by its purchase reference.

        Returns:
            Decoded JSON response from the gateway.
        """
        params = {
            'purchaseref': purchaseRef,
            'store': self.store,
            'hash': self._md5(str(purchaseRef) + str(self.apiKey)),
        }
        response = requests.post(url=self.transactionStatusUrl, params=params)
        return response.json()

    def makePayment(
            self,
            provider,
            reference,
            amount,
            code='',
            purchaseRef='',
            description=''):
        """Low-level collect call: POST the signed payment request.

        Prefer :meth:`collect`, which also normalizes the reference per
        provider.  Returns the decoded JSON response.
        """
        params = {
            'provider': provider,
            'reference': reference,
            'amount': amount,
            'description': description,
            'purchaseref': purchaseRef,
            'store': self.store,
            'hash': self._md5(
                str(provider) + str(reference) + str(amount) + str(self.apiKey)),
            'code': code,
            'notifurl': self.notifyUrl,
            'accepturl': self.acceptUrl,
            'cancelurl': self.cancelUrl,
            'declineurl': self.declineUrl,
        }
        response = requests.post(url=self.collectUrl, params=params)
        return response.json()

    def makePayout(
            self,
            provider,
            reference,
            amount,
            purchaseRef='',
            description=''):
        """Low-level payout call: POST the signed disbursement request.

        Prefer :meth:`payout`, which also normalizes the reference per
        provider.  Returns the decoded JSON response.
        """
        params = {
            'provider': provider,
            'reference': reference,
            'amount': amount,
            'description': description,
            'purchaseref': purchaseRef,
            'store': self.store,
            'hash': self._md5(
                str(provider) + str(reference) + str(amount) + str(self.apiKey)),
            # The payout endpoint additionally authenticates with the
            # md5 of the merchant secret code.
            'password': self._md5(str(self.secretCode)),
        }
        response = requests.post(url=self.payoutUrl, params=params)
        return response.json()

    def __repr__(self):
        return str(self.store + " - " + self.apiKey)
| [
"requests.post"
] | [((2461, 2510), 'requests.post', 'requests.post', ([], {'url': 'self.depositUrl', 'params': 'params'}), '(url=self.depositUrl, params=params)\n', (2474, 2510), False, 'import requests, hashlib\n'), ((3834, 3885), 'requests.post', 'requests.post', ([], {'url': 'self.changeKeyUrl', 'params': 'params'}), '(url=self.changeKeyUrl, params=params)\n', (3847, 3885), False, 'import requests, hashlib\n'), ((4219, 4278), 'requests.post', 'requests.post', ([], {'url': 'self.transactionStatusUrl', 'params': 'params'}), '(url=self.transactionStatusUrl, params=params)\n', (4232, 4278), False, 'import requests, hashlib\n'), ((5070, 5119), 'requests.post', 'requests.post', ([], {'url': 'self.collectUrl', 'params': 'params'}), '(url=self.collectUrl, params=params)\n', (5083, 5119), False, 'import requests, hashlib\n'), ((5808, 5856), 'requests.post', 'requests.post', ([], {'url': 'self.payoutUrl', 'params': 'params'}), '(url=self.payoutUrl, params=params)\n', (5821, 5856), False, 'import requests, hashlib\n')] |