hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f734a7810a7e04bf77d068a86ee4ed61a560a6e3 | 2,872 | py | Python | bench_utils/timeit.py | drkostas/bench-utils | 0ec976238175ee8137a54101e679f0faf58996b9 | [
"Apache-2.0"
] | 3 | 2021-12-22T14:29:17.000Z | 2022-01-26T21:21:36.000Z | bench_utils/timeit.py | drkostas/bench-utils | 0ec976238175ee8137a54101e679f0faf58996b9 | [
"Apache-2.0"
] | null | null | null | bench_utils/timeit.py | drkostas/bench-utils | 0ec976238175ee8137a54101e679f0faf58996b9 | [
"Apache-2.0"
] | null | null | null | from contextlib import ContextDecorator
from typing import Callable, IO, Union
from functools import wraps
from time import time
from termcolor_logger import ColorLogger
# Shared module-level logger through which all `timeit` output is emitted.
time_logger = ColorLogger('Timeit', 'white')
class timeit(ContextDecorator):
    """Decorator/ContextManager for timing functions and code blocks.

    Configuration is passed as keyword arguments to ``__init__`` and stored
    directly on the instance via ``__dict__.update``; the class-level
    annotations below list the recognised option names.
    """

    # Recognised configuration attributes (set through **kwargs):
    custom_print: str  # custom output template; may reference {duration}
    skip: bool  # when True, timing is disabled entirely
    total: Union[float, None]  # elapsed seconds of the last timed run (None before use)
    internal_only: bool  # when True, suppress logger output (file write still happens)
    file: IO  # optional open file handle to also write the timing output to

    def __init__(self, **kwargs):
        """Decorator/ContextManager for counting the execution times of
        functions and code blocks.

        Args:
            custom_print: Custom print string. Use {duration} to reference the
                running time. When used as decorator it can also be formatted
                using `func_name`, `args`, and {0}, {1}, .. to reference the
                function's first, second, ... argument.
            skip: If True, don't time this time. Suitable when inside loops.
            file: Write the timing output to a file too.
        """
        self.total = None
        self.skip = False
        self.internal_only = False
        # Any supplied option simply becomes an instance attribute.
        self.__dict__.update(kwargs)

    def __call__(self, func: Callable):
        """This is called only when invoked as a decorator.

        Args:
            func: The method to wrap
        """
        @wraps(func)
        def timed(*args, **kwargs):
            # _recreate_cm (inherited from ContextDecorator) yields a fresh
            # context manager per call, so the wrapped function is timed
            # through the same __enter__/__exit__ path as a `with` block.
            with self._recreate_cm():
                self.func_name = func.__name__
                self.args = args
                self.kwargs = kwargs
                # Flattened positional view of all arguments; referenced by
                # the default print template as {args}.
                self.all_args = (*args, *kwargs.values()) if kwargs != {} else args
                return func(*args, **kwargs)
        return timed

    def __enter__(self, *args, **kwargs):
        if not self.skip:
            self.ts = time()  # start timestamp
        return self

    def __exit__(self, type, value, traceback):
        # NOTE(review): `type` shadows the builtin; harmless here since the
        # builtin is not used in this method.
        if self.skip:
            return
        self.te = time()  # end timestamp
        self.total = self.te - self.ts
        if hasattr(self, 'func_name'):
            # Decorator mode: format the message with the wrapped function's
            # metadata captured in timed() above.
            if not hasattr(self, 'custom_print'):
                print_string = 'Func: {func_name!r} with args: {args!r} took: {duration:2.5f} sec(s)'
            else:
                print_string = self.custom_print
            time_logger.info(print_string.format(*self.args, func_name=self.func_name,
                                                 args=self.all_args,
                                                 duration=self.total,
                                                 **self.kwargs))
        else:
            # Context-manager mode (plain `with timeit():` block).
            if not hasattr(self, 'custom_print'):
                print_string = 'Code block took: {duration:2.5f} sec(s)'
            else:
                print_string = self.custom_print
            # NOTE(review): the `file` output only happens in context-manager
            # mode; decorated functions are never written to the file.
            if hasattr(self, 'file'):
                self.file.write(print_string.format(duration=self.total))
            if not self.internal_only:
                time_logger.info(print_string.format(duration=self.total))
| 35.02439 | 101 | 0.55188 | from contextlib import ContextDecorator
from typing import Callable, IO, Union
from functools import wraps
from time import time
from termcolor_logger import ColorLogger
# Shared module-level logger through which all `timeit` output is emitted.
time_logger = ColorLogger('Timeit', 'white')
class timeit(ContextDecorator):
    """Decorator/ContextManager for timing functions and code blocks.

    Configuration is passed as keyword arguments to ``__init__`` and stored
    directly on the instance via ``__dict__.update``; the class-level
    annotations below list the recognised option names.
    """

    # Recognised configuration attributes (set through **kwargs):
    custom_print: str  # custom output template; may reference {duration}
    skip: bool  # when True, timing is disabled entirely
    total: Union[float, None]  # elapsed seconds of the last timed run (None before use)
    internal_only: bool  # when True, suppress logger output (file write still happens)
    file: IO  # optional open file handle to also write the timing output to

    def __init__(self, **kwargs):
        """Store defaults, then absorb any supplied options as attributes."""
        self.total = None
        self.skip = False
        self.internal_only = False
        # Any supplied option simply becomes an instance attribute.
        self.__dict__.update(kwargs)

    def __call__(self, func: Callable):
        """Invoked only when the instance is used as a decorator."""
        @wraps(func)
        def timed(*args, **kwargs):
            # _recreate_cm (inherited from ContextDecorator) yields a fresh
            # context manager per call, so the wrapped function is timed
            # through the same __enter__/__exit__ path as a `with` block.
            with self._recreate_cm():
                self.func_name = func.__name__
                self.args = args
                self.kwargs = kwargs
                # Flattened positional view of all arguments; referenced by
                # the default print template as {args}.
                self.all_args = (*args, *kwargs.values()) if kwargs != {} else args
                return func(*args, **kwargs)
        return timed

    def __enter__(self, *args, **kwargs):
        if not self.skip:
            self.ts = time()  # start timestamp
        return self

    def __exit__(self, type, value, traceback):
        # NOTE(review): `type` shadows the builtin; harmless here since the
        # builtin is not used in this method.
        if self.skip:
            return
        self.te = time()  # end timestamp
        self.total = self.te - self.ts
        if hasattr(self, 'func_name'):
            # Decorator mode: format the message with the wrapped function's
            # metadata captured in timed() above.
            if not hasattr(self, 'custom_print'):
                print_string = 'Func: {func_name!r} with args: {args!r} took: {duration:2.5f} sec(s)'
            else:
                print_string = self.custom_print
            time_logger.info(print_string.format(*self.args, func_name=self.func_name,
                                                 args=self.all_args,
                                                 duration=self.total,
                                                 **self.kwargs))
        else:
            # Context-manager mode (plain `with timeit():` block).
            if not hasattr(self, 'custom_print'):
                print_string = 'Code block took: {duration:2.5f} sec(s)'
            else:
                print_string = self.custom_print
            # NOTE(review): the `file` output only happens in context-manager
            # mode; decorated functions are never written to the file.
            if hasattr(self, 'file'):
                self.file.write(print_string.format(duration=self.total))
            if not self.internal_only:
                time_logger.info(print_string.format(duration=self.total))
| true | true |
f734a811ed32e5cecdcde508cdef13f526385529 | 120,801 | py | Python | menpowidgets/base.py | apapaion/menpowidgets | 237a39ddf4e65c57e8165f8a87f25a25f34d4698 | [
"BSD-3-Clause"
] | null | null | null | menpowidgets/base.py | apapaion/menpowidgets | 237a39ddf4e65c57e8165f8a87f25a25f34d4698 | [
"BSD-3-Clause"
] | null | null | null | menpowidgets/base.py | apapaion/menpowidgets | 237a39ddf4e65c57e8165f8a87f25a25f34d4698 | [
"BSD-3-Clause"
] | null | null | null | from collections import Sized, OrderedDict
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import numpy as np
import ipywidgets
import IPython.display as ipydisplay
from menpo.base import name_of_callable
from menpo.image import MaskedImage, Image
from menpo.image.base import _convert_patches_list_to_single_array
from menpo.shape import TriMesh, ColouredTriMesh, TexturedTriMesh
from menpo.visualize import print_dynamic
from menpo.landmark import LandmarkManager
from .options import (RendererOptionsWidget, TextPrintWidget,
SaveMatplotlibFigureOptionsWidget, AnimationOptionsWidget,
ImageOptionsWidget, LandmarkOptionsWidget,
PlotMatplotlibOptionsWidget, PatchOptionsWidget,
LinearModelParametersWidget, CameraSnapshotWidget,
Shape2DOptionsWidget, Shape3DOptionsWidget,
SaveMayaviFigureOptionsWidget, Mesh3DOptionsWidget)
from .tools import LogoWidget, SwitchWidget
from .utils import (extract_group_labels_from_landmarks,
extract_groups_labels_from_image, render_image,
render_patches)
from .checks import check_n_parameters
from .style import map_styles_to_hex_colours
def menpowidgets_src_dir_path():
    r"""
    Return the path to the top of the menpowidgets package.

    Useful for locating where the logos folder is stored.

    Returns
    -------
    path : ``pathlib.Path``
        The full path to the top of the Menpo package
    """
    # Imports are kept local to avoid cluttering the menpowidgets.base
    # namespace.
    import os.path
    from pathlib import Path

    this_file = os.path.abspath(__file__)
    return Path(this_file).parent
def visualize_shapes_2d(shapes, figure_size=(7, 7), browser_style='buttons',
                        custom_info_callback=None):
    r"""
    Widget that allows browsing through a `list` of 2D shapes.

    The supported objects are:

        ================================== =
        Object
        ================================== =
        `menpo.shape.PointCloud`
        `menpo.shape.PointUndirectedGraph`
        `menpo.shape.PointDirectedGraph`
        `menpo.shape.PointTree`
        `menpo.shape.LabelledPointGraph`
        `menpo.shape.TriMesh`
        ================================== =

    Any instance of the above (or subclasses of those) can be combined in the
    input `list`.

    Parameters
    ----------
    shapes : `list`
        The `list` of objects to be visualized.
    figure_size : (`int`, `int`), optional
        The initial size of the rendered figure.
    browser_style : ``{'buttons', 'slider'}``, optional
        It defines whether the selector of the objects will have the form of
        plus/minus buttons or a slider.
    custom_info_callback : `function` or ``None``, optional
        If not ``None``, it should be a function that accepts a 2D shape
        and returns a list of custom messages to be printed about it. Each
        custom message will be printed in a separate line.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print('Initializing...')

    # Make sure that shapes is a list even with one member
    if not isinstance(shapes, Sized):
        shapes = [shapes]

    # Get the number of shapes
    n_shapes = len(shapes)

    # Define the styling options
    main_style = 'warning'

    # Define render function. `change` is the ipywidgets change dict; its
    # contents are ignored here - the current state is read from the widgets.
    def render_function(change):
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        ipydisplay.clear_output(wait=True)

        # Get selected shape index
        i = shape_number_wid.selected_values if n_shapes > 1 else 0

        # Create options dictionary from the current widget state
        options = dict()
        options.update(shape_options_wid.selected_values['lines'])
        options.update(shape_options_wid.selected_values['markers'])
        options['image_view'] = shape_options_wid.selected_values['image_view']
        options.update(
            renderer_options_wid.selected_values['numbering_matplotlib'])
        options.update(renderer_options_wid.selected_values['axes'])

        # Correct options based on the type of the shape
        if hasattr(shapes[i], 'labels'):
            # If the shape is a LabelledPointUndirectedGraph ...
            # ...use the legend options
            options.update(renderer_options_wid.selected_values['legend'])
            # ...use with_labels
            options['with_labels'] = \
                shape_options_wid.selected_values['with_labels']
            # ...keep only the colours of the selected labels, in label order
            line_colour = []
            marker_face_colour = []
            marker_edge_colour = []
            for lbl in options['with_labels']:
                idx = shapes[i].labels.index(lbl)
                line_colour.append(options['line_colour'][idx])
                marker_face_colour.append(options['marker_face_colour'][idx])
                marker_edge_colour.append(options['marker_edge_colour'][idx])
            options['line_colour'] = line_colour
            options['marker_face_colour'] = marker_face_colour
            options['marker_edge_colour'] = marker_edge_colour
        else:
            # If shape is PointCloud, TriMesh or PointGraph there is a single
            # colour per option - unwrap the one-element lists
            options['line_colour'] = options['line_colour'][0]
            options['marker_face_colour'] = options['marker_face_colour'][0]
            options['marker_edge_colour'] = options['marker_edge_colour'][0]

        # Get figure size, scaled by the current zoom factor
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
            renderer_options_wid.selected_values['zoom_one'] * figure_size[1])

        # Render shape with selected options, reusing the existing figure
        save_figure_wid.renderer = shapes[i].view(
            figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
            figure_size=new_figure_size, **options)

        # Force rendering
        save_figure_wid.renderer.force_draw()

        # Update info text widget
        update_info(shapes[i], custom_info_callback=custom_info_callback)

    # Define function that updates the info text
    def update_info(shape, custom_info_callback=None):
        min_b, max_b = shape.bounds()
        rang = shape.range()
        cm = shape.centre()
        text_per_line = [
            "> {}".format(name_of_callable(shape)),
            "> {} points".format(shape.n_points),
            "> Bounds: [{0:.1f}-{1:.1f}]W, [{2:.1f}-{3:.1f}]H".format(
                min_b[0], max_b[0], min_b[1], max_b[1]),
            "> Range: {0:.1f}W, {1:.1f}H".format(rang[0], rang[1]),
            "> Centre of mass: ({0:.1f}, {1:.1f})".format(cm[0], cm[1]),
            "> Norm: {0:.2f}".format(shape.norm())]
        if custom_info_callback is not None:
            # iterate over the list of messages returned by the callback
            # function and append them in the text_per_line.
            for msg in custom_info_callback(shape):
                text_per_line.append('> {}'.format(msg))
        info_wid.set_widget_state(text_per_line=text_per_line)

    # If the first object is a LabelledPointUndirectedGraph, grab the labels
    labels = None
    if hasattr(shapes[0], 'labels'):
        labels = shapes[0].labels

    # Create widgets
    shape_options_wid = Shape2DOptionsWidget(
        labels=labels, render_function=render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],
        labels=None, axes_x_limits=0.1, axes_y_limits=0.1,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()

    # Group widgets
    if n_shapes > 1:
        # Define function that updates options' widgets state when the
        # browsed index changes (change['new'] is the new index).
        def update_widgets(change):
            # Get current shape and check if it has labels
            i = change['new']
            labels = None
            if hasattr(shapes[i], 'labels'):
                labels = shapes[i].labels

            # Update shape options (allow_callback triggers a re-render)
            shape_options_wid.set_widget_state(labels=labels,
                                               allow_callback=True)

        # Shape selection slider
        index = {'min': 0, 'max': n_shapes-1, 'step': 1, 'index': 0}
        shape_number_wid = AnimationOptionsWidget(
            index, render_function=update_widgets, index_style=browser_style,
            interval=0.2, description='Shape', loop_enabled=True,
            continuous_update=False)

        # Header widget
        logo_wid = LogoWidget(style=main_style)
        logo_wid.layout.margin = '0px 10px 0px 0px'
        header_wid = ipywidgets.HBox([logo_wid, shape_number_wid])
        header_wid.layout.align_items = 'center'
        header_wid.layout.margin = '0px 0px 10px 0px'
    else:
        # Header widget (no browsing controls needed for a single shape)
        header_wid = LogoWidget(style=main_style)
        header_wid.layout.margin = '0px 10px 0px 0px'
    options_box = ipywidgets.Tab(
        [info_wid, shape_options_wid, renderer_options_wid, save_figure_wid])
    tab_titles = ['Info', 'Shape', 'Renderer', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    if n_shapes > 1:
        wid = ipywidgets.VBox([header_wid, options_box])
    else:
        wid = ipywidgets.HBox([header_wid, options_box])

    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)

    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)

    # Trigger initial visualization
    render_function({})
def visualize_shapes_3d(shapes, browser_style='buttons',
                        custom_info_callback=None):
    r"""
    Widget that allows browsing through a `list` of 3D shapes.

    The supported objects are:

        ==================================
        Object
        ==================================
        `menpo.shape.PointCloud`
        `menpo.shape.PointUndirectedGraph`
        `menpo.shape.PointDirectedGraph`
        `menpo.shape.PointTree`
        `menpo.shape.LabelledPointGraph`
        ==================================

    Any instance of the above (or subclasses of those) can be combined in the
    input `list`.

    Parameters
    ----------
    shapes : `list`
        The `list` of objects to be visualized.
    browser_style : ``{'buttons', 'slider'}``, optional
        It defines whether the selector of the objects will have the form of
        plus/minus buttons or a slider.
    custom_info_callback : `function` or ``None``, optional
        If not ``None``, it should be a function that accepts a shape
        and returns a list of custom messages to be printed about it. Each
        custom message will be printed in a separate line.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print_dynamic('Initializing...')

    # Make sure that shapes is a list even with one member
    if not isinstance(shapes, Sized):
        shapes = [shapes]

    # Get the number of shapes
    n_shapes = len(shapes)

    # Define the styling options
    main_style = 'warning'

    # Define render function. `change` is the ipywidgets change dict; its
    # contents are ignored here - the current state is read from the widgets.
    def render_function(change):
        # Clear current (Mayavi) figure
        save_figure_wid.renderer.clear_figure()
        ipydisplay.clear_output(wait=True)

        # Get selected shape index
        i = shape_number_wid.selected_values if n_shapes > 1 else 0

        # Update info text widget
        update_info(shapes[i], custom_info_callback=custom_info_callback)

        # Create options dictionary
        options = dict()
        if isinstance(shapes[i], TriMesh):
            # Note that 3D TriMesh has a totally different set of options
            # compared to any other PointCloud or PointGraph. However, in order
            # for visualize_shapes_3d to support TriMeshes, we simply use the
            # options that are common. This means that most of the widget's
            # options will have no effect on rendering...
            options['mesh_type'] = 'wireframe'
            if shape_options_wid.selected_values['markers']['render_markers']:
                options['mesh_type'] = 'fancymesh'
            options['line_width'] = \
                shape_options_wid.selected_values['lines']['line_width']
            options['colour'] = \
                shape_options_wid.selected_values['lines']['line_colour'][0]
            options['marker_style'] = \
                shape_options_wid.selected_values['markers']['marker_style']
            options['marker_size'] = \
                shape_options_wid.selected_values['markers']['marker_size']
            options['marker_resolution'] = \
                shape_options_wid.selected_values['markers']['marker_resolution']
            options['step'] = \
                shape_options_wid.selected_values['markers']['step']
        else:
            options.update(shape_options_wid.selected_values['lines'])
            options.update(shape_options_wid.selected_values['markers'])
            options.update(
                renderer_options_wid.selected_values['numbering_mayavi'])
            # Correct options based on the type of the shape
            if hasattr(shapes[i], 'labels'):
                # If the shape is a LabelledPointUndirectedGraph ...
                # ...use with_labels
                options['with_labels'] = \
                    shape_options_wid.selected_values['with_labels']
                # ...keep only the colours of the selected labels
                line_colour = []
                marker_colour = []
                for lbl in options['with_labels']:
                    idx = shapes[i].labels.index(lbl)
                    line_colour.append(options['line_colour'][idx])
                    marker_colour.append(options['marker_colour'][idx])
                options['line_colour'] = line_colour
                options['marker_colour'] = marker_colour
            else:
                # If shape is PointCloud, TriMesh or PointGraph there is a
                # single colour per option - unwrap the one-element lists
                options['line_colour'] = options['line_colour'][0]
                options['marker_colour'] = options['marker_colour'][0]

        # Render shape with selected options, reusing the existing figure
        save_figure_wid.renderer = shapes[i].view(
            figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
            alpha=1.0, **options)

        # Force rendering
        save_figure_wid.renderer.force_draw()

    # Define function that updates the info text
    def update_info(shape, custom_info_callback=None):
        min_b, max_b = shape.bounds()
        rang = shape.range()
        cm = shape.centre()
        text_per_line = [
            "> {}".format(name_of_callable(shape)),
            "> {} points".format(shape.n_points),
            "> Bounds: [{0:.1f}-{1:.1f}]X, [{2:.1f}-{3:.1f}]Y, "
            "[{4:.1f}-{5:.1f}]Z".format(min_b[0], max_b[0], min_b[1], max_b[1],
                                        min_b[2], max_b[2]),
            "> Range: {0:.1f}X, {1:.1f}Y, {2:.1f}Z".format(rang[0], rang[1],
                                                           rang[2]),
            "> Centre of mass: ({0:.1f}X, {1:.1f}Y, {2:.1f}Z)".format(
                cm[0], cm[1], cm[2]),
            "> Norm: {0:.2f}".format(shape.norm())]
        if custom_info_callback is not None:
            # iterate over the list of messages returned by the callback
            # function and append them in the text_per_line.
            for msg in custom_info_callback(shape):
                text_per_line.append('> {}'.format(msg))
        info_wid.set_widget_state(text_per_line=text_per_line)

    # If the first object is a LabelledPointUndirectedGraph, grab the labels
    labels = None
    if hasattr(shapes[0], 'labels'):
        labels = shapes[0].labels

    # Create widgets
    shape_options_wid = Shape3DOptionsWidget(
        labels=labels, render_function=render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['numbering_mayavi'], labels=None,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMayaviFigureOptionsWidget()

    # Group widgets
    if n_shapes > 1:
        # Define function that updates options' widgets state when the
        # browsed index changes (change['new'] is the new index).
        def update_widgets(change):
            # Get current shape and check if it has labels
            i = change['new']
            labels = None
            if hasattr(shapes[i], 'labels'):
                labels = shapes[i].labels

            # Update shape options (allow_callback triggers a re-render)
            shape_options_wid.set_widget_state(labels=labels,
                                               allow_callback=True)

        # Shape selection slider
        index = {'min': 0, 'max': n_shapes-1, 'step': 1, 'index': 0}
        shape_number_wid = AnimationOptionsWidget(
            index, render_function=update_widgets, index_style=browser_style,
            interval=0.2, description='Shape', loop_enabled=True,
            continuous_update=False)

        # Header widget
        logo_wid = LogoWidget(style=main_style)
        logo_wid.layout.margin = '0px 10px 0px 0px'
        header_wid = ipywidgets.HBox([logo_wid, shape_number_wid])
        header_wid.layout.align_items = 'center'
        header_wid.layout.margin = '0px 0px 10px 0px'
    else:
        # Header widget (no browsing controls needed for a single shape)
        header_wid = LogoWidget(style=main_style)
        header_wid.layout.margin = '0px 10px 0px 0px'
    options_box = ipywidgets.Tab(
        [info_wid, shape_options_wid, renderer_options_wid, save_figure_wid])
    tab_titles = ['Info', 'Shape', 'Renderer', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    if n_shapes > 1:
        wid = ipywidgets.VBox([header_wid, options_box])
    else:
        wid = ipywidgets.HBox([header_wid, options_box])

    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)

    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)

    # Trigger initial visualization
    render_function({})
    print_dynamic('')
def visualize_landmarks_2d(landmarks, figure_size=(7, 7),
                           browser_style='buttons', custom_info_callback=None):
    r"""
    Widget that allows browsing through a `list` of
    `menpo.landmark.LandmarkManager` (or subclass) objects. The landmark
    managers can have a combination of different attributes, e.g.
    landmark groups and labels etc.

    Parameters
    ----------
    landmarks : `list` of `menpo.landmark.LandmarkManager` or subclass
        The `list` of landmark managers to be visualized.
    figure_size : (`int`, `int`), optional
        The initial size of the rendered figure.
    browser_style : ``{'buttons', 'slider'}``, optional
        It defines whether the selector of the objects will have the form of
        plus/minus buttons or a slider.
    custom_info_callback : `function` or ``None``, optional
        If not None, it should be a function that accepts a landmark group and
        returns a list of custom messages to be printed per landmark group.
        Each custom message will be printed in a separate line.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print('Initializing...')

    # Make sure that landmarks is a list even with one landmark manager member
    if isinstance(landmarks, LandmarkManager):
        landmarks = [landmarks]

    # Get the number of landmark managers
    n_landmarks = len(landmarks)

    # Define the styling options
    main_style = 'info'

    # Define render function. `change` is the ipywidgets change dict; its
    # contents are ignored here - the current state is read from the widgets.
    def render_function(change):
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        ipydisplay.clear_output(wait=True)

        # get selected index and selected group
        i = landmark_number_wid.selected_values if n_landmarks > 1 else 0
        g = landmark_options_wid.selected_values['landmarks']['group']
        if landmark_options_wid.selected_values['landmarks']['render_landmarks']:
            # get shape of the selected group
            shape = landmarks[i][g]

            # Create options dictionary from the current widget state
            options = dict()
            options.update(landmark_options_wid.selected_values['lines'])
            options.update(landmark_options_wid.selected_values['markers'])
            options['image_view'] = landmark_options_wid.selected_values['image_view']
            options.update(
                renderer_options_wid.selected_values['numbering_matplotlib'])
            options.update(renderer_options_wid.selected_values['axes'])

            # Correct options based on the type of the shape
            if hasattr(shape, 'labels'):
                # If the shape is a LabelledPointUndirectedGraph ...
                # ...use the legend options
                options.update(renderer_options_wid.selected_values['legend'])
                # ...use with_labels
                options['with_labels'] = \
                    landmark_options_wid.selected_values['landmarks']['with_labels']
                # ...keep only the colours of the selected labels
                line_colour = []
                marker_face_colour = []
                marker_edge_colour = []
                for lbl in options['with_labels']:
                    # NOTE(review): `id` shadows the builtin; harmless here.
                    id = shape.labels.index(lbl)
                    line_colour.append(options['line_colour'][id])
                    marker_face_colour.append(options['marker_face_colour'][id])
                    marker_edge_colour.append(options['marker_edge_colour'][id])
                options['line_colour'] = line_colour
                options['marker_face_colour'] = marker_face_colour
                options['marker_edge_colour'] = marker_edge_colour
            else:
                # If shape is PointCloud, TriMesh or PointGraph there is a
                # single colour per option - unwrap the one-element lists
                options['line_colour'] = options['line_colour'][0]
                options['marker_face_colour'] = options['marker_face_colour'][0]
                options['marker_edge_colour'] = options['marker_edge_colour'][0]

            # Get figure size, scaled by the current zoom factor
            new_figure_size = (
                renderer_options_wid.selected_values['zoom_one'] *
                figure_size[0],
                renderer_options_wid.selected_values['zoom_one'] *
                figure_size[1])

            # Render shape with selected options, reusing the existing figure
            save_figure_wid.renderer = shape.view(
                figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
                figure_size=new_figure_size, **options)

            # Force rendering
            save_figure_wid.renderer.force_draw()
        else:
            ipydisplay.clear_output()

        # update info text widget
        update_info(landmarks[i], g, custom_info_callback=custom_info_callback)

    # Define function that updates the info text
    def update_info(landmarks, group, custom_info_callback=None):
        if group is not None:
            min_b, max_b = landmarks[group].bounds()
            rang = landmarks[group].range()
            cm = landmarks[group].centre()
            text_per_line = [
                "> {} landmark points".format(landmarks[group].n_points),
                "> {}".format(name_of_callable(landmarks[group])),
                "> Bounds: [{0:.1f}-{1:.1f}]W, [{2:.1f}-{3:.1f}]H".format(
                    min_b[0], max_b[0], min_b[1], max_b[1]),
                "> Range: {0:.1f}W, {1:.1f}H".format(rang[0], rang[1]),
                "> Centre of mass: ({0:.1f}, {1:.1f})".format(cm[0], cm[1]),
                "> Norm: {0:.2f}".format(landmarks[group].norm())]
            if custom_info_callback is not None:
                # iterate over the list of messages returned by the callback
                # function and append them in the text_per_line.
                for msg in custom_info_callback(landmarks[group]):
                    text_per_line.append('> {}'.format(msg))
        else:
            text_per_line = ["No landmarks available."]
        info_wid.set_widget_state(text_per_line=text_per_line)

    # Create widgets
    groups_keys, labels_keys = extract_group_labels_from_landmarks(landmarks[0])
    first_label = labels_keys[0] if labels_keys else None
    landmark_options_wid = LandmarkOptionsWidget(
        group_keys=groups_keys, labels_keys=labels_keys,
        type='2D', render_function=render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],
        labels=first_label, axes_x_limits=0.1, axes_y_limits=0.1,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()

    # Group widgets
    if n_landmarks > 1:
        # Define function that updates options' widgets state when the
        # browsed index changes.
        def update_widgets(change):
            # Get new groups and labels
            i = landmark_number_wid.selected_values
            g_keys, l_keys = extract_group_labels_from_landmarks(
                landmarks[i])

            # Update landmarks options (allow_callback triggers a re-render)
            landmark_options_wid.set_widget_state(
                group_keys=g_keys, labels_keys=l_keys, allow_callback=True)

        # Landmark selection slider
        index = {'min': 0, 'max': n_landmarks-1, 'step': 1, 'index': 0}
        landmark_number_wid = AnimationOptionsWidget(
            index, render_function=update_widgets, index_style=browser_style,
            interval=0.2, description='Shape', loop_enabled=True,
            continuous_update=False)

        # Header widget
        logo_wid = LogoWidget(style=main_style)
        logo_wid.layout.margin = '0px 10px 0px 0px'
        header_wid = ipywidgets.HBox([logo_wid, landmark_number_wid])
        header_wid.layout.align_items = 'center'
        header_wid.layout.margin = '0px 0px 10px 0px'
    else:
        # Header widget (no browsing controls for a single landmark manager)
        header_wid = LogoWidget(style=main_style)
        header_wid.layout.margin = '0px 10px 0px 0px'
    options_box = ipywidgets.Tab(
        children=[info_wid, landmark_options_wid, renderer_options_wid,
                  save_figure_wid])
    tab_titles = ['Info', 'Landmarks', 'Renderer', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    if n_landmarks > 1:
        wid = ipywidgets.VBox([header_wid, options_box])
    else:
        wid = ipywidgets.HBox([header_wid, options_box])

    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)

    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)

    # Trigger initial visualization
    render_function({})
def visualize_landmarks_3d(landmarks, browser_style='buttons',
custom_info_callback=None):
r"""
Widget that allows browsing through a `list` of
`menpo.landmark.LandmarkManager` (or subclass) objects. The landmark
managers can have a combination of different attributes, e.g.
landmark groups and labels etc.
Parameters
----------
landmarks : `list` of `menpo.landmark.LandmarkManager` or subclass
The `list` of landmark managers to be visualized.
browser_style : ``{'buttons', 'slider'}``, optional
It defines whether the selector of the objects will have the form of
plus/minus buttons or a slider.
custom_info_callback: `function` or ``None``, optional
If not None, it should be a function that accepts a landmark group and
returns a list of custom messages to be printed per landmark group.
Each custom message will be printed in a separate line.
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
print('Initializing...')
# Make sure that landmarks is a list even with one landmark manager member
if not isinstance(landmarks, list):
landmarks = [landmarks]
# Get the number of landmark managers
n_landmarks = len(landmarks)
# Define the styling options
main_style = 'info'
# Define render function
def render_function(change):
# Clear current figure
save_figure_wid.renderer.clear_figure()
ipydisplay.clear_output(wait=True)
# get selected index and selected group
i = landmark_number_wid.selected_values if n_landmarks > 1 else 0
g = landmark_options_wid.selected_values['landmarks']['group']
# update info text widget
update_info(landmarks[i], g, custom_info_callback=custom_info_callback)
if landmark_options_wid.selected_values['landmarks']['render_landmarks']:
# get shape
shape = landmarks[i][g]
options = dict()
if isinstance(shape, TriMesh):
# Note that 3D TriMesh has a totally different set of options
# compared to any other PointCloud or PointGraph. However, in
# order for visualize_landmarks_3d to support TriMeshes, we
# simply use the options that are common. This means that most
# of the widget's options will have no effect on rendering...
options['mesh_type'] = 'wireframe'
if landmark_options_wid.selected_values['markers'][
'render_markers']:
options['mesh_type'] = 'fancymesh'
options['line_width'] = \
landmark_options_wid.selected_values['lines']['line_width']
options['colour'] = \
landmark_options_wid.selected_values['lines']['line_colour'][0]
options['marker_style'] = \
landmark_options_wid.selected_values['markers']['marker_style']
options['marker_size'] = \
landmark_options_wid.selected_values['markers']['marker_size']
options['marker_resolution'] = \
landmark_options_wid.selected_values['markers'][
'marker_resolution']
options['step'] = \
landmark_options_wid.selected_values['markers']['step']
else:
options.update(landmark_options_wid.selected_values['lines'])
options.update(landmark_options_wid.selected_values['markers'])
options.update(
renderer_options_wid.selected_values['numbering_mayavi'])
# Correct options based on the type of the shape
if hasattr(shape, 'labels'):
# If the shape is a LabelledPointUndirectedGraph ...
# ...use with_labels
options['with_labels'] = \
landmark_options_wid.selected_values['landmarks']['with_labels']
# ...correct colours
line_colour = []
marker_colour = []
for lbl in options['with_labels']:
idx = shape.labels.index(lbl)
line_colour.append(options['line_colour'][idx])
marker_colour.append(options['marker_colour'][idx])
options['line_colour'] = line_colour
options['marker_colour'] = marker_colour
else:
# If shape is PointCloud, TriMesh or PointGraph
# ...correct colours
options['line_colour'] = options['line_colour'][0]
options['marker_colour'] = options['marker_colour'][0]
# Render shape with selected options
save_figure_wid.renderer = shape.view(
figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
alpha=1.0, **options)
# Force rendering
save_figure_wid.renderer.force_draw()
else:
ipydisplay.clear_output()
# Define function that updates the info text
def update_info(landmarks, group, custom_info_callback=None):
if group is not None:
min_b, max_b = landmarks[group].bounds()
rang = landmarks[group].range()
cm = landmarks[group].centre()
text_per_line = [
"> {} landmark points".format(landmarks[group].n_points),
"> {}".format(name_of_callable(landmarks[group])),
"> Bounds: [{0:.1f}-{1:.1f}]X, [{2:.1f}-{3:.1f}]Y, "
"[{4:.1f}-{5:.1f}]Z".format(
min_b[0], max_b[0], min_b[1], max_b[1], min_b[2], max_b[2]),
"> Range: {0:.1f}X, {1:.1f}Y, {2:.1f}Z".format(rang[0], rang[1],
rang[2]),
"> Centre of mass: ({0:.1f}X, {1:.1f}Y, {2:.1f}Z)".format(
cm[0], cm[1], cm[2]),
"> Norm: {0:.2f}".format(landmarks[group].norm())]
if custom_info_callback is not None:
# iterate over the list of messages returned by the callback
# function and append them in the text_per_line.
for msg in custom_info_callback(landmarks[group]):
text_per_line.append('> {}'.format(msg))
else:
text_per_line = ["No landmarks available."]
info_wid.set_widget_state(text_per_line=text_per_line)
# Create widgets
groups_keys, labels_keys = extract_group_labels_from_landmarks(landmarks[0])
first_label = labels_keys[0] if labels_keys else None
landmark_options_wid = LandmarkOptionsWidget(
group_keys=groups_keys, labels_keys=labels_keys,
type='3D', render_function=render_function)
renderer_options_wid = RendererOptionsWidget(
options_tabs=['numbering_mayavi'], labels=first_label,
render_function=render_function)
info_wid = TextPrintWidget(text_per_line=[''])
save_figure_wid = SaveMayaviFigureOptionsWidget()
# Group widgets
if n_landmarks > 1:
# Define function that updates options' widgets state
def update_widgets(change):
# Get new groups and labels
i = landmark_number_wid.selected_values
g_keys, l_keys = extract_group_labels_from_landmarks(
landmarks[i])
# Update landmarks options
landmark_options_wid.set_widget_state(
group_keys=g_keys, labels_keys=l_keys, allow_callback=True)
# Landmark selection slider
index = {'min': 0, 'max': n_landmarks-1, 'step': 1, 'index': 0}
landmark_number_wid = AnimationOptionsWidget(
index, render_function=update_widgets, index_style=browser_style,
interval=0.2, description='Shape', loop_enabled=True,
continuous_update=False)
# Header widget
logo_wid = LogoWidget(style=main_style)
logo_wid.layout.margin = '0px 10px 0px 0px'
header_wid = ipywidgets.HBox([logo_wid, landmark_number_wid])
header_wid.layout.align_items = 'center'
header_wid.layout.margin = '0px 0px 10px 0px'
else:
# Header widget
header_wid = LogoWidget(style=main_style)
header_wid.layout.margin = '0px 10px 0px 0px'
options_box = ipywidgets.Tab(
children=[info_wid, landmark_options_wid, renderer_options_wid,
save_figure_wid])
tab_titles = ['Info', 'Landmarks', 'Renderer', 'Export']
for (k, tl) in enumerate(tab_titles):
options_box.set_title(k, tl)
if n_landmarks > 1:
wid = ipywidgets.VBox([header_wid, options_box])
else:
wid = ipywidgets.HBox([header_wid, options_box])
# Set widget's style
wid.box_style = main_style
wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
# Display final widget
final_box = ipywidgets.Box([wid])
final_box.layout.display = 'flex'
ipydisplay.display(final_box)
# Trigger initial visualization
render_function({})
print_dynamic('')
def visualize_meshes_3d(meshes, browser_style='buttons',
                        custom_info_callback=None):
    r"""
    Widget that allows browsing through a `list` of 3D meshes. The supported
    objects are:

        ==================================
        Object
        ==================================
        `menpo.shape.TriMesh`
        `menpo.shape.ColouredTriMesh`
        `menpo.shape.TexturedTriMesh`
        ==================================

    Any instance of the above can be combined in the input `list`.

    Parameters
    ----------
    meshes : `list`
        The `list` of objects to be visualized. It can contain a combination of

        ==================================
        Object
        ==================================
        `menpo.shape.TriMesh`
        `menpo.shape.ColouredTriMesh`
        `menpo.shape.TexturedTriMesh`
        ==================================

        or subclasses of those.
    browser_style : ``{'buttons', 'slider'}``, optional
        It defines whether the selector of the objects will have the form of
        plus/minus buttons or a slider.
    custom_info_callback: `function` or ``None``, optional
        If not ``None``, it should be a function that accepts a 3D mesh
        and returns a list of custom messages to be printed about it. Each
        custom message will be printed in a separate line.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from menpowidgets.utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    # Route transient prints through an Output widget so they can be cleared
    out = ipywidgets.Output()
    ipydisplay.display(out)
    with out:
        ipydisplay.clear_output(wait=True)
        print('Initializing...')
    # Make sure that meshes is a list even with one member
    if not isinstance(meshes, Sized):
        meshes = [meshes]
    # Get the number of meshes
    n_meshes = len(meshes)
    # Define the styling options
    main_style = 'warning'

    # Define render function.
    # NOTE(review): this closure refers to widgets (save_figure_wid,
    # mesh_number_wid, mesh_options_wid, info_wid) that are only created
    # further down; safe because it is not invoked until after construction.
    def render_function(_):
        # Clear current figure
        save_figure_wid.renderer.clear_figure()
        with out:
            ipydisplay.clear_output(wait=True)
        # Get selected mesh index
        i = mesh_number_wid.selected_values if n_meshes > 1 else 0
        # Update info text widget
        update_info(meshes[i], custom_info_callback=custom_info_callback)
        # Render instance on the (reused) Mayavi figure
        save_figure_wid.renderer = meshes[i].view(
            figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
            **mesh_options_wid.selected_values)
        # Force rendering
        save_figure_wid.renderer.force_draw()

    # Define function that updates the info text
    def update_info(mesh, custom_info_callback=None):
        min_b, max_b = mesh.bounds()
        rang = mesh.range()
        cm = mesh.centre()
        text_per_line = [
            "> {}".format(name_of_callable(mesh)),
            "> {} points".format(mesh.n_points),
            "> Bounds: [{0:.1f}-{1:.1f}]X, [{2:.1f}-{3:.1f}]Y, "
            "[{4:.1f}-{5:.1f}]Z".format(
                min_b[0], max_b[0], min_b[1], max_b[1], min_b[2], max_b[2]),
            "> Range: {0:.1f}X, {1:.1f}Y, {2:.1f}Z".format(rang[0], rang[1],
                                                           rang[2]),
            "> Centre of mass: ({0:.1f}X, {1:.1f}Y, {2:.1f}Z)".format(
                cm[0], cm[1], cm[2]),
            "> Norm: {0:.2f}".format(mesh.norm())]
        if custom_info_callback is not None:
            # iterate over the list of messages returned by the callback
            # function and append them in the text_per_line.
            for msg in custom_info_callback(mesh):
                text_per_line.append('> {}'.format(msg))
        info_wid.set_widget_state(text_per_line=text_per_line)

    # Create widgets; textured rendering is enabled for coloured/textured
    # trimeshes of the first list member
    mesh_options_wid = Mesh3DOptionsWidget(
        textured=(isinstance(meshes[0], ColouredTriMesh) or
                  isinstance(meshes[0], TexturedTriMesh)),
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMayaviFigureOptionsWidget()

    # Group widgets
    if n_meshes > 1:
        # Define function that updates options' widgets state
        def update_widgets(change):
            i = change['new']
            # Update shape options
            mesh_options_wid.set_widget_state(
                textured=(isinstance(meshes[i], ColouredTriMesh) or
                          isinstance(meshes[i], TexturedTriMesh)),
                allow_callback=True)

        # selection slider
        index = {'min': 0, 'max': n_meshes-1, 'step': 1, 'index': 0}
        mesh_number_wid = AnimationOptionsWidget(
            index, render_function=update_widgets, index_style=browser_style,
            interval=0.2, description='Mesh', loop_enabled=True,
            continuous_update=False)
        # Header widget
        logo_wid = LogoWidget(style=main_style)
        logo_wid.layout.margin = '0px 10px 0px 0px'
        header_wid = ipywidgets.HBox([logo_wid, mesh_number_wid])
        header_wid.layout.align_items = 'center'
        header_wid.layout.margin = '0px 0px 10px 0px'
    else:
        # Header widget
        header_wid = LogoWidget(style=main_style)
        header_wid.layout.margin = '0px 10px 0px 0px'
    options_box = ipywidgets.Tab([info_wid, mesh_options_wid, save_figure_wid])
    tab_titles = ['Info', 'Mesh', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    if n_meshes > 1:
        wid = ipywidgets.VBox([header_wid, options_box])
    else:
        wid = ipywidgets.HBox([header_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
    # Clear the 'Initializing...' message
    with out:
        print_dynamic('')
def visualize_images(images, figure_size=(7, 7), browser_style='buttons',
                     custom_info_callback=None):
    r"""
    Widget that allows browsing through a `list` of `menpo.image.Image` (or
    subclass) objects. The images can have a combination of different
    attributes, e.g. masked or not, landmarked or not, with/without multiple
    landmark groups and labels etc.

    Parameters
    ----------
    images : `list` of `menpo.image.Image` or subclass
        The `list` of images to be visualized.
    figure_size : (`int`, `int`), optional
        The initial size of the rendered figure.
    browser_style : ``{'buttons', 'slider'}``, optional
        It defines whether the selector of the objects will have the form of
        plus/minus buttons or a slider.
    custom_info_callback: `function` or ``None``, optional
        If not None, it should be a function that accepts an image and returns
        a list of custom messages to be printed per image. Each custom message
        will be printed in a separate line.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print('Initializing...')
    # Make sure that images is a list even with one member
    if not isinstance(images, Sized):
        images = [images]
    # Get the number of images
    n_images = len(images)
    # Define the styling options
    main_style = 'info'

    # Define render function.
    # NOTE(review): closes over widgets created further down
    # (image_number_wid, landmark_options_wid, ...); safe because it only
    # fires after all widgets exist.
    def render_function(change):
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        ipydisplay.clear_output(wait=True)
        # get selected index and selected group
        i = image_number_wid.selected_values if n_images > 1 else 0
        g = landmark_options_wid.selected_values['landmarks']['group']
        # check if image is masked
        image_is_masked = isinstance(images[i], MaskedImage)
        # Create options dictionary by merging the selections of all tabs
        options = dict()
        options.update(landmark_options_wid.selected_values['lines'])
        options.update(landmark_options_wid.selected_values['markers'])
        options.update(
            renderer_options_wid.selected_values['numbering_matplotlib'])
        options.update(renderer_options_wid.selected_values['axes'])
        options.update(renderer_options_wid.selected_values['legend'])
        options.update(image_options_wid.selected_values)
        options.update(landmark_options_wid.selected_values['landmarks'])
        # Correct options based on the type of the shape
        if (images[i].has_landmarks and
                hasattr(images[i].landmarks[g], 'labels')):
            # If the shape is a LabelledPointUndirectedGraph ...
            # ...correct colours: keep only the colours of the selected labels
            line_colour = []
            marker_face_colour = []
            marker_edge_colour = []
            for lbl in options['with_labels']:
                # NOTE(review): 'id' shadows the builtin; kept as-is here
                id = images[i].landmarks[g].labels.index(lbl)
                line_colour.append(options['line_colour'][id])
                marker_face_colour.append(options['marker_face_colour'][id])
                marker_edge_colour.append(options['marker_edge_colour'][id])
            options['line_colour'] = line_colour
            options['marker_face_colour'] = marker_face_colour
            options['marker_edge_colour'] = marker_edge_colour
        else:
            # If shape is PointCloud, TriMesh or PointGraph
            # ...correct colours: a single colour applies to the whole shape
            options['line_colour'] = options['line_colour'][0]
            options['marker_face_colour'] = options['marker_face_colour'][0]
            options['marker_edge_colour'] = options['marker_edge_colour'][0]
        # Get figure size, scaled by the selected zoom factor
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] *
            figure_size[0],
            renderer_options_wid.selected_values['zoom_one'] *
            figure_size[1])
        # Render shape with selected options
        save_figure_wid.renderer = render_image(
            image=images[i], renderer=save_figure_wid.renderer,
            image_is_masked=image_is_masked, figure_size=new_figure_size,
            **options)
        # Update info
        update_info(images[i], image_is_masked, g,
                    custom_info_callback=custom_info_callback)

    # Define function that updates the info text
    def update_info(img, image_is_masked, group, custom_info_callback=None):
        # Prepare masked (or non-masked) string
        masked_str = 'Masked Image' if image_is_masked else 'Image'
        # Get image path, if available
        path_str = img.path if hasattr(img, 'path') else 'No path available'
        # Create text lines
        text_per_line = [
            "> {} of size {} with {} channel{}".format(
                masked_str, img._str_shape(), img.n_channels,
                's' * (img.n_channels > 1)),
            "> Path: '{}'".format(path_str)]
        if image_is_masked:
            text_per_line.append(
                "> {} masked pixels (attached mask {:.1%} true)".format(
                    img.n_true_pixels(), img.mask.proportion_true()))
        text_per_line.append("> min={:.3f}, max={:.3f}".format(
            img.pixels.min(), img.pixels.max()))
        if img.has_landmarks:
            text_per_line.append("> {} landmark points".format(
                img.landmarks[group].n_points))
        if custom_info_callback is not None:
            # iterate over the list of messages returned by the callback
            # function and append them in the text_per_line.
            for msg in custom_info_callback(img):
                text_per_line.append('> {}'.format(msg))
        info_wid.set_widget_state(text_per_line=text_per_line)

    # Create widgets, initialised from the first image in the list
    groups_keys, labels_keys = extract_groups_labels_from_image(images[0])
    first_label = labels_keys[0] if labels_keys else None
    image_options_wid = ImageOptionsWidget(
        n_channels=images[0].n_channels,
        image_is_masked=isinstance(images[0], MaskedImage),
        render_function=render_function)
    landmark_options_wid = LandmarkOptionsWidget(
        group_keys=groups_keys, labels_keys=labels_keys,
        type='2D', render_function=render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],
        labels=first_label, axes_x_limits=None, axes_y_limits=None,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()

    # Group widgets
    if n_images > 1:
        # Define function that updates options' widgets state
        def update_widgets(change):
            # Get new groups and labels, then update landmark options
            i = image_number_wid.selected_values
            g_keys, l_keys = extract_groups_labels_from_image(images[i])
            # Update landmarks options (no callback yet to avoid a double
            # render; the channels update below triggers it)
            landmark_options_wid.set_widget_state(
                group_keys=g_keys, labels_keys=l_keys, allow_callback=False)
            # Update channels options
            image_options_wid.set_widget_state(
                n_channels=images[i].n_channels,
                image_is_masked=isinstance(images[i], MaskedImage),
                allow_callback=True)

        # Image selection slider
        index = {'min': 0, 'max': n_images-1, 'step': 1, 'index': 0}
        image_number_wid = AnimationOptionsWidget(
            index, render_function=update_widgets, index_style=browser_style,
            interval=0.2, description='Image', loop_enabled=True,
            continuous_update=False)
        # Header widget
        logo_wid = LogoWidget(style=main_style)
        logo_wid.layout.margin = '0px 10px 0px 0px'
        header_wid = ipywidgets.HBox([logo_wid, image_number_wid])
        header_wid.layout.align_items = 'center'
        header_wid.layout.margin = '0px 0px 10px 0px'
    else:
        # Header widget
        header_wid = LogoWidget(style=main_style)
        header_wid.layout.margin = '0px 10px 0px 0px'
    options_box = ipywidgets.Tab(
        children=[info_wid, image_options_wid, landmark_options_wid,
                  renderer_options_wid, save_figure_wid])
    tab_titles = ['Info', 'Image', 'Landmarks', 'Renderer', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    if n_images > 1:
        wid = ipywidgets.VBox([header_wid, options_box])
    else:
        wid = ipywidgets.HBox([header_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
def visualize_patches(patches, patch_centers, figure_size=(7, 7),
                      browser_style='buttons', custom_info_callback=None):
    r"""
    Widget that allows browsing through a `list` of patch-based images.

    The patches argument can have any of the two formats that are returned from
    the `extract_patches()` and `extract_patches_around_landmarks()` methods
    of `menpo.image.Image`. Specifically it can be:

        1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
        2. `list` of ``n_center * n_offset`` `menpo.image.Image` objects

    The patches can have a combination of different attributes, e.g. number of
    centers, number of offsets, number of channels etc.

    Parameters
    ----------
    patches : `list`
        The `list` of patch-based images to be visualized. It can consist of
        objects with any of the two formats that are returned from the
        `extract_patches()` and `extract_patches_around_landmarks()` methods.
        Specifically, it can either be an
        ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` or a
        `list` of ``n_center * n_offset`` `menpo.image.Image` objects.
    patch_centers : `list` of `menpo.shape.PointCloud`
        The centers to set the patches around. If the `list` has only one
        `menpo.shape.PointCloud` then this will be used for all patches members.
        Otherwise, it needs to have the same length as patches.
    figure_size : (`int`, `int`), optional
        The initial size of the rendered figure.
    browser_style : ``{'buttons', 'slider'}``, optional
        It defines whether the selector of the objects will have the form of
        plus/minus buttons or a slider.
    custom_info_callback: `function` or ``None``, optional
        If not None, it should be a function that accepts an image and returns
        a list of custom messages to be printed per image. Each custom message
        will be printed in a separate line.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print('Initializing...')
    # Make sure that patches is a list even with one member:
    # a single ndarray OR a list of Image objects is a single patches member
    if (isinstance(patches, list) and isinstance(patches[0], Image)) or \
            not isinstance(patches, list):
        patches = [patches]
    # Make sure that patch_centers is a list even with one shape;
    # a single PointCloud is shared across all patches members
    if not isinstance(patch_centers, list):
        patch_centers = [patch_centers] * len(patches)
    elif isinstance(patch_centers, list) and len(patch_centers) == 1:
        patch_centers *= len(patches)
    # Make sure all patch-based images are in the single array format
    for i in range(len(patches)):
        if isinstance(patches[i], list):
            patches[i] = _convert_patches_list_to_single_array(
                patches[i], patch_centers[i].n_points)
    # Get the number of patch_based images
    n_patches = len(patches)
    # Define the styling options
    main_style = 'info'

    # Define render function.
    # NOTE(review): closes over widgets created below; only fires after
    # widget construction.
    def render_function(change):
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        ipydisplay.clear_output(wait=True)
        # get selected index
        i = image_number_wid.selected_values if n_patches > 1 else 0
        # Create options dictionary by merging the selections of all tabs
        options = dict()
        options.update(shape_options_wid.selected_values['lines'])
        options.update(shape_options_wid.selected_values['markers'])
        options.update(
            renderer_options_wid.selected_values['numbering_matplotlib'])
        options.update(renderer_options_wid.selected_values['axes'])
        # 'masked_enabled' is not accepted by render_patches, so drop it
        image_options = dict(image_options_wid.selected_values)
        del image_options['masked_enabled']
        options.update(image_options)
        options.update(patch_options_wid.selected_values)
        # A single colour applies to the whole shape
        options['line_colour'] = options['line_colour'][0]
        options['marker_face_colour'] = options['marker_face_colour'][0]
        options['marker_edge_colour'] = options['marker_edge_colour'][0]
        # Get figure size, scaled by the selected zoom factor
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
            renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
        # Render image with selected options
        save_figure_wid.renderer = render_patches(
            patches=patches[i], patch_centers=patch_centers[i],
            renderer=save_figure_wid.renderer, figure_size=new_figure_size,
            **options)
        # update info text widget
        update_info(patches[i], custom_info_callback=custom_info_callback)

    # Define function that updates the info text
    def update_info(ptchs, custom_info_callback=None):
        # ptchs has shape (n_center, n_offset, n_channels, height, width)
        text_per_line = [
            "> Patch-Based Image with {} patche{} and {} offset{}.".format(
                ptchs.shape[0], 's' * (ptchs.shape[0] > 1), ptchs.shape[1],
                's' * (ptchs.shape[1] > 1)),
            "> Each patch has size {}H x {}W with {} channel{}.".format(
                ptchs.shape[3], ptchs.shape[4], ptchs.shape[2],
                's' * (ptchs.shape[2] > 1)),
            "> min={:.3f}, max={:.3f}".format(ptchs.min(), ptchs.max())]
        if custom_info_callback is not None:
            # iterate over the list of messages returned by the callback
            # function and append them in the text_per_line.
            for msg in custom_info_callback(ptchs):
                text_per_line.append('> {}'.format(msg))
        info_wid.set_widget_state(text_per_line=text_per_line)

    # Create widgets; lines and interpolation start disabled, so the render
    # function is attached only after those defaults are set
    shape_options_wid = Shape2DOptionsWidget(
        labels=None, render_function=None)
    shape_options_wid.line_options_wid.render_lines_switch.button_wid.value = False
    shape_options_wid.add_render_function(render_function)
    patch_options_wid = PatchOptionsWidget(
        n_patches=patches[0].shape[0], n_offsets=patches[0].shape[1],
        render_function=render_function)
    image_options_wid = ImageOptionsWidget(
        n_channels=patches[0].shape[2], image_is_masked=False,
        render_function=None)
    image_options_wid.interpolation_checkbox.button_wid.value = False
    image_options_wid.add_render_function(render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['zoom_one', 'axes', 'numbering_matplotlib'], labels=None,
        axes_x_limits=None, axes_y_limits=None,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()

    # Group widgets
    if n_patches > 1:
        # Define function that updates options' widgets state
        def update_widgets(change):
            # Selected object
            i = image_number_wid.selected_values
            # Update patch options (no callback yet to avoid a double render;
            # the channels update below triggers it)
            patch_options_wid.set_widget_state(
                n_patches=patches[i].shape[0], n_offsets=patches[i].shape[1],
                allow_callback=False)
            # Update channels options
            image_options_wid.set_widget_state(
                n_channels=patches[i].shape[2], image_is_masked=False,
                allow_callback=True)

        # Image selection slider
        index = {'min': 0, 'max': n_patches-1, 'step': 1, 'index': 0}
        image_number_wid = AnimationOptionsWidget(
            index, render_function=update_widgets, index_style=browser_style,
            interval=0.2, description='Image', loop_enabled=True,
            continuous_update=False)
        # Header widget
        logo_wid = LogoWidget(style=main_style)
        logo_wid.layout.margin = '0px 10px 0px 0px'
        header_wid = ipywidgets.HBox([logo_wid, image_number_wid])
        header_wid.layout.align_items = 'center'
        header_wid.layout.margin = '0px 0px 10px 0px'
    else:
        # Header widget
        header_wid = LogoWidget(style=main_style)
        header_wid.layout.margin = '0px 10px 0px 0px'
    options_box = ipywidgets.Tab(
        children=[info_wid, patch_options_wid, image_options_wid,
                  shape_options_wid, renderer_options_wid, save_figure_wid])
    tab_titles = ['Info', 'Patches', 'Image', 'Shape', 'Renderer', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    if n_patches > 1:
        wid = ipywidgets.VBox([header_wid, options_box])
    else:
        wid = ipywidgets.HBox([header_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
def plot_graph(x_axis, y_axis, legend_entries=None, figure_size=(9, 5)):
    r"""
    Widget that allows plotting various curves in a graph.

    The widget has options tabs regarding the graph and the renderer (lines,
    markers, legend, figure, axes, grid) and saving the figure to file.

    Parameters
    ----------
    x_axis : `list` of `float`
        The values of the horizontal axis. Note that these values are common for
        all the curves.
    y_axis : `list` of `lists` of `float`
        A `list` that stores a `list` of values to be plotted for each curve.
    legend_entries : `list` or `str` or ``None``, optional
        The `list` of names that will appear on the legend for each curve. If
        ``None``, then the names format is ``curve {}.format(i)``.
    figure_size : (`int`, `int`), optional
        The initial size of the rendered figure.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    from menpo.visualize import plot_curve
    print('Initializing...')
    # Get number of curves to be plotted
    n_curves = len(y_axis)
    # Define the styling options
    main_style = 'danger'
    # Parse options: auto-generate legend names if none were given
    if legend_entries is None:
        legend_entries = ["curve {}".format(i) for i in range(n_curves)]

    # Define render function.
    # NOTE: closes over plot_wid/save_figure_wid created below; only fires
    # after widget construction.
    def render_function(change):
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        ipydisplay.clear_output(wait=True)
        # plot with selected options; 'zoom' scales the figure and is not a
        # plot_curve argument, so it is removed from the options
        opts = plot_wid.selected_values.copy()
        new_figure_size = (
            plot_wid.selected_values['zoom'][0] * figure_size[0],
            plot_wid.selected_values['zoom'][1] * figure_size[1])
        del opts['zoom']
        save_figure_wid.renderer = plot_curve(
            x_axis=x_axis, y_axis=y_axis, figure_size=new_figure_size,
            figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
            **opts)
        # show plot
        save_figure_wid.renderer.force_draw()

    # Create widgets
    plot_wid = PlotMatplotlibOptionsWidget(
        legend_entries=legend_entries, render_function=render_function)
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()
    # Group widgets: append the export tab to the plot widget's tab box
    logo = LogoWidget(style=main_style)
    logo.layout.margin = '0px 10px 0px 0px'
    tmp_children = list(plot_wid.tab_box.children)
    tmp_children.append(save_figure_wid)
    plot_wid.tab_box.children = tmp_children
    plot_wid.tab_box.set_title(0, 'Labels')
    plot_wid.tab_box.set_title(1, 'Style')
    plot_wid.tab_box.set_title(2, 'Legend')
    plot_wid.tab_box.set_title(3, 'Axes')
    plot_wid.tab_box.set_title(4, 'Zoom')
    plot_wid.tab_box.set_title(5, 'Grid')
    plot_wid.tab_box.set_title(6, 'Export')
    # Display final widget
    wid = ipywidgets.HBox([logo, plot_wid])
    wid.box_style = main_style
    # BUGFIX: trailing space was missing after 'solid', producing an invalid
    # CSS border value (e.g. '2px solid#f2dede'); now consistent with the
    # other widgets in this module.
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    plot_wid.container.border = '0px'
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
def save_matplotlib_figure(renderer):
    r"""
    Widget that allows to save a figure, which was generated with Matplotlib,
    to file.

    Parameters
    ----------
    renderer : `menpo.visualize.viewmatplotlib.MatplotlibRenderer`
        The Matplotlib renderer object.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    # Build the two sub-widgets: the logo and the export options panel
    logo_wid = LogoWidget()
    logo_wid.layout.margin = '0px 10px 0px 0px'
    save_figure_wid = SaveMatplotlibFigureOptionsWidget(
        renderer, style='warning')
    # Lay them out horizontally and show the result
    ipydisplay.display(ipywidgets.HBox([logo_wid, save_figure_wid]))
def save_mayavi_figure(renderer):
    r"""
    Widget that allows to save a figure, which was generated with Mayavi,
    to file.

    Parameters
    ----------
    renderer : `menpo3d.visualize.viewmayavi.MayaviRenderer`
        The Mayavi renderer object.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    # Build the two sub-widgets: the logo and the export options panel
    logo_wid = LogoWidget()
    logo_wid.layout.margin = '0px 10px 0px 0px'
    save_figure_wid = SaveMayaviFigureOptionsWidget(
        renderer, style='warning')
    # Lay them out horizontally and show the result
    ipydisplay.display(ipywidgets.HBox([logo_wid, save_figure_wid]))
def visualize_shape_model_2d(shape_model, n_parameters=5, mode='multiple',
parameters_bounds=(-3.0, 3.0), figure_size=(7, 7)):
r"""
Widget that allows the dynamic visualization of a multi-scale linear
statistical 2D shape model.
Parameters
----------
shape_model : `list` of `menpo.shape.PCAModel` or `subclass`
The multi-scale shape model to be visualized. Note that each level can
have different number of components.
n_parameters : `int` or `list` of `int` or ``None``, optional
The number of principal components to be used for the parameters
sliders. If `int`, then the number of sliders per level is the minimum
between `n_parameters` and the number of active components per level.
If `list` of `int`, then a number of sliders is defined per level.
If ``None``, all the active components per level will have a slider.
mode : ``{'single', 'multiple'}``, optional
If ``'single'``, then only a single slider is constructed along with a
drop down menu. If ``'multiple'``, then a slider is constructed for each
parameter.
parameters_bounds : (`float`, `float`), optional
The minimum and maximum bounds, in std units, for the sliders.
figure_size : (`int`, `int`), optional
The size of the plotted figures.
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
from menpo.visualize.viewmatplotlib import (_set_axes_options,
_parse_axes_limits)
out = ipywidgets.Output()
ipydisplay.display(out)
with out:
ipydisplay.clear_output(wait=True)
print('Initializing...')
# Make sure that shape_model is a list even with one member
if not isinstance(shape_model, list):
shape_model = [shape_model]
# Get the number of levels (i.e. number of shape models)
n_levels = len(shape_model)
# Define the styling options
main_style = 'warning'
# Get the maximum number of components per level
max_n_params = [sp.n_active_components for sp in shape_model]
# Check the given number of parameters (the returned n_parameters is a list
# of len n_scales)
n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)
# Define render function
def render_function(change):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
with out:
ipydisplay.clear_output(wait=True)
# Get selected level
level = 0
if n_levels > 1:
level = level_wid.value
# Compute weights
parameters = model_parameters_wid.selected_values
weights = (parameters *
shape_model[level].eigenvalues[:len(parameters)] ** 0.5)
# Get the mean
mean = shape_model[level].mean()
# Create options dictionary
options = dict()
options.update(shape_options_wid.selected_values['lines'])
options.update(shape_options_wid.selected_values['markers'])
options['image_view'] = shape_options_wid.selected_values['image_view']
options.update(
renderer_options_wid.selected_values['numbering_matplotlib'])
options.update(renderer_options_wid.selected_values['axes'])
# Correct options based on the type of the shape
if hasattr(mean, 'labels'):
# If the shape is a LabelledPointUndirectedGraph ...
# ...use the legend options
options.update(renderer_options_wid.selected_values['legend'])
# ...use with_labels
options['with_labels'] = \
shape_options_wid.selected_values['with_labels']
# ...correct colours
line_colour = []
marker_face_colour = []
marker_edge_colour = []
for lbl in options['with_labels']:
idx = mean.labels.index(lbl)
line_colour.append(options['line_colour'][idx])
marker_face_colour.append(options['marker_face_colour'][idx])
marker_edge_colour.append(options['marker_edge_colour'][idx])
options['line_colour'] = line_colour
options['marker_face_colour'] = marker_face_colour
options['marker_edge_colour'] = marker_edge_colour
else:
# If shape is PointCloud, TriMesh or PointGraph
# ...correct colours
options['line_colour'] = options['line_colour'][0]
options['marker_face_colour'] = options['marker_face_colour'][0]
options['marker_edge_colour'] = options['marker_edge_colour'][0]
# Get figure size
new_figure_size = (
renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
# Render with selected options
if mode_wid.value == 1:
# Deformation mode
# Compute instance
instance = shape_model[level].instance(weights)
# Render mean shape
if mean_wid.selected_values:
save_figure_wid.renderer = mean.view(
figure_id=save_figure_wid.renderer.figure_id,
new_figure=False, figure_size=None,
image_view=options['image_view'],
render_lines=options['render_lines'],
line_colour='yellow', line_style=options['line_style'],
line_width=options['line_width'],
render_markers=options['render_markers'],
marker_style=options['marker_style'],
marker_size=options['marker_size'],
marker_face_colour='yellow', marker_edge_colour='yellow',
marker_edge_width=options['marker_edge_width'])
# Render instance
save_figure_wid.renderer = instance.view(
figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
figure_size=new_figure_size, **options)
# Get instance range
instance_range = instance.range()
else:
# Vectors mode
# Compute instance
instance_lower = shape_model[level].instance([-p for p in weights])
instance_upper = shape_model[level].instance(weights)
# Render mean shape
save_figure_wid.renderer = mean.view(
figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
figure_size=new_figure_size, **options)
# Render vectors
ax = plt.gca()
x_min = np.Inf
y_min = np.Inf
x_max = -np.Inf
y_max = -np.Inf
for p in range(mean.n_points):
xm = mean.points[p, 0]
ym = mean.points[p, 1]
xl = instance_lower.points[p, 0]
yl = instance_lower.points[p, 1]
xu = instance_upper.points[p, 0]
yu = instance_upper.points[p, 1]
if options['image_view']:
# image mode
lines = [[(ym, xm), (yl, xl)], [(ym, xm), (yu, xu)]]
else:
# point cloud mode
lines = [[(xm, ym), (xl, yl)], [(xm, ym), (xu, yu)]]
lc = mc.LineCollection(lines, colors=('g', 'b'),
linestyles='solid', linewidths=2)
# update min, max
y_min = np.min([y_min, xl, xu])
y_max = np.max([y_max, xl, xu])
x_min = np.min([x_min, yl, yu])
x_max = np.max([x_max, yl, yu])
# add collection
ax.add_collection(lc)
# parse axes limits
axes_x_limits, axes_y_limits = _parse_axes_limits(
x_min, x_max, y_min, y_max, options['axes_x_limits'],
options['axes_y_limits'])
_set_axes_options(
ax, render_axes=options['render_axes'],
inverted_y_axis=options['image_view'],
axes_font_name=options['axes_font_name'],
axes_font_size=options['axes_font_size'],
axes_font_style=options['axes_font_style'],
axes_font_weight=options['axes_font_weight'],
axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,
axes_x_ticks=options['axes_x_ticks'],
axes_y_ticks=options['axes_y_ticks'])
# Get instance range
instance_range = mean.range()
# Force rendering
save_figure_wid.renderer.force_draw()
# Update info
update_info(level, instance_range)
# Define function that updates the info text
def update_info(level, instance_range):
text_per_line = [
"> Level {} out of {}".format(level + 1, n_levels),
"> {} components in total".format(shape_model[level].n_components),
"> {} active components".format(
shape_model[level].n_active_components),
"> {:.1f}% variance kept".format(
shape_model[level].variance_ratio() * 100),
"> Instance range: {:.1f} x {:.1f}".format(instance_range[0],
instance_range[1]),
"> {} landmark points, {} features".format(
shape_model[level].mean().n_points,
shape_model[level].n_features)]
info_wid.set_widget_state(text_per_line=text_per_line)
# Plot variance function
def plot_variance(name):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
ipydisplay.clear_output(wait=True)
# Get selected level
level = level_wid.value if n_levels > 1 else 0
# Render
new_figure_size = (
renderer_options_wid.selected_values['zoom_one'] * 10,
renderer_options_wid.selected_values['zoom_one'] * 3)
plt.subplot(121)
save_figure_wid.renderer = shape_model[level].plot_eigenvalues_ratio(
figure_id=save_figure_wid.renderer.figure_id, new_figure=False)
plt.subplot(122)
save_figure_wid.renderer = \
shape_model[level].plot_eigenvalues_cumulative_ratio(
figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
figure_size=new_figure_size)
save_figure_wid.renderer.force_draw()
# Create widgets
mode_dict = OrderedDict()
mode_dict['Deformation'] = 1
mode_dict['Vectors'] = 2
mode_wid = ipywidgets.RadioButtons(
options=mode_dict, description='Mode', value=1,
layout=ipywidgets.Layout(width='6cm'))
mode_wid.observe(render_function, names='value', type='change')
mean_wid = SwitchWidget(
selected_value=False, description='Render mean shape',
description_location='right', switch_type='checkbox')
mean_wid.observe(render_function, names='selected_values', type='change')
# Function that controls mean shape checkbox visibility
def mean_visible(change):
if change['new'] == 1:
mean_wid.button_wid.disabled = False
else:
mean_wid.button_wid.disabled = True
mean_wid.set_widget_state(False, allow_callback=False)
mode_wid.observe(mean_visible, names='value', type='change')
model_parameters_wid = LinearModelParametersWidget(
n_parameters[0], render_function, params_str='Parameter ',
mode=mode, params_bounds=parameters_bounds, params_step=0.1,
plot_variance_visible=True, plot_variance_function=plot_variance,
animation_step=0.5, interval=0., loop_enabled=True,
continuous_update=False)
labels = None
if hasattr(shape_model[0].mean(), 'labels'):
labels = shape_model[0].mean().labels
shape_options_wid = Shape2DOptionsWidget(
labels=labels, render_function=render_function)
renderer_options_wid = RendererOptionsWidget(
options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],
labels=None, axes_x_limits=0.1, axes_y_limits=0.1,
render_function=render_function)
info_wid = TextPrintWidget(text_per_line=[''])
save_figure_wid = SaveMatplotlibFigureOptionsWidget()
# Group widgets
if n_levels > 1:
# Define function that updates options' widgets state
def update_widgets(change):
model_parameters_wid.set_widget_state(
n_parameters=n_parameters[change['new']],
params_str='Parameter ', allow_callback=True)
# Create pyramid radiobuttons
radio_str = OrderedDict()
for l in range(n_levels):
if l == 0:
radio_str["Level {} (low)".format(l)] = l
elif l == n_levels - 1:
radio_str["Level {} (high)".format(l)] = l
else:
radio_str["Level {}".format(l)] = l
level_wid = ipywidgets.RadioButtons(
options=radio_str, description='Pyramid', value=n_levels-1,
layout=ipywidgets.Layout(width='6cm'))
level_wid.observe(update_widgets, names='value', type='change')
level_wid.observe(render_function, names='value', type='change')
radio_children = [level_wid, mode_wid, mean_wid]
else:
radio_children = [mode_wid, mean_wid]
radio_wids = ipywidgets.VBox(radio_children)
tmp_wid = ipywidgets.HBox([radio_wids, model_parameters_wid])
options_box = ipywidgets.Tab(
children=[tmp_wid, shape_options_wid, renderer_options_wid, info_wid,
save_figure_wid])
tab_titles = ['Model', 'Shape', 'Renderer', 'Info', 'Export']
for (k, tl) in enumerate(tab_titles):
options_box.set_title(k, tl)
logo_wid = LogoWidget(style=main_style)
logo_wid.layout.margin = '0px 10px 0px 0px'
wid = ipywidgets.HBox([logo_wid, options_box])
# Set widget's style
wid.box_style = main_style
wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
# Display final widget
final_box = ipywidgets.Box([wid])
final_box.layout.display = 'flex'
ipydisplay.display(final_box)
# Trigger initial visualization
render_function({})
def visualize_shape_model_3d(shape_model, n_parameters=5, mode='multiple',
                             parameters_bounds=(-15.0, 15.0)):
    r"""
    Widget that allows the dynamic visualization of a multi-scale linear
    statistical 3D shape model.

    Parameters
    ----------
    shape_model : `list` of `menpo.shape.PCAModel` or `subclass`
        The multi-scale shape model to be visualized. Note that each level can
        have different number of components.
    n_parameters : `int` or `list` of `int` or ``None``, optional
        The number of principal components to be used for the parameters
        sliders. If `int`, then the number of sliders per level is the minimum
        between `n_parameters` and the number of active components per level.
        If `list` of `int`, then a number of sliders is defined per level.
        If ``None``, all the active components per level will have a slider.
    mode : ``{'single', 'multiple'}``, optional
        If ``'single'``, then only a single slider is constructed along with a
        drop down menu. If ``'multiple'``, then a slider is constructed for each
        parameter.
    parameters_bounds : (`float`, `float`), optional
        The minimum and maximum bounds, in std units, for the sliders.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    # Route status printing through an Output widget so the message can be
    # cleared once the widget is fully constructed (see final `with out:`).
    out = ipywidgets.Output()
    ipydisplay.display(out)
    with out:
        ipydisplay.clear_output(wait=True)
        print('Initializing...')
    # Make sure that shape_model is a list even with one member
    if not isinstance(shape_model, list):
        shape_model = [shape_model]
    # Get the number of levels (i.e. number of shape models)
    n_levels = len(shape_model)
    # Check if the model is TriMesh or any other 3D shape class. TriMesh
    # models get mesh-specific options/rendering; other shapes get
    # line/marker options (see the is_trimesh branches below).
    is_trimesh = isinstance(shape_model[0].template_instance, TriMesh)
    # Define the styling options
    main_style = 'warning'
    # Get the maximum number of components per level
    max_n_params = [sp.n_active_components for sp in shape_model]
    # Check the given number of parameters (the returned n_parameters is a list
    # of len n_scales)
    n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)

    # Define render function. `change` is the ipywidgets change dict (its
    # contents are not used here; all state is read from the widgets).
    def render_function(change):
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        save_figure_wid.renderer.clear_figure()
        with out:
            ipydisplay.clear_output(wait=True)
        # Get selected level
        level = 0
        if n_levels > 1:
            level = level_wid.value
        # Compute weights: slider values are in std units, so scale by the
        # per-component standard deviation (sqrt of the eigenvalues).
        parameters = model_parameters_wid.selected_values
        weights = (parameters *
                   shape_model[level].eigenvalues[:len(parameters)] ** 0.5)
        # Compute instance
        instance = shape_model[level].instance(weights)
        # Create options dictionary
        options = dict()
        if is_trimesh:
            options.update(shape_options_wid.selected_values)
        else:
            options.update(shape_options_wid.selected_values['lines'])
            options.update(shape_options_wid.selected_values['markers'])
            options.update(
                renderer_options_wid.selected_values['numbering_mayavi'])
            # Correct options based on the type of the shape
            if hasattr(instance, 'labels'):
                # If the shape is a LabelledPointUndirectedGraph ...
                # ...use with_labels
                options['with_labels'] = \
                    shape_options_wid.selected_values['with_labels']
                # ...correct colours: keep only the colour entries of the
                # labels that were selected for rendering.
                line_colour = []
                marker_colour = []
                for lbl in options['with_labels']:
                    idx = instance.labels.index(lbl)
                    line_colour.append(options['line_colour'][idx])
                    marker_colour.append(options['marker_colour'][idx])
                options['line_colour'] = line_colour
                options['marker_colour'] = marker_colour
            else:
                # If shape is PointCloud, TriMesh or PointGraph
                # ...correct colours (single colour per option)
                options['line_colour'] = options['line_colour'][0]
                options['marker_colour'] = options['marker_colour'][0]
        # Update info
        update_info(level, instance.range())
        # Render instance
        save_figure_wid.renderer = instance.view(
            figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
            **options)
        # Force rendering
        save_figure_wid.renderer.force_draw()

    # Define function that updates the info text
    def update_info(level, instance_range):
        text_per_line = [
            "> Level {} out of {}".format(level + 1, n_levels),
            "> {} components in total".format(shape_model[level].n_components),
            "> {} active components".format(
                shape_model[level].n_active_components),
            "> {:.1f}% variance kept".format(
                shape_model[level].variance_ratio() * 100),
            "> Instance range: {:.1f} x {:.1f}".format(instance_range[0],
                                                       instance_range[1]),
            "> {} points".format(
                shape_model[level].mean().n_points)]
        info_wid.set_widget_state(text_per_line=text_per_line)

    # Plot variance function (used by the 'Variance' button of the model
    # parameters widget; `name` is the callback argument and is unused).
    def plot_variance(name):
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        # Get selected level
        level = level_wid.value if n_levels > 1 else 0
        # Render the eigenvalue ratio and cumulative ratio side by side.
        with out:
            ipydisplay.clear_output(wait=True)
            plt.subplot(121)
            shape_model[level].plot_eigenvalues_ratio()
            plt.subplot(122)
            shape_model[level].plot_eigenvalues_cumulative_ratio()
            plt.show()

    # Create widgets
    model_parameters_wid = LinearModelParametersWidget(
        n_parameters[0], render_function, params_str='Parameter ',
        mode=mode, params_bounds=parameters_bounds, params_step=0.1,
        plot_variance_visible=True, plot_variance_function=plot_variance,
        animation_step=0.5, interval=0., loop_enabled=True,
        continuous_update=False)
    if is_trimesh:
        shape_options_wid = Mesh3DOptionsWidget(textured=False,
                                                render_function=render_function)
    else:
        labels = None
        if hasattr(shape_model[0].mean(), 'labels'):
            labels = shape_model[0].mean().labels
        shape_options_wid = Shape3DOptionsWidget(labels=labels,
                                                 render_function=render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['numbering_mayavi'], labels=None,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMayaviFigureOptionsWidget()

    # Group widgets
    if n_levels > 1:
        # Define function that updates options' widgets state: sync the
        # number of parameter sliders with the newly selected pyramid level.
        def update_widgets(change):
            model_parameters_wid.set_widget_state(
                n_parameters=n_parameters[change['new']],
                params_str='Parameter ', allow_callback=True)

        # Create pyramid radiobuttons
        radio_str = OrderedDict()
        for l in range(n_levels):
            if l == 0:
                radio_str["Level {} (low)".format(l)] = l
            elif l == n_levels - 1:
                radio_str["Level {} (high)".format(l)] = l
            else:
                radio_str["Level {}".format(l)] = l
        level_wid = ipywidgets.RadioButtons(
            options=radio_str, description='Pyramid', value=n_levels-1,
            layout=ipywidgets.Layout(width='6cm'))
        level_wid.observe(update_widgets, names='value', type='change')
        level_wid.observe(render_function, names='value', type='change')
        tmp_wid = ipywidgets.HBox([level_wid, model_parameters_wid])
    else:
        tmp_wid = ipywidgets.HBox(children=[model_parameters_wid])
    if is_trimesh:
        options_box = ipywidgets.Tab(
            children=[tmp_wid, shape_options_wid, info_wid, save_figure_wid])
        tab_titles = ['Model', 'Mesh', 'Info', 'Export']
    else:
        options_box = ipywidgets.Tab(
            children=[tmp_wid, shape_options_wid, renderer_options_wid, info_wid,
                      save_figure_wid])
        tab_titles = ['Model', 'Shape', 'Renderer', 'Info', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    logo_wid = LogoWidget(style=main_style)
    logo_wid.layout.margin = '0px 10px 0px 0px'
    wid = ipywidgets.HBox([logo_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
    # Clear the 'Initializing...' message now that the widget is live.
    with out:
        print_dynamic('')
def visualize_appearance_model(appearance_model, n_parameters=5,
                               mode='multiple', parameters_bounds=(-3.0, 3.0),
                               figure_size=(7, 7)):
    r"""
    Widget that allows the dynamic visualization of a multi-scale linear
    statistical appearance model.

    Parameters
    ----------
    appearance_model : `list` of `menpo.model.PCAModel` or subclass
        The multi-scale appearance model to be visualized. Note that each level
        can have different number of components.
    n_parameters : `int` or `list` of `int` or ``None``, optional
        The number of principal components to be used for the parameters
        sliders. If `int`, then the number of sliders per level is the minimum
        between `n_parameters` and the number of active components per level.
        If `list` of `int`, then a number of sliders is defined per level.
        If ``None``, all the active components per level will have a slider.
    mode : ``{'single', 'multiple'}``, optional
        If ``'single'``, then only a single slider is constructed along with a
        drop down menu. If ``'multiple'``, then a slider is constructed for each
        parameter.
    parameters_bounds : (`float`, `float`), optional
        The minimum and maximum bounds, in std units, for the sliders.
    figure_size : (`int`, `int`), optional
        The size of the plotted figures.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print('Initializing...')
    # Make sure that appearance_model is a list even with one member
    if not isinstance(appearance_model, list):
        appearance_model = [appearance_model]
    # Get the number of levels (i.e. number of appearance models)
    n_levels = len(appearance_model)
    # Define the styling options
    main_style = 'success'
    # Get the maximum number of components per level
    max_n_params = [ap.n_active_components for ap in appearance_model]
    # Check the given number of parameters (the returned n_parameters is a list
    # of len n_scales)
    n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)

    # Define render function. `change` is the ipywidgets change dict (its
    # contents are not used here; all state is read from the widgets).
    def render_function(change):
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        ipydisplay.clear_output(wait=True)
        # Get selected level
        level = level_wid.value if n_levels > 1 else 0
        # Compute weights and instance: slider values are in std units, so
        # scale by sqrt of the eigenvalues before synthesizing the instance.
        parameters = model_parameters_wid.selected_values
        weights = (parameters *
                   appearance_model[level].eigenvalues[:len(parameters)] ** 0.5)
        instance = appearance_model[level].instance(weights)
        image_is_masked = isinstance(instance, MaskedImage)
        # Currently selected landmark group name
        g = landmark_options_wid.selected_values['landmarks']['group']
        # Create options dictionary by merging the selections of all the
        # option widgets into the kwargs expected by render_image().
        options = dict()
        options.update(landmark_options_wid.selected_values['lines'])
        options.update(landmark_options_wid.selected_values['markers'])
        options.update(
            renderer_options_wid.selected_values['numbering_matplotlib'])
        options.update(renderer_options_wid.selected_values['axes'])
        options.update(renderer_options_wid.selected_values['legend'])
        options.update(image_options_wid.selected_values)
        options.update(landmark_options_wid.selected_values['landmarks'])
        # Correct options based on the type of the shape
        if (instance.has_landmarks and
                hasattr(instance.landmarks[g], 'labels')):
            # If the shape is a LabelledPointUndirectedGraph ...
            # ...correct colours: keep only the colour entries of the labels
            # that were selected for rendering.
            line_colour = []
            marker_face_colour = []
            marker_edge_colour = []
            for lbl in options['with_labels']:
                id = instance.landmarks[g].labels.index(lbl)
                line_colour.append(options['line_colour'][id])
                marker_face_colour.append(options['marker_face_colour'][id])
                marker_edge_colour.append(options['marker_edge_colour'][id])
            options['line_colour'] = line_colour
            options['marker_face_colour'] = marker_face_colour
            options['marker_edge_colour'] = marker_edge_colour
        else:
            # If shape is PointCloud, TriMesh or PointGraph
            # ...correct colours (single colour per option)
            options['line_colour'] = options['line_colour'][0]
            options['marker_face_colour'] = options['marker_face_colour'][0]
            options['marker_edge_colour'] = options['marker_edge_colour'][0]
        # Get figure size, scaled by the selected zoom factor
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] *
            figure_size[0],
            renderer_options_wid.selected_values['zoom_one'] *
            figure_size[1])
        # Render shape with selected options
        save_figure_wid.renderer = render_image(
            image=instance, renderer=save_figure_wid.renderer,
            image_is_masked=image_is_masked, figure_size=new_figure_size,
            **options)
        # Update info
        update_info(instance, level, g)

    # Define function that updates the info text
    def update_info(image, level, group):
        lvl_app_mod = appearance_model[level]
        lp = 0 if group is None else image.landmarks[group].n_points
        text_per_line = [
            "> Level: {} out of {}.".format(level + 1, n_levels),
            "> {} components in total.".format(lvl_app_mod.n_components),
            "> {} active components.".format(lvl_app_mod.n_active_components),
            "> {:.1f}% variance kept.".format(
                lvl_app_mod.variance_ratio() * 100),
            "> Reference shape of size {} with {} channel{}.".format(
                image._str_shape(),
                image.n_channels, 's' * (image.n_channels > 1)),
            "> {} features.".format(lvl_app_mod.n_features),
            "> {} landmark points.".format(lp),
            "> Instance: min={:.3f}, max={:.3f}".format(image.pixels.min(),
                                                        image.pixels.max())]
        info_wid.set_widget_state(text_per_line=text_per_line)

    # Plot variance function (used by the 'Variance' button of the model
    # parameters widget; `name` is the callback argument and is unused).
    def plot_variance(name):
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        ipydisplay.clear_output(wait=True)
        # Get selected level
        level = level_wid.value if n_levels > 1 else 0
        # Render the eigenvalue ratio and cumulative ratio side by side.
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] * 10,
            renderer_options_wid.selected_values['zoom_one'] * 3)
        plt.subplot(121)
        save_figure_wid.renderer = \
            appearance_model[level].plot_eigenvalues_ratio(
                figure_id=save_figure_wid.renderer.figure_id, new_figure=False)
        plt.subplot(122)
        save_figure_wid.renderer = \
            appearance_model[level].plot_eigenvalues_cumulative_ratio(
                figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
                figure_size=new_figure_size)
        save_figure_wid.renderer.force_draw()

    # Create widgets
    model_parameters_wid = LinearModelParametersWidget(
        n_parameters[0], render_function, params_str='Parameter ',
        mode=mode, params_bounds=parameters_bounds, params_step=0.1,
        plot_variance_visible=True, plot_variance_function=plot_variance,
        animation_step=0.5, interval=0., loop_enabled=True,
        continuous_update=False)
    # Initialize the image/landmark widgets from the mean of the first level.
    groups_keys, labels_keys = extract_groups_labels_from_image(
        appearance_model[0].mean())
    image_options_wid = ImageOptionsWidget(
        n_channels=appearance_model[0].mean().n_channels,
        image_is_masked=isinstance(appearance_model[0].mean(),
                                   MaskedImage),
        render_function=render_function)
    landmark_options_wid = LandmarkOptionsWidget(
        group_keys=groups_keys, labels_keys=labels_keys,
        type='2D', render_function=render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],
        axes_x_limits=None, axes_y_limits=None, labels=None,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()

    # Group widgets
    tmp_children = [model_parameters_wid]
    if n_levels > 1:
        # Define function that updates options' widgets state when the
        # pyramid level changes.
        def update_widgets(change):
            value = change['new']
            # Update model parameters widget
            model_parameters_wid.set_widget_state(
                n_parameters[value], params_str='Parameter ',
                allow_callback=False)
            # Update landmarks options
            landmark_options_wid.set_widget_state(
                group_keys=g_keys, labels_keys=l_keys, allow_callback=False)
            # Update channels options; note that only this final update
            # passes allow_callback=True.
            image_options_wid.set_widget_state(
                n_channels=appearance_model[value].mean().n_channels,
                image_is_masked=isinstance(
                    appearance_model[value].mean(), MaskedImage),
                allow_callback=True)

        # Create pyramid radiobuttons
        radio_str = OrderedDict()
        for l in range(n_levels):
            if l == 0:
                radio_str["Level {} (low)".format(l)] = l
            elif l == n_levels - 1:
                radio_str["Level {} (high)".format(l)] = l
            else:
                radio_str["Level {}".format(l)] = l
        level_wid = ipywidgets.RadioButtons(
            options=radio_str, description='Pyramid', value=n_levels-1,
            layout=ipywidgets.Layout(width='6cm'))
        level_wid.observe(update_widgets, names='value', type='change')
        level_wid.observe(render_function, names='value', type='change')
        tmp_children.insert(0, level_wid)
    tmp_wid = ipywidgets.HBox(tmp_children)
    options_box = ipywidgets.Tab(
        children=[tmp_wid, image_options_wid, landmark_options_wid,
                  renderer_options_wid, info_wid, save_figure_wid])
    tab_titles = ['Model', 'Image', 'Landmarks', 'Renderer', 'Info', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    logo_wid = LogoWidget(style=main_style)
    logo_wid.layout.margin = '0px 10px 0px 0px'
    wid = ipywidgets.HBox([logo_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
def visualize_patch_appearance_model(appearance_model, centers,
                                     n_parameters=5, mode='multiple',
                                     parameters_bounds=(-3.0, 3.0),
                                     figure_size=(7, 7)):
    r"""
    Widget that allows the dynamic visualization of a multi-scale linear
    statistical patch-based appearance model.

    Parameters
    ----------
    appearance_model : `list` of `menpo.model.PCAModel` or subclass
        The multi-scale patch-based appearance model to be visualized. Note that
        each level can have different number of components.
    centers : `list` of `menpo.shape.PointCloud` or subclass
        The centers to set the patches around. If the `list` has only one
        `menpo.shape.PointCloud` then this will be used for all appearance model
        levels. Otherwise, it needs to have the same length as
        `appearance_model`.
    n_parameters : `int` or `list` of `int` or ``None``, optional
        The number of principal components to be used for the parameters
        sliders. If `int`, then the number of sliders per level is the minimum
        between `n_parameters` and the number of active components per level.
        If `list` of `int`, then a number of sliders is defined per level.
        If ``None``, all the active components per level will have a slider.
    mode : ``{'single', 'multiple'}``, optional
        If ``'single'``, then only a single slider is constructed along with a
        drop down menu. If ``'multiple'``, then a slider is constructed for each
        parameter.
    parameters_bounds : (`float`, `float`), optional
        The minimum and maximum bounds, in std units, for the sliders.
    figure_size : (`int`, `int`), optional
        The size of the plotted figures.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print('Initializing...')
    # Make sure that appearance_model is a list even with one member
    if not isinstance(appearance_model, list):
        appearance_model = [appearance_model]
    # Get the number of levels (i.e. number of appearance models)
    n_levels = len(appearance_model)
    # Make sure that centers is a list even with one pointcloud; a single
    # (or length-1) centers entry is shared across all levels.
    if not isinstance(centers, list):
        centers = [centers] * n_levels
    elif isinstance(centers, list) and len(centers) == 1:
        centers *= n_levels
    # Define the styling options
    main_style = 'success'
    # Get the maximum number of components per level
    max_n_params = [ap.n_active_components for ap in appearance_model]
    # Check the given number of parameters (the returned n_parameters is a list
    # of len n_scales)
    n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)

    # Define render function. `change` is the ipywidgets change dict (its
    # contents are not used here; all state is read from the widgets).
    def render_function(change):
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        ipydisplay.clear_output(wait=True)
        # Get selected level
        level = level_wid.value if n_levels > 1 else 0
        # Compute weights and instance: slider values are in std units, so
        # scale by sqrt of the eigenvalues before synthesizing the instance.
        parameters = model_parameters_wid.selected_values
        weights = (parameters *
                   appearance_model[level].eigenvalues[:len(parameters)] ** 0.5)
        instance = appearance_model[level].instance(weights)
        # Create options dictionary by merging the selections of all the
        # option widgets into the kwargs expected by render_patches().
        options = dict()
        options.update(shape_options_wid.selected_values['lines'])
        options.update(shape_options_wid.selected_values['markers'])
        options.update(
            renderer_options_wid.selected_values['numbering_matplotlib'])
        options.update(renderer_options_wid.selected_values['axes'])
        # Drop 'masked_enabled' from the image options — it is not forwarded
        # to render_patches() below.
        image_options = dict(image_options_wid.selected_values)
        del image_options['masked_enabled']
        options.update(image_options)
        options.update(patch_options_wid.selected_values)
        # Only the first colour of each colour list is used.
        options['line_colour'] = options['line_colour'][0]
        options['marker_face_colour'] = options['marker_face_colour'][0]
        options['marker_edge_colour'] = options['marker_edge_colour'][0]
        # Get figure size, scaled by the selected zoom factor
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
            renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
        # Render image with selected options
        save_figure_wid.renderer = render_patches(
            patches=instance.pixels, patch_centers=centers[level],
            renderer=save_figure_wid.renderer, figure_size=new_figure_size,
            **options)
        # Update info
        update_info(instance, level)

    # Define function that updates the info text. The shape indexing below
    # (and in PatchOptionsWidget) treats image.pixels as 5D:
    # (n_patches, n_offsets, n_channels, height, width).
    def update_info(image, level):
        lvl_app_mod = appearance_model[level]
        text_per_line = [
            "> Level: {} out of {}.".format(level + 1, n_levels),
            "> {} components in total.".format(lvl_app_mod.n_components),
            "> {} active components.".format(lvl_app_mod.n_active_components),
            "> {:.1f}% variance kept.".format(
                lvl_app_mod.variance_ratio() * 100),
            "> Each patch has size {}H x {}W with {} channel{}.".format(
                image.pixels.shape[3], image.pixels.shape[4],
                image.pixels.shape[2], 's' * (image.pixels.shape[2] > 1)),
            "> {} features.".format(lvl_app_mod.n_features),
            "> {} landmark points.".format(image.pixels.shape[0]),
            "> Instance: min={:.3f}, max={:.3f}".format(image.pixels.min(),
                                                        image.pixels.max())]
        info_wid.set_widget_state(text_per_line=text_per_line)

    # Plot variance function (used by the 'Variance' button of the model
    # parameters widget; `name` is the callback argument and is unused).
    def plot_variance(name):
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        ipydisplay.clear_output(wait=True)
        # Get selected level
        level = 0
        if n_levels > 1:
            level = level_wid.value
        # Render the eigenvalue ratio and cumulative ratio side by side.
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] * 10,
            renderer_options_wid.selected_values['zoom_one'] * 3)
        plt.subplot(121)
        save_figure_wid.renderer = \
            appearance_model[level].plot_eigenvalues_ratio(
                figure_id=save_figure_wid.renderer.figure_id, new_figure=False)
        plt.subplot(122)
        save_figure_wid.renderer = \
            appearance_model[level].plot_eigenvalues_cumulative_ratio(
                figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
                figure_size=new_figure_size)
        save_figure_wid.renderer.force_draw()

    # Create widgets
    model_parameters_wid = LinearModelParametersWidget(
        n_parameters[0], render_function, params_str='Parameter ',
        mode=mode, params_bounds=parameters_bounds, params_step=0.1,
        plot_variance_visible=True, plot_variance_function=plot_variance,
        animation_step=0.5, interval=0., loop_enabled=True,
        continuous_update=False)
    # The shape options are created without a render callback; the default
    # line rendering is switched off first and the callback attached only
    # afterwards, so this change does not trigger a render.
    shape_options_wid = Shape2DOptionsWidget(
        labels=None, render_function=None)
    shape_options_wid.line_options_wid.render_lines_switch.button_wid.value = False
    shape_options_wid.add_render_function(render_function)
    patch_options_wid = PatchOptionsWidget(
        n_patches=appearance_model[0].mean().pixels.shape[0],
        n_offsets=appearance_model[0].mean().pixels.shape[1],
        render_function=render_function)
    # Same pattern for the image options: interpolation is switched off
    # before the render callback is attached.
    image_options_wid = ImageOptionsWidget(
        n_channels=appearance_model[0].mean().pixels.shape[2],
        image_is_masked=isinstance(appearance_model[0].mean(), MaskedImage),
        render_function=None)
    image_options_wid.interpolation_checkbox.button_wid.value = False
    image_options_wid.add_render_function(render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['zoom_one', 'axes', 'numbering_matplotlib'], labels=None,
        axes_x_limits=None, axes_y_limits=None, render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()

    # Group widgets
    tmp_children = [model_parameters_wid]
    if n_levels > 1:
        # Define function that updates options' widgets state when the
        # pyramid level changes.
        def update_widgets(change):
            value = change['new']
            # Update model parameters widget
            model_parameters_wid.set_widget_state(
                n_parameters[value], params_str='Parameter ',
                allow_callback=False)
            # Update patch options
            patch_options_wid.set_widget_state(
                n_patches=appearance_model[value].mean().pixels.shape[0],
                n_offsets=appearance_model[value].mean().pixels.shape[1],
                allow_callback=False)
            # Update channels options; note that only this final update
            # passes allow_callback=True.
            image_options_wid.set_widget_state(
                n_channels=appearance_model[value].mean().pixels.shape[2],
                image_is_masked=isinstance(appearance_model[value].mean(),
                                           MaskedImage),
                allow_callback=True)

        # Define pyramid radiobuttons
        radio_str = OrderedDict()
        for l in range(n_levels):
            if l == 0:
                radio_str["Level {} (low)".format(l)] = l
            elif l == n_levels - 1:
                radio_str["Level {} (high)".format(l)] = l
            else:
                radio_str["Level {}".format(l)] = l
        level_wid = ipywidgets.RadioButtons(
            options=radio_str, description='Pyramid', value=n_levels-1,
            layout=ipywidgets.Layout(width='6cm'))
        level_wid.observe(update_widgets, names='value', type='change')
        level_wid.observe(render_function, names='value', type='change')
        tmp_children.insert(0, level_wid)
    tmp_wid = ipywidgets.HBox(tmp_children)
    options_box = ipywidgets.Tab(
        children=[tmp_wid, patch_options_wid, image_options_wid,
                  shape_options_wid, renderer_options_wid, info_wid,
                  save_figure_wid])
    tab_titles = ['Model', 'Patches', 'Channels', 'Shape', 'Renderer', 'Info',
                  'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    logo_wid = LogoWidget(style=main_style)
    logo_wid.layout.margin = '0px 10px 0px 0px'
    wid = ipywidgets.HBox([logo_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
def visualize_morphable_model(mm, n_shape_parameters=5, n_texture_parameters=5,
                              mode='multiple', parameters_bounds=(-15.0, 15.0)):
    r"""
    Widget that allows the dynamic visualization of a 3D Morphable Model.
    Parameters
    ----------
    mm : `menpo3d.morhpablemodel.ColouredMorphableModel` or `subclass`
        The multi-scale 3D Morphable Model to be visualized.
    n_shape_parameters : `int` or `list` of `int` or ``None``, optional
        The number of principal components to be used for the shape parameters
        sliders. If `int`, then the number of sliders per level is the minimum
        between `n_parameters` and the number of active components per level.
        If `list` of `int`, then a number of sliders is defined per level.
        If ``None``, all the active components per level will have a slider.
    n_texture_parameters : `int` or `list` of `int` or ``None``, optional
        The number of principal components to be used for the tecture
        parameters sliders. If `int`, then the number of sliders per level is
        the minimum between `n_parameters` and the number of active components
        per level. If `list` of `int`, then a number of sliders is defined per
        level. If ``None``, all the active components per level will have a
        slider.
    mode : ``{'single', 'multiple'}``, optional
        If ``'single'``, then only a single slider is constructed along with a
        drop down menu. If ``'multiple'``, then a slider is constructed for each
        parameter.
    parameters_bounds : (`float`, `float`), optional
        The minimum and maximum bounds, in std units, for the sliders.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print_dynamic('Initializing...')
    # Define the styling options
    main_style = 'info'
    # Check the given number of parameters
    n_shape_parameters = check_n_parameters(
        n_shape_parameters, 1, [mm.shape_model.n_active_components])
    n_texture_parameters = check_n_parameters(
        n_texture_parameters, 1, [mm.texture_model.n_active_components])
    # Define render function
    def render_function(change):
        # Synthesize a model instance from the current slider values and
        # render it in the existing figure.
        # Clear current figure
        save_figure_wid.renderer.clear_figure()
        ipydisplay.clear_output(wait=True)
        # Compute weights
        # The sliders are expressed in std units, so each value is scaled by
        # sqrt(eigenvalue) to obtain the actual model weight.
        shape_weights = shape_model_parameters_wid.selected_values
        shape_weights = (
            shape_weights *
            mm.shape_model.eigenvalues[:len(shape_weights)] ** 0.5)
        texture_weights = texture_model_parameters_wid.selected_values
        texture_weights = (
            texture_weights *
            mm.texture_model.eigenvalues[:len(texture_weights)] ** 0.5)
        instance = mm.instance(shape_weights=shape_weights,
                               texture_weights=texture_weights)
        # TODO: Is this really needed?
        instance = instance.clip_texture()
        # Update info
        update_info(mm, instance)
        # Render instance
        save_figure_wid.renderer = instance.view(
            figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
            **mesh_options_wid.selected_values)
        # Force rendering
        save_figure_wid.renderer.force_draw()
    # Define function that updates the info text
    def update_info(mm, instance):
        # Refresh the info tab with model statistics and the colour range of
        # the currently rendered instance.
        text_per_line = [
            "> {} vertices, {} triangles".format(mm.n_vertices,
                                                 mm.n_triangles),
            "> {} shape components ({:.2f}% of variance)".format(
                mm.shape_model.n_components,
                mm.shape_model.variance_ratio() * 100),
            "> {} texture channels".format(mm.n_channels),
            "> {} texture components ({:.2f}% of variance)".format(
                mm.texture_model.n_components,
                mm.texture_model.variance_ratio() * 100),
            "> Instance: min={:.3f} , max={:.3f}".format(
                instance.colours.min(), instance.colours.max())]
        info_wid.set_widget_state(text_per_line=text_per_line)
    # Plot shape variance function
    def plot_shape_variance(name):
        # Plot the eigenvalue (ratio and cumulative ratio) curves of the
        # shape model side by side.
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        ipydisplay.clear_output(wait=True)
        # Render
        plt.subplot(121)
        mm.shape_model.plot_eigenvalues_ratio()
        plt.subplot(122)
        mm.shape_model.plot_eigenvalues_cumulative_ratio()
        plt.show()
    # Plot texture variance function
    def plot_texture_variance(name):
        # Plot the eigenvalue (ratio and cumulative ratio) curves of the
        # texture model side by side.
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        ipydisplay.clear_output(wait=True)
        # Render
        plt.subplot(121)
        mm.texture_model.plot_eigenvalues_ratio()
        plt.subplot(122)
        mm.texture_model.plot_eigenvalues_cumulative_ratio()
        plt.show()
    # Create widgets
    shape_model_parameters_wid = LinearModelParametersWidget(
        n_shape_parameters[0], render_function, params_str='Parameter ',
        mode=mode, params_bounds=parameters_bounds, params_step=0.1,
        plot_variance_visible=True, plot_variance_function=plot_shape_variance,
        animation_step=0.5, interval=0., loop_enabled=True)
    texture_model_parameters_wid = LinearModelParametersWidget(
        n_texture_parameters[0], render_function, params_str='Parameter ',
        mode=mode, params_bounds=parameters_bounds, params_step=0.1,
        plot_variance_visible=True, plot_variance_function=plot_texture_variance,
        animation_step=0.5, interval=0., loop_enabled=True)
    mesh_options_wid = Mesh3DOptionsWidget(textured=True,
                                           render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMayaviFigureOptionsWidget()
    # Group widgets
    model_parameters_wid = ipywidgets.HBox(
        [ipywidgets.Tab([shape_model_parameters_wid,
                         texture_model_parameters_wid])])
    model_parameters_wid.children[0].set_title(0, 'Shape')
    model_parameters_wid.children[0].set_title(1, 'Texture')
    options_box = ipywidgets.Tab([model_parameters_wid, mesh_options_wid,
                                  info_wid, save_figure_wid])
    tab_titles = ['Model', 'Mesh', 'Info', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    logo_wid = LogoWidget(style=main_style)
    logo_wid.layout.margin = '0px 10px 0px 0px'
    wid = ipywidgets.HBox([logo_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
    print_dynamic('')
def webcam_widget(canvas_width=640, hd=True, n_preview_windows=5):
    r"""
    Webcam widget for taking snapshots. The snapshots are dynamically previewed
    in a FIFO stack of thumbnails.
    Parameters
    ----------
    canvas_width : `int`, optional
        The initial width of the rendered canvas. Note that this doesn't actually
        change the webcam resolution. It simply rescales the rendered image, as
        well as the size of the returned screenshots.
    hd : `bool`, optional
        If ``True``, then the webcam will be set to high definition (HD), i.e.
        720 x 1280. Otherwise the default resolution will be used.
    n_preview_windows : `int`, optional
        The number of preview thumbnails that will be used as a FIFO stack to
        show the captured screenshots. It must be at least 4.
    Returns
    -------
    snapshots : `list` of `menpo.image.Image`
        The list of captured images.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    # The returned list is populated in-place by the callback below, so the
    # caller sees new snapshots appear as they are taken.
    images = []

    def update(_):
        # Append the most recent snapshot to the shared list.
        images.append(wid.selected_values[-1])

    # Create widgets
    wid = CameraSnapshotWidget(
        canvas_width=canvas_width, hd=hd, n_preview_windows=n_preview_windows,
        preview_windows_margin=3, style='danger', preview_style='warning',
        render_function=update)
    # Fix: a space is required after 'solid' for a valid CSS border
    # declaration, consistent with every other widget in this module.
    wid.container.layout.border = (
        '2px solid ' + map_styles_to_hex_colours('danger'))
    # Display widget
    ipydisplay.display(wid)
    # Return the (live) list of captured snapshots
    return images
| 42.550546 | 88 | 0.631021 | from collections import Sized, OrderedDict
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import numpy as np
import ipywidgets
import IPython.display as ipydisplay
from menpo.base import name_of_callable
from menpo.image import MaskedImage, Image
from menpo.image.base import _convert_patches_list_to_single_array
from menpo.shape import TriMesh, ColouredTriMesh, TexturedTriMesh
from menpo.visualize import print_dynamic
from menpo.landmark import LandmarkManager
from .options import (RendererOptionsWidget, TextPrintWidget,
SaveMatplotlibFigureOptionsWidget, AnimationOptionsWidget,
ImageOptionsWidget, LandmarkOptionsWidget,
PlotMatplotlibOptionsWidget, PatchOptionsWidget,
LinearModelParametersWidget, CameraSnapshotWidget,
Shape2DOptionsWidget, Shape3DOptionsWidget,
SaveMayaviFigureOptionsWidget, Mesh3DOptionsWidget)
from .tools import LogoWidget, SwitchWidget
from .utils import (extract_group_labels_from_landmarks,
extract_groups_labels_from_image, render_image,
render_patches)
from .checks import check_n_parameters
from .style import map_styles_to_hex_colours
def menpowidgets_src_dir_path():
    r"""Return the directory that contains this module as an absolute `Path`."""
    import os.path
    from pathlib import Path
    absolute_file = os.path.abspath(__file__)
    return Path(absolute_file).parent
def visualize_shapes_2d(shapes, figure_size=(7, 7), browser_style='buttons',
                        custom_info_callback=None):
    r"""
    Widget for browsing and rendering a `list` of 2D shapes.

    Shapes that expose a ``labels`` attribute additionally get per-label
    colour selection and legend options.

    Parameters
    ----------
    shapes : `list` of 2D shapes or a single shape
        The 2D shapes to be visualized. A single shape object is also
        accepted.
    figure_size : (`int`, `int`), optional
        The initial size of the rendered figure. It gets multiplied by the
        zoom value selected in the renderer options tab.
    browser_style : ``{'buttons', 'slider'}``, optional
        The style of the shape selector that appears when more than one
        shape is provided.
    custom_info_callback : `callable` or ``None``, optional
        If not ``None``, it is called with the currently selected shape and
        must return an iterable of extra messages to be printed in the info
        tab, one per line.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print('Initializing...')
    # Make sure that shapes is a list, even with a single member
    if not isinstance(shapes, Sized):
        shapes = [shapes]
    n_shapes = len(shapes)
    # Define the styling options
    main_style = 'warning'
    # Define render function
    def render_function(change):
        # Re-render the currently selected shape with the current options.
        ipydisplay.clear_output(wait=True)
        i = shape_number_wid.selected_values if n_shapes > 1 else 0
        # Collect the rendering options from the various widgets
        options = dict()
        options.update(shape_options_wid.selected_values['lines'])
        options.update(shape_options_wid.selected_values['markers'])
        options['image_view'] = shape_options_wid.selected_values['image_view']
        options.update(
            renderer_options_wid.selected_values['numbering_matplotlib'])
        options.update(renderer_options_wid.selected_values['axes'])
        if hasattr(shapes[i], 'labels'):
            # Labelled shape: use the legend options and keep only the
            # colours of the labels that are actually selected for rendering.
            options.update(renderer_options_wid.selected_values['legend'])
            options['with_labels'] = \
                shape_options_wid.selected_values['with_labels']
            line_colour = []
            marker_face_colour = []
            marker_edge_colour = []
            for lbl in options['with_labels']:
                idx = shapes[i].labels.index(lbl)
                line_colour.append(options['line_colour'][idx])
                marker_face_colour.append(options['marker_face_colour'][idx])
                marker_edge_colour.append(options['marker_edge_colour'][idx])
            options['line_colour'] = line_colour
            options['marker_face_colour'] = marker_face_colour
            options['marker_edge_colour'] = marker_edge_colour
        else:
            # Unlabelled shape: a single colour per option is expected.
            options['line_colour'] = options['line_colour'][0]
            options['marker_face_colour'] = options['marker_face_colour'][0]
            options['marker_edge_colour'] = options['marker_edge_colour'][0]
        # Scale the figure size by the selected zoom
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
            renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
        save_figure_wid.renderer = shapes[i].view(
            figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
            figure_size=new_figure_size, **options)
        save_figure_wid.renderer.force_draw()
        update_info(shapes[i], custom_info_callback=custom_info_callback)
    # Define function that updates the info text
    def update_info(shape, custom_info_callback=None):
        # Refresh the info tab with statistics of the given shape.
        min_b, max_b = shape.bounds()
        rang = shape.range()
        cm = shape.centre()
        text_per_line = [
            "> {}".format(name_of_callable(shape)),
            "> {} points".format(shape.n_points),
            "> Bounds: [{0:.1f}-{1:.1f}]W, [{2:.1f}-{3:.1f}]H".format(
                min_b[0], max_b[0], min_b[1], max_b[1]),
            "> Range: {0:.1f}W, {1:.1f}H".format(rang[0], rang[1]),
            "> Centre of mass: ({0:.1f}, {1:.1f})".format(cm[0], cm[1]),
            "> Norm: {0:.2f}".format(shape.norm())]
        if custom_info_callback is not None:
            # Append any custom messages supplied by the caller
            for msg in custom_info_callback(shape):
                text_per_line.append('> {}'.format(msg))
        info_wid.set_widget_state(text_per_line=text_per_line)
    # If the first shape is labelled, initialize the options with its labels
    labels = None
    if hasattr(shapes[0], 'labels'):
        labels = shapes[0].labels
    # Create widgets
    shape_options_wid = Shape2DOptionsWidget(
        labels=labels, render_function=render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],
        labels=None, axes_x_limits=0.1, axes_y_limits=0.1,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()
    # Group widgets
    if n_shapes > 1:
        # Define function that updates options' widgets state
        def update_widgets(change):
            # Get current shape and check if it has labels
            i = change['new']
            labels = None
            if hasattr(shapes[i], 'labels'):
                labels = shapes[i].labels
            # Update shape options
            shape_options_wid.set_widget_state(labels=labels,
                                               allow_callback=True)
        # Shape selection slider
        index = {'min': 0, 'max': n_shapes-1, 'step': 1, 'index': 0}
        shape_number_wid = AnimationOptionsWidget(
            index, render_function=update_widgets, index_style=browser_style,
            interval=0.2, description='Shape', loop_enabled=True,
            continuous_update=False)
        # Header widget
        logo_wid = LogoWidget(style=main_style)
        logo_wid.layout.margin = '0px 10px 0px 0px'
        header_wid = ipywidgets.HBox([logo_wid, shape_number_wid])
        header_wid.layout.align_items = 'center'
        header_wid.layout.margin = '0px 0px 10px 0px'
    else:
        # Header widget
        header_wid = LogoWidget(style=main_style)
        header_wid.layout.margin = '0px 10px 0px 0px'
    options_box = ipywidgets.Tab(
        [info_wid, shape_options_wid, renderer_options_wid, save_figure_wid])
    tab_titles = ['Info', 'Shape', 'Renderer', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    if n_shapes > 1:
        wid = ipywidgets.VBox([header_wid, options_box])
    else:
        wid = ipywidgets.HBox([header_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
def visualize_shapes_3d(shapes, browser_style='buttons',
                        custom_info_callback=None):
    r"""
    Widget for browsing and rendering a `list` of 3D shapes (Mayavi-based).

    `TriMesh` instances are rendered with mesh-specific options (wireframe or
    fancymesh); other shapes use the regular line/marker options. Shapes that
    expose a ``labels`` attribute additionally get per-label colour options.

    Parameters
    ----------
    shapes : `list` of 3D shapes or a single shape
        The 3D shapes to be visualized. A single shape object is also
        accepted.
    browser_style : ``{'buttons', 'slider'}``, optional
        The style of the shape selector that appears when more than one
        shape is provided.
    custom_info_callback : `callable` or ``None``, optional
        If not ``None``, it is called with the currently selected shape and
        must return an iterable of extra messages to be printed in the info
        tab, one per line.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print_dynamic('Initializing...')
    # Make sure that shapes is a list, even with a single member
    if not isinstance(shapes, Sized):
        shapes = [shapes]
    n_shapes = len(shapes)
    # Define the styling options
    main_style = 'warning'
    # Define render function
    def render_function(change):
        # Re-render the currently selected shape with the current options.
        save_figure_wid.renderer.clear_figure()
        ipydisplay.clear_output(wait=True)
        i = shape_number_wid.selected_values if n_shapes > 1 else 0
        update_info(shapes[i], custom_info_callback=custom_info_callback)
        options = dict()
        if isinstance(shapes[i], TriMesh):
            # TriMesh has a different set of viewing options; map the common
            # line/marker selections onto them. The remaining widget
            # options will have no effect on rendering...
            options['mesh_type'] = 'wireframe'
            if shape_options_wid.selected_values['markers']['render_markers']:
                options['mesh_type'] = 'fancymesh'
            options['line_width'] = \
                shape_options_wid.selected_values['lines']['line_width']
            options['colour'] = \
                shape_options_wid.selected_values['lines']['line_colour'][0]
            options['marker_style'] = \
                shape_options_wid.selected_values['markers']['marker_style']
            options['marker_size'] = \
                shape_options_wid.selected_values['markers']['marker_size']
            options['marker_resolution'] = \
                shape_options_wid.selected_values['markers']['marker_resolution']
            options['step'] = \
                shape_options_wid.selected_values['markers']['step']
        else:
            options.update(shape_options_wid.selected_values['lines'])
            options.update(shape_options_wid.selected_values['markers'])
            options.update(
                renderer_options_wid.selected_values['numbering_mayavi'])
            # Correct options based on the type of the shape
            if hasattr(shapes[i], 'labels'):
                # If the shape is a LabelledPointUndirectedGraph ...
                # ...use with_labels
                options['with_labels'] = \
                    shape_options_wid.selected_values['with_labels']
                # ...correct colours
                line_colour = []
                marker_colour = []
                for lbl in options['with_labels']:
                    idx = shapes[i].labels.index(lbl)
                    line_colour.append(options['line_colour'][idx])
                    marker_colour.append(options['marker_colour'][idx])
                options['line_colour'] = line_colour
                options['marker_colour'] = marker_colour
            else:
                # If shape is PointCloud, TriMesh or PointGraph
                # ...correct colours
                options['line_colour'] = options['line_colour'][0]
                options['marker_colour'] = options['marker_colour'][0]
        # Render shape with selected options
        save_figure_wid.renderer = shapes[i].view(
            figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
            alpha=1.0, **options)
        # Force rendering
        save_figure_wid.renderer.force_draw()
    # Define function that updates the info text
    def update_info(shape, custom_info_callback=None):
        # Refresh the info tab with statistics of the given shape.
        min_b, max_b = shape.bounds()
        rang = shape.range()
        cm = shape.centre()
        text_per_line = [
            "> {}".format(name_of_callable(shape)),
            "> {} points".format(shape.n_points),
            "> Bounds: [{0:.1f}-{1:.1f}]X, [{2:.1f}-{3:.1f}]Y, "
            "[{4:.1f}-{5:.1f}]Z".format(min_b[0], max_b[0], min_b[1], max_b[1],
                                        min_b[2], max_b[2]),
            "> Range: {0:.1f}X, {1:.1f}Y, {2:.1f}Z".format(rang[0], rang[1],
                                                           rang[2]),
            "> Centre of mass: ({0:.1f}X, {1:.1f}Y, {2:.1f}Z)".format(
                cm[0], cm[1], cm[2]),
            "> Norm: {0:.2f}".format(shape.norm())]
        if custom_info_callback is not None:
            # iterate over the list of messages returned by the callback
            # function and append them in the text_per_line.
            for msg in custom_info_callback(shape):
                text_per_line.append('> {}'.format(msg))
        info_wid.set_widget_state(text_per_line=text_per_line)
    # If the object is a LabelledPointUndirectedGraph, grab the labels
    labels = None
    if hasattr(shapes[0], 'labels'):
        labels = shapes[0].labels
    # Create widgets
    shape_options_wid = Shape3DOptionsWidget(
        labels=labels, render_function=render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['numbering_mayavi'], labels=None,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMayaviFigureOptionsWidget()
    # Group widgets
    if n_shapes > 1:
        # Define function that updates options' widgets state
        def update_widgets(change):
            i = change['new']
            labels = None
            if hasattr(shapes[i], 'labels'):
                labels = shapes[i].labels
            shape_options_wid.set_widget_state(labels=labels,
                                               allow_callback=True)
        # Shape selection slider
        index = {'min': 0, 'max': n_shapes-1, 'step': 1, 'index': 0}
        shape_number_wid = AnimationOptionsWidget(
            index, render_function=update_widgets, index_style=browser_style,
            interval=0.2, description='Shape', loop_enabled=True,
            continuous_update=False)
        # Header widget
        logo_wid = LogoWidget(style=main_style)
        logo_wid.layout.margin = '0px 10px 0px 0px'
        header_wid = ipywidgets.HBox([logo_wid, shape_number_wid])
        header_wid.layout.align_items = 'center'
        header_wid.layout.margin = '0px 0px 10px 0px'
    else:
        # Header widget
        header_wid = LogoWidget(style=main_style)
        header_wid.layout.margin = '0px 10px 0px 0px'
    options_box = ipywidgets.Tab(
        [info_wid, shape_options_wid, renderer_options_wid, save_figure_wid])
    tab_titles = ['Info', 'Shape', 'Renderer', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    if n_shapes > 1:
        wid = ipywidgets.VBox([header_wid, options_box])
    else:
        wid = ipywidgets.HBox([header_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
    print_dynamic('')
def visualize_landmarks_2d(landmarks, figure_size=(7, 7),
                           browser_style='buttons', custom_info_callback=None):
    r"""
    Widget for browsing and rendering a `list` of 2D landmark managers.

    Parameters
    ----------
    landmarks : `list` of `menpo.landmark.LandmarkManager` or a single manager
        The landmark managers to be visualized. A single
        `LandmarkManager` is also accepted.
    figure_size : (`int`, `int`), optional
        The initial size of the rendered figure. It gets multiplied by the
        zoom value selected in the renderer options tab.
    browser_style : ``{'buttons', 'slider'}``, optional
        The style of the selector that appears when more than one landmark
        manager is provided.
    custom_info_callback : `callable` or ``None``, optional
        If not ``None``, it is called with the currently selected landmark
        group and must return an iterable of extra messages to be printed in
        the info tab, one per line.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print('Initializing...')
    # Make sure that landmarks is a list even with one landmark manager member
    if isinstance(landmarks, LandmarkManager):
        landmarks = [landmarks]
    # Get the number of landmark managers
    n_landmarks = len(landmarks)
    # Define the styling options
    main_style = 'info'
    # Define render function
    def render_function(change):
        # Clear current figure, but wait until the generation of the new data
        # that will be rendered
        ipydisplay.clear_output(wait=True)
        # get selected index and selected group
        i = landmark_number_wid.selected_values if n_landmarks > 1 else 0
        g = landmark_options_wid.selected_values['landmarks']['group']
        if landmark_options_wid.selected_values['landmarks']['render_landmarks']:
            # get shape
            shape = landmarks[i][g]
            # Create options dictionary
            options = dict()
            options.update(landmark_options_wid.selected_values['lines'])
            options.update(landmark_options_wid.selected_values['markers'])
            options['image_view'] = landmark_options_wid.selected_values['image_view']
            options.update(
                renderer_options_wid.selected_values['numbering_matplotlib'])
            options.update(renderer_options_wid.selected_values['axes'])
            # Correct options based on the type of the shape
            if hasattr(shape, 'labels'):
                # If the shape is a LabelledPointUndirectedGraph ...
                # ...use the legend options
                options.update(renderer_options_wid.selected_values['legend'])
                # ...use with_labels
                options['with_labels'] = \
                    landmark_options_wid.selected_values['landmarks']['with_labels']
                # ...correct colours
                # NOTE(review): the loop variable 'id' shadows the builtin of
                # the same name; harmless here but worth renaming eventually.
                line_colour = []
                marker_face_colour = []
                marker_edge_colour = []
                for lbl in options['with_labels']:
                    id = shape.labels.index(lbl)
                    line_colour.append(options['line_colour'][id])
                    marker_face_colour.append(options['marker_face_colour'][id])
                    marker_edge_colour.append(options['marker_edge_colour'][id])
                options['line_colour'] = line_colour
                options['marker_face_colour'] = marker_face_colour
                options['marker_edge_colour'] = marker_edge_colour
            else:
                # If shape is PointCloud, TriMesh or PointGraph
                # ...correct colours
                options['line_colour'] = options['line_colour'][0]
                options['marker_face_colour'] = options['marker_face_colour'][0]
                options['marker_edge_colour'] = options['marker_edge_colour'][0]
            # Get figure size
            new_figure_size = (
                renderer_options_wid.selected_values['zoom_one'] *
                figure_size[0],
                renderer_options_wid.selected_values['zoom_one'] *
                figure_size[1])
            # Render shape with selected options
            save_figure_wid.renderer = shape.view(
                figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
                figure_size=new_figure_size, **options)
            # Force rendering
            save_figure_wid.renderer.force_draw()
        else:
            ipydisplay.clear_output()
        # update info text widget
        update_info(landmarks[i], g, custom_info_callback=custom_info_callback)
    # Define function that updates the info text
    def update_info(landmarks, group, custom_info_callback=None):
        # Refresh the info tab with statistics of the selected landmark group.
        if group is not None:
            min_b, max_b = landmarks[group].bounds()
            rang = landmarks[group].range()
            cm = landmarks[group].centre()
            text_per_line = [
                "> {} landmark points".format(landmarks[group].n_points),
                "> {}".format(name_of_callable(landmarks[group])),
                "> Bounds: [{0:.1f}-{1:.1f}]W, [{2:.1f}-{3:.1f}]H".format(
                    min_b[0], max_b[0], min_b[1], max_b[1]),
                "> Range: {0:.1f}W, {1:.1f}H".format(rang[0], rang[1]),
                "> Centre of mass: ({0:.1f}, {1:.1f})".format(cm[0], cm[1]),
                "> Norm: {0:.2f}".format(landmarks[group].norm())]
            if custom_info_callback is not None:
                # iterate over the list of messages returned by the callback
                # function and append them in the text_per_line.
                for msg in custom_info_callback(landmarks[group]):
                    text_per_line.append('> {}'.format(msg))
        else:
            text_per_line = ["No landmarks available."]
        info_wid.set_widget_state(text_per_line=text_per_line)
    # Create widgets
    groups_keys, labels_keys = extract_group_labels_from_landmarks(landmarks[0])
    first_label = labels_keys[0] if labels_keys else None
    landmark_options_wid = LandmarkOptionsWidget(
        group_keys=groups_keys, labels_keys=labels_keys,
        type='2D', render_function=render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],
        labels=first_label, axes_x_limits=0.1, axes_y_limits=0.1,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()
    # Group widgets
    if n_landmarks > 1:
        # Define function that updates options' widgets state
        def update_widgets(change):
            i = landmark_number_wid.selected_values
            g_keys, l_keys = extract_group_labels_from_landmarks(
                landmarks[i])
            landmark_options_wid.set_widget_state(
                group_keys=g_keys, labels_keys=l_keys, allow_callback=True)
        # Landmark manager selection slider
        index = {'min': 0, 'max': n_landmarks-1, 'step': 1, 'index': 0}
        landmark_number_wid = AnimationOptionsWidget(
            index, render_function=update_widgets, index_style=browser_style,
            interval=0.2, description='Shape', loop_enabled=True,
            continuous_update=False)
        # Header widget
        logo_wid = LogoWidget(style=main_style)
        logo_wid.layout.margin = '0px 10px 0px 0px'
        header_wid = ipywidgets.HBox([logo_wid, landmark_number_wid])
        header_wid.layout.align_items = 'center'
        header_wid.layout.margin = '0px 0px 10px 0px'
    else:
        # Header widget
        header_wid = LogoWidget(style=main_style)
        header_wid.layout.margin = '0px 10px 0px 0px'
    options_box = ipywidgets.Tab(
        children=[info_wid, landmark_options_wid, renderer_options_wid,
                  save_figure_wid])
    tab_titles = ['Info', 'Landmarks', 'Renderer', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    if n_landmarks > 1:
        wid = ipywidgets.VBox([header_wid, options_box])
    else:
        wid = ipywidgets.HBox([header_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
def visualize_landmarks_3d(landmarks, browser_style='buttons',
                           custom_info_callback=None):
    r"""
    Widget for browsing and rendering a `list` of 3D landmark managers
    (Mayavi-based).

    Parameters
    ----------
    landmarks : `list` of landmark managers or a single manager
        The landmark managers to be visualized. A single manager is also
        accepted.
    browser_style : ``{'buttons', 'slider'}``, optional
        The style of the selector that appears when more than one landmark
        manager is provided.
    custom_info_callback : `callable` or ``None``, optional
        If not ``None``, it is called with the currently selected landmark
        group and must return an iterable of extra messages to be printed in
        the info tab, one per line.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print('Initializing...')
    # Make sure that landmarks is a list even with one landmark manager member
    if not isinstance(landmarks, list):
        landmarks = [landmarks]
    # Get the number of landmark managers
    n_landmarks = len(landmarks)
    # Define the styling options
    main_style = 'info'
    # Define render function
    def render_function(change):
        # Clear current figure
        save_figure_wid.renderer.clear_figure()
        ipydisplay.clear_output(wait=True)
        # get selected index and selected group
        i = landmark_number_wid.selected_values if n_landmarks > 1 else 0
        g = landmark_options_wid.selected_values['landmarks']['group']
        # update info text widget
        update_info(landmarks[i], g, custom_info_callback=custom_info_callback)
        if landmark_options_wid.selected_values['landmarks']['render_landmarks']:
            # get shape
            shape = landmarks[i][g]
            options = dict()
            if isinstance(shape, TriMesh):
                # Note that 3D TriMesh has a totally different set of options
                # compared to any other PointCloud or PointGraph. However, in
                # order for visualize_landmarks_3d to support TriMeshes, we
                # simply use the options that are common. This means that most
                # of the widget's options will have no effect on rendering...
                options['mesh_type'] = 'wireframe'
                if landmark_options_wid.selected_values['markers'][
                        'render_markers']:
                    options['mesh_type'] = 'fancymesh'
                options['line_width'] = \
                    landmark_options_wid.selected_values['lines']['line_width']
                options['colour'] = \
                    landmark_options_wid.selected_values['lines']['line_colour'][0]
                options['marker_style'] = \
                    landmark_options_wid.selected_values['markers']['marker_style']
                options['marker_size'] = \
                    landmark_options_wid.selected_values['markers']['marker_size']
                options['marker_resolution'] = \
                    landmark_options_wid.selected_values['markers'][
                        'marker_resolution']
                options['step'] = \
                    landmark_options_wid.selected_values['markers']['step']
            else:
                options.update(landmark_options_wid.selected_values['lines'])
                options.update(landmark_options_wid.selected_values['markers'])
                options.update(
                    renderer_options_wid.selected_values['numbering_mayavi'])
                # Labelled shapes keep only the colours of the labels that are
                # selected for rendering.
                if hasattr(shape, 'labels'):
                    options['with_labels'] = \
                        landmark_options_wid.selected_values['landmarks']['with_labels']
                    line_colour = []
                    marker_colour = []
                    for lbl in options['with_labels']:
                        idx = shape.labels.index(lbl)
                        line_colour.append(options['line_colour'][idx])
                        marker_colour.append(options['marker_colour'][idx])
                    options['line_colour'] = line_colour
                    options['marker_colour'] = marker_colour
                else:
                    # Unlabelled shapes expect a single colour per option
                    options['line_colour'] = options['line_colour'][0]
                    options['marker_colour'] = options['marker_colour'][0]
            # Render shape with selected options
            save_figure_wid.renderer = shape.view(
                figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
                alpha=1.0, **options)
            # Force rendering
            save_figure_wid.renderer.force_draw()
        else:
            ipydisplay.clear_output()
    # Define function that updates the info text
    def update_info(landmarks, group, custom_info_callback=None):
        # Refresh the info tab with statistics of the selected landmark group.
        if group is not None:
            min_b, max_b = landmarks[group].bounds()
            rang = landmarks[group].range()
            cm = landmarks[group].centre()
            text_per_line = [
                "> {} landmark points".format(landmarks[group].n_points),
                "> {}".format(name_of_callable(landmarks[group])),
                "> Bounds: [{0:.1f}-{1:.1f}]X, [{2:.1f}-{3:.1f}]Y, "
                "[{4:.1f}-{5:.1f}]Z".format(
                    min_b[0], max_b[0], min_b[1], max_b[1], min_b[2], max_b[2]),
                "> Range: {0:.1f}X, {1:.1f}Y, {2:.1f}Z".format(rang[0], rang[1],
                                                               rang[2]),
                "> Centre of mass: ({0:.1f}X, {1:.1f}Y, {2:.1f}Z)".format(
                    cm[0], cm[1], cm[2]),
                "> Norm: {0:.2f}".format(landmarks[group].norm())]
            if custom_info_callback is not None:
                # Append any custom messages supplied by the caller
                for msg in custom_info_callback(landmarks[group]):
                    text_per_line.append('> {}'.format(msg))
        else:
            text_per_line = ["No landmarks available."]
        info_wid.set_widget_state(text_per_line=text_per_line)
    # Create widgets
    groups_keys, labels_keys = extract_group_labels_from_landmarks(landmarks[0])
    first_label = labels_keys[0] if labels_keys else None
    landmark_options_wid = LandmarkOptionsWidget(
        group_keys=groups_keys, labels_keys=labels_keys,
        type='3D', render_function=render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['numbering_mayavi'], labels=first_label,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMayaviFigureOptionsWidget()
    # Group widgets
    if n_landmarks > 1:
        # Define function that updates options' widgets state
        def update_widgets(change):
            # Get new groups and labels
            i = landmark_number_wid.selected_values
            g_keys, l_keys = extract_group_labels_from_landmarks(
                landmarks[i])
            # Update landmarks options
            landmark_options_wid.set_widget_state(
                group_keys=g_keys, labels_keys=l_keys, allow_callback=True)
        # Landmark selection slider
        index = {'min': 0, 'max': n_landmarks-1, 'step': 1, 'index': 0}
        landmark_number_wid = AnimationOptionsWidget(
            index, render_function=update_widgets, index_style=browser_style,
            interval=0.2, description='Shape', loop_enabled=True,
            continuous_update=False)
        # Header widget
        logo_wid = LogoWidget(style=main_style)
        logo_wid.layout.margin = '0px 10px 0px 0px'
        header_wid = ipywidgets.HBox([logo_wid, landmark_number_wid])
        header_wid.layout.align_items = 'center'
        header_wid.layout.margin = '0px 0px 10px 0px'
    else:
        # Header widget
        header_wid = LogoWidget(style=main_style)
        header_wid.layout.margin = '0px 10px 0px 0px'
    options_box = ipywidgets.Tab(
        children=[info_wid, landmark_options_wid, renderer_options_wid,
                  save_figure_wid])
    tab_titles = ['Info', 'Landmarks', 'Renderer', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    if n_landmarks > 1:
        wid = ipywidgets.VBox([header_wid, options_box])
    else:
        wid = ipywidgets.HBox([header_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
    print_dynamic('')
def visualize_meshes_3d(meshes, browser_style='buttons',
                        custom_info_callback=None):
    r"""
    Widget for browsing and rendering a `list` of 3D meshes (Mayavi-based).

    Texture options are enabled when the selected mesh is a
    `ColouredTriMesh` or a `TexturedTriMesh`.

    Parameters
    ----------
    meshes : `list` of 3D meshes or a single mesh
        The meshes to be visualized. A single mesh object is also accepted.
    browser_style : ``{'buttons', 'slider'}``, optional
        The style of the mesh selector that appears when more than one mesh
        is provided.
    custom_info_callback : `callable` or ``None``, optional
        If not ``None``, it is called with the currently selected mesh and
        must return an iterable of extra messages to be printed in the info
        tab, one per line.
    """
    # Ensure that the code is being run inside a Jupyter kernel!
    from menpowidgets.utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    # All textual output goes through an Output widget so that it can be
    # cleared independently of the rendered figure.
    out = ipywidgets.Output()
    ipydisplay.display(out)
    with out:
        ipydisplay.clear_output(wait=True)
        print('Initializing...')
    # Make sure that meshes is a list, even with a single member
    if not isinstance(meshes, Sized):
        meshes = [meshes]
    n_meshes = len(meshes)
    # Define the styling options
    main_style = 'warning'
    # Define render function
    def render_function(_):
        # Re-render the currently selected mesh with the current options.
        save_figure_wid.renderer.clear_figure()
        with out:
            ipydisplay.clear_output(wait=True)
        i = mesh_number_wid.selected_values if n_meshes > 1 else 0
        update_info(meshes[i], custom_info_callback=custom_info_callback)
        save_figure_wid.renderer = meshes[i].view(
            figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
            **mesh_options_wid.selected_values)
        save_figure_wid.renderer.force_draw()
    # Define function that updates the info text
    def update_info(mesh, custom_info_callback=None):
        # Refresh the info tab with statistics of the given mesh.
        min_b, max_b = mesh.bounds()
        rang = mesh.range()
        cm = mesh.centre()
        text_per_line = [
            "> {}".format(name_of_callable(mesh)),
            "> {} points".format(mesh.n_points),
            "> Bounds: [{0:.1f}-{1:.1f}]X, [{2:.1f}-{3:.1f}]Y, "
            "[{4:.1f}-{5:.1f}]Z".format(
                min_b[0], max_b[0], min_b[1], max_b[1], min_b[2], max_b[2]),
            "> Range: {0:.1f}X, {1:.1f}Y, {2:.1f}Z".format(rang[0], rang[1],
                                                           rang[2]),
            "> Centre of mass: ({0:.1f}X, {1:.1f}Y, {2:.1f}Z)".format(
                cm[0], cm[1], cm[2]),
            "> Norm: {0:.2f}".format(mesh.norm())]
        if custom_info_callback is not None:
            # Append any custom messages supplied by the caller
            for msg in custom_info_callback(mesh):
                text_per_line.append('> {}'.format(msg))
        info_wid.set_widget_state(text_per_line=text_per_line)
    # Create widgets
    mesh_options_wid = Mesh3DOptionsWidget(
        textured=(isinstance(meshes[0], ColouredTriMesh) or
                  isinstance(meshes[0], TexturedTriMesh)),
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMayaviFigureOptionsWidget()
    # Group widgets
    if n_meshes > 1:
        # Define function that updates options' widgets state
        def update_widgets(change):
            i = change['new']
            # Update shape options
            mesh_options_wid.set_widget_state(
                textured=(isinstance(meshes[i], ColouredTriMesh) or
                          isinstance(meshes[i], TexturedTriMesh)),
                allow_callback=True)
        # selection slider
        index = {'min': 0, 'max': n_meshes-1, 'step': 1, 'index': 0}
        mesh_number_wid = AnimationOptionsWidget(
            index, render_function=update_widgets, index_style=browser_style,
            interval=0.2, description='Mesh', loop_enabled=True,
            continuous_update=False)
        # Header widget
        logo_wid = LogoWidget(style=main_style)
        logo_wid.layout.margin = '0px 10px 0px 0px'
        header_wid = ipywidgets.HBox([logo_wid, mesh_number_wid])
        header_wid.layout.align_items = 'center'
        header_wid.layout.margin = '0px 0px 10px 0px'
    else:
        # Header widget
        header_wid = LogoWidget(style=main_style)
        header_wid.layout.margin = '0px 10px 0px 0px'
    options_box = ipywidgets.Tab([info_wid, mesh_options_wid, save_figure_wid])
    tab_titles = ['Info', 'Mesh', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    if n_meshes > 1:
        wid = ipywidgets.VBox([header_wid, options_box])
    else:
        wid = ipywidgets.HBox([header_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
    with out:
        print_dynamic('')
def visualize_images(images, figure_size=(7, 7), browser_style='buttons',
                     custom_info_callback=None):
    """Interactive widget for browsing and rendering a list of 2D images.

    Builds an ipywidgets UI with tabs for info, image/channel options,
    landmark options, renderer options and figure export, plus (when more
    than one image is given) an animation-style image selector.

    Parameters
    ----------
    images : image or `list` of images
        The images to visualise. A single (non-``Sized``) object is wrapped
        in a list.
    figure_size : (`int`, `int`), optional
        Base figure size; multiplied by the widget's zoom value at render
        time.
    browser_style : `str`, optional
        Forwarded as ``index_style`` to ``AnimationOptionsWidget``
        (e.g. ``'buttons'`` — see that widget for accepted values).
    custom_info_callback : `callable` or ``None``, optional
        If given, it is called with the currently displayed image and each
        yielded message is appended to the info tab.
    """
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print('Initializing...')
    # Make sure that images is a list even with a single member
    if not isinstance(images, Sized):
        images = [images]
    n_images = len(images)
    main_style = 'info'
    def render_function(change):
        # Clear the output of the previous rendering
        ipydisplay.clear_output(wait=True)
        # Selected image index and landmark group
        i = image_number_wid.selected_values if n_images > 1 else 0
        g = landmark_options_wid.selected_values['landmarks']['group']
        image_is_masked = isinstance(images[i], MaskedImage)
        # Collect the rendering options from all the option widgets
        options = dict()
        options.update(landmark_options_wid.selected_values['lines'])
        options.update(landmark_options_wid.selected_values['markers'])
        options.update(
            renderer_options_wid.selected_values['numbering_matplotlib'])
        options.update(renderer_options_wid.selected_values['axes'])
        options.update(renderer_options_wid.selected_values['legend'])
        options.update(image_options_wid.selected_values)
        options.update(landmark_options_wid.selected_values['landmarks'])
        if (images[i].has_landmarks and
                hasattr(images[i].landmarks[g], 'labels')):
            # Keep only the colours that correspond to the selected labels
            line_colour = []
            marker_face_colour = []
            marker_edge_colour = []
            for lbl in options['with_labels']:
                id = images[i].landmarks[g].labels.index(lbl)
                line_colour.append(options['line_colour'][id])
                marker_face_colour.append(options['marker_face_colour'][id])
                marker_edge_colour.append(options['marker_edge_colour'][id])
            options['line_colour'] = line_colour
            options['marker_face_colour'] = marker_face_colour
            options['marker_edge_colour'] = marker_edge_colour
        else:
            # No labels; use a single colour per option
            options['line_colour'] = options['line_colour'][0]
            options['marker_face_colour'] = options['marker_face_colour'][0]
            options['marker_edge_colour'] = options['marker_edge_colour'][0]
        # Scale the figure size by the selected zoom
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] *
            figure_size[0],
            renderer_options_wid.selected_values['zoom_one'] *
            figure_size[1])
        # Render the selected image and refresh the info tab
        save_figure_wid.renderer = render_image(
            image=images[i], renderer=save_figure_wid.renderer,
            image_is_masked=image_is_masked, figure_size=new_figure_size,
            **options)
        update_info(images[i], image_is_masked, g,
                    custom_info_callback=custom_info_callback)
    def update_info(img, image_is_masked, group, custom_info_callback=None):
        # Compose the text lines displayed in the 'Info' tab
        masked_str = 'Masked Image' if image_is_masked else 'Image'
        path_str = img.path if hasattr(img, 'path') else 'No path available'
        text_per_line = [
            "> {} of size {} with {} channel{}".format(
                masked_str, img._str_shape(), img.n_channels,
                's' * (img.n_channels > 1)),
            "> Path: '{}'".format(path_str)]
        if image_is_masked:
            text_per_line.append(
                "> {} masked pixels (attached mask {:.1%} true)".format(
                    img.n_true_pixels(), img.mask.proportion_true()))
        text_per_line.append("> min={:.3f}, max={:.3f}".format(
            img.pixels.min(), img.pixels.max()))
        if img.has_landmarks:
            text_per_line.append("> {} landmark points".format(
                img.landmarks[group].n_points))
        if custom_info_callback is not None:
            # User-provided callback yielding extra info messages
            for msg in custom_info_callback(img):
                text_per_line.append('> {}'.format(msg))
        info_wid.set_widget_state(text_per_line=text_per_line)
    # Create the option widgets based on the first image
    groups_keys, labels_keys = extract_groups_labels_from_image(images[0])
    first_label = labels_keys[0] if labels_keys else None
    image_options_wid = ImageOptionsWidget(
        n_channels=images[0].n_channels,
        image_is_masked=isinstance(images[0], MaskedImage),
        render_function=render_function)
    landmark_options_wid = LandmarkOptionsWidget(
        group_keys=groups_keys, labels_keys=labels_keys,
        type='2D', render_function=render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],
        labels=first_label, axes_x_limits=None, axes_y_limits=None,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()
    if n_images > 1:
        def update_widgets(change):
            # Get new groups and labels, then update landmark options
            i = image_number_wid.selected_values
            g_keys, l_keys = extract_groups_labels_from_image(images[i])
            # Update landmarks options
            landmark_options_wid.set_widget_state(
                group_keys=g_keys, labels_keys=l_keys, allow_callback=False)
            # Update channels options
            image_options_wid.set_widget_state(
                n_channels=images[i].n_channels,
                image_is_masked=isinstance(images[i], MaskedImage),
                allow_callback=True)
        # Image selection slider
        index = {'min': 0, 'max': n_images-1, 'step': 1, 'index': 0}
        image_number_wid = AnimationOptionsWidget(
            index, render_function=update_widgets, index_style=browser_style,
            interval=0.2, description='Image', loop_enabled=True,
            continuous_update=False)
        # Header widget
        logo_wid = LogoWidget(style=main_style)
        logo_wid.layout.margin = '0px 10px 0px 0px'
        header_wid = ipywidgets.HBox([logo_wid, image_number_wid])
        header_wid.layout.align_items = 'center'
        header_wid.layout.margin = '0px 0px 10px 0px'
    else:
        # Header widget
        header_wid = LogoWidget(style=main_style)
        header_wid.layout.margin = '0px 10px 0px 0px'
    options_box = ipywidgets.Tab(
        children=[info_wid, image_options_wid, landmark_options_wid,
                  renderer_options_wid, save_figure_wid])
    tab_titles = ['Info', 'Image', 'Landmarks', 'Renderer', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    if n_images > 1:
        wid = ipywidgets.VBox([header_wid, options_box])
    else:
        wid = ipywidgets.HBox([header_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger an initial rendering
    render_function({})
def visualize_patches(patches, patch_centers, figure_size=(7, 7),
                      browser_style='buttons', custom_info_callback=None):
    """Interactive widget for browsing and rendering sets of image patches.

    Each patches item is (or is converted to) an array of shape
    ``(n_centers, n_offsets, n_channels, height, width)`` — this layout is
    what the 'Info' tab reports — rendered at the corresponding
    ``patch_centers`` pointcloud.

    Parameters
    ----------
    patches : patches array, `list` of patch images, or `list` of those
        The patches per object. A single patches object (an array, or a
        list whose first element is an ``Image``) is wrapped in a list;
        list-of-images items are converted to a single array via
        ``_convert_patches_list_to_single_array``.
    patch_centers : pointcloud or `list` of pointclouds
        The centers of the patches. A single pointcloud (or a length-1
        list) is replicated for every patches item.
    figure_size : (`int`, `int`), optional
        Base figure size; scaled by the widget's zoom value.
    browser_style : `str`, optional
        Forwarded as ``index_style`` to ``AnimationOptionsWidget``.
    custom_info_callback : `callable` or ``None``, optional
        If given, called with the current patches array; each yielded
        message is appended to the info tab.
    """
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print('Initializing...')
    # Make sure that patches is a list even with a single patches object
    if (isinstance(patches, list) and isinstance(patches[0], Image)) or \
            not isinstance(patches, list):
        patches = [patches]
    # Replicate patch_centers so there is one pointcloud per patches object
    if not isinstance(patch_centers, list):
        patch_centers = [patch_centers] * len(patches)
    elif isinstance(patch_centers, list) and len(patch_centers) == 1:
        patch_centers *= len(patches)
    # Convert any list-of-images patches item to a single array
    for i in range(len(patches)):
        if isinstance(patches[i], list):
            patches[i] = _convert_patches_list_to_single_array(
                patches[i], patch_centers[i].n_points)
    n_patches = len(patches)
    main_style = 'info'
    def render_function(change):
        # Clear the output of the previous rendering
        ipydisplay.clear_output(wait=True)
        i = image_number_wid.selected_values if n_patches > 1 else 0
        # Collect the rendering options from all the option widgets
        options = dict()
        options.update(shape_options_wid.selected_values['lines'])
        options.update(shape_options_wid.selected_values['markers'])
        options.update(
            renderer_options_wid.selected_values['numbering_matplotlib'])
        options.update(renderer_options_wid.selected_values['axes'])
        image_options = dict(image_options_wid.selected_values)
        # Masked rendering is not applicable for patches
        del image_options['masked_enabled']
        options.update(image_options)
        options.update(patch_options_wid.selected_values)
        # Patches carry no labels; use a single colour per option
        options['line_colour'] = options['line_colour'][0]
        options['marker_face_colour'] = options['marker_face_colour'][0]
        options['marker_edge_colour'] = options['marker_edge_colour'][0]
        # Scale the figure size by the selected zoom
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
            renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
        # Render the selected patches and refresh the info tab
        save_figure_wid.renderer = render_patches(
            patches=patches[i], patch_centers=patch_centers[i],
            renderer=save_figure_wid.renderer, figure_size=new_figure_size,
            **options)
        update_info(patches[i], custom_info_callback=custom_info_callback)
    def update_info(ptchs, custom_info_callback=None):
        # Compose the text lines for the 'Info' tab; ptchs has shape
        # (n_centers, n_offsets, n_channels, height, width)
        text_per_line = [
            "> Patch-Based Image with {} patche{} and {} offset{}.".format(
                ptchs.shape[0], 's' * (ptchs.shape[0] > 1), ptchs.shape[1],
                's' * (ptchs.shape[1] > 1)),
            "> Each patch has size {}H x {}W with {} channel{}.".format(
                ptchs.shape[3], ptchs.shape[4], ptchs.shape[2],
                's' * (ptchs.shape[2] > 1)),
            "> min={:.3f}, max={:.3f}".format(ptchs.min(), ptchs.max())]
        if custom_info_callback is not None:
            for msg in custom_info_callback(ptchs):
                text_per_line.append('> {}'.format(msg))
        info_wid.set_widget_state(text_per_line=text_per_line)
    # Create the option widgets based on the first patches object. Defaults
    # (lines off, interpolation off) are applied before attaching the render
    # callback so that setting them does not trigger a render.
    shape_options_wid = Shape2DOptionsWidget(
        labels=None, render_function=None)
    shape_options_wid.line_options_wid.render_lines_switch.button_wid.value = False
    shape_options_wid.add_render_function(render_function)
    patch_options_wid = PatchOptionsWidget(
        n_patches=patches[0].shape[0], n_offsets=patches[0].shape[1],
        render_function=render_function)
    image_options_wid = ImageOptionsWidget(
        n_channels=patches[0].shape[2], image_is_masked=False,
        render_function=None)
    image_options_wid.interpolation_checkbox.button_wid.value = False
    image_options_wid.add_render_function(render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['zoom_one', 'axes', 'numbering_matplotlib'], labels=None,
        axes_x_limits=None, axes_y_limits=None,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()
    if n_patches > 1:
        def update_widgets(change):
            # Selected object
            i = image_number_wid.selected_values
            # Update patch options
            patch_options_wid.set_widget_state(
                n_patches=patches[i].shape[0], n_offsets=patches[i].shape[1],
                allow_callback=False)
            # Update channels options
            image_options_wid.set_widget_state(
                n_channels=patches[i].shape[2], image_is_masked=False,
                allow_callback=True)
        # Image selection slider
        index = {'min': 0, 'max': n_patches-1, 'step': 1, 'index': 0}
        image_number_wid = AnimationOptionsWidget(
            index, render_function=update_widgets, index_style=browser_style,
            interval=0.2, description='Image', loop_enabled=True,
            continuous_update=False)
        # Header widget
        logo_wid = LogoWidget(style=main_style)
        logo_wid.layout.margin = '0px 10px 0px 0px'
        header_wid = ipywidgets.HBox([logo_wid, image_number_wid])
        header_wid.layout.align_items = 'center'
        header_wid.layout.margin = '0px 0px 10px 0px'
    else:
        # Header widget
        header_wid = LogoWidget(style=main_style)
        header_wid.layout.margin = '0px 10px 0px 0px'
    options_box = ipywidgets.Tab(
        children=[info_wid, patch_options_wid, image_options_wid,
                  shape_options_wid, renderer_options_wid, save_figure_wid])
    tab_titles = ['Info', 'Patches', 'Image', 'Shape', 'Renderer', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    if n_patches > 1:
        wid = ipywidgets.VBox([header_wid, options_box])
    else:
        wid = ipywidgets.HBox([header_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger an initial rendering
    render_function({})
def plot_graph(x_axis, y_axis, legend_entries=None, figure_size=(9, 5)):
    """Widget that plots a set of curves with interactive styling options.

    Parameters
    ----------
    x_axis : `list` or array-like
        The horizontal-axis values, shared by all curves.
    y_axis : `list` of `list` or array-like
        One entry per curve with the vertical-axis values.
    legend_entries : `list` of `str` or ``None``, optional
        One legend entry per curve. If ``None``, entries of the form
        ``"curve {i}"`` are generated.
    figure_size : (`int`, `int`), optional
        Base figure size; scaled per-axis by the widget's zoom values.
    """
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    from menpo.visualize import plot_curve
    print('Initializing...')
    n_curves = len(y_axis)
    main_style = 'danger'
    # Generate default legend entries if none were provided
    if legend_entries is None:
        legend_entries = ["curve {}".format(i) for i in range(n_curves)]
    def render_function(change):
        # Clear previous rendering and re-plot with the selected options
        ipydisplay.clear_output(wait=True)
        opts = plot_wid.selected_values.copy()
        # The zoom option is per-axis; apply it to the figure size and drop
        # it from the options forwarded to plot_curve
        new_figure_size = (
            plot_wid.selected_values['zoom'][0] * figure_size[0],
            plot_wid.selected_values['zoom'][1] * figure_size[1])
        del opts['zoom']
        save_figure_wid.renderer = plot_curve(
            x_axis=x_axis, y_axis=y_axis, figure_size=new_figure_size,
            figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
            **opts)
        save_figure_wid.renderer.force_draw()
    plot_wid = PlotMatplotlibOptionsWidget(
        legend_entries=legend_entries, render_function=render_function)
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()
    # Header logo
    logo = LogoWidget(style=main_style)
    logo.layout.margin = '0px 10px 0px 0px'
    # Append the export widget as an extra tab of the plot options widget
    tmp_children = list(plot_wid.tab_box.children)
    tmp_children.append(save_figure_wid)
    plot_wid.tab_box.children = tmp_children
    plot_wid.tab_box.set_title(0, 'Labels')
    plot_wid.tab_box.set_title(1, 'Style')
    plot_wid.tab_box.set_title(2, 'Legend')
    plot_wid.tab_box.set_title(3, 'Axes')
    plot_wid.tab_box.set_title(4, 'Zoom')
    plot_wid.tab_box.set_title(5, 'Grid')
    plot_wid.tab_box.set_title(6, 'Export')
    wid = ipywidgets.HBox([logo, plot_wid])
    # Set widget's style
    wid.box_style = main_style
    # BUGFIX: the space after 'solid' is required; '2px solid#<hex>' is an
    # invalid CSS border value so no border was drawn. This now matches the
    # other widgets in this module.
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    plot_wid.container.border = '0px'
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger an initial rendering
    render_function({})
def save_matplotlib_figure(renderer):
    """Display a widget for exporting a matplotlib figure to file.

    Parameters
    ----------
    renderer : matplotlib-based renderer
        The renderer object whose figure will be exported.
    """
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    # Logo on the left, export options on the right
    logo = LogoWidget()
    logo.layout.margin = '0px 10px 0px 0px'
    exporter = SaveMatplotlibFigureOptionsWidget(renderer, style='warning')
    ipydisplay.display(ipywidgets.HBox([logo, exporter]))
def save_mayavi_figure(renderer):
    """Display a widget for exporting a mayavi figure to file.

    Parameters
    ----------
    renderer : mayavi-based renderer
        The renderer object whose figure will be exported.
    """
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    # Logo on the left, export options on the right
    logo = LogoWidget()
    logo.layout.margin = '0px 10px 0px 0px'
    exporter = SaveMayaviFigureOptionsWidget(renderer, style='warning')
    ipydisplay.display(ipywidgets.HBox([logo, exporter]))
def visualize_shape_model_2d(shape_model, n_parameters=5, mode='multiple',
                             parameters_bounds=(-3.0, 3.0), figure_size=(7, 7)):
    """Widget for exploring the principal components of 2D shape model(s).

    Two rendering modes are offered: 'Deformation' renders the weighted
    instance (optionally on top of the mean shape in yellow), while
    'Vectors' renders the mean shape with lines towards the negatively
    (green) and positively (blue) weighted instances.

    Parameters
    ----------
    shape_model : PCA shape model or `list` of those
        One shape model per pyramid level. A single model is wrapped in a
        list.
    n_parameters : `int`, `list` of `int` or ``None``, optional
        Number of principal components exposed as sliders per level;
        validated by ``check_n_parameters`` against the active components.
    mode : `str`, optional
        Forwarded to ``LinearModelParametersWidget`` (e.g. ``'multiple'``
        for one slider per parameter).
    parameters_bounds : (`float`, `float`), optional
        Bounds of the parameter sliders, in units of standard deviations.
    figure_size : (`int`, `int`), optional
        Base figure size; scaled by the widget's zoom value.
    """
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    from menpo.visualize.viewmatplotlib import (_set_axes_options,
                                                _parse_axes_limits)
    # Output widget that captures all printed/rendered output
    out = ipywidgets.Output()
    ipydisplay.display(out)
    with out:
        ipydisplay.clear_output(wait=True)
        print('Initializing...')
    # Make sure that shape_model is a list even with a single member
    if not isinstance(shape_model, list):
        shape_model = [shape_model]
    n_levels = len(shape_model)
    main_style = 'warning'
    # Check the given number of parameters per level
    max_n_params = [sp.n_active_components for sp in shape_model]
    n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)
    def render_function(change):
        with out:
            ipydisplay.clear_output(wait=True)
            # Selected pyramid level
            level = 0
            if n_levels > 1:
                level = level_wid.value
            # Sliders are in units of standard deviations, so scale them by
            # the square root of the eigenvalues to get component weights
            parameters = model_parameters_wid.selected_values
            weights = (parameters *
                       shape_model[level].eigenvalues[:len(parameters)] ** 0.5)
            mean = shape_model[level].mean()
            # Collect the rendering options from the option widgets
            options = dict()
            options.update(shape_options_wid.selected_values['lines'])
            options.update(shape_options_wid.selected_values['markers'])
            options['image_view'] = shape_options_wid.selected_values['image_view']
            options.update(
                renderer_options_wid.selected_values['numbering_matplotlib'])
            options.update(renderer_options_wid.selected_values['axes'])
            if hasattr(mean, 'labels'):
                # Keep only the colours of the selected labels
                options.update(renderer_options_wid.selected_values['legend'])
                options['with_labels'] = \
                    shape_options_wid.selected_values['with_labels']
                line_colour = []
                marker_face_colour = []
                marker_edge_colour = []
                for lbl in options['with_labels']:
                    idx = mean.labels.index(lbl)
                    line_colour.append(options['line_colour'][idx])
                    marker_face_colour.append(options['marker_face_colour'][idx])
                    marker_edge_colour.append(options['marker_edge_colour'][idx])
                options['line_colour'] = line_colour
                options['marker_face_colour'] = marker_face_colour
                options['marker_edge_colour'] = marker_edge_colour
            else:
                # No labels; use a single colour per option
                options['line_colour'] = options['line_colour'][0]
                options['marker_face_colour'] = options['marker_face_colour'][0]
                options['marker_edge_colour'] = options['marker_edge_colour'][0]
            # Scale the figure size by the selected zoom
            new_figure_size = (
                renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
                renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
            if mode_wid.value == 1:
                # 'Deformation' mode: render the weighted instance,
                # optionally on top of the mean shape (in yellow)
                instance = shape_model[level].instance(weights)
                if mean_wid.selected_values:
                    save_figure_wid.renderer = mean.view(
                        figure_id=save_figure_wid.renderer.figure_id,
                        new_figure=False, figure_size=None,
                        image_view=options['image_view'],
                        render_lines=options['render_lines'],
                        line_colour='yellow', line_style=options['line_style'],
                        line_width=options['line_width'],
                        render_markers=options['render_markers'],
                        marker_style=options['marker_style'],
                        marker_size=options['marker_size'],
                        marker_face_colour='yellow', marker_edge_colour='yellow',
                        marker_edge_width=options['marker_edge_width'])
                save_figure_wid.renderer = instance.view(
                    figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
                    figure_size=new_figure_size, **options)
                instance_range = instance.range()
            else:
                # 'Vectors' mode: render the mean shape and draw a line from
                # each mean point towards the negatively (green) and
                # positively (blue) weighted instances
                instance_lower = shape_model[level].instance([-p for p in weights])
                instance_upper = shape_model[level].instance(weights)
                save_figure_wid.renderer = mean.view(
                    figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
                    figure_size=new_figure_size, **options)
                ax = plt.gca()
                # Track the bounding box of the drawn vectors.
                # np.inf is used instead of np.Inf: the np.Inf alias was
                # removed in NumPy 2.0 (same value on all versions).
                x_min = np.inf
                y_min = np.inf
                x_max = -np.inf
                y_max = -np.inf
                for p in range(mean.n_points):
                    xm = mean.points[p, 0]
                    ym = mean.points[p, 1]
                    xl = instance_lower.points[p, 0]
                    yl = instance_lower.points[p, 1]
                    xu = instance_upper.points[p, 0]
                    yu = instance_upper.points[p, 1]
                    if options['image_view']:
                        lines = [[(ym, xm), (yl, xl)], [(ym, xm), (yu, xu)]]
                    else:
                        lines = [[(xm, ym), (xl, yl)], [(xm, ym), (xu, yu)]]
                    lc = mc.LineCollection(lines, colors=('g', 'b'),
                                           linestyles='solid', linewidths=2)
                    # NOTE(review): point column 0 feeds the y-limits and
                    # column 1 the x-limits — appears to follow menpo's
                    # (y, x) point ordering; preserved as-is.
                    y_min = np.min([y_min, xl, xu])
                    y_max = np.max([y_max, xl, xu])
                    x_min = np.min([x_min, yl, yu])
                    x_max = np.max([x_max, yl, yu])
                    ax.add_collection(lc)
                axes_x_limits, axes_y_limits = _parse_axes_limits(
                    x_min, x_max, y_min, y_max, options['axes_x_limits'],
                    options['axes_y_limits'])
                _set_axes_options(
                    ax, render_axes=options['render_axes'],
                    inverted_y_axis=options['image_view'],
                    axes_font_name=options['axes_font_name'],
                    axes_font_size=options['axes_font_size'],
                    axes_font_style=options['axes_font_style'],
                    axes_font_weight=options['axes_font_weight'],
                    axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,
                    axes_x_ticks=options['axes_x_ticks'],
                    axes_y_ticks=options['axes_y_ticks'])
                instance_range = mean.range()
            save_figure_wid.renderer.force_draw()
            update_info(level, instance_range)
    def update_info(level, instance_range):
        # Compose the text lines for the 'Info' tab
        text_per_line = [
            "> Level {} out of {}".format(level + 1, n_levels),
            "> {} components in total".format(shape_model[level].n_components),
            "> {} active components".format(
                shape_model[level].n_active_components),
            "> {:.1f}% variance kept".format(
                shape_model[level].variance_ratio() * 100),
            "> Instance range: {:.1f} x {:.1f}".format(instance_range[0],
                                                       instance_range[1]),
            "> {} landmark points, {} features".format(
                shape_model[level].mean().n_points,
                shape_model[level].n_features)]
        info_wid.set_widget_state(text_per_line=text_per_line)
    def plot_variance(name):
        # Plot the eigenvalues ratio and its cumulative version side by side
        ipydisplay.clear_output(wait=True)
        level = level_wid.value if n_levels > 1 else 0
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] * 10,
            renderer_options_wid.selected_values['zoom_one'] * 3)
        plt.subplot(121)
        save_figure_wid.renderer = shape_model[level].plot_eigenvalues_ratio(
            figure_id=save_figure_wid.renderer.figure_id, new_figure=False)
        plt.subplot(122)
        save_figure_wid.renderer = \
            shape_model[level].plot_eigenvalues_cumulative_ratio(
                figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
                figure_size=new_figure_size)
        save_figure_wid.renderer.force_draw()
    # Mode selector and mean-shape toggle
    mode_dict = OrderedDict()
    mode_dict['Deformation'] = 1
    mode_dict['Vectors'] = 2
    mode_wid = ipywidgets.RadioButtons(
        options=mode_dict, description='Mode', value=1,
        layout=ipywidgets.Layout(width='6cm'))
    mode_wid.observe(render_function, names='value', type='change')
    mean_wid = SwitchWidget(
        selected_value=False, description='Render mean shape',
        description_location='right', switch_type='checkbox')
    mean_wid.observe(render_function, names='selected_values', type='change')
    def mean_visible(change):
        # The mean-shape toggle is only meaningful in 'Deformation' mode
        if change['new'] == 1:
            mean_wid.button_wid.disabled = False
        else:
            mean_wid.button_wid.disabled = True
            mean_wid.set_widget_state(False, allow_callback=False)
    mode_wid.observe(mean_visible, names='value', type='change')
    model_parameters_wid = LinearModelParametersWidget(
        n_parameters[0], render_function, params_str='Parameter ',
        mode=mode, params_bounds=parameters_bounds, params_step=0.1,
        plot_variance_visible=True, plot_variance_function=plot_variance,
        animation_step=0.5, interval=0., loop_enabled=True,
        continuous_update=False)
    labels = None
    if hasattr(shape_model[0].mean(), 'labels'):
        labels = shape_model[0].mean().labels
    shape_options_wid = Shape2DOptionsWidget(
        labels=labels, render_function=render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],
        labels=None, axes_x_limits=0.1, axes_y_limits=0.1,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()
    if n_levels > 1:
        def update_widgets(change):
            model_parameters_wid.set_widget_state(
                n_parameters=n_parameters[change['new']],
                params_str='Parameter ', allow_callback=True)
        # Create pyramid radiobuttons
        radio_str = OrderedDict()
        for l in range(n_levels):
            if l == 0:
                radio_str["Level {} (low)".format(l)] = l
            elif l == n_levels - 1:
                radio_str["Level {} (high)".format(l)] = l
            else:
                radio_str["Level {}".format(l)] = l
        level_wid = ipywidgets.RadioButtons(
            options=radio_str, description='Pyramid', value=n_levels-1,
            layout=ipywidgets.Layout(width='6cm'))
        level_wid.observe(update_widgets, names='value', type='change')
        level_wid.observe(render_function, names='value', type='change')
        radio_children = [level_wid, mode_wid, mean_wid]
    else:
        radio_children = [mode_wid, mean_wid]
    radio_wids = ipywidgets.VBox(radio_children)
    tmp_wid = ipywidgets.HBox([radio_wids, model_parameters_wid])
    options_box = ipywidgets.Tab(
        children=[tmp_wid, shape_options_wid, renderer_options_wid, info_wid,
                  save_figure_wid])
    tab_titles = ['Model', 'Shape', 'Renderer', 'Info', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    logo_wid = LogoWidget(style=main_style)
    logo_wid.layout.margin = '0px 10px 0px 0px'
    wid = ipywidgets.HBox([logo_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger an initial rendering
    render_function({})
def visualize_shape_model_3d(shape_model, n_parameters=5, mode='multiple',
                             parameters_bounds=(-15.0, 15.0)):
    """Widget for exploring the principal components of 3D shape model(s).

    If the model's template instance is a ``TriMesh``, mesh rendering
    options are shown; otherwise pointcloud-style shape options (lines,
    markers, numbering) are used.

    Parameters
    ----------
    shape_model : PCA shape model or `list` of those
        One shape model per pyramid level. A single model is wrapped in a
        list.
    n_parameters : `int`, `list` of `int` or ``None``, optional
        Number of principal components exposed as sliders per level;
        validated by ``check_n_parameters`` against the active components.
    mode : `str`, optional
        Forwarded to ``LinearModelParametersWidget`` (e.g. ``'multiple'``
        for one slider per parameter).
    parameters_bounds : (`float`, `float`), optional
        Bounds of the parameter sliders, in units of standard deviations.
    """
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    # Output widget that captures all printed output
    out = ipywidgets.Output()
    ipydisplay.display(out)
    with out:
        ipydisplay.clear_output(wait=True)
        print('Initializing...')
    # Make sure that shape_model is a list even with a single member
    if not isinstance(shape_model, list):
        shape_model = [shape_model]
    n_levels = len(shape_model)
    is_trimesh = isinstance(shape_model[0].template_instance, TriMesh)
    main_style = 'warning'
    # Check the given number of parameters per level
    max_n_params = [sp.n_active_components for sp in shape_model]
    n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)
    def render_function(change):
        # Clear the previous mayavi figure before re-rendering
        save_figure_wid.renderer.clear_figure()
        with out:
            ipydisplay.clear_output(wait=True)
            # Selected pyramid level
            level = 0
            if n_levels > 1:
                level = level_wid.value
            # Sliders are in units of standard deviations, so scale them by
            # the square root of the eigenvalues to get component weights
            parameters = model_parameters_wid.selected_values
            weights = (parameters *
                       shape_model[level].eigenvalues[:len(parameters)] ** 0.5)
            instance = shape_model[level].instance(weights)
            # Collect the rendering options from the option widgets
            options = dict()
            if is_trimesh:
                options.update(shape_options_wid.selected_values)
            else:
                options.update(shape_options_wid.selected_values['lines'])
                options.update(shape_options_wid.selected_values['markers'])
                options.update(
                    renderer_options_wid.selected_values['numbering_mayavi'])
                if hasattr(instance, 'labels'):
                    # Keep only the colours of the selected labels
                    options['with_labels'] = \
                        shape_options_wid.selected_values['with_labels']
                    line_colour = []
                    marker_colour = []
                    for lbl in options['with_labels']:
                        idx = instance.labels.index(lbl)
                        line_colour.append(options['line_colour'][idx])
                        marker_colour.append(options['marker_colour'][idx])
                    options['line_colour'] = line_colour
                    options['marker_colour'] = marker_colour
                else:
                    # No labels; use a single colour per option
                    options['line_colour'] = options['line_colour'][0]
                    options['marker_colour'] = options['marker_colour'][0]
            # Refresh the info tab and render the instance
            update_info(level, instance.range())
            save_figure_wid.renderer = instance.view(
                figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
                **options)
            save_figure_wid.renderer.force_draw()
    def update_info(level, instance_range):
        # Compose the text lines for the 'Info' tab
        text_per_line = [
            "> Level {} out of {}".format(level + 1, n_levels),
            "> {} components in total".format(shape_model[level].n_components),
            "> {} active components".format(
                shape_model[level].n_active_components),
            "> {:.1f}% variance kept".format(
                shape_model[level].variance_ratio() * 100),
            "> Instance range: {:.1f} x {:.1f}".format(instance_range[0],
                                                       instance_range[1]),
            "> {} points".format(
                shape_model[level].mean().n_points)]
        info_wid.set_widget_state(text_per_line=text_per_line)
    def plot_variance(name):
        # Plot the eigenvalues ratio and its cumulative version side by side
        level = level_wid.value if n_levels > 1 else 0
        with out:
            ipydisplay.clear_output(wait=True)
            plt.subplot(121)
            shape_model[level].plot_eigenvalues_ratio()
            plt.subplot(122)
            shape_model[level].plot_eigenvalues_cumulative_ratio()
            plt.show()
    model_parameters_wid = LinearModelParametersWidget(
        n_parameters[0], render_function, params_str='Parameter ',
        mode=mode, params_bounds=parameters_bounds, params_step=0.1,
        plot_variance_visible=True, plot_variance_function=plot_variance,
        animation_step=0.5, interval=0., loop_enabled=True,
        continuous_update=False)
    # Mesh options for trimesh models, shape options otherwise
    if is_trimesh:
        shape_options_wid = Mesh3DOptionsWidget(textured=False,
                                                render_function=render_function)
    else:
        labels = None
        if hasattr(shape_model[0].mean(), 'labels'):
            labels = shape_model[0].mean().labels
        shape_options_wid = Shape3DOptionsWidget(labels=labels,
                                                 render_function=render_function)
        renderer_options_wid = RendererOptionsWidget(
            options_tabs=['numbering_mayavi'], labels=None,
            render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMayaviFigureOptionsWidget()
    if n_levels > 1:
        def update_widgets(change):
            model_parameters_wid.set_widget_state(
                n_parameters=n_parameters[change['new']],
                params_str='Parameter ', allow_callback=True)
        # Create pyramid radiobuttons
        radio_str = OrderedDict()
        for l in range(n_levels):
            if l == 0:
                radio_str["Level {} (low)".format(l)] = l
            elif l == n_levels - 1:
                radio_str["Level {} (high)".format(l)] = l
            else:
                radio_str["Level {}".format(l)] = l
        level_wid = ipywidgets.RadioButtons(
            options=radio_str, description='Pyramid', value=n_levels-1,
            layout=ipywidgets.Layout(width='6cm'))
        level_wid.observe(update_widgets, names='value', type='change')
        level_wid.observe(render_function, names='value', type='change')
        tmp_wid = ipywidgets.HBox([level_wid, model_parameters_wid])
    else:
        tmp_wid = ipywidgets.HBox(children=[model_parameters_wid])
    if is_trimesh:
        options_box = ipywidgets.Tab(
            children=[tmp_wid, shape_options_wid, info_wid, save_figure_wid])
        tab_titles = ['Model', 'Mesh', 'Info', 'Export']
    else:
        options_box = ipywidgets.Tab(
            children=[tmp_wid, shape_options_wid, renderer_options_wid, info_wid,
                      save_figure_wid])
        tab_titles = ['Model', 'Shape', 'Renderer', 'Info', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    logo_wid = LogoWidget(style=main_style)
    logo_wid.layout.margin = '0px 10px 0px 0px'
    wid = ipywidgets.HBox([logo_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger an initial rendering
    render_function({})
    with out:
        print_dynamic('')
def visualize_appearance_model(appearance_model, n_parameters=5,
                               mode='multiple', parameters_bounds=(-3.0, 3.0),
                               figure_size=(7, 7)):
    """Widget for exploring the principal components of appearance model(s).

    The weighted instance image is rendered with channel, landmark and
    renderer options, and eigenvalue-variance plots are available from the
    parameters widget.

    Parameters
    ----------
    appearance_model : PCA appearance model or `list` of those
        One appearance model per pyramid level. A single model is wrapped
        in a list.
    n_parameters : `int`, `list` of `int` or ``None``, optional
        Number of principal components exposed as sliders per level;
        validated by ``check_n_parameters`` against the active components.
    mode : `str`, optional
        Forwarded to ``LinearModelParametersWidget`` (e.g. ``'multiple'``
        for one slider per parameter).
    parameters_bounds : (`float`, `float`), optional
        Bounds of the parameter sliders, in units of standard deviations.
    figure_size : (`int`, `int`), optional
        Base figure size; scaled by the widget's zoom value.
    """
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print('Initializing...')
    # Make sure that appearance_model is a list even with a single member
    if not isinstance(appearance_model, list):
        appearance_model = [appearance_model]
    n_levels = len(appearance_model)
    main_style = 'success'
    # Check the given number of parameters per level
    max_n_params = [ap.n_active_components for ap in appearance_model]
    n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)
    def render_function(change):
        # Clear the output of the previous rendering
        ipydisplay.clear_output(wait=True)
        level = level_wid.value if n_levels > 1 else 0
        # Sliders are in units of standard deviations, so scale them by the
        # square root of the eigenvalues to get component weights
        parameters = model_parameters_wid.selected_values
        weights = (parameters *
                   appearance_model[level].eigenvalues[:len(parameters)] ** 0.5)
        instance = appearance_model[level].instance(weights)
        image_is_masked = isinstance(instance, MaskedImage)
        g = landmark_options_wid.selected_values['landmarks']['group']
        # Collect the rendering options from all the option widgets
        options = dict()
        options.update(landmark_options_wid.selected_values['lines'])
        options.update(landmark_options_wid.selected_values['markers'])
        options.update(
            renderer_options_wid.selected_values['numbering_matplotlib'])
        options.update(renderer_options_wid.selected_values['axes'])
        options.update(renderer_options_wid.selected_values['legend'])
        options.update(image_options_wid.selected_values)
        options.update(landmark_options_wid.selected_values['landmarks'])
        if (instance.has_landmarks and
                hasattr(instance.landmarks[g], 'labels')):
            # Keep only the colours that correspond to the selected labels
            line_colour = []
            marker_face_colour = []
            marker_edge_colour = []
            for lbl in options['with_labels']:
                id = instance.landmarks[g].labels.index(lbl)
                line_colour.append(options['line_colour'][id])
                marker_face_colour.append(options['marker_face_colour'][id])
                marker_edge_colour.append(options['marker_edge_colour'][id])
            options['line_colour'] = line_colour
            options['marker_face_colour'] = marker_face_colour
            options['marker_edge_colour'] = marker_edge_colour
        else:
            # No labels; use a single colour per option
            options['line_colour'] = options['line_colour'][0]
            options['marker_face_colour'] = options['marker_face_colour'][0]
            options['marker_edge_colour'] = options['marker_edge_colour'][0]
        # Scale the figure size by the selected zoom
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] *
            figure_size[0],
            renderer_options_wid.selected_values['zoom_one'] *
            figure_size[1])
        # Render the instance image and refresh the info tab
        save_figure_wid.renderer = render_image(
            image=instance, renderer=save_figure_wid.renderer,
            image_is_masked=image_is_masked, figure_size=new_figure_size,
            **options)
        update_info(instance, level, g)
    def update_info(image, level, group):
        # Compose the text lines displayed in the 'Info' tab
        lvl_app_mod = appearance_model[level]
        lp = 0 if group is None else image.landmarks[group].n_points
        text_per_line = [
            "> Level: {} out of {}.".format(level + 1, n_levels),
            "> {} components in total.".format(lvl_app_mod.n_components),
            "> {} active components.".format(lvl_app_mod.n_active_components),
            "> {:.1f}% variance kept.".format(
                lvl_app_mod.variance_ratio() * 100),
            "> Reference shape of size {} with {} channel{}.".format(
                image._str_shape(),
                image.n_channels, 's' * (image.n_channels > 1)),
            "> {} features.".format(lvl_app_mod.n_features),
            "> {} landmark points.".format(lp),
            "> Instance: min={:.3f}, max={:.3f}".format(image.pixels.min(),
                                                        image.pixels.max())]
        info_wid.set_widget_state(text_per_line=text_per_line)
    def plot_variance(name):
        # Plot the eigenvalues ratio and its cumulative version side by side
        ipydisplay.clear_output(wait=True)
        level = level_wid.value if n_levels > 1 else 0
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] * 10,
            renderer_options_wid.selected_values['zoom_one'] * 3)
        plt.subplot(121)
        save_figure_wid.renderer = \
            appearance_model[level].plot_eigenvalues_ratio(
                figure_id=save_figure_wid.renderer.figure_id, new_figure=False)
        plt.subplot(122)
        save_figure_wid.renderer = \
            appearance_model[level].plot_eigenvalues_cumulative_ratio(
                figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
                figure_size=new_figure_size)
        save_figure_wid.renderer.force_draw()
    # Create the option widgets based on the first level's mean image
    model_parameters_wid = LinearModelParametersWidget(
        n_parameters[0], render_function, params_str='Parameter ',
        mode=mode, params_bounds=parameters_bounds, params_step=0.1,
        plot_variance_visible=True, plot_variance_function=plot_variance,
        animation_step=0.5, interval=0., loop_enabled=True,
        continuous_update=False)
    groups_keys, labels_keys = extract_groups_labels_from_image(
        appearance_model[0].mean())
    image_options_wid = ImageOptionsWidget(
        n_channels=appearance_model[0].mean().n_channels,
        image_is_masked=isinstance(appearance_model[0].mean(),
                                   MaskedImage),
        render_function=render_function)
    landmark_options_wid = LandmarkOptionsWidget(
        group_keys=groups_keys, labels_keys=labels_keys,
        type='2D', render_function=render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],
        axes_x_limits=None, axes_y_limits=None, labels=None,
        render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()
    tmp_children = [model_parameters_wid]
    if n_levels > 1:
        def update_widgets(change):
            value = change['new']
            # Update model parameters widget
            model_parameters_wid.set_widget_state(
                n_parameters[value], params_str='Parameter ',
                allow_callback=False)
            # Update landmarks options
            g_keys, l_keys = extract_groups_labels_from_image(
                appearance_model[value].mean())
            landmark_options_wid.set_widget_state(
                group_keys=g_keys, labels_keys=l_keys, allow_callback=False)
            # Update channels options
            image_options_wid.set_widget_state(
                n_channels=appearance_model[value].mean().n_channels,
                image_is_masked=isinstance(
                    appearance_model[value].mean(), MaskedImage),
                allow_callback=True)
        # Create pyramid radiobuttons
        radio_str = OrderedDict()
        for l in range(n_levels):
            if l == 0:
                radio_str["Level {} (low)".format(l)] = l
            elif l == n_levels - 1:
                radio_str["Level {} (high)".format(l)] = l
            else:
                radio_str["Level {}".format(l)] = l
        level_wid = ipywidgets.RadioButtons(
            options=radio_str, description='Pyramid', value=n_levels-1,
            layout=ipywidgets.Layout(width='6cm'))
        level_wid.observe(update_widgets, names='value', type='change')
        level_wid.observe(render_function, names='value', type='change')
        tmp_children.insert(0, level_wid)
    tmp_wid = ipywidgets.HBox(tmp_children)
    options_box = ipywidgets.Tab(
        children=[tmp_wid, image_options_wid, landmark_options_wid,
                  renderer_options_wid, info_wid, save_figure_wid])
    tab_titles = ['Model', 'Image', 'Landmarks', 'Renderer', 'Info', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    logo_wid = LogoWidget(style=main_style)
    logo_wid.layout.margin = '0px 10px 0px 0px'
    wid = ipywidgets.HBox([logo_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger an initial rendering
    render_function({})
def visualize_patch_appearance_model(appearance_model, centers,
                                     n_parameters=5, mode='multiple',
                                     parameters_bounds=(-3.0, 3.0),
                                     figure_size=(7, 7)):
    """
    Build and display an ipywidgets UI for interactively exploring a
    patch-based appearance model (or a pyramid of them, one per level).

    Parameters
    ----------
    appearance_model : linear model or `list` of them
        One model per pyramid level; a single model is treated as a
        one-level pyramid.
    centers : patch centres or `list` of them
        Patch centres per level. A single value (or a single-element list)
        is broadcast to all levels.
    n_parameters : `int` or `list` of `int`, optional
        Number of principal-component sliders per level (clamped to each
        level's active components).
    mode : ``'single'`` or ``'multiple'``, optional
        Slider layout, forwarded to ``LinearModelParametersWidget``.
    parameters_bounds : (`float`, `float`), optional
        Lower/upper bound of every parameter slider.
    figure_size : (`int`, `int`), optional
        Base figure size; scaled by the zoom option at render time.
    """
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print('Initializing...')
    # Normalise the model to a list, one entry per pyramid level.
    if not isinstance(appearance_model, list):
        appearance_model = [appearance_model]
    n_levels = len(appearance_model)
    # Broadcast the patch centres so there is one entry per level.
    if not isinstance(centers, list):
        centers = [centers] * n_levels
    elif isinstance(centers, list) and len(centers) == 1:
        centers *= n_levels
    main_style = 'success'
    # Clamp the requested slider count per level to that level's model.
    max_n_params = [ap.n_active_components for ap in appearance_model]
    n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)

    def render_function(change):
        # Re-render the current instance whenever any widget changes.
        ipydisplay.clear_output(wait=True)
        level = level_wid.value if n_levels > 1 else 0
        # Scale slider values by sqrt(eigenvalue) to obtain model weights.
        parameters = model_parameters_wid.selected_values
        weights = (parameters *
                   appearance_model[level].eigenvalues[:len(parameters)] ** 0.5)
        instance = appearance_model[level].instance(weights)
        # Collect rendering options from every option widget.
        options = dict()
        options.update(shape_options_wid.selected_values['lines'])
        options.update(shape_options_wid.selected_values['markers'])
        options.update(
            renderer_options_wid.selected_values['numbering_matplotlib'])
        options.update(renderer_options_wid.selected_values['axes'])
        image_options = dict(image_options_wid.selected_values)
        # render_patches() does not accept 'masked_enabled'.
        del image_options['masked_enabled']
        options.update(image_options)
        options.update(patch_options_wid.selected_values)
        # The shape widget reports per-label colour lists; only a single
        # label is used here, so keep just the first colour.
        options['line_colour'] = options['line_colour'][0]
        options['marker_face_colour'] = options['marker_face_colour'][0]
        options['marker_edge_colour'] = options['marker_edge_colour'][0]
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
            renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
        save_figure_wid.renderer = render_patches(
            patches=instance.pixels, patch_centers=centers[level],
            renderer=save_figure_wid.renderer, figure_size=new_figure_size,
            **options)
        update_info(instance, level)

    def update_info(image, level):
        # Refresh the info tab with statistics of the current instance.
        # The patch tensor is indexed with shape[0]=patches,
        # shape[2]=channels, shape[3]=H, shape[4]=W (see format strings).
        lvl_app_mod = appearance_model[level]
        text_per_line = [
            "> Level: {} out of {}.".format(level + 1, n_levels),
            "> {} components in total.".format(lvl_app_mod.n_components),
            "> {} active components.".format(lvl_app_mod.n_active_components),
            "> {:.1f}% variance kept.".format(
                lvl_app_mod.variance_ratio() * 100),
            "> Each patch has size {}H x {}W with {} channel{}.".format(
                image.pixels.shape[3], image.pixels.shape[4],
                image.pixels.shape[2], 's' * (image.pixels.shape[2] > 1)),
            "> {} features.".format(lvl_app_mod.n_features),
            "> {} landmark points.".format(image.pixels.shape[0]),
            "> Instance: min={:.3f}, max={:.3f}".format(image.pixels.min(),
                                                        image.pixels.max())]
        info_wid.set_widget_state(text_per_line=text_per_line)

    def plot_variance(name):
        # Plot eigenvalue ratio and cumulative ratio side by side for the
        # currently selected level.
        ipydisplay.clear_output(wait=True)
        level = 0
        if n_levels > 1:
            level = level_wid.value
        new_figure_size = (
            renderer_options_wid.selected_values['zoom_one'] * 10,
            renderer_options_wid.selected_values['zoom_one'] * 3)
        plt.subplot(121)
        save_figure_wid.renderer = \
            appearance_model[level].plot_eigenvalues_ratio(
                figure_id=save_figure_wid.renderer.figure_id, new_figure=False)
        plt.subplot(122)
        save_figure_wid.renderer = \
            appearance_model[level].plot_eigenvalues_cumulative_ratio(
                figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
                figure_size=new_figure_size)
        save_figure_wid.renderer.force_draw()

    # Build the option widgets. NOTE(review): construction order matters —
    # some widgets are created with render_function=None and hooked up
    # afterwards to avoid premature callbacks during initialisation.
    model_parameters_wid = LinearModelParametersWidget(
        n_parameters[0], render_function, params_str='Parameter ',
        mode=mode, params_bounds=parameters_bounds, params_step=0.1,
        plot_variance_visible=True, plot_variance_function=plot_variance,
        animation_step=0.5, interval=0., loop_enabled=True,
        continuous_update=False)
    shape_options_wid = Shape2DOptionsWidget(
        labels=None, render_function=None)
    # Lines are off by default for patch visualisation.
    shape_options_wid.line_options_wid.render_lines_switch.button_wid.value = False
    shape_options_wid.add_render_function(render_function)
    patch_options_wid = PatchOptionsWidget(
        n_patches=appearance_model[0].mean().pixels.shape[0],
        n_offsets=appearance_model[0].mean().pixels.shape[1],
        render_function=render_function)
    image_options_wid = ImageOptionsWidget(
        n_channels=appearance_model[0].mean().pixels.shape[2],
        image_is_masked=isinstance(appearance_model[0].mean(), MaskedImage),
        render_function=None)
    # Interpolation is off by default so individual patch pixels are visible.
    image_options_wid.interpolation_checkbox.button_wid.value = False
    image_options_wid.add_render_function(render_function)
    renderer_options_wid = RendererOptionsWidget(
        options_tabs=['zoom_one', 'axes', 'numbering_matplotlib'], labels=None,
        axes_x_limits=None, axes_y_limits=None, render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMatplotlibFigureOptionsWidget()
    tmp_children = [model_parameters_wid]
    if n_levels > 1:
        # Multi-level pyramids get a level selector that also re-syncs the
        # dependent widgets when the level changes.
        def update_widgets(change):
            value = change['new']
            # Update model parameters widget
            model_parameters_wid.set_widget_state(
                n_parameters[value], params_str='Parameter ',
                allow_callback=False)
            # Update patch options
            patch_options_wid.set_widget_state(
                n_patches=appearance_model[value].mean().pixels.shape[0],
                n_offsets=appearance_model[value].mean().pixels.shape[1],
                allow_callback=False)
            # Update channels options
            image_options_wid.set_widget_state(
                n_channels=appearance_model[value].mean().pixels.shape[2],
                image_is_masked=isinstance(appearance_model[value].mean(),
                                           MaskedImage),
                allow_callback=True)
        # Define pyramid radiobuttons
        radio_str = OrderedDict()
        for l in range(n_levels):
            if l == 0:
                radio_str["Level {} (low)".format(l)] = l
            elif l == n_levels - 1:
                radio_str["Level {} (high)".format(l)] = l
            else:
                radio_str["Level {}".format(l)] = l
        level_wid = ipywidgets.RadioButtons(
            options=radio_str, description='Pyramid', value=n_levels-1,
            layout=ipywidgets.Layout(width='6cm'))
        level_wid.observe(update_widgets, names='value', type='change')
        level_wid.observe(render_function, names='value', type='change')
        tmp_children.insert(0, level_wid)
    # Assemble the final tabbed layout.
    tmp_wid = ipywidgets.HBox(tmp_children)
    options_box = ipywidgets.Tab(
        children=[tmp_wid, patch_options_wid, image_options_wid,
                  shape_options_wid, renderer_options_wid, info_wid,
                  save_figure_wid])
    tab_titles = ['Model', 'Patches', 'Channels', 'Shape', 'Renderer', 'Info',
                  'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    logo_wid = LogoWidget(style=main_style)
    logo_wid.layout.margin = '0px 10px 0px 0px'
    wid = ipywidgets.HBox([logo_wid, options_box])
    # Set widget's style
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger the initial rendering.
    render_function({})
def visualize_morphable_model(mm, n_shape_parameters=5, n_texture_parameters=5,
                              mode='multiple', parameters_bounds=(-15.0, 15.0)):
    """
    Build and display an ipywidgets UI for interactively exploring a 3D
    Morphable Model (shape + texture linear models) rendered with Mayavi.

    Parameters
    ----------
    mm : morphable model
        Must expose ``shape_model``, ``texture_model``, ``instance()`` and
        ``n_vertices``/``n_triangles``/``n_channels`` (inferred from usage).
    n_shape_parameters, n_texture_parameters : `int`, optional
        Number of sliders for the shape/texture components.
    mode : ``'single'`` or ``'multiple'``, optional
        Slider layout, forwarded to ``LinearModelParametersWidget``.
    parameters_bounds : (`float`, `float`), optional
        Lower/upper bound of every parameter slider.
    """
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    print_dynamic('Initializing...')
    main_style = 'info'
    # Clamp the requested slider counts to the active components.
    n_shape_parameters = check_n_parameters(
        n_shape_parameters, 1, [mm.shape_model.n_active_components])
    n_texture_parameters = check_n_parameters(
        n_texture_parameters, 1, [mm.texture_model.n_active_components])

    def render_function(change):
        # Rebuild and re-render the instance on any widget change.
        save_figure_wid.renderer.clear_figure()
        ipydisplay.clear_output(wait=True)
        # Scale slider values by sqrt(eigenvalue) to obtain model weights.
        shape_weights = shape_model_parameters_wid.selected_values
        shape_weights = (
            shape_weights *
            mm.shape_model.eigenvalues[:len(shape_weights)] ** 0.5)
        texture_weights = texture_model_parameters_wid.selected_values
        texture_weights = (
            texture_weights *
            mm.texture_model.eigenvalues[:len(texture_weights)] ** 0.5)
        instance = mm.instance(shape_weights=shape_weights,
                               texture_weights=texture_weights)
        # Clip texture values into the valid colour range.
        instance = instance.clip_texture()
        update_info(mm, instance)
        save_figure_wid.renderer = instance.view(
            figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
            **mesh_options_wid.selected_values)
        save_figure_wid.renderer.force_draw()

    def update_info(mm, instance):
        # Refresh the info tab with model and instance statistics.
        text_per_line = [
            "> {} vertices, {} triangles".format(mm.n_vertices,
                                                 mm.n_triangles),
            "> {} shape components ({:.2f}% of variance)".format(
                mm.shape_model.n_components,
                mm.shape_model.variance_ratio() * 100),
            "> {} texture channels".format(mm.n_channels),
            "> {} texture components ({:.2f}% of variance)".format(
                mm.texture_model.n_components,
                mm.texture_model.variance_ratio() * 100),
            "> Instance: min={:.3f} , max={:.3f}".format(
                instance.colours.min(), instance.colours.max())]
        info_wid.set_widget_state(text_per_line=text_per_line)

    def plot_shape_variance(name):
        # Eigenvalue ratio + cumulative ratio of the shape model.
        ipydisplay.clear_output(wait=True)
        plt.subplot(121)
        mm.shape_model.plot_eigenvalues_ratio()
        plt.subplot(122)
        mm.shape_model.plot_eigenvalues_cumulative_ratio()
        plt.show()

    def plot_texture_variance(name):
        # Eigenvalue ratio + cumulative ratio of the texture model.
        ipydisplay.clear_output(wait=True)
        plt.subplot(121)
        mm.texture_model.plot_eigenvalues_ratio()
        plt.subplot(122)
        mm.texture_model.plot_eigenvalues_cumulative_ratio()
        plt.show()

    # Create the parameter widgets for both sub-models.
    shape_model_parameters_wid = LinearModelParametersWidget(
        n_shape_parameters[0], render_function, params_str='Parameter ',
        mode=mode, params_bounds=parameters_bounds, params_step=0.1,
        plot_variance_visible=True, plot_variance_function=plot_shape_variance,
        animation_step=0.5, interval=0., loop_enabled=True)
    texture_model_parameters_wid = LinearModelParametersWidget(
        n_texture_parameters[0], render_function, params_str='Parameter ',
        mode=mode, params_bounds=parameters_bounds, params_step=0.1,
        plot_variance_visible=True, plot_variance_function=plot_texture_variance,
        animation_step=0.5, interval=0., loop_enabled=True)
    mesh_options_wid = Mesh3DOptionsWidget(textured=True,
                                           render_function=render_function)
    info_wid = TextPrintWidget(text_per_line=[''])
    save_figure_wid = SaveMayaviFigureOptionsWidget()
    # Group the two parameter widgets into an inner Shape/Texture tab.
    model_parameters_wid = ipywidgets.HBox(
        [ipywidgets.Tab([shape_model_parameters_wid,
                         texture_model_parameters_wid])])
    model_parameters_wid.children[0].set_title(0, 'Shape')
    model_parameters_wid.children[0].set_title(1, 'Texture')
    options_box = ipywidgets.Tab([model_parameters_wid, mesh_options_wid,
                                  info_wid, save_figure_wid])
    tab_titles = ['Model', 'Mesh', 'Info', 'Export']
    for (k, tl) in enumerate(tab_titles):
        options_box.set_title(k, tl)
    logo_wid = LogoWidget(style=main_style)
    logo_wid.layout.margin = '0px 10px 0px 0px'
    wid = ipywidgets.HBox([logo_wid, options_box])
    wid.box_style = main_style
    wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)
    # Display final widget
    final_box = ipywidgets.Box([wid])
    final_box.layout.display = 'flex'
    ipydisplay.display(final_box)
    # Trigger initial visualization
    render_function({})
    # Clear the 'Initializing...' status line.
    print_dynamic('')
def webcam_widget(canvas_width=640, hd=True, n_preview_windows=5):
    """Launch a webcam snapshot widget and return the captured images.

    The returned list is filled in-place every time the user takes a
    snapshot, so it can be inspected while the widget is still alive.

    Parameters
    ----------
    canvas_width : `int`, optional
        Width of the camera canvas in pixels.
    hd : `bool`, optional
        Whether to request an HD stream.
    n_preview_windows : `int`, optional
        Number of snapshot preview thumbnails to show.
    """
    # Only usable from inside a Jupyter kernel.
    from .utils import verify_ipython_and_kernel
    verify_ipython_and_kernel()
    captured = []

    def _record_snapshot(_):
        # Keep a reference to the most recent snapshot.
        captured.append(wid.selected_values[-1])

    wid = CameraSnapshotWidget(
        canvas_width=canvas_width, hd=hd,
        n_preview_windows=n_preview_windows, preview_windows_margin=3,
        style='danger', preview_style='warning',
        render_function=_record_snapshot)
    wid.container.layout.border = (
        '2px solid' + map_styles_to_hex_colours('danger'))
    ipydisplay.display(wid)
    return captured
| true | true |
f734a86b72c070548e64cd2ada2afc4598dd3bb2 | 758 | py | Python | resources/lib/streamlink/plugins/rtlxl.py | rrosajp/script.module.streamlink-1 | 2ad5076c9c4c38288af94825064cc471da8142d8 | [
"BSD-2-Clause"
] | 1 | 2021-04-25T16:09:09.000Z | 2021-04-25T16:09:09.000Z | resources/lib/streamlink/plugins/rtlxl.py | rrosajp/script.module.streamlink-1 | 2ad5076c9c4c38288af94825064cc471da8142d8 | [
"BSD-2-Clause"
] | null | null | null | resources/lib/streamlink/plugins/rtlxl.py | rrosajp/script.module.streamlink-1 | 2ad5076c9c4c38288af94825064cc471da8142d8 | [
"BSD-2-Clause"
] | null | null | null | import re
import json
from streamlink.plugin import Plugin
from streamlink.stream import HLSStream
_url_re = re.compile(r"http(?:s)?://(?:\w+\.)?rtl.nl/video/(?P<uuid>.*?)\Z", re.IGNORECASE)
class rtlxl(Plugin):
    """Streamlink plugin that resolves rtl.nl video pages to HLS streams."""

    @classmethod
    def can_handle_url(cls, url):
        # Truthy match object when the URL looks like an RTL XL video page.
        return _url_re.match(url)

    def _get_streams(self):
        # The UUID in the URL path keys the video-url feed API.
        uuid = _url_re.match(self.url).group("uuid")
        feed_url = ('https://tm-videourlfeed.rtl.nl/api/url/'
                    '{}?device=pc&drm&format=hls'.format(uuid))
        feed = json.loads(self.session.http.get(feed_url).text)
        return HLSStream.parse_variant_playlist(self.session, feed["url"])
__plugin__ = rtlxl
| 26.137931 | 93 | 0.666227 | import re
import json
from streamlink.plugin import Plugin
from streamlink.stream import HLSStream
_url_re = re.compile(r"http(?:s)?://(?:\w+\.)?rtl.nl/video/(?P<uuid>.*?)\Z", re.IGNORECASE)
class rtlxl(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
match = _url_re.match(self.url)
uuid = match.group("uuid")
videourlfeed = self.session.http.get(
'https://tm-videourlfeed.rtl.nl/api/url/{}?device=pc&drm&format=hls'.format(uuid)
).text
videourlfeedjson = json.loads(videourlfeed)
playlist_url = videourlfeedjson["url"]
return HLSStream.parse_variant_playlist(self.session, playlist_url)
__plugin__ = rtlxl
| true | true |
f734aab43acf3765f95a4b2ab60c56fad32168bf | 1,577 | py | Python | CodeSignal/Challenges/MZ/06_Quest_Efficiency_Item.py | Zubieta/CPP | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | 8 | 2017-03-02T07:56:45.000Z | 2021-08-07T20:20:19.000Z | CodeSignal/Challenges/MZ/06_Quest_Efficiency_Item.py | zubie7a/Algorithms | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | null | null | null | CodeSignal/Challenges/MZ/06_Quest_Efficiency_Item.py | zubie7a/Algorithms | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | 1 | 2021-08-07T20:20:20.000Z | 2021-08-07T20:20:20.000Z | # https://app.codesignal.com/company-challenges/mz/zCYv3tuxRE4JajQNY
def questEfficiencyItem(hours, points, time_for_quests):
    """Return the maximum total points obtainable within the time budget.

    This is the classic 0/1 knapsack: quest ``i`` may be completed at most
    once, costs ``hours[i]`` hours and yields ``points[i]`` points, and the
    total time spent must not exceed ``time_for_quests``.

    The original implementation was a brute-force recursion over all 2**n
    take/skip combinations; this bottom-up DP returns the same values in
    O(len(hours) * time_for_quests) time instead.

    Parameters
    ----------
    hours : list of int
        Time cost of each quest (assumed non-negative).
    points : list of int
        Points awarded by each quest, parallel to ``hours``.
    time_for_quests : int
        Total time available.
    """
    # No usable time means no quest can be credited (matches the original
    # recursion, which returned 0 immediately when time_left <= 0).
    if time_for_quests <= 0:
        return 0
    # best[t] = maximum points achievable using at most t hours.
    best = [0] * (time_for_quests + 1)
    for cost, gain in zip(hours, points):
        # Iterate capacities downwards so each quest is counted at most once.
        for t in range(time_for_quests, cost - 1, -1):
            best[t] = max(best[t], best[t - cost] + gain)
    return best[time_for_quests]
| 43.805556 | 82 | 0.665821 |
def questEfficiencyItem(hours, points, time_for_quests):
# we can obtain with quests in a given limited time.
# hours: hours it takes to complete a quest
# points: points each quest gives you
# time_for_quests: the limit of time to do stuff.
# Recursively, at each position, decide whether to take this quest
# or not. This 'iteration' can be done since the order of the quests
# doesn't matter so you can check from left to right whether to take
def recursive(idx, score_acum, time_left):
if time_left < 0:
return 0
if time_left == 0:
return score_acum
if idx == len(hours):
return score_acum
score = 0
hours_idx = hours[idx]
points_idx = points[idx]
res_1 = recursive(idx + 1, score_acum + points_idx, time_left - hours_idx)
res_2 = recursive(idx + 1, score_acum, time_left)
return max(res_1, res_2)
return recursive(0, 0, time_for_quests)
| true | true |
f734ab273b7cc42f0ec809fb4d69086c50c004da | 4,047 | py | Python | tests/_fixers/_stdlib.py | eldorplus/importlib | 48047b7de74c0e75fecbc0b846864e523e57ecc6 | [
"PSF-2.0",
"BSD-2-Clause"
] | null | null | null | tests/_fixers/_stdlib.py | eldorplus/importlib | 48047b7de74c0e75fecbc0b846864e523e57ecc6 | [
"PSF-2.0",
"BSD-2-Clause"
] | null | null | null | tests/_fixers/_stdlib.py | eldorplus/importlib | 48047b7de74c0e75fecbc0b846864e523e57ecc6 | [
"PSF-2.0",
"BSD-2-Clause"
] | null | null | null | import os
import shutil
import sys
import tempfile
import types
from importlib2._fixers import (swap, SimpleNamespace, new_class,
_thread, builtins)
from importlib2._fixers._modules import mod_from_ns
def fix_builtins(builtins=builtins):
    """Register *builtins* under its Python-3 name if not present yet."""
    if 'builtins' not in sys.modules:
        sys.modules['builtins'] = builtins
def fix_types(types=types):
    """Backfill Python-3-only helpers onto the *types* module.

    Installs the compat implementations of ``SimpleNamespace`` and
    ``new_class`` and returns the patched module for convenience.
    """
    for attr, replacement in (('SimpleNamespace', SimpleNamespace),
                              ('new_class', new_class)):
        setattr(types, attr, replacement)
    return types
def fix_collections():
    """Make ``collections.abc`` importable on Pythons that lack it."""
    try:
        import collections.abc
    except ImportError:
        # Pre-3.3: the ABCs live directly on the flat ``collections``
        # module, so alias it as its own ``abc`` submodule.
        import collections
        sys.modules['collections.abc'] = collections.abc = collections
def fix_tempfile():
    """Backfill ``tempfile.TemporaryDirectory`` on Pythons that lack it."""
    if hasattr(tempfile, 'TemporaryDirectory'):
        return

    class TemporaryDirectory(object):
        """Minimal context-manager stand-in for the 3.x class."""

        def __init__(self):
            self.name = tempfile.mkdtemp()

        def __enter__(self):
            return self

        def __exit__(self, *exc_info):
            shutil.rmtree(self.name, ignore_errors=True)

    tempfile.TemporaryDirectory = TemporaryDirectory
def fix_os(os=os):
    """Backfill ``os.fsencode``/``os.fsdecode`` as identity functions."""
    def _identity(value):
        return value

    for name in ('fsencode', 'fsdecode'):
        if not hasattr(os, name):
            setattr(os, name, _identity)
def fix_thread(_thread=_thread):
sys.modules['_thread'] = _thread
if not hasattr(_thread, 'TIMEOUT_MAX'):
_thread.TIMEOUT_MAX = 10 # XXX Make it accurate.
if not hasattr(_thread, '_set_sentinel'):
_thread._set_sentinel = lambda: _thread.allocate_lock()
def inject_threading():
    """Install this package's bundled threading shim as ``threading``."""
    from . import threading as _threading
    sys.modules['threading'] = _threading
#################################################
# testing
def fix_unittest():
    """Backfill unittest features missing on old Pythons.

    Adds a no-op ``TestCase.subTest`` context manager and, when
    ``unittest.mock`` is unavailable, a minimal fake that supports only
    ``mock.patch.object``.
    """
    import unittest
    # Add in unittest.TestCase.subTest.
    if not hasattr(unittest.TestCase, 'subTest'):
        from contextlib import contextmanager
        # Stand-in: runs the body once, without any subtest bookkeeping.
        @contextmanager
        def subTest(self, *args, **kwargs):
            yield
        unittest.TestCase.subTest = subTest
    # Add in a fake unittest.mock.
    try:
        import unittest.mock
    except ImportError:
        def patched(obj, attr):
            # Minimal mock callable: raises .side_effect if set, otherwise
            # returns .return_value.
            def mocked(*args, **kwargs):
                try:
                    exc = mocked.side_effect
                except AttributeError:
                    return mocked.return_value
                else:
                    raise exc
            # swap() installs the stub on obj.attr and restores on exit.
            return swap(obj, attr, mocked, pop=False)
        from importlib2 import _bootstrap
        # Create a synthetic 'unittest.mock' module via the bootstrap
        # machinery so it carries a loader and spec like a real module.
        mock = _bootstrap._new_module('unittest.mock')
        mock.__loader__ = _bootstrap.BuiltinImporter
        mock.__spec__ = _bootstrap.ModuleSpec(mock.__name__, mock.__loader__,
                                              origin=__file__)
        # Only patch.object is emulated; bare patch() is a placeholder.
        mock.patch = lambda: None
        mock.patch.object = patched
        sys.modules['unittest.mock'] = mock
        unittest.mock = mock
def _format_obj(obj):
if isinstance(obj, dict) and '__builtins__' in obj:
refmod = mod_from_ns(obj)
return ('<ns for module {!r} ({} {})>'
).format(obj['__name__'], refmod, id(refmod))
else:
return '{} {}'.format(obj, id(obj))
def check_mod(module_name, mod=None, orig=None):
    """Sanity-check a (possibly importlib2-related) registered module.

    Looks the module up in ``sys.modules`` when *mod* is not supplied and,
    for ``importlib*`` modules without a ``_bootstrap`` attribute, asserts
    that the bootstrap globals carry a proper ``__name__``.
    ``orig`` is accepted for interface compatibility but unused here.
    """
    if module_name is None:
        if mod is None:
            raise TypeError('missing module_name')
        module_name = mod.__name__
    if module_name is None:
        raise ImportError('{!r}: mod.__name__ is None'.format(mod))
    if mod is None:
        if module_name not in sys.modules:
            return
        mod = sys.modules[module_name]
    # Only importlib* modules carry the bootstrap invariant checked below.
    if not module_name.startswith('importlib') or hasattr(mod, '_bootstrap'):
        return
    try:
        f = mod._resolve_name
    except AttributeError:
        f = mod.ModuleSpec.__init__
    assert f.__globals__['__name__'] is not None, module_name
def fix_support(support=None):
    """Ensure the shared test-support module exposes ``check_mod``."""
    if support is None:
        # Late import (rebinds the parameter) so merely importing this
        # module does not require the tests package to be importable.
        from tests import support
    if not hasattr(support, 'check_mod'):
        support.check_mod = check_mod
| 28.702128 | 77 | 0.604151 | import os
import shutil
import sys
import tempfile
import types
from importlib2._fixers import (swap, SimpleNamespace, new_class,
_thread, builtins)
from importlib2._fixers._modules import mod_from_ns
def fix_builtins(builtins=builtins):
sys.modules.setdefault('builtins', builtins)
def fix_types(types=types):
types.SimpleNamespace = SimpleNamespace
types.new_class = new_class
return types
def fix_collections():
try:
import collections.abc
except ImportError:
import collections
collections.abc = collections
sys.modules['collections.abc'] = collections
def fix_tempfile():
if not hasattr(tempfile, 'TemporaryDirectory'):
class TemporaryDirectory(object):
def __init__(self):
self.name = tempfile.mkdtemp()
def __enter__(self):
return self
def __exit__(self, *args):
shutil.rmtree(self.name, ignore_errors=True)
tempfile.TemporaryDirectory = TemporaryDirectory
def fix_os(os=os):
if not hasattr(os, 'fsencode'):
os.fsencode = lambda s: s
if not hasattr(os, 'fsdecode'):
os.fsdecode = lambda s: s
def fix_thread(_thread=_thread):
sys.modules['_thread'] = _thread
if not hasattr(_thread, 'TIMEOUT_MAX'):
_thread.TIMEOUT_MAX = 10
if not hasattr(_thread, '_set_sentinel'):
_thread._set_sentinel = lambda: _thread.allocate_lock()
def inject_threading():
from . import threading
sys.modules['threading'] = threading
__' in obj:
refmod = mod_from_ns(obj)
return ('<ns for module {!r} ({} {})>'
).format(obj['__name__'], refmod, id(refmod))
else:
return '{} {}'.format(obj, id(obj))
def check_mod(module_name, mod=None, orig=None):
if module_name is None:
if mod is None:
raise TypeError('missing module_name')
module_name = mod.__name__
if module_name is None:
raise ImportError('{!r}: mod.__name__ is None'.format(mod))
if mod is None:
if module_name not in sys.modules:
return
mod = sys.modules[module_name]
if module_name.startswith('importlib'):
if not hasattr(mod, '_bootstrap'):
try:
f = mod._resolve_name
except AttributeError:
f = mod.ModuleSpec.__init__
bsname = f.__globals__['__name__']
assert bsname is not None, module_name
def fix_support(support=None):
if support is None:
from tests import support
if not hasattr(support, 'check_mod'):
support.check_mod = check_mod
| true | true |
f734ac2fb9831a1d3787413031ed1eda99d96841 | 1,869 | py | Python | daemons/build_tf_deploy_config.py | ucsc-cgp/cgp-data-store | 1d583d654a07a14df0c566a66fb6ba574be36cbc | [
"MIT"
] | 46 | 2017-03-24T15:56:09.000Z | 2021-03-15T19:49:07.000Z | daemons/build_tf_deploy_config.py | ucsc-cgp/cgp-data-store | 1d583d654a07a14df0c566a66fb6ba574be36cbc | [
"MIT"
] | 1,799 | 2017-04-04T17:54:28.000Z | 2020-11-19T12:30:13.000Z | daemons/build_tf_deploy_config.py | ucsc-cgp/cgp-data-store | 1d583d654a07a14df0c566a66fb6ba574be36cbc | [
"MIT"
] | 13 | 2017-03-27T23:49:35.000Z | 2021-01-18T07:39:49.000Z | #!/usr/bin/env python
"""
This script generates Terraform scripting needed for daemons that deploy infrastructure.
"""
import os
import glob
import json
import boto3
import argparse
daemons_root = os.path.abspath(os.path.dirname(__file__))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("daemon")
args = parser.parse_args()
env_vars_to_lambda = os.environ['EXPORT_ENV_VARS_TO_LAMBDA'].split()
terraform_backend_template = """terraform {{
backend "s3" {{
bucket = "{bucket}"
key = "dss-{daemon}-{stage}.tfstate"
region = "{region}"
{profile_setting}
}}
}}
"""
terraform_providers_template = """
provider aws {{
region = "{aws_region}"
}}
"""
account_id = boto3.client("sts").get_caller_identity()['Account']
backend_bucket = os.environ['DSS_TERRAFORM_BACKEND_BUCKET_TEMPLATE'].format(account_id=account_id)
terraform_variable_info = {'variable': dict()}
for key in env_vars_to_lambda:
terraform_variable_info['variable'][key] = {
'default': os.environ[key]
}
with open(os.path.join(daemons_root, args.daemon, "backend.tf"), "w") as fp:
if os.environ.get('AWS_PROFILE'):
profile = os.environ['AWS_PROFILE']
profile_setting = f'profile = "{profile}"'
else:
profile_setting = ''
fp.write(terraform_backend_template.format(
bucket=backend_bucket,
daemon=args.daemon,
stage=os.environ['DSS_DEPLOYMENT_STAGE'],
region=os.environ['AWS_DEFAULT_REGION'],
profile_setting=profile_setting,
))
with open(os.path.join(daemons_root, args.daemon, "variables.tf"), "w") as fp:
fp.write(json.dumps(terraform_variable_info, indent=2))
with open(os.path.join(daemons_root, args.daemon, "providers.tf"), "w") as fp:
fp.write(terraform_providers_template.format(
aws_region=os.environ['AWS_DEFAULT_REGION'],
))
| 26.7 | 98 | 0.702515 |
import os
import glob
import json
import boto3
import argparse
daemons_root = os.path.abspath(os.path.dirname(__file__))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("daemon")
args = parser.parse_args()
env_vars_to_lambda = os.environ['EXPORT_ENV_VARS_TO_LAMBDA'].split()
terraform_backend_template = """terraform {{
backend "s3" {{
bucket = "{bucket}"
key = "dss-{daemon}-{stage}.tfstate"
region = "{region}"
{profile_setting}
}}
}}
"""
terraform_providers_template = """
provider aws {{
region = "{aws_region}"
}}
"""
account_id = boto3.client("sts").get_caller_identity()['Account']
backend_bucket = os.environ['DSS_TERRAFORM_BACKEND_BUCKET_TEMPLATE'].format(account_id=account_id)
terraform_variable_info = {'variable': dict()}
for key in env_vars_to_lambda:
terraform_variable_info['variable'][key] = {
'default': os.environ[key]
}
with open(os.path.join(daemons_root, args.daemon, "backend.tf"), "w") as fp:
if os.environ.get('AWS_PROFILE'):
profile = os.environ['AWS_PROFILE']
profile_setting = f'profile = "{profile}"'
else:
profile_setting = ''
fp.write(terraform_backend_template.format(
bucket=backend_bucket,
daemon=args.daemon,
stage=os.environ['DSS_DEPLOYMENT_STAGE'],
region=os.environ['AWS_DEFAULT_REGION'],
profile_setting=profile_setting,
))
with open(os.path.join(daemons_root, args.daemon, "variables.tf"), "w") as fp:
fp.write(json.dumps(terraform_variable_info, indent=2))
with open(os.path.join(daemons_root, args.daemon, "providers.tf"), "w") as fp:
fp.write(terraform_providers_template.format(
aws_region=os.environ['AWS_DEFAULT_REGION'],
))
| true | true |
f734ac3225b00088002daa226213683a43e43d53 | 8,450 | py | Python | view/__init__.py | mstrechen/pacman | fc9b609857476588fa269dff4acee744f51d9fbb | [
"MIT"
] | null | null | null | view/__init__.py | mstrechen/pacman | fc9b609857476588fa269dff4acee744f51d9fbb | [
"MIT"
] | null | null | null | view/__init__.py | mstrechen/pacman | fc9b609857476588fa269dff4acee744f51d9fbb | [
"MIT"
] | null | null | null | import typing as t
import pygame
from labyrinth.labyrinth import Labyrinth
from view.banana import Banana
from view.dot import Dot
from view.ghost import Ghost
from view.pacman import Pacman
MAX_DISPLAY_WIDTH = 1000
MAX_DISPLAY_HEIGHT = 500
class View:
cell_size: int
sprites: t.Dict[str, pygame.sprite.Sprite]
sprites_group: pygame.sprite.Group
labyrinth: Labyrinth = None
SMOOTH_MOVEMENT = ['pacman', 'ghosts']
SPRITES_Z_INDEX = {
'pacman': 0,
'target': -1,
'ghosts': 1,
}
REGULAR_SPRITES = ['pacman', 'target']
LIST_SPRITES = ['ghosts']
STATIC_SPRITES = ['dots']
    def __init__(self):
        """Initialise pygame and open a window at the maximum display size."""
        pygame.init()
        self.sprites_group = pygame.sprite.Group()
        self.display_info = pygame.display.Info()
        self.sprites: t.Dict[t.Union[pygame.sprite.Sprite, t.List[pygame.sprite.Sprite]]] = {}
        self.benchmarking = {}
        # Window is resized to fit the labyrinth later, in draw_labyrinth().
        self.screen = pygame.display.set_mode([MAX_DISPLAY_WIDTH, MAX_DISPLAY_HEIGHT])
        self.clock = pygame.time.Clock()
def draw_labyrinth(self, labyrinth: t.Optional[Labyrinth] = None):
self.labyrinth = labyrinth or self.labyrinth
labyrinth = self.labyrinth
cell_size = min(
self.display_info.current_h // len(labyrinth.raw_img),
self.display_info.current_w // len(labyrinth.raw_img[0])
)
self.screen = pygame.display.set_mode([cell_size * len(labyrinth.raw_img[0]), cell_size * len(labyrinth.raw_img)])
self.cell_size = cell_size
for line_no, line in enumerate(labyrinth.raw_img):
for char_no, char in enumerate(line):
color = (0, 0, 0) if char == ' ' else (0, 0, 200)
pygame.draw.rect(
self.screen, color,
(char_no * cell_size, line_no * cell_size, cell_size, cell_size)
)
    def set_initial_state(self, state: t.Dict[str, t.Any]):
        """Create sprites for every entity present in *state* and render.

        State coordinates are (x, y) grid positions; note that screen
        offsets are built as (y * cell, x * cell) — presumably x is the
        row index and y the column (TODO confirm against the model).
        """
        self.state = state
        if 'pacman' in state:
            x, y = state['pacman']
            pacman = Pacman(size=self.cell_size)
            self.sprites['pacman'] = pacman
            pacman.rect.move_ip(y * self.cell_size, x * self.cell_size)
        if 'target' in state:
            x, y = state['target']
            target = Banana(size=self.cell_size)
            self.sprites['target'] = target
            target.rect.move_ip(y * self.cell_size, x * self.cell_size)
        if 'ghosts' in state:
            # One Ghost sprite per ghost position.
            self.sprites['ghosts'] = [
                Ghost(size=self.cell_size)
                for _ in state['ghosts']
            ]
            for i, (x, y) in enumerate(state['ghosts']):
                self.sprites['ghosts'][i].rect.move_ip(y * self.cell_size, x * self.cell_size)
        if 'dots' in state:
            self.sprites['dots'] = [
                Dot(size=self.cell_size)
                for _ in state['dots']
            ]
            # Dots are smaller than a cell; 'offset' centres them.
            for i, (x, y) in enumerate(state['dots']):
                sprite: Dot = self.sprites['dots'][i]
                sprite.rect.move_ip(y * self.cell_size + sprite.offset, x * self.cell_size + sprite.offset)
        self.sprites_group.add(*self.sorted_sprites)
        self.render()
def update_state(self, state: t.Dict[str, t.Any], benchmarking: t.Dict[str, t.Any]):
self.benchmarking = benchmarking
self.update_rotations(self.state, state)
steps = 5 if self.cell_size > 8 else 1
for int_state in self._generate_intermediate_states(steps, self.state, state):
self.state.update(int_state)
self.sync()
self.render()
self.state.update(state)
self.sync()
self.render()
    def sync(self):
        """Move every sprite's rect to match the current self.state."""
        if 'pacman' in self.sprites:
            pacman = self.sprites['pacman']
            x, y = self.state['pacman']
            pacman.rect.update([y * self.cell_size, x * self.cell_size, self.cell_size, self.cell_size])
        if 'target' in self.sprites:
            target = self.sprites['target']
            x, y = self.state['target']
            target.rect.update([y * self.cell_size, x * self.cell_size, self.cell_size, self.cell_size])
        if 'ghosts' in self.sprites:
            # Positions and sprites are paired by index.
            for source, target in zip(self.state['ghosts'], self.sprites['ghosts']):
                x, y = source
                target.rect.update([y * self.cell_size, x * self.cell_size, self.cell_size, self.cell_size])
        if 'dots' in self.state:
            # NOTE(review): dots are fully recreated each sync (they can
            # disappear when eaten) rather than repositioned in place.
            self.sprites['dots'] = [
                Dot(size=self.cell_size)
                for _ in self.state['dots']
            ]
            for i, (x, y) in enumerate(self.state['dots']):
                sprite: Dot = self.sprites['dots'][i]
                sprite.rect.move_ip(y * self.cell_size + sprite.offset, x * self.cell_size + sprite.offset)
    def render(self):
        """Draw one full frame (maze, dots, sprites, stats) at <= 30 FPS."""
        self.draw_labyrinth()
        # draw_dots is defined elsewhere in this class (not visible here).
        self.draw_dots()
        self.sprites_group.draw(self.screen)
        self.show_benchmarking()
        pygame.display.flip()
        # Cap the frame rate at 30 FPS.
        self.clock.tick(30)
@property
def sorted_sprites(self):
res = sorted(
[
(key, value)
for key, value in self.sprites.items()
if key in self.REGULAR_SPRITES
],
key=lambda kv: self.SPRITES_Z_INDEX.get(kv[0])
)
res = list(map(lambda kv: kv[1], res))
for sprite_list in sorted(
[
(key, value)
for key, value in self.sprites.items()
if key in self.LIST_SPRITES
],
key=lambda kv: self.SPRITES_Z_INDEX.get(kv[0])
):
res += sprite_list[1]
return res
def update_rotations(self, from_state, to_state):
pacman = self.sprites['pacman']
pacman.rotation = self._get_rotation(from_state['pacman'], to_state['pacman'])
def show_benchmarking(self):
font = pygame.font.Font(None, 36)
for i, (key, value) in enumerate(self.benchmarking.items()):
if isinstance(value, float):
text = f'{key} : {value:.5f}'
else:
text = f'{key}: {value}'
text = font.render(text, True, (0, 140, 0))
place = text.get_rect(topleft=(10, 10 + i * 36))
self.screen.blit(text, place)
    @classmethod
    def _generate_intermediate_states(cls, count: int, state_from: t.Dict[str, t.Any], state_to: t.Dict[str, t.Any])\
            -> t.List[t.Dict[str, t.Any]]:
        """Return *count* interpolated states between *state_from* and *state_to*.

        Only SMOOTH_MOVEMENT keys whose value is an (x, y) pair are linearly
        interpolated; every other key keeps its old value on the first step and
        snaps to the target value afterwards.
        """
        # TODO: fix teleports - need some complex logic
        intermediate_states = []
        for i in range(count):
            intermediate_state = {}
            for key, value in state_from.items():
                if key not in cls.SMOOTH_MOVEMENT:
                    intermediate_state[key] = value if i == 0 or key not in state_to else state_to[key]
                elif (
                    (key not in state_to)
                    or (not isinstance(value, list) and not isinstance(value, tuple))
                    or len(value) != 2
                ):
                    # No target, or not a coordinate pair: nothing to interpolate.
                    intermediate_state[key] = value
                else:
                    if key in cls.REGULAR_SPRITES:
                        # Single (x, y) pair: interpolate component-wise.
                        intermediate_state[key] = (
                            value[0] + (state_to[key][0] - value[0]) * i / count,
                            value[1] + (state_to[key][1] - value[1]) * i / count
                        )
                    else:
                        # List of (x, y) pairs (e.g. ghosts): interpolate each.
                        intermediate_state[key] = [
                            (
                                source[0] + (dest[0] - source[0]) * i / count,
                                source[1] + (dest[1] - source[1]) * i / count
                            )
                            for source, dest in zip(value, state_to[key])
                        ]
            intermediate_states.append(intermediate_state)
        return intermediate_states
@staticmethod
def _get_rotation(from_xy, to_xy):
x_f, y_f = from_xy
x_t, y_t = to_xy
if x_t < x_f:
return 3
if x_t > x_f:
return 1
if y_t < y_f:
return 0
return 2
def draw_dots(self):
if 'dots' not in self.sprites:
return
sprites = self.sprites['dots']
sprites_group = pygame.sprite.Group()
sprites_group.add(*sprites)
sprites_group.draw(self.screen)
| 38.235294 | 122 | 0.539053 | import typing as t
import pygame
from labyrinth.labyrinth import Labyrinth
from view.banana import Banana
from view.dot import Dot
from view.ghost import Ghost
from view.pacman import Pacman
MAX_DISPLAY_WIDTH = 1000
MAX_DISPLAY_HEIGHT = 500
class View:
    """Pygame front-end: draws the labyrinth, sprites and a benchmark overlay."""
    # Declared attributes (populated in __init__ / draw_labyrinth).
    cell_size: int
    sprites: t.Dict[str, pygame.sprite.Sprite]
    sprites_group: pygame.sprite.Group
    labyrinth: Labyrinth = None
    # State keys whose positions are interpolated between frames.
    SMOOTH_MOVEMENT = ['pacman', 'ghosts']
    # Relative draw order; lower values are drawn first.
    SPRITES_Z_INDEX = {
        'pacman': 0,
        'target': -1,
        'ghosts': 1,
    }
    REGULAR_SPRITES = ['pacman', 'target']
    LIST_SPRITES = ['ghosts']
    STATIC_SPRITES = ['dots']
    def __init__(self):
        """Initialise pygame and open a window at the maximum display size."""
        pygame.init()
        self.sprites_group = pygame.sprite.Group()
        self.display_info = pygame.display.Info()
        self.sprites: t.Dict[t.Union[pygame.sprite.Sprite, t.List[pygame.sprite.Sprite]]] = {}
        self.benchmarking = {}
        self.screen = pygame.display.set_mode([MAX_DISPLAY_WIDTH, MAX_DISPLAY_HEIGHT])
        self.clock = pygame.time.Clock()
    def draw_labyrinth(self, labyrinth: t.Optional[Labyrinth] = None):
        """Resize the window to fit *labyrinth* and draw its walls.

        Called without an argument it redraws the previously given labyrinth.
        """
        self.labyrinth = labyrinth or self.labyrinth
        labyrinth = self.labyrinth
        # Largest square cell that still fits the whole maze on screen.
        cell_size = min(
            self.display_info.current_h // len(labyrinth.raw_img),
            self.display_info.current_w // len(labyrinth.raw_img[0])
        )
        self.screen = pygame.display.set_mode([cell_size * len(labyrinth.raw_img[0]), cell_size * len(labyrinth.raw_img)])
        self.cell_size = cell_size
        for line_no, line in enumerate(labyrinth.raw_img):
            for char_no, char in enumerate(line):
                # ' ' is free space (black); any other char is a wall (blue).
                color = (0, 0, 0) if char == ' ' else (0, 0, 200)
                pygame.draw.rect(
                    self.screen, color,
                    (char_no * cell_size, line_no * cell_size, cell_size, cell_size)
                )
    def set_initial_state(self, state: t.Dict[str, t.Any]):
        """Create sprites for every entity present in *state* and render once."""
        self.state = state
        if 'pacman' in state:
            x, y = state['pacman']
            pacman = Pacman(size=self.cell_size)
            self.sprites['pacman'] = pacman
            pacman.rect.move_ip(y * self.cell_size, x * self.cell_size)
        if 'target' in state:
            x, y = state['target']
            target = Banana(size=self.cell_size)
            self.sprites['target'] = target
            target.rect.move_ip(y * self.cell_size, x * self.cell_size)
        if 'ghosts' in state:
            self.sprites['ghosts'] = [
                Ghost(size=self.cell_size)
                for _ in state['ghosts']
            ]
            for i, (x, y) in enumerate(state['ghosts']):
                self.sprites['ghosts'][i].rect.move_ip(y * self.cell_size, x * self.cell_size)
        if 'dots' in state:
            self.sprites['dots'] = [
                Dot(size=self.cell_size)
                for _ in state['dots']
            ]
            for i, (x, y) in enumerate(state['dots']):
                sprite: Dot = self.sprites['dots'][i]
                sprite.rect.move_ip(y * self.cell_size + sprite.offset, x * self.cell_size + sprite.offset)
        self.sprites_group.add(*self.sorted_sprites)
        self.render()
    def update_state(self, state: t.Dict[str, t.Any], benchmarking: t.Dict[str, t.Any]):
        """Animate from the current state to *state*, rendering each step."""
        self.benchmarking = benchmarking
        self.update_rotations(self.state, state)
        # Fewer interpolation steps on tiny cells, where smoothing is invisible.
        steps = 5 if self.cell_size > 8 else 1
        for int_state in self._generate_intermediate_states(steps, self.state, state):
            self.state.update(int_state)
            self.sync()
            self.render()
        self.state.update(state)
        self.sync()
        self.render()
    def sync(self):
        """Move every sprite to the coordinates recorded in ``self.state``."""
        if 'pacman' in self.sprites:
            pacman = self.sprites['pacman']
            x, y = self.state['pacman']
            pacman.rect.update([y * self.cell_size, x * self.cell_size, self.cell_size, self.cell_size])
        if 'target' in self.sprites:
            target = self.sprites['target']
            x, y = self.state['target']
            target.rect.update([y * self.cell_size, x * self.cell_size, self.cell_size, self.cell_size])
        if 'ghosts' in self.sprites:
            for source, target in zip(self.state['ghosts'], self.sprites['ghosts']):
                x, y = source
                target.rect.update([y * self.cell_size, x * self.cell_size, self.cell_size, self.cell_size])
        if 'dots' in self.state:
            # Dot sprites are rebuilt every sync so they always mirror the state.
            self.sprites['dots'] = [
                Dot(size=self.cell_size)
                for _ in self.state['dots']
            ]
            for i, (x, y) in enumerate(self.state['dots']):
                sprite: Dot = self.sprites['dots'][i]
                sprite.rect.move_ip(y * self.cell_size + sprite.offset, x * self.cell_size + sprite.offset)
    def render(self):
        """Draw one frame (maze, dots, sprites, overlay) capped at 30 FPS."""
        self.draw_labyrinth()
        self.draw_dots()
        self.sprites_group.draw(self.screen)
        self.show_benchmarking()
        pygame.display.flip()
        self.clock.tick(30)
    @property
    def sorted_sprites(self):
        """All sprites as one flat list ordered by ``SPRITES_Z_INDEX``."""
        res = sorted(
            [
                (key, value)
                for key, value in self.sprites.items()
                if key in self.REGULAR_SPRITES
            ],
            key=lambda kv: self.SPRITES_Z_INDEX.get(kv[0])
        )
        res = list(map(lambda kv: kv[1], res))
        for sprite_list in sorted(
            [
                (key, value)
                for key, value in self.sprites.items()
                if key in self.LIST_SPRITES
            ],
            key=lambda kv: self.SPRITES_Z_INDEX.get(kv[0])
        ):
            res += sprite_list[1]
        return res
    def update_rotations(self, from_state, to_state):
        """Rotate the pacman sprite towards its next position."""
        pacman = self.sprites['pacman']
        pacman.rotation = self._get_rotation(from_state['pacman'], to_state['pacman'])
    def show_benchmarking(self):
        """Overlay benchmarking key/value pairs in the top-left corner."""
        font = pygame.font.Font(None, 36)
        for i, (key, value) in enumerate(self.benchmarking.items()):
            if isinstance(value, float):
                text = f'{key} : {value:.5f}'
            else:
                text = f'{key}: {value}'
            text = font.render(text, True, (0, 140, 0))
            place = text.get_rect(topleft=(10, 10 + i * 36))
            self.screen.blit(text, place)
    @classmethod
    def _generate_intermediate_states(cls, count: int, state_from: t.Dict[str, t.Any], state_to: t.Dict[str, t.Any])\
            -> t.List[t.Dict[str, t.Any]]:
        """Return *count* interpolated states between *state_from* and *state_to*.

        Only SMOOTH_MOVEMENT keys holding (x, y) pairs are interpolated; all
        other keys snap to their target value after the first step.
        """
        intermediate_states = []
        for i in range(count):
            intermediate_state = {}
            for key, value in state_from.items():
                if key not in cls.SMOOTH_MOVEMENT:
                    intermediate_state[key] = value if i == 0 or key not in state_to else state_to[key]
                elif (
                    (key not in state_to)
                    or (not isinstance(value, list) and not isinstance(value, tuple))
                    or len(value) != 2
                ):
                    # No target, or not a coordinate pair: nothing to interpolate.
                    intermediate_state[key] = value
                else:
                    if key in cls.REGULAR_SPRITES:
                        intermediate_state[key] = (
                            value[0] + (state_to[key][0] - value[0]) * i / count,
                            value[1] + (state_to[key][1] - value[1]) * i / count
                        )
                    else:
                        intermediate_state[key] = [
                            (
                                source[0] + (dest[0] - source[0]) * i / count,
                                source[1] + (dest[1] - source[1]) * i / count
                            )
                            for source, dest in zip(value, state_to[key])
                        ]
            intermediate_states.append(intermediate_state)
        return intermediate_states
    @staticmethod
    def _get_rotation(from_xy, to_xy):
        """Map a move between two (x, y) cells to a rotation index (0-3)."""
        x_f, y_f = from_xy
        x_t, y_t = to_xy
        if x_t < x_f:
            return 3
        if x_t > x_f:
            return 1
        if y_t < y_f:
            return 0
        return 2
    def draw_dots(self):
        """Blit all dot sprites (if any) onto the screen."""
        if 'dots' not in self.sprites:
            return
        sprites = self.sprites['dots']
        sprites_group = pygame.sprite.Group()
        sprites_group.add(*sprites)
        sprites_group.draw(self.screen)
| true | true |
f734ac4222c50c3f665746f358b2578f221a455c | 4,696 | py | Python | tools/utils/crawl_urls.py | p3t3r67x0/purple_jo | a58e89c8597aaa612b3f2784ff3259342b7b9fe6 | [
"MIT"
] | 8 | 2020-03-25T21:06:14.000Z | 2022-02-17T03:22:06.000Z | tools/utils/crawl_urls.py | webtobesocial/purple_jo | a58e89c8597aaa612b3f2784ff3259342b7b9fe6 | [
"MIT"
] | 7 | 2021-03-31T19:31:44.000Z | 2022-03-12T00:08:20.000Z | tools/utils/crawl_urls.py | webtobesocial/purple_jo | a58e89c8597aaa612b3f2784ff3259342b7b9fe6 | [
"MIT"
] | 3 | 2020-11-21T22:07:08.000Z | 2021-11-03T09:10:49.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import time
import requests
import multiprocessing
import argparse
from lxml import html
from urllib.parse import urljoin
from urllib.parse import urlparse
from fake_useragent import UserAgent
from lxml.etree import ParserError
from lxml.etree import XMLSyntaxError
from requests.exceptions import Timeout
from requests.exceptions import InvalidURL
from requests.exceptions import InvalidSchema
from requests.exceptions import MissingSchema
from requests.exceptions import ConnectionError
from requests.exceptions import ChunkedEncodingError
from requests.exceptions import ContentDecodingError
from requests.exceptions import TooManyRedirects
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from pymongo.errors import AutoReconnect
from pymongo.errors import WriteError
from idna.core import IDNAError
from datetime import datetime
def check_mail(url):
    """Return a match object if *url* starts with an e-mail address, else None."""
    email_pattern = re.compile(r'\b[\w.+-]+?@[-_\w]+[.]+[-_.\w]+\b')
    return email_pattern.match(url)
def connect(host):
    """Open a MongoDB client for *host* on the default port 27017."""
    uri = 'mongodb://{}:27017'.format(host)
    return MongoClient(uri)
def retrieve_domains(db_ip_data, skip, limit):
    """Return not-yet-crawled dns documents, sliced to this worker's window.

    NOTE(review): the slice is [limit - skip:limit], so *skip* acts as the
    window size and *limit* as its upper bound — and the callers in this file
    pass their arguments in (limit, skip) order. Worth untangling; confirm
    against the worker/main call chain before changing.
    """
    return db_ip_data.dns.find({'domain_crawled': {'$exists': False}})[limit - skip:limit]
def update_data(db_ip_data, domain):
    """Stamp *domain* as crawled (domain_crawled = utcnow) in the dns collection."""
    query = {'domain': domain}
    update = {'$set': {'domain_crawled': datetime.utcnow()}}
    try:
        res = db_ip_data.dns.update_one(query, update, upsert=False)
        if res.modified_count > 0:
            print('INFO: domain {} crawled and updated with {} documents'.format(domain, res.modified_count))
    except DuplicateKeyError:
        pass
def add_urls(db_url_data, db_ip_data, url, domain):
    """Insert *url* into the url collection and mark *domain* as crawled.

    Backs off for 30s on AutoReconnect; duplicate/invalid writes are printed
    and otherwise ignored.
    """
    document = {'url': url.lower(), 'created': datetime.utcnow()}
    try:
        post_id = db_url_data.url.insert_one(document).inserted_id
        print(u'INFO: the url {} was added with the id {}'.format(url, post_id))
        update_data(db_ip_data, domain)
    except AutoReconnect:
        time.sleep(30)
    except (DuplicateKeyError, WriteError) as e:
        print(e)
def get_urls(db, ua, url):
    """Fetch http://<url> and return the set of absolute links found on the page.

    Returns None when the page cannot be fetched or parsed. The *db* parameter
    is unused but kept for interface compatibility with existing callers.
    """
    try:
        headers = {'User-Agent': ua.chrome}
        res = requests.get('http://{}'.format(url), timeout=1, headers=headers)
        content = res.text
    except (Timeout, ConnectionError, TooManyRedirects):
        return None
    except (IDNAError, InvalidURL, InvalidSchema, MissingSchema, ContentDecodingError, ChunkedEncodingError):
        return None
    try:
        doc = html.document_fromstring(content)
    except (ValueError, ParserError, XMLSyntaxError):
        return None
    base_url = 'http://{}'.format(url)
    url_set = set()
    for link in doc.xpath('//a/@href'):
        link = link.lower().strip()
        # Skip fragments and non-HTTP schemes outright.
        if link.startswith(('#', '+', 'tel:', 'javascript:', 'mailto:')):
            continue
        elif link.startswith('/'):
            link = urljoin(base_url, link)
        elif link.startswith('?'):
            link = urljoin(base_url, link)
        elif link.startswith('..'):
            link = urljoin(base_url, link.replace('..', ''))
        # Keep only links that resolved to an absolute URL with a host.
        if urlparse(link).netloc:
            url_set.add(link)
    # Fixed: removed a leftover debug print(url_set) that dumped every page's
    # full link set to stdout.
    return url_set
def worker(host, skip, limit):
    """Crawl the domains in one slice of the dns collection and store found URLs.

    NOTE(review): the caller passes (host, limit, amount) and retrieve_domains
    is invoked as (limit, skip); the skip/limit naming is inconsistent across
    the call chain — preserved here, but worth untangling.
    """
    # Fixed: CursorNotFound lives in pymongo.errors; the original referenced it
    # without importing it, so the intended recovery raised NameError instead.
    from pymongo.errors import CursorNotFound

    client = connect(host)
    db_url_data = client.url_data
    db_ip_data = client.ip_data
    ua = UserAgent()
    try:
        domains = retrieve_domains(db_ip_data, limit, skip)
    except CursorNotFound:
        client.close()
        return
    for domain in domains:
        print(u'INFO: the domain {} is beeing processed'.format(domain['domain']))
        # Fixed: use this worker's own database handle; the original passed a
        # global `db` that only exists in the parent process' __main__ block.
        links = get_urls(db_ip_data, ua, domain['domain'])
        if links is not None and len(links) > 0:
            for link in links:
                add_urls(db_url_data, db_ip_data, link, domain['domain'])
    client.close()
    return
def argparser():
    """Parse command line options: --worker (int) and --host (str), both required."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--worker', type=int, required=True, help='set worker count')
    parser.add_argument('--host', type=str, required=True, help='set the host')
    return parser.parse_args()
if __name__ == '__main__':
    args = argparser()
    client = connect(args.host)
    db = client.ip_data
    jobs = []
    threads = args.worker
    # NOTE(review): dividing by (threads + 50000) makes each slice tiny;
    # presumably this was meant to spread the collection across the workers —
    # preserved as-is, but worth confirming.
    amount = round(db.dns.estimated_document_count() / (threads + 50000))
    limit = amount
    print(limit, amount)
    for f in range(threads):
        j = multiprocessing.Process(target=worker, args=(args.host, limit, amount))
        jobs.append(j)
        j.start()
        limit = limit + amount
    # Fixed: close the parent's connection once, instead of once per joined
    # job inside the loop as the original did.
    client.close()
    for j in jobs:
        j.join()
        print('exitcode = {}'.format(j.exitcode))
| 28.460606 | 147 | 0.664608 |
import re
import time
import requests
import multiprocessing
import argparse
from lxml import html
from urllib.parse import urljoin
from urllib.parse import urlparse
from fake_useragent import UserAgent
from lxml.etree import ParserError
from lxml.etree import XMLSyntaxError
from requests.exceptions import Timeout
from requests.exceptions import InvalidURL
from requests.exceptions import InvalidSchema
from requests.exceptions import MissingSchema
from requests.exceptions import ConnectionError
from requests.exceptions import ChunkedEncodingError
from requests.exceptions import ContentDecodingError
from requests.exceptions import TooManyRedirects
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from pymongo.errors import AutoReconnect
from pymongo.errors import WriteError
from idna.core import IDNAError
from datetime import datetime
def check_mail(url):
    """Return a match object if *url* starts with an e-mail address, else None."""
    return re.match(r'\b[\w.+-]+?@[-_\w]+[.]+[-_.\w]+\b', url)
def connect(host):
    """Open a MongoDB client for *host* on the default port 27017."""
    return MongoClient('mongodb://{}:27017'.format(host))
def retrieve_domains(db_ip_data, skip, limit):
    """Return not-yet-crawled dns documents, sliced to [limit - skip:limit].

    NOTE(review): callers pass their arguments in (limit, skip) order — the
    naming here is misleading; confirm against the call chain.
    """
    return db_ip_data.dns.find({'domain_crawled': {'$exists': False}})[limit - skip:limit]
def update_data(db_ip_data, domain):
    """Stamp *domain* as crawled (domain_crawled = utcnow) in the dns collection."""
    try:
        res = db_ip_data.dns.update_one({'domain': domain}, {'$set': {'domain_crawled': datetime.utcnow()}}, upsert=False)
        if res.modified_count > 0:
            print('INFO: domain {} crawled and updated with {} documents'.format(domain, res.modified_count))
    except DuplicateKeyError:
        pass
def add_urls(db_url_data, db_ip_data, url, domain):
    """Insert *url* into the url collection and mark *domain* as crawled.

    Backs off 30s on AutoReconnect; duplicate/invalid writes are printed.
    """
    try:
        post = {'url': url.lower(), 'created': datetime.utcnow()}
        post_id = db_url_data.url.insert_one(post).inserted_id
        print(u'INFO: the url {} was added with the id {}'.format(url, post_id))
        update_data(db_ip_data, domain)
    except AutoReconnect:
        time.sleep(30)
    except (DuplicateKeyError, WriteError) as e:
        print(e)
def get_urls(db, ua, url):
    """Fetch http://<url> and return the set of absolute links on the page.

    Returns None when the page cannot be fetched or parsed. *db* is unused.
    """
    try:
        headers = {'User-Agent': ua.chrome}
        res = requests.get('http://{}'.format(url), timeout=1, headers=headers)
        content = res.text
    except (Timeout, ConnectionError, TooManyRedirects):
        return None
    except (IDNAError, InvalidURL, InvalidSchema, MissingSchema, ContentDecodingError, ChunkedEncodingError):
        return None
    try:
        doc = html.document_fromstring(content)
    except (ValueError, ParserError, XMLSyntaxError):
        return None
    links = doc.xpath('//a/@href')
    base_url = 'http://{}'.format(url)
    url_set = set()
    for link in links:
        link = link.lower().strip()
        # Skip fragments and non-HTTP schemes; resolve relative links.
        if link.startswith('#') or link.startswith('+') or link.startswith('tel:') or link.startswith('javascript:') or link.startswith('mailto:'):
            continue
        elif link.startswith('/'):
            link = urljoin(base_url, link)
        elif link.startswith('?'):
            link = urljoin(base_url, link)
        elif link.startswith('..'):
            link = urljoin(base_url, link.replace('..', ''))
        if urlparse(link).netloc:
            url_set.add(link)
    # NOTE(review): debug leftover — dumps every page's link set to stdout.
    print(url_set)
    return url_set
def worker(host, skip, limit):
    """Crawl one slice of the dns collection and store discovered URLs.

    NOTE(review): CursorNotFound is never imported in this file, so the except
    clause below raises NameError if it ever triggers; get_urls is also handed
    the global `db`, which only exists in the parent process' __main__ block.
    """
    client = connect(host)
    db_url_data = client.url_data
    db_ip_data = client.ip_data
    ua = UserAgent()
    try:
        domains = retrieve_domains(db_ip_data, limit, skip)
    except CursorNotFound:
        client.close()
        return
    for domain in domains:
        print(u'INFO: the domain {} is beeing processed'.format(domain['domain']))
        links = get_urls(db, ua, domain['domain'])
        if links is not None and len(links) > 0:
            for link in links:
                add_urls(db_url_data, db_ip_data, link, domain['domain'])
    client.close()
    return
def argparser():
    """Parse command line options: --worker (int) and --host (str), both required."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--worker', help='set worker count', type=int, required=True)
    parser.add_argument('--host', help='set the host', type=str, required=True)
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    args = argparser()
    client = connect(args.host)
    db = client.ip_data
    jobs = []
    threads = args.worker
    # NOTE(review): dividing by (threads + 50000) makes each slice tiny;
    # presumably meant to spread the collection across workers — confirm.
    amount = round(db.dns.estimated_document_count() / (threads + 50000))
    limit = amount
    print(limit, amount)
    for f in range(threads):
        j = multiprocessing.Process(target=worker, args=(args.host, limit, amount))
        jobs.append(j)
        j.start()
        limit = limit + amount
    for j in jobs:
        # NOTE(review): client.close() runs once per joined job; closing once
        # outside the loop would suffice.
        client.close()
        j.join()
        print('exitcode = {}'.format(j.exitcode))
| true | true |
f734ae1b367f0cd2d5054b2947d084a9683c0171 | 2,106 | py | Python | multilingual_t5/baseline_mr/baseline_mr.py | sumanthd17/mt5 | c99b4e3ad1c69908c852c730a1323ccb52d48f58 | [
"Apache-2.0"
] | null | null | null | multilingual_t5/baseline_mr/baseline_mr.py | sumanthd17/mt5 | c99b4e3ad1c69908c852c730a1323ccb52d48f58 | [
"Apache-2.0"
] | null | null | null | multilingual_t5/baseline_mr/baseline_mr.py | sumanthd17/mt5 | c99b4e3ad1c69908c852c730a1323ccb52d48f58 | [
"Apache-2.0"
] | null | null | null | """baseline_mr dataset."""
import tensorflow_datasets as tfds
import tensorflow as tf
# TODO(baseline_mr): Markdown description that will appear on the catalog page.
_DESCRIPTION = """
Description is **formatted** as markdown.
It should also contain any processing which has been applied (if any),
(e.g. corrupted example skipped, images cropped,...):
"""
# TODO(baseline_mr): BibTeX citation
_CITATION = """
"""
class BaselineMr(tfds.core.GeneratorBasedBuilder):
  """DatasetBuilder for the baseline_mr English-Marathi parallel dataset."""

  VERSION = tfds.core.Version('1.0.0')
  RELEASE_NOTES = {
      '1.0.0': 'Initial release.',
  }

  def _info(self) -> tfds.core.DatasetInfo:
    """Returns the dataset metadata: two parallel text features, source/target."""
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            'source': tfds.features.Text(),
            'target': tfds.features.Text(),
        }),
        homepage='https://dataset-homepage/',
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager: tfds.download.DownloadManager):
    """Downloads the en-mr archive and returns the train/validation splits.

    Note the direction: Marathi files are the *source*, English the *target*.
    """
    path = dl_manager.download_and_extract('https://storage.googleapis.com/ai4b-anuvaad-nmt/baselines/mT5/baseline_mr/strict-en-mr.zip')
    return {
        'train': self._generate_examples(source=path/'en-mr/train/train.mr', target=path/'en-mr/train/train.en'),
        'validation': self._generate_examples(source=path/'en-mr/dev/dev.mr', target=path/'en-mr/dev/dev.en')
    }

  def _generate_examples(self, source, target):
    """Yields (index, {'source', 'target'}) pairs by zipping the two line files.

    Lines keep their trailing newlines; pairing is positional, so the files
    are assumed to be line-aligned.
    """
    src = tf.io.gfile.GFile(source, 'r').readlines()
    tgt = tf.io.gfile.GFile(target, 'r').readlines()
    for idx, row in enumerate(zip(src, tgt)):
      yield idx, {
          'source': row[0],
          'target': row[1],
      }
| 33.967742 | 136 | 0.672365 |
import tensorflow_datasets as tfds
import tensorflow as tf
_DESCRIPTION = """
Description is **formatted** as markdown.
It should also contain any processing which has been applied (if any),
(e.g. corrupted example skipped, images cropped,...):
"""
_CITATION = """
"""
class BaselineMr(tfds.core.GeneratorBasedBuilder):
  """DatasetBuilder for the baseline_mr English-Marathi parallel dataset."""
  VERSION = tfds.core.Version('1.0.0')
  RELEASE_NOTES = {
      '1.0.0': 'Initial release.',
  }
  def _info(self) -> tfds.core.DatasetInfo:
    """Returns the dataset metadata: two parallel text features, source/target."""
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            'source': tfds.features.Text(),
            'target': tfds.features.Text(),
        }),
        homepage='https://dataset-homepage/',
        citation=_CITATION,
    )
  def _split_generators(self, dl_manager: tfds.download.DownloadManager):
    """Downloads the en-mr archive and returns the train/validation splits."""
    path = dl_manager.download_and_extract('https://storage.googleapis.com/ai4b-anuvaad-nmt/baselines/mT5/baseline_mr/strict-en-mr.zip')
    return {
        'train': self._generate_examples(source=path/'en-mr/train/train.mr', target=path/'en-mr/train/train.en'),
        'validation': self._generate_examples(source=path/'en-mr/dev/dev.mr', target=path/'en-mr/dev/dev.en')
    }
  def _generate_examples(self, source, target):
    """Yields (index, {'source', 'target'}) pairs by zipping the two line files."""
    src = tf.io.gfile.GFile(source, 'r').readlines()
    tgt = tf.io.gfile.GFile(target, 'r').readlines()
    for idx, row in enumerate(zip(src, tgt)):
      yield idx, {
          'source': row[0],
          'target': row[1],
      }
| true | true |
f734ae3edb3e3e64c8d186b6444c1c175228a9b2 | 4,769 | py | Python | lib-dynload/_recordclass/lib/recordclass/test/test_litelist.py | tabulon-ext/dedupsqlfs | 9dfbed17450e7f2a499a7381e0368d08ae3c700d | [
"MIT"
] | 22 | 2015-04-09T09:00:00.000Z | 2022-03-23T00:16:04.000Z | lib-dynload/_recordclass/lib/recordclass/test/test_litelist.py | tabulon-ext/dedupsqlfs | 9dfbed17450e7f2a499a7381e0368d08ae3c700d | [
"MIT"
] | 119 | 2015-02-11T21:39:27.000Z | 2021-07-27T23:04:49.000Z | lib-dynload/_recordclass/lib/recordclass/test/test_litelist.py | tabulon-ext/dedupsqlfs | 9dfbed17450e7f2a499a7381e0368d08ae3c700d | [
"MIT"
] | 7 | 2016-03-16T11:53:45.000Z | 2022-02-24T13:47:31.000Z | import unittest
from recordclass import litelist
import gc
import pickle
import sys
class litelistTest(unittest.TestCase):
    """Unit tests for recordclass.litelist: construction, item access,
    mutation, slicing, deletion, iteration, pickling and refcount hygiene."""
    def test_len(self):
        a = litelist([])
        self.assertEqual(len(a), 0)
        a = litelist([1])
        self.assertEqual(len(a), 1)
    def test_items(self):
        a = litelist([1,2,3])
        self.assertEqual(a[0], 1)
        self.assertEqual(a[-1], 3)
        a[1] = 100
        self.assertEqual(a[1], 100)
    def test_remove(self):
        a = litelist([1,2,3])
        a.remove(2)
        self.assertEqual(a[0], 1)
        self.assertEqual(a[-1], 3)
        a = litelist([1,2,3])
        a.remove(1)
        self.assertEqual(a[0], 2)
        self.assertEqual(a[-1], 3)
        a = litelist([1,2,3])
        a.remove(3)
        self.assertEqual(a[0], 1)
        self.assertEqual(a[-1], 2)
    def test_gc(self):
        # sys.getsizeof must agree with the type's own __sizeof__.
        a = litelist([1,2,3])
        self.assertEqual(sys.getsizeof(a), a.__sizeof__())
    def test_append(self):
        a = litelist([])
        a.append(1)
        self.assertEqual(a[0], 1)
        a.append(2)
        self.assertEqual(a[1], 2)
        a.append(3)
        self.assertEqual(a[2], 3)
    def test_extend1(self):
        a = litelist([])
        a.extend([1,2,3])
        self.assertEqual(a[0], 1)
        self.assertEqual(a[1], 2)
        self.assertEqual(a[2], 3)
    def test_extend2(self):
        a = litelist([1,2,3])
        a.extend([4,5,6])
        self.assertEqual(a[3], 4)
        self.assertEqual(a[4], 5)
        self.assertEqual(a[5], 6)
    def test_repr(self):
        a = litelist([])
        self.assertEqual(repr(a), "litelist([])")
        a = litelist([1])
        self.assertEqual(repr(a), "litelist([1])")
        a = litelist([1, 2])
        self.assertEqual(repr(a), "litelist([1, 2])")
    def test_iter(self):
        a = litelist([1,2,3])
        self.assertEqual(list(a), [1,2,3])
        self.assertEqual(tuple(a), (1,2,3))
    def test_iter2(self):
        from recordclass.litelist import litelistiter
        a = litelist([1,2,3])
        self.assertTrue(isinstance(iter(a), litelistiter))
    def test_getslice1(self):
        a = litelist([1,2,3])
        self.assertEqual(len(a[1:1]), 0)
        self.assertEqual(repr(a[1:1]), "litelist([])")
        self.assertEqual(len(a[1:2]), 1)
        self.assertEqual(repr(a[1:2]), "litelist([2])")
        self.assertEqual(len(a[:-1]), 2)
        self.assertEqual(repr(a[:-1]), "litelist([1, 2])")
    def test_getslice2(self):
        a = litelist([1,2,3])
        self.assertEqual(repr(a[:]), "litelist([1, 2, 3])")
    def test_setslice1(self):
        a = litelist([1,2,3])
        a[1:1] = []
        self.assertEqual(repr(a), "litelist([1, 2, 3])")
    def test_setslice2(self):
        a = litelist([1,2,3])
        a[1:2] = [100]
        self.assertEqual(repr(a), "litelist([1, 100, 3])")
    def test_setslice3(self):
        a = litelist([1,2,3])
        a[:-1] = [100,200]
        self.assertEqual(repr(a), "litelist([100, 200, 3])")
    def test_setslice4(self):
        a = litelist([1,2,3])
        a[:] = [100,200,300]
        self.assertEqual(repr(a), "litelist([100, 200, 300])")
    def test_delitem1(self):
        a = litelist([1,2,3,4,5])
        del a[1]
        self.assertEqual(repr(a), "litelist([1, 3, 4, 5])")
    def test_delitem2(self):
        a = litelist([1,2,3,4,5])
        del a[0]
        self.assertEqual(repr(a), "litelist([2, 3, 4, 5])")
    def test_delitem3(self):
        a = litelist([1,2,3,4,5])
        del a[4]
        self.assertEqual(repr(a), "litelist([1, 2, 3, 4])")
    def test_delitem4(self):
        a = litelist([1,2,3,4,5])
        del a[-1]
        self.assertEqual(repr(a), "litelist([1, 2, 3, 4])")
    def test_iterator_pickle(self):
        # Userlist iterators don't support pickling yet since
        # they are based on generators.
        data = litelist([4, 5, 6, 7])
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            itorg = iter(data)
            d = pickle.dumps(itorg, proto)
            it = pickle.loads(d)
            self.assertEqual(type(itorg), type(it))
            self.assertEqual(list(litelist(it)), list(data))
            it = pickle.loads(d)
            next(it)
            d = pickle.dumps(it)
            self.assertEqual(list(litelist(it)), list(data[1:]))
    def test_refleak_on_assignemnt(self):
        # Storing into a slot must take a reference; overwriting must drop it.
        a = 1
        ll = litelist([a,2,3])
        c = sys.getrefcount(a)
        b = ll[0]
        self.assertEqual(sys.getrefcount(a), c+1)
        ll[0] = None
        self.assertEqual(sys.getrefcount(a), c)
def main():
    """Collect the litelistTest cases into a unittest suite and return it."""
    tests = unittest.makeSuite(litelistTest)
    suite = unittest.TestSuite()
    suite.addTest(tests)
    return suite
| 28.728916 | 66 | 0.528413 | import unittest
from recordclass import litelist
import gc
import pickle
import sys
class litelistTest(unittest.TestCase):
    """Unit tests for recordclass.litelist: construction, item access,
    mutation, slicing, deletion, iteration, pickling and refcount hygiene."""
    def test_len(self):
        a = litelist([])
        self.assertEqual(len(a), 0)
        a = litelist([1])
        self.assertEqual(len(a), 1)
    def test_items(self):
        a = litelist([1,2,3])
        self.assertEqual(a[0], 1)
        self.assertEqual(a[-1], 3)
        a[1] = 100
        self.assertEqual(a[1], 100)
    def test_remove(self):
        a = litelist([1,2,3])
        a.remove(2)
        self.assertEqual(a[0], 1)
        self.assertEqual(a[-1], 3)
        a = litelist([1,2,3])
        a.remove(1)
        self.assertEqual(a[0], 2)
        self.assertEqual(a[-1], 3)
        a = litelist([1,2,3])
        a.remove(3)
        self.assertEqual(a[0], 1)
        self.assertEqual(a[-1], 2)
    def test_gc(self):
        # sys.getsizeof must agree with the type's own __sizeof__.
        a = litelist([1,2,3])
        self.assertEqual(sys.getsizeof(a), a.__sizeof__())
    def test_append(self):
        a = litelist([])
        a.append(1)
        self.assertEqual(a[0], 1)
        a.append(2)
        self.assertEqual(a[1], 2)
        a.append(3)
        self.assertEqual(a[2], 3)
    def test_extend1(self):
        a = litelist([])
        a.extend([1,2,3])
        self.assertEqual(a[0], 1)
        self.assertEqual(a[1], 2)
        self.assertEqual(a[2], 3)
    def test_extend2(self):
        a = litelist([1,2,3])
        a.extend([4,5,6])
        self.assertEqual(a[3], 4)
        self.assertEqual(a[4], 5)
        self.assertEqual(a[5], 6)
    def test_repr(self):
        a = litelist([])
        self.assertEqual(repr(a), "litelist([])")
        a = litelist([1])
        self.assertEqual(repr(a), "litelist([1])")
        a = litelist([1, 2])
        self.assertEqual(repr(a), "litelist([1, 2])")
    def test_iter(self):
        a = litelist([1,2,3])
        self.assertEqual(list(a), [1,2,3])
        self.assertEqual(tuple(a), (1,2,3))
    def test_iter2(self):
        from recordclass.litelist import litelistiter
        a = litelist([1,2,3])
        self.assertTrue(isinstance(iter(a), litelistiter))
    def test_getslice1(self):
        a = litelist([1,2,3])
        self.assertEqual(len(a[1:1]), 0)
        self.assertEqual(repr(a[1:1]), "litelist([])")
        self.assertEqual(len(a[1:2]), 1)
        self.assertEqual(repr(a[1:2]), "litelist([2])")
        self.assertEqual(len(a[:-1]), 2)
        self.assertEqual(repr(a[:-1]), "litelist([1, 2])")
    def test_getslice2(self):
        a = litelist([1,2,3])
        self.assertEqual(repr(a[:]), "litelist([1, 2, 3])")
    def test_setslice1(self):
        a = litelist([1,2,3])
        a[1:1] = []
        self.assertEqual(repr(a), "litelist([1, 2, 3])")
    def test_setslice2(self):
        a = litelist([1,2,3])
        a[1:2] = [100]
        self.assertEqual(repr(a), "litelist([1, 100, 3])")
    def test_setslice3(self):
        a = litelist([1,2,3])
        a[:-1] = [100,200]
        self.assertEqual(repr(a), "litelist([100, 200, 3])")
    def test_setslice4(self):
        a = litelist([1,2,3])
        a[:] = [100,200,300]
        self.assertEqual(repr(a), "litelist([100, 200, 300])")
    def test_delitem1(self):
        a = litelist([1,2,3,4,5])
        del a[1]
        self.assertEqual(repr(a), "litelist([1, 3, 4, 5])")
    def test_delitem2(self):
        a = litelist([1,2,3,4,5])
        del a[0]
        self.assertEqual(repr(a), "litelist([2, 3, 4, 5])")
    def test_delitem3(self):
        a = litelist([1,2,3,4,5])
        del a[4]
        self.assertEqual(repr(a), "litelist([1, 2, 3, 4])")
    def test_delitem4(self):
        a = litelist([1,2,3,4,5])
        del a[-1]
        self.assertEqual(repr(a), "litelist([1, 2, 3, 4])")
    def test_iterator_pickle(self):
        # Round-trip the iterator through every pickle protocol; an advanced
        # iterator must resume from its saved position after unpickling.
        data = litelist([4, 5, 6, 7])
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            itorg = iter(data)
            d = pickle.dumps(itorg, proto)
            it = pickle.loads(d)
            self.assertEqual(type(itorg), type(it))
            self.assertEqual(list(litelist(it)), list(data))
            it = pickle.loads(d)
            next(it)
            d = pickle.dumps(it)
            self.assertEqual(list(litelist(it)), list(data[1:]))
    def test_refleak_on_assignemnt(self):
        # Storing into a slot must take a reference; overwriting must drop it.
        a = 1
        ll = litelist([a,2,3])
        c = sys.getrefcount(a)
        b = ll[0]
        self.assertEqual(sys.getrefcount(a), c+1)
        ll[0] = None
        self.assertEqual(sys.getrefcount(a), c)
def main():
    """Collect the litelistTest cases into a unittest suite and return it."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(litelistTest))
    return suite
| true | true |
f734afa1170846a895fc159dcf6c25886daf9a07 | 3,509 | py | Python | gnes/preprocessor/video/shotdetect.py | dixiak/gnes | 12513d29157a06bd22923717fd0c19a856f20193 | [
"Apache-2.0"
] | null | null | null | gnes/preprocessor/video/shotdetect.py | dixiak/gnes | 12513d29157a06bd22923717fd0c19a856f20193 | [
"Apache-2.0"
] | null | null | null | gnes/preprocessor/video/shotdetect.py | dixiak/gnes | 12513d29157a06bd22923717fd0c19a856f20193 | [
"Apache-2.0"
] | null | null | null | # Tencent is pleased to support the open source community by making GNES available.
#
# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import numpy as np
from ..base import BaseVideoPreprocessor
from ..helper import compute_descriptor, compare_descriptor, detect_peak_boundary, compare_ecr
from ..io_utils import video as video_util
from ...proto import gnes_pb2, array2blob
class ShotDetectPreprocessor(BaseVideoPreprocessor):
    """Splits a video document into shots and stores each shot as a chunk.

    Frames are described with a configurable descriptor, consecutive
    descriptors are compared with a distance metric, and peaks in the
    distance sequence become shot boundaries.
    """
    store_args_kwargs = True
    def __init__(self,
                 frame_size: str = '192:168',
                 descriptor: str = 'block_hsv_histogram',
                 distance_metric: str = 'bhattacharya',
                 detect_method: str = 'threshold',
                 frame_rate: int = 10,
                 frame_num: int = -1,
                 *args,
                 **kwargs):
        """Store the extraction/detection settings.

        Extra **kwargs are forwarded to compute_descriptor at detection time.
        frame_num == -1 means "no frame limit" when sampling the video.
        """
        super().__init__(*args, **kwargs)
        self.frame_size = frame_size
        self.descriptor = descriptor
        self.distance_metric = distance_metric
        self.detect_method = detect_method
        self.frame_rate = frame_rate
        self.frame_num = frame_num
        self._detector_kwargs = kwargs
    def detect_shots(self, frames: 'np.ndarray') -> List[List['np.ndarray']]:
        """Partition *frames* into shots (lists of consecutive frames)."""
        descriptors = []
        for frame in frames:
            descriptor = compute_descriptor(
                frame, method=self.descriptor, **self._detector_kwargs)
            descriptors.append(descriptor)
        # Compute distances between consecutive frames.
        # NOTE(review): 'edge_change_ration' (sic) is the literal this option
        # is matched against elsewhere; fixing the typo needs a coordinated
        # change with the callers/configs.
        if self.distance_metric == 'edge_change_ration':
            dists = compare_ecr(descriptors)
        else:
            dists = [
                compare_descriptor(pair[0], pair[1], self.distance_metric)
                for pair in zip(descriptors[:-1], descriptors[1:])
            ]
        shot_bounds = detect_peak_boundary(dists, self.detect_method)
        shots = []
        for ci in range(0, len(shot_bounds) - 1):
            shots.append(frames[shot_bounds[ci]:shot_bounds[ci + 1]])
        return shots
    def apply(self, doc: 'gnes_pb2.Document') -> None:
        """Sample frames from doc.raw_bytes and append one chunk per shot.

        Each chunk carries the shot's frames as a blob, its index as offset,
        and its share of the total frame count as weight.
        """
        super().apply(doc)
        if doc.raw_bytes:
            all_frames = video_util.capture_frames(
                input_data=doc.raw_bytes,
                scale=self.frame_size,
                fps=self.frame_rate,
                vframes=self.frame_num)
            num_frames = len(all_frames)
            assert num_frames > 0
            shots = self.detect_shots(all_frames)
            for ci, frames in enumerate(shots):
                c = doc.chunks.add()
                c.doc_id = doc.doc_id
                chunk_data = np.array(frames)
                c.blob.CopyFrom(array2blob(chunk_data))
                c.offset = ci
                c.weight = len(frames) / num_frames
        else:
            self.logger.error('bad document: "raw_bytes" is empty!')
| 37.329787 | 94 | 0.620405 |
from typing import List
import numpy as np
from ..base import BaseVideoPreprocessor
from ..helper import compute_descriptor, compare_descriptor, detect_peak_boundary, compare_ecr
from ..io_utils import video as video_util
from ...proto import gnes_pb2, array2blob
class ShotDetectPreprocessor(BaseVideoPreprocessor):
    """Split a video document into shots and store each shot as a chunk.

    Frames are summarized by per-frame descriptors, consecutive descriptors
    are compared with a distance metric, and peaks in the distance sequence
    are treated as shot boundaries.
    """
    # keep unknown constructor kwargs; they are forwarded to compute_descriptor()
    store_args_kwargs = True
    def __init__(self,
                 frame_size: str = '192:168',
                 descriptor: str = 'block_hsv_histogram',
                 distance_metric: str = 'bhattacharya',
                 detect_method: str = 'threshold',
                 frame_rate: int = 10,
                 frame_num: int = -1,
                 *args,
                 **kwargs):
        """
        :param frame_size: ffmpeg-style scale string (``width:height``)
        :param descriptor: name of the per-frame descriptor method
        :param distance_metric: metric used to compare consecutive descriptors
        :param detect_method: strategy for turning distances into boundaries
        :param frame_rate: frames per second to sample from the video
        :param frame_num: maximum number of frames to capture (-1 means no limit)
        """
        super().__init__(*args, **kwargs)
        self.frame_size = frame_size
        self.descriptor = descriptor
        self.distance_metric = distance_metric
        self.detect_method = detect_method
        self.frame_rate = frame_rate
        self.frame_num = frame_num
        # leftover kwargs are forwarded to compute_descriptor() on every frame
        self._detector_kwargs = kwargs
    def detect_shots(self, frames: 'np.ndarray') -> List[List['np.ndarray']]:
        """Group ``frames`` into shots (contiguous slices of ``frames``)."""
        descriptors = []
        for frame in frames:
            descriptor = compute_descriptor(
                frame, method=self.descriptor, **self._detector_kwargs)
            descriptors.append(descriptor)
        # NOTE(review): 'edge_change_ration' looks like a typo of
        # 'edge_change_ratio', but it is a public config value -- keep as is.
        if self.distance_metric == 'edge_change_ration':
            dists = compare_ecr(descriptors)
        else:
            # pairwise distance between each frame and its successor
            dists = [
                compare_descriptor(pair[0], pair[1], self.distance_metric)
                for pair in zip(descriptors[:-1], descriptors[1:])
            ]
        shot_bounds = detect_peak_boundary(dists, self.detect_method)
        shots = []
        for ci in range(0, len(shot_bounds) - 1):
            # each shot spans two consecutive boundary indices
            shots.append(frames[shot_bounds[ci]:shot_bounds[ci + 1]])
        return shots
    def apply(self, doc: 'gnes_pb2.Document') -> None:
        """Capture frames from ``doc.raw_bytes``, split into shots, and store
        every shot as a blob chunk on the document."""
        super().apply(doc)
        if doc.raw_bytes:
            all_frames = video_util.capture_frames(
                input_data=doc.raw_bytes,
                scale=self.frame_size,
                fps=self.frame_rate,
                vframes=self.frame_num)
            num_frames = len(all_frames)
            assert num_frames > 0
            shots = self.detect_shots(all_frames)
            for ci, frames in enumerate(shots):
                c = doc.chunks.add()
                c.doc_id = doc.doc_id
                chunk_data = np.array(frames)
                c.blob.CopyFrom(array2blob(chunk_data))
                c.offset = ci
                # weight a shot by the fraction of frames it covers
                c.weight = len(frames) / num_frames
        else:
            self.logger.error('bad document: "raw_bytes" is empty!')
| true | true |
f734afad97f1d8801b5285aa420eeebbfb4adc49 | 6,967 | py | Python | src/flappy_bird_gym/envs/flappy_bird_env_simple.py | chokyzhou/gym-flappy-bird | ffe1089501f3e2e113a8868cd27480653dbe0ef7 | [
"MIT"
] | null | null | null | src/flappy_bird_gym/envs/flappy_bird_env_simple.py | chokyzhou/gym-flappy-bird | ffe1089501f3e2e113a8868cd27480653dbe0ef7 | [
"MIT"
] | null | null | null | src/flappy_bird_gym/envs/flappy_bird_env_simple.py | chokyzhou/gym-flappy-bird | ffe1089501f3e2e113a8868cd27480653dbe0ef7 | [
"MIT"
] | null | null | null | #
# Copyright (c) 2020 Gabriel Nogueira (Talendar)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
""" Implementation of a Flappy Bird OpenAI Gym environment that yields simple
numerical information about the game's state as observations.
"""
from typing import Dict, Tuple, Optional, Union
import gym
import numpy as np
import pygame
from flappy_bird_gym.envs.game_logic import FlappyBirdLogic
from flappy_bird_gym.envs.game_logic import PIPE_WIDTH, PIPE_HEIGHT
from flappy_bird_gym.envs.game_logic import PLAYER_WIDTH, PLAYER_HEIGHT
from flappy_bird_gym.envs.renderer import FlappyBirdRenderer
class FlappyBirdEnvSimple(gym.Env):
    """ Flappy Bird Gym environment that yields simple observations.

    The observations yielded by this environment are simple numerical
    information about the game's state. Specifically, each observation is a
    3-element array containing:

        * the horizontal distance to the next pipe;
        * the difference between the next hole's center and the player's
          y position;
        * the player's current vertical velocity (note: this component is
          never normalized, even when `normalize_obs` is `True`).

    The reward received by the agent is 1 on every step, i.e. for every
    frame the bird stays alive.

    Args:
        screen_size (Tuple[int, int]): The screen's width and height.
        normalize_obs (bool): If `True`, the two distance observations will
            be normalized by the screen size before being returned.
        pipe_gap (int): Space between a lower and an upper pipe.
        bird_color (str): Color of the flappy bird. The currently available
            colors are "yellow", "blue" and "red".
        pipe_color (str): Color of the pipes. The currently available colors are
            "green" and "red".
        background (Optional[str]): Type of background image. The currently
            available types are "day" and "night". If `None`, no background will
            be drawn.
    """
    metadata = {'render.modes': ['human']}
    def __init__(self,
                 screen_size: Tuple[int, int] = (288, 512),
                 normalize_obs: bool = True,
                 pipe_gap: int = 100,
                 bird_color: str = "yellow",
                 pipe_color: str = "green",
                 background: Optional[str] = "day") -> None:
        self.action_space = gym.spaces.Discrete(2)
        # unbounded Box; see _get_observation() for the 3 components
        self.observation_space = gym.spaces.Box(-np.inf, np.inf,
                                                shape=(3,),
                                                dtype=np.float32)
        self._screen_size = screen_size
        self._normalize_obs = normalize_obs
        self._pipe_gap = pipe_gap
        # created lazily by reset() / render()
        self._game = None
        self._renderer = None
        self._bird_color = bird_color
        self._pipe_color = pipe_color
        self._bg_type = background
    def _get_observation(self):
        """ Builds the 3-element observation array from the current game state. """
        up_pipe = low_pipe = None
        h_dist = 0
        # find the first pipe pair that is still ahead of the player
        for up_pipe, low_pipe in zip(self._game.upper_pipes,
                                     self._game.lower_pipes):
            h_dist = (low_pipe["x"] + PIPE_WIDTH / 2
                      - (self._game.player_x - PLAYER_WIDTH / 2))
            h_dist += 3  # extra distance to compensate for the buggy hit-box
            if h_dist >= 0:
                break
        upper_pipe_y = up_pipe["y"] + PIPE_HEIGHT
        lower_pipe_y = low_pipe["y"]
        player_y = self._game.player_y
        y_vel = self._game.player_vel_y
        # vertical distance from the player's center to the hole's center
        v_dist = (upper_pipe_y + lower_pipe_y) / 2 - (player_y
                                                      + PLAYER_HEIGHT/2)
        if self._normalize_obs:
            # note: y_vel is deliberately left unnormalized
            h_dist /= self._screen_size[0]
            v_dist /= self._screen_size[1]
        return np.array([
            h_dist,
            v_dist,
            y_vel,
        ])
    def step(self,
             action: Union[FlappyBirdLogic.Actions, int],
             ) -> Tuple[np.ndarray, float, bool, Dict]:
        """ Given an action, updates the game state.

        Args:
            action (Union[FlappyBirdLogic.Actions, int]): The action taken by
                the agent. Zero (0) means "do nothing" and one (1) means
                "flap".

        Returns:
            A tuple containing, respectively:

                * an observation (horizontal distance to the next pipe;
                  vertical distance to the next hole's center; the bird's
                  vertical velocity);
                * a reward (always 1);
                * a status report (`True` if the game is over and `False`
                  otherwise);
                * an info dictionary with the current "score".
        """
        alive = self._game.update_state(action)
        obs = self._get_observation()
        reward = 1
        done = not alive
        info = {"score": self._game.score}
        return obs, reward, done, info
    def reset(self):
        """ Resets the environment (starts a new game) and returns the
        initial observation. """
        self._game = FlappyBirdLogic(screen_size=self._screen_size,
                                     pipe_gap_size=self._pipe_gap)
        if self._renderer is not None:
            self._renderer.game = self._game
        return self._get_observation()
    def render(self, mode='human') -> None:
        """ Renders the next frame, creating the pygame display on first use. """
        if self._renderer is None:
            self._renderer = FlappyBirdRenderer(screen_size=self._screen_size,
                                                bird_color=self._bird_color,
                                                pipe_color=self._pipe_color,
                                                background=self._bg_type)
            self._renderer.game = self._game
            self._renderer.make_display()
        self._renderer.draw_surface(show_score=True)
        self._renderer.update_display()
    def close(self):
        """ Closes the environment and quits the pygame display. """
        if self._renderer is not None:
            pygame.display.quit()
            self._renderer = None
        super().close()
| 39.140449 | 80 | 0.604564 |
from typing import Dict, Tuple, Optional, Union
import gym
import numpy as np
import pygame
from flappy_bird_gym.envs.game_logic import FlappyBirdLogic
from flappy_bird_gym.envs.game_logic import PIPE_WIDTH, PIPE_HEIGHT
from flappy_bird_gym.envs.game_logic import PLAYER_WIDTH, PLAYER_HEIGHT
from flappy_bird_gym.envs.renderer import FlappyBirdRenderer
class FlappyBirdEnvSimple(gym.Env):
    """ Flappy Bird Gym environment yielding a 3-element observation:
    horizontal distance to the next pipe, vertical distance to the next
    hole's center, and the bird's vertical velocity (never normalized).
    The reward is 1 on every step the bird stays alive. """
    metadata = {'render.modes': ['human']}
    def __init__(self,
                 screen_size: Tuple[int, int] = (288, 512),
                 normalize_obs: bool = True,
                 pipe_gap: int = 100,
                 bird_color: str = "yellow",
                 pipe_color: str = "green",
                 background: Optional[str] = "day") -> None:
        self.action_space = gym.spaces.Discrete(2)
        # unbounded Box; see _get_observation() for the 3 components
        self.observation_space = gym.spaces.Box(-np.inf, np.inf,
                                                shape=(3,),
                                                dtype=np.float32)
        self._screen_size = screen_size
        self._normalize_obs = normalize_obs
        self._pipe_gap = pipe_gap
        # created lazily by reset() / render()
        self._game = None
        self._renderer = None
        self._bird_color = bird_color
        self._pipe_color = pipe_color
        self._bg_type = background
    def _get_observation(self):
        """ Builds the 3-element observation array from the current game state. """
        up_pipe = low_pipe = None
        h_dist = 0
        # find the first pipe pair that is still ahead of the player
        for up_pipe, low_pipe in zip(self._game.upper_pipes,
                                     self._game.lower_pipes):
            h_dist = (low_pipe["x"] + PIPE_WIDTH / 2
                      - (self._game.player_x - PLAYER_WIDTH / 2))
            h_dist += 3  # extra distance to compensate for the buggy hit-box
            if h_dist >= 0:
                break
        upper_pipe_y = up_pipe["y"] + PIPE_HEIGHT
        lower_pipe_y = low_pipe["y"]
        player_y = self._game.player_y
        y_vel = self._game.player_vel_y
        # vertical distance from the player's center to the hole's center
        v_dist = (upper_pipe_y + lower_pipe_y) / 2 - (player_y
                                                      + PLAYER_HEIGHT/2)
        if self._normalize_obs:
            # note: y_vel is deliberately left unnormalized
            h_dist /= self._screen_size[0]
            v_dist /= self._screen_size[1]
        return np.array([
            h_dist,
            v_dist,
            y_vel,
        ])
    def step(self,
             action: Union[FlappyBirdLogic.Actions, int],
             ) -> Tuple[np.ndarray, float, bool, Dict]:
        """ Advances the game by one action (0 = do nothing, 1 = flap) and
        returns (observation, reward=1, done, info). """
        alive = self._game.update_state(action)
        obs = self._get_observation()
        reward = 1
        done = not alive
        info = {"score": self._game.score}
        return obs, reward, done, info
    def reset(self):
        """ Starts a new game and returns the initial observation. """
        self._game = FlappyBirdLogic(screen_size=self._screen_size,
                                     pipe_gap_size=self._pipe_gap)
        if self._renderer is not None:
            self._renderer.game = self._game
        return self._get_observation()
    def render(self, mode='human') -> None:
        """ Renders the next frame, creating the pygame display on first use. """
        if self._renderer is None:
            self._renderer = FlappyBirdRenderer(screen_size=self._screen_size,
                                                bird_color=self._bird_color,
                                                pipe_color=self._pipe_color,
                                                background=self._bg_type)
            self._renderer.game = self._game
            self._renderer.make_display()
        self._renderer.draw_surface(show_score=True)
        self._renderer.update_display()
    def close(self):
        """ Closes the environment and quits the pygame display. """
        if self._renderer is not None:
            pygame.display.quit()
            self._renderer = None
        super().close()
| true | true |
f734afd4fd2b3089c11565e92f2a38f6324a6e63 | 4,769 | py | Python | pkg/ampcor/products/OffsetMap.py | aivazis/ampcor | a673e6fd12ac29086c88002ce999a8eabdf406cd | [
"BSD-2-Clause"
] | 3 | 2018-12-16T14:16:51.000Z | 2020-11-12T17:33:02.000Z | pkg/ampcor/products/OffsetMap.py | aivazis/ampcor | a673e6fd12ac29086c88002ce999a8eabdf406cd | [
"BSD-2-Clause"
] | null | null | null | pkg/ampcor/products/OffsetMap.py | aivazis/ampcor | a673e6fd12ac29086c88002ce999a8eabdf406cd | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# parasim
# (c) 1998-2021 all rights reserved
#
# the framework
import ampcor
# the extension
from ampcor.ext import ampcor as libampcor
# declaration
class OffsetMap(ampcor.flow.product,
                family="ampcor.products.offsets.offsets", implements=ampcor.specs.offsets):
    """
    Access to the data of an offset map: a raster of correlated
    (reference, secondary) point pairs backed by a binary file
    """

    # public data
    shape = ampcor.properties.tuple(schema=ampcor.properties.int())
    shape.default = (0,0)
    shape.doc = "the shape of the map"

    data = ampcor.properties.path()
    data.doc = "the path to my binary data"

    # public data
    @property
    def layout(self):
        """
        Get my layout
        """
        # ask the spec
        return self.spec.layout

    @property
    def bytesPerCell(self):
        """
        Get the memory footprint of my cell
        """
        # ask the spec
        return self.spec.bytesPerCell

    # protocol obligations
    @ampcor.export
    def cells(self):
        """
        Compute the number of points
        """
        # ask my spec; it knows
        return self.spec.cells

    @ampcor.export
    def bytes(self):
        """
        Compute my memory footprint
        """
        # ask my spec; it knows
        return self.spec.bytes

    @ampcor.export
    def slice(self, origin, shape):
        """
        Grant access to a slice of data of the given {shape} starting at {origin}

        NOTE(review): not implemented -- the docstring is the entire body,
        so calling this currently returns None
        """

    @ampcor.export
    def open(self, mode="r"):
        """
        Build a raster over the contents of my data file and attach it to me

        Supported modes: 'r' (read-only), 'w' (read/write an existing map),
        'n' (create a new map; clobbers an existing one)
        """
        # unpack the shape
        shape = self.shape
        # attempt to
        try:
            # resolve the filename using the {vfs}
            uri = self.pyre_fileserver[self.data].uri
        # if that fails
        except Exception:
            # use the raw name
            uri = self.data
        # if we are opening in read-only mode
        if mode == "r":
            # make a const raster
            # NOTE(review): uses {ampcor.libampcor} rather than the module
            # level {libampcor} alias imported above; presumably equivalent -- confirm
            raster = ampcor.libampcor.OffsetsConstRaster(shape=shape, uri=uri)
        # if we are opening an existing one in read/write mode
        elif mode == "w":
            # make a modifiable raster
            raster = ampcor.libampcor.OffsetsRaster(shape=shape, uri=uri, new=False)
        # if we are creating one
        elif mode == "n":
            # make a new raster; careful: this deletes existing products
            raster = ampcor.libampcor.OffsetsRaster(shape=shape, uri=uri, new=True)
        # otherwise
        else:
            # grab the journal
            import journal
            # make a channel
            channel = journal.error("ampcor.products.slc")
            # and complain
            channel.line(f"unknown mode '{mode}'")
            channel.line(f" while opening '{uri}'")
            channel.line(f" in ampcor.products.OffsetMap.open();")
            channel.line(f" valid modes are: 'r', 'w', 'n'")
            channel.log()
            # just in case errors are non-fatal
            raster = None
        # attach the raster
        self.raster = raster
        # all done
        return self

    # meta-methods
    def __init__(self, **kwds):
        # chain up
        super().__init__(**kwds)
        # load my product spec
        self.spec = ampcor.libampcor.Offsets(shape=self.shape)
        # i get a raster after {open}
        self.raster = None
        # all done
        return

    def __getitem__(self, idx):
        """
        Return the pair of correlated points stored at {idx}
        """
        # ask the raster
        return self.raster[idx]

    def __setitem__(self, idx, points):
        """
        Establish a correlation between the reference and secondary {points} at {idx}
        """
        # delegate to the raster
        self.raster[idx] = points
        # all done
        return

    # framework hooks
    def pyre_traitModified(self, trait, new, old):
        """
        Handle post construction configuration changes
        """
        # when my shape changes
        if trait.name == "shape":
            # recompute my spec
            self.spec = ampcor.libampcor.Offsets(shape=self.shape)
        # all done
        return self

    # implementation details
    def show(self, indent, margin):
        """
        Generate a report of my configuration, one line at a time
        """
        # my info
        yield f"{margin}name: {self.pyre_name}"
        yield f"{margin}family: {self.pyre_family()}"
        yield f"{margin}data: {self.data}"
        yield f"{margin}shape: {self.shape}"
        yield f"{margin}points: {self.cells()}"
        yield f"{margin}footprint: {self.bytes()} bytes"
        # all done
        return
# end of file
| 25.639785 | 91 | 0.556511 |
import ampcor
from ampcor.ext import ampcor as libampcor
class OffsetMap(ampcor.flow.product,
                family="ampcor.products.offsets.offsets", implements=ampcor.specs.offsets):
    """Access to the data of an offset map backed by a binary file."""
    shape = ampcor.properties.tuple(schema=ampcor.properties.int())
    shape.default = (0,0)
    shape.doc = "the shape of the map"
    data = ampcor.properties.path()
    data.doc = "the path to my binary data"
    @property
    def layout(self):
        """Get my layout from the product spec."""
        return self.spec.layout
    @property
    def bytesPerCell(self):
        """Get the memory footprint of one cell."""
        return self.spec.bytesPerCell
    @ampcor.export
    def cells(self):
        """Compute the number of points."""
        return self.spec.cells
    @ampcor.export
    def bytes(self):
        """Compute my memory footprint."""
        return self.spec.bytes
    @ampcor.export
    def slice(self, origin, shape):
        """Grant access to a slice of data of the given {shape} at {origin}.

        NOTE(review): not implemented -- this docstring is the entire body
        (without it the method had no body at all), so it returns None.
        """
    @ampcor.export
    def open(self, mode="r"):
        """Build a raster over my data file and attach it.

        Modes: 'r' read-only, 'w' read/write existing, 'n' create new
        (clobbers an existing map).
        """
        shape = self.shape
        try:
            # resolve the filename through the pyre virtual filesystem
            uri = self.pyre_fileserver[self.data].uri
        except Exception:
            # fall back to the raw name
            uri = self.data
        if mode == "r":
            raster = ampcor.libampcor.OffsetsConstRaster(shape=shape, uri=uri)
        elif mode == "w":
            raster = ampcor.libampcor.OffsetsRaster(shape=shape, uri=uri, new=False)
        elif mode == "n":
            # careful: this deletes existing products
            raster = ampcor.libampcor.OffsetsRaster(shape=shape, uri=uri, new=True)
        else:
            # unknown mode: report through the journal and leave no raster
            import journal
            channel = journal.error("ampcor.products.slc")
            channel.line(f"unknown mode '{mode}'")
            channel.line(f" while opening '{uri}'")
            channel.line(f" in ampcor.products.OffsetMap.open();")
            channel.line(f" valid modes are: 'r', 'w', 'n'")
            channel.log()
            raster = None
        self.raster = raster
        return self
    def __init__(self, **kwds):
        super().__init__(**kwds)
        # load my product spec; a raster is only attached by open()
        self.spec = ampcor.libampcor.Offsets(shape=self.shape)
        self.raster = None
        return
    def __getitem__(self, idx):
        """Return the pair of correlated points stored at {idx}."""
        return self.raster[idx]
    def __setitem__(self, idx, points):
        """Store the correlated {points} at {idx}."""
        self.raster[idx] = points
        return
    def pyre_traitModified(self, trait, new, old):
        """Recompute the spec when my shape changes (framework hook)."""
        if trait.name == "shape":
            self.spec = ampcor.libampcor.Offsets(shape=self.shape)
        return self
    def show(self, indent, margin):
        """Generate a report of my configuration, one line at a time."""
        yield f"{margin}name: {self.pyre_name}"
        yield f"{margin}family: {self.pyre_family()}"
        yield f"{margin}data: {self.data}"
        yield f"{margin}shape: {self.shape}"
        yield f"{margin}points: {self.cells()}"
        yield f"{margin}footprint: {self.bytes()} bytes"
        return
| true | true |
f734b04a077be3478e1741e20f6f998dfde15758 | 2,542 | py | Python | Competitive Programming/Blind 75 Must Do Leetcode/Maximum Subarray.py | shreejitverma/GeeksforGeeks | d7bcb166369fffa9a031a258e925b6aff8d44e6c | [
"MIT"
] | 2 | 2022-02-18T05:14:28.000Z | 2022-03-08T07:00:08.000Z | Competitive Programming/Blind 75 Must Do Leetcode/Maximum Subarray.py | shivaniverma1/Competitive-Programming-1 | d7bcb166369fffa9a031a258e925b6aff8d44e6c | [
"MIT"
] | 6 | 2022-01-13T04:31:04.000Z | 2022-03-12T01:06:16.000Z | Competitive Programming/Blind 75 Must Do Leetcode/Maximum Subarray.py | shivaniverma1/Competitive-Programming-1 | d7bcb166369fffa9a031a258e925b6aff8d44e6c | [
"MIT"
] | 2 | 2022-02-14T19:53:53.000Z | 2022-02-18T05:14:30.000Z | '''https://leetcode.com/problems/maximum-subarray/
53. Maximum Subarray
Easy
15507
728
Add to List
Share
Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
A subarray is a contiguous part of an array.
Example 1:
Input: nums = [-2,1,-3,4,-1,2,1,-5,4]
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
Example 2:
Input: nums = [1]
Output: 1
Example 3:
Input: nums = [5,4,-1,7,8]
Output: 23
Constraints:
1 <= nums.length <= 105
-104 <= nums[i] <= 104
Follow up: If you have figured out the O(n) solution,
try coding another solution using the divide and conquer approach, which is more subtle.'''
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum of a contiguous subarray (Kadane, O(n)).

        `running` is the best sum of a subarray ending at the current
        element; a non-negative running sum is worth extending, otherwise
        we start fresh at the current value.
        """
        best = nums[0]
        running = nums[0]
        for value in nums[1:]:
            running = value if running < 0 else running + value
            best = max(best, running)
        return best
# @lc code=end
def brute_force(nums):
    """Maximum contiguous-subarray sum by checking every (L, R) pair, O(n^2).

    Fixes two bugs in the previous version: the inner sum used
    ``range(L, R)`` and therefore never included ``nums[R]`` (the last
    element could never be part of any subarray), and ``max_sum`` started
    at 0, which is wrong for all-negative input where the answer is the
    largest single element.

    :param nums: list of integers; returns 0 for an empty list
    :return: the largest sum over all non-empty contiguous subarrays
    """
    if not nums:
        # degenerate input: keep the old behavior of returning 0
        return 0
    max_sum = nums[0]
    for L in range(len(nums)):
        cur_sum = 0
        # extend the subarray one element at a time instead of re-summing,
        # which also drops the accidental third nested loop (O(n^3) -> O(n^2))
        for R in range(L, len(nums)):
            cur_sum += nums[R]
            if cur_sum > max_sum:
                max_sum = cur_sum
    return max_sum
def Devided_Conquer(nums, left, right):
    """Maximum subarray sum of nums[left..right] (inclusive) by divide and
    conquer, O(n log n).

    The answer is the best of three candidates: the best subarray entirely
    in the left half, the best entirely in the right half, and the best
    subarray crossing the midpoint.

    Fixes the crossing computation of the previous version: its cumulative
    sums excluded nums[center] (and nums[center+1]) themselves, so the
    "crossing" candidate could skip the middle element entirely -- e.g.
    [1, -1, 2] returned 3 instead of 2. Debug prints were also removed.
    """
    if left == right:
        # base case: a single element is its own best subarray
        return nums[left]
    center = (left + right) // 2
    max_left = Devided_Conquer(nums, left, center)
    max_right = Devided_Conquer(nums, center + 1, right)
    # best suffix sum that ends exactly at `center` (must include nums[center])
    suffix_sum = 0
    best_suffix = None
    for i in range(center, left - 1, -1):
        suffix_sum += nums[i]
        if best_suffix is None or suffix_sum > best_suffix:
            best_suffix = suffix_sum
    # best prefix sum that starts exactly at `center + 1`
    prefix_sum = 0
    best_prefix = None
    for i in range(center + 1, right + 1):
        prefix_sum += nums[i]
        if best_prefix is None or prefix_sum > best_prefix:
            best_prefix = prefix_sum
    return max(max_left, max_right, best_suffix + best_prefix)
def One_Pass(nums):
    """Kadane's algorithm: maximum contiguous-subarray sum in O(n).

    Tracks the best sum of a subarray ending at the current element; the
    answer is the best value that running sum ever takes.
    """
    best = nums[0]
    ending_here = nums[0]
    for value in nums[1:]:
        # either extend the previous subarray or start fresh at `value`
        ending_here = max(value, ending_here + value)
        best = max(best, ending_here)
    return best
if __name__ == '__main__':
    # quick demo: run the one-pass (Kadane) solution on the LeetCode sample;
    # expected output: 6
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(One_Pass(nums))
| 22.900901 | 136 | 0.597168 |
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Largest sum of a contiguous subarray via Kadane's algorithm, O(n)."""
        best = nums[0]
        running = nums[0]
        for value in nums[1:]:
            # a negative running sum can only hurt -- restart at `value`
            running = value if running < 0 else running + value
            best = max(best, running)
        return best
def brute_force(nums):
    """Maximum contiguous-subarray sum by checking every (L, R) pair, O(n^2).

    Bug fixes: the previous inner sum used ``range(L, R)`` and never
    included ``nums[R]``, and ``max_sum`` started at 0, which is wrong for
    all-negative input. Returns 0 for an empty list (unchanged behavior).
    """
    if not nums:
        return 0
    max_sum = nums[0]
    for L in range(len(nums)):
        cur_sum = 0
        # running sum avoids re-summing each subarray (O(n^3) -> O(n^2))
        for R in range(L, len(nums)):
            cur_sum += nums[R]
            if cur_sum > max_sum:
                max_sum = cur_sum
    return max_sum
def Devided_Conquer(nums, left, right):
    """Maximum subarray sum of nums[left..right] (inclusive) by divide and
    conquer, O(n log n).

    Bug fix: the previous crossing-sum computation excluded nums[center]
    and nums[center+1] from its cumulative sums, so the crossing candidate
    could skip the middle element (e.g. [1, -1, 2] returned 3, not 2).
    Debug prints were removed as well.
    """
    if left == right:
        # a single element is its own best subarray
        return nums[left]
    center = (left + right) // 2
    max_left = Devided_Conquer(nums, left, center)
    max_right = Devided_Conquer(nums, center + 1, right)
    # best suffix sum ending exactly at `center` (must include nums[center])
    suffix_sum = 0
    best_suffix = None
    for i in range(center, left - 1, -1):
        suffix_sum += nums[i]
        if best_suffix is None or suffix_sum > best_suffix:
            best_suffix = suffix_sum
    # best prefix sum starting exactly at `center + 1`
    prefix_sum = 0
    best_prefix = None
    for i in range(center + 1, right + 1):
        prefix_sum += nums[i]
        if best_prefix is None or prefix_sum > best_prefix:
            best_prefix = prefix_sum
    return max(max_left, max_right, best_suffix + best_prefix)
def One_Pass(nums):
    """Kadane's algorithm: maximum contiguous-subarray sum in O(n)."""
    best = nums[0]
    ending_here = nums[0]
    for value in nums[1:]:
        # either extend the previous subarray or start fresh at `value`
        ending_here = max(value, ending_here + value)
        best = max(best, ending_here)
    return best
if __name__ == '__main__':
    # quick demo: run the one-pass (Kadane) solution on the LeetCode sample;
    # expected output: 6
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(One_Pass(nums))
| true | true |
f734b04aea02cf19e100ce06a3b561cc7a0bf7dc | 7,054 | py | Python | iot_services_sdk/sensor.py | sap-archive/iot-services-sdk | 157e607b0c8b3a7b77836336aa31d89ebd8e9f86 | [
"CNRI-Python"
] | 4 | 2019-05-02T07:51:13.000Z | 2019-09-25T12:14:06.000Z | iot_services_sdk/sensor.py | sap-archive/iot-services-sdk | 157e607b0c8b3a7b77836336aa31d89ebd8e9f86 | [
"CNRI-Python"
] | 2 | 2019-09-13T15:36:32.000Z | 2019-11-15T06:01:09.000Z | iot_services_sdk/sensor.py | sap-archive/iot-services-sdk | 157e607b0c8b3a7b77836336aa31d89ebd8e9f86 | [
"CNRI-Python"
] | 1 | 2020-01-17T15:44:52.000Z | 2020-01-17T15:44:52.000Z | """ Author: Philipp Steinrötter (steinroe) """
import json

from .tenant_iot_service import TenantIoTService
from .utils import build_query
from .response import Response
class SensorService(TenantIoTService):
    """Client for the sensor management endpoints of the IoT Services API.

    Fix: request payloads are now serialized with ``json.dumps`` instead of
    raw string concatenation, which produced invalid JSON whenever a value
    contained a double quote or backslash.
    """

    def __init__(self,
                 instance,
                 user,
                 password,
                 tenant_id):
        """Instantiate SensorService object

        Arguments:
            instance {string} -- IoT Services instance
            user {string} -- IoT Services user
            password {string} -- IoT Services password
            tenant_id {string} -- Id of the tenant
        """
        self.service = '/sensors'
        TenantIoTService.__init__(
            self,
            instance=instance,
            user=user,
            password=password,
            tenant_id=tenant_id
        )

    def get_sensors(self, filters=None, orderby=None, asc=True, skip=None, top=None) -> Response:
        """The endpoint returns a list of sensors.

        Keyword Arguments:
            filters {list} -- Filter strings, e.g. ["name eq 'my-name'"]. It is
                possible to filter by 'id', 'deviceId', 'name', and 'alternateId'. (default: {None})
            orderby {str} -- The attribute to order by. (default: {None})
            asc {bool} -- Only considered if orderby is not None. Ascending order if True. (default: {True})
            skip {int} -- Number of items in the queried collection to skip. (default: {None})
            top {int} -- Maximum number of items returned by the request. (default: {None})

        Returns:
            Response -- Response object
        """
        query = build_query(filters=filters, orderby=orderby, asc=asc, skip=skip, top=top)
        return super().request_core(method='GET', service=self.service, query=query, accept_json=True)

    def get_sensor_count(self):
        """The endpoint returns the count of all sensors.

        Returns:
            Response -- Response object
        """
        service = self.service + '/count'
        return super().request_core(method='GET', service=service, accept_json=True)

    def create_sensor(self, device_id: str, alternate_id: str, name: str, sensor_type_id: str) -> Response:
        """This endpoint is used to create a sensor.

        Arguments:
            device_id {str} -- Respective device ID for the sensor
            alternate_id {str} -- Alternate ID for the sensor
            name {str} -- Name for the sensor
            sensor_type_id {str} -- ID of the respective sensor type

        Returns:
            Response -- Response object
        """
        headers = {'Content-Type': 'application/json'}
        # json.dumps escapes quotes/backslashes inside the values
        payload = json.dumps({'deviceId': device_id, 'alternateId': alternate_id,
                              'name': name, 'sensorTypeId': sensor_type_id})
        return super().request_core(method='POST', service=self.service, headers=headers, payload=payload,
                                    accept_json=True)

    def delete_sensor(self, sensor_id: str) -> Response:
        """The endpoint is used to delete the sensor associated to the given id.

        Arguments:
            sensor_id {str} -- Unique identifier of a sensor

        Returns:
            Response -- Response object
        """
        service = self.service + '/' + sensor_id
        return super().request_core(method='DELETE', service=service, accept_json=True)

    def get_sensor(self, sensor_id: str) -> Response:
        """The endpoint returns the sensor associated to the given id.

        Arguments:
            sensor_id {str} -- Unique identifier of a sensor

        Returns:
            Response -- Response object
        """
        service = self.service + '/' + sensor_id
        return super().request_core(method='GET', service=service, accept_json=True)

    def update_sensor(self, sensor_id: str, name: str, sensor_type_id: str) -> Response:
        """This endpoint is used to update a sensor associated to the given id.

        Arguments:
            sensor_id {str} -- Unique identifier of a sensor
            name {str} -- Name of the sensor
            sensor_type_id {str} -- Respective sensor type ID

        Returns:
            Response -- Response object
        """
        service = self.service + '/' + sensor_id
        headers = {'Content-Type': 'application/json'}
        payload = json.dumps({'name': name, 'sensorTypeId': sensor_type_id})
        return super().request_core(method='PUT', service=service, headers=headers, payload=payload, accept_json=True)

    def add_custom_property(self, sensor_id: str, key: str, value: str) -> Response:
        """The endpoint is used to add a custom property to the sensor associated to the given id.

        Arguments:
            sensor_id {str} -- Unique identifier of a sensor
            key {str} -- Key of the custom property
            value {str} -- Value of the custom property

        Returns:
            Response -- Response object
        """
        service = self.service + '/' + sensor_id + '/customProperties'
        headers = {'Content-Type': 'application/json'}
        payload = json.dumps({'key': key, 'value': value})
        return super().request_core(method='POST', service=service, headers=headers, payload=payload, accept_json=True)

    def delete_custom_property(self, sensor_id: str, key: str) -> Response:
        """Delete a custom property from the sensor associated to the given id.

        Arguments:
            sensor_id {str} -- Unique identifier of a sensor
            key {str} -- Key of the custom property

        Returns:
            Response -- Response object
        """
        service = self.service + '/' + sensor_id + '/customProperties/' + key
        return super().request_core(method='DELETE', service=service, accept_json=True)

    def update_custom_property(self, sensor_id: str, key: str, value: str) -> Response:
        """Updates a custom property of the sensor associated to the given id.
        The 'key' attribute cannot be modified.

        Arguments:
            sensor_id {str} -- Unique identifier of a sensor
            key {str} -- Key of the custom property
            value {str} -- The updated value of the custom property

        Returns:
            Response -- Response object
        """
        service = self.service + '/' + sensor_id + '/customProperties/' + key
        headers = {'Content-Type': 'application/json'}
        payload = json.dumps({'key': key, 'value': value})
        return super().request_core(method='PUT', service=service, headers=headers, payload=payload, accept_json=True)
| 44.36478 | 294 | 0.601503 |
from .tenant_iot_service import TenantIoTService
from .utils import build_query
from .response import Response
class SensorService(TenantIoTService):
    """Client for the sensor management endpoints of the IoT Services API."""
    def __init__(self,
                 instance,
                 user,
                 password,
                 tenant_id):
        """Create the service client for the given instance/tenant credentials."""
        self.service = '/sensors'
        TenantIoTService.__init__(
            self,
            instance=instance,
            user=user,
            password=password,
            tenant_id=tenant_id
        )
    def get_sensors(self, filters=None, orderby=None, asc=True, skip=None, top=None) -> Response:
        """Return a (possibly filtered/ordered/paged) list of sensors."""
        query = build_query(filters=filters, orderby=orderby, asc=asc, skip=skip, top=top)
        return super().request_core(method='GET', service=self.service, query=query, accept_json=True)
    def get_sensor_count(self):
        """Return the count of all sensors."""
        service = self.service + '/count'
        return super().request_core(method='GET', service=service, accept_json=True)
    def create_sensor(self, device_id: str, alternate_id: str, name: str, sensor_type_id: str) -> Response:
        """Create a sensor for the given device with the given sensor type."""
        headers = {'Content-Type': 'application/json'}
        # NOTE(review): the payload is built by string concatenation; values
        # containing '"' or '\\' would produce invalid JSON -- consider json.dumps
        payload = '{ "deviceId": "' + device_id + '", "alternateId": "' + alternate_id + '", "name": "' + name + '", "sensorTypeId": "' + sensor_type_id + '"}'
        return super().request_core(method='POST', service=self.service, headers=headers, payload=payload,
                                    accept_json=True)
    def delete_sensor(self, sensor_id: str) -> Response:
        """Delete the sensor associated to the given id."""
        service = self.service + '/' + sensor_id
        return super().request_core(method='DELETE', service=service, accept_json=True)
    def get_sensor(self, sensor_id: str) -> Response:
        """Return the sensor associated to the given id."""
        service = self.service + '/' + sensor_id
        return super().request_core(method='GET', service=service, accept_json=True)
    def update_sensor(self, sensor_id: str, name: str, sensor_type_id: str) -> Response:
        """Update name and sensor type of the sensor with the given id."""
        service = self.service + '/' + sensor_id
        headers = {'Content-Type': 'application/json'}
        # NOTE(review): same string-concatenation escaping caveat as create_sensor
        payload = '{ "name" : "' + name + '", "sensorTypeId" : "' + sensor_type_id + '" }'
        return super().request_core(method='PUT', service=service, headers=headers, payload=payload, accept_json=True)
    def add_custom_property(self, sensor_id: str, key: str, value: str) -> Response:
        """Add a custom key/value property to the sensor with the given id."""
        service = self.service + '/' + sensor_id + '/customProperties'
        headers = {'Content-Type': 'application/json'}
        payload = '{ "key" : "' + key + '", "value" : "' + value + '" }'
        return super().request_core(method='POST', service=service, headers=headers, payload=payload, accept_json=True)
    def delete_custom_property(self, sensor_id: str, key: str) -> Response:
        """Delete a custom property from the sensor with the given id."""
        service = self.service + '/' + sensor_id + '/customProperties/' + key
        return super().request_core(method='DELETE', service=service, accept_json=True)
    def update_custom_property(self, sensor_id: str, key: str, value: str) -> Response:
        """Update a custom property of the sensor; the key itself cannot change."""
        service = self.service + '/' + sensor_id + '/customProperties/' + key
        headers = {'Content-Type': 'application/json'}
        payload = '{ "key" : "' + key + '", "value" : "' + value + '" }'
        return super().request_core(method='PUT', service=service, headers=headers, payload=payload, accept_json=True)
| true | true |
f734b0be4b39646d3f90ce887df36ee68dd8bc06 | 541 | py | Python | users/models.py | DarkoR12/dafi-system | f923ea4273b04f7acc7016b2f7d03e51eb00b85b | [
"MIT"
] | null | null | null | users/models.py | DarkoR12/dafi-system | f923ea4273b04f7acc7016b2f7d03e51eb00b85b | [
"MIT"
] | null | null | null | users/models.py | DarkoR12/dafi-system | f923ea4273b04f7acc7016b2f7d03e51eb00b85b | [
"MIT"
] | null | null | null | from datetime import datetime
from django.contrib.auth.models import AbstractUser
from django.db import models
def current_year():
    """Return the current calendar year (callable default for first_year)."""
    today = datetime.now()
    return today.year
class User(AbstractUser):
    """Custom user with Telegram contact details and admission year."""
    telegram_user = models.CharField('usuario de telegram', max_length=64, blank=True)
    telegram_id = models.IntegerField('ID de telegram', blank=True, null=True)
    first_year = models.IntegerField('año de ingreso', default=current_year)
    def __str__(self):
        # "First Last - email", same output as the previous .format() version
        return f'{self.first_name} {self.last_name} - {self.email}'
| 28.473684 | 86 | 0.733826 | from datetime import datetime
from django.contrib.auth.models import AbstractUser
from django.db import models
def current_year():
return datetime.now().year
class User(AbstractUser):
telegram_user = models.CharField('usuario de telegram', max_length=64, blank=True)
telegram_id = models.IntegerField('ID de telegram', blank=True, null=True)
first_year = models.IntegerField('año de ingreso', default=current_year)
def __str__(self):
return '{} {} - {}'.format(self.first_name, self.last_name, self.email)
| true | true |
f734b13f88fa172c6f4bf050b1997c8b370e45cb | 15,421 | py | Python | mistral/mistral/tests/unit/engine/test_default_engine.py | Toure/openstack_mistral_wip | 1c3d028cb7c918de74a3cb018c84d6c5ee42e3f1 | [
"Apache-2.0"
] | null | null | null | mistral/mistral/tests/unit/engine/test_default_engine.py | Toure/openstack_mistral_wip | 1c3d028cb7c918de74a3cb018c84d6c5ee42e3f1 | [
"Apache-2.0"
] | null | null | null | mistral/mistral/tests/unit/engine/test_default_engine.py | Toure/openstack_mistral_wip | 1c3d028cb7c918de74a3cb018c84d6c5ee42e3f1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_messaging.rpc import client as rpc_client
from oslo_utils import uuidutils
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral.engine import default_engine as d_eng
from mistral.engine.rpc_backend import rpc
from mistral import exceptions as exc
from mistral.services import workbooks as wb_service
from mistral.tests.unit import base
from mistral.tests.unit.engine import base as eng_test_base
from mistral.workflow import states
from mistral.workflow import utils as wf_utils
# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
WORKBOOK = """
---
version: '2.0'
name: wb
workflows:
wf:
type: reverse
input:
- param1: value1
- param2
tasks:
task1:
action: std.echo output=<% $.param1 %>
publish:
var: <% task(task1).result %>
task2:
action: std.echo output=<% $.param2 %>
requires: [task1]
"""
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
ENVIRONMENT = {
'id': uuidutils.generate_uuid(),
'name': 'test',
'description': 'my test settings',
'variables': {
'key1': 'abc',
'key2': 123
},
'scope': 'private',
'created_at': str(datetime.datetime.utcnow()),
'updated_at': str(datetime.datetime.utcnow())
}
ENVIRONMENT_DB = models.Environment(
id=ENVIRONMENT['id'],
name=ENVIRONMENT['name'],
description=ENVIRONMENT['description'],
variables=ENVIRONMENT['variables'],
scope=ENVIRONMENT['scope'],
created_at=datetime.datetime.strptime(ENVIRONMENT['created_at'],
DATETIME_FORMAT),
updated_at=datetime.datetime.strptime(ENVIRONMENT['updated_at'],
DATETIME_FORMAT)
)
MOCK_ENVIRONMENT = mock.MagicMock(return_value=ENVIRONMENT_DB)
MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError())
@mock.patch.object(rpc, 'get_executor_client', mock.Mock())
class DefaultEngineTest(base.DbTestCase):
    """Unit tests for the default engine against a real (test) database.

    The executor client is mocked for the whole class, so no action is
    actually executed; the tests drive the engine synchronously and then
    inspect the resulting workflow/task/action records in the DB.
    """

    def setUp(self):
        super(DefaultEngineTest, self).setUp()

        # Register the workbook above so every test can start 'wb.wf'.
        wb_service.create_workbook_v2(WORKBOOK)

        # Note: For purposes of this test we can easily use
        # simple magic mocks for engine and executor clients
        self.engine = d_eng.DefaultEngine()

    def test_start_workflow(self):
        wf_input = {'param1': 'Hey', 'param2': 'Hi'}

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wb.wf',
            wf_input,
            'my execution',
            task_name='task2'
        )

        self.assertIsNotNone(wf_ex)
        self.assertEqual(states.RUNNING, wf_ex.state)
        self.assertEqual('my execution', wf_ex.description)
        self.assertIn('__execution', wf_ex.context)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            # Only 'task1' may start initially: 'task2' requires it.
            self.assertEqual(1, len(task_execs))

            task_ex = task_execs[0]

            self.assertEqual('wb.wf', task_ex.workflow_name)
            self.assertEqual('task1', task_ex.name)
            self.assertEqual(states.RUNNING, task_ex.state)
            self.assertIsNotNone(task_ex.spec)
            self.assertDictEqual({}, task_ex.runtime_context)

            # Data Flow properties.
            action_execs = db_api.get_action_executions(
                task_execution_id=task_ex.id
            )

            self.assertEqual(1, len(action_execs))

            task_action_ex = action_execs[0]

            self.assertIsNotNone(task_action_ex)
            self.assertDictEqual({'output': 'Hey'}, task_action_ex.input)

    def test_start_workflow_with_input_default(self):
        # 'param1' is omitted on purpose: its declared default must be used.
        wf_input = {'param2': 'value2'}

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wb.wf',
            wf_input,
            task_name='task1'
        )

        self.assertIsNotNone(wf_ex)
        self.assertEqual(states.RUNNING, wf_ex.state)
        self.assertIn('__execution', wf_ex.context)

        # Note: We need to reread execution to access related tasks.
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            self.assertEqual(1, len(task_execs))

            task_ex = task_execs[0]

            self.assertEqual('wb.wf', task_ex.workflow_name)
            self.assertEqual('task1', task_ex.name)
            self.assertEqual(states.RUNNING, task_ex.state)
            self.assertIsNotNone(task_ex.spec)
            self.assertDictEqual({}, task_ex.runtime_context)

            # Data Flow properties: the declared default 'value1' is used.
            action_execs = db_api.get_action_executions(
                task_execution_id=task_ex.id
            )

            self.assertEqual(1, len(action_execs))

            task_action_ex = action_execs[0]

            self.assertIsNotNone(task_action_ex)
            self.assertDictEqual({'output': 'value1'}, task_action_ex.input)

    def test_start_workflow_with_adhoc_env(self):
        # Both inputs are YAQL expressions resolved from the ad-hoc env.
        wf_input = {
            'param1': '<% env().key1 %>',
            'param2': '<% env().key2 %>'
        }
        env = ENVIRONMENT['variables']

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wb.wf',
            wf_input,
            env=env,
            task_name='task2')

        self.assertIsNotNone(wf_ex)

        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        # The ad-hoc env dict must be stored in the execution params as-is.
        self.assertDictEqual(wf_ex.params.get('env', {}), env)

    @mock.patch.object(db_api, "load_environment", MOCK_ENVIRONMENT)
    def test_start_workflow_with_saved_env(self):
        wf_input = {
            'param1': '<% env().key1 %>',
            'param2': '<% env().key2 %>'
        }
        env = ENVIRONMENT['variables']

        # Start workflow referencing a stored environment by name; the
        # patched db_api.load_environment returns ENVIRONMENT_DB.
        wf_ex = self.engine.start_workflow(
            'wb.wf',
            wf_input,
            env='test',
            task_name='task2'
        )

        self.assertIsNotNone(wf_ex)

        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        # The resolved variables of the named env end up in the params.
        self.assertDictEqual(wf_ex.params.get('env', {}), env)

    @mock.patch.object(db_api, "get_environment", MOCK_NOT_FOUND)
    def test_start_workflow_env_not_found(self):
        # Referencing a missing environment must fail with InputException.
        e = self.assertRaises(
            exc.InputException,
            self.engine.start_workflow,
            'wb.wf',
            {
                'param1': '<% env().key1 %>',
                'param2': 'some value'
            },
            env='foo',
            task_name='task2'
        )

        self.assertEqual("Environment is not found: foo", e.message)

    def test_start_workflow_with_env_type_error(self):
        # 'env' must be a dict or a string name; a bool is rejected.
        e = self.assertRaises(
            exc.InputException,
            self.engine.start_workflow,
            'wb.wf',
            {
                'param1': '<% env().key1 %>',
                'param2': 'some value'
            },
            env=True,
            task_name='task2'
        )

        self.assertIn(
            'Unexpected value type for environment',
            e.message
        )

    def test_start_workflow_missing_parameters(self):
        # 'param2' has no default, so omitting all input must fail.
        e = self.assertRaises(
            exc.InputException,
            self.engine.start_workflow,
            'wb.wf',
            None,
            task_name='task2'
        )

        self.assertIn("Invalid input", e.message)
        self.assertIn("missing=['param2']", e.message)

    def test_start_workflow_unexpected_parameters(self):
        # Inputs not declared by the workflow must be rejected.
        e = self.assertRaises(
            exc.InputException,
            self.engine.start_workflow,
            'wb.wf',
            {
                'param1': 'Hey',
                'param2': 'Hi',
                'unexpected_param': 'val'
            },
            task_name='task2'
        )

        self.assertIn("Invalid input", e.message)
        self.assertIn("unexpected=['unexpected_param']", e.message)

    def test_on_action_complete(self):
        wf_input = {'param1': 'Hey', 'param2': 'Hi'}

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wb.wf',
            wf_input,
            task_name='task2'
        )

        self.assertIsNotNone(wf_ex)
        self.assertEqual(states.RUNNING, wf_ex.state)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            self.assertEqual(1, len(task_execs))

            task1_ex = task_execs[0]

            self.assertEqual('task1', task1_ex.name)
            self.assertEqual(states.RUNNING, task1_ex.state)
            self.assertIsNotNone(task1_ex.spec)
            self.assertDictEqual({}, task1_ex.runtime_context)
            self.assertNotIn('__execution', task1_ex.in_context)

            action_execs = db_api.get_action_executions(
                task_execution_id=task1_ex.id
            )

            self.assertEqual(1, len(action_execs))

            task1_action_ex = action_execs[0]

            self.assertIsNotNone(task1_action_ex)
            self.assertDictEqual({'output': 'Hey'}, task1_action_ex.input)

        # Finish action of 'task1'.
        task1_action_ex = self.engine.on_action_complete(
            task1_action_ex.id,
            wf_utils.Result(data='Hey')
        )

        self.assertIsInstance(task1_action_ex, models.ActionExecution)
        self.assertEqual('std.echo', task1_action_ex.name)
        self.assertEqual(states.SUCCESS, task1_action_ex.state)

        # Data Flow properties.
        task1_ex = db_api.get_task_execution(task1_ex.id)  # Re-read the state.

        self.assertDictEqual({'var': 'Hey'}, task1_ex.published)
        self.assertDictEqual({'output': 'Hey'}, task1_action_ex.input)
        self.assertDictEqual({'result': 'Hey'}, task1_action_ex.output)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertIsNotNone(wf_ex)
            self.assertEqual(states.RUNNING, wf_ex.state)

            task_execs = wf_ex.task_executions

            # task1 completing must have scheduled task2.
            self.assertEqual(2, len(task_execs))

            task2_ex = self._assert_single_item(task_execs, name='task2')

            self.assertEqual(states.RUNNING, task2_ex.state)

            action_execs = db_api.get_action_executions(
                task_execution_id=task2_ex.id
            )

            self.assertEqual(1, len(action_execs))

            task2_action_ex = action_execs[0]

            self.assertIsNotNone(task2_action_ex)
            self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)

        # Finish 'task2'.
        task2_action_ex = self.engine.on_action_complete(
            task2_action_ex.id,
            wf_utils.Result(data='Hi')
        )

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertIsNotNone(wf_ex)

            task_execs = wf_ex.task_executions

            # Workflow completion check is done separate with scheduler
            # but scheduler doesn't start in this test (in fact, it's just
            # a DB test)so the workflow is expected to be in running state.
            self.assertEqual(states.RUNNING, wf_ex.state)

        self.assertIsInstance(task2_action_ex, models.ActionExecution)
        self.assertEqual('std.echo', task2_action_ex.name)
        self.assertEqual(states.SUCCESS, task2_action_ex.state)

        # Data Flow properties.
        self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)
        self.assertDictEqual({}, task2_ex.published)
        self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)
        self.assertDictEqual({'result': 'Hi'}, task2_action_ex.output)

        self.assertEqual(2, len(task_execs))

        self._assert_single_item(task_execs, name='task1')
        self._assert_single_item(task_execs, name='task2')

    def test_stop_workflow_fail(self):
        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wb.wf',
            {
                'param1': 'Hey',
                'param2': 'Hi'
            },
            task_name="task2"
        )

        # Re-read execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.engine.stop_workflow(wf_ex.id, 'ERROR', "Stop this!")

        # Re-read from DB again
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertEqual('ERROR', wf_ex.state)
        self.assertEqual("Stop this!", wf_ex.state_info)

    def test_stop_workflow_succeed(self):
        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wb.wf',
            {
                'param1': 'Hey',
                'param2': 'Hi'
            },
            task_name="task2"
        )

        # Re-read execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.engine.stop_workflow(wf_ex.id, 'SUCCESS', "Like this, done")

        # Re-read from DB again
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertEqual('SUCCESS', wf_ex.state)
        self.assertEqual("Like this, done", wf_ex.state_info)

    def test_stop_workflow_bad_status(self):
        wf_ex = self.engine.start_workflow(
            'wb.wf',
            {
                'param1': 'Hey',
                'param2': 'Hi'
            },
            task_name="task2"
        )

        # Re-read execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        # 'PAUSE' is not a valid stop state, so the call must not
        # report a transition to it.
        self.assertNotEqual(
            'PAUSE',
            self.engine.stop_workflow(wf_ex.id, 'PAUSE')
        )

    def test_resume_workflow(self):
        # TODO(akhmerov): Implement.
        pass
class DefaultEngineWithTransportTest(eng_test_base.EngineTestCase):
    """Tests for error translation in the engine RPC client."""

    def test_engine_client_remote_error(self):
        # A RemoteError carrying a known Mistral exception type name must
        # be converted back to that exception class on the client side.
        mocked = mock.Mock()
        mocked.sync_call.side_effect = rpc_client.RemoteError(
            'InputException',
            'Input is wrong'
        )

        self.engine_client._client = mocked

        self.assertRaises(
            exc.InputException,
            self.engine_client.start_workflow,
            'some_wf',
            {},
            'some_description'
        )

    def test_engine_client_remote_error_arbitrary(self):
        # An arbitrary (non-Mistral) error must be wrapped into the
        # generic MistralException, preserving the original message.
        mocked = mock.Mock()
        mocked.sync_call.side_effect = KeyError('wrong key')

        self.engine_client._client = mocked

        exception = self.assertRaises(
            exc.MistralException,
            self.engine_client.start_workflow,
            'some_wf',
            {},
            'some_description'
        )

        self.assertIn('KeyError: wrong key', exception.message)
| 30.237255 | 79 | 0.611828 |
import datetime
import mock
from oslo_config import cfg
from oslo_messaging.rpc import client as rpc_client
from oslo_utils import uuidutils
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral.engine import default_engine as d_eng
from mistral.engine.rpc_backend import rpc
from mistral import exceptions as exc
from mistral.services import workbooks as wb_service
from mistral.tests.unit import base
from mistral.tests.unit.engine import base as eng_test_base
from mistral.workflow import states
from mistral.workflow import utils as wf_utils
cfg.CONF.set_default('auth_enable', False, group='pecan')
WORKBOOK = """
---
version: '2.0'
name: wb
workflows:
wf:
type: reverse
input:
- param1: value1
- param2
tasks:
task1:
action: std.echo output=<% $.param1 %>
publish:
var: <% task(task1).result %>
task2:
action: std.echo output=<% $.param2 %>
requires: [task1]
"""
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
ENVIRONMENT = {
'id': uuidutils.generate_uuid(),
'name': 'test',
'description': 'my test settings',
'variables': {
'key1': 'abc',
'key2': 123
},
'scope': 'private',
'created_at': str(datetime.datetime.utcnow()),
'updated_at': str(datetime.datetime.utcnow())
}
ENVIRONMENT_DB = models.Environment(
id=ENVIRONMENT['id'],
name=ENVIRONMENT['name'],
description=ENVIRONMENT['description'],
variables=ENVIRONMENT['variables'],
scope=ENVIRONMENT['scope'],
created_at=datetime.datetime.strptime(ENVIRONMENT['created_at'],
DATETIME_FORMAT),
updated_at=datetime.datetime.strptime(ENVIRONMENT['updated_at'],
DATETIME_FORMAT)
)
MOCK_ENVIRONMENT = mock.MagicMock(return_value=ENVIRONMENT_DB)
MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError())
@mock.patch.object(rpc, 'get_executor_client', mock.Mock())
class DefaultEngineTest(base.DbTestCase):
def setUp(self):
super(DefaultEngineTest, self).setUp()
wb_service.create_workbook_v2(WORKBOOK)
self.engine = d_eng.DefaultEngine()
def test_start_workflow(self):
wf_input = {'param1': 'Hey', 'param2': 'Hi'}
wf_ex = self.engine.start_workflow(
'wb.wf',
wf_input,
'my execution',
task_name='task2'
)
self.assertIsNotNone(wf_ex)
self.assertEqual(states.RUNNING, wf_ex.state)
self.assertEqual('my execution', wf_ex.description)
self.assertIn('__execution', wf_ex.context)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
self.assertEqual(1, len(task_execs))
task_ex = task_execs[0]
self.assertEqual('wb.wf', task_ex.workflow_name)
self.assertEqual('task1', task_ex.name)
self.assertEqual(states.RUNNING, task_ex.state)
self.assertIsNotNone(task_ex.spec)
self.assertDictEqual({}, task_ex.runtime_context)
action_execs = db_api.get_action_executions(
task_execution_id=task_ex.id
)
self.assertEqual(1, len(action_execs))
task_action_ex = action_execs[0]
self.assertIsNotNone(task_action_ex)
self.assertDictEqual({'output': 'Hey'}, task_action_ex.input)
def test_start_workflow_with_input_default(self):
wf_input = {'param2': 'value2'}
wf_ex = self.engine.start_workflow(
'wb.wf',
wf_input,
task_name='task1'
)
self.assertIsNotNone(wf_ex)
self.assertEqual(states.RUNNING, wf_ex.state)
self.assertIn('__execution', wf_ex.context)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
self.assertEqual(1, len(task_execs))
task_ex = task_execs[0]
self.assertEqual('wb.wf', task_ex.workflow_name)
self.assertEqual('task1', task_ex.name)
self.assertEqual(states.RUNNING, task_ex.state)
self.assertIsNotNone(task_ex.spec)
self.assertDictEqual({}, task_ex.runtime_context)
action_execs = db_api.get_action_executions(
task_execution_id=task_ex.id
)
self.assertEqual(1, len(action_execs))
task_action_ex = action_execs[0]
self.assertIsNotNone(task_action_ex)
self.assertDictEqual({'output': 'value1'}, task_action_ex.input)
def test_start_workflow_with_adhoc_env(self):
wf_input = {
'param1': '<% env().key1 %>',
'param2': '<% env().key2 %>'
}
env = ENVIRONMENT['variables']
wf_ex = self.engine.start_workflow(
'wb.wf',
wf_input,
env=env,
task_name='task2')
self.assertIsNotNone(wf_ex)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertDictEqual(wf_ex.params.get('env', {}), env)
@mock.patch.object(db_api, "load_environment", MOCK_ENVIRONMENT)
def test_start_workflow_with_saved_env(self):
wf_input = {
'param1': '<% env().key1 %>',
'param2': '<% env().key2 %>'
}
env = ENVIRONMENT['variables']
wf_ex = self.engine.start_workflow(
'wb.wf',
wf_input,
env='test',
task_name='task2'
)
self.assertIsNotNone(wf_ex)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertDictEqual(wf_ex.params.get('env', {}), env)
@mock.patch.object(db_api, "get_environment", MOCK_NOT_FOUND)
def test_start_workflow_env_not_found(self):
e = self.assertRaises(
exc.InputException,
self.engine.start_workflow,
'wb.wf',
{
'param1': '<% env().key1 %>',
'param2': 'some value'
},
env='foo',
task_name='task2'
)
self.assertEqual("Environment is not found: foo", e.message)
def test_start_workflow_with_env_type_error(self):
e = self.assertRaises(
exc.InputException,
self.engine.start_workflow,
'wb.wf',
{
'param1': '<% env().key1 %>',
'param2': 'some value'
},
env=True,
task_name='task2'
)
self.assertIn(
'Unexpected value type for environment',
e.message
)
def test_start_workflow_missing_parameters(self):
e = self.assertRaises(
exc.InputException,
self.engine.start_workflow,
'wb.wf',
None,
task_name='task2'
)
self.assertIn("Invalid input", e.message)
self.assertIn("missing=['param2']", e.message)
def test_start_workflow_unexpected_parameters(self):
e = self.assertRaises(
exc.InputException,
self.engine.start_workflow,
'wb.wf',
{
'param1': 'Hey',
'param2': 'Hi',
'unexpected_param': 'val'
},
task_name='task2'
)
self.assertIn("Invalid input", e.message)
self.assertIn("unexpected=['unexpected_param']", e.message)
def test_on_action_complete(self):
wf_input = {'param1': 'Hey', 'param2': 'Hi'}
wf_ex = self.engine.start_workflow(
'wb.wf',
wf_input,
task_name='task2'
)
self.assertIsNotNone(wf_ex)
self.assertEqual(states.RUNNING, wf_ex.state)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
self.assertEqual(1, len(task_execs))
task1_ex = task_execs[0]
self.assertEqual('task1', task1_ex.name)
self.assertEqual(states.RUNNING, task1_ex.state)
self.assertIsNotNone(task1_ex.spec)
self.assertDictEqual({}, task1_ex.runtime_context)
self.assertNotIn('__execution', task1_ex.in_context)
action_execs = db_api.get_action_executions(
task_execution_id=task1_ex.id
)
self.assertEqual(1, len(action_execs))
task1_action_ex = action_execs[0]
self.assertIsNotNone(task1_action_ex)
self.assertDictEqual({'output': 'Hey'}, task1_action_ex.input)
task1_action_ex = self.engine.on_action_complete(
task1_action_ex.id,
wf_utils.Result(data='Hey')
)
self.assertIsInstance(task1_action_ex, models.ActionExecution)
self.assertEqual('std.echo', task1_action_ex.name)
self.assertEqual(states.SUCCESS, task1_action_ex.state)
task1_ex = db_api.get_task_execution(task1_ex.id)
self.assertDictEqual({'var': 'Hey'}, task1_ex.published)
self.assertDictEqual({'output': 'Hey'}, task1_action_ex.input)
self.assertDictEqual({'result': 'Hey'}, task1_action_ex.output)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertIsNotNone(wf_ex)
self.assertEqual(states.RUNNING, wf_ex.state)
task_execs = wf_ex.task_executions
self.assertEqual(2, len(task_execs))
task2_ex = self._assert_single_item(task_execs, name='task2')
self.assertEqual(states.RUNNING, task2_ex.state)
action_execs = db_api.get_action_executions(
task_execution_id=task2_ex.id
)
self.assertEqual(1, len(action_execs))
task2_action_ex = action_execs[0]
self.assertIsNotNone(task2_action_ex)
self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)
task2_action_ex = self.engine.on_action_complete(
task2_action_ex.id,
wf_utils.Result(data='Hi')
)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertIsNotNone(wf_ex)
task_execs = wf_ex.task_executions
self.assertEqual(states.RUNNING, wf_ex.state)
self.assertIsInstance(task2_action_ex, models.ActionExecution)
self.assertEqual('std.echo', task2_action_ex.name)
self.assertEqual(states.SUCCESS, task2_action_ex.state)
self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)
self.assertDictEqual({}, task2_ex.published)
self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)
self.assertDictEqual({'result': 'Hi'}, task2_action_ex.output)
self.assertEqual(2, len(task_execs))
self._assert_single_item(task_execs, name='task1')
self._assert_single_item(task_execs, name='task2')
def test_stop_workflow_fail(self):
wf_ex = self.engine.start_workflow(
'wb.wf',
{
'param1': 'Hey',
'param2': 'Hi'
},
task_name="task2"
)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.engine.stop_workflow(wf_ex.id, 'ERROR', "Stop this!")
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual('ERROR', wf_ex.state)
self.assertEqual("Stop this!", wf_ex.state_info)
def test_stop_workflow_succeed(self):
wf_ex = self.engine.start_workflow(
'wb.wf',
{
'param1': 'Hey',
'param2': 'Hi'
},
task_name="task2"
)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.engine.stop_workflow(wf_ex.id, 'SUCCESS', "Like this, done")
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual('SUCCESS', wf_ex.state)
self.assertEqual("Like this, done", wf_ex.state_info)
def test_stop_workflow_bad_status(self):
wf_ex = self.engine.start_workflow(
'wb.wf',
{
'param1': 'Hey',
'param2': 'Hi'
},
task_name="task2"
)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertNotEqual(
'PAUSE',
self.engine.stop_workflow(wf_ex.id, 'PAUSE')
)
def test_resume_workflow(self):
pass
class DefaultEngineWithTransportTest(eng_test_base.EngineTestCase):
def test_engine_client_remote_error(self):
mocked = mock.Mock()
mocked.sync_call.side_effect = rpc_client.RemoteError(
'InputException',
'Input is wrong'
)
self.engine_client._client = mocked
self.assertRaises(
exc.InputException,
self.engine_client.start_workflow,
'some_wf',
{},
'some_description'
)
def test_engine_client_remote_error_arbitrary(self):
mocked = mock.Mock()
mocked.sync_call.side_effect = KeyError('wrong key')
self.engine_client._client = mocked
exception = self.assertRaises(
exc.MistralException,
self.engine_client.start_workflow,
'some_wf',
{},
'some_description'
)
self.assertIn('KeyError: wrong key', exception.message)
| true | true |
f734b394aa62228b86d84da1a5347056479a101d | 1,575 | py | Python | hsse_api/migrations/0032_auto_20181112_2011.py | JEpifanio90/HSSE_APIv2 | 7b8e8fee9cffa932884f979d24658f08726b656d | [
"MIT"
] | null | null | null | hsse_api/migrations/0032_auto_20181112_2011.py | JEpifanio90/HSSE_APIv2 | 7b8e8fee9cffa932884f979d24658f08726b656d | [
"MIT"
] | null | null | null | hsse_api/migrations/0032_auto_20181112_2011.py | JEpifanio90/HSSE_APIv2 | 7b8e8fee9cffa932884f979d24658f08726b656d | [
"MIT"
] | 1 | 2018-12-07T03:43:32.000Z | 2018-12-07T03:43:32.000Z | # Generated by Django 2.1 on 2018-11-13 01:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.1).

    Switches the ``created_on`` field of every listed model to
    ``auto_now_add=True`` so the creation date is filled in
    automatically when a row is first saved.
    """

    dependencies = [
        ('hsse_api', '0031_auto_20181111_1424'),
    ]

    operations = [
        # One AlterField per model; all apply the same created_on change.
        migrations.AlterField(
            model_name='auditinspection',
            name='created_on',
            field=models.DateField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='employeecommunityactivity',
            name='created_on',
            field=models.DateField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='environmentalindicator',
            name='created_on',
            field=models.DateField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='monthlyreport',
            name='created_on',
            field=models.DateField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='report',
            name='created_on',
            field=models.DateField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='safetyactivity',
            name='created_on',
            field=models.DateField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='site',
            name='created_on',
            field=models.DateField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='created_on',
            field=models.DateField(auto_now_add=True),
        ),
    ]
| 29.166667 | 54 | 0.568889 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hsse_api', '0031_auto_20181111_1424'),
]
operations = [
migrations.AlterField(
model_name='auditinspection',
name='created_on',
field=models.DateField(auto_now_add=True),
),
migrations.AlterField(
model_name='employeecommunityactivity',
name='created_on',
field=models.DateField(auto_now_add=True),
),
migrations.AlterField(
model_name='environmentalindicator',
name='created_on',
field=models.DateField(auto_now_add=True),
),
migrations.AlterField(
model_name='monthlyreport',
name='created_on',
field=models.DateField(auto_now_add=True),
),
migrations.AlterField(
model_name='report',
name='created_on',
field=models.DateField(auto_now_add=True),
),
migrations.AlterField(
model_name='safetyactivity',
name='created_on',
field=models.DateField(auto_now_add=True),
),
migrations.AlterField(
model_name='site',
name='created_on',
field=models.DateField(auto_now_add=True),
),
migrations.AlterField(
model_name='user',
name='created_on',
field=models.DateField(auto_now_add=True),
),
]
| true | true |
f734b4ee525907aaa86b2d3ec604fe9872b56b26 | 129 | py | Python | sandbox/partner/partner/apps.py | ApsRajput/oscar-extend | 8828d66b78ae139a73d5ca707c0fe0710eac890a | [
"BSD-3-Clause"
] | null | null | null | sandbox/partner/partner/apps.py | ApsRajput/oscar-extend | 8828d66b78ae139a73d5ca707c0fe0710eac890a | [
"BSD-3-Clause"
] | null | null | null | sandbox/partner/partner/apps.py | ApsRajput/oscar-extend | 8828d66b78ae139a73d5ca707c0fe0710eac890a | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from django.apps import AppConfig
class AccountConfig(AppConfig):
    """Django application configuration for the 'account' app."""
    name = 'account'
from django.apps import AppConfig
class AccountConfig(AppConfig):
name = 'account' | true | true |
f734b5ebe426a14ff0e314fceef43f9af09ce111 | 680 | py | Python | project/app.py | mapattacker/flask-serverless | 9612b7cbc5157770d88f352e0676911658c4de9a | [
"Apache-2.0"
] | null | null | null | project/app.py | mapattacker/flask-serverless | 9612b7cbc5157770d88f352e0676911658c4de9a | [
"Apache-2.0"
] | null | null | null | project/app.py | mapattacker/flask-serverless | 9612b7cbc5157770d88f352e0676911658c4de9a | [
"Apache-2.0"
] | null | null | null | import pickle
import traceback
import numpy as np
from flask import Flask, request
from config import MODELPATH, DEBUG
# Create the WSGI application and load the pickled model once at import
# time so every request reuses the same in-memory model.
app = Flask(__name__)

# Use a context manager so the model file handle is closed after loading
# (the original bare open() call leaked the file descriptor).
with open(MODELPATH, 'rb') as model_file:
    model = pickle.load(model_file)
@app.route("/predict", methods=["POST"])
def predict():
"""{"input": [5.8, 2.8, 5.1, 2.4]}"""
try:
content = request.json
sample = content["input"]
sample = np.array(sample).reshape(1, -1)
prediction = model.predict(sample).tolist()[0]
return {"prediction": prediction}
except Exception as e:
tb = traceback.format_exc()
return {"errorMessages": tb.replace("\n","")}
if __name__ == "__main__":
app.run(debug=DEBUG) | 21.935484 | 54 | 0.623529 | import pickle
import traceback
import numpy as np
from flask import Flask, request
from config import MODELPATH, DEBUG
app = Flask(__name__)
model = pickle.load(open(MODELPATH, 'rb'))
@app.route("/predict", methods=["POST"])
def predict():
try:
content = request.json
sample = content["input"]
sample = np.array(sample).reshape(1, -1)
prediction = model.predict(sample).tolist()[0]
return {"prediction": prediction}
except Exception as e:
tb = traceback.format_exc()
return {"errorMessages": tb.replace("\n","")}
if __name__ == "__main__":
app.run(debug=DEBUG) | true | true |
f734b60ccd490cc86dc23fd7c60141903b7a603e | 1,049 | py | Python | generate_refs.py | life4/gweb | 396bcca71dd05e492b28ccbaa9fb3f9ae1c27502 | [
"MIT"
] | 185 | 2020-02-10T17:54:01.000Z | 2022-03-23T06:12:03.000Z | generate_refs.py | life4/gweb | 396bcca71dd05e492b28ccbaa9fb3f9ae1c27502 | [
"MIT"
] | 1 | 2020-10-05T06:56:38.000Z | 2020-10-05T06:56:38.000Z | generate_refs.py | life4/gweb | 396bcca71dd05e492b28ccbaa9fb3f9ae1c27502 | [
"MIT"
] | 15 | 2020-10-02T17:32:50.000Z | 2022-01-24T14:58:31.000Z | import re
from collections import defaultdict
from pathlib import Path
# MDN base URL that scanned Go comments are expected to start with, and
# the pkg.go.dev URL template used for linking to gweb's documentation.
base_url = 'https://developer.mozilla.org/en-US/docs/Web/API/'
doc_base_url = 'https://pkg.go.dev/github.com/life4/gweb/{package}#{obj}'

link = re.escape(f'// {base_url}')
# Matches one or more "// <MDN url>" comment lines immediately followed
# by a method definition "func (recv *Type) Name".  Groups captured:
# the MDN path fragment(s), the receiver type name and the method name.
rex = re.compile(rf'(?:{link}([a-zA-Z/-]+))+\nfunc \([a-z]+ \*?([a-zA-Z]+)\) ([a-zA-Z]+)')

# MDN path -> list of (package, "Type.Method") pairs found in the sources.
refs: dict = defaultdict(list)
for path in Path().glob('*/*.go'):
    content = path.read_text()
    for match in rex.findall(content):
        *links, struct, func = match
        # NOTE(review): this loop variable shadows the module-level
        # 'link' defined above — harmless here since 'link' is only used
        # to build 'rex', but worth confirming/renaming.
        for link in links:
            refs[link].append((path.parent.name, f'{struct}.{func}'))

# Emit the reference table as Markdown on stdout.
print("""
# Reference
Below is the mapping of web API to gweb functions.
This file is autogenerated, so some references may be missed.
| Web API | gweb |
| ------- | ---- |
""".strip())
for ref, objects in sorted(refs.items()):
    url = base_url + ref
    # Display the MDN path with dots (e.g. "Window/alert" -> "Window.alert").
    ref = ref.replace('/', '.')
    for package, obj in objects:
        doc_url = doc_base_url.format(package=package, obj=obj)
        print(f'| [{ref}]({url}) | [{obj}]({doc_url}) |')
| 31.787879 | 90 | 0.614871 | import re
from collections import defaultdict
from pathlib import Path
base_url = 'https://developer.mozilla.org/en-US/docs/Web/API/'
doc_base_url = 'https://pkg.go.dev/github.com/life4/gweb/{package}#{obj}'
link = re.escape(f'// {base_url}')
rex = re.compile(rf'(?:{link}([a-zA-Z/-]+))+\nfunc \([a-z]+ \*?([a-zA-Z]+)\) ([a-zA-Z]+)')
refs: dict = defaultdict(list)
for path in Path().glob('*/*.go'):
content = path.read_text()
for match in rex.findall(content):
*links, struct, func = match
for link in links:
refs[link].append((path.parent.name, f'{struct}.{func}'))
print("""
# Reference
Below is the mapping of web API to gweb functions.
This file is autogenerated, so some references may be missed.
| Web API | gweb |
| ------- | ---- |
""".strip())
for ref, objects in sorted(refs.items()):
url = base_url + ref
ref = ref.replace('/', '.')
for package, obj in objects:
doc_url = doc_base_url.format(package=package, obj=obj)
print(f'| [{ref}]({url}) | [{obj}]({doc_url}) |')
| true | true |
f734b703f0a22b5a0f6e899df15bfb9827e5dc49 | 1,999 | py | Python | 5_Quadrature Formulas/Algoritmi_Quadratura.py | LeonardoSaccotelli/Numerical-Calculus-Project | becb480a611c9a57416127f6b0289085fe180ee4 | [
"MIT"
] | null | null | null | 5_Quadrature Formulas/Algoritmi_Quadratura.py | LeonardoSaccotelli/Numerical-Calculus-Project | becb480a611c9a57416127f6b0289085fe180ee4 | [
"MIT"
] | null | null | null | 5_Quadrature Formulas/Algoritmi_Quadratura.py | LeonardoSaccotelli/Numerical-Calculus-Project | becb480a611c9a57416127f6b0289085fe180ee4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 7 17:58:09 2020
@author: Leonardo Saccotelli
"""
import numpy as np
"""
FORMULA DEI TRAPEZI
Al metodo vengono passati:
- la funzione integranda
- l'estremo inferiore di integrazione
- l'estremo superiore di integrazione
"""
def Trapezoid(f_x, a, b):
#Calcolo l'integrale
T = (b-a)*(f_x(a)+f_x(b))/2
return T
"""
FORMULA DEI TRAPEZI COMPOSTI
Al metodo vengono passati:
- la funzione integranda
- l'estremo inferiore di integrazione
- l'estremo superiore di integrazione
- il numero di intervallini
"""
def CompositeTrapezoid(f_x, a, b, N):
#Estrpolo N+1 intervalli equidistanti da [a,b]
z = np.linspace(a,b,N+1)
#Calcolo f_x() in ogni punto di z
fz = f_x(z)
S = 0
#Calcolo del trapezio composto
for i in range(1,N):
S = S + fz[i]
TC = (fz[0] + 2*S + fz[N])*(b-a)/2/N
return TC
"""
FORMULA DI SIMPSON
Al metodo vengono passati:
- la funzione integranda
- l'estremo inferiore di integrazione
- l'estremo superiore di integrazione
"""
def Simpson(f_x, a, b):
#Calcolo l'integrale
T = ((b-a)/6) * (f_x(a) +4 * f_x((b+a)/2) + f_x(b))
return T
"""
FORMULA DI SIMPSON COMPOSTA
Al metodo vengono passati:
- la funzione integranda
- l'estremo inferiore di integrazione
- l'estremo superiore di integrazione
- il numero di intervalli
"""
def CompositeSimpson(f, a, b, N):
#Genero n+1 intervallini in [a,b]
z = np.linspace(a,b,N+1)
#Calcolo f negli intervalli z
fz = f(z)
#Definisco le somme dispari e le somme pari
S_d = 0
S_p = 0
#Definisco l'ampiezza dei singoli intervalli
h = (b-a)/N
#Calcolo le somme dispari
for i in range(1,N,2):
S_d = S_d + fz[i]
#Calcolo le somme pari
for i in range(2,N-1,2):
S_p = S_p + fz[i]
Tsc = (fz[0] + 4*S_d + 2*S_p + fz[N])*h/3
return Tsc
| 21.042105 | 55 | 0.595798 |
import numpy as np
def Trapezoid(f_x, a, b):
T = (b-a)*(f_x(a)+f_x(b))/2
return T
def CompositeTrapezoid(f_x, a, b, N):
#Estrpolo N+1 intervalli equidistanti da [a,b]
z = np.linspace(a,b,N+1)
#Calcolo f_x() in ogni punto di z
fz = f_x(z)
S = 0
#Calcolo del trapezio composto
for i in range(1,N):
S = S + fz[i]
TC = (fz[0] + 2*S + fz[N])*(b-a)/2/N
return TC
def Simpson(f_x, a, b):
#Calcolo l'integrale
T = ((b-a)/6) * (f_x(a) +4 * f_x((b+a)/2) + f_x(b))
return T
def CompositeSimpson(f, a, b, N):
z = np.linspace(a,b,N+1)
fz = f(z)
S_d = 0
S_p = 0
h = (b-a)/N
#Calcolo le somme dispari
for i in range(1,N,2):
S_d = S_d + fz[i]
#Calcolo le somme pari
for i in range(2,N-1,2):
S_p = S_p + fz[i]
Tsc = (fz[0] + 4*S_d + 2*S_p + fz[N])*h/3
return Tsc
| true | true |
f734b7c8a2993ef120151e812e8f2a1a91a1d8ae | 6,204 | py | Python | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_client.py | aadityasinha-dotcom/airbyte | 7f6dd9b6fc4288260532fe1beaf7901633f13c88 | [
"MIT"
] | 1 | 2021-11-24T17:36:54.000Z | 2021-11-24T17:36:54.000Z | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_client.py | aadityasinha-dotcom/airbyte | 7f6dd9b6fc4288260532fe1beaf7901633f13c88 | [
"MIT"
] | 2 | 2022-03-16T16:17:37.000Z | 2022-03-24T19:00:41.000Z | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_client.py | aadityasinha-dotcom/airbyte | 7f6dd9b6fc4288260532fe1beaf7901633f13c88 | [
"MIT"
] | null | null | null | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
from datetime import datetime
import pendulum
import pytest
from airbyte_cdk.models import SyncMode
from facebook_business import FacebookAdsApi, FacebookSession
from facebook_business.exceptions import FacebookRequestError
from source_facebook_marketing.api import API
from source_facebook_marketing.streams import AdCreatives, Campaigns
FB_API_VERSION = FacebookAdsApi.API_VERSION
@pytest.fixture(scope="session", name="account_id")
def account_id_fixture():
return "unknown_account"
@pytest.fixture(scope="session", name="some_config")
def some_config_fixture(account_id):
return {"start_date": "2021-01-23T00:00:00Z", "account_id": f"{account_id}", "access_token": "unknown_token"}
@pytest.fixture(autouse=True)
def mock_default_sleep_interval(mocker):
    # Shrink the retry back-off used by the streams so rate-limit tests
    # do not actually sleep for the production interval.
    mocker.patch("source_facebook_marketing.streams.common.DEFAULT_SLEEP_INTERVAL", return_value=pendulum.duration(seconds=5))
@pytest.fixture(name="api")
def api_fixture(some_config, requests_mock, fb_account_response):
api = API(account_id=some_config["account_id"], access_token=some_config["access_token"])
requests_mock.register_uri("GET", FacebookSession.GRAPH + f"/{FB_API_VERSION}/me/adaccounts", [fb_account_response])
return api
@pytest.fixture(name="fb_call_rate_response")
def fb_call_rate_response_fixture():
error = {
"message": (
"(#80000) There have been too many calls from this ad-account. Wait a bit and try again. "
"For more info, please refer to https://developers.facebook.com/docs/graph-api/overview/rate-limiting."
),
"type": "OAuthException",
"code": 80000,
"error_subcode": 2446079,
"fbtrace_id": "this_is_fake_response",
}
headers = {"x-app-usage": json.dumps({"call_count": 28, "total_time": 25, "total_cputime": 25})}
return {
"json": {
"error": error,
},
"status_code": 400,
"headers": headers,
}
@pytest.fixture(name="fb_account_response")
def fb_account_response_fixture(account_id):
return {
"json": {
"data": [
{
"account_id": account_id,
"id": f"act_{account_id}",
}
],
"paging": {"cursors": {"before": "MjM4NDYzMDYyMTcyNTAwNzEZD", "after": "MjM4NDYzMDYyMTcyNTAwNzEZD"}},
},
"status_code": 200,
}
class TestBackoff:
    # Each test registers a sequence of mocked HTTP responses: an error
    # first, then (usually) a success, and asserts how the stream reacts.
    def test_limit_reached(self, mocker, requests_mock, api, fb_call_rate_response, account_id):
        """Error once, check that we retry and not fail"""
        # turn Campaigns into non batch mode to test non batch logic
        mocker.patch.object(Campaigns, "use_batch", new_callable=mocker.PropertyMock, return_value=False)
        # First response throttles the call, the second succeeds; a
        # correctly retrying stream therefore yields the two records.
        campaign_responses = [
            fb_call_rate_response,
            {
                "json": {"data": [{"id": 1, "updated_time": "2020-09-25T00:00:00Z"}, {"id": 2, "updated_time": "2020-09-25T00:00:00Z"}]},
                "status_code": 200,
            },
        ]
        requests_mock.register_uri("GET", FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/campaigns", campaign_responses)
        requests_mock.register_uri("GET", FacebookSession.GRAPH + f"/{FB_API_VERSION}/1/", [{"status_code": 200}])
        requests_mock.register_uri("GET", FacebookSession.GRAPH + f"/{FB_API_VERSION}/2/", [{"status_code": 200}])
        stream = Campaigns(api=api, start_date=pendulum.now(), end_date=pendulum.now(), include_deleted=False)
        try:
            records = list(stream.read_records(sync_mode=SyncMode.full_refresh, stream_state={}))
            assert records
        except FacebookRequestError:
            pytest.fail("Call rate error has not being handled")
    def test_batch_limit_reached(self, requests_mock, api, fb_call_rate_response, account_id):
        """Error once, check that we retry and not fail"""
        # Listing endpoint: throttle once, then return two ad creatives.
        responses = [
            fb_call_rate_response,
            {
                "json": {
                    "data": [
                        {
                            "id": "123",
                            "object_type": "SHARE",
                            "status": "ACTIVE",
                        },
                        {
                            "id": "1234",
                            "object_type": "SHARE",
                            "status": "ACTIVE",
                        },
                    ],
                    "status_code": 200,
                }
            },
        ]
        # Batch endpoint: throttle once, then deliver both creative bodies.
        batch_responses = [
            fb_call_rate_response,
            {
                "json": [
                    {"body": json.dumps({"name": "creative 1"}), "code": 200, "headers": {}},
                    {"body": json.dumps({"name": "creative 2"}), "code": 200, "headers": {}},
                ]
            },
        ]
        requests_mock.register_uri("GET", FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/adcreatives", responses)
        requests_mock.register_uri("POST", FacebookSession.GRAPH + f"/{FB_API_VERSION}/", batch_responses)
        stream = AdCreatives(api=api, include_deleted=False)
        records = list(stream.read_records(sync_mode=SyncMode.full_refresh, stream_state={}))
        assert records == [{"name": "creative 1"}, {"name": "creative 2"}]
    def test_server_error(self, requests_mock, api, account_id):
        """Error once, check that we retry and not fail"""
        # A 500 is expected to surface as FacebookRequestError even though
        # a later response would succeed (see pytest.raises below).
        responses = [
            {"json": {"error": {}}, "status_code": 500},
            {
                "json": {"data": [{"id": 1, "updated_time": "2020-09-25T00:00:00Z"}, {"id": 2, "updated_time": "2020-09-25T00:00:00Z"}]},
                "status_code": 200,
            },
        ]
        requests_mock.register_uri("GET", FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/campaigns", responses)
        with pytest.raises(FacebookRequestError):
            stream = Campaigns(api=api, start_date=datetime.now(), end_date=datetime.now(), include_deleted=False)
            list(stream.read_records(sync_mode=SyncMode.full_refresh, stream_state={}))
| 38.296296 | 137 | 0.598001 |
import json
from datetime import datetime
import pendulum
import pytest
from airbyte_cdk.models import SyncMode
from facebook_business import FacebookAdsApi, FacebookSession
from facebook_business.exceptions import FacebookRequestError
from source_facebook_marketing.api import API
from source_facebook_marketing.streams import AdCreatives, Campaigns
FB_API_VERSION = FacebookAdsApi.API_VERSION
@pytest.fixture(scope="session", name="account_id")
def account_id_fixture():
return "unknown_account"
@pytest.fixture(scope="session", name="some_config")
def some_config_fixture(account_id):
return {"start_date": "2021-01-23T00:00:00Z", "account_id": f"{account_id}", "access_token": "unknown_token"}
@pytest.fixture(autouse=True)
def mock_default_sleep_interval(mocker):
mocker.patch("source_facebook_marketing.streams.common.DEFAULT_SLEEP_INTERVAL", return_value=pendulum.duration(seconds=5))
@pytest.fixture(name="api")
def api_fixture(some_config, requests_mock, fb_account_response):
api = API(account_id=some_config["account_id"], access_token=some_config["access_token"])
requests_mock.register_uri("GET", FacebookSession.GRAPH + f"/{FB_API_VERSION}/me/adaccounts", [fb_account_response])
return api
@pytest.fixture(name="fb_call_rate_response")
def fb_call_rate_response_fixture():
error = {
"message": (
"(#80000) There have been too many calls from this ad-account. Wait a bit and try again. "
"For more info, please refer to https://developers.facebook.com/docs/graph-api/overview/rate-limiting."
),
"type": "OAuthException",
"code": 80000,
"error_subcode": 2446079,
"fbtrace_id": "this_is_fake_response",
}
headers = {"x-app-usage": json.dumps({"call_count": 28, "total_time": 25, "total_cputime": 25})}
return {
"json": {
"error": error,
},
"status_code": 400,
"headers": headers,
}
@pytest.fixture(name="fb_account_response")
def fb_account_response_fixture(account_id):
return {
"json": {
"data": [
{
"account_id": account_id,
"id": f"act_{account_id}",
}
],
"paging": {"cursors": {"before": "MjM4NDYzMDYyMTcyNTAwNzEZD", "after": "MjM4NDYzMDYyMTcyNTAwNzEZD"}},
},
"status_code": 200,
}
class TestBackoff:
def test_limit_reached(self, mocker, requests_mock, api, fb_call_rate_response, account_id):
mocker.patch.object(Campaigns, "use_batch", new_callable=mocker.PropertyMock, return_value=False)
campaign_responses = [
fb_call_rate_response,
{
"json": {"data": [{"id": 1, "updated_time": "2020-09-25T00:00:00Z"}, {"id": 2, "updated_time": "2020-09-25T00:00:00Z"}]},
"status_code": 200,
},
]
requests_mock.register_uri("GET", FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/campaigns", campaign_responses)
requests_mock.register_uri("GET", FacebookSession.GRAPH + f"/{FB_API_VERSION}/1/", [{"status_code": 200}])
requests_mock.register_uri("GET", FacebookSession.GRAPH + f"/{FB_API_VERSION}/2/", [{"status_code": 200}])
stream = Campaigns(api=api, start_date=pendulum.now(), end_date=pendulum.now(), include_deleted=False)
try:
records = list(stream.read_records(sync_mode=SyncMode.full_refresh, stream_state={}))
assert records
except FacebookRequestError:
pytest.fail("Call rate error has not being handled")
def test_batch_limit_reached(self, requests_mock, api, fb_call_rate_response, account_id):
responses = [
fb_call_rate_response,
{
"json": {
"data": [
{
"id": "123",
"object_type": "SHARE",
"status": "ACTIVE",
},
{
"id": "1234",
"object_type": "SHARE",
"status": "ACTIVE",
},
],
"status_code": 200,
}
},
]
batch_responses = [
fb_call_rate_response,
{
"json": [
{"body": json.dumps({"name": "creative 1"}), "code": 200, "headers": {}},
{"body": json.dumps({"name": "creative 2"}), "code": 200, "headers": {}},
]
},
]
requests_mock.register_uri("GET", FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/adcreatives", responses)
requests_mock.register_uri("POST", FacebookSession.GRAPH + f"/{FB_API_VERSION}/", batch_responses)
stream = AdCreatives(api=api, include_deleted=False)
records = list(stream.read_records(sync_mode=SyncMode.full_refresh, stream_state={}))
assert records == [{"name": "creative 1"}, {"name": "creative 2"}]
def test_server_error(self, requests_mock, api, account_id):
responses = [
{"json": {"error": {}}, "status_code": 500},
{
"json": {"data": [{"id": 1, "updated_time": "2020-09-25T00:00:00Z"}, {"id": 2, "updated_time": "2020-09-25T00:00:00Z"}]},
"status_code": 200,
},
]
requests_mock.register_uri("GET", FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/campaigns", responses)
with pytest.raises(FacebookRequestError):
stream = Campaigns(api=api, start_date=datetime.now(), end_date=datetime.now(), include_deleted=False)
list(stream.read_records(sync_mode=SyncMode.full_refresh, stream_state={}))
| true | true |
f734b7e22b114bcf1c5f0a73a9a92dfaa783b0aa | 7,751 | py | Python | bertviz/pytorch_pretrained_bert/file_utils.py | whaleloops/bertviz | 3c6323fa3a3e03e7399a0ad6ab8463c320179323 | [
"Apache-2.0"
] | 58 | 2019-09-16T07:24:23.000Z | 2021-12-30T09:04:38.000Z | bertviz/pytorch_pretrained_bert/file_utils.py | whaleloops/bertviz | 3c6323fa3a3e03e7399a0ad6ab8463c320179323 | [
"Apache-2.0"
] | 4 | 2021-09-09T03:02:18.000Z | 2022-03-24T13:55:55.000Z | bertviz/pytorch_pretrained_bert/file_utils.py | whaleloops/bertviz | 3c6323fa3a3e03e7399a0ad6ab8463c320179323 | [
"Apache-2.0"
] | 32 | 2019-07-30T17:47:21.000Z | 2022-03-29T22:33:29.000Z | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: str = None) -> str:
    """Convert *url* into a repeatable hashed filename.

    When *etag* is given, the hash of the etag is appended to the hash of
    the url, separated by a period.
    """
    name = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        name = name + '.' + sha256(etag.encode('utf-8')).hexdigest()
    return name
def filename_to_url(filename: str, cache_dir: str = None) -> Tuple[str, str]:
    """Return the url and etag (which may be ``None``) recorded for *filename*.

    Raises ``FileNotFoundError`` when *filename* or its ``.json`` metadata
    sidecar is missing from the cache.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    # The cached file is checked first, then its metadata sidecar.
    for required in (cache_path, meta_path):
        if not os.path.exists(required):
            raise FileNotFoundError("file {} not found".format(required))
    with open(meta_path) as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
def cached_path(url_or_filename: Union[str, Path], cache_dir: str = None) -> str:
    """Resolve *url_or_filename* to a local file path.

    URLs (http/https/s3) are downloaded into the cache if necessary and the
    cached path is returned; existing local paths are returned unchanged.
    Raises ``FileNotFoundError`` for a missing local path and ``ValueError``
    for anything that is neither a URL nor a path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    target = str(url_or_filename) if isinstance(url_or_filename, Path) else url_or_filename
    scheme = urlparse(target).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: fetch through the cache (downloading if needed).
        return get_from_cache(target, cache_dir)
    if os.path.exists(target):
        # Already a local file.
        return target
    if scheme == '':
        # Looks like a local path, but nothing is there.
        raise FileNotFoundError("file {} not found".format(target))
    # Something unknown.
    raise ValueError("unable to parse {} as a URL or as a local path".format(target))
def split_s3_path(url: str) -> Tuple[str, str]:
    """Split a full s3 path into ``(bucket_name, key)``."""
    parsed = urlparse(url)
    if not (parsed.netloc and parsed.path):
        raise ValueError("bad s3 path {}".format(url))
    key = parsed.path
    # urlparse keeps a leading '/' on the key; drop exactly one.
    if key.startswith("/"):
        key = key[1:]
    return parsed.netloc, key
def s3_request(func: Callable):
    """Decorator for s3 requests that turns 404 ClientErrors into the more
    helpful FileNotFoundError, leaving other errors untouched."""
    @wraps(func)
    def wrapper(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # Only a 404 maps to FileNotFoundError; everything else
            # propagates unchanged.
            if int(exc.response["Error"]["Code"]) != 404:
                raise
            raise FileNotFoundError("file {} not found".format(url))
    return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
    """Check ETag on S3 object.

    Returns the object's e_tag; a 404 from S3 is translated into
    FileNotFoundError by the @s3_request wrapper.
    """
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_object = s3_resource.Object(bucket_name, s3_path)
    return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
    """Pull a file directly from S3.

    Streams the object named by *url* into the already-open *temp_file*.
    """
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
    """Stream *url* over HTTP into *temp_file*, showing a tqdm progress bar."""
    req = requests.get(url, stream=True)
    content_length = req.headers.get('Content-Length')
    # Content-Length may be absent; tqdm then runs without a total.
    total = int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if chunk: # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url: str, cache_dir: str = None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Raises IOError when the remote HEAD request fails; S3 urls are handled
    via boto3, everything else via requests.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    os.makedirs(cache_dir, exist_ok=True)
    # Get eTag to add to filename, if it exists. The cache key hashes both
    # the url and the etag, so a changed remote resource gets a new entry.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            # Sidecar .json records the source url/etag for filename_to_url().
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w') as meta_file:
                json.dump(meta, meta_file)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
def read_set_from_file(filename: str) -> Set[str]:
    '''
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line; trailing whitespace
    (including the newline) is stripped from each item.
    '''
    with open(filename, 'r') as handle:
        return {line.rstrip() for line in handle}
def get_file_extension(path: str, dot=True, lower: bool = True):
    """Return the extension of *path*, optionally without the leading dot
    and optionally lowercased (both enabled by default)."""
    _, ext = os.path.splitext(path)
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
| 33.123932 | 98 | 0.650239 |
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
logger = logging.getLogger(__name__)
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: str = None) -> str:
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
def filename_to_url(filename: str, cache_dir: str = None) -> Tuple[str, str]:
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename: Union[str, Path], cache_dir: str = None) -> str:
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
return url_or_filename
elif parsed.scheme == '':
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url: str) -> Tuple[str, str]:
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func: Callable):
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url: str, cache_dir: str = None) -> str:
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
os.makedirs(cache_dir, exist_ok=True)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename: str) -> Set[str]:
collection = set()
with open(filename, 'r') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path: str, dot=True, lower: bool = True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| true | true |
f734b81b788e1e3bc224c74ce6cddaa03e354b05 | 1,010 | py | Python | bindings/python/tests/test_wrapper.py | kamino/dragonffi | 3c983cc8c091d5472f7cdeab1b06dc3b1902e1be | [
"Apache-2.0"
] | 523 | 2018-02-02T08:07:24.000Z | 2022-03-21T15:44:39.000Z | bindings/python/tests/test_wrapper.py | kamino/dragonffi | 3c983cc8c091d5472f7cdeab1b06dc3b1902e1be | [
"Apache-2.0"
] | 28 | 2018-02-02T20:58:13.000Z | 2022-02-06T15:03:41.000Z | bindings/python/tests/test_wrapper.py | kamino/dragonffi | 3c983cc8c091d5472f7cdeab1b06dc3b1902e1be | [
"Apache-2.0"
] | 28 | 2018-02-02T12:05:55.000Z | 2021-09-16T21:05:05.000Z | # Copyright 2018 Adrien Guinet <adrien@guinet.me>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pydffi
from common import DFFITest
class WrapperTest(DFFITest):
    def test_wrapper(self):
        """Compile a C unit whose function returns a struct by value, then
        print the LLVM IR of a generated wrapper for that function."""
        CU = self.FFI.compile('''
struct A {
  int a;
  int b;
};
struct A init_A(int a, int b) {
  struct A ret = {a,b};
  return ret;
}
''')
        FTy = pydffi.typeof(CU.funcs.init_A)
        print(FTy.getWrapperLLVMStr("wrap"))
if __name__ == '__main__':
unittest.main()
| 27.297297 | 74 | 0.70099 |
import unittest
import pydffi
from common import DFFITest
class WrapperTest(DFFITest):
def test_wrapper(self):
CU = self.FFI.compile('''
struct A {
int a;
int b;
};
struct A init_A(int a, int b) {
struct A ret = {a,b};
return ret;
}
''')
FTy = pydffi.typeof(CU.funcs.init_A)
print(FTy.getWrapperLLVMStr("wrap"))
if __name__ == '__main__':
unittest.main()
| true | true |
f734b8650bc5950384be7f02d5e473f74c10c65c | 7,897 | py | Python | third_party/retdec-3.2/scripts/retdec-unpacker.py | Fimbure/icebox-1 | 0b81992a53e1b410955ca89bdb6f8169d6f2da86 | [
"MIT"
] | 521 | 2019-03-29T15:44:08.000Z | 2022-03-22T09:46:19.000Z | third_party/retdec-3.2/scripts/retdec-unpacker.py | Fimbure/icebox-1 | 0b81992a53e1b410955ca89bdb6f8169d6f2da86 | [
"MIT"
] | 30 | 2019-06-04T17:00:49.000Z | 2021-09-08T20:44:19.000Z | third_party/retdec-3.2/scripts/retdec-unpacker.py | Fimbure/icebox-1 | 0b81992a53e1b410955ca89bdb6f8169d6f2da86 | [
"MIT"
] | 99 | 2019-03-29T16:04:13.000Z | 2022-03-28T16:59:34.000Z | #!/usr/bin/env python3
"""
The script tries to unpack the given executable file by using any
of the supported unpackers, which are at present:
* generic unpacker
* upx
Required argument:
* (packed) binary file
Optional arguments:
* desired name of unpacked file
* use extended exit codes
Returns:
* 0 successfully unpacked
"""
import argparse
import os
import shutil
import sys
import importlib
config = importlib.import_module('retdec-config')
utils = importlib.import_module('retdec-utils')
CmdRunner = utils.CmdRunner
sys.stdout = utils.Unbuffered(sys.stdout)
def parse_args(_args):
    """Build the command-line parser and parse *_args*.

    :param _args: Argument list to parse (e.g. ``sys.argv[1:]``).
    :return: The populated ``argparse.Namespace``.
    """
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('file',
                            metavar='FILE',
                            help='The input file.')
    arg_parser.add_argument('-e', '--extended-exit-codes',
                            dest='extended_exit_codes',
                            action='store_true',
                            help='Use more granular exit codes than just 0/1.')
    arg_parser.add_argument('-o', '--output',
                            dest='output',
                            metavar='FILE',
                            help='Output file (default: file-unpacked).')
    arg_parser.add_argument('--max-memory',
                            dest='max_memory',
                            help='Limit the maximal memory of retdec-unpacker to N bytes.')
    arg_parser.add_argument('--max-memory-half-ram',
                            dest='max_memory_half_ram',
                            action='store_true',
                            help='Limit the maximal memory of retdec-unpacker to half of system RAM.')
    return arg_parser.parse_args(_args)
class Unpacker:
    """Drives the unpacking of one input file.

    The RET_* constants are this script's own exit codes; the
    UNPACKER_EXIT_CODE_* constants mirror the exit codes of the external
    generic unpacker tool that this class invokes.
    """
    # 0 successfully unpacked
    RET_UNPACK_OK = 0
    # 1 generic unpacker - nothing to do; upx succeeded (--extended-exit-codes only)
    RET_UNPACKER_NOTHING_TO_DO_OTHERS_OK = 1
    # 2 not packed or unknown packer
    RET_NOTHING_TO_DO = 2
    # 3 generic unpacker failed; upx succeeded (--extended-exit-codes only)
    RET_UNPACKER_FAILED_OTHERS_OK = 3
    # 4 generic unpacker failed; upx not succeeded
    RET_UNPACKER_FAILED = 4
    # 0 The generic unpacker succeeded.
    UNPACKER_EXIT_CODE_OK = 0
    # 1 There was not found matching plugin.
    UNPACKER_EXIT_CODE_NOTHING_TO_DO = 1
    # 2 At least one plugin failed at the unpacking of the file.
    UNPACKER_EXIT_CODE_UNPACKING_FAILED = 2
    # 3 Error with preprocessing of input file before unpacking.
    UNPACKER_EXIT_CODE_PREPROCESSING_ERROR = 3
    # -1 any other (unspecified) unpacker outcome
    UNPACKER_EXIT_CODE_OTHER = -1
    def __init__(self, _args):
        # Parse CLI options now; input/output paths are validated and
        # converted to absolute paths later by _check_arguments().
        self.args = parse_args(_args)
        self.input = ''
        self.output = ''
        self.log_output = False
        self.unpacker_output = ''
    def _check_arguments(self):
        """Check proper combination of input arguments.

        Validates the parsed CLI options, derives the default output name
        (``<input>-unpacked``) when none was given, and converts input and
        output to absolute paths.

        :return: True when the arguments are usable; False otherwise (an
                 error message is printed via utils.print_error).
        """
        # Check whether the input file was specified.
        if self.args.file is None:
            utils.print_error('No input file was specified')
            return False
        if not os.access(self.args.file, os.R_OK):
            utils.print_error('The input file %s does not exist or is not readable' % self.args.file)
            return False
        # Conditional initialization.
        if not self.args.output:
            self.output = self.args.file + '-unpacked'
        else:
            self.output = self.args.output
        # --max-memory must be a number; positivity is not enforced here.
        if self.args.max_memory is not None:
            try:
                int(self.args.max_memory)
            except ValueError:
                utils.print_error('Invalid value for --max-memory: %s (expected a positive integer)'
                                  % self.args.max_memory)
                return False
        # Convert to absolute paths.
        self.input = os.path.abspath(self.args.file)
        self.output = os.path.abspath(self.output)
        return True
def _unpack(self, output):
"""Try to unpack the given file.
"""
unpacker_params = [self.input, '-o', output]
if self.args.max_memory:
unpacker_params.extend(['--max-memory', self.args.max_memory])
elif self.args.max_memory_half_ram:
unpacker_params.append('--max-memory-half-ram')
cmd = CmdRunner()
self._print('\n##### Trying to unpack ' + self.input + ' into ' + output + ' by using generic unpacker...')
out, unpacker_rc, _ = cmd.run_cmd([config.UNPACKER] + unpacker_params, buffer_output=True, print_run_msg=True)
self._print(out)
if unpacker_rc == self.UNPACKER_EXIT_CODE_OK:
self._print('##### Unpacking by using generic unpacker: successfully unpacked')
return self.unpacker_output, self.RET_UNPACK_OK
elif unpacker_rc == self.UNPACKER_EXIT_CODE_NOTHING_TO_DO:
self._print('##### Unpacking by using generic unpacker: nothing to do')
else:
# Do not return -> try the next unpacker
self._print('##### Unpacking by using generic unpacker: failed')
if utils.tool_exists('upx'):
# Do not return -> try the next unpacker
# Try to unpack via UPX
self._print('\n##### Trying to unpack ' + self.input + ' into ' + output + ' by using UPX...')
out, upx_rc, _ = cmd.run_cmd(['upx', '-d', self.input, '-o', output], buffer_output=True, discard_stdout=True, print_run_msg=True)
self._print(out)
if upx_rc == 0:
self._print('##### Unpacking by using UPX: successfully unpacked')
if self.args.extended_exit_codes:
if unpacker_rc == self.UNPACKER_EXIT_CODE_NOTHING_TO_DO:
return self.unpacker_output, self.RET_UNPACKER_NOTHING_TO_DO_OTHERS_OK
elif unpacker_rc >= self.UNPACKER_EXIT_CODE_UNPACKING_FAILED:
return self.unpacker_output, self.RET_UNPACKER_FAILED_OTHERS_OK
else:
return self.unpacker_output, self.RET_UNPACK_OK
else:
# We cannot distinguish whether upx failed or the input file was
# not upx-packed
self._print('##### Unpacking by using UPX: nothing to do')
else:
self._print('##### \'upx\' not available: nothing to do')
if unpacker_rc >= self.UNPACKER_EXIT_CODE_UNPACKING_FAILED:
return self.unpacker_output, self.RET_UNPACKER_FAILED
else:
return self.unpacker_output, self.RET_NOTHING_TO_DO
def unpack_all(self, log_output=False):
self.log_output = log_output
if not self._check_arguments():
return '', self.UNPACKER_EXIT_CODE_OTHER
res_rc = self.UNPACKER_EXIT_CODE_OTHER
res_out = ''
tmp_output = self.output + '.tmp'
while True:
unpacker_out, return_code = self._unpack(tmp_output)
res_out += unpacker_out + '\n'
if return_code == self.RET_UNPACK_OK or return_code == self.RET_UNPACKER_NOTHING_TO_DO_OTHERS_OK \
or return_code == self.RET_UNPACKER_FAILED_OTHERS_OK:
res_rc = return_code
shutil.move(tmp_output, self.output)
self.input = self.output
else:
# Remove the temporary file, just in case some of the unpackers crashed
# during unpacking and left it on the disk (e.g. upx).
utils.remove_file_forced(tmp_output)
break
return (res_out, return_code) if res_rc == self.UNPACKER_EXIT_CODE_OTHER else (res_out, res_rc)
def _print(self, line=''):
if self.log_output:
self.unpacker_output = self.unpacker_output + line
else:
print(line)
if __name__ == '__main__':
unpacker = Unpacker(sys.argv[1:])
_, rc = unpacker.unpack_all()
sys.exit(rc)
| 36.391705 | 142 | 0.607193 |
import argparse
import os
import shutil
import sys
import importlib
config = importlib.import_module('retdec-config')
utils = importlib.import_module('retdec-utils')
CmdRunner = utils.CmdRunner
sys.stdout = utils.Unbuffered(sys.stdout)
def parse_args(_args):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file',
metavar='FILE',
help='The input file.')
parser.add_argument('-e', '--extended-exit-codes',
dest='extended_exit_codes',
action='store_true',
help='Use more granular exit codes than just 0/1.')
parser.add_argument('-o', '--output',
dest='output',
metavar='FILE',
help='Output file (default: file-unpacked).')
parser.add_argument('--max-memory',
dest='max_memory',
help='Limit the maximal memory of retdec-unpacker to N bytes.')
parser.add_argument('--max-memory-half-ram',
dest='max_memory_half_ram',
action='store_true',
help='Limit the maximal memory of retdec-unpacker to half of system RAM.')
return parser.parse_args(_args)
class Unpacker:
RET_UNPACK_OK = 0
RET_UNPACKER_NOTHING_TO_DO_OTHERS_OK = 1
RET_NOTHING_TO_DO = 2
RET_UNPACKER_FAILED_OTHERS_OK = 3
RET_UNPACKER_FAILED = 4
UNPACKER_EXIT_CODE_OK = 0
UNPACKER_EXIT_CODE_NOTHING_TO_DO = 1
UNPACKER_EXIT_CODE_UNPACKING_FAILED = 2
UNPACKER_EXIT_CODE_PREPROCESSING_ERROR = 3
UNPACKER_EXIT_CODE_OTHER = -1
def __init__(self, _args):
self.args = parse_args(_args)
self.input = ''
self.output = ''
self.log_output = False
self.unpacker_output = ''
def _check_arguments(self):
if self.args.file is None:
utils.print_error('No input file was specified')
return False
if not os.access(self.args.file, os.R_OK):
utils.print_error('The input file %s does not exist or is not readable' % self.args.file)
return False
if not self.args.output:
self.output = self.args.file + '-unpacked'
else:
self.output = self.args.output
if self.args.max_memory is not None:
try:
int(self.args.max_memory)
except ValueError:
utils.print_error('Invalid value for --max-memory: %s (expected a positive integer)'
% self.args.max_memory)
return False
self.input = os.path.abspath(self.args.file)
self.output = os.path.abspath(self.output)
return True
def _unpack(self, output):
unpacker_params = [self.input, '-o', output]
if self.args.max_memory:
unpacker_params.extend(['--max-memory', self.args.max_memory])
elif self.args.max_memory_half_ram:
unpacker_params.append('--max-memory-half-ram')
cmd = CmdRunner()
self._print('\n##### Trying to unpack ' + self.input + ' into ' + output + ' by using generic unpacker...')
out, unpacker_rc, _ = cmd.run_cmd([config.UNPACKER] + unpacker_params, buffer_output=True, print_run_msg=True)
self._print(out)
if unpacker_rc == self.UNPACKER_EXIT_CODE_OK:
self._print('##### Unpacking by using generic unpacker: successfully unpacked')
return self.unpacker_output, self.RET_UNPACK_OK
elif unpacker_rc == self.UNPACKER_EXIT_CODE_NOTHING_TO_DO:
self._print('##### Unpacking by using generic unpacker: nothing to do')
else:
self._print('##### Unpacking by using generic unpacker: failed')
if utils.tool_exists('upx'):
self._print('\n##### Trying to unpack ' + self.input + ' into ' + output + ' by using UPX...')
out, upx_rc, _ = cmd.run_cmd(['upx', '-d', self.input, '-o', output], buffer_output=True, discard_stdout=True, print_run_msg=True)
self._print(out)
if upx_rc == 0:
self._print('##### Unpacking by using UPX: successfully unpacked')
if self.args.extended_exit_codes:
if unpacker_rc == self.UNPACKER_EXIT_CODE_NOTHING_TO_DO:
return self.unpacker_output, self.RET_UNPACKER_NOTHING_TO_DO_OTHERS_OK
elif unpacker_rc >= self.UNPACKER_EXIT_CODE_UNPACKING_FAILED:
return self.unpacker_output, self.RET_UNPACKER_FAILED_OTHERS_OK
else:
return self.unpacker_output, self.RET_UNPACK_OK
else:
self._print('##### Unpacking by using UPX: nothing to do')
else:
self._print('##### \'upx\' not available: nothing to do')
if unpacker_rc >= self.UNPACKER_EXIT_CODE_UNPACKING_FAILED:
return self.unpacker_output, self.RET_UNPACKER_FAILED
else:
return self.unpacker_output, self.RET_NOTHING_TO_DO
def unpack_all(self, log_output=False):
self.log_output = log_output
if not self._check_arguments():
return '', self.UNPACKER_EXIT_CODE_OTHER
res_rc = self.UNPACKER_EXIT_CODE_OTHER
res_out = ''
tmp_output = self.output + '.tmp'
while True:
unpacker_out, return_code = self._unpack(tmp_output)
res_out += unpacker_out + '\n'
if return_code == self.RET_UNPACK_OK or return_code == self.RET_UNPACKER_NOTHING_TO_DO_OTHERS_OK \
or return_code == self.RET_UNPACKER_FAILED_OTHERS_OK:
res_rc = return_code
shutil.move(tmp_output, self.output)
self.input = self.output
else:
utils.remove_file_forced(tmp_output)
break
return (res_out, return_code) if res_rc == self.UNPACKER_EXIT_CODE_OTHER else (res_out, res_rc)
def _print(self, line=''):
if self.log_output:
self.unpacker_output = self.unpacker_output + line
else:
print(line)
if __name__ == '__main__':
unpacker = Unpacker(sys.argv[1:])
_, rc = unpacker.unpack_all()
sys.exit(rc)
| true | true |
f734b8e77edfbc9b357701255382e772aa6650eb | 4,072 | py | Python | data/get_dataset.py | KristinaRay/english-arabic-nmt-bot | 1e0baddc81b829b3ee1abe95143cdef5c1206dd2 | [
"MIT"
] | 1 | 2022-01-31T11:48:01.000Z | 2022-01-31T11:48:01.000Z | data/get_dataset.py | KristinaRay/english-arabic-nmt-bot | 1e0baddc81b829b3ee1abe95143cdef5c1206dd2 | [
"MIT"
] | null | null | null | data/get_dataset.py | KristinaRay/english-arabic-nmt-bot | 1e0baddc81b829b3ee1abe95143cdef5c1206dd2 | [
"MIT"
] | null | null | null | import os
import tqdm
import numpy as np
import requests
import youtokentome as yttm
from argparse import ArgumentParser
from zipfile import ZipFile
from config import *
from data.preprocessing import *
from utils import *
DATA_FILE_PATH = f'{DATA_PATH}/data.zip'
DATA_URL = 'https://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/moses/ar-en.txt.zip'
TRG_FILE_NAME = 'OpenSubtitles.ar-en.ar'
SRC_FILE_NAME = 'OpenSubtitles.ar-en.en'
TRG_SAMPLE_FILE_PATH = f'{DATA_PATH}/ar.txt'
SRC_SAMPLE_FILE_PATH = f'{DATA_PATH}/en.txt'
TRG_ORIG_FILE_PATH = f'{DATA_PATH}/{TRG_FILE_NAME}'
SRC_ORIG_FILE_PATH = f'{DATA_PATH}/{SRC_FILE_NAME}'
def fetch_dataset(data_url, data_path, data_file_path):
""" Download data """
if not os.path.exists(data_path):
os.makedirs(data_path)
print("Dataset not found, downloading...")
response = requests.get(data_url, stream=True)
filename = data_url.split("/")[-1]
total_size_in_bytes= int(response.headers.get('content-length', 0))
progress_bar = tqdm.tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
with open(data_file_path, 'wb') as file:
for data in response.iter_content(1024):
progress_bar.update(len(data))
file.write(data)
progress_bar.close()
log("Download complete")
log("Extracting...")
zip = ZipFile(DATA_FILE_PATH, "r")
zip.extract(TRG_FILE_NAME, DATA_PATH)
zip.extract(SRC_FILE_NAME, DATA_PATH)
zip.close()
log("Extracting complete")
num_lines_ar = sum(1 for line in open(TRG_ORIG_FILE_PATH)) # number of lines in arabic file
num_lines_en = sum(1 for line in open(SRC_ORIG_FILE_PATH)) # number of lines in english file
assert num_lines_ar == num_lines_en, "Lost some data"
assert os.path.exists(data_path)
else:
log('Datasets are found')
def create_sample(sample_size, max_text_len):
"""
Clean data sample and remove duplicates
"""
log('Creating txt files for both languages...')
num_lines_ar = sum(1 for line in open(TRG_ORIG_FILE_PATH))
sample_data_size = 2 * sample_size
chosen_lines = set(np.random.choice(np.arange(num_lines_ar), size=sample_data_size, replace=False))
en_sub = open(SRC_ORIG_FILE_PATH, "r")
ar_sub = open(TRG_ORIG_FILE_PATH, "r")
unique_pairs = set()
with open(SRC_TXT_FILE_PATH, "a+") as en, open(TRG_TXT_FILE_PATH, "a+") as ar:
for idx, (en_line, ar_line) in enumerate(zip(en_sub, ar_sub)):
if idx in chosen_lines:
src = clean_en_text(en_line)
trg = clean_ar_text(ar_line)
if 2 < len(src) <= max_text_len and 2 < len(trg) < max_text_len:
if ((src + trg) not in unique_pairs and (len(unique_pairs) < sample_size)):
en.write(src)
ar.write(trg)
unique_pairs.add((src + trg))
elif len(unique_pairs) >= sample_size:
break
assert len(unique_pairs) == sample_size, "Not enough data"
en_sub.close()
ar_sub.close()
en.close()
ar.close()
log("Done")
log(f'Number of unique pairs of sentences: {len(unique_pairs)}')
def main():
fetch_dataset(DATA_URL, DATA_PATH, DATA_FILE_PATH)
parser = ArgumentParser()
parser.add_argument("--sample_size", required=True, type=int, help='Number of the sentence pairs to prepare for the training')
parser.add_argument("--max_text_len", required=True, type=int, help='Max character length of the sentences')
args = parser.parse_args()
create_sample(args.sample_size, args.max_text_len)
log('Training tokenizers...')
yttm.BPE.train(data=TRG_TXT_FILE_PATH, vocab_size=TRG_VOCAB_SIZE, model=TRG_TOKENIZER_PATH)
yttm.BPE.train(data=SRC_TXT_FILE_PATH, vocab_size=SRC_VOCAB_SIZE, model=SRC_TOKENIZER_PATH)
log("Done")
if __name__ == "__main__":
main()
| 37.357798 | 130 | 0.653242 | import os
import tqdm
import numpy as np
import requests
import youtokentome as yttm
from argparse import ArgumentParser
from zipfile import ZipFile
from config import *
from data.preprocessing import *
from utils import *
DATA_FILE_PATH = f'{DATA_PATH}/data.zip'
DATA_URL = 'https://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/moses/ar-en.txt.zip'
TRG_FILE_NAME = 'OpenSubtitles.ar-en.ar'
SRC_FILE_NAME = 'OpenSubtitles.ar-en.en'
TRG_SAMPLE_FILE_PATH = f'{DATA_PATH}/ar.txt'
SRC_SAMPLE_FILE_PATH = f'{DATA_PATH}/en.txt'
TRG_ORIG_FILE_PATH = f'{DATA_PATH}/{TRG_FILE_NAME}'
SRC_ORIG_FILE_PATH = f'{DATA_PATH}/{SRC_FILE_NAME}'
def fetch_dataset(data_url, data_path, data_file_path):
if not os.path.exists(data_path):
os.makedirs(data_path)
print("Dataset not found, downloading...")
response = requests.get(data_url, stream=True)
filename = data_url.split("/")[-1]
total_size_in_bytes= int(response.headers.get('content-length', 0))
progress_bar = tqdm.tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
with open(data_file_path, 'wb') as file:
for data in response.iter_content(1024):
progress_bar.update(len(data))
file.write(data)
progress_bar.close()
log("Download complete")
log("Extracting...")
zip = ZipFile(DATA_FILE_PATH, "r")
zip.extract(TRG_FILE_NAME, DATA_PATH)
zip.extract(SRC_FILE_NAME, DATA_PATH)
zip.close()
log("Extracting complete")
num_lines_ar = sum(1 for line in open(TRG_ORIG_FILE_PATH))
num_lines_en = sum(1 for line in open(SRC_ORIG_FILE_PATH))
assert num_lines_ar == num_lines_en, "Lost some data"
assert os.path.exists(data_path)
else:
log('Datasets are found')
def create_sample(sample_size, max_text_len):
log('Creating txt files for both languages...')
num_lines_ar = sum(1 for line in open(TRG_ORIG_FILE_PATH))
sample_data_size = 2 * sample_size
chosen_lines = set(np.random.choice(np.arange(num_lines_ar), size=sample_data_size, replace=False))
en_sub = open(SRC_ORIG_FILE_PATH, "r")
ar_sub = open(TRG_ORIG_FILE_PATH, "r")
unique_pairs = set()
with open(SRC_TXT_FILE_PATH, "a+") as en, open(TRG_TXT_FILE_PATH, "a+") as ar:
for idx, (en_line, ar_line) in enumerate(zip(en_sub, ar_sub)):
if idx in chosen_lines:
src = clean_en_text(en_line)
trg = clean_ar_text(ar_line)
if 2 < len(src) <= max_text_len and 2 < len(trg) < max_text_len:
if ((src + trg) not in unique_pairs and (len(unique_pairs) < sample_size)):
en.write(src)
ar.write(trg)
unique_pairs.add((src + trg))
elif len(unique_pairs) >= sample_size:
break
assert len(unique_pairs) == sample_size, "Not enough data"
en_sub.close()
ar_sub.close()
en.close()
ar.close()
log("Done")
log(f'Number of unique pairs of sentences: {len(unique_pairs)}')
def main():
fetch_dataset(DATA_URL, DATA_PATH, DATA_FILE_PATH)
parser = ArgumentParser()
parser.add_argument("--sample_size", required=True, type=int, help='Number of the sentence pairs to prepare for the training')
parser.add_argument("--max_text_len", required=True, type=int, help='Max character length of the sentences')
args = parser.parse_args()
create_sample(args.sample_size, args.max_text_len)
log('Training tokenizers...')
yttm.BPE.train(data=TRG_TXT_FILE_PATH, vocab_size=TRG_VOCAB_SIZE, model=TRG_TOKENIZER_PATH)
yttm.BPE.train(data=SRC_TXT_FILE_PATH, vocab_size=SRC_VOCAB_SIZE, model=SRC_TOKENIZER_PATH)
log("Done")
if __name__ == "__main__":
main()
| true | true |
f734b98f59acd1949a167b610f57a6d37d0e9a9e | 584 | py | Python | codes/DBN.py | NoSRPKU/GradD | c74af028d4a93e2f645a61316a5c339ecf690bf1 | [
"MIT"
] | null | null | null | codes/DBN.py | NoSRPKU/GradD | c74af028d4a93e2f645a61316a5c339ecf690bf1 | [
"MIT"
] | null | null | null | codes/DBN.py | NoSRPKU/GradD | c74af028d4a93e2f645a61316a5c339ecf690bf1 | [
"MIT"
] | null | null | null | import numpy
import theano
import theano.tensor as T
from deeplearning import rbm
class DBN():
def __init__(self, vsize=None, hsizes=[], lr=None, bsize=10, seed=123):
assert vsize and hsizes and lr
input = T.dmatrix('global_input')
self.layers = []
for hsize in hsizes:
r = rbm.RBM(input=input, vsize=vsize, hsize=hsize, bsize=bsize,
lr=lr, seed=seed)
self.layers.append(r)
# configure inputs for subsequent layer
input = self.layers[-1].hid
vsize = hsize
| 23.36 | 75 | 0.583904 | import numpy
import theano
import theano.tensor as T
from deeplearning import rbm
class DBN():
def __init__(self, vsize=None, hsizes=[], lr=None, bsize=10, seed=123):
assert vsize and hsizes and lr
input = T.dmatrix('global_input')
self.layers = []
for hsize in hsizes:
r = rbm.RBM(input=input, vsize=vsize, hsize=hsize, bsize=bsize,
lr=lr, seed=seed)
self.layers.append(r)
input = self.layers[-1].hid
vsize = hsize
| true | true |
f734ba16367e8b66e709a083cd83399a14c6eb48 | 1,169 | py | Python | Model_codebase_2_flask.py | anmolmore/Chatbot-for-COVID-19-FAQ-using-Dialogflow | f80670e9ee67e18c790da85d49e9c9617753c6f8 | [
"MIT"
] | 1 | 2021-01-02T06:37:41.000Z | 2021-01-02T06:37:41.000Z | Model_codebase_2_flask.py | anmolmore/Chatbot-for-COVID-19-FAQ-using-Dialogflow | f80670e9ee67e18c790da85d49e9c9617753c6f8 | [
"MIT"
] | null | null | null | Model_codebase_2_flask.py | anmolmore/Chatbot-for-COVID-19-FAQ-using-Dialogflow | f80670e9ee67e18c790da85d49e9c9617753c6f8 | [
"MIT"
] | null | null | null | #11915010 Raghu Punnamraju
#11915043 Anmol More
#11915001 Sriganesh Balamurugan
#11915052 Kapil Bindal
import pandas as pd
from ast import literal_eval
from cdqa.utils.filters import filter_paragraphs
from cdqa.utils.download import download_model, download_bnpp_data
from cdqa.pipeline.cdqa_sklearn import QAPipeline
#read the cleaned dataset and just take question and context for our model
df = pd.read_csv('data/dataset_collected.csv', usecols=['question', 'context'])
#convert paragraphs to a list
df['paragraphs'] = df[df.columns[1:]].apply(
lambda x: x.dropna().values.tolist(),
axis=1)
df.rename(columns={"question": "title"}, inplace=True)
df.drop(columns='context', inplace=True)
df.to_csv('df_corona.csv', index=False)
#use a lighter pipleline model to build pipeline on top of it
cdqa_pipeline = QAPipeline(reader='models/distilbert_qa.joblib')
cdqa_pipeline.fit_retriever(df=df)
print('Welcome to Corona Chatbot ! How can I help you ? ')
print('Press enter twice to quit')
while True:
query = input()
prediction = cdqa_pipeline.predict(query=query)
print('Query : {}\n'.format(query))
print('Reply from Bot: {}\n'.format(prediction[0])) | 32.472222 | 79 | 0.763901 |
import pandas as pd
from ast import literal_eval
from cdqa.utils.filters import filter_paragraphs
from cdqa.utils.download import download_model, download_bnpp_data
from cdqa.pipeline.cdqa_sklearn import QAPipeline
df = pd.read_csv('data/dataset_collected.csv', usecols=['question', 'context'])
df['paragraphs'] = df[df.columns[1:]].apply(
lambda x: x.dropna().values.tolist(),
axis=1)
df.rename(columns={"question": "title"}, inplace=True)
df.drop(columns='context', inplace=True)
df.to_csv('df_corona.csv', index=False)
cdqa_pipeline = QAPipeline(reader='models/distilbert_qa.joblib')
cdqa_pipeline.fit_retriever(df=df)
print('Welcome to Corona Chatbot ! How can I help you ? ')
print('Press enter twice to quit')
while True:
query = input()
prediction = cdqa_pipeline.predict(query=query)
print('Query : {}\n'.format(query))
print('Reply from Bot: {}\n'.format(prediction[0])) | true | true |
f734bb4107b6567f1e96ae7b325230b167696c84 | 1,053 | py | Python | best_single_model/focal_loss.py | hellopikaqiu/AIchallenger_MachineReadingComprehension | 03c8d4ab60f6ac9c7f777fd2c932cc01300b5c42 | [
"MIT"
] | 94 | 2018-12-17T09:12:48.000Z | 2021-03-15T02:56:22.000Z | best_single_model/focal_loss.py | 1120327383/AIchallenger2018_MachineReadingComprehension | 03c8d4ab60f6ac9c7f777fd2c932cc01300b5c42 | [
"MIT"
] | 4 | 2019-03-10T03:20:18.000Z | 2020-06-02T05:10:46.000Z | best_single_model/focal_loss.py | 1120327383/AIchallenger2018_MachineReadingComprehension | 03c8d4ab60f6ac9c7f777fd2c932cc01300b5c42 | [
"MIT"
] | 24 | 2018-12-17T09:13:49.000Z | 2020-01-15T09:14:41.000Z | """
AI Challenger观点型问题阅读理解
focal_loss.py
@author: yuhaitao
"""
# -*- coding:utf-8 -*-
import tensorflow as tf
def sparse_focal_loss(logits, labels, gamma=2):
"""
Computer focal loss for multi classification
Args:
labels: A int32 tensor of shape [batch_size].
logits: A float32 tensor of shape [batch_size,num_classes].
gamma: A scalar for focal loss gamma hyper-parameter.
Returns:
A tensor of the same shape as `lables`
"""
with tf.name_scope("focal_loss"):
y_pred = tf.nn.softmax(logits, dim=-1) # [batch_size,num_classes]
labels = tf.one_hot(labels, depth=y_pred.shape[1])
L = -labels * ((1 - y_pred)**gamma) * tf.log(y_pred)
L = tf.reduce_sum(L, axis=1)
return L
'''
if __name__ == '__main__':
labels = tf.constant([0, 1], name="labels")
logits = tf.constant([[0.7, 0.2, 0.1], [0.6, 0.1, 0.3]], name="logits")
a = tf.reduce_mean(sparse_focal_loss(logits, tf.stop_gradient(labels)))
with tf.Session() as sess:
print(sess.run(a))'''
| 29.25 | 75 | 0.62868 |
import tensorflow as tf
def sparse_focal_loss(logits, labels, gamma=2):
with tf.name_scope("focal_loss"):
y_pred = tf.nn.softmax(logits, dim=-1)
labels = tf.one_hot(labels, depth=y_pred.shape[1])
L = -labels * ((1 - y_pred)**gamma) * tf.log(y_pred)
L = tf.reduce_sum(L, axis=1)
return L
| true | true |
f734bc2a2a57c1eac2aa84d6d7661a74990feb17 | 72,645 | py | Python | python/ccxt/bitmex.py | Joukahainen/ccxt | 82823a85b96cee336853f0deb353474df2122b88 | [
"MIT"
] | 2 | 2022-03-10T15:21:49.000Z | 2022-03-10T15:22:01.000Z | python/ccxt/bitmex.py | alimogh/ccxt | 518ea1a6d212605aa19deed74991bc525470e5c9 | [
"MIT"
] | 4 | 2021-12-14T06:19:10.000Z | 2022-03-19T02:39:29.000Z | python/ccxt/bitmex.py | alimogh/ccxt | 518ea1a6d212605aa19deed74991bc525470e5c9 | [
"MIT"
] | 2 | 2022-03-08T20:43:26.000Z | 2022-03-14T19:28:27.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bitmex(Exchange):
    def describe(self):
        """Return the static exchange metadata merged over the base Exchange description.

        Declares the exchange id, supported unified methods('has'), REST endpoint
        routing('api'), error-message-to-exception mappings('exceptions'), and
        exchange-specific options.  No network calls are made here.
        """
        return self.deep_extend(super(bitmex, self).describe(), {
            'id': 'bitmex',
            'name': 'BitMEX',
            'countries': ['SC'],  # Seychelles
            'version': 'v1',
            'userAgent': None,
            'rateLimit': 2000,  # milliseconds between requests
            'pro': True,
            'has': {
                'CORS': None,
                'spot': False,
                'margin': False,
                'swap': None,  # has but not fully implemented
                'future': None,  # has but not fully implemented
                'option': None,  # has but not fully implemented
                'cancelAllOrders': True,
                'cancelOrder': True,
                'cancelOrders': True,
                'createOrder': True,
                'editOrder': True,
                'fetchBalance': True,
                'fetchClosedOrders': True,
                'fetchIndexOHLCV': False,
                'fetchLedger': True,
                'fetchLeverageTiers': False,
                'fetchMarkets': True,
                'fetchMarkOHLCV': False,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrders': True,
                'fetchPositions': True,
                'fetchPremiumIndexOHLCV': False,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTrades': True,
                'fetchTransactions': 'emulated',
                'withdraw': True,
            },
            'timeframes': {
                '1m': '1m',
                '5m': '5m',
                '1h': '1h',
                '1d': '1d',
            },
            'urls': {
                # testnet endpoints, selected when the 'test' mode is enabled
                'test': {
                    'public': 'https://testnet.bitmex.com',
                    'private': 'https://testnet.bitmex.com',
                },
                'logo': 'https://user-images.githubusercontent.com/1294454/27766319-f653c6e6-5ed4-11e7-933d-f0bc3699ae8f.jpg',
                'api': {
                    'public': 'https://www.bitmex.com',
                    'private': 'https://www.bitmex.com',
                },
                'www': 'https://www.bitmex.com',
                'doc': [
                    'https://www.bitmex.com/app/apiOverview',
                    'https://github.com/BitMEX/api-connectors/tree/master/official-http',
                ],
                'fees': 'https://www.bitmex.com/app/fees',
                'referral': 'https://www.bitmex.com/register/upZpOX',
            },
            # endpoint paths are turned into implicit methods by the base class,
            # e.g. 'instrument/activeAndIndices' -> publicGetInstrumentActiveAndIndices
            'api': {
                'public': {
                    'get': [
                        'announcement',
                        'announcement/urgent',
                        'funding',
                        'instrument',
                        'instrument/active',
                        'instrument/activeAndIndices',
                        'instrument/activeIntervals',
                        'instrument/compositeIndex',
                        'instrument/indices',
                        'insurance',
                        'leaderboard',
                        'liquidation',
                        'orderBook',
                        'orderBook/L2',
                        'quote',
                        'quote/bucketed',
                        'schema',
                        'schema/websocketHelp',
                        'settlement',
                        'stats',
                        'stats/history',
                        'trade',
                        'trade/bucketed',
                    ],
                },
                'private': {
                    'get': [
                        'apiKey',
                        'chat',
                        'chat/channels',
                        'chat/connected',
                        'execution',
                        'execution/tradeHistory',
                        'notification',
                        'order',
                        'position',
                        'user',
                        'user/affiliateStatus',
                        'user/checkReferralCode',
                        'user/commission',
                        'user/depositAddress',
                        'user/executionHistory',
                        'user/margin',
                        'user/minWithdrawalFee',
                        'user/wallet',
                        'user/walletHistory',
                        'user/walletSummary',
                    ],
                    'post': [
                        'apiKey',
                        'apiKey/disable',
                        'apiKey/enable',
                        'chat',
                        'order',
                        'order/bulk',
                        'order/cancelAllAfter',
                        'order/closePosition',
                        'position/isolate',
                        'position/leverage',
                        'position/riskLimit',
                        'position/transferMargin',
                        'user/cancelWithdrawal',
                        'user/confirmEmail',
                        'user/confirmEnableTFA',
                        'user/confirmWithdrawal',
                        'user/disableTFA',
                        'user/logout',
                        'user/logoutAll',
                        'user/preferences',
                        'user/requestEnableTFA',
                        'user/requestWithdrawal',
                    ],
                    'put': [
                        'order',
                        'order/bulk',
                        'user',
                    ],
                    'delete': [
                        'apiKey',
                        'order',
                        'order/all',
                    ],
                },
            },
            # maps BitMEX error messages to unified ccxt exception classes,
            # 'exact' requires a full match, 'broad' a substring match
            'exceptions': {
                'exact': {
                    'Invalid API Key.': AuthenticationError,
                    'This key is disabled.': PermissionDenied,
                    'Access Denied': PermissionDenied,
                    'Duplicate clOrdID': InvalidOrder,
                    'orderQty is invalid': InvalidOrder,
                    'Invalid price': InvalidOrder,
                    'Invalid stopPx for ordType': InvalidOrder,
                },
                'broad': {
                    'Signature not valid': AuthenticationError,
                    'overloaded': ExchangeNotAvailable,
                    'Account has insufficient Available Balance': InsufficientFunds,
                    'Service unavailable': ExchangeNotAvailable,  # {"error":{"message":"Service unavailable","name":"HTTPError"}}
                    'Server Error': ExchangeError,  # {"error":{"message":"Server Error","name":"HTTPError"}}
                    'Unable to cancel order due to existing state': InvalidOrder,
                },
            },
            'precisionMode': TICK_SIZE,
            'options': {
                # https://blog.bitmex.com/api_announcement/deprecation-of-api-nonce-header/
                # https://github.com/ccxt/ccxt/issues/4789
                'api-expires': 5,  # in seconds
                'fetchOHLCVOpenTimestamp': True,
            },
            # exchange-specific currency codes translated to unified ones
            'commonCurrencies': {
                'USDt': 'USDT',
                'XBt': 'BTC',
                'XBT': 'BTC',
            },
        })
    def fetch_markets(self, params={}):
        """Retrieve all active instruments and indices from BitMEX and parse them
        into unified ccxt market structures.

        :param dict params: extra parameters passed through to the endpoint
        :returns list: a list of unified market dicts(swap, future, prediction and
            index markets; indices are marked inactive)
        """
        response = self.publicGetInstrumentActiveAndIndices(params)
        #
        #  {
        #    "symbol": "LTCUSDT",
        #    "rootSymbol": "LTC",
        #    "state": "Open",
        #    "typ": "FFWCSX",
        #    "listing": "2021-11-10T04:00:00.000Z",
        #    "front": "2021-11-10T04:00:00.000Z",
        #    "expiry": null,
        #    "settle": null,
        #    "listedSettle": null,
        #    "relistInterval": null,
        #    "inverseLeg": "",
        #    "sellLeg": "",
        #    "buyLeg": "",
        #    "optionStrikePcnt": null,
        #    "optionStrikeRound": null,
        #    "optionStrikePrice": null,
        #    "optionMultiplier": null,
        #    "positionCurrency": "LTC",
        #    "underlying": "LTC",
        #    "quoteCurrency": "USDT",
        #    "underlyingSymbol": "LTCT=",
        #    "reference": "BMEX",
        #    "referenceSymbol": ".BLTCT",
        #    "calcInterval": null,
        #    "publishInterval": null,
        #    "publishTime": null,
        #    "maxOrderQty": 1000000000,
        #    "maxPrice": 1000000,
        #    "lotSize": 1000,
        #    "tickSize": 0.01,
        #    "multiplier": 100,
        #    "settlCurrency": "USDt",
        #    "underlyingToPositionMultiplier": 10000,
        #    "underlyingToSettleMultiplier": null,
        #    "quoteToSettleMultiplier": 1000000,
        #    "isQuanto": False,
        #    "isInverse": False,
        #    "initMargin": 0.03,
        #    "maintMargin": 0.015,
        #    "riskLimit": 1000000000000,
        #    "riskStep": 1000000000000,
        #    "limit": null,
        #    "capped": False,
        #    "taxed": True,
        #    "deleverage": True,
        #    "makerFee": -0.0001,
        #    "takerFee": 0.0005,
        #    "settlementFee": 0,
        #    "insuranceFee": 0,
        #    "fundingBaseSymbol": ".LTCBON8H",
        #    "fundingQuoteSymbol": ".USDTBON8H",
        #    "fundingPremiumSymbol": ".LTCUSDTPI8H",
        #    "fundingTimestamp": "2022-01-14T20:00:00.000Z",
        #    "fundingInterval": "2000-01-01T08:00:00.000Z",
        #    "fundingRate": 0.0001,
        #    "indicativeFundingRate": 0.0001,
        #    "rebalanceTimestamp": null,
        #    "rebalanceInterval": null,
        #    "openingTimestamp": "2022-01-14T17:00:00.000Z",
        #    "closingTimestamp": "2022-01-14T18:00:00.000Z",
        #    "sessionInterval": "2000-01-01T01:00:00.000Z",
        #    "prevClosePrice": 138.511,
        #    "limitDownPrice": null,
        #    "limitUpPrice": null,
        #    "bankruptLimitDownPrice": null,
        #    "bankruptLimitUpPrice": null,
        #    "prevTotalVolume": 12699024000,
        #    "totalVolume": 12702160000,
        #    "volume": 3136000,
        #    "volume24h": 114251000,
        #    "prevTotalTurnover": 232418052349000,
        #    "totalTurnover": 232463353260000,
        #    "turnover": 45300911000,
        #    "turnover24h": 1604331340000,
        #    "homeNotional24h": 11425.1,
        #    "foreignNotional24h": 1604331.3400000003,
        #    "prevPrice24h": 135.48,
        #    "vwap": 140.42165,
        #    "highPrice": 146.42,
        #    "lowPrice": 135.08,
        #    "lastPrice": 144.36,
        #    "lastPriceProtected": 144.36,
        #    "lastTickDirection": "MinusTick",
        #    "lastChangePcnt": 0.0655,
        #    "bidPrice": 143.75,
        #    "midPrice": 143.855,
        #    "askPrice": 143.96,
        #    "impactBidPrice": 143.75,
        #    "impactMidPrice": 143.855,
        #    "impactAskPrice": 143.96,
        #    "hasLiquidity": True,
        #    "openInterest": 38103000,
        #    "openValue": 547963053300,
        #    "fairMethod": "FundingRate",
        #    "fairBasisRate": 0.1095,
        #    "fairBasis": 0.004,
        #    "fairPrice": 143.811,
        #    "markMethod": "FairPrice",
        #    "markPrice": 143.811,
        #    "indicativeTaxRate": null,
        #    "indicativeSettlePrice": 143.807,
        #    "optionUnderlyingPrice": null,
        #    "settledPriceAdjustmentRate": null,
        #    "settledPrice": null,
        #    "timestamp": "2022-01-14T17:49:55.000Z"
        #  }
        #
        result = []
        for i in range(0, len(response)):
            market = response[i]
            id = self.safe_string(market, 'symbol')
            baseId = self.safe_string(market, 'underlying')
            quoteId = self.safe_string(market, 'quoteCurrency')
            settleId = self.safe_string(market, 'settlCurrency')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            settle = self.safe_currency_code(settleId)
            # perpetual swaps are listed under the bare base+quote concatenation,
            # e.g. 'XBTUSD', so an exact match on the id identifies a swap
            basequote = baseId + quoteId
            swap = (id == basequote)
            # 'positionCurrency' may be empty("", as Bitmex currently returns for ETHUSD)
            # so let's take the quote currency first and then adjust if needed
            type = None
            future = False
            prediction = False
            index = False
            symbol = base + '/' + quote + ':' + settle
            expiryDatetime = self.safe_string(market, 'expiry')
            expiry = self.parse8601(expiryDatetime)
            inverse = self.safe_value(market, 'isInverse')
            status = self.safe_string(market, 'state')
            active = status != 'Unlisted'
            if swap:
                type = 'swap'
            elif id.find('B_') >= 0:
                # ids containing 'B_' denote prediction markets, keep the raw id as the symbol
                prediction = True
                type = 'prediction'
                symbol = id
            elif expiry is not None:
                # dated contracts are futures, suffix the symbol with the YYMMDD expiry
                future = True
                type = 'future'
                symbol = symbol + '-' + self.yymmdd(expiry)
            else:
                # anything else is a non-tradable index, keep the raw id and mark inactive
                index = True
                type = 'index'
                symbol = id
                active = False
            positionId = self.safe_string_2(market, 'positionCurrency', 'quoteCurrency')
            position = self.safe_currency_code(positionId)
            # when the position is denominated in the quote currency, maxOrderQty
            # bounds the cost, otherwise it bounds the amount(see 'limits' below)
            positionIsQuote = (position == quote)
            maxOrderQty = self.safe_number(market, 'maxOrderQty')
            contract = not index
            # maximum leverage is the reciprocal of the initial margin requirement
            initMargin = self.safe_string(market, 'initMargin', '1')
            maxLeverage = self.parse_number(Precise.string_div('1', initMargin))
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'settle': settle,
                'baseId': baseId,
                'quoteId': quoteId,
                'settleId': settleId,
                'type': type,
                'spot': False,
                'margin': False,
                'swap': swap,
                'future': future,
                'option': False,
                'prediction': prediction,
                'index': index,
                'active': active,
                'contract': contract,
                'linear': not inverse if contract else None,
                'inverse': inverse if contract else None,
                'taker': self.safe_number(market, 'takerFee'),
                'maker': self.safe_number(market, 'makerFee'),
                'contractSize': self.safe_number(market, 'multiplier'),
                'expiry': expiry,
                'expiryDatetime': expiryDatetime,
                'strike': self.safe_number(market, 'optionStrikePrice'),
                'optionType': None,
                'precision': {
                    'amount': self.safe_number(market, 'lotSize'),
                    'price': self.safe_number(market, 'tickSize'),
                },
                'limits': {
                    'leverage': {
                        'min': self.parse_number('1') if contract else None,
                        'max': maxLeverage if contract else None,
                    },
                    'amount': {
                        'min': None,
                        'max': None if positionIsQuote else maxOrderQty,
                    },
                    'price': {
                        'min': None,
                        'max': self.safe_number(market, 'maxPrice'),
                    },
                    'cost': {
                        'min': None,
                        'max': maxOrderQty if positionIsQuote else None,
                    },
                },
                'info': market,
            })
        return result
def parse_balance(self, response):
    """Convert a raw user/margin response(a list of per-currency margin
    rows) into a unified ccxt balance structure.

    Each row carries 'currency'(e.g. "XBt"), 'availableMargin' and
    'marginBalance' among many other margin fields.
    """
    parsed = {'info': response}
    for entry in response:
        code = self.safe_currency_code(self.safe_string(entry, 'currency'))
        available = self.safe_string(entry, 'availableMargin')
        margin = self.safe_string(entry, 'marginBalance')
        # BitMEX reports BTC amounts in XBt(satoshi); scale to whole coins
        if code == 'BTC':
            available = Precise.string_div(available, '1e8')
            margin = Precise.string_div(margin, '1e8')
        account = self.account()
        account['free'] = available
        account['total'] = margin
        parsed[code] = account
    return self.safe_balance(parsed)
def fetch_balance(self, params={}):
    """Query margin balances for all currencies and return a unified
    balance dict via parse_balance.

    The endpoint returns one row per currency, e.g.
    {"account":1455728, "currency":"XBt", "walletBalance":263542,
     "marginBalance":263542, "availableMargin":263542, ...}
    """
    self.load_markets()
    request = {'currency': 'all'}
    response = self.privateGetUserMargin(self.extend(request, params))
    return self.parse_balance(response)
def fetch_order_book(self, symbol, limit=None, params={}):
    """Fetch the L2 order book for *symbol*; *limit* caps the depth per side."""
    self.load_markets()
    market = self.market(symbol)
    request = {'symbol': market['id']}
    if limit is not None:
        request['depth'] = limit
    response = self.publicGetOrderBookL2(self.extend(request, params))
    orderbook = {
        'symbol': symbol,
        'bids': [],
        'asks': [],
        'timestamp': None,
        'datetime': None,
        'nonce': None,
    }
    for entry in response:
        price = self.safe_number(entry, 'price')
        # the exchange sometimes returns entries with a null price - skip them
        # https://github.com/ccxt/ccxt/issues/4926
        # https://github.com/ccxt/ccxt/issues/4927
        if price is None:
            continue
        size = self.safe_number(entry, 'size')
        bucket = 'asks' if (entry['side'] == 'Sell') else 'bids'
        orderbook[bucket].append([price, size])
    orderbook['bids'] = self.sort_by(orderbook['bids'], 0, True)
    orderbook['asks'] = self.sort_by(orderbook['asks'], 0)
    return orderbook
def fetch_order(self, id, symbol=None, params={}):
    """Fetch a single order by its exchange-assigned id.

    Delegates to fetch_orders with a server-side orderID filter.
    Raises OrderNotFound unless exactly one order matches.
    """
    # note: the request key is still 'filter'(what the API expects) but the
    # local variable no longer shadows the Python builtin `filter`
    request = {
        'filter': {
            'orderID': id,
        },
    }
    response = self.fetch_orders(symbol, None, None, self.deep_extend(request, params))
    numResults = len(response)
    if numResults == 1:
        return response[0]
    raise OrderNotFound(self.id + ': The order ' + id + ' not found.')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch orders, optionally restricted to *symbol*, *since*(ms) and *limit*."""
    self.load_markets()
    market = None if (symbol is None) else self.market(symbol)
    request = {}
    if market is not None:
        request['symbol'] = market['id']
    if since is not None:
        request['startTime'] = self.iso8601(since)
    if limit is not None:
        request['count'] = limit
    request = self.deep_extend(request, params)
    # python's urlencode mangles nested dicts: {"filter": {"open": True}}
    # becomes "filter={'open':+True}", which BitMEX rejects - therefore the
    # nested filter is JSON-encoded by hand before the request is sent
    if 'filter' in request:
        request['filter'] = self.json(request['filter'])
    response = self.privateGetOrder(request)
    return self.parse_orders(response, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch only currently-open orders by adding a server-side open filter."""
    openFilter = {'filter': {'open': True}}
    return self.fetch_orders(symbol, since, limit, self.deep_extend(openFilter, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch closed orders.

    Filtering happens client-side because BitMEX rejects 'open': False
    in the server-side filter.
    """
    allOrders = self.fetch_orders(symbol, since, limit, params)
    return self.filter_by(allOrders, 'status', 'closed')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
    """Fetch the account's own executions via GET /execution/tradeHistory.

    Each response row is a FIX-style execution report carrying execID,
    orderID, clOrdID, symbol, side, lastQty, lastPx, orderQty, price,
    commission, execComm, execCost, execType, ordType, transactTime,
    timestamp and related fields; they are parsed by parse_trade.
    """
    self.load_markets()
    market = None if (symbol is None) else self.market(symbol)
    request = {}
    if market is not None:
        request['symbol'] = market['id']
    if since is not None:
        request['startTime'] = self.iso8601(since)
    if limit is not None:
        request['count'] = limit
    request = self.deep_extend(request, params)
    # urlencode garbles nested dicts({"filter": {"open": True}} turns into
    # "filter={'open':+True}"), which BitMEX rejects - JSON-encode the
    # nested filter by hand instead
    if 'filter' in request:
        request['filter'] = self.json(request['filter'])
    response = self.privateGetExecutionTradeHistory(request)
    return self.parse_trades(response, market, since, limit)
def parse_ledger_entry_type(self, type):
    """Map a BitMEX transactType onto a unified ledger entry type.

    Unknown types are passed through unchanged.
    """
    types = {
        'Deposit': 'transaction',
        'Withdrawal': 'transaction',
        'RealisedPNL': 'margin',
        'UnrealisedPNL': 'margin',
        'Transfer': 'transfer',
        'AffiliatePayout': 'referral',
    }
    return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
    """Convert one wallet-history row into a unified ledger entry.

    A row looks like:
        {transactID, account, currency: "XBt",
         transactType: "Withdrawal" | "Deposit" | "RealisedPNL" | ...,
         amount: -1000000, fee: 300000,
         transactStatus: "Completed" | "Canceled" | "Pending",
         address, tx, text, transactTime, walletBalance(balance after),
         marginBalance, timestamp}

    NOTE: BitMEX also returns the pending unrealized-pnl row from the
    wallet history endpoint with null transactTime/timestamp and status
    "Pending" - it is not really part of the history.
    See https://github.com/ccxt/ccxt/issues/6047
    """
    id = self.safe_string(item, 'transactID')
    account = self.safe_string(item, 'account')
    # 'tx' carries the on-chain reference for deposits/withdrawals
    referenceId = self.safe_string(item, 'tx')
    referenceAccount = None
    type = self.parse_ledger_entry_type(self.safe_string(item, 'transactType'))
    currencyId = self.safe_string(item, 'currency')
    code = self.safe_currency_code(currencyId, currency)
    amount = self.safe_number(item, 'amount')
    if amount is not None:
        # amounts are reported in XBt(satoshi); scale to BTC
        amount = amount / 100000000
    timestamp = self.parse8601(self.safe_string(item, 'transactTime'))
    if timestamp is None:
        # https://github.com/ccxt/ccxt/issues/6047
        # set the timestamp to zero, 1970 Jan 1 00:00:00
        # for unrealized pnl and other transactions without a timestamp
        timestamp = 0  # see comments above
    feeCost = self.safe_number(item, 'fee', 0)
    if feeCost is not None:
        # fee is also in XBt(satoshi)
        feeCost = feeCost / 100000000
    fee = {
        'cost': feeCost,
        'currency': code,
    }
    after = self.safe_number(item, 'walletBalance')
    if after is not None:
        after = after / 100000000
    # reconstruct the pre-transaction balance from the *signed* amount
    # (must happen before amount is made absolute below)
    before = self.sum(after, -amount)
    direction = None
    if amount < 0:
        direction = 'out'
        amount = abs(amount)
    else:
        direction = 'in'
    status = self.parse_transaction_status(self.safe_string(item, 'transactStatus'))
    return {
        'id': id,
        'info': item,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'direction': direction,
        'account': account,
        'referenceId': referenceId,
        'referenceAccount': referenceAccount,
        'type': type,
        'currency': code,
        'amount': amount,
        'before': before,
        'after': after,
        'status': status,
        'fee': fee,
    }
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
    """Fetch the account's wallet history as unified ledger entries.

    Note: the endpoint has no date-based pagination, so *since* is only
    applied client-side in parse_ledger. Rows look like {transactID,
    account, currency, transactType, amount, fee, transactStatus,
    address, tx, transactTime, walletBalance, timestamp}.
    """
    self.load_markets()
    currency = None if (code is None) else self.currency(code)
    request = {}
    if limit is not None:
        request['count'] = limit
    response = self.privateGetUserWalletHistory(self.extend(request, params))
    return self.parse_ledger(response, currency, since, limit)
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
    """Fetch deposits and withdrawals.

    There is no dedicated endpoint, so the wallet history is fetched and
    narrowed down to 'Deposit'/'Withdrawal' rows client-side. Date-based
    pagination is not supported by the endpoint.
    """
    self.load_markets()
    request = {}
    if limit is not None:
        request['count'] = limit
    history = self.privateGetUserWalletHistory(self.extend(request, params))
    transactions = self.filter_by_array(history, 'transactType', ['Withdrawal', 'Deposit'], False)
    currency = None if (code is None) else self.currency(code)
    return self.parse_transactions(transactions, currency, since, limit)
def parse_transaction_status(self, status):
    """Translate a BitMEX transactStatus into a unified transaction status,
    passing unknown values through unchanged."""
    statuses = {
        'Pending': 'pending',
        'Completed': 'ok',
        'Canceled': 'canceled',
    }
    return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
    """Convert one wallet-history row('Deposit'/'Withdrawal') into a
    unified transaction structure.

    Example row:
        {'transactID': 'ffe699c2-...', 'account': 123456, 'currency': 'XBt',
         'transactType': 'Withdrawal', 'amount': -100100000, 'fee': 100000,
         'transactStatus': 'Completed', 'address': '385cR5...',
         'tx': '3BMEXab...', 'transactTime': '2019-01-02T01:00:00.000Z',
         'walletBalance': 99900000, 'timestamp': '2019-01-02T13:00:00.000Z'}
    """
    id = self.safe_string(transaction, 'transactID')
    # For deposits, transactTime == timestamp
    # For withdrawals, transactTime is submission, timestamp is processed
    transactTime = self.parse8601(self.safe_string(transaction, 'transactTime'))
    timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
    type = self.safe_string_lower(transaction, 'transactType')
    # BUGFIX: 'tx' is the on-chain transaction hash, not a from-address -
    # previously it was stored in addressFrom while txid was left None
    txid = self.safe_string(transaction, 'tx')
    # Deposits carry no address; withdrawals carry the destination address
    address = None
    addressTo = None
    if type == 'withdrawal':
        address = self.safe_string(transaction, 'address')
        addressTo = address
    amountString = self.safe_string(transaction, 'amount')
    # amounts and fees are reported in XBt(satoshi); scale to BTC
    amountString = Precise.string_div(Precise.string_abs(amountString), '1e8')
    feeCostString = self.safe_string(transaction, 'fee')
    feeCostString = Precise.string_div(feeCostString, '1e8')
    fee = {
        'cost': self.parse_number(feeCostString),
        'currency': 'BTC',
    }
    status = self.safe_string(transaction, 'transactStatus')
    if status is not None:
        status = self.parse_transaction_status(status)
    return {
        'info': transaction,
        'id': id,
        'txid': txid,
        'timestamp': transactTime,
        'datetime': self.iso8601(transactTime),
        'network': None,
        'addressFrom': None,
        'address': address,
        'addressTo': addressTo,
        'tagFrom': None,
        'tag': None,
        'tagTo': None,
        'type': type,
        'amount': self.parse_number(amountString),
        # BTC is the only currency on Bitmex
        'currency': 'BTC',
        'status': status,
        'updated': timestamp,
        'comment': None,
        'fee': fee,
    }
def fetch_ticker(self, symbol, params={}):
    """Fetch one ticker by delegating to fetch_tickers and picking out
    the requested symbol.

    Raises BadSymbol when the market is inactive or the exchange did not
    return a ticker for the symbol.
    """
    self.load_markets()
    market = self.market(symbol)
    # inactive instruments are not covered by instrument/activeAndIndices
    if not market['active']:
        raise BadSymbol(self.id + ' fetchTicker() symbol ' + symbol + ' is not tradable')
    tickers = self.fetch_tickers([market['symbol']], params)
    ticker = self.safe_value(tickers, market['symbol'])
    if ticker is None:
        raise BadSymbol(self.id + ' fetchTicker() symbol ' + symbol + ' not found')
    return ticker
def fetch_tickers(self, symbols=None, params={}):
    """Fetch tickers for all active instruments and indices, keyed by
    unified symbol.

    BUGFIX: when *symbols* is None, all tickers are returned; previously
    an empty list was passed to filter_by_array, which filtered every
    ticker out instead of returning the full set.
    """
    self.load_markets()
    response = self.publicGetInstrumentActiveAndIndices(params)
    result = {}
    for i in range(0, len(response)):
        ticker = self.parse_ticker(response[i])
        symbol = self.safe_string(ticker, 'symbol')
        if symbol is not None:
            result[symbol] = ticker
    if symbols is None:
        return result
    # normalize the requested symbols to unified market symbols first
    uniformSymbols = []
    for i in range(0, len(symbols)):
        market = self.market(symbols[i])
        uniformSymbols.append(market['symbol'])
    return self.filter_by_array(result, 'symbol', uniformSymbols)
def parse_ticker(self, ticker, market=None):
    """Build a unified ticker from a raw instrument structure.

    Raw instruments carry dozens of fields; the ones consumed here are:
    symbol, timestamp, prevPrice24h(used as open), lastPrice, highPrice,
    lowPrice, bidPrice, askPrice, vwap, homeNotional24h(base volume) and
    foreignNotional24h(quote volume). Example(abridged):
        {symbol: "ETHH19", state: "Open", prevPrice24h: 0.03349,
         vwap: 0.03383564, highPrice: 0.03458, lowPrice: 0.03329,
         lastPrice: 0.03406, bidPrice: 0.03406, askPrice: 0.03407,
         homeNotional24h: 17118, foreignNotional24h: 579.19845,
         timestamp: "2019-02-13T08:40:30.000Z"}
    """
    marketId = self.safe_string(ticker, 'symbol')
    symbol = self.safe_symbol(marketId, market)
    timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
    open = self.safe_string(ticker, 'prevPrice24h')
    last = self.safe_string(ticker, 'lastPrice')
    # all numeric fields are passed as strings; safe_ticker derives
    # change/percentage/average from open and close
    return self.safe_ticker({
        'symbol': symbol,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'high': self.safe_string(ticker, 'highPrice'),
        'low': self.safe_string(ticker, 'lowPrice'),
        'bid': self.safe_string(ticker, 'bidPrice'),
        'bidVolume': None,
        'ask': self.safe_string(ticker, 'askPrice'),
        'askVolume': None,
        'vwap': self.safe_string(ticker, 'vwap'),
        'open': open,
        'close': last,
        'last': last,
        'previousClose': None,
        'change': None,
        'percentage': None,
        'average': None,
        'baseVolume': self.safe_string(ticker, 'homeNotional24h'),
        'quoteVolume': self.safe_string(ticker, 'foreignNotional24h'),
        'info': ticker,
    }, market, False)
def parse_ohlcv(self, ohlcv, market=None):
    """Convert a raw tradeBucketed row, e.g.
        {"timestamp":"2015-09-25T13:38:00.000Z", "symbol":"XBTUSD",
         "open":237.45, "high":237.45, "low":237.45, "close":237.45,
         "volume":0, ...},
    into the unified[timestamp, open, high, low, close, volume] list.
    """
    candle = [self.parse8601(self.safe_string(ohlcv, 'timestamp'))]
    for field in ['open', 'high', 'low', 'close', 'volume']:
        candle.append(self.safe_number(ohlcv, field))
    return candle
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
    """Fetch OHLCV candles via GET /trade/bucketed.

    BitMEX stamps each candle with its *close* time; when the
    'fetchOHLCVOpenTimestamp' option is enabled(default) the timestamps
    are shifted back by one candle duration so they represent open times,
    matching the unified OHLCV convention.
    """
    self.load_markets()
    # the endpoint also supports advanced filtering:
    # - 'filter': JSON key/value pairs for field/timestamp queries
    # - a bare series(e.g. XBU) targets the nearest expiring contract,
    #   and a series timeframe may be appended, e.g. XBU:monthly
    #   (daily, weekly, monthly, quarterly, biquarterly)
    market = self.market(symbol)
    request = {
        'symbol': market['id'],
        'binSize': self.timeframes[timeframe],
        'partial': True,  # True == include yet-incomplete current bins
        # 'filter': filter,  # filter by individual fields and do advanced queries
        # 'columns': [],  # will return all columns if omitted
        # 'start': 0,  # starting point for results(wtf?)
        # 'reverse': False,  # True == newest first
        # 'endTime': '',  # ending date filter for results
    }
    if limit is not None:
        request['count'] = limit  # default 100, max 500
    duration = self.parse_timeframe(timeframe) * 1000
    fetchOHLCVOpenTimestamp = self.safe_value(self.options, 'fetchOHLCVOpenTimestamp', True)
    # if since is not set, they will return candles starting from 2017-01-01
    if since is not None:
        timestamp = since
        if fetchOHLCVOpenTimestamp:
            # ask for close-stamped candles starting one duration later,
            # so that after the shift below the range begins at `since`
            timestamp = self.sum(timestamp, duration)
        ymdhms = self.ymdhms(timestamp)
        request['startTime'] = ymdhms  # starting date filter for results
    else:
        request['reverse'] = True
    response = self.publicGetTradeBucketed(self.extend(request, params))
    # response rows look like {"timestamp":"2015-09-25T13:38:00.000Z",
    # "symbol":"XBTUSD","open":237.45,"high":237.45,"low":237.45,
    # "close":237.45,"volume":0,...}
    result = self.parse_ohlcvs(response, market, timeframe, since, limit)
    if fetchOHLCVOpenTimestamp:
        # bitmex returns the candle's close timestamp - https://github.com/ccxt/ccxt/issues/4446
        # we can emulate the open timestamp by shifting all the timestamps one place
        # so the previous close becomes the current open, and we drop the first candle
        for i in range(0, len(result)):
            result[i][0] = result[i][0] - duration
    return result
def parse_trade(self, trade, market=None):
    """Parse a public trade(from GET /trade) or a private execution
    (from GET /execution/tradeHistory) into a unified trade structure.

    Public rows: {timestamp, symbol, side, size, price, tickDirection,
    trdMatchID, grossValue, homeNotional, foreignNotional}.
    Private rows additionally carry execID, orderID, clOrdID, lastQty,
    avgPx, commission, execComm, execCost, execType, ordType,
    settlCurrency, transactTime and other FIX-style fields.
    """
    timestamp = self.parse8601(self.safe_string(trade, 'timestamp'))
    # private executions report avgPx/lastQty; public trades report price/size
    priceString = self.safe_string_2(trade, 'avgPx', 'price')
    amountString = self.safe_string_2(trade, 'size', 'lastQty')
    execCost = self.safe_string(trade, 'execCost')
    # execCost is signed and denominated in XBt(satoshi); make it absolute BTC
    costString = Precise.string_div(Precise.string_abs(execCost), '1e8')
    id = self.safe_string(trade, 'trdMatchID')
    order = self.safe_string(trade, 'orderID')
    side = self.safe_string_lower(trade, 'side')
    # price * amount doesn't work for all symbols(e.g. XBT, ETH)
    fee = None
    # execComm is also in XBt(satoshi)
    feeCostString = Precise.string_div(self.safe_string(trade, 'execComm'), '1e8')
    if feeCostString is not None:
        currencyId = self.safe_string(trade, 'settlCurrency')
        feeCurrencyCode = self.safe_currency_code(currencyId)
        feeRateString = self.safe_string(trade, 'commission')
        fee = {
            'cost': feeCostString,
            'currency': feeCurrencyCode,
            'rate': feeRateString,
        }
    # execType distinguishes Trade from Funding executions
    execType = self.safe_string(trade, 'execType')
    takerOrMaker = None
    if feeCostString is not None and execType == 'Trade':
        # a negative commission is a maker rebate
        takerOrMaker = 'maker' if Precise.string_lt(feeCostString, '0') else 'taker'
    marketId = self.safe_string(trade, 'symbol')
    symbol = self.safe_symbol(marketId, market)
    type = self.safe_string_lower(trade, 'ordType')
    return self.safe_trade({
        'info': trade,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'id': id,
        'order': order,
        'type': type,
        'takerOrMaker': takerOrMaker,
        'side': side,
        'price': priceString,
        'cost': costString,
        'amount': amountString,
        'fee': fee,
    }, market)
def parse_order_status(self, status):
    """Map a BitMEX ordStatus onto a unified order status.

    Unknown values are passed through unchanged.
    """
    statuses = {
        # still-working states
        'New': 'open',
        'PartiallyFilled': 'open',
        'DoneForDay': 'open',
        'PendingCancel': 'open',
        'PendingNew': 'open',
        'Stopped': 'open',
        'Untriggered': 'open',
        'Triggered': 'open',
        # terminal states
        'Filled': 'closed',
        'Canceled': 'canceled',
        'Rejected': 'rejected',
        'Expired': 'expired',
    }
    return self.safe_string(statuses, status, status)
def parse_time_in_force(self, timeInForce):
    """Map a BitMEX timeInForce value onto its unified abbreviation,
    passing unknown values through unchanged."""
    abbreviations = {
        'Day': 'Day',
        'GoodTillCancel': 'GTC',
        'ImmediateOrCancel': 'IOC',
        'FillOrKill': 'FOK',
    }
    return self.safe_string(abbreviations, timeInForce, timeInForce)
def parse_order(self, order, market=None):
    """Parse a raw order structure(see GET /order) into a unified order.

    Relevant raw fields: orderID, clOrdID, symbol, side, orderQty, price,
    stopPx, cumQty, avgPx, ordType, ordStatus, timeInForce, execInst,
    transactTime, timestamp. Example(abridged):
        {"orderID":"56222c7a-...", "symbol":"XBTUSD", "side":"Sell",
         "orderQty":1, "price":40000, "ordType":"Limit",
         "timeInForce":"GoodTillCancel", "ordStatus":"New", "cumQty":0,
         "transactTime":"2021-01-02T21:38:49.246Z",
         "timestamp":"2021-01-02T21:38:49.246Z"}
    """
    status = self.parse_order_status(self.safe_string(order, 'ordStatus'))
    marketId = self.safe_string(order, 'symbol')
    symbol = self.safe_symbol(marketId, market)
    timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
    lastTradeTimestamp = self.parse8601(self.safe_string(order, 'transactTime'))
    price = self.safe_string(order, 'price')
    amount = self.safe_string(order, 'orderQty')
    # BUGFIX: default to the string '0', not the float 0.0 - every numeric
    # field here is kept as a string for Precise arithmetic in safe_order
    filled = self.safe_string(order, 'cumQty', '0')
    average = self.safe_string(order, 'avgPx')
    id = self.safe_string(order, 'orderID')
    type = self.safe_string_lower(order, 'ordType')
    side = self.safe_string_lower(order, 'side')
    clientOrderId = self.safe_string(order, 'clOrdID')
    timeInForce = self.parse_time_in_force(self.safe_string(order, 'timeInForce'))
    stopPrice = self.safe_number(order, 'stopPx')
    execInst = self.safe_string(order, 'execInst')
    # BitMEX signals post-only orders via this execInst value
    postOnly = (execInst == 'ParticipateDoNotInitiate')
    return self.safe_order({
        'info': order,
        'id': id,
        'clientOrderId': clientOrderId,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'lastTradeTimestamp': lastTradeTimestamp,
        'symbol': symbol,
        'type': type,
        'timeInForce': timeInForce,
        'postOnly': postOnly,
        'side': side,
        'price': price,
        'stopPrice': stopPrice,
        'amount': amount,
        'cost': None,
        'average': average,
        'filled': filled,
        'remaining': None,
        'status': status,
        'fee': None,
        'trades': None,
    }, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
    """Fetch public trades for *symbol*.

    Without *since* the newest trades are requested(reverse=True),
    because by default BitMEX pages from market inception(year 2015 for
    XBTUSD). Rows look like {timestamp, symbol, side, size, price,
    tickDirection, trdMatchID, grossValue, homeNotional, foreignNotional}.
    """
    self.load_markets()
    market = self.market(symbol)
    request = {'symbol': market['id']}
    if since is None:
        request['reverse'] = True
    else:
        request['startTime'] = self.iso8601(since)
    if limit is not None:
        request['count'] = limit
    response = self.publicGetTrade(self.extend(request, params))
    return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
    """Place an order.

    Stop/StopLimit/MarketIfTouched/LimitIfTouched orders require a
    'stopPx'(or 'stopPrice') in params; Limit/StopLimit/LimitIfTouched
    orders require a *price* argument. A 'clOrdID'(or 'clientOrderId')
    param is forwarded as the client order id.
    """
    self.load_markets()
    market = self.market(symbol)
    orderType = self.capitalize(type)
    request = {
        'symbol': market['id'],
        'side': self.capitalize(side),
        'orderQty': float(self.amount_to_precision(symbol, amount)),
        'ordType': orderType,
    }
    if (orderType == 'Stop') or (orderType == 'StopLimit') or (orderType == 'MarketIfTouched') or (orderType == 'LimitIfTouched'):
        stopPrice = self.safe_number_2(params, 'stopPx', 'stopPrice')
        if stopPrice is None:
            raise ArgumentsRequired(self.id + ' createOrder() requires a stopPx or stopPrice parameter for the ' + orderType + ' order type')
        else:
            request['stopPx'] = float(self.price_to_precision(symbol, stopPrice))
            params = self.omit(params, ['stopPx', 'stopPrice'])
    if (orderType == 'Limit') or (orderType == 'StopLimit') or (orderType == 'LimitIfTouched'):
        # fail fast with a clear error instead of a TypeError from float(None)
        if price is None:
            raise ArgumentsRequired(self.id + ' createOrder() requires a price argument for the ' + orderType + ' order type')
        request['price'] = float(self.price_to_precision(symbol, price))
    clientOrderId = self.safe_string_2(params, 'clOrdID', 'clientOrderId')
    if clientOrderId is not None:
        request['clOrdID'] = clientOrderId
        params = self.omit(params, ['clOrdID', 'clientOrderId'])
    response = self.privatePostOrder(self.extend(request, params))
    return self.parse_order(response, market)
def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
    """Amend an existing order by exchange id, or by client order id when
    'origClOrdID'(or 'clientOrderId') is present in params. An optional
    'clOrdID' param assigns a new client order id to the amended order.
    """
    self.load_markets()
    request = {}
    origClOrdID = self.safe_string_2(params, 'origClOrdID', 'clientOrderId')
    if origClOrdID is not None:
        request['origClOrdID'] = origClOrdID
        # BUGFIX: the previous code called safe_string(params, 'clOrdID',
        # 'clientOrderId'), using 'clientOrderId' as the *default value* -
        # so the literal string 'clientOrderId' was sent as the new client
        # order id whenever 'clOrdID' was absent from params
        clientOrderId = self.safe_string(params, 'clOrdID')
        if clientOrderId is not None:
            request['clOrdID'] = clientOrderId
        params = self.omit(params, ['origClOrdID', 'clOrdID', 'clientOrderId'])
    else:
        request['orderID'] = id
    if amount is not None:
        request['orderQty'] = amount
    if price is not None:
        request['price'] = price
    response = self.privatePutOrder(self.extend(request, params))
    return self.parse_order(response)
def cancel_order(self, id, symbol=None, params={}):
    """Cancel an order by exchange id, or by 'clOrdID'/'clientOrderId'
    given in params.

    BitMEX answers 200 even for unknown ids and reports the problem in
    the returned order's 'error' field, hence the explicit check below.
    https://github.com/ccxt/ccxt/issues/6507
    """
    self.load_markets()
    clientOrderId = self.safe_value_2(params, 'clOrdID', 'clientOrderId')
    request = {}
    if clientOrderId is not None:
        request['clOrdID'] = clientOrderId
        params = self.omit(params, ['clOrdID', 'clientOrderId'])
    else:
        request['orderID'] = id
    response = self.privateDeleteOrder(self.extend(request, params))
    order = self.safe_value(response, 0, {})
    error = self.safe_string(order, 'error')
    if (error is not None) and (error.find('Unable to cancel order due to existing state') >= 0):
        raise OrderNotFound(self.id + ' cancelOrder() failed: ' + error)
    return self.parse_order(order)
def cancel_orders(self, ids, symbol=None, params={}):
    # delegates the whole list to cancel_order; presumably the DELETE
    # /order endpoint accepts multiple ids in the 'orderID' field -
    # TODO(review): confirm against the BitMEX API reference
    return self.cancel_order(ids, symbol, params)
def cancel_all_orders(self, symbol=None, params={}):
    """Cancel every open order, optionally limited to a single market.

    DELETE /order/all responds with the full list of cancelled raw order
    structures(orderID, clOrdID, symbol, side, orderQty, price, ordType,
    ordStatus, transactTime, timestamp, ...), which are parsed and
    returned as unified orders.
    """
    self.load_markets()
    market = None
    request = {}
    if symbol is not None:
        market = self.market(symbol)
        request['symbol'] = market['id']
    response = self.privateDeleteOrderAll(self.extend(request, params))
    return self.parse_orders(response, market)
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
response = self.privateGetPosition(params)
# [
# {
# "account": 0,
# "symbol": "string",
# "currency": "string",
# "underlying": "string",
# "quoteCurrency": "string",
# "commission": 0,
# "initMarginReq": 0,
# "maintMarginReq": 0,
# "riskLimit": 0,
# "leverage": 0,
# "crossMargin": True,
# "deleveragePercentile": 0,
# "rebalancedPnl": 0,
# "prevRealisedPnl": 0,
# "prevUnrealisedPnl": 0,
# "prevClosePrice": 0,
# "openingTimestamp": "2020-11-09T06:53:59.892Z",
# "openingQty": 0,
# "openingCost": 0,
# "openingComm": 0,
# "openOrderBuyQty": 0,
# "openOrderBuyCost": 0,
# "openOrderBuyPremium": 0,
# "openOrderSellQty": 0,
# "openOrderSellCost": 0,
# "openOrderSellPremium": 0,
# "execBuyQty": 0,
# "execBuyCost": 0,
# "execSellQty": 0,
# "execSellCost": 0,
# "execQty": 0,
# "execCost": 0,
# "execComm": 0,
# "currentTimestamp": "2020-11-09T06:53:59.893Z",
# "currentQty": 0,
# "currentCost": 0,
# "currentComm": 0,
# "realisedCost": 0,
# "unrealisedCost": 0,
# "grossOpenCost": 0,
# "grossOpenPremium": 0,
# "grossExecCost": 0,
# "isOpen": True,
# "markPrice": 0,
# "markValue": 0,
# "riskValue": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "posState": "string",
# "posCost": 0,
# "posCost2": 0,
# "posCross": 0,
# "posInit": 0,
# "posComm": 0,
# "posLoss": 0,
# "posMargin": 0,
# "posMaint": 0,
# "posAllowance": 0,
# "taxableMargin": 0,
# "initMargin": 0,
# "maintMargin": 0,
# "sessionMargin": 0,
# "targetExcessMargin": 0,
# "varMargin": 0,
# "realisedGrossPnl": 0,
# "realisedTax": 0,
# "realisedPnl": 0,
# "unrealisedGrossPnl": 0,
# "longBankrupt": 0,
# "shortBankrupt": 0,
# "taxBase": 0,
# "indicativeTaxRate": 0,
# "indicativeTax": 0,
# "unrealisedTax": 0,
# "unrealisedPnl": 0,
# "unrealisedPnlPcnt": 0,
# "unrealisedRoePcnt": 0,
# "simpleQty": 0,
# "simpleCost": 0,
# "simpleValue": 0,
# "simplePnl": 0,
# "simplePnlPcnt": 0,
# "avgCostPrice": 0,
# "avgEntryPrice": 0,
# "breakEvenPrice": 0,
# "marginCallPrice": 0,
# "liquidationPrice": 0,
# "bankruptPrice": 0,
# "timestamp": "2020-11-09T06:53:59.894Z",
# "lastPrice": 0,
# "lastValue": 0
# }
# ]
#
# todo unify parsePosition/parsePositions
return response
def is_fiat(self, currency):
if currency == 'EUR':
return True
if currency == 'PLN':
return True
return False
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
# currency = self.currency(code)
if code != 'BTC':
raise ExchangeError(self.id + ' supoprts BTC withdrawals only, other currencies coming soon...')
request = {
'currency': 'XBt', # temporarily
'amount': amount,
'address': address,
# 'otpToken': '123456', # requires if two-factor auth(OTP) is enabled
# 'fee': 0.001, # bitcoin network fee
}
response = self.privatePostUserRequestWithdrawal(self.extend(request, params))
return {
'info': response,
'id': self.safe_string(response, 'transactID'),
}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if code == 429:
raise DDoSProtection(self.id + ' ' + body)
if code >= 400:
error = self.safe_value(response, 'error', {})
message = self.safe_string(error, 'message')
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
if code == 400:
raise BadRequest(feedback)
raise ExchangeError(feedback) # unknown message
def nonce(self):
return self.milliseconds()
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the full URL, body and HMAC-SHA256-signed headers for a bitmex REST request.

        Private requests are signed over: method + path(+query) + expires(+body),
        with the signature sent in the 'api-signature' header.
        """
        query = '/api/' + self.version + '/' + path
        if method == 'GET':
            # GET parameters go into the query string
            if params:
                query += '?' + self.urlencode(params)
        else:
            # non-GET requests carry params in the JSON body,
            # except the optional '_format' hint which stays in the query string
            format = self.safe_string(params, '_format')
            if format is not None:
                query += '?' + self.urlencode({'_format': format})
                params = self.omit(params, '_format')
        url = self.urls['api'][api] + query
        if api == 'private':
            self.check_required_credentials()
            auth = method + query
            # the 'api-expires' option is a validity window in seconds(default 5)
            # added to the current time to form the request expiration timestamp
            expires = self.safe_integer(self.options, 'api-expires')
            headers = {
                'Content-Type': 'application/json',
                'api-key': self.apiKey,
            }
            expires = self.sum(self.seconds(), expires)
            expires = str(expires)
            auth += expires
            headers['api-expires'] = expires
            if method == 'POST' or method == 'PUT' or method == 'DELETE':
                if params:
                    # the body participates in the signature, so serialize it first
                    body = self.json(params)
                    auth += body
            headers['api-signature'] = self.hmac(self.encode(auth), self.encode(self.secret))
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
| 42.284633 | 233 | 0.457664 |
ge import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bitmex(Exchange):
def describe(self):
return self.deep_extend(super(bitmex, self).describe(), {
'id': 'bitmex',
'name': 'BitMEX',
'countries': ['SC'],
'version': 'v1',
'userAgent': None,
'rateLimit': 2000,
'pro': True,
'has': {
'CORS': None,
'spot': False,
'margin': False,
'swap': None,
'future': None,
'option': None,
'cancelAllOrders': True,
'cancelOrder': True,
'cancelOrders': True,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchIndexOHLCV': False,
'fetchLedger': True,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTransactions': 'emulated',
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'1h': '1h',
'1d': '1d',
},
'urls': {
'test': {
'public': 'https://testnet.bitmex.com',
'private': 'https://testnet.bitmex.com',
},
'logo': 'https://user-images.githubusercontent.com/1294454/27766319-f653c6e6-5ed4-11e7-933d-f0bc3699ae8f.jpg',
'api': {
'public': 'https://www.bitmex.com',
'private': 'https://www.bitmex.com',
},
'www': 'https://www.bitmex.com',
'doc': [
'https://www.bitmex.com/app/apiOverview',
'https://github.com/BitMEX/api-connectors/tree/master/official-http',
],
'fees': 'https://www.bitmex.com/app/fees',
'referral': 'https://www.bitmex.com/register/upZpOX',
},
'api': {
'public': {
'get': [
'announcement',
'announcement/urgent',
'funding',
'instrument',
'instrument/active',
'instrument/activeAndIndices',
'instrument/activeIntervals',
'instrument/compositeIndex',
'instrument/indices',
'insurance',
'leaderboard',
'liquidation',
'orderBook',
'orderBook/L2',
'quote',
'quote/bucketed',
'schema',
'schema/websocketHelp',
'settlement',
'stats',
'stats/history',
'trade',
'trade/bucketed',
],
},
'private': {
'get': [
'apiKey',
'chat',
'chat/channels',
'chat/connected',
'execution',
'execution/tradeHistory',
'notification',
'order',
'position',
'user',
'user/affiliateStatus',
'user/checkReferralCode',
'user/commission',
'user/depositAddress',
'user/executionHistory',
'user/margin',
'user/minWithdrawalFee',
'user/wallet',
'user/walletHistory',
'user/walletSummary',
],
'post': [
'apiKey',
'apiKey/disable',
'apiKey/enable',
'chat',
'order',
'order/bulk',
'order/cancelAllAfter',
'order/closePosition',
'position/isolate',
'position/leverage',
'position/riskLimit',
'position/transferMargin',
'user/cancelWithdrawal',
'user/confirmEmail',
'user/confirmEnableTFA',
'user/confirmWithdrawal',
'user/disableTFA',
'user/logout',
'user/logoutAll',
'user/preferences',
'user/requestEnableTFA',
'user/requestWithdrawal',
],
'put': [
'order',
'order/bulk',
'user',
],
'delete': [
'apiKey',
'order',
'order/all',
],
},
},
'exceptions': {
'exact': {
'Invalid API Key.': AuthenticationError,
'This key is disabled.': PermissionDenied,
'Access Denied': PermissionDenied,
'Duplicate clOrdID': InvalidOrder,
'orderQty is invalid': InvalidOrder,
'Invalid price': InvalidOrder,
'Invalid stopPx for ordType': InvalidOrder,
},
'broad': {
'Signature not valid': AuthenticationError,
'overloaded': ExchangeNotAvailable,
'Account has insufficient Available Balance': InsufficientFunds,
'Service unavailable': ExchangeNotAvailable,
'Server Error': ExchangeError,
'Unable to cancel order due to existing state': InvalidOrder,
},
},
'precisionMode': TICK_SIZE,
'options': {
'api-expires': 5,
'fetchOHLCVOpenTimestamp': True,
},
'commonCurrencies': {
'USDt': 'USDT',
'XBt': 'BTC',
'XBT': 'BTC',
},
})
def fetch_markets(self, params={}):
response = self.publicGetInstrumentActiveAndIndices(params)
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'symbol')
baseId = self.safe_string(market, 'underlying')
quoteId = self.safe_string(market, 'quoteCurrency')
settleId = self.safe_string(market, 'settlCurrency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
basequote = baseId + quoteId
swap = (id == basequote)
type = None
future = False
prediction = False
index = False
symbol = base + '/' + quote + ':' + settle
expiryDatetime = self.safe_string(market, 'expiry')
expiry = self.parse8601(expiryDatetime)
inverse = self.safe_value(market, 'isInverse')
status = self.safe_string(market, 'state')
active = status != 'Unlisted'
if swap:
type = 'swap'
elif id.find('B_') >= 0:
prediction = True
type = 'prediction'
symbol = id
elif expiry is not None:
future = True
type = 'future'
symbol = symbol + '-' + self.yymmdd(expiry)
else:
index = True
type = 'index'
symbol = id
active = False
positionId = self.safe_string_2(market, 'positionCurrency', 'quoteCurrency')
position = self.safe_currency_code(positionId)
positionIsQuote = (position == quote)
maxOrderQty = self.safe_number(market, 'maxOrderQty')
contract = not index
initMargin = self.safe_string(market, 'initMargin', '1')
maxLeverage = self.parse_number(Precise.string_div('1', initMargin))
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': type,
'spot': False,
'margin': False,
'swap': swap,
'future': future,
'option': False,
'prediction': prediction,
'index': index,
'active': active,
'contract': contract,
'linear': not inverse if contract else None,
'inverse': inverse if contract else None,
'taker': self.safe_number(market, 'takerFee'),
'maker': self.safe_number(market, 'makerFee'),
'contractSize': self.safe_number(market, 'multiplier'),
'expiry': expiry,
'expiryDatetime': expiryDatetime,
'strike': self.safe_number(market, 'optionStrikePrice'),
'optionType': None,
'precision': {
'amount': self.safe_number(market, 'lotSize'),
'price': self.safe_number(market, 'tickSize'),
},
'limits': {
'leverage': {
'min': self.parse_number('1') if contract else None,
'max': maxLeverage if contract else None,
},
'amount': {
'min': None,
'max': None if positionIsQuote else maxOrderQty,
},
'price': {
'min': None,
'max': self.safe_number(market, 'maxPrice'),
},
'cost': {
'min': None,
'max': maxOrderQty if positionIsQuote else None,
},
},
'info': market,
})
return result
def parse_balance(self, response):
#
# [
# {
# "account":1455728,
# "currency":"XBt",
# "riskLimit":1000000000000,
# "prevState":"",
# "state":"",
# "action":"",
# "amount":263542,
# "pendingCredit":0,
# "pendingDebit":0,
# "confirmedDebit":0,
# "prevRealisedPnl":0,
# "prevUnrealisedPnl":0,
# "grossComm":0,
# "grossOpenCost":0,
# "grossOpenPremium":0,
# "grossExecCost":0,
# "grossMarkValue":0,
# "riskValue":0,
# "taxableMargin":0,
# "initMargin":0,
# "maintMargin":0,
# "sessionMargin":0,
# "targetExcessMargin":0,
# "varMargin":0,
# "realisedPnl":0,
# "unrealisedPnl":0,
# "indicativeTax":0,
# "unrealisedProfit":0,
# "syntheticMargin":null,
# "walletBalance":263542,
# "marginBalance":263542,
# "marginBalancePcnt":1,
# "marginLeverage":0,
# "marginUsedPcnt":0,
# "excessMargin":263542,
# "excessMarginPcnt":1,
# "availableMargin":263542,
# "withdrawableMargin":263542,
# "timestamp":"2020-08-03T12:01:01.246Z",
# "grossLastValue":0,
# "commission":null
# }
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
free = self.safe_string(balance, 'availableMargin')
total = self.safe_string(balance, 'marginBalance')
if code == 'BTC':
free = Precise.string_div(free, '1e8')
total = Precise.string_div(total, '1e8')
account['free'] = free
account['total'] = total
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
request = {
'currency': 'all',
}
response = self.privateGetUserMargin(self.extend(request, params))
#
# [
# {
# "account":1455728,
# "currency":"XBt",
# "riskLimit":1000000000000,
# "prevState":"",
# "state":"",
# "action":"",
# "amount":263542,
# "pendingCredit":0,
# "pendingDebit":0,
# "confirmedDebit":0,
# "prevRealisedPnl":0,
# "prevUnrealisedPnl":0,
# "grossComm":0,
# "grossOpenCost":0,
# "grossOpenPremium":0,
# "grossExecCost":0,
# "grossMarkValue":0,
# "riskValue":0,
# "taxableMargin":0,
# "initMargin":0,
# "maintMargin":0,
# "sessionMargin":0,
# "targetExcessMargin":0,
# "varMargin":0,
# "realisedPnl":0,
# "unrealisedPnl":0,
# "indicativeTax":0,
# "unrealisedProfit":0,
# "syntheticMargin":null,
# "walletBalance":263542,
# "marginBalance":263542,
# "marginBalancePcnt":1,
# "marginLeverage":0,
# "marginUsedPcnt":0,
# "excessMargin":263542,
# "excessMarginPcnt":1,
# "availableMargin":263542,
# "withdrawableMargin":263542,
# "timestamp":"2020-08-03T12:01:01.246Z",
# "grossLastValue":0,
# "commission":null
# }
# ]
#
return self.parse_balance(response)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['depth'] = limit
response = self.publicGetOrderBookL2(self.extend(request, params))
result = {
'symbol': symbol,
'bids': [],
'asks': [],
'timestamp': None,
'datetime': None,
'nonce': None,
}
for i in range(0, len(response)):
order = response[i]
side = 'asks' if (order['side'] == 'Sell') else 'bids'
amount = self.safe_number(order, 'size')
price = self.safe_number(order, 'price')
# https://github.com/ccxt/ccxt/issues/4926
# https://github.com/ccxt/ccxt/issues/4927
# the exchange sometimes returns null price in the orderbook
if price is not None:
result[side].append([price, amount])
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def fetch_order(self, id, symbol=None, params={}):
filter = {
'filter': {
'orderID': id,
},
}
response = self.fetch_orders(symbol, None, None, self.deep_extend(filter, params))
numResults = len(response)
if numResults == 1:
return response[0]
raise OrderNotFound(self.id + ': The order ' + id + ' not found.')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
request = self.deep_extend(request, params)
# why the hassle? urlencode in python is kinda broken for nested dicts.
# E.g. self.urlencode({"filter": {"open": True}}) will return "filter={'open':+True}"
# Bitmex doesn't like that. Hence resorting to self hack.
if 'filter' in request:
request['filter'] = self.json(request['filter'])
response = self.privateGetOrder(request)
return self.parse_orders(response, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'filter': {
'open': True,
},
}
return self.fetch_orders(symbol, since, limit, self.deep_extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
orders = self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'closed')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
request = self.deep_extend(request, params)
if 'filter' in request:
request['filter'] = self.json(request['filter'])
response = self.privateGetExecutionTradeHistory(request)
#
# [
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'Withdrawal': 'transaction',
'RealisedPNL': 'margin',
'UnrealisedPNL': 'margin',
'Deposit': 'transaction',
'Transfer': 'transfer',
'AffiliatePayout': 'referral',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# {
# transactID: "69573da3-7744-5467-3207-89fd6efe7a47",
# account: 24321,
# currency: "XBt",
# transactType: "Withdrawal", # "AffiliatePayout", "Transfer", "Deposit", "RealisedPNL", ...
# amount: -1000000,
# fee: 300000,
# transactStatus: "Completed", # "Canceled", ...
# address: "1Ex4fkF4NhQaQdRWNoYpqiPbDBbq18Kdd9",
# tx: "3BMEX91ZhhKoWtsH9QRb5dNXnmnGpiEetA",
# text: "",
# transactTime: "2017-03-21T20:05:14.388Z",
# walletBalance: 0, # balance after
# marginBalance: null,
# timestamp: "2017-03-22T13:09:23.514Z"
# }
#
# ButMEX returns the unrealized pnl from the wallet history endpoint.
# The unrealized pnl transaction has an empty timestamp.
# It is not related to historical pnl it has status set to "Pending".
# Therefore it's not a part of the history at all.
ing(item, 'transactID')
account = self.safe_string(item, 'account')
referenceId = self.safe_string(item, 'tx')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'transactType'))
currencyId = self.safe_string(item, 'currency')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_number(item, 'amount')
if amount is not None:
amount = amount / 100000000
timestamp = self.parse8601(self.safe_string(item, 'transactTime'))
if timestamp is None:
timestamp = 0
feeCost = self.safe_number(item, 'fee', 0)
if feeCost is not None:
feeCost = feeCost / 100000000
fee = {
'cost': feeCost,
'currency': code,
}
after = self.safe_number(item, 'walletBalance')
if after is not None:
after = after / 100000000
before = self.sum(after, -amount)
direction = None
if amount < 0:
direction = 'out'
amount = abs(amount)
else:
direction = 'in'
status = self.parse_transaction_status(self.safe_string(item, 'transactStatus'))
return {
'id': id,
'info': item,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'direction': direction,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': status,
'fee': fee,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
}
s not None:
request['count'] = limit
response = self.privateGetUserWalletHistory(self.extend(request, params))
return self.parse_ledger(response, currency, since, limit)
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
}
s not None:
request['count'] = limit
response = self.privateGetUserWalletHistory(self.extend(request, params))
transactions = self.filter_by_array(response, 'transactType', ['Withdrawal', 'Deposit'], False)
currency = None
if code is not None:
currency = self.currency(code)
return self.parse_transactions(transactions, currency, since, limit)
def parse_transaction_status(self, status):
statuses = {
'Canceled': 'canceled',
'Completed': 'ok',
'Pending': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
id = self.safe_string(transaction, 'transactID')
transactTime = self.parse8601(self.safe_string(transaction, 'transactTime'))
timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
type = self.safe_string_lower(transaction, 'transactType')
address = None
addressFrom = None
addressTo = None
if type == 'withdrawal':
address = self.safe_string(transaction, 'address')
addressFrom = self.safe_string(transaction, 'tx')
addressTo = address
amountString = self.safe_string(transaction, 'amount')
amountString = Precise.string_div(Precise.string_abs(amountString), '1e8')
feeCostString = self.safe_string(transaction, 'fee')
feeCostString = Precise.string_div(feeCostString, '1e8')
fee = {
'cost': self.parse_number(feeCostString),
'currency': 'BTC',
}
status = self.safe_string(transaction, 'transactStatus')
if status is not None:
status = self.parse_transaction_status(status)
return {
'info': transaction,
'id': id,
'txid': None,
'timestamp': transactTime,
'datetime': self.iso8601(transactTime),
'network': None,
'addressFrom': addressFrom,
'address': address,
'addressTo': addressTo,
'tagFrom': None,
'tag': None,
'tagTo': None,
'type': type,
'amount': self.parse_number(amountString),
'currency': 'BTC',
'status': status,
'updated': timestamp,
'comment': None,
'fee': fee,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
if not market['active']:
raise BadSymbol(self.id + ' fetchTicker() symbol ' + symbol + ' is not tradable')
tickers = self.fetch_tickers([market['symbol']], params)
ticker = self.safe_value(tickers, market['symbol'])
if ticker is None:
raise BadSymbol(self.id + ' fetchTicker() symbol ' + symbol + ' not found')
return ticker
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetInstrumentActiveAndIndices(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = self.safe_string(ticker, 'symbol')
if symbol is not None:
result[symbol] = ticker
uniformSymbols = []
if symbols is not None:
for i in range(0, len(symbols)):
symbol = symbols[i]
market = self.market(symbol)
uniformSymbols.append(market['symbol'])
return self.filter_by_array(result, 'symbol', uniformSymbols)
def parse_ticker(self, ticker, market=None):
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
open = self.safe_string(ticker, 'prevPrice24h')
last = self.safe_string(ticker, 'lastPrice')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'highPrice'),
'low': self.safe_string(ticker, 'lowPrice'),
'bid': self.safe_string(ticker, 'bidPrice'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'askPrice'),
'askVolume': None,
'vwap': self.safe_string(ticker, 'vwap'),
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'homeNotional24h'),
'quoteVolume': self.safe_string(ticker, 'foreignNotional24h'),
'info': ticker,
}, market, False)
def parse_ohlcv(self, ohlcv, market=None):
return [
self.parse8601(self.safe_string(ohlcv, 'timestamp')),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'binSize': self.timeframes[timeframe],
'partial': True,
rame) * 1000
fetchOHLCVOpenTimestamp = self.safe_value(self.options, 'fetchOHLCVOpenTimestamp', True)
if since is not None:
timestamp = since
if fetchOHLCVOpenTimestamp:
timestamp = self.sum(timestamp, duration)
ymdhms = self.ymdhms(timestamp)
request['startTime'] = ymdhms
else:
request['reverse'] = True
response = self.publicGetTradeBucketed(self.extend(request, params))
result = self.parse_ohlcvs(response, market, timeframe, since, limit)
if fetchOHLCVOpenTimestamp:
# we can emulate the open timestamp by shifting all the timestamps one place
# so the previous close becomes the current open, and we drop the first candle
for i in range(0, len(result)):
result[i][0] = result[i][0] - duration
return result
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# timestamp: '2018-08-28T00:00:02.735Z',
# symbol: 'XBTUSD',
# side: 'Buy',
# size: 2000,
# price: 6906.5,
# tickDirection: 'PlusTick',
# trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',
# grossValue: 28958000,
# homeNotional: 0.28958,
# foreignNotional: 2000
# }
#
# fetchMyTrades(private)
#
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
#
timestamp = self.parse8601(self.safe_string(trade, 'timestamp'))
priceString = self.safe_string_2(trade, 'avgPx', 'price')
amountString = self.safe_string_2(trade, 'size', 'lastQty')
execCost = self.safe_string(trade, 'execCost')
costString = Precise.string_div(Precise.string_abs(execCost), '1e8')
id = self.safe_string(trade, 'trdMatchID')
order = self.safe_string(trade, 'orderID')
side = self.safe_string_lower(trade, 'side')
# price * amount doesn't work for all symbols(e.g. XBT, ETH)
fee = None
feeCostString = Precise.string_div(self.safe_string(trade, 'execComm'), '1e8')
if feeCostString is not None:
currencyId = self.safe_string(trade, 'settlCurrency')
feeCurrencyCode = self.safe_currency_code(currencyId)
feeRateString = self.safe_string(trade, 'commission')
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
'rate': feeRateString,
}
execType = self.safe_string(trade, 'execType')
takerOrMaker = None
if feeCostString is not None and execType == 'Trade':
takerOrMaker = 'maker' if Precise.string_lt(feeCostString, '0') else 'taker'
marketId = self.safe_string(trade, 'symbol')
symbol = self.safe_symbol(marketId, market)
type = self.safe_string_lower(trade, 'ordType')
return self.safe_trade({
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': order,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'price': priceString,
'cost': costString,
'amount': amountString,
'fee': fee,
}, market)
def parse_order_status(self, status):
statuses = {
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'DoneForDay': 'open',
'Canceled': 'canceled',
'PendingCancel': 'open',
'PendingNew': 'open',
'Rejected': 'rejected',
'Expired': 'expired',
'Stopped': 'open',
'Untriggered': 'open',
'Triggered': 'open',
}
return self.safe_string(statuses, status, status)
def parse_time_in_force(self, timeInForce):
timeInForces = {
'Day': 'Day',
'GoodTillCancel': 'GTC',
'ImmediateOrCancel': 'IOC',
'FillOrKill': 'FOK',
}
return self.safe_string(timeInForces, timeInForce, timeInForce)
def parse_order(self, order, market=None):
status = self.parse_order_status(self.safe_string(order, 'ordStatus'))
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
lastTradeTimestamp = self.parse8601(self.safe_string(order, 'transactTime'))
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'orderQty')
filled = self.safe_string(order, 'cumQty', 0.0)
average = self.safe_string(order, 'avgPx')
id = self.safe_string(order, 'orderID')
type = self.safe_string_lower(order, 'ordType')
side = self.safe_string_lower(order, 'side')
clientOrderId = self.safe_string(order, 'clOrdID')
timeInForce = self.parse_time_in_force(self.safe_string(order, 'timeInForce'))
stopPrice = self.safe_number(order, 'stopPx')
execInst = self.safe_string(order, 'execInst')
postOnly = (execInst == 'ParticipateDoNotInitiate')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': None,
'average': average,
'filled': filled,
'remaining': None,
'status': status,
'fee': None,
'trades': None,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = self.iso8601(since)
else:
request['reverse'] = True
if limit is not None:
request['count'] = limit
response = self.publicGetTrade(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
orderType = self.capitalize(type)
request = {
'symbol': market['id'],
'side': self.capitalize(side),
'orderQty': float(self.amount_to_precision(symbol, amount)),
'ordType': orderType,
}
if (orderType == 'Stop') or (orderType == 'StopLimit') or (orderType == 'MarketIfTouched') or (orderType == 'LimitIfTouched'):
stopPrice = self.safe_number_2(params, 'stopPx', 'stopPrice')
if stopPrice is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPx or stopPrice parameter for the ' + orderType + ' order type')
else:
request['stopPx'] = float(self.price_to_precision(symbol, stopPrice))
params = self.omit(params, ['stopPx', 'stopPrice'])
if (orderType == 'Limit') or (orderType == 'StopLimit') or (orderType == 'LimitIfTouched'):
request['price'] = float(self.price_to_precision(symbol, price))
clientOrderId = self.safe_string_2(params, 'clOrdID', 'clientOrderId')
if clientOrderId is not None:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['clOrdID', 'clientOrderId'])
response = self.privatePostOrder(self.extend(request, params))
return self.parse_order(response, market)
def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
self.load_markets()
request = {}
origClOrdID = self.safe_string_2(params, 'origClOrdID', 'clientOrderId')
if origClOrdID is not None:
request['origClOrdID'] = origClOrdID
clientOrderId = self.safe_string(params, 'clOrdID', 'clientOrderId')
if clientOrderId is not None:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['origClOrdID', 'clOrdID', 'clientOrderId'])
else:
request['orderID'] = id
if amount is not None:
request['orderQty'] = amount
if price is not None:
request['price'] = price
response = self.privatePutOrder(self.extend(request, params))
return self.parse_order(response)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
clientOrderId = self.safe_value_2(params, 'clOrdID', 'clientOrderId')
request = {}
if clientOrderId is None:
request['orderID'] = id
else:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['clOrdID', 'clientOrderId'])
response = self.privateDeleteOrder(self.extend(request, params))
order = self.safe_value(response, 0, {})
error = self.safe_string(order, 'error')
if error is not None:
if error.find('Unable to cancel order due to existing state') >= 0:
raise OrderNotFound(self.id + ' cancelOrder() failed: ' + error)
return self.parse_order(order)
    def cancel_orders(self, ids, symbol=None, params={}):
        # NOTE(review): forwards the whole *ids* list straight to
        # cancel_order() — presumably the underlying endpoint accepts a
        # list of order ids; confirm against the exchange API.
        return self.cancel_order(ids, symbol, params)
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privateDeleteOrderAll(self.extend(request, params))
return self.parse_orders(response, market)
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
response = self.privateGetPosition(params)
return response
def is_fiat(self, currency):
if currency == 'EUR':
return True
if currency == 'PLN':
return True
return False
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
if code != 'BTC':
raise ExchangeError(self.id + ' supoprts BTC withdrawals only, other currencies coming soon...')
request = {
'currency': 'XBt',
'amount': amount,
'address': address,
uestWithdrawal(self.extend(request, params))
return {
'info': response,
'id': self.safe_string(response, 'transactID'),
}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if code == 429:
raise DDoSProtection(self.id + ' ' + body)
if code >= 400:
error = self.safe_value(response, 'error', {})
message = self.safe_string(error, 'message')
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
if code == 400:
raise BadRequest(feedback)
raise ExchangeError(feedback)
    def nonce(self):
        # Use the current time in milliseconds as the request nonce.
        return self.milliseconds()
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the url, headers and body for a request, signing private calls.

        Private requests are authenticated with an HMAC over
        method + path + expiry [+ body], sent in the 'api-signature' header.
        The order of concatenation below is significant.
        """
        query = '/api/' + self.version + '/' + path
        if method == 'GET':
            if params:
                query += '?' + self.urlencode(params)
        else:
            # Non-GET requests carry params in the JSON body, except for an
            # optional '_format' override which stays in the query string.
            format = self.safe_string(params, '_format')
            if format is not None:
                query += '?' + self.urlencode({'_format': format})
                params = self.omit(params, '_format')
        url = self.urls['api'][api] + query
        if api == 'private':
            self.check_required_credentials()
            auth = method + query
            # 'api-expires' in options is a relative lifetime in seconds; the
            # signed header carries the absolute expiry timestamp.
            expires = self.safe_integer(self.options, 'api-expires')
            headers = {
                'Content-Type': 'application/json',
                'api-key': self.apiKey,
            }
            expires = self.sum(self.seconds(), expires)
            expires = str(expires)
            auth += expires
            headers['api-expires'] = expires
            if method == 'POST' or method == 'PUT' or method == 'DELETE':
                if params:
                    body = self.json(params)
                    auth += body  # the serialized body is part of the signed payload
            headers['api-signature'] = self.hmac(self.encode(auth), self.encode(self.secret))
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
| true | true |
f734bd64143a5c11cb97eb8dad029aafd37b792e | 951 | py | Python | setup.py | DavidNKraemer/WordleBot | 856108445ae881edf71d0e4360ec3219c9ed9fe4 | [
"MIT"
] | 1 | 2022-02-11T00:22:14.000Z | 2022-02-11T00:22:14.000Z | setup.py | DavidNKraemer/WordleBot | 856108445ae881edf71d0e4360ec3219c9ed9fe4 | [
"MIT"
] | 1 | 2022-02-07T02:35:49.000Z | 2022-02-15T14:27:57.000Z | setup.py | DavidNKraemer/Gym-Wordle | 856108445ae881edf71d0e4360ec3219c9ed9fe4 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# The PyPI long description is taken verbatim from the project README.
with open('README.md', 'r', encoding='utf-8') as fh:
    long_description = fh.read()
# Package metadata and build configuration for the gym_wordle distribution.
setup(
    name='gym_wordle',
    version='0.1.3',
    author='David Kraemer',
    author_email='david.kraemer@stonybrook.edu',
    description='OpenAI gym environment for training agents on Wordle',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/DavidNKraemer/Gym-Wordle',
    # Ship only the gym_wordle package and its subpackages.
    packages=find_packages(
        include=[
            'gym_wordle',
            'gym_wordle.*'
        ]
    ),
    # Bundle the word-list data files alongside the code.
    package_data={
        'gym_wordle': ['dictionary/*']
    },
    python_requires='>=3.7',
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires=[
        'numpy>=1.20',
        'gym==0.19',
        'sty==1.0',
    ],
)
| 26.416667 | 71 | 0.599369 | from setuptools import setup, find_packages
with open('README.md', 'r', encoding='utf-8') as fh:
long_description = fh.read()
setup(
name='gym_wordle',
version='0.1.3',
author='David Kraemer',
author_email='david.kraemer@stonybrook.edu',
description='OpenAI gym environment for training agents on Wordle',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/DavidNKraemer/Gym-Wordle',
packages=find_packages(
include=[
'gym_wordle',
'gym_wordle.*'
]
),
package_data={
'gym_wordle': ['dictionary/*']
},
python_requires='>=3.7',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'numpy>=1.20',
'gym==0.19',
'sty==1.0',
],
)
| true | true |
f734bda982fdfb5c124c2601234d24204182ffb0 | 4,957 | py | Python | pywikibot/editor.py | valhallasw/pywikibot-core | 32a8c3c1298a5cb077381fe202daefde82c1c5d3 | [
"MIT"
] | null | null | null | pywikibot/editor.py | valhallasw/pywikibot-core | 32a8c3c1298a5cb077381fe202daefde82c1c5d3 | [
"MIT"
] | null | null | null | pywikibot/editor.py | valhallasw/pywikibot-core | 32a8c3c1298a5cb077381fe202daefde82c1c5d3 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Text editor class for your favourite editor."""
from __future__ import unicode_literals
#
# (C) Gerrit Holl, 2004
# (C) Pywikibot team, 2004-2015
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: f734bda982fdfb5c124c2601234d24204182ffb0 $'
#
import codecs
import os
import subprocess
import tempfile
import pywikibot
from pywikibot import config
from pywikibot.tools import deprecated
try:
from pywikibot.userinterfaces import gui # noqa
except ImportError as e:
gui = e
class TextEditor(object):

    """Text editor wrapper around the editor configured in user-config.py."""

    def _command(self, file_name, text, jump_index=None):
        """Return the editor invocation (argv list) for *file_name*.

        If *jump_index* is given, translate it into a line/column pair and,
        for editors known to support it, append the command-line options
        that place the caret there.
        """
        if jump_index:
            # Some editors make it possible to mark occurrences of substrings,
            # or to jump to the line of the first occurrence.
            # TODO: Find a better solution than hardcoding these, e.g. a config
            # option.
            line = text[:jump_index].count('\n')
            column = jump_index - (text[:jump_index].rfind('\n') + 1)
        else:
            line = column = 0
        # Linux editors. We use startswith() because some users might use
        # parameters.
        if config.editor.startswith('kate'):
            command = ['-l', '%i' % (line + 1), '-c', '%i' % (column + 1)]
        elif config.editor.startswith('gedit'):
            command = ['+%i' % (line + 1)]  # seems not to support columns
        elif config.editor.startswith('emacs'):
            command = ['+%i' % (line + 1)]  # seems not to support columns
        elif config.editor.startswith('jedit'):
            command = ['+line:%i' % (line + 1)]  # seems not to support columns
        elif config.editor.startswith('vim'):
            command = ['+%i' % (line + 1)]  # seems not to support columns
        elif config.editor.startswith('nano'):
            command = ['+%i,%i' % (line + 1, column + 1)]
        # Windows editors
        elif config.editor.lower().endswith('notepad++.exe'):
            command = ['-n%i' % (line + 1)]  # seems not to support columns
        else:
            command = []
        # See T102465 for problems relating to using config.editor unparsed.
        command = [config.editor] + command + [file_name]
        pywikibot.log(u'Running editor: %s' % TextEditor._concat(command))
        return command

    @staticmethod
    def _concat(command):
        """Quote argv items that contain spaces and join them for logging."""
        return ' '.join("'{0}'".format(part) if ' ' in part else part
                        for part in command)

    @deprecated('_command (should not be used from the outside)')
    def command(self, tempFilename, text, jumpIndex=None):
        """Return editor selected in user-config.py."""
        return TextEditor._concat(self._command(tempFilename, text, jumpIndex))

    def edit(self, text, jumpIndex=None, highlight=None):
        """
        Call the editor and thus allows the user to change the text.

        Halts the thread's operation until the editor is closed.

        @param text: the text to be edited
        @type text: unicode
        @param jumpIndex: position at which to put the caret
        @type jumpIndex: int
        @param highlight: each occurrence of this substring will be highlighted
        @type highlight: unicode
        @return: the modified text, or None if the user didn't save the text
            file in his text editor
        @rtype: unicode or None
        """
        if config.editor:
            # BUGFIX: tempfile.mkstemp() returns an *open* OS-level file
            # descriptor that the previous code discarded, leaking one fd
            # (and one stray empty placeholder file) per edit.  Close the fd
            # immediately and remove the placeholder in the cleanup below.
            handle, base_name = tempfile.mkstemp()
            os.close(handle)
            tempFilename = '%s.%s' % (base_name,
                                      config.editor_filename_extension)
            try:
                with codecs.open(tempFilename, 'w',
                                 encoding=config.editor_encoding) as tempFile:
                    tempFile.write(text)
                creationDate = os.stat(tempFilename).st_mtime
                subprocess.call(self._command(tempFilename, text, jumpIndex))
                lastChangeDate = os.stat(tempFilename).st_mtime
                if lastChangeDate == creationDate:
                    # Nothing changed
                    return None
                else:
                    with codecs.open(tempFilename, 'r',
                                     encoding=config.editor_encoding) as temp_file:
                        newcontent = temp_file.read()
                    return newcontent
            finally:
                os.unlink(tempFilename)
                # Also remove the extension-less placeholder file created by
                # mkstemp() itself.
                if os.path.exists(base_name):
                    os.unlink(base_name)
        if isinstance(gui, ImportError):
            raise pywikibot.Error(
                'Could not load GUI modules: %s\nNo editor available.\n'
                'Set your favourite editor in user-config.py "editor", '
                'or install python packages tkinter and idlelib, which '
                'are typically part of Python but may be packaged separately '
                'on your platform.\n' % gui)
        return pywikibot.ui.editText(text, jumpIndex=jumpIndex, highlight=highlight)
| 39.031496 | 84 | 0.586443 |
from __future__ import unicode_literals
__version__ = '$Id: f734bda982fdfb5c124c2601234d24204182ffb0 $'
import codecs
import os
import subprocess
import tempfile
import pywikibot
from pywikibot import config
from pywikibot.tools import deprecated
try:
from pywikibot.userinterfaces import gui
except ImportError as e:
gui = e
class TextEditor(object):
def _command(self, file_name, text, jump_index=None):
if jump_index:
line = text[:jump_index].count('\n')
column = jump_index - (text[:jump_index].rfind('\n') + 1)
else:
line = column = 0
if config.editor.startswith('kate'):
command = ['-l', '%i' % (line + 1), '-c', '%i' % (column + 1)]
elif config.editor.startswith('gedit'):
command = ['+%i' % (line + 1)]
elif config.editor.startswith('emacs'):
command = ['+%i' % (line + 1)]
elif config.editor.startswith('jedit'):
command = ['+line:%i' % (line + 1)]
elif config.editor.startswith('vim'):
command = ['+%i' % (line + 1)]
elif config.editor.startswith('nano'):
command = ['+%i,%i' % (line + 1, column + 1)]
elif config.editor.lower().endswith('notepad++.exe'):
command = ['-n%i' % (line + 1)]
else:
command = []
command = [config.editor] + command + [file_name]
pywikibot.log(u'Running editor: %s' % TextEditor._concat(command))
return command
@staticmethod
def _concat(command):
return ' '.join("'{0}'".format(part) if ' ' in part else part
for part in command)
@deprecated('_command (should not be used from the outside)')
def command(self, tempFilename, text, jumpIndex=None):
return TextEditor._concat(self._command(tempFilename, text, jumpIndex))
def edit(self, text, jumpIndex=None, highlight=None):
if config.editor:
tempFilename = '%s.%s' % (tempfile.mkstemp()[1],
config.editor_filename_extension)
try:
with codecs.open(tempFilename, 'w',
encoding=config.editor_encoding) as tempFile:
tempFile.write(text)
creationDate = os.stat(tempFilename).st_mtime
subprocess.call(self._command(tempFilename, text, jumpIndex))
lastChangeDate = os.stat(tempFilename).st_mtime
if lastChangeDate == creationDate:
return None
else:
with codecs.open(tempFilename, 'r',
encoding=config.editor_encoding) as temp_file:
newcontent = temp_file.read()
return newcontent
finally:
os.unlink(tempFilename)
if isinstance(gui, ImportError):
raise pywikibot.Error(
'Could not load GUI modules: %s\nNo editor available.\n'
'Set your favourite editor in user-config.py "editor", '
'or install python packages tkinter and idlelib, which '
'are typically part of Python but may be packaged separately '
'on your platform.\n' % gui)
return pywikibot.ui.editText(text, jumpIndex=jumpIndex, highlight=highlight)
| true | true |
f734bee389dc8207340f0fc6a99ba2f167fd79e7 | 29,991 | py | Python | flax_models/t5x/train.py | muell-monster/google-research | 04d2024f4723bc4be3d639a668c19fb1f6a31478 | [
"Apache-2.0"
] | 1 | 2020-12-25T01:18:50.000Z | 2020-12-25T01:18:50.000Z | flax_models/t5x/train.py | thomascherickal/google-research | 294a888bbb6678ac255c6422fd703c325cbb0772 | [
"Apache-2.0"
] | null | null | null | flax_models/t5x/train.py | thomascherickal/google-research | 294a888bbb6678ac255c6422fd703c325cbb0772 | [
"Apache-2.0"
] | 1 | 2021-09-27T03:17:14.000Z | 2021-09-27T03:17:14.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script pre-trains or fine-tunes a Transformer using the T5 data pipeline."""
from concurrent.futures import thread
import functools
import importlib
import os
from typing import Any, Mapping, Sequence, Tuple
from absl import app
from absl import flags
from absl import logging
# Set Linen to add profiling information when constructing Modules.
# Must be set before flax imports.
# pylint:disable=g-import-not-at-top
os.environ['FLAX_PROFILE'] = 'true'
from flax import linen as nn
from flax import optim
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import lax
from jax import random
from jax.interpreters.sharded_jit import sharded_jit
import jax.numpy as jnp
import ml_collections
from ml_collections import config_flags
import numpy as np
import t5
from t5x import checkpoint_importer
from t5x import input_pipeline
from t5x import models
from t5x import partitions
from t5x import train_lib
import tensorflow as tf
# pylint:disable=g-long-lambda
FLAGS = flags.FLAGS
CFG = None
PyTreeDef = type(jax.tree_structure(None))
TransformerConfig = models.TransformerConfig
jax.config.parse_flags_with_absl()
flags.DEFINE_string(
'model_dir', default=None, help='Directory to store model data.')
flags.DEFINE_string(
'data_dir', default=None, help='Tensorflow datasets directory.')
config_flags.DEFINE_config_file(
name='config',
default='configs/t5_small_glue.py',
help_string='training config file.')
ConfigDict = ml_collections.ConfigDict
def get_configs(
    config
):
  """Build the (train, eval, predict) TransformerConfig triple.

  Args:
    config: The config dict for the experiment.

  Returns:
    A triple (train_config, eval_config, predict_config).
  """
  # The positional-embedding table must cover the longest sequence seen in
  # either training or evaluation.
  longest_sequence = max(config.max_input_length, config.max_target_length,
                         config.max_eval_input_length,
                         config.max_eval_target_length)
  train_config = TransformerConfig(
      vocab_size=config.vocab_size,
      output_vocab_size=config.vocab_size,
      share_embeddings=config.share_embeddings,
      logits_via_embedding=config.logits_via_embedding,
      dtype=jnp.bfloat16 if config.use_bfloat16 else jnp.float32,
      emb_dim=config.emb_dim,
      num_heads=config.num_heads,
      num_layers=config.num_layers,
      qkv_dim=config.qkv_dim,
      mlp_dim=config.mlp_dim,
      mlp_activations=config.mlp_activations,
      position_embeddings='relative',
      relative_attention_num_buckets=config.relative_attention_num_buckets,
      relative_attention_max_distance=config.relative_attention_max_distance,
      max_len=longest_sequence,
      dropout_rate=config.dropout_rate,
      attention_dropout_rate=config.attention_dropout_rate,
      deterministic=False,
      decode=False,
      kernel_init=nn.initializers.xavier_uniform(),
      bias_init=nn.initializers.normal(stddev=1e-6))
  # Evaluation shares the training config but disables dropout.
  eval_config = train_config.replace(deterministic=True)  # pytype: disable=attribute-error
  # Prediction additionally enables autoregressive decoding.
  predict_config = eval_config.replace(  # pytype: disable=attribute-error
      decode=True,
      max_decode_len=config.max_eval_target_length)
  return (train_config, eval_config, predict_config)
def get_initial_params(rng, config,
                       transformer_config,
                       optimizer_def):
  """Get the initial parameter tree."""
  # Initialize with dummy all-ones inputs/targets of the padded batch shapes;
  # only the shapes matter for parameter creation.
  # NOTE(review): the sequence lengths come from the module-level CFG rather
  # than the `config` argument — presumably they are the same object; confirm.
  input_shape = (config.batch_size, CFG.max_input_length)
  target_shape = (config.batch_size, CFG.max_target_length)
  initial_variables = models.Transformer(transformer_config).init(
      rng, jnp.ones(input_shape, jnp.float32),
      jnp.ones(target_shape, jnp.float32))
  # apply an optimizer to the parameters
  return optimizer_def.create(initial_variables['params'])
def main(argv):
global CFG
CFG = FLAGS.config
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Guarantee that the JAX bfloat16 extension is used rather than TF bfloat16.
_ = np.array(jnp.array([1.0], dtype=jnp.bfloat16))
# Use hardware RNG for bernoulli randoms in dropout mask creation.
if CFG.hardware_rng:
models.set_hardware_bernoulli()
if 'module_import' in CFG and CFG.module_import:
for module in CFG.module_import:
importlib.import_module(module)
if 'additional_task_cache_dirs' in CFG and CFG.additional_task_cache_dirs:
t5.data.add_global_cache_dirs(CFG.additional_task_cache_dirs)
num_partitions = CFG.num_partitions
topology = train_lib.compute_multihost_topology(num_partitions)
batch_size = CFG.batch_size
eval_batch_size = CFG.eval_batch_size
per_replica_set_eval_batch_size = eval_batch_size // topology.num_replica_sets
if batch_size % topology.num_replicas:
raise ValueError('Batch size must be divisible by the number of replicas.')
steps_per_epoch = CFG.steps_per_epoch
logging.info('steps per epoch: %d', steps_per_epoch)
broadcast = functools.partial(
train_lib.broadcast,
num_replicas=topology.per_replica_set_num_replicas,
num_partitions=topology.per_host_num_partitions,
devices=topology.this_host_device_assignment)
if jax.host_id() == 0:
tf.io.gfile.makedirs(FLAGS.model_dir)
tf.io.gfile.copy(FLAGS['config'].config_filename,
os.path.join(FLAGS.model_dir, 'config.py'),
overwrite=True)
train_summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, 'train'))
eval_summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, 'eval'))
else:
train_summary_writer = None
eval_summary_writer = None
# Write summaries in background thread to avoid blocking on device sync
if CFG.infeed:
# Infeed is currently synchronous, so do it in a background thread too
infeed_pool = thread.ThreadPoolExecutor(jax.local_device_count(), 'infeed')
(train_ds, eval_ds), eval_cache = input_pipeline.get_datasets_and_cache(
CFG, topology.num_replica_sets, topology.replica_set_id,
topology.per_replica_set_host_id)
vocab = input_pipeline.get_vocabulary(CFG.mixture_or_task_name)
encoder = vocab.tf_tokenizer
eos_id = vocab.tokenizer.eos_id()
def decode_tokens(toks,
eos_id = eos_id,
max_id = 32000):
"""Decode tokens back to unicode."""
del eos_id
# TODO(levskaya): T5 doesn't seem to emit EOS tokens? double check this
# is the best decoding function or just switch to using tf_decode.
# valid_toks = toks[:np.argmax(toks == eos_id) + 1].astype(np.int32)
valid_toks = toks.astype(np.int32)
valid_toks[valid_toks >= max_id] = 3
return encoder.detokenize(valid_toks).numpy().decode('utf-8')
logging.info('Initializing model, optimizer, and step functions.')
train_config, eval_config, predict_config = get_configs(CFG)
rng = random.PRNGKey(CFG.random_seed)
rng, init_rng = random.split(rng)
# This is used for infeed conversion from feature dict <--> tuple
train_keys = [
'inputs', 'targets', 'inputs_position', 'targets_position',
'inputs_segmentation', 'targets_segmentation'
]
device_train_input_shape = tuple([
(batch_size // topology.num_replicas,
CFG.max_input_length if 'inputs' in k else CFG.max_target_length)
for k in train_keys
])
learning_rate_fn = train_lib.create_learning_rate_scheduler(
factors=CFG.schedule,
base_learning_rate=CFG.learning_rate,
warmup_steps=CFG.warmup_steps)
# First, we only abstractly initialize the optimizer and model parameters,
# since the parameters may not even fit in device memory!
# TODO(jekbradbury): make optimizer_defs compare by value so it can be created
# in get_initial_params without causing pytree incompatibility
optimizer_def = optim.Adafactor(
CFG.learning_rate, decay_rate=0.8, step_offset=CFG.step_offset)
initialize_params_fn = functools.partial(
get_initial_params,
config=CFG,
transformer_config=eval_config,
optimizer_def=optimizer_def)
optimizer = jax.eval_shape(initialize_params_fn, init_rng)
# tuple-like pytree leaves for global_arg_shapes
optimizer_shapes = jax.tree_map(lambda x: partitions.Spec(*x.shape),
optimizer)
# Build parameter partition annotations for preserving partitions from train
# to eval.
if num_partitions > 1:
optimizer_partitions = optimizer.restore_state(
partitions.set_partitions(num_partitions, optimizer.state_dict()))
per_host_optimizer_partitions = optimizer.restore_state(
partitions.set_partitions(topology.per_host_num_partitions,
optimizer.state_dict()))
# Restore unreplicated optimizer + model state from last checkpoint.
# TODO(jekbradbury,levskaya): implement sharded native checkpoint/restore
existing_checkpoint_found = False
if CFG.restore_checkpoints:
existing_checkpoint_found = train_lib.checkpoint_exists(FLAGS.model_dir)
optimizer = checkpoints.restore_checkpoint(FLAGS.model_dir, optimizer)
# Import a pretrained-T5 checkpoint only if we didn't import a local
# "native" checkpoint (e.g. due to resuming a pre-empted finetuning run.)
# TODO(jekbradbury,levskaya): implement sharded T5 checkpoint/restore
if CFG.restore_t5_checkpoint and not existing_checkpoint_found:
optimizer = checkpoint_importer.restore_from_t5_checkpoint(
optimizer, CFG.restore_t5_checkpoint)
if CFG.restore_t5_checkpoint or existing_checkpoint_found:
if num_partitions > 1:
# Until checkpoint/restore is sharded, the restored checkpoint is global
# and we need to slice each sharded parameter into the chunk containing
# only the partitions that are present on this host.
def per_host_chunk(x, spec):
if spec is None or spec is x: # unsharded or not a parameter
return x
if spec[0] == 1:
dim_size = x.shape[1]
elif spec[1] == 1:
dim_size = x.shape[0]
else:
raise NotImplementedError()
chunk_size = (
dim_size * topology.per_host_num_partitions // num_partitions)
lower = topology.per_replica_set_host_id * chunk_size
upper = (topology.per_replica_set_host_id + 1) * chunk_size
if spec[0] == 1:
return x[:, lower:upper]
else:
return x[lower:upper]
optimizer = jax.tree_multimap(per_host_chunk, optimizer,
optimizer_partitions)
else:
# If pretraining and no checkpoint imported, we jit the (sharded-) init
# function to minimize fragmentation. We use the same pmap(sharded_jit)
# setup as the training step/loop to initialize everything "in-place" and
# avoid communication or OOM.
if num_partitions > 1:
initialize_params_fn = sharded_jit(
initialize_params_fn,
in_parts=None,
local_in_parts=None,
out_parts=optimizer_partitions,
local_out_parts=per_host_optimizer_partitions,
# devices=one_replica_device_assignment,
)
initialize_params_fn = jax.pmap(
initialize_params_fn,
'batch',
in_axes=0,
axis_size=topology.num_replicas,
devices=topology.device_assignment)
init_rng = broadcast(init_rng)
optimizer = initialize_params_fn(init_rng)
# We maintain the optimizer in unbroadcasted form (i.e. with no leading
# replica axis). This is equivalent to the as-yet-nonexistent pmap kwarg
# out_axes=None.
optimizer = train_lib.unbroadcast(optimizer)
else:
optimizer = jax.jit(initialize_params_fn)(init_rng)
# ---------------------------------------------------------------------------
# Compile multidevice versions of train/eval/predict step and cache init fn.
# ---------------------------------------------------------------------------
# We can use either a single train-step for a host training loop:
# train_step(optimizer, batch, prev_metrics, dropout_rng, **kwargs)
# --> new_optimizer, metrics, new_dropout_rng
def p_train_step(optimizer, batch,
prev_metrics,
dropout_rng):
return train_lib.train_step(
optimizer,
batch,
prev_metrics,
dropout_rng,
config=train_config,
learning_rate_fn=learning_rate_fn,
num_microbatches=CFG.microbatches,
label_smoothing=CFG.label_smoothing,
z_loss=CFG.z_loss,
use_bfloat16=CFG.use_bfloat16)
if num_partitions > 1:
p_train_step = sharded_jit(
p_train_step,
in_parts=(optimizer_partitions, None, None, None),
local_in_parts=(per_host_optimizer_partitions, None, None, None),
out_parts=(optimizer_partitions, None, None),
local_out_parts=(per_host_optimizer_partitions, None, None))
# TODO(levskaya): the in_axes spec below might be wrong, double-check.
p_train_step = jax.pmap(
p_train_step,
axis_name='batch',
in_axes=(None, 0, 0, 0),
donate_argnums=(0,),
global_arg_shapes=(optimizer_shapes, None, None, None),
axis_size=topology.num_replicas,
devices=topology.device_assignment) # pytype: disable=wrong-arg-types
# OR, we use an on-device loop that feeds the training step via infeed queue.
def device_train_loop_cond(
args
):
"""Stopping criterion for on-device loop."""
_, _, _, _, step, epoch = args
return step // steps_per_epoch == epoch
def device_train_loop_body(
args
):
"""On-device loop body."""
optimizer, dropout_rngs, metrics, token, step, epoch = args
# Ordering input data from infeed requires threading a symbolic token
# through the computation.
input_data, token = lax.infeed(
token,
shape=tuple(
[jax.ShapedArray(s, jnp.int32) for s in device_train_input_shape]))
# Rebuild input dict from infeed data tuple.
batch = {k: v for k, v in zip(train_keys, input_data)}
# Run the train_step function and return the loop state.
optimizer, metrics, dropout_rngs = train_lib.train_step(
optimizer,
batch,
metrics,
dropout_rngs,
train_config,
learning_rate_fn,
num_microbatches=CFG.microbatches,
label_smoothing=CFG.label_smoothing,
z_loss=CFG.z_loss)
step += 1
return optimizer, dropout_rngs, metrics, token, step, epoch
def device_train_loop(optimizer, dropout_rngs,
metrics, step,
epoch):
# Create symbolic token for threading infeed data.
token = lax.create_token(step)
# Run on-device loop.
optimizer, dropout_rngs, metrics, _, step, _ = lax.while_loop(
device_train_loop_cond, device_train_loop_body,
(optimizer, dropout_rngs, metrics, token, step, epoch))
return optimizer, dropout_rngs, metrics, step
if num_partitions > 1:
device_train_loop = sharded_jit(
device_train_loop,
in_parts=(optimizer_partitions, None, None, None, None),
local_in_parts=(per_host_optimizer_partitions, None, None, None, None),
out_parts=(optimizer_partitions, None, None, None),
local_out_parts=(per_host_optimizer_partitions, None, None, None))
p_train_epoch = jax.pmap(
device_train_loop,
axis_name='batch',
in_axes=(None, 0, 0, None, None),
donate_argnums=(0,),
global_arg_shapes=(optimizer_shapes, None, None, None, None),
axis_size=topology.num_replicas,
devices=topology.device_assignment) # pytype: disable=wrong-arg-types
# Reduction psum for metric data.
def p_allreduce_metrics(x):
return lax.psum(x, axis_name='batch')
if num_partitions > 1:
p_allreduce_metrics = sharded_jit(
p_allreduce_metrics,
in_parts=None,
local_in_parts=None,
out_parts=None,
local_out_parts=None,
num_partitions=num_partitions,
local_num_partitions=topology.per_host_num_partitions)
p_allreduce_metrics = jax.pmap(
p_allreduce_metrics,
axis_name='batch',
global_arg_shapes=None,
axis_size=topology.num_replicas,
devices=topology.device_assignment)
# Training evaluation computation.
# eval_step(params, batch, config, label_smoothing=0.0) --> metrics
def p_eval_step(params, batch):
return train_lib.eval_step(
params, batch, config=eval_config, label_smoothing=CFG.label_smoothing)
if num_partitions > 1:
p_eval_step = sharded_jit(
p_eval_step,
in_parts=(optimizer_partitions.target, None),
local_in_parts=(per_host_optimizer_partitions.target, None),
out_parts=None,
local_out_parts=None)
p_eval_step = jax.pmap(
p_eval_step,
axis_name='batch',
in_axes=(None, 0),
global_arg_shapes=(optimizer_shapes.target, None),
axis_size=topology.num_replicas,
devices=topology.device_assignment) # pytype: disable=wrong-arg-types
# Fast autoregressive decoding loop.
# For inference and model evaluation.
# predict_step(inputs, params,
# eos_id, max_decode_len, config, beam_size=4) --> beam_seqs
  def p_pred_step(inputs, params):
    """Beam-search decode one batch; closes over eos_id, predict_config, CFG."""
    return train_lib.predict_step(inputs, params, eos_id,
                                  CFG.max_eval_target_length, predict_config,
                                  CFG.beam_size)
if num_partitions > 1:
p_pred_step = sharded_jit(
p_pred_step,
in_parts=(None, optimizer_partitions.target),
local_in_parts=(None, per_host_optimizer_partitions.target),
out_parts=None,
local_out_parts=None)
p_pred_step = jax.pmap(
p_pred_step,
axis_name='batch',
in_axes=(0, None),
global_arg_shapes=(None, optimizer_shapes.target),
axis_size=topology.num_replicas,
devices=topology.device_assignment) # pytype: disable=wrong-arg-types
# ---------------------------------------------------------------------------
# Main Train Loop
# ---------------------------------------------------------------------------
# We init the first set of dropout PRNG keys, but update it afterwards inside
# the main pmap'd training update for performance.
# There should be a unique dropout key for each replica represented on this
# host, but the key should be the same for the same replica on other hosts.
# Again, this is what the replica set abstraction is for.
dropout_rngs = random.split(
random.fold_in(rng, topology.replica_set_id),
topology.per_replica_set_num_replicas)
# restore step from last checkpoint
host_step = int(optimizer.state.step)
empty_metrics = broadcast({
'loss': 0.0,
'accuracy': 0.0,
'learning_rate': 0.0,
'denominator': 0.0
})
if CFG.infeed:
# TODO(jekbradbury): support something like this for the Python-loop case
logging.info('Precompiling training loop and moving optimizer to device.')
optimizer, _, metrics, _ = p_train_epoch(optimizer, dropout_rngs,
empty_metrics,
jnp.array(0, dtype=jnp.int32), 1)
optimizer = train_lib.unbroadcast(optimizer)
metrics['loss'].block_until_ready()
logging.info('Starting training loop.')
local_devices = jax.local_devices()
device_step = broadcast(host_step)
first_epoch = host_step // steps_per_epoch
# Main Loop over "epochs".
train_iter = train_ds.as_numpy_iterator()
for epoch in range(first_epoch, first_epoch + CFG.num_epochs):
metrics = empty_metrics
# NOTE: 'optimizer' is unbroadcast by construction at initialization or
# when loading a checkpoint. It is maintained in 'unbroadcast' state to
# enable the XLA cross-replica sharding optimization. The broadcasting is
# handled automatically by the pmap'd functions that use it.
# Gather all task evaluation metrics.
logging.info('Evaluating tasks.')
if epoch == first_epoch + 1:
train_lib.sync_devices()
for task in eval_cache.tasks:
logging.info('Evaluating task %s', task.name)
all_predicted, all_bs = [], []
for pred_batch in eval_cache.preprocessed_examples[task.name]:
# Handle final odd-sized batch by padding instead of dropping it.
input_batch, unpadded_batch_size = train_lib.pad_batch_to_size(
pred_batch['inputs'], per_replica_set_eval_batch_size)
all_bs.append(unpadded_batch_size)
# Split batch dimensions for pmap.
input_batch = jax.tree_map(
lambda x: x.reshape(
(topology.per_replica_set_num_replicas, -1) + x.shape[1:]),
input_batch)
# Run fast inference on batch.
all_predicted.append(p_pred_step(input_batch, optimizer.target))
# Pad out the number of batches so each host has the same number.
max_host_batch_number = np.max(
eval_cache.preprocessed_batch_sizes[task.name])
batch_shortfall = max_host_batch_number - len(all_predicted)
if batch_shortfall > 0:
# TODO(levskaya): Fix for case of entirely empty all_predicted.
# To make sure the cross-host barriers work, we run the program the same
# number of times on all hosts. The results of this call is ignored, and
# the predictions are populated with zeros instead.
p_pred_step(input_batch, optimizer.target) # Dummy call.
all_predicted.extend([jnp.zeros_like(all_predicted[0])] *
batch_shortfall)
all_bs.extend([0] * batch_shortfall)
all_predicted = jnp.concatenate(all_predicted)
all_bs = jnp.array(all_bs)
# Collect all batches from across hosts and reverse sharding.
all_predicted = train_lib.host_allgather(
all_predicted, topology.num_replica_sets, topology.replica_set_id,
topology.per_replica_set_host_id == 0)
seqlength = all_predicted.shape[-1]
total_examples = np.sum(
train_lib.host_allgather(all_bs, topology.num_replica_sets,
topology.replica_set_id,
topology.per_replica_set_host_id == 0))
del all_bs
assert total_examples == len(eval_cache.examples[task.name]), (
'Total number of batches incorrect for task %s.' % task.name)
# De-shard the collected predicted tokens and remove padding.
all_predicted = np.transpose(all_predicted, (1, 2, 0, 3)).reshape(
-1, seqlength)[:total_examples]
# We now run the post-processing and metric-fns on a single host.
if jax.host_id() == 0:
assert eval_summary_writer
raw_predictions = []
for tokens in all_predicted:
raw_predictions.append(decode_tokens(tokens))
# post-process predictions for metric fns
predictions = [
task.postprocess_fn(p, example=ex)
for p, ex in zip(raw_predictions, eval_cache.examples[task.name])
]
for metric_fn in task.metric_fns:
scores = metric_fn(eval_cache.targets[task.name], predictions)
for metric_name, metric_value in scores.items():
tag = f'eval/{task.name}/{metric_name}'
eval_summary_writer.scalar(tag, metric_value, host_step)
logging.info('EVAL %s at step %d: %.3f', tag, host_step,
metric_value)
eval_summary_writer.flush()
# Save text samples for tensorboard.
exemplars = ''
for n in np.random.choice(np.arange(len(predictions)), 8):
tgt_txt = tf.compat.as_text(
eval_cache.examples[task.name][n]['targets_plaintext'])
pred_txt = raw_predictions[n]
exemplars += (f'{eval_cache.inputs[task.name][n]}\n\n'
f'target: {tgt_txt}\n\n'
f'prediction: {pred_txt}\n\n')
eval_summary_writer.text(f'{task.name} samples', exemplars, host_step)
eval_summary_writer.flush()
# Take an Xprof trace after the first loop has compiled everything.
if epoch == first_epoch + 1:
train_lib.sync_devices()
# For on-device loop, we launch the computation before feeding data.
logging.info('BEGIN Train loop.')
if CFG.infeed:
optimizer, dropout_rngs, metrics, device_step = p_train_epoch(
optimizer, dropout_rngs, metrics, train_lib.unbroadcast(device_step),
epoch)
optimizer = train_lib.unbroadcast(optimizer)
# Epoch loop.
while int(host_step // steps_per_epoch) == epoch:
batch = next(train_iter)
batch = jax.tree_map(
lambda x: x.reshape(
(topology.per_replica_set_num_replicas, -1) + x.shape[1:]), batch)
# Feed the on-device training loop.
if CFG.infeed:
for i, device in enumerate(local_devices):
# When using infeed to provide data to the computation, we're on our
# own for feeding the right values to the right devices. Each device
# should get the minibatch corresponding to its replica, a slice of
# the larger batch corresponding to the host's replica set.
if device.platform == 'tpu':
device_coords = (*device.coords, device.id % 2)
else:
device_coords = (device.host_id, i)
per_replica_set_device_coords = tuple(
dc % prsm
for dc, prsm in zip(device_coords, topology.per_replica_set_mesh))
per_replica_set_replica_coords = tuple(
prsdc // prm for prsdc, prm in zip(per_replica_set_device_coords,
topology.per_replica_mesh))
per_replica_set_replica_id = 0
for prsm, prm, prsrc in zip(topology.per_replica_set_mesh,
topology.per_replica_mesh,
per_replica_set_replica_coords):
per_replica_set_replica_id = (
per_replica_set_replica_id * prsm // prm + prsrc)
input_tuple = tuple(
[batch[k][per_replica_set_replica_id] for k in train_keys])
# Safety check: infeed does not check shape or types but requires
# them to agree with on-device spec, otherwise the queue and program
# stalls.
tuple_shapes = jax.tree_map(jnp.shape, input_tuple)
tuple_dtypes = jax.tree_map(lambda x: x.dtype, input_tuple)
assert tuple_shapes == device_train_input_shape, (
'infeed shape error %s != %s' %
(tuple_shapes, device_train_input_shape))
assert tuple(set(tuple_dtypes)) == (jnp.int32,), \
('infeed dtype error %s not all of type %s' % (
tuple_dtypes, jnp.int32))
infeed_pool.submit(
functools.partial(device.transfer_to_infeed, input_tuple))
# Host training loop.
else:
optimizer, metrics, dropout_rngs = p_train_step(optimizer, batch,
metrics, dropout_rngs)
optimizer = train_lib.unbroadcast(optimizer)
host_step += 1
logging.info('END Train loop.')
# Maybe save a checkpoint on one host.
if (CFG.save_checkpoints and
epoch % CFG.checkpoint_freq == CFG.checkpoint_freq - 1 and
jax.host_id() == 0):
checkpoints.save_checkpoint(FLAGS.model_dir, optimizer, host_step)
# Gather training metrics.
metrics = p_allreduce_metrics(metrics)
metrics = jax.tree_map(lambda x: jax.device_get(x[0]), metrics)
denominator = metrics.pop('denominator')
summary = jax.tree_map(lambda x: x / denominator, metrics) # pylint: disable=cell-var-from-loop
logging.info('train in step: %s, %s', host_step, summary)
if jax.host_id() == 0:
assert train_summary_writer
for key, val in summary.items():
train_summary_writer.scalar(key, val, host_step)
train_summary_writer.flush()
# Gather training evaluation metrics.
logging.info('Gathering training evaluation metrics.')
eval_metrics = []
eval_iter = eval_ds.as_numpy_iterator()
for _, eval_batch in zip(range(CFG.num_eval_steps), eval_iter):
eval_batch = jax.tree_map(
lambda x: x.reshape(
(topology.per_replica_set_num_replicas, -1) + x.shape[1:]),
eval_batch)
metrics = p_eval_step(optimizer.target, eval_batch)
eval_metrics.append(metrics)
# average metrics across devices
eval_metrics = p_allreduce_metrics(eval_metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
# average metrics across steps
eval_metrics = jax.tree_map(np.sum, eval_metrics)
eval_denominator = eval_metrics.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics)
logging.info('eval in step: %s, %s', host_step, eval_summary)
if jax.host_id() == 0:
assert eval_summary_writer
for key, val in eval_summary.items():
eval_summary_writer.scalar(key, val, host_step)
eval_summary_writer.flush()
# Wait until computations are done before exiting
logging.info('Finished.')
train_lib.sync_devices()
# Shut down the infeed threadpool.
if CFG.infeed:
infeed_pool.shutdown()
if __name__ == '__main__':
app.run(main)
| 40.419137 | 100 | 0.681038 |
from concurrent.futures import thread
import functools
import importlib
import os
from typing import Any, Mapping, Sequence, Tuple
from absl import app
from absl import flags
from absl import logging
os.environ['FLAX_PROFILE'] = 'true'
from flax import linen as nn
from flax import optim
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import lax
from jax import random
from jax.interpreters.sharded_jit import sharded_jit
import jax.numpy as jnp
import ml_collections
from ml_collections import config_flags
import numpy as np
import t5
from t5x import checkpoint_importer
from t5x import input_pipeline
from t5x import models
from t5x import partitions
from t5x import train_lib
import tensorflow as tf
FLAGS = flags.FLAGS
CFG = None
PyTreeDef = type(jax.tree_structure(None))
TransformerConfig = models.TransformerConfig
jax.config.parse_flags_with_absl()
flags.DEFINE_string(
'model_dir', default=None, help='Directory to store model data.')
flags.DEFINE_string(
'data_dir', default=None, help='Tensorflow datasets directory.')
config_flags.DEFINE_config_file(
name='config',
default='configs/t5_small_glue.py',
help_string='training config file.')
ConfigDict = ml_collections.ConfigDict
def get_configs(
    config
):
  """Derive the train/eval/predict ``TransformerConfig`` triple from ``config``.

  Args:
    config: run configuration object exposing model hyperparameters
      (vocab_size, emb_dim, dropout rates, sequence-length limits, etc.).

  Returns:
    A ``(train_config, eval_config, predict_config)`` tuple. The eval config
    is the train config with ``deterministic=True``; the predict config
    additionally enables decoding with ``max_decode_len`` set to the eval
    target length.
  """
  # Hoist the derived values so the constructor call below stays declarative.
  max_len = max(config.max_input_length, config.max_target_length,
                config.max_eval_input_length, config.max_eval_target_length)
  dtype = jnp.bfloat16 if config.use_bfloat16 else jnp.float32
  model_kwargs = dict(
      vocab_size=config.vocab_size,
      output_vocab_size=config.vocab_size,
      share_embeddings=config.share_embeddings,
      logits_via_embedding=config.logits_via_embedding,
      dtype=dtype,
      emb_dim=config.emb_dim,
      num_heads=config.num_heads,
      num_layers=config.num_layers,
      qkv_dim=config.qkv_dim,
      mlp_dim=config.mlp_dim,
      mlp_activations=config.mlp_activations,
      position_embeddings='relative',
      relative_attention_num_buckets=config.relative_attention_num_buckets,
      relative_attention_max_distance=config.relative_attention_max_distance,
      max_len=max_len,
      dropout_rate=config.dropout_rate,
      attention_dropout_rate=config.attention_dropout_rate,
      deterministic=False,
      decode=False,
      kernel_init=nn.initializers.xavier_uniform(),
      bias_init=nn.initializers.normal(stddev=1e-6))
  train_config = TransformerConfig(**model_kwargs)
  # Eval shares all hyperparameters but disables dropout/randomness.
  eval_config = train_config.replace(deterministic=True)
  # Predict additionally switches on autoregressive decoding.
  predict_config = eval_config.replace(
      decode=True,
      max_decode_len=config.max_eval_target_length)
  return (train_config, eval_config, predict_config)
def get_initial_params(rng, config,
                       transformer_config,
                       optimizer_def):
  """Initialize model parameters and wrap them in a fresh optimizer.

  Args:
    rng: PRNG key used for parameter initialization.
    config: run configuration providing ``batch_size`` (sequence lengths come
      from the module-level ``CFG``).
    transformer_config: ``TransformerConfig`` used to build the model.
    optimizer_def: Flax optimizer definition whose ``create`` wraps the params.

  Returns:
    An optimizer instance holding the freshly initialized parameters.
  """
  model = models.Transformer(transformer_config)
  # Dummy all-ones batches are enough to trace shapes for initialization.
  dummy_inputs = jnp.ones((config.batch_size, CFG.max_input_length),
                          jnp.float32)
  dummy_targets = jnp.ones((config.batch_size, CFG.max_target_length),
                           jnp.float32)
  variables = model.init(rng, dummy_inputs, dummy_targets)
  return optimizer_def.create(variables['params'])
def main(argv):
global CFG
CFG = FLAGS.config
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
_ = np.array(jnp.array([1.0], dtype=jnp.bfloat16))
if CFG.hardware_rng:
models.set_hardware_bernoulli()
if 'module_import' in CFG and CFG.module_import:
for module in CFG.module_import:
importlib.import_module(module)
if 'additional_task_cache_dirs' in CFG and CFG.additional_task_cache_dirs:
t5.data.add_global_cache_dirs(CFG.additional_task_cache_dirs)
num_partitions = CFG.num_partitions
topology = train_lib.compute_multihost_topology(num_partitions)
batch_size = CFG.batch_size
eval_batch_size = CFG.eval_batch_size
per_replica_set_eval_batch_size = eval_batch_size // topology.num_replica_sets
if batch_size % topology.num_replicas:
raise ValueError('Batch size must be divisible by the number of replicas.')
steps_per_epoch = CFG.steps_per_epoch
logging.info('steps per epoch: %d', steps_per_epoch)
broadcast = functools.partial(
train_lib.broadcast,
num_replicas=topology.per_replica_set_num_replicas,
num_partitions=topology.per_host_num_partitions,
devices=topology.this_host_device_assignment)
if jax.host_id() == 0:
tf.io.gfile.makedirs(FLAGS.model_dir)
tf.io.gfile.copy(FLAGS['config'].config_filename,
os.path.join(FLAGS.model_dir, 'config.py'),
overwrite=True)
train_summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, 'train'))
eval_summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, 'eval'))
else:
train_summary_writer = None
eval_summary_writer = None
if CFG.infeed:
infeed_pool = thread.ThreadPoolExecutor(jax.local_device_count(), 'infeed')
(train_ds, eval_ds), eval_cache = input_pipeline.get_datasets_and_cache(
CFG, topology.num_replica_sets, topology.replica_set_id,
topology.per_replica_set_host_id)
vocab = input_pipeline.get_vocabulary(CFG.mixture_or_task_name)
encoder = vocab.tf_tokenizer
eos_id = vocab.tokenizer.eos_id()
  def decode_tokens(toks,
                    eos_id = eos_id,
                    max_id = 32000):
    """Detokenize an array of token ids into a UTF-8 string.

    Args:
      toks: array of token ids, convertible to int32.
      eos_id: unused; kept in the signature but deleted below, so the output
        is NOT truncated at the EOS token.
      max_id: ids >= max_id are replaced by 3 before detokenizing.
        NOTE(review): 3 is presumably the vocabulary's UNK/fallback id —
        confirm against the SentencePiece model.

    Returns:
      The decoded string for all tokens.
    """
    del eos_id  # EOS truncation intentionally disabled; see note below.
    # TODO: decide whether truncating at EOS is the right behavior, or
    # switch to using tf_decode. The truncating variant would be:
    # valid_toks = toks[:np.argmax(toks == eos_id) + 1].astype(np.int32)
    valid_toks = toks.astype(np.int32)
    valid_toks[valid_toks >= max_id] = 3  # clamp out-of-vocabulary ids
    return encoder.detokenize(valid_toks).numpy().decode('utf-8')
logging.info('Initializing model, optimizer, and step functions.')
train_config, eval_config, predict_config = get_configs(CFG)
rng = random.PRNGKey(CFG.random_seed)
rng, init_rng = random.split(rng)
# This is used for infeed conversion from feature dict <--> tuple
train_keys = [
'inputs', 'targets', 'inputs_position', 'targets_position',
'inputs_segmentation', 'targets_segmentation'
]
device_train_input_shape = tuple([
(batch_size // topology.num_replicas,
CFG.max_input_length if 'inputs' in k else CFG.max_target_length)
for k in train_keys
])
learning_rate_fn = train_lib.create_learning_rate_scheduler(
factors=CFG.schedule,
base_learning_rate=CFG.learning_rate,
warmup_steps=CFG.warmup_steps)
# First, we only abstractly initialize the optimizer and model parameters,
# since the parameters may not even fit in device memory!
# TODO(jekbradbury): make optimizer_defs compare by value so it can be created
# in get_initial_params without causing pytree incompatibility
optimizer_def = optim.Adafactor(
CFG.learning_rate, decay_rate=0.8, step_offset=CFG.step_offset)
initialize_params_fn = functools.partial(
get_initial_params,
config=CFG,
transformer_config=eval_config,
optimizer_def=optimizer_def)
optimizer = jax.eval_shape(initialize_params_fn, init_rng)
# tuple-like pytree leaves for global_arg_shapes
optimizer_shapes = jax.tree_map(lambda x: partitions.Spec(*x.shape),
optimizer)
# Build parameter partition annotations for preserving partitions from train
# to eval.
if num_partitions > 1:
optimizer_partitions = optimizer.restore_state(
partitions.set_partitions(num_partitions, optimizer.state_dict()))
per_host_optimizer_partitions = optimizer.restore_state(
partitions.set_partitions(topology.per_host_num_partitions,
optimizer.state_dict()))
# Restore unreplicated optimizer + model state from last checkpoint.
# TODO(jekbradbury,levskaya): implement sharded native checkpoint/restore
existing_checkpoint_found = False
if CFG.restore_checkpoints:
existing_checkpoint_found = train_lib.checkpoint_exists(FLAGS.model_dir)
optimizer = checkpoints.restore_checkpoint(FLAGS.model_dir, optimizer)
# Import a pretrained-T5 checkpoint only if we didn't import a local
if CFG.restore_t5_checkpoint and not existing_checkpoint_found:
optimizer = checkpoint_importer.restore_from_t5_checkpoint(
optimizer, CFG.restore_t5_checkpoint)
if CFG.restore_t5_checkpoint or existing_checkpoint_found:
if num_partitions > 1:
def per_host_chunk(x, spec):
if spec is None or spec is x:
return x
if spec[0] == 1:
dim_size = x.shape[1]
elif spec[1] == 1:
dim_size = x.shape[0]
else:
raise NotImplementedError()
chunk_size = (
dim_size * topology.per_host_num_partitions // num_partitions)
lower = topology.per_replica_set_host_id * chunk_size
upper = (topology.per_replica_set_host_id + 1) * chunk_size
if spec[0] == 1:
return x[:, lower:upper]
else:
return x[lower:upper]
optimizer = jax.tree_multimap(per_host_chunk, optimizer,
optimizer_partitions)
else:
if num_partitions > 1:
initialize_params_fn = sharded_jit(
initialize_params_fn,
in_parts=None,
local_in_parts=None,
out_parts=optimizer_partitions,
local_out_parts=per_host_optimizer_partitions,
)
initialize_params_fn = jax.pmap(
initialize_params_fn,
'batch',
in_axes=0,
axis_size=topology.num_replicas,
devices=topology.device_assignment)
init_rng = broadcast(init_rng)
optimizer = initialize_params_fn(init_rng)
optimizer = train_lib.unbroadcast(optimizer)
else:
optimizer = jax.jit(initialize_params_fn)(init_rng)
  def p_train_step(optimizer, batch,
                   prev_metrics,
                   dropout_rng):
    """One host-driven training step; delegates to train_lib.train_step.

    Closes over train_config, learning_rate_fn and CFG. Wrapped below with
    sharded_jit (when partitioned) and jax.pmap before use.
    """
    return train_lib.train_step(
        optimizer,
        batch,
        prev_metrics,
        dropout_rng,
        config=train_config,
        learning_rate_fn=learning_rate_fn,
        num_microbatches=CFG.microbatches,
        label_smoothing=CFG.label_smoothing,
        z_loss=CFG.z_loss,
        use_bfloat16=CFG.use_bfloat16)
if num_partitions > 1:
p_train_step = sharded_jit(
p_train_step,
in_parts=(optimizer_partitions, None, None, None),
local_in_parts=(per_host_optimizer_partitions, None, None, None),
out_parts=(optimizer_partitions, None, None),
local_out_parts=(per_host_optimizer_partitions, None, None))
p_train_step = jax.pmap(
p_train_step,
axis_name='batch',
in_axes=(None, 0, 0, 0),
donate_argnums=(0,),
global_arg_shapes=(optimizer_shapes, None, None, None),
axis_size=topology.num_replicas,
devices=topology.device_assignment)
  def device_train_loop_cond(
      args
  ):
    """while_loop predicate: keep stepping while still inside this epoch."""
    _, _, _, _, step, epoch = args
    return step // steps_per_epoch == epoch
  def device_train_loop_body(
      args
  ):
    """while_loop body: pull one batch from infeed and run a train step.

    The carried state is (optimizer, dropout_rngs, metrics, token, step,
    epoch); the infeed token threads ordering between iterations.
    """
    optimizer, dropout_rngs, metrics, token, step, epoch = args
    # Receive one minibatch from the host via the on-device infeed queue;
    # shapes/dtypes must match what the host enqueues (int32 tuples).
    input_data, token = lax.infeed(
        token,
        shape=tuple(
            [jax.ShapedArray(s, jnp.int32) for s in device_train_input_shape]))
    # Rebuild the feature dict from the positional infeed tuple.
    batch = {k: v for k, v in zip(train_keys, input_data)}
    optimizer, metrics, dropout_rngs = train_lib.train_step(
        optimizer,
        batch,
        metrics,
        dropout_rngs,
        train_config,
        learning_rate_fn,
        num_microbatches=CFG.microbatches,
        label_smoothing=CFG.label_smoothing,
        z_loss=CFG.z_loss)
    step += 1
    return optimizer, dropout_rngs, metrics, token, step, epoch
  def device_train_loop(optimizer, dropout_rngs,
                        metrics, step,
                        epoch):
    """Run a whole epoch on-device via lax.while_loop over infeed batches.

    Returns the updated (optimizer, dropout_rngs, metrics, step); the infeed
    token and epoch are internal loop state and are dropped.
    """
    token = lax.create_token(step)
    optimizer, dropout_rngs, metrics, _, step, _ = lax.while_loop(
        device_train_loop_cond, device_train_loop_body,
        (optimizer, dropout_rngs, metrics, token, step, epoch))
    return optimizer, dropout_rngs, metrics, step
if num_partitions > 1:
device_train_loop = sharded_jit(
device_train_loop,
in_parts=(optimizer_partitions, None, None, None, None),
local_in_parts=(per_host_optimizer_partitions, None, None, None, None),
out_parts=(optimizer_partitions, None, None, None),
local_out_parts=(per_host_optimizer_partitions, None, None, None))
p_train_epoch = jax.pmap(
device_train_loop,
axis_name='batch',
in_axes=(None, 0, 0, None, None),
donate_argnums=(0,),
global_arg_shapes=(optimizer_shapes, None, None, None, None),
axis_size=topology.num_replicas,
devices=topology.device_assignment)
  def p_allreduce_metrics(x):
    """Sum the metrics pytree `x` across all replicas on the 'batch' pmap axis."""
    return lax.psum(x, axis_name='batch')
if num_partitions > 1:
p_allreduce_metrics = sharded_jit(
p_allreduce_metrics,
in_parts=None,
local_in_parts=None,
out_parts=None,
local_out_parts=None,
num_partitions=num_partitions,
local_num_partitions=topology.per_host_num_partitions)
p_allreduce_metrics = jax.pmap(
p_allreduce_metrics,
axis_name='batch',
global_arg_shapes=None,
axis_size=topology.num_replicas,
devices=topology.device_assignment)
  def p_eval_step(params, batch):
    """Compute eval metrics for one batch; closes over eval_config and CFG."""
    return train_lib.eval_step(
        params, batch, config=eval_config, label_smoothing=CFG.label_smoothing)
if num_partitions > 1:
p_eval_step = sharded_jit(
p_eval_step,
in_parts=(optimizer_partitions.target, None),
local_in_parts=(per_host_optimizer_partitions.target, None),
out_parts=None,
local_out_parts=None)
p_eval_step = jax.pmap(
p_eval_step,
axis_name='batch',
in_axes=(None, 0),
global_arg_shapes=(optimizer_shapes.target, None),
axis_size=topology.num_replicas,
devices=topology.device_assignment)
  def p_pred_step(inputs, params):
    """Beam-search decode one batch; closes over eos_id, predict_config, CFG."""
    return train_lib.predict_step(inputs, params, eos_id,
                                  CFG.max_eval_target_length, predict_config,
                                  CFG.beam_size)
if num_partitions > 1:
p_pred_step = sharded_jit(
p_pred_step,
in_parts=(None, optimizer_partitions.target),
local_in_parts=(None, per_host_optimizer_partitions.target),
out_parts=None,
local_out_parts=None)
p_pred_step = jax.pmap(
p_pred_step,
axis_name='batch',
in_axes=(0, None),
global_arg_shapes=(None, optimizer_shapes.target),
axis_size=topology.num_replicas,
devices=topology.device_assignment)
# There should be a unique dropout key for each replica represented on this
# host, but the key should be the same for the same replica on other hosts.
# Again, this is what the replica set abstraction is for.
dropout_rngs = random.split(
random.fold_in(rng, topology.replica_set_id),
topology.per_replica_set_num_replicas)
# restore step from last checkpoint
host_step = int(optimizer.state.step)
empty_metrics = broadcast({
'loss': 0.0,
'accuracy': 0.0,
'learning_rate': 0.0,
'denominator': 0.0
})
if CFG.infeed:
# TODO(jekbradbury): support something like this for the Python-loop case
logging.info('Precompiling training loop and moving optimizer to device.')
optimizer, _, metrics, _ = p_train_epoch(optimizer, dropout_rngs,
empty_metrics,
jnp.array(0, dtype=jnp.int32), 1)
optimizer = train_lib.unbroadcast(optimizer)
metrics['loss'].block_until_ready()
logging.info('Starting training loop.')
local_devices = jax.local_devices()
device_step = broadcast(host_step)
first_epoch = host_step // steps_per_epoch
# Main Loop over "epochs".
train_iter = train_ds.as_numpy_iterator()
for epoch in range(first_epoch, first_epoch + CFG.num_epochs):
metrics = empty_metrics
# NOTE: 'optimizer' is unbroadcast by construction at initialization or
# when loading a checkpoint. It is maintained in 'unbroadcast' state to
# enable the XLA cross-replica sharding optimization. The broadcasting is
# handled automatically by the pmap'd functions that use it.
logging.info('Evaluating tasks.')
if epoch == first_epoch + 1:
train_lib.sync_devices()
for task in eval_cache.tasks:
logging.info('Evaluating task %s', task.name)
all_predicted, all_bs = [], []
for pred_batch in eval_cache.preprocessed_examples[task.name]:
input_batch, unpadded_batch_size = train_lib.pad_batch_to_size(
pred_batch['inputs'], per_replica_set_eval_batch_size)
all_bs.append(unpadded_batch_size)
input_batch = jax.tree_map(
lambda x: x.reshape(
(topology.per_replica_set_num_replicas, -1) + x.shape[1:]),
input_batch)
all_predicted.append(p_pred_step(input_batch, optimizer.target))
max_host_batch_number = np.max(
eval_cache.preprocessed_batch_sizes[task.name])
batch_shortfall = max_host_batch_number - len(all_predicted)
if batch_shortfall > 0:
p_pred_step(input_batch, optimizer.target)
all_predicted.extend([jnp.zeros_like(all_predicted[0])] *
batch_shortfall)
all_bs.extend([0] * batch_shortfall)
all_predicted = jnp.concatenate(all_predicted)
all_bs = jnp.array(all_bs)
all_predicted = train_lib.host_allgather(
all_predicted, topology.num_replica_sets, topology.replica_set_id,
topology.per_replica_set_host_id == 0)
seqlength = all_predicted.shape[-1]
total_examples = np.sum(
train_lib.host_allgather(all_bs, topology.num_replica_sets,
topology.replica_set_id,
topology.per_replica_set_host_id == 0))
del all_bs
assert total_examples == len(eval_cache.examples[task.name]), (
'Total number of batches incorrect for task %s.' % task.name)
all_predicted = np.transpose(all_predicted, (1, 2, 0, 3)).reshape(
-1, seqlength)[:total_examples]
if jax.host_id() == 0:
assert eval_summary_writer
raw_predictions = []
for tokens in all_predicted:
raw_predictions.append(decode_tokens(tokens))
predictions = [
task.postprocess_fn(p, example=ex)
for p, ex in zip(raw_predictions, eval_cache.examples[task.name])
]
for metric_fn in task.metric_fns:
scores = metric_fn(eval_cache.targets[task.name], predictions)
for metric_name, metric_value in scores.items():
tag = f'eval/{task.name}/{metric_name}'
eval_summary_writer.scalar(tag, metric_value, host_step)
logging.info('EVAL %s at step %d: %.3f', tag, host_step,
metric_value)
eval_summary_writer.flush()
exemplars = ''
for n in np.random.choice(np.arange(len(predictions)), 8):
tgt_txt = tf.compat.as_text(
eval_cache.examples[task.name][n]['targets_plaintext'])
pred_txt = raw_predictions[n]
exemplars += (f'{eval_cache.inputs[task.name][n]}\n\n'
f'target: {tgt_txt}\n\n'
f'prediction: {pred_txt}\n\n')
eval_summary_writer.text(f'{task.name} samples', exemplars, host_step)
eval_summary_writer.flush()
if epoch == first_epoch + 1:
train_lib.sync_devices()
logging.info('BEGIN Train loop.')
if CFG.infeed:
optimizer, dropout_rngs, metrics, device_step = p_train_epoch(
optimizer, dropout_rngs, metrics, train_lib.unbroadcast(device_step),
epoch)
optimizer = train_lib.unbroadcast(optimizer)
while int(host_step // steps_per_epoch) == epoch:
batch = next(train_iter)
batch = jax.tree_map(
lambda x: x.reshape(
(topology.per_replica_set_num_replicas, -1) + x.shape[1:]), batch)
if CFG.infeed:
for i, device in enumerate(local_devices):
# own for feeding the right values to the right devices. Each device
# should get the minibatch corresponding to its replica, a slice of
# the larger batch corresponding to the host's replica set.
if device.platform == 'tpu':
device_coords = (*device.coords, device.id % 2)
else:
device_coords = (device.host_id, i)
per_replica_set_device_coords = tuple(
dc % prsm
for dc, prsm in zip(device_coords, topology.per_replica_set_mesh))
per_replica_set_replica_coords = tuple(
prsdc // prm for prsdc, prm in zip(per_replica_set_device_coords,
topology.per_replica_mesh))
per_replica_set_replica_id = 0
for prsm, prm, prsrc in zip(topology.per_replica_set_mesh,
topology.per_replica_mesh,
per_replica_set_replica_coords):
per_replica_set_replica_id = (
per_replica_set_replica_id * prsm // prm + prsrc)
input_tuple = tuple(
[batch[k][per_replica_set_replica_id] for k in train_keys])
tuple_shapes = jax.tree_map(jnp.shape, input_tuple)
tuple_dtypes = jax.tree_map(lambda x: x.dtype, input_tuple)
assert tuple_shapes == device_train_input_shape, (
'infeed shape error %s != %s' %
(tuple_shapes, device_train_input_shape))
assert tuple(set(tuple_dtypes)) == (jnp.int32,), \
('infeed dtype error %s not all of type %s' % (
tuple_dtypes, jnp.int32))
infeed_pool.submit(
functools.partial(device.transfer_to_infeed, input_tuple))
else:
optimizer, metrics, dropout_rngs = p_train_step(optimizer, batch,
metrics, dropout_rngs)
optimizer = train_lib.unbroadcast(optimizer)
host_step += 1
logging.info('END Train loop.')
if (CFG.save_checkpoints and
epoch % CFG.checkpoint_freq == CFG.checkpoint_freq - 1 and
jax.host_id() == 0):
checkpoints.save_checkpoint(FLAGS.model_dir, optimizer, host_step)
metrics = p_allreduce_metrics(metrics)
metrics = jax.tree_map(lambda x: jax.device_get(x[0]), metrics)
denominator = metrics.pop('denominator')
summary = jax.tree_map(lambda x: x / denominator, metrics)
logging.info('train in step: %s, %s', host_step, summary)
if jax.host_id() == 0:
assert train_summary_writer
for key, val in summary.items():
train_summary_writer.scalar(key, val, host_step)
train_summary_writer.flush()
logging.info('Gathering training evaluation metrics.')
eval_metrics = []
eval_iter = eval_ds.as_numpy_iterator()
for _, eval_batch in zip(range(CFG.num_eval_steps), eval_iter):
eval_batch = jax.tree_map(
lambda x: x.reshape(
(topology.per_replica_set_num_replicas, -1) + x.shape[1:]),
eval_batch)
metrics = p_eval_step(optimizer.target, eval_batch)
eval_metrics.append(metrics)
eval_metrics = p_allreduce_metrics(eval_metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics = jax.tree_map(np.sum, eval_metrics)
eval_denominator = eval_metrics.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator,
eval_metrics)
logging.info('eval in step: %s, %s', host_step, eval_summary)
if jax.host_id() == 0:
assert eval_summary_writer
for key, val in eval_summary.items():
eval_summary_writer.scalar(key, val, host_step)
eval_summary_writer.flush()
logging.info('Finished.')
train_lib.sync_devices()
if CFG.infeed:
infeed_pool.shutdown()
if __name__ == '__main__':
app.run(main)
| true | true |
f734bfd8dda478c4a821955f2b8d8d088ca859db | 47,394 | py | Python | botcity/core/bot.py | lf2a/botcity-framework-core-python-2 | eaa073a4f0b5099b0684400533f7427e001648e5 | [
"Apache-2.0"
] | null | null | null | botcity/core/bot.py | lf2a/botcity-framework-core-python-2 | eaa073a4f0b5099b0684400533f7427e001648e5 | [
"Apache-2.0"
] | null | null | null | botcity/core/bot.py | lf2a/botcity-framework-core-python-2 | eaa073a4f0b5099b0684400533f7427e001648e5 | [
"Apache-2.0"
] | null | null | null | import os
import functools
import multiprocessing
import platform
import random
import subprocess
import time
import webbrowser
import pyautogui
import pyperclip
from PIL import Image
from botcity.base import BaseBot, State
from botcity.base.utils import is_retina, only_if_element
from . import config, os_compat
try:
from botcity.maestro import BotMaestroSDK
MAESTRO_AVAILABLE = True
except ImportError:
MAESTRO_AVAILABLE = False
class DesktopBot(BaseBot):
"""
Base class for Desktop Bots.
Users must implement the `action` method in their classes.
Attributes:
state (State): The internal state of this bot.
maestro (BotMaestroSDK): an instance to interact with the BotMaestro server.
"""
    def __init__(self):
        """
        Initialize the bot state, the optional Maestro SDK connection and the
        camelCase method aliases kept for compatibility with the Java API.
        """
        super().__init__()
        self.state = State()
        # self.maestro is None when the botcity-maestro package is not installed.
        self.maestro = BotMaestroSDK() if MAESTRO_AVAILABLE else None
        # Small delay between hotkey presses is only needed on macOS.
        self._interval = 0.005 if platform.system() == "Darwin" else 0.0
        # For parity with Java
        self.addImage = self.add_image
        self.getImageFromMap = self.get_image_from_map
        self.getLastElement = self.get_last_element
        self.getScreenShot = self.get_screenshot
        self.screenCut = self.screen_cut
        self.saveScreenshot = self.save_screenshot
        self.getCoordinates = self.get_element_coords
        self.getElementCoords = self.get_element_coords
        self.getElementCoordsCentered = self.get_element_coords_centered
        # NOTE(review): this assignment shadows the `find` method on instances;
        # both ultimately delegate to find_until — confirm this is intended.
        self.find = self.find_until
        self.findUntil = self.find_until
        self.findText = self.find_text
        self.findLastUntil = self.find_until
        # Java API compatibility
        self.clickOn = self.click_on
        self.getLastX = self.get_last_x
        self.getLastY = self.get_last_y
        self.mouseMove = self.mouse_move
        self.clickAt = self.click_at
        self.doubleclick = self.double_click
        self.doubleClick = self.double_click
        self.doubleClickRelative = self.double_click_relative
        self.tripleClick = self.triple_click
        self.tripleClickRelative = self.triple_click_relative
        self.scrollDown = self.scroll_down
        self.scrollUp = self.scroll_up
        self.moveTo = self.mouse_move
        self.moveRelative = self.move_relative
        self.moveRandom = self.move_random
        self.moveAndClick = self.click
        self.rightClick = self.right_click
        self.rightClickAt = self.right_click_at
        self.rightClickRelative = self.right_click_relative
        self.moveAndRightClick = self.right_click
        # Select the platform-appropriate clipboard backend up front.
        pyperclip.determine_clipboard()
##########
# Display
##########
def add_image(self, label, path):
"""
Add an image into the state image map.
Args:
label (str): The image identifier
path (str): The path for the image on disk
"""
self.state.map_images[label] = path
def get_image_from_map(self, label):
"""
Return an image from teh state image map.
Args:
label (str): The image identifier
Returns:
Image: The Image object
"""
path = self.state.map_images.get(label)
if not path:
raise KeyError('Invalid label for image map.')
img = Image.open(path)
return img
def find_multiple(self, labels, x=None, y=None, width=None, height=None, *,
threshold=None, matching=0.9, waiting_time=10000, best=True, grayscale=False):
"""
Find multiple elements defined by label on screen until a timeout happens.
Args:
labels (list): A list of image identifiers
x (int, optional): Search region start position x. Defaults to 0.
y (int, optional): Search region start position y. Defaults to 0.
width (int, optional): Search region width. Defaults to screen width.
height (int, optional): Search region height. Defaults to screen height.
threshold (int, optional): The threshold to be applied when doing grayscale search.
Defaults to None.
matching (float, optional): The matching index ranging from 0 to 1.
Defaults to 0.9.
waiting_time (int, optional): Maximum wait time (ms) to search for a hit.
Defaults to 10000ms (10s).
best (bool, optional): Whether or not to keep looking until the best matching is found.
Defaults to True.
grayscale (bool, optional): Whether or not to convert to grayscale before searching.
Defaults to False.
Returns:
results (dict): A dictionary in which the key is the label and value are the element coordinates in a
NamedTuple.
"""
def _to_dict(lbs, elems):
return {k: v for k, v in zip(lbs, elems)}
screen_w, screen_h = pyautogui.size()
x = x or 0
y = y or 0
w = width or screen_w
h = height or screen_h
region = (x, y, w, h)
results = [None] * len(labels)
paths = [self._search_image_file(la) for la in labels]
if threshold:
# TODO: Figure out how we should do threshold
print('Threshold not yet supported')
if not best:
# TODO: Implement best=False.
print('Warning: Ignoring best=False for now. It will be supported in the future.')
start_time = time.time()
n_cpus = multiprocessing.cpu_count() - 1
while True:
elapsed_time = (time.time() - start_time) * 1000
if elapsed_time > waiting_time:
return _to_dict(labels, results)
haystack = pyautogui.screenshot()
helper = functools.partial(self._find_multiple_helper, haystack, region, matching, grayscale)
with multiprocessing.Pool(processes=n_cpus) as pool:
results = pool.map(helper, paths)
results = [self._fix_retina_element(r) for r in results]
if None in results:
continue
else:
return _to_dict(labels, results)
def _fix_retina_element(self, ele):
if not is_retina():
return ele
if ele is not None:
if is_retina():
ele = ele._replace(left=ele.left / 2.0, top=ele.top / 2.0)
return ele
def _find_multiple_helper(self, haystack, region, confidence, grayscale, needle):
ele = pyautogui.locate(needle, haystack, region=region, confidence=confidence, grayscale=grayscale)
return ele
    def find(self, label, x=None, y=None, width=None, height=None, *, threshold=None,
             matching=0.9, waiting_time=10000, best=True, grayscale=False):
        """
        Find an element defined by label on screen until a timeout happens.

        Args:
            label (str): The image identifier
            x (int, optional): Search region start position x. Defaults to 0.
            y (int, optional): Search region start position y. Defaults to 0.
            width (int, optional): Search region width. Defaults to screen width.
            height (int, optional): Search region height. Defaults to screen height.
            threshold (int, optional): The threshold to be applied when doing grayscale search.
                Defaults to None.
            matching (float, optional): The matching index ranging from 0 to 1.
                Defaults to 0.9.
            waiting_time (int, optional): Maximum wait time (ms) to search for a hit.
                Defaults to 10000ms (10s).
            best (bool, optional): Whether or not to keep looking until the best matching is found.
                Defaults to True.
            grayscale (bool, optional): Whether or not to convert to grayscale before searching.
                Defaults to False.

        Returns:
            element (NamedTuple): The element coordinates. None if not found.
        """
        # Thin wrapper: delegates to find_until with identical arguments.
        return self.find_until(label, x=x, y=y, width=width, height=height, threshold=threshold,
                               matching=matching, waiting_time=waiting_time, best=best, grayscale=grayscale)
    def find_until(self, label, x=None, y=None, width=None, height=None, *,
                   threshold=None, matching=0.9, waiting_time=10000, best=True, grayscale=False):
        """
        Find an element defined by label on screen until a timeout happens.

        Side effect: stores the match (or None) in self.state.element for use
        by click()/move() and the other element-relative actions.

        Args:
            label (str): The image identifier
            x (int, optional): Search region start position x. Defaults to 0.
            y (int, optional): Search region start position y. Defaults to 0.
            width (int, optional): Search region width. Defaults to screen width.
            height (int, optional): Search region height. Defaults to screen height.
            threshold (int, optional): The threshold to be applied when doing grayscale search.
                Defaults to None.
            matching (float, optional): The matching index ranging from 0 to 1.
                Defaults to 0.9.
            waiting_time (int, optional): Maximum wait time (ms) to search for a hit.
                Defaults to 10000ms (10s).
            best (bool, optional): Whether or not to keep looking until the best matching is found.
                Defaults to True.
            grayscale (bool, optional): Whether or not to convert to grayscale before searching.
                Defaults to False.

        Returns:
            element (NamedTuple): The element coordinates. None if not found.
        """
        self.state.element = None
        screen_w, screen_h = pyautogui.size()
        x = x or 0
        y = y or 0
        w = width or screen_w
        h = height or screen_h
        region = (x, y, w, h)
        element_path = self._search_image_file(label)
        if threshold:
            # TODO: Figure out how we should do threshold
            print('Threshold not yet supported')
        if not best:
            # TODO: Implement best=False.
            print('Warning: Ignoring best=False for now. It will be supported in the future.')
        start_time = time.time()
        # Busy-retry: screenshot-and-match until a hit or the timeout elapses.
        while True:
            elapsed_time = (time.time() - start_time) * 1000
            if elapsed_time > waiting_time:
                return None
            ele = pyautogui.locateOnScreen(element_path, region=region, confidence=matching,
                                           grayscale=grayscale)
            if ele is not None:
                # Retina screenshots are 2x, so halve the reported coordinates.
                if is_retina():
                    ele = ele._replace(left=ele.left / 2.0, top=ele.top / 2.0)
                self.state.element = ele
                return ele
def find_all(self, label, x=None, y=None, width=None, height=None, *,
threshold=None, matching=0.9, waiting_time=10000, grayscale=False):
"""
Find all elements defined by label on screen until a timeout happens.
Args:
label (str): The image identifier
x (int, optional): Search region start position x. Defaults to 0.
y (int, optional): Search region start position y. Defaults to 0.
width (int, optional): Search region width. Defaults to screen width.
height (int, optional): Search region height. Defaults to screen height.
threshold (int, optional): The threshold to be applied when doing grayscale search.
Defaults to None.
matching (float, optional): The matching index ranging from 0 to 1.
Defaults to 0.9.
waiting_time (int, optional): Maximum wait time (ms) to search for a hit.
Defaults to 10000ms (10s).
grayscale (bool, optional): Whether or not to convert to grayscale before searching.
Defaults to False.
Returns:
elements (collections.Iterable[NamedTuple]): A generator with all element coordinates fount.
None if not found.
"""
def deduplicate(elems):
def find_same(item, items):
x_start = item.left
x_end = item.left + item.width
y_start = item.top
y_end = item.top + item.height
similars = []
for itm in items:
if itm == item:
continue
if (itm.left >= x_start and itm.left < x_end)\
and (itm.top >= y_start and itm.top < y_end):
similars.append(itm)
continue
return similars
index = 0
while True:
try:
dups = find_same(elems[index], elems[index:])
for d in dups:
elems.remove(d)
index += 1
except IndexError:
break
return elems
self.state.element = None
screen_w, screen_h = pyautogui.size()
x = x or 0
y = y or 0
w = width or screen_w
h = height or screen_h
region = (x, y, w, h)
element_path = self._search_image_file(label)
if threshold:
# TODO: Figure out how we should do threshold
print('Threshold not yet supported')
start_time = time.time()
while True:
elapsed_time = (time.time() - start_time) * 1000
if elapsed_time > waiting_time:
return None
eles = pyautogui.locateAllOnScreen(element_path, region=region, confidence=matching,
grayscale=grayscale)
if not eles:
continue
eles = deduplicate(list(eles))
for ele in eles:
if ele is not None:
if is_retina():
ele = ele._replace(left=ele.left / 2.0, top=ele.top / 2.0)
self.state.element = ele
yield ele
break
    def find_text(self, label, x=None, y=None, width=None, height=None, *, threshold=None, matching=0.9,
                  waiting_time=10000, best=True):
        """
        Find an element defined by label on screen until a timeout happens.

        Args:
            label (str): The image identifier
            x (int, optional): Search region start position x. Defaults to 0.
            y (int, optional): Search region start position y. Defaults to 0.
            width (int, optional): Search region width. Defaults to screen width.
            height (int, optional): Search region height. Defaults to screen height.
            threshold (int, optional): The threshold to be applied when doing grayscale search.
                Defaults to None.
            matching (float, optional): The matching index ranging from 0 to 1.
                Defaults to 0.9.
            waiting_time (int, optional): Maximum wait time (ms) to search for a hit.
                Defaults to 10000ms (10s).
            best (bool, optional): Whether or not to keep looking until the best matching is found.
                Defaults to True.

        Returns:
            element (NamedTuple): The element coordinates. None if not found.
        """
        # Delegates to find_until, forcing grayscale=True for the text search.
        return self.find_until(label, x, y, width, height, threshold=threshold, matching=matching,
                               waiting_time=waiting_time, best=best, grayscale=True)
def get_last_element(self):
"""
Return the last element found.
Returns:
element (NamedTuple): The element coordinates (left, top, width, height)
"""
return self.state.element
def display_size(self):
"""
Returns the display size in pixels.
Returns:
size (Tuple): The screen dimension (width and height) in pixels.
"""
screen_size = pyautogui.size()
return screen_size.width, screen_size.height
    def screenshot(self, filepath=None, region=None):
        """
        Capture a screenshot.

        Args:
            filepath (str, optional): The filepath in which to save the screenshot. Defaults to None.
            region (tuple, optional): Bounding box containing left, top, width and height to crop screenshot.

        Returns:
            Image: The screenshot Image object
        """
        img = pyautogui.screenshot(filepath, region)
        return img

    def get_screenshot(self, filepath=None, region=None):
        """
        Capture a screenshot. Delegates to screenshot().

        Args:
            filepath (str, optional): The filepath in which to save the screenshot. Defaults to None.
            region (tuple, optional): Bounding box containing left, top, width and height to crop screenshot.

        Returns:
            Image: The screenshot Image object
        """
        return self.screenshot(filepath, region)
def screen_cut(self, x, y, width=None, height=None):
"""
Capture a screenshot from a region of the screen.
Args:
x (int): region start position x
y (int): region start position y
width (int): region width
height (int): region height
Returns:
Image: The screenshot Image object
"""
screen_size = pyautogui.size()
x = x or 0
y = y or 0
width = width or screen_size.width
height = height or screen_size.height
img = pyautogui.screenshot(region=(x, y, width, height))
return img
def save_screenshot(self, path):
"""
Saves a screenshot in a given path.
Args:
path (str): The filepath in which to save the screenshot
"""
pyautogui.screenshot(path)
def get_element_coords(self, label, x=None, y=None, width=None, height=None, matching=0.9, best=True):
"""
Find an element defined by label on screen and returns its coordinates.
Args:
label (str): The image identifier
x (int, optional): X (Left) coordinate of the search area.
y (int, optional): Y (Top) coordinate of the search area.
width (int, optional): Width of the search area.
height (int, optional): Height of the search area.
matching (float, optional): Minimum score to consider a match in the element image recognition process.
Defaults to 0.9.
best (bool, optional): Whether or not to search for the best value. If False the method returns on
the first find. Defaults to True.
Returns:
coords (Tuple): A tuple containing the x and y coordinates for the element.
"""
self.state.element = None
screen_size = pyautogui.size()
x = x or 0
y = y or 0
width = width or screen_size.width
height = height or screen_size.height
region = (x, y, width, height)
if not best:
print('Warning: Ignoring best=False for now. It will be supported in the future.')
ele = pyautogui.locateOnScreen(self._search_image_file(label), region=region, confidence=matching)
if ele is None:
return None, None
if is_retina():
ele = ele._replace(left=ele.left / 2.0, top=ele.top / 2.0)
self.state.element = ele
return ele.left, ele.top
    def get_element_coords_centered(self, label, x=None, y=None, width=None, height=None,
                                    matching=0.9, best=True):
        """
        Find an element defined by label on screen and returns its centered coordinates.

        Args:
            label (str): The image identifier
            x (int, optional): X (Left) coordinate of the search area.
            y (int, optional): Y (Top) coordinate of the search area.
            width (int, optional): Width of the search area.
            height (int, optional): Height of the search area.
            matching (float, optional): Minimum score to consider a match in the element image
                recognition process. Defaults to 0.9.
            best (bool, optional): Whether or not to search for the best value. If False the
                method returns on the first find. Defaults to True.

        Returns:
            coords (Tuple): A tuple containing the x and y coordinates for the center of the element.
        """
        # get_element_coords stores the match in self.state.element as a side
        # effect; center() is then derived from that stored element.
        self.get_element_coords(label, x, y, width, height, matching, best)
        return self.state.center()
#########
# Browser
#########
def browse(self, url, location=0):
"""
Invoke the default browser passing an URL
Args:
url (str): The URL to be visited.
location (int): If possible, open url in a location determined by new:
* 0: the same browser window (the default)
* 1: a new browser window
* 2: a new browser page ("tab")
Returns:
bool: Whether or not the request was successful
"""
status = webbrowser.open(url, location)
return status
#######
# Mouse
#######
def click_on(self, label):
"""
Click on the element.
Args:
label (str): The image identifier
"""
x, y = self.get_element_coords_centered(label)
if None in (x, y):
raise ValueError(f'Element not available. Cannot find {label}.')
os_compat.click(x, y)
def get_last_x(self):
"""
Get the last X position for the mouse.
Returns:
x (int): The last x position for the mouse.
"""
return pyautogui.position().x
def get_last_y(self):
"""
Get the last Y position for the mouse.
Returns:
y (int): The last y position for the mouse.
"""
return pyautogui.position().y
def mouse_move(self, x, y):
"""
Mouse the move to the coordinate defined by x and y
Args:
x (int): The X coordinate
y (int): The Y coordinate
"""
pyautogui.moveTo(x, y)
def click_at(self, x, y):
"""
Click at the coordinate defined by x and y
Args:
x (int): The X coordinate
y (int): The Y coordinate
"""
os_compat.click(x, y)
    @only_if_element
    def click(self, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *,
              clicks=1, interval_between_clicks=0, button='left'):
        """
        Click on the last found element.

        Args:
            wait_after (int, optional): Interval to wait after clicking on the element.
            clicks (int, optional): Number of times to click. Defaults to 1.
            interval_between_clicks (int, optional): The interval between clicks in ms. Defaults to 0.
            button (str, optional): One of 'left', 'right', 'middle'. Defaults to 'left'
        """
        x, y = self.state.center()
        # interval is converted from ms to the seconds expected by the OS layer.
        os_compat.click(x, y, clicks=clicks, button=button, interval=interval_between_clicks/1000.0)
        self.sleep(wait_after)
    @only_if_element
    def click_relative(self, x, y, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *,
                       clicks=1, interval_between_clicks=0, button='left'):
        """
        Click Relative on the last found element.

        Args:
            x (int): Horizontal offset
            y (int): Vertical offset
            wait_after (int, optional): Interval to wait after clicking on the element.
            clicks (int, optional): Number of times to click. Defaults to 1.
            interval_between_clicks (int, optional): The interval between clicks in ms. Defaults to 0.
            button (str, optional): One of 'left', 'right', 'middle'. Defaults to 'left'
        """
        # Offsets are relative to the last element's top-left corner (state.x/y).
        x = self.state.x() + x
        y = self.state.y() + y
        os_compat.click(x, y, clicks=clicks, button=button, interval=interval_between_clicks/1000.0)
        self.sleep(wait_after)
    @only_if_element
    def double_click(self, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION):
        """
        Double Click on the last found element.

        Args:
            wait_after (int, optional): Interval to wait after clicking on the element.
        """
        self.click(wait_after=wait_after, clicks=2)

    @only_if_element
    def double_click_relative(self, x, y, interval_between_clicks=0, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION):
        """
        Double Click Relative on the last found element.

        Args:
            x (int): Horizontal offset
            y (int): Vertical offset
            interval_between_clicks (int, optional): The interval between clicks in ms. Defaults to 0.
            wait_after (int, optional): Interval to wait after clicking on the element.
        """
        self.click_relative(x, y, wait_after=wait_after, clicks=2, interval_between_clicks=interval_between_clicks)

    @only_if_element
    def triple_click(self, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION):
        """
        Triple Click on the last found element.

        Args:
            wait_after (int, optional): Interval to wait after clicking on the element.
        """
        self.click(wait_after=wait_after, clicks=3)

    @only_if_element
    def triple_click_relative(self, x, y, interval_between_clicks=0, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION):
        """
        Triple Click Relative on the last found element.

        Args:
            x (int): Horizontal offset
            y (int): Vertical offset
            interval_between_clicks (int, optional): The interval between clicks in ms. Defaults to 0.
            wait_after (int, optional): Interval to wait after clicking on the element.
        """
        self.click_relative(x, y, wait_after=wait_after, clicks=3, interval_between_clicks=interval_between_clicks)
    def mouse_down(self, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *, button='left'):
        """
        Holds down the requested mouse button.

        Args:
            wait_after (int, optional): Interval to wait after clicking on the element.
            button (str, optional): One of 'left', 'right', 'middle'. Defaults to 'left'
        """
        pyautogui.mouseDown(button=button)
        self.sleep(wait_after)

    def mouse_up(self, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *, button='left'):
        """
        Releases the requested mouse button.

        Args:
            wait_after (int, optional): Interval to wait after clicking on the element.
            button (str, optional): One of 'left', 'right', 'middle'. Defaults to 'left'
        """
        pyautogui.mouseUp(button=button)
        self.sleep(wait_after)
    def scroll_down(self, clicks):
        """
        Scroll Down n clicks

        Args:
            clicks (int): Number of times to scroll down.
        """
        # Negative values scroll down in pyautogui.
        pyautogui.scroll(-1 * clicks)

    def scroll_up(self, clicks):
        """
        Scroll Up n clicks

        Args:
            clicks (int): Number of times to scroll up.
        """
        pyautogui.scroll(clicks)
    @only_if_element
    def move(self):
        """
        Move the mouse cursor to the center position of the last found item.
        """
        x, y = self.state.center()
        pyautogui.moveTo(x, y)
def move_relative(self, x, y):
"""
Move the mouse relative to its current position.
Args:
x (int): Horizontal offset
y (int): Vertical offset
"""
x = self.get_last_x() + x
y = self.get_last_y() + y
pyautogui.moveTo(x, y)
def move_random(self, range_x, range_y):
"""
Move randomly along the given x, y range.
Args:
range_x (int): Horizontal range
range_y (int): Vertical range
"""
x = int(random.random() * range_x)
y = int(random.random() * range_y)
pyautogui.moveTo(x, y)
    @only_if_element
    def right_click(self, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *,
                    clicks=1, interval_between_clicks=0):
        """
        Right click on the last found element.

        Args:
            wait_after (int, optional): Interval to wait after clicking on the element.
            clicks (int, optional): Number of times to click. Defaults to 1.
            interval_between_clicks (int, optional): The interval between clicks in ms. Defaults to 0.
        """
        x, y = self.state.center()
        # interval is converted from ms to the seconds expected by the OS layer.
        os_compat.click(x, y, clicks=clicks, button='right', interval=interval_between_clicks/1000.0)
        self.sleep(wait_after)
def right_click_at(self, x, y):
"""
Right click at the coordinate defined by x and y
Args:
x (int): The X coordinate
y (int): The Y coordinate
"""
os_compat.click(x, y, button='right')
@only_if_element
def right_click_relative(self, x, y, interval_between_clicks=0, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION):
"""
Right Click Relative on the last found element.
Args:
x (int): Horizontal offset
y (int): Vertical offset
interval_between_clicks (int, optional): The interval between clicks in ms. Defaults to 0.
wait_after (int, optional): Interval to wait after clicking on the element.
"""
self.click_relative(x, y, wait_after=wait_after, clicks=3, interval_between_clicks=interval_between_clicks,
button='right')
##########
# Keyboard
##########
def type_key(self, text, interval=0):
"""
Type a text char by char (individual key events).
Args:
text (str): text to be typed.
interval (int, optional): interval (ms) between each key press. Defaults to 0
"""
self.kb_type(text=text, interval=interval/1000.0)
    def kb_type(self, text, interval=0):
        """
        Type a text char by char (individual key events).

        Args:
            text (str): text to be typed.
            interval (int, optional): interval (ms) between each key press. Defaults to 0
        """
        # pyautogui expects seconds; this public API takes milliseconds.
        pyautogui.write(text, interval=interval/1000.0)
        self.sleep(config.DEFAULT_SLEEP_AFTER_ACTION)
def paste(self, text=None, wait=0):
"""
Paste content from the clipboard.
Args:
text (str, optional): The text to be pasted. Defaults to None
wait (int, optional): Wait interval (ms) after task
"""
if text:
pyperclip.copy(text)
self.control_v()
def copy_to_clipboard(self, text, wait=0):
"""
Copy content to the clipboard.
Args:
text (str): The text to be copied.
wait (int, optional): Wait interval (ms) after task
"""
pyperclip.copy(text)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
    def tab(self, wait=0):
        """
        Press key Tab

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.press('tab')
        # wait == 0 falls back to the configured default sleep.
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def enter(self, wait=0):
        """
        Press key Enter

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.press('enter')
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def key_right(self, wait=0):
        """
        Press key Right

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.press('right')
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def key_enter(self, wait=0):
        """
        Press key Enter. Alias for enter().

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        self.enter(wait)

    def key_end(self, wait=0):
        """
        Press key End

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.press('end')
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def key_esc(self, wait=0):
        """
        Press key Esc

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.press('esc')
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)
    def _key_fx(self, idx, wait=0):
        """
        Press key F[idx] where idx is a value from 1 to 12

        Args:
            idx (int): F key index from 1 to 12
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.press(f'f{idx}')
        # wait == 0 falls back to the configured default sleep.
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    # Thin per-function-key wrappers around _key_fx.
    def key_f1(self, wait=0):
        """Press key F1."""
        self._key_fx(1, wait=wait)

    def key_f2(self, wait=0):
        """Press key F2."""
        self._key_fx(2, wait=wait)

    def key_f3(self, wait=0):
        """Press key F3."""
        self._key_fx(3, wait=wait)

    def key_f4(self, wait=0):
        """Press key F4."""
        self._key_fx(4, wait=wait)

    def key_f5(self, wait=0):
        """Press key F5."""
        self._key_fx(5, wait=wait)

    def key_f6(self, wait=0):
        """Press key F6."""
        self._key_fx(6, wait=wait)

    def key_f7(self, wait=0):
        """Press key F7."""
        self._key_fx(7, wait=wait)

    def key_f8(self, wait=0):
        """Press key F8."""
        self._key_fx(8, wait=wait)

    def key_f9(self, wait=0):
        """Press key F9."""
        self._key_fx(9, wait=wait)

    def key_f10(self, wait=0):
        """Press key F10."""
        self._key_fx(10, wait=wait)

    def key_f11(self, wait=0):
        """Press key F11."""
        self._key_fx(11, wait=wait)

    def key_f12(self, wait=0):
        """Press key F12."""
        self._key_fx(12, wait=wait)
    def hold_shift(self, wait=0):
        """
        Hold key Shift

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.keyDown('shift')
        self.sleep(wait)

    def release_shift(self):
        """
        Release key Shift.
        This method needs to be invoked after holding Shift or similar.
        """
        pyautogui.keyUp('shift')
    def alt_space(self, wait=0):
        """
        Press keys Alt+Space

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.hotkey('alt', 'space', interval=self._interval)
        # wait == 0 falls back to the configured default sleep.
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)
    def maximize_window(self):
        """
        Shortcut to maximize window on Windows OS.

        Opens the window system menu via Alt+Space, then presses 'x'
        (Windows' Maximize accelerator). Not applicable on other platforms.
        """
        self.alt_space()
        self.sleep(1000)
        pyautogui.press('x')
def type_keys_with_interval(self, interval, keys):
"""
Press a sequence of keys. Hold the keys in the specific order and releases them.
Args:
interval (int): Interval (ms) in which to press and release keys
keys (list): List of keys to be pressed
"""
pyautogui.hotkey(*keys, interval=interval/1000.0)
def type_keys(self, keys):
"""
Press a sequence of keys. Hold the keys in the specific order and releases them.
Args:
keys (list): List of keys to be pressed
"""
self.type_keys_with_interval(100, keys)
    # Alt+<key> shortcuts. All fall back to the configured default sleep
    # when wait is 0.
    def alt_e(self, wait=0):
        """
        Press keys Alt+E

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.hotkey('alt', 'e', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def alt_r(self, wait=0):
        """
        Press keys Alt+R

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.hotkey('alt', 'r', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def alt_f(self, wait=0):
        """
        Press keys Alt+F

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.hotkey('alt', 'f', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def alt_u(self, wait=0):
        """
        Press keys Alt+U

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.hotkey('alt', 'u', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def alt_f4(self, wait=0):
        """
        Press keys Alt+F4

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.hotkey('alt', 'f4', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)
    # CTRL+<key> shortcuts. Each maps CTRL to the Command key on macOS and
    # falls back to the configured default sleep when wait is 0.
    def control_c(self, wait=0):
        """
        Press keys CTRL+C

        Args:
            wait (int, optional): Wait interval (ms) after task

        Returns:
            text (str): The clipboard content after the copy.
        """
        key = 'ctrl'
        if platform.system() == 'Darwin':
            key = 'command'
        pyautogui.hotkey(key, 'c', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)
        return self.get_clipboard()

    def control_v(self, wait=0):
        """
        Press keys CTRL+V

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        key = 'ctrl'
        if platform.system() == 'Darwin':
            key = 'command'
        pyautogui.hotkey(key, 'v', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def control_a(self, wait=0):
        """
        Press keys CTRL+A

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        key = 'ctrl'
        if platform.system() == 'Darwin':
            key = 'command'
        pyautogui.hotkey(key, 'a', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def control_f(self, wait=0):
        """
        Press keys CTRL+F

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        key = 'ctrl'
        if platform.system() == 'Darwin':
            key = 'command'
        pyautogui.hotkey(key, 'f', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def control_p(self, wait=0):
        """
        Press keys CTRL+P

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        key = 'ctrl'
        if platform.system() == 'Darwin':
            key = 'command'
        pyautogui.hotkey(key, 'p', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def control_u(self, wait=0):
        """
        Press keys CTRL+U

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        key = 'ctrl'
        if platform.system() == 'Darwin':
            key = 'command'
        pyautogui.hotkey(key, 'u', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def control_r(self, wait=0):
        """
        Press keys CTRL+R

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        key = 'ctrl'
        if platform.system() == 'Darwin':
            key = 'command'
        pyautogui.hotkey(key, 'r', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def control_t(self, wait=0):
        """
        Press keys CTRL+T

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        key = 'ctrl'
        if platform.system() == 'Darwin':
            key = 'command'
        pyautogui.hotkey(key, 't', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def control_end(self, wait=0):
        """
        Press keys CTRL+End

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        key = 'ctrl'
        if platform.system() == 'Darwin':
            key = 'command'
        pyautogui.hotkey(key, 'end', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def control_home(self, wait=0):
        """
        Press keys CTRL+Home

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        key = 'ctrl'
        if platform.system() == 'Darwin':
            key = 'command'
        pyautogui.hotkey(key, 'home', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def control_w(self, wait=0):
        """
        Press keys CTRL+W

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        key = 'ctrl'
        if platform.system() == 'Darwin':
            key = 'command'
        pyautogui.hotkey(key, 'w', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def control_shift_p(self, wait=0):
        """
        Press keys CTRL+Shift+P

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        key = 'ctrl'
        if platform.system() == 'Darwin':
            key = 'command'
        pyautogui.hotkey(key, 'shift', 'p', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def control_shift_j(self, wait=0):
        """
        Press keys CTRL+Shift+J

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        key = 'ctrl'
        if platform.system() == 'Darwin':
            key = 'command'
        pyautogui.hotkey(key, 'shift', 'j', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)

    def shift_tab(self, wait=0):
        """
        Press keys Shift+Tab

        Args:
            wait (int, optional): Wait interval (ms) after task
        """
        pyautogui.hotkey('shift', 'tab', interval=self._interval)
        delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
        self.sleep(delay)
def get_clipboard(self):
"""
Get the current content in the clipboard.
Returns:
text (str): Current clipboard content
"""
return pyperclip.paste()
def type_left(self, wait=0):
"""
Press Left key
Args:
wait (int, optional): Wait interval (ms) after task
"""
pyautogui.press('left')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def type_right(self, wait=0):
"""
Press Right key
Args:
wait (int, optional): Wait interval (ms) after task
"""
pyautogui.press('right')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def type_down(self, wait=0):
"""
Press Down key
Args:
wait (int, optional): Wait interval (ms) after task
"""
pyautogui.press('down')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def type_up(self, wait=0):
"""
Press Up key
Args:
wait (int, optional): Wait interval (ms) after task
"""
pyautogui.press('up')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def type_windows(self, wait=0):
"""
Press Win logo key
Args:
wait (int, optional): Wait interval (ms) after task
"""
pyautogui.press('win')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def page_up(self, wait=0):
"""
Press Page Up key
Args:
wait (int, optional): Wait interval (ms) after task
"""
pyautogui.press('pageup')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def page_down(self, wait=0):
"""
Press Page Down key
Args:
wait (int, optional): Wait interval (ms) after task
"""
pyautogui.press('pagedown')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def space(self, wait=0):
"""
Press Space key
Args:
wait (int, optional): Wait interval (ms) after task
"""
pyautogui.press('space')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def backspace(self, wait=0):
"""
Press Backspace key
Args:
wait (int, optional): Wait interval (ms) after task
"""
pyautogui.press('backspace')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def delete(self, wait=0):
"""
Press Delete key
Args:
wait (int, optional): Wait interval (ms) after task
"""
pyautogui.press('delete')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
######
# Misc
######
def wait_for_file(self, path, timeout=10000):
"""
Invoke the system handler to open the given file.
Args:
path (str): The path for the file to be executed
timeout (int, optional): Maximum wait time (ms) to search for a hit.
Defaults to 10000ms (10s).
Returns
status (bool): Whether or not the file was available before the timeout
"""
start_time = time.time()
while True:
elapsed_time = (time.time() - start_time) * 1000
if elapsed_time > timeout:
return False
if os.path.isfile(path) and os.access(path, os.R_OK):
return True
self.sleep(config.DEFAULT_SLEEP_AFTER_ACTION)
def execute(self, file_path):
"""
Invoke the system handler to open the given file.
Args:
file_path (str): The path for the file to be executed
"""
if platform.system() == "Windows":
os.startfile(file_path)
else:
subprocess.Popen(file_path.split(" "))
def wait(self, interval):
"""
Wait / Sleep for a given interval.
Args:
interval (int): Interval in milliseconds
"""
time.sleep(interval / 1000.0)
def sleep(self, interval):
"""
Wait / Sleep for a given interval.
Args:
interval (int): Interval in milliseconds
"""
self.wait(interval)
| 32.044625 | 115 | 0.573617 | import os
import functools
import multiprocessing
import platform
import random
import subprocess
import time
import webbrowser
import pyautogui
import pyperclip
from PIL import Image
from botcity.base import BaseBot, State
from botcity.base.utils import is_retina, only_if_element
from . import config, os_compat
try:
from botcity.maestro import BotMaestroSDK
MAESTRO_AVAILABLE = True
except ImportError:
MAESTRO_AVAILABLE = False
class DesktopBot(BaseBot):
def __init__(self):
super().__init__()
self.state = State()
self.maestro = BotMaestroSDK() if MAESTRO_AVAILABLE else None
self._interval = 0.005 if platform.system() == "Darwin" else 0.0
self.addImage = self.add_image
self.getImageFromMap = self.get_image_from_map
self.getLastElement = self.get_last_element
self.getScreenShot = self.get_screenshot
self.screenCut = self.screen_cut
self.saveScreenshot = self.save_screenshot
self.getCoordinates = self.get_element_coords
self.getElementCoords = self.get_element_coords
self.getElementCoordsCentered = self.get_element_coords_centered
self.find = self.find_until
self.findUntil = self.find_until
self.findText = self.find_text
self.findLastUntil = self.find_until
self.clickOn = self.click_on
self.getLastX = self.get_last_x
self.getLastY = self.get_last_y
self.mouseMove = self.mouse_move
self.clickAt = self.click_at
self.doubleclick = self.double_click
self.doubleClick = self.double_click
self.doubleClickRelative = self.double_click_relative
self.tripleClick = self.triple_click
self.tripleClickRelative = self.triple_click_relative
self.scrollDown = self.scroll_down
self.scrollUp = self.scroll_up
self.moveTo = self.mouse_move
self.moveRelative = self.move_relative
self.moveRandom = self.move_random
self.moveAndClick = self.click
self.rightClick = self.right_click
self.rightClickAt = self.right_click_at
self.rightClickRelative = self.right_click_relative
self.moveAndRightClick = self.right_click
pyperclip.determine_clipboard()
ath
def get_image_from_map(self, label):
path = self.state.map_images.get(label)
if not path:
raise KeyError('Invalid label for image map.')
img = Image.open(path)
return img
def find_multiple(self, labels, x=None, y=None, width=None, height=None, *,
threshold=None, matching=0.9, waiting_time=10000, best=True, grayscale=False):
def _to_dict(lbs, elems):
return {k: v for k, v in zip(lbs, elems)}
screen_w, screen_h = pyautogui.size()
x = x or 0
y = y or 0
w = width or screen_w
h = height or screen_h
region = (x, y, w, h)
results = [None] * len(labels)
paths = [self._search_image_file(la) for la in labels]
if threshold:
print('Threshold not yet supported')
if not best:
print('Warning: Ignoring best=False for now. It will be supported in the future.')
start_time = time.time()
n_cpus = multiprocessing.cpu_count() - 1
while True:
elapsed_time = (time.time() - start_time) * 1000
if elapsed_time > waiting_time:
return _to_dict(labels, results)
haystack = pyautogui.screenshot()
helper = functools.partial(self._find_multiple_helper, haystack, region, matching, grayscale)
with multiprocessing.Pool(processes=n_cpus) as pool:
results = pool.map(helper, paths)
results = [self._fix_retina_element(r) for r in results]
if None in results:
continue
else:
return _to_dict(labels, results)
def _fix_retina_element(self, ele):
if not is_retina():
return ele
if ele is not None:
if is_retina():
ele = ele._replace(left=ele.left / 2.0, top=ele.top / 2.0)
return ele
def _find_multiple_helper(self, haystack, region, confidence, grayscale, needle):
ele = pyautogui.locate(needle, haystack, region=region, confidence=confidence, grayscale=grayscale)
return ele
def find(self, label, x=None, y=None, width=None, height=None, *, threshold=None,
matching=0.9, waiting_time=10000, best=True, grayscale=False):
return self.find_until(label, x=x, y=y, width=width, height=height, threshold=threshold,
matching=matching, waiting_time=waiting_time, best=best, grayscale=grayscale)
def find_until(self, label, x=None, y=None, width=None, height=None, *,
threshold=None, matching=0.9, waiting_time=10000, best=True, grayscale=False):
self.state.element = None
screen_w, screen_h = pyautogui.size()
x = x or 0
y = y or 0
w = width or screen_w
h = height or screen_h
region = (x, y, w, h)
element_path = self._search_image_file(label)
if threshold:
print('Threshold not yet supported')
if not best:
print('Warning: Ignoring best=False for now. It will be supported in the future.')
start_time = time.time()
while True:
elapsed_time = (time.time() - start_time) * 1000
if elapsed_time > waiting_time:
return None
ele = pyautogui.locateOnScreen(element_path, region=region, confidence=matching,
grayscale=grayscale)
if ele is not None:
if is_retina():
ele = ele._replace(left=ele.left / 2.0, top=ele.top / 2.0)
self.state.element = ele
return ele
def find_all(self, label, x=None, y=None, width=None, height=None, *,
threshold=None, matching=0.9, waiting_time=10000, grayscale=False):
def deduplicate(elems):
def find_same(item, items):
x_start = item.left
x_end = item.left + item.width
y_start = item.top
y_end = item.top + item.height
similars = []
for itm in items:
if itm == item:
continue
if (itm.left >= x_start and itm.left < x_end)\
and (itm.top >= y_start and itm.top < y_end):
similars.append(itm)
continue
return similars
index = 0
while True:
try:
dups = find_same(elems[index], elems[index:])
for d in dups:
elems.remove(d)
index += 1
except IndexError:
break
return elems
self.state.element = None
screen_w, screen_h = pyautogui.size()
x = x or 0
y = y or 0
w = width or screen_w
h = height or screen_h
region = (x, y, w, h)
element_path = self._search_image_file(label)
if threshold:
print('Threshold not yet supported')
start_time = time.time()
while True:
elapsed_time = (time.time() - start_time) * 1000
if elapsed_time > waiting_time:
return None
eles = pyautogui.locateAllOnScreen(element_path, region=region, confidence=matching,
grayscale=grayscale)
if not eles:
continue
eles = deduplicate(list(eles))
for ele in eles:
if ele is not None:
if is_retina():
ele = ele._replace(left=ele.left / 2.0, top=ele.top / 2.0)
self.state.element = ele
yield ele
break
def find_text(self, label, x=None, y=None, width=None, height=None, *, threshold=None, matching=0.9,
waiting_time=10000, best=True):
return self.find_until(label, x, y, width, height, threshold=threshold, matching=matching,
waiting_time=waiting_time, best=best, grayscale=True)
def get_last_element(self):
return self.state.element
def display_size(self):
screen_size = pyautogui.size()
return screen_size.width, screen_size.height
def screenshot(self, filepath=None, region=None):
img = pyautogui.screenshot(filepath, region)
return img
def get_screenshot(self, filepath=None, region=None):
return self.screenshot(filepath, region)
def screen_cut(self, x, y, width=None, height=None):
screen_size = pyautogui.size()
x = x or 0
y = y or 0
width = width or screen_size.width
height = height or screen_size.height
img = pyautogui.screenshot(region=(x, y, width, height))
return img
def save_screenshot(self, path):
pyautogui.screenshot(path)
def get_element_coords(self, label, x=None, y=None, width=None, height=None, matching=0.9, best=True):
self.state.element = None
screen_size = pyautogui.size()
x = x or 0
y = y or 0
width = width or screen_size.width
height = height or screen_size.height
region = (x, y, width, height)
if not best:
print('Warning: Ignoring best=False for now. It will be supported in the future.')
ele = pyautogui.locateOnScreen(self._search_image_file(label), region=region, confidence=matching)
if ele is None:
return None, None
if is_retina():
ele = ele._replace(left=ele.left / 2.0, top=ele.top / 2.0)
self.state.element = ele
return ele.left, ele.top
def get_element_coords_centered(self, label, x=None, y=None, width=None, height=None,
matching=0.9, best=True):
self.get_element_coords(label, x, y, width, height, matching, best)
return self.state.center()
rowser.open(url, location)
return status
x, y = self.get_element_coords_centered(label)
if None in (x, y):
raise ValueError(f'Element not available. Cannot find {label}.')
os_compat.click(x, y)
def get_last_x(self):
return pyautogui.position().x
def get_last_y(self):
return pyautogui.position().y
def mouse_move(self, x, y):
pyautogui.moveTo(x, y)
def click_at(self, x, y):
os_compat.click(x, y)
@only_if_element
def click(self, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *,
clicks=1, interval_between_clicks=0, button='left'):
x, y = self.state.center()
os_compat.click(x, y, clicks=clicks, button=button, interval=interval_between_clicks/1000.0)
self.sleep(wait_after)
@only_if_element
def click_relative(self, x, y, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *,
clicks=1, interval_between_clicks=0, button='left'):
x = self.state.x() + x
y = self.state.y() + y
os_compat.click(x, y, clicks=clicks, button=button, interval=interval_between_clicks/1000.0)
self.sleep(wait_after)
@only_if_element
def double_click(self, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION):
self.click(wait_after=wait_after, clicks=2)
@only_if_element
def double_click_relative(self, x, y, interval_between_clicks=0, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION):
self.click_relative(x, y, wait_after=wait_after, clicks=2, interval_between_clicks=interval_between_clicks)
@only_if_element
def triple_click(self, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION):
self.click(wait_after=wait_after, clicks=3)
@only_if_element
def triple_click_relative(self, x, y, interval_between_clicks=0, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION):
self.click_relative(x, y, wait_after=wait_after, clicks=3, interval_between_clicks=interval_between_clicks)
def mouse_down(self, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *, button='left'):
pyautogui.mouseDown(button=button)
self.sleep(wait_after)
def mouse_up(self, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *, button='left'):
pyautogui.mouseUp(button=button)
self.sleep(wait_after)
def scroll_down(self, clicks):
pyautogui.scroll(-1 * clicks)
def scroll_up(self, clicks):
pyautogui.scroll(clicks)
@only_if_element
def move(self):
x, y = self.state.center()
pyautogui.moveTo(x, y)
def move_relative(self, x, y):
x = self.get_last_x() + x
y = self.get_last_y() + y
pyautogui.moveTo(x, y)
def move_random(self, range_x, range_y):
x = int(random.random() * range_x)
y = int(random.random() * range_y)
pyautogui.moveTo(x, y)
@only_if_element
def right_click(self, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *,
clicks=1, interval_between_clicks=0):
x, y = self.state.center()
os_compat.click(x, y, clicks=clicks, button='right', interval=interval_between_clicks/1000.0)
self.sleep(wait_after)
def right_click_at(self, x, y):
os_compat.click(x, y, button='right')
@only_if_element
def right_click_relative(self, x, y, interval_between_clicks=0, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION):
self.click_relative(x, y, wait_after=wait_after, clicks=3, interval_between_clicks=interval_between_clicks,
button='right')
rval=interval/1000.0)
def kb_type(self, text, interval=0):
pyautogui.write(text, interval=interval/1000.0)
self.sleep(config.DEFAULT_SLEEP_AFTER_ACTION)
def paste(self, text=None, wait=0):
if text:
pyperclip.copy(text)
self.control_v()
def copy_to_clipboard(self, text, wait=0):
pyperclip.copy(text)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def tab(self, wait=0):
pyautogui.press('tab')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def enter(self, wait=0):
pyautogui.press('enter')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def key_right(self, wait=0):
pyautogui.press('right')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def key_enter(self, wait=0):
self.enter(wait)
def key_end(self, wait=0):
pyautogui.press('end')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def key_esc(self, wait=0):
pyautogui.press('esc')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def _key_fx(self, idx, wait=0):
pyautogui.press(f'f{idx}')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def key_f1(self, wait=0):
self._key_fx(1, wait=wait)
def key_f2(self, wait=0):
self._key_fx(2, wait=wait)
def key_f3(self, wait=0):
self._key_fx(3, wait=wait)
def key_f4(self, wait=0):
self._key_fx(4, wait=wait)
def key_f5(self, wait=0):
self._key_fx(5, wait=wait)
def key_f6(self, wait=0):
self._key_fx(6, wait=wait)
def key_f7(self, wait=0):
self._key_fx(7, wait=wait)
def key_f8(self, wait=0):
self._key_fx(8, wait=wait)
def key_f9(self, wait=0):
self._key_fx(9, wait=wait)
def key_f10(self, wait=0):
self._key_fx(10, wait=wait)
def key_f11(self, wait=0):
self._key_fx(11, wait=wait)
def key_f12(self, wait=0):
self._key_fx(12, wait=wait)
def hold_shift(self, wait=0):
pyautogui.keyDown('shift')
self.sleep(wait)
def release_shift(self):
pyautogui.keyUp('shift')
def alt_space(self, wait=0):
pyautogui.hotkey('alt', 'space', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def maximize_window(self):
self.alt_space()
self.sleep(1000)
pyautogui.press('x')
def type_keys_with_interval(self, interval, keys):
pyautogui.hotkey(*keys, interval=interval/1000.0)
def type_keys(self, keys):
self.type_keys_with_interval(100, keys)
def alt_e(self, wait=0):
pyautogui.hotkey('alt', 'e', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def alt_r(self, wait=0):
pyautogui.hotkey('alt', 'r', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def alt_f(self, wait=0):
pyautogui.hotkey('alt', 'f', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def alt_u(self, wait=0):
pyautogui.hotkey('alt', 'u', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def alt_f4(self, wait=0):
pyautogui.hotkey('alt', 'f4', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def control_c(self, wait=0):
key = 'ctrl'
if platform.system() == 'Darwin':
key = 'command'
pyautogui.hotkey(key, 'c', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
return self.get_clipboard()
def control_v(self, wait=0):
key = 'ctrl'
if platform.system() == 'Darwin':
key = 'command'
pyautogui.hotkey(key, 'v', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def control_a(self, wait=0):
key = 'ctrl'
if platform.system() == 'Darwin':
key = 'command'
pyautogui.hotkey(key, 'a', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def control_f(self, wait=0):
key = 'ctrl'
if platform.system() == 'Darwin':
key = 'command'
pyautogui.hotkey(key, 'f', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def control_p(self, wait=0):
key = 'ctrl'
if platform.system() == 'Darwin':
key = 'command'
pyautogui.hotkey(key, 'p', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def control_u(self, wait=0):
key = 'ctrl'
if platform.system() == 'Darwin':
key = 'command'
pyautogui.hotkey(key, 'u', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def control_r(self, wait=0):
key = 'ctrl'
if platform.system() == 'Darwin':
key = 'command'
pyautogui.hotkey(key, 'r', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def control_t(self, wait=0):
key = 'ctrl'
if platform.system() == 'Darwin':
key = 'command'
pyautogui.hotkey(key, 't', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def control_end(self, wait=0):
key = 'ctrl'
if platform.system() == 'Darwin':
key = 'command'
pyautogui.hotkey(key, 'end', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def control_home(self, wait=0):
key = 'ctrl'
if platform.system() == 'Darwin':
key = 'command'
pyautogui.hotkey(key, 'home', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def control_w(self, wait=0):
key = 'ctrl'
if platform.system() == 'Darwin':
key = 'command'
pyautogui.hotkey(key, 'w', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def control_shift_p(self, wait=0):
key = 'ctrl'
if platform.system() == 'Darwin':
key = 'command'
pyautogui.hotkey(key, 'shift', 'p', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def control_shift_j(self, wait=0):
key = 'ctrl'
if platform.system() == 'Darwin':
key = 'command'
pyautogui.hotkey(key, 'shift', 'j', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def shift_tab(self, wait=0):
pyautogui.hotkey('shift', 'tab', interval=self._interval)
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def get_clipboard(self):
return pyperclip.paste()
def type_left(self, wait=0):
pyautogui.press('left')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def type_right(self, wait=0):
pyautogui.press('right')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def type_down(self, wait=0):
pyautogui.press('down')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def type_up(self, wait=0):
pyautogui.press('up')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def type_windows(self, wait=0):
pyautogui.press('win')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def page_up(self, wait=0):
pyautogui.press('pageup')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def page_down(self, wait=0):
pyautogui.press('pagedown')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def space(self, wait=0):
pyautogui.press('space')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def backspace(self, wait=0):
pyautogui.press('backspace')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
def delete(self, wait=0):
pyautogui.press('delete')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
self.sleep(delay)
ile(self, path, timeout=10000):
start_time = time.time()
while True:
elapsed_time = (time.time() - start_time) * 1000
if elapsed_time > timeout:
return False
if os.path.isfile(path) and os.access(path, os.R_OK):
return True
self.sleep(config.DEFAULT_SLEEP_AFTER_ACTION)
def execute(self, file_path):
if platform.system() == "Windows":
os.startfile(file_path)
else:
subprocess.Popen(file_path.split(" "))
def wait(self, interval):
time.sleep(interval / 1000.0)
def sleep(self, interval):
self.wait(interval)
| true | true |
f734c01585f2ae4014ad72f125f7748b1f3b18d9 | 4,174 | py | Python | aleph/tests/test_collections_api.py | gavinrozzi/aleph | a8e3d10ec34b0d0a05b4daf3fdd2d09b96928b35 | [
"MIT"
] | null | null | null | aleph/tests/test_collections_api.py | gavinrozzi/aleph | a8e3d10ec34b0d0a05b4daf3fdd2d09b96928b35 | [
"MIT"
] | null | null | null | aleph/tests/test_collections_api.py | gavinrozzi/aleph | a8e3d10ec34b0d0a05b4daf3fdd2d09b96928b35 | [
"MIT"
] | null | null | null | import json
from aleph.core import db
from aleph.model import Entity
from aleph.tests.util import TestCase
class CollectionsApiTestCase(TestCase):
def setUp(self):
super(CollectionsApiTestCase, self).setUp()
self.rolex = self.create_user(foreign_id='user_3')
self.col = self.create_collection(
label='Test Collection',
foreign_id='test_coll_entities_api',
category='leak',
countries=[]
)
self.ent = Entity.create({
'schema': 'Person',
'name': 'Winnie the Pooh',
}, self.col)
db.session.add(self.ent)
db.session.commit()
def test_index(self):
res = self.client.get('/api/2/collections')
assert res.status_code == 200, res
assert res.json['total'] == 0, res.json
_, headers = self.login(is_admin=True)
res = self.client.get('/api/2/collections',
headers=headers)
assert res.status_code == 200, res
assert res.json['total'] == 1, res.json
def test_view(self):
res = self.client.get('/api/2/collections/%s' % self.col.id)
assert res.status_code == 403, res
_, headers = self.login(is_admin=True)
res = self.client.get('/api/2/collections/%s' % self.col.id,
headers=headers)
assert res.status_code == 200, res
assert 'test_coll' in res.json['foreign_id'], res.json
assert 'Winnie' not in res.json['label'], res.json
def test_sitemap(self):
self.update_index()
url = '/api/2/collections/%s/sitemap.xml' % self.col.id
res = self.client.get(url)
assert res.status_code == 403, res
self.grant_publish(self.col)
res = self.client.get(url)
assert res.status_code == 200, res
data = res.data.decode('utf-8')
assert self.ent.id in data, data
def test_rdf(self):
url = '/api/2/collections/%s/rdf' % self.col.id
res = self.client.get(url)
assert res.status_code == 403, res
self.grant_publish(self.col)
res = self.client.get(url)
assert res.status_code == 200, res
def test_update_valid(self):
_, headers = self.login(is_admin=True)
url = '/api/2/collections/%s' % self.col.id
res = self.client.get(url,
headers=headers)
assert res.status_code == 200, res
data = res.json
data['label'] = 'Collected Collection'
res = self.client.post(url,
data=json.dumps(data),
headers=headers,
content_type='application/json')
assert res.status_code == 200, res.json
assert 'Collected' in res.json['label'], res.json
def test_update_no_label(self):
_, headers = self.login(is_admin=True)
url = '/api/2/collections/%s' % self.col.id
res = self.client.get(url, headers=headers)
data = res.json
data['label'] = ''
res = self.client.post(url,
data=json.dumps(data),
headers=headers,
content_type='application/json')
assert res.status_code == 400, res.json
res = self.client.get(url, headers=headers)
data = res.json
data['category'] = 'banana'
res = self.client.post(url,
data=json.dumps(data),
headers=headers,
content_type='application/json')
assert res.status_code == 400, res.json
def test_delete(self):
_, headers = self.login(is_admin=True)
url = '/api/2/collections/%s' % self.col.id
res = self.client.get(url, headers=headers)
assert res.status_code == 200, res
res = self.client.delete(url,
headers=headers)
assert res.status_code == 204, res
res = self.client.get(url,
headers=headers)
assert res.status_code == 404, res
| 36.938053 | 68 | 0.546957 | import json
from aleph.core import db
from aleph.model import Entity
from aleph.tests.util import TestCase
class CollectionsApiTestCase(TestCase):
def setUp(self):
super(CollectionsApiTestCase, self).setUp()
self.rolex = self.create_user(foreign_id='user_3')
self.col = self.create_collection(
label='Test Collection',
foreign_id='test_coll_entities_api',
category='leak',
countries=[]
)
self.ent = Entity.create({
'schema': 'Person',
'name': 'Winnie the Pooh',
}, self.col)
db.session.add(self.ent)
db.session.commit()
def test_index(self):
res = self.client.get('/api/2/collections')
assert res.status_code == 200, res
assert res.json['total'] == 0, res.json
_, headers = self.login(is_admin=True)
res = self.client.get('/api/2/collections',
headers=headers)
assert res.status_code == 200, res
assert res.json['total'] == 1, res.json
def test_view(self):
res = self.client.get('/api/2/collections/%s' % self.col.id)
assert res.status_code == 403, res
_, headers = self.login(is_admin=True)
res = self.client.get('/api/2/collections/%s' % self.col.id,
headers=headers)
assert res.status_code == 200, res
assert 'test_coll' in res.json['foreign_id'], res.json
assert 'Winnie' not in res.json['label'], res.json
def test_sitemap(self):
self.update_index()
url = '/api/2/collections/%s/sitemap.xml' % self.col.id
res = self.client.get(url)
assert res.status_code == 403, res
self.grant_publish(self.col)
res = self.client.get(url)
assert res.status_code == 200, res
data = res.data.decode('utf-8')
assert self.ent.id in data, data
def test_rdf(self):
url = '/api/2/collections/%s/rdf' % self.col.id
res = self.client.get(url)
assert res.status_code == 403, res
self.grant_publish(self.col)
res = self.client.get(url)
assert res.status_code == 200, res
def test_update_valid(self):
_, headers = self.login(is_admin=True)
url = '/api/2/collections/%s' % self.col.id
res = self.client.get(url,
headers=headers)
assert res.status_code == 200, res
data = res.json
data['label'] = 'Collected Collection'
res = self.client.post(url,
data=json.dumps(data),
headers=headers,
content_type='application/json')
assert res.status_code == 200, res.json
assert 'Collected' in res.json['label'], res.json
def test_update_no_label(self):
_, headers = self.login(is_admin=True)
url = '/api/2/collections/%s' % self.col.id
res = self.client.get(url, headers=headers)
data = res.json
data['label'] = ''
res = self.client.post(url,
data=json.dumps(data),
headers=headers,
content_type='application/json')
assert res.status_code == 400, res.json
res = self.client.get(url, headers=headers)
data = res.json
data['category'] = 'banana'
res = self.client.post(url,
data=json.dumps(data),
headers=headers,
content_type='application/json')
assert res.status_code == 400, res.json
def test_delete(self):
_, headers = self.login(is_admin=True)
url = '/api/2/collections/%s' % self.col.id
res = self.client.get(url, headers=headers)
assert res.status_code == 200, res
res = self.client.delete(url,
headers=headers)
assert res.status_code == 204, res
res = self.client.get(url,
headers=headers)
assert res.status_code == 404, res
| true | true |
f734c05d8d611e2e78373da2d10eb3ba7f634a15 | 18,048 | py | Python | scripts/icml_2018_experiment.py | davidinouye/destructive-deep-learning | 632add7a9731347e050d271ceebb24251e1d8e01 | [
"BSD-3-Clause"
] | 21 | 2018-06-19T21:23:32.000Z | 2021-03-03T03:29:00.000Z | scripts/icml_2018_experiment.py | davidinouye/destructive-deep-learning | 632add7a9731347e050d271ceebb24251e1d8e01 | [
"BSD-3-Clause"
] | 39 | 2018-06-19T16:45:51.000Z | 2020-09-15T12:53:20.000Z | scripts/icml_2018_experiment.py | davidinouye/destructive-deep-learning | 632add7a9731347e050d271ceebb24251e1d8e01 | [
"BSD-3-Clause"
] | 5 | 2018-10-01T22:58:32.000Z | 2019-11-12T20:41:16.000Z | """ICML 2018 experiment for MNIST and CIFAR-10."""
import argparse
import logging
import os
import subprocess
import sys
import time
import warnings
import numpy as np
import scipy.stats # Needed for standard error of the mean scipy.stats.sem
from sklearn.base import clone
from sklearn.decomposition import PCA
# Add the directory of this script
sys.path.append(os.path.dirname(os.path.realpath(__file__))) # noqa E402
# Add directory for ddl library
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')) # noqa E402
# isort:imports-firstparty
from ddl.base import CompositeDestructor
from ddl.deep import DeepDestructorCV
from ddl.externals.mlpack import MlpackDensityTreeEstimator
from ddl.independent import IndependentDensity, IndependentDestructor, IndependentInverseCdf
from ddl.linear import BestLinearReconstructionDestructor
from ddl.local import FeatureGroupsDestructor, ImageFeaturePairs
from ddl.tree import TreeDensity, TreeDestructor
from ddl.univariate import HistogramUnivariateDensity, ScipyUnivariateDensity
from maf_data import CIFAR10_ALPHA, MNIST_ALPHA, get_maf_data
try:
import cPickle as pickle
except ImportError:
import pickle
logger = logging.getLogger(__name__)
def run_experiment(data_name, model_name, model_kwargs=None):
    """Train a deep destructor on `data_name` and pickle the results.

    Parameters
    ----------
    data_name : str
        Dataset identifier, either 'mnist' or 'cifar10'.
    model_name : str
        One of 'deep-copula', 'image-pairs-copula' or 'image-pairs-tree'.
    model_kwargs : dict, optional
        Experiment options; must contain 'experiment_filename' and
        'experiment_label' (see `_get_experiment_filename_and_label`).

    Returns
    -------
    result_dict : dict
        Data/model statistics, timings, test scores and the fitted destructor.
    """
    if model_kwargs is None:
        model_kwargs = {}

    # Setup logging and record the current git revision for reproducibility.
    experiment_filename = model_kwargs['experiment_filename']
    experiment_label = model_kwargs['experiment_label']
    _setup_loggers(experiment_filename)
    try:
        git_hash = subprocess.check_output(
            ['git', 'rev-parse', '--short', 'HEAD']).decode('ascii')[:-1]
    except subprocess.CalledProcessError:
        git_hash = 'unknown'
    logger.debug('Current git hash = %s' % git_hash)

    # Load data
    logger.debug('Loading data for %s' % experiment_label)
    data_dict = get_maf_data(data_name)
    X_train, X_validation, X_test = (
        data_dict['X_train'], data_dict['X_validation'], data_dict['X_test'])
    n_train, n_validation, n_test = (
        _X.shape[0] for _X in (X_train, X_validation, X_test))

    # Use a single fixed train/validation split instead of k-fold CV.
    X_train_val = np.vstack((X_train, X_validation))
    model_kwargs['cv'] = [(np.arange(n_train), n_train + np.arange(n_validation))]
    model_kwargs['refit'] = False

    # Build and fit the destructor.
    deep_destructor = _get_model(data_name, model_name, model_kwargs=model_kwargs)
    logger.debug('Starting training for %s' % experiment_label)
    start_time = time.time()
    deep_destructor.fit(X_train_val, y=None, X_test=X_test)
    train_time = time.time() - start_time
    logger.debug('Finished training for %s' % experiment_label)
    logger.debug('%s: Time to train = %g s or %g minutes or %g hours'
                 % (experiment_label, train_time, train_time / 60,
                    train_time / 60 / 60))

    # Evaluate per-sample scores on the held-out test set.
    start_time = time.time()
    test_scores = deep_destructor.score_samples(X_test)
    score_time = time.time() - start_time
    test_score = np.mean(test_scores)
    test_score_stderr = scipy.stats.sem(test_scores)
    logger.debug('%s: Final test score=%g with std_err=%g computed in %g s'
                 % (experiment_label, float(test_score), test_score_stderr,
                    score_time))
    date_time_completed = time.strftime("%Y_%m_%d-%H_%M_%S")
    logger.debug('Date/time completed (just before saving): %s'
                 % date_time_completed)

    # Prepare results in dictionary
    result_dict = dict(
        # Data statistics
        data_name=data_name, n_features=X_train.shape[1],
        n_train=n_train, n_validation=n_validation, n_test=n_test,
        # Model
        destructor=deep_destructor, model_name=model_name,
        model_kwargs=model_kwargs,
        # Time
        train_time=train_time, score_time=score_time,
        date_time_completed=date_time_completed,
        # Test scores
        test_score=test_score, test_score_stderr=test_score_stderr,
        test_scores=test_scores,
        git_hash=git_hash,
    )

    # Persist everything so the results can be reloaded later.
    with open(experiment_filename + '.pkl', 'wb') as f:
        pickle.dump(result_dict, f)
    logger.debug('%s: Saved results to file %s'
                 % (experiment_label, experiment_filename))
    return result_dict
def load_experiment_results(data_name, model_name=None, model_kwargs=None, notebook=False):
    """Load a previously pickled result dict produced by `run_experiment`.

    Parameters
    ----------
    data_name : str
        Dataset identifier used when the experiment was run.
    model_name : str, optional
        Model identifier used when the experiment was run.
    model_kwargs : dict, optional
        The same kwargs that were used to build the experiment filename.
    notebook : bool, default=False
        If True, resolve the results path one directory up (for notebooks
        living above the scripts directory).

    Returns
    -------
    result_dict : dict
        The unpickled experiment results.
    """
    experiment_filename, _ = _get_experiment_filename_and_label(
        data_name, model_name=model_name, model_kwargs=model_kwargs)
    if notebook:
        experiment_filename = os.path.join('..', experiment_filename)
    with open(experiment_filename + '.pkl', 'rb') as f:
        result_dict = pickle.load(file=f)
    logger.debug('Loaded results from file %s' % experiment_filename)
    return result_dict
def _get_model(data_name, model_name, model_kwargs):
    """Build the DeepDestructorCV estimator for `model_name` on `data_name`."""
    model_kwargs.setdefault('is_test', False)

    # All models share the same initial destructor: inverse logit followed by
    # a 256-bin independent histogram destructor over [0, 1].
    first_destructor = CompositeDestructor(
        destructors=[
            _get_inverse_logit_destructor(data_name),
            IndependentDestructor(
                independent_density=IndependentDensity(
                    univariate_estimators=HistogramUnivariateDensity(
                        bins=256, bounds=[0, 1], alpha=1)
                )
            )
        ],
        random_state=0,
    )

    if model_name == 'deep-copula':
        stop_tol = 0.001
        per_layer_destructor = _get_copula_destructor()
    else:
        stop_tol = 0.0001
        n_jobs = model_kwargs['n_jobs']
        # One FeatureGroupsDestructor per pixel-pair direction
        # (pairs laid out in a spiral pattern around each pixel).
        pair_groupers = _get_pair_estimators(data_name, n_uniq_dir=8)
        base_pair_destructor = _get_pair_canonical_destructor(model_name)
        per_layer_destructor = [
            FeatureGroupsDestructor(
                groups_estimator=grouper,
                group_canonical_destructor=clone(base_pair_destructor),
                n_jobs=n_jobs,
            )
            for grouper in pair_groupers
        ]

    return DeepDestructorCV(
        init_destructor=first_destructor,
        canonical_destructor=per_layer_destructor,
        stop_tol=stop_tol,
        # Either n_extend or max_canonical_destructors must be None
        n_extend=1,
        cv=model_kwargs['cv'],
        refit=model_kwargs['refit'],
        silent=False,
        log_prefix='',
        random_state=0,
        # Only a single layer when sanity testing; unlimited otherwise.
        max_canonical_destructors=1 if model_kwargs['is_test'] else None,
    )
def _get_inverse_logit_destructor(data_name):
if data_name == 'mnist':
alpha = MNIST_ALPHA
elif data_name == 'cifar10':
alpha = CIFAR10_ALPHA
else:
raise ValueError('dataset should either be mnist or cifar10')
inverse_logit = CompositeDestructor(
destructors=[
IndependentDestructor(
independent_density=IndependentDensity(
univariate_estimators=ScipyUnivariateDensity(
scipy_rv=scipy.stats.logistic,
scipy_fit_kwargs=dict(floc=0, fscale=1)
)
)
),
IndependentDestructor(
independent_density=IndependentDensity(
univariate_estimators=ScipyUnivariateDensity(
scipy_rv=scipy.stats.uniform,
scipy_fit_kwargs=dict(floc=alpha, fscale=1 - 2 * alpha)
)
)
)
]
)
return inverse_logit
def _get_copula_destructor(hist_kwargs=None):
    """Return one copula layer: histogram marginals, inverse CDF, then a
    best-linear (PCA-initialized) rotation destructor."""
    if hist_kwargs is None:
        hist_kwargs = dict(bins=40, bounds=[0, 1], alpha=100)
    marginal_destructor = IndependentDestructor(
        independent_density=IndependentDensity(
            univariate_estimators=HistogramUnivariateDensity(**hist_kwargs)
        )
    )
    return CompositeDestructor(
        destructors=[
            marginal_destructor,
            IndependentInverseCdf(),
            BestLinearReconstructionDestructor(
                linear_estimator=PCA(),
                destructor=IndependentDestructor(),
                linear_projector_kwargs=dict(fit_bias=False),
            ),
        ],
        random_state=0,
    )
def _get_pair_canonical_destructor(model_name):
if model_name == 'image-pairs-tree':
return TreeDestructor(
tree_density=TreeDensity(
tree_estimator=MlpackDensityTreeEstimator(
max_depth=None,
min_samples_leaf=100,
max_leaf_nodes=50,
),
get_tree=None,
node_destructor=None,
uniform_weight=0.5,
)
)
elif model_name == 'image-pairs-copula':
return _get_copula_destructor()
else:
raise ValueError('Invalid model name "%s"')
def _get_pair_estimators(data_name, n_uniq_dir):
"""Returns `n_uniq_dir` pair estimators in a spiral pattern."""
def _generate_pixel_circle(radius=1):
cur = radius * np.array([1, 1]) # Start in top right
d = [cur]
for step in np.array([[0, -1], [-1, 0], [0, 1], [1, 0]]):
for i in range(2 * radius):
cur = cur + step
d.append(cur)
d.pop(-1) # remove last that is a repeat
def _rotate(a, n):
return a[n:] + a[:n]
return _rotate(d, radius) # Rotate to make directly east the first direction
def _generate_pixel_spiral(n_spirals=2):
d = []
for i in range(n_spirals):
d.extend(_generate_pixel_circle(radius=i + 1))
return d
directions = np.array(_generate_pixel_spiral(n_spirals=10))
if data_name == 'mnist':
directions = directions[:n_uniq_dir]
return [
ImageFeaturePairs(
image_shape=(28, 28), relative_position=r,
init_offset=(0, 0), step=(1, 0), wrap=True
)
for r in directions
]
elif data_name == 'cifar10':
# Make 3d coordinates
directions = [(d2[0], d2[1], 0) for d2 in directions[:n_uniq_dir]]
init_offset = [(0, 0, 0) for _ in directions]
# Handle color channels
directions.extend([(0, 0, 1), (0, 0, 1), (0, 0, 1)])
init_offset.extend([(0, 0, 0), (0, 0, 1), (0, 0, 2)])
return [
ImageFeaturePairs(
image_shape=(32, 32, 3), relative_position=r,
init_offset=io, step=(1, 0, 0), wrap=True
)
for r, io in zip(directions, init_offset)
]
else:
raise RuntimeError('Only mnist and cifar10 are supported')
def _setup_loggers(experiment_filename):
    """Mirror DEBUG-level log output to stdout and `<experiment_filename>.log`."""
    # File and console share the same format so logs line up.
    fmt = logging.Formatter(
        fmt='%(asctime)s:%(levelname)s:%(name)s:%(process)d: %(message)s')
    file_handler = logging.FileHandler(experiment_filename + '.log')
    file_handler.setFormatter(fmt)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(fmt)
    # Attach both handlers to the root logger.
    root = logging.getLogger()
    root.addHandler(stream_handler)
    root.addHandler(file_handler)
    # Route warnings through logging and enable DEBUG everywhere relevant.
    logging.captureWarnings(True)
    root.setLevel(logging.DEBUG)
    logging.getLogger('ddl').setLevel(logging.DEBUG)
def _get_experiment_filename_and_label(data_name, model_name=None, model_kwargs=None):
if model_kwargs is None:
model_kwargs = {}
data_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..', 'data', 'results')
try:
os.makedirs(data_dir)
except OSError:
pass
arg_str = '_'.join(['%s-%s' % (k, str(v)) for k, v in model_kwargs.items()])
arg_str = arg_str.replace('.', '_')
if len(arg_str) > 0:
arg_str = '_' + arg_str
filename = ('data-%s_model-%s%s'
% (str(data_name), str(model_name), arg_str))
pickle_filename = os.path.join(data_dir, filename)
arg_str = ', '.join(['%s=%s' % (k, str(v)) for k, v in model_kwargs.items()])
if len(arg_str) > 0:
arg_str = ', ' + arg_str
experiment_label = '(data=%s, model=%s%s)' % (data_name, str(model_name), arg_str)
return pickle_filename, experiment_label
# Fast sanity-check tests for the mnist dataset (only when pytest is present).
try:
    # noinspection PyPackageRequirements
    import pytest
except ImportError:
    pass
else:
    @pytest.mark.parametrize(
        'model_name',
        # 'image-pairs-tree' not needed since covered by other tests
        ['deep-copula', 'image-pairs-copula']
    )
    def test_mnist_experiment(model_name):
        """Run a one-layer mnist experiment and check the known-good score."""
        data_name = 'mnist'
        model_kwargs = dict(is_test=True, n_jobs=1)
        (model_kwargs['experiment_filename'],
         model_kwargs['experiment_label']) = _get_experiment_filename_and_label(
            data_name, model_name=model_name, model_kwargs=model_kwargs)
        result_dict = run_experiment(data_name, model_name, model_kwargs=model_kwargs)
        # Known-good test scores recorded for each model.
        known_models = ['deep-copula', 'image-pairs-copula', 'image-pairs-tree']
        known_scores = [-1.060270463188296844e+03, -1.155477974922050180e+03,
                        -1.134326498390250208e+03]
        expected = known_scores[known_models.index(model_name)]
        assert (np.abs(expected - result_dict['test_score'])
                / np.abs(expected) < 1e-15)
if __name__ == '__main__':
    # Parse args
    all_data_names = ['mnist', 'cifar10']
    all_model_names = ['deep-copula', 'image-pairs-copula', 'image-pairs-tree']

    def _str2bool(value):
        # Bug fix: argparse's type=bool treats ANY non-empty string (even
        # "False") as True; parse common false-y spellings explicitly.
        return str(value).lower() not in ('false', '0', 'no', '')

    parser = argparse.ArgumentParser(description='Sets up and/or runs MAF experiments.')
    parser.add_argument(
        '--model_names', default=','.join(all_model_names),
        help='One or more model names separated by commas from the list %s' % str(all_model_names))
    parser.add_argument(
        '--data_names', default=','.join(all_data_names),
        help='One or more data names separated by commas from the list %s' % str(all_data_names))
    parser.add_argument(
        '--parallel_subprocesses', default=False, type=_str2bool,
        help='Whether to use parallel subprocesses for each (model, data) experiment '
             'pair or run directly (default is False).')
    parser.add_argument(
        '--n_jobs', default=1, type=int,
        help='Number of parallel jobs to use for image-pairs models (default is 1).')
    args = parser.parse_args()
    print('Parsed args = %s' % str(args))
    print('----------------------')

    # Run experiments
    _model_kwargs = vars(args).copy()  # Extract model_kwargs as dictionary
    model_names = _model_kwargs.pop('model_names').split(',')
    data_names = _model_kwargs.pop('data_names').split(',')
    is_parallel = _model_kwargs.pop('parallel_subprocesses')
    processes = []
    for _data_name in data_names:
        # Make sure data has already been cached
        get_maf_data(_data_name)
        for _model_name in model_names:
            (_model_kwargs['experiment_filename'],
             _model_kwargs['experiment_label']) = _get_experiment_filename_and_label(
                _data_name, model_name=_model_name, model_kwargs=_model_kwargs)
            if not is_parallel:
                # Just run the experiment directly; skip known mlpack issues.
                try:
                    run_experiment(_data_name, _model_name, _model_kwargs)
                except RuntimeError as e:
                    if 'mlpack' not in str(e).lower():
                        raise e
                    warnings.warn('Skipping %s because of error "%s"' % (_model_name, str(e)))
            else:
                # Generate a small driver script and pipe it into a python
                # subprocess so experiments can run in parallel.
                script_str = (
                    'import os\n'
                    'os.chdir(\'%s\')\n'
                    'from icml_2018_experiment import run_experiment\n'
                    'run_experiment(\'%s\', \'%s\', model_kwargs=%s)\n'
                ) % (
                    os.path.dirname(os.path.realpath(__file__)),
                    _data_name, _model_name, str(_model_kwargs)
                )
                # Bug fix: the original opened os.devnull once per experiment
                # and never closed it (fd leak) and left an unused echo_args
                # variable; subprocess.DEVNULL avoids both.
                echo = subprocess.Popen(['echo', '-e', script_str], stdout=subprocess.PIPE)
                python = subprocess.Popen(['python'], stdin=echo.stdout,
                                          stdout=subprocess.DEVNULL)
                processes.append(echo)
                processes.append(python)
                print('Started subprocess for experiment %s' % _model_kwargs['experiment_label'])
                print(
                    ' Appending to end of log file %s.log' % _model_kwargs['experiment_filename'])
            # Remove filenames and labels for next round
            _model_kwargs.pop('experiment_filename')
            _model_kwargs.pop('experiment_label')
    if is_parallel:
        # Wait for all processes to finish
        print('Waiting for all subprocesses to finish')
        for p in processes:
            p.wait()
        print('All subprocesses finished!')
| 37.757322 | 99 | 0.624224 | import argparse
import logging
import os
import subprocess
import sys
import time
import warnings
import numpy as np
import scipy.stats
from sklearn.base import clone
from sklearn.decomposition import PCA
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from ddl.base import CompositeDestructor
from ddl.deep import DeepDestructorCV
from ddl.externals.mlpack import MlpackDensityTreeEstimator
from ddl.independent import IndependentDensity, IndependentDestructor, IndependentInverseCdf
from ddl.linear import BestLinearReconstructionDestructor
from ddl.local import FeatureGroupsDestructor, ImageFeaturePairs
from ddl.tree import TreeDensity, TreeDestructor
from ddl.univariate import HistogramUnivariateDensity, ScipyUnivariateDensity
from maf_data import CIFAR10_ALPHA, MNIST_ALPHA, get_maf_data
try:
import cPickle as pickle
except ImportError:
import pickle
logger = logging.getLogger(__name__)
def run_experiment(data_name, model_name, model_kwargs=None):
    """Train a deep destructor on `data_name` and pickle the results.

    Parameters
    ----------
    data_name : str
        Dataset identifier, either 'mnist' or 'cifar10'.
    model_name : str
        One of 'deep-copula', 'image-pairs-copula' or 'image-pairs-tree'.
    model_kwargs : dict, optional
        Experiment options; must contain 'experiment_filename' and
        'experiment_label' (see `_get_experiment_filename_and_label`).

    Returns
    -------
    result_dict : dict
        Data/model statistics, timings, test scores and the fitted destructor.
    """
    if model_kwargs is None:
        model_kwargs = {}

    # Setup logging and record the current git revision for reproducibility.
    experiment_filename = model_kwargs['experiment_filename']
    experiment_label = model_kwargs['experiment_label']
    _setup_loggers(experiment_filename)
    try:
        git_hash = subprocess.check_output(
            ['git', 'rev-parse', '--short', 'HEAD']).decode('ascii')[:-1]
    except subprocess.CalledProcessError:
        git_hash = 'unknown'
    logger.debug('Current git hash = %s' % git_hash)

    # Load data
    logger.debug('Loading data for %s' % experiment_label)
    data_dict = get_maf_data(data_name)
    X_train, X_validation, X_test = (
        data_dict['X_train'], data_dict['X_validation'], data_dict['X_test'])
    n_train, n_validation, n_test = (
        _X.shape[0] for _X in (X_train, X_validation, X_test))

    # Use a single fixed train/validation split instead of k-fold CV.
    X_train_val = np.vstack((X_train, X_validation))
    model_kwargs['cv'] = [(np.arange(n_train), n_train + np.arange(n_validation))]
    model_kwargs['refit'] = False

    # Build and fit the destructor.
    deep_destructor = _get_model(data_name, model_name, model_kwargs=model_kwargs)
    logger.debug('Starting training for %s' % experiment_label)
    start_time = time.time()
    deep_destructor.fit(X_train_val, y=None, X_test=X_test)
    train_time = time.time() - start_time
    logger.debug('Finished training for %s' % experiment_label)
    logger.debug('%s: Time to train = %g s or %g minutes or %g hours'
                 % (experiment_label, train_time, train_time / 60,
                    train_time / 60 / 60))

    # Evaluate per-sample scores on the held-out test set.
    start_time = time.time()
    test_scores = deep_destructor.score_samples(X_test)
    score_time = time.time() - start_time
    test_score = np.mean(test_scores)
    test_score_stderr = scipy.stats.sem(test_scores)
    logger.debug('%s: Final test score=%g with std_err=%g computed in %g s'
                 % (experiment_label, float(test_score), test_score_stderr,
                    score_time))
    date_time_completed = time.strftime("%Y_%m_%d-%H_%M_%S")
    logger.debug('Date/time completed (just before saving): %s'
                 % date_time_completed)

    # Prepare results in dictionary
    result_dict = dict(
        # Data statistics
        data_name=data_name, n_features=X_train.shape[1],
        n_train=n_train, n_validation=n_validation, n_test=n_test,
        # Model
        destructor=deep_destructor, model_name=model_name,
        model_kwargs=model_kwargs,
        # Time
        train_time=train_time, score_time=score_time,
        date_time_completed=date_time_completed,
        # Test scores
        test_score=test_score, test_score_stderr=test_score_stderr,
        test_scores=test_scores,
        git_hash=git_hash,
    )

    # Persist everything so the results can be reloaded later.
    with open(experiment_filename + '.pkl', 'wb') as f:
        pickle.dump(result_dict, f)
    logger.debug('%s: Saved results to file %s'
                 % (experiment_label, experiment_filename))
    return result_dict
def load_experiment_results(data_name, model_name=None, model_kwargs=None, notebook=False):
    """Load a previously pickled result dict produced by `run_experiment`.

    Parameters
    ----------
    data_name : str
        Dataset identifier used when the experiment was run.
    model_name : str, optional
        Model identifier used when the experiment was run.
    model_kwargs : dict, optional
        The same kwargs that were used to build the experiment filename.
    notebook : bool, default=False
        If True, resolve the results path one directory up (for notebooks
        living above the scripts directory).

    Returns
    -------
    result_dict : dict
        The unpickled experiment results.
    """
    experiment_filename, _ = _get_experiment_filename_and_label(
        data_name, model_name=model_name, model_kwargs=model_kwargs)
    if notebook:
        experiment_filename = os.path.join('..', experiment_filename)
    with open(experiment_filename + '.pkl', 'rb') as f:
        result_dict = pickle.load(file=f)
    logger.debug('Loaded results from file %s' % experiment_filename)
    return result_dict
def _get_model(data_name, model_name, model_kwargs):
    """Build the DeepDestructorCV estimator for `model_name` on `data_name`."""
    model_kwargs.setdefault('is_test', False)

    # All models share the same initial destructor: inverse logit followed by
    # a 256-bin independent histogram destructor over [0, 1].
    first_destructor = CompositeDestructor(
        destructors=[
            _get_inverse_logit_destructor(data_name),
            IndependentDestructor(
                independent_density=IndependentDensity(
                    univariate_estimators=HistogramUnivariateDensity(
                        bins=256, bounds=[0, 1], alpha=1)
                )
            )
        ],
        random_state=0,
    )

    if model_name == 'deep-copula':
        stop_tol = 0.001
        per_layer_destructor = _get_copula_destructor()
    else:
        stop_tol = 0.0001
        n_jobs = model_kwargs['n_jobs']
        # One FeatureGroupsDestructor per pixel-pair direction
        # (pairs laid out in a spiral pattern around each pixel).
        pair_groupers = _get_pair_estimators(data_name, n_uniq_dir=8)
        base_pair_destructor = _get_pair_canonical_destructor(model_name)
        per_layer_destructor = [
            FeatureGroupsDestructor(
                groups_estimator=grouper,
                group_canonical_destructor=clone(base_pair_destructor),
                n_jobs=n_jobs,
            )
            for grouper in pair_groupers
        ]

    return DeepDestructorCV(
        init_destructor=first_destructor,
        canonical_destructor=per_layer_destructor,
        stop_tol=stop_tol,
        # Either n_extend or max_canonical_destructors must be None
        n_extend=1,
        cv=model_kwargs['cv'],
        refit=model_kwargs['refit'],
        silent=False,
        log_prefix='',
        random_state=0,
        # Only a single layer when sanity testing; unlimited otherwise.
        max_canonical_destructors=1 if model_kwargs['is_test'] else None,
    )
def _get_inverse_logit_destructor(data_name):
if data_name == 'mnist':
alpha = MNIST_ALPHA
elif data_name == 'cifar10':
alpha = CIFAR10_ALPHA
else:
raise ValueError('dataset should either be mnist or cifar10')
inverse_logit = CompositeDestructor(
destructors=[
IndependentDestructor(
independent_density=IndependentDensity(
univariate_estimators=ScipyUnivariateDensity(
scipy_rv=scipy.stats.logistic,
scipy_fit_kwargs=dict(floc=0, fscale=1)
)
)
),
IndependentDestructor(
independent_density=IndependentDensity(
univariate_estimators=ScipyUnivariateDensity(
scipy_rv=scipy.stats.uniform,
scipy_fit_kwargs=dict(floc=alpha, fscale=1 - 2 * alpha)
)
)
)
]
)
return inverse_logit
def _get_copula_destructor(hist_kwargs=None):
    """Return one copula layer: histogram marginals, inverse CDF, then a
    best-linear (PCA-initialized) rotation destructor."""
    if hist_kwargs is None:
        hist_kwargs = dict(bins=40, bounds=[0, 1], alpha=100)
    marginal_destructor = IndependentDestructor(
        independent_density=IndependentDensity(
            univariate_estimators=HistogramUnivariateDensity(**hist_kwargs)
        )
    )
    return CompositeDestructor(
        destructors=[
            marginal_destructor,
            IndependentInverseCdf(),
            BestLinearReconstructionDestructor(
                linear_estimator=PCA(),
                destructor=IndependentDestructor(),
                linear_projector_kwargs=dict(fit_bias=False),
            ),
        ],
        random_state=0,
    )
def _get_pair_canonical_destructor(model_name):
if model_name == 'image-pairs-tree':
return TreeDestructor(
tree_density=TreeDensity(
tree_estimator=MlpackDensityTreeEstimator(
max_depth=None,
min_samples_leaf=100,
max_leaf_nodes=50,
),
get_tree=None,
node_destructor=None,
uniform_weight=0.5,
)
)
elif model_name == 'image-pairs-copula':
return _get_copula_destructor()
else:
raise ValueError('Invalid model name "%s"')
def _get_pair_estimators(data_name, n_uniq_dir):
def _generate_pixel_circle(radius=1):
cur = radius * np.array([1, 1])
d = [cur]
for step in np.array([[0, -1], [-1, 0], [0, 1], [1, 0]]):
for i in range(2 * radius):
cur = cur + step
d.append(cur)
d.pop(-1)
def _rotate(a, n):
return a[n:] + a[:n]
return _rotate(d, radius)
def _generate_pixel_spiral(n_spirals=2):
d = []
for i in range(n_spirals):
d.extend(_generate_pixel_circle(radius=i + 1))
return d
directions = np.array(_generate_pixel_spiral(n_spirals=10))
if data_name == 'mnist':
directions = directions[:n_uniq_dir]
return [
ImageFeaturePairs(
image_shape=(28, 28), relative_position=r,
init_offset=(0, 0), step=(1, 0), wrap=True
)
for r in directions
]
elif data_name == 'cifar10':
directions = [(d2[0], d2[1], 0) for d2 in directions[:n_uniq_dir]]
init_offset = [(0, 0, 0) for _ in directions]
directions.extend([(0, 0, 1), (0, 0, 1), (0, 0, 1)])
init_offset.extend([(0, 0, 0), (0, 0, 1), (0, 0, 2)])
return [
ImageFeaturePairs(
image_shape=(32, 32, 3), relative_position=r,
init_offset=io, step=(1, 0, 0), wrap=True
)
for r, io in zip(directions, init_offset)
]
else:
raise RuntimeError('Only mnist and cifar10 are supported')
def _setup_loggers(experiment_filename):
    """Mirror DEBUG-level log output to stdout and `<experiment_filename>.log`."""
    # File and console share the same format so logs line up.
    fmt = logging.Formatter(
        fmt='%(asctime)s:%(levelname)s:%(name)s:%(process)d: %(message)s')
    file_handler = logging.FileHandler(experiment_filename + '.log')
    file_handler.setFormatter(fmt)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(fmt)
    # Attach both handlers to the root logger.
    root = logging.getLogger()
    root.addHandler(stream_handler)
    root.addHandler(file_handler)
    # Route warnings through logging and enable DEBUG everywhere relevant.
    logging.captureWarnings(True)
    root.setLevel(logging.DEBUG)
    logging.getLogger('ddl').setLevel(logging.DEBUG)
def _get_experiment_filename_and_label(data_name, model_name=None, model_kwargs=None):
if model_kwargs is None:
model_kwargs = {}
data_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..', 'data', 'results')
try:
os.makedirs(data_dir)
except OSError:
pass
arg_str = '_'.join(['%s-%s' % (k, str(v)) for k, v in model_kwargs.items()])
arg_str = arg_str.replace('.', '_')
if len(arg_str) > 0:
arg_str = '_' + arg_str
filename = ('data-%s_model-%s%s'
% (str(data_name), str(model_name), arg_str))
pickle_filename = os.path.join(data_dir, filename)
arg_str = ', '.join(['%s=%s' % (k, str(v)) for k, v in model_kwargs.items()])
if len(arg_str) > 0:
arg_str = ', ' + arg_str
experiment_label = '(data=%s, model=%s%s)' % (data_name, str(model_name), arg_str)
return pickle_filename, experiment_label
# Fast sanity-check tests for the mnist dataset (only when pytest is present).
try:
    import pytest
except ImportError:
    pass
else:
    @pytest.mark.parametrize(
        'model_name',
        ['deep-copula', 'image-pairs-copula']
    )
    def test_mnist_experiment(model_name):
        """Run a one-layer mnist experiment and check the known-good score."""
        data_name = 'mnist'
        model_kwargs = dict(is_test=True, n_jobs=1)
        (model_kwargs['experiment_filename'],
         model_kwargs['experiment_label']) = _get_experiment_filename_and_label(
            data_name, model_name=model_name, model_kwargs=model_kwargs)
        result_dict = run_experiment(data_name, model_name, model_kwargs=model_kwargs)
        # Known-good test scores recorded for each model.
        known_models = ['deep-copula', 'image-pairs-copula', 'image-pairs-tree']
        known_scores = [-1.060270463188296844e+03, -1.155477974922050180e+03,
                        -1.134326498390250208e+03]
        expected = known_scores[known_models.index(model_name)]
        assert (np.abs(expected - result_dict['test_score'])
                / np.abs(expected) < 1e-15)
if __name__ == '__main__':
    # Parse args
    all_data_names = ['mnist', 'cifar10']
    all_model_names = ['deep-copula', 'image-pairs-copula', 'image-pairs-tree']

    def _str2bool(value):
        # Bug fix: argparse's type=bool treats ANY non-empty string (even
        # "False") as True; parse common false-y spellings explicitly.
        return str(value).lower() not in ('false', '0', 'no', '')

    parser = argparse.ArgumentParser(description='Sets up and/or runs MAF experiments.')
    parser.add_argument(
        '--model_names', default=','.join(all_model_names),
        help='One or more model names separated by commas from the list %s' % str(all_model_names))
    parser.add_argument(
        '--data_names', default=','.join(all_data_names),
        help='One or more data names separated by commas from the list %s' % str(all_data_names))
    parser.add_argument(
        '--parallel_subprocesses', default=False, type=_str2bool,
        help='Whether to use parallel subprocesses for each (model, data) experiment '
             'pair or run directly (default is False).')
    parser.add_argument(
        '--n_jobs', default=1, type=int,
        help='Number of parallel jobs to use for image-pairs models (default is 1).')
    args = parser.parse_args()
    print('Parsed args = %s' % str(args))
    print('----------------------')

    # Run experiments
    _model_kwargs = vars(args).copy()  # Extract model_kwargs as dictionary
    model_names = _model_kwargs.pop('model_names').split(',')
    data_names = _model_kwargs.pop('data_names').split(',')
    is_parallel = _model_kwargs.pop('parallel_subprocesses')
    processes = []
    for _data_name in data_names:
        # Make sure data has already been cached
        get_maf_data(_data_name)
        for _model_name in model_names:
            (_model_kwargs['experiment_filename'],
             _model_kwargs['experiment_label']) = _get_experiment_filename_and_label(
                _data_name, model_name=_model_name, model_kwargs=_model_kwargs)
            if not is_parallel:
                # Just run the experiment directly; skip known mlpack issues.
                try:
                    run_experiment(_data_name, _model_name, _model_kwargs)
                except RuntimeError as e:
                    if 'mlpack' not in str(e).lower():
                        raise e
                    warnings.warn('Skipping %s because of error "%s"' % (_model_name, str(e)))
            else:
                # Generate a small driver script and pipe it into a python
                # subprocess so experiments can run in parallel.
                script_str = (
                    'import os\n'
                    'os.chdir(\'%s\')\n'
                    'from icml_2018_experiment import run_experiment\n'
                    'run_experiment(\'%s\', \'%s\', model_kwargs=%s)\n'
                ) % (
                    os.path.dirname(os.path.realpath(__file__)),
                    _data_name, _model_name, str(_model_kwargs)
                )
                # Bug fix: the original opened os.devnull once per experiment
                # and never closed it (fd leak) and left an unused echo_args
                # variable; subprocess.DEVNULL avoids both.
                echo = subprocess.Popen(['echo', '-e', script_str], stdout=subprocess.PIPE)
                python = subprocess.Popen(['python'], stdin=echo.stdout,
                                          stdout=subprocess.DEVNULL)
                processes.append(echo)
                processes.append(python)
                print('Started subprocess for experiment %s' % _model_kwargs['experiment_label'])
                print(
                    ' Appending to end of log file %s.log' % _model_kwargs['experiment_filename'])
            # Remove filenames and labels for next round
            _model_kwargs.pop('experiment_filename')
            _model_kwargs.pop('experiment_label')
    if is_parallel:
        # Wait for all processes to finish
        print('Waiting for all subprocesses to finish')
        for p in processes:
            p.wait()
        print('All subprocesses finished!')
| true | true |
f734c2997557807df9822443ec26bb97e6121bc8 | 8,265 | py | Python | samples/pubsub.py | KernelGamut32/aws-iot-device-sdk-python-v2 | 1eb65095261c431c8fc6f1168ec490a5fc87ed34 | [
"Apache-2.0"
] | null | null | null | samples/pubsub.py | KernelGamut32/aws-iot-device-sdk-python-v2 | 1eb65095261c431c8fc6f1168ec490a5fc87ed34 | [
"Apache-2.0"
] | null | null | null | samples/pubsub.py | KernelGamut32/aws-iot-device-sdk-python-v2 | 1eb65095261c431c8fc6f1168ec490a5fc87ed34 | [
"Apache-2.0"
] | null | null | null | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
import argparse
from awscrt import io, mqtt, auth, http
from awsiot import mqtt_connection_builder
import sys
import threading
import time
from uuid import uuid4
import json
# This sample uses the Message Broker for AWS IoT to send and receive messages
# through an MQTT connection. On startup, the device connects to the server,
# subscribes to a topic, and begins publishing messages to that topic.
# The device should receive those same messages back from the message broker,
# since it is subscribed to that same topic.
# Command-line interface for the sample. NOTE(review): the description
# string has a typo ("and MQTT" should be "an MQTT") — runtime string, so
# fix it in a code change, not here.
parser = argparse.ArgumentParser(description="Send and receive messages through and MQTT connection.")
parser.add_argument('--endpoint', required=True, help="Your AWS IoT custom endpoint, not including a port. " +
"Ex: \"abcd123456wxyz-ats.iot.us-east-1.amazonaws.com\"")
parser.add_argument('--cert', help="File path to your client certificate, in PEM format.")
parser.add_argument('--key', help="File path to your private key, in PEM format.")
parser.add_argument('--root-ca', help="File path to root certificate authority, in PEM format. " +
"Necessary if MQTT server uses a certificate that's not already in " +
"your trust store.")
# A random UUID suffix keeps concurrently running samples from colliding
# on the same MQTT client ID.
parser.add_argument('--client-id', default="test-" + str(uuid4()), help="Client ID for MQTT connection.")
parser.add_argument('--topic', default="test/topic", help="Topic to subscribe to, and publish messages to.")
parser.add_argument('--message', default="Hello World!", help="Message to publish. " +
"Specify empty string to publish nothing.")
parser.add_argument('--count', default=10, type=int, help="Number of messages to publish/receive before exiting. " +
"Specify 0 to run forever.")
parser.add_argument('--use-websocket', default=False, action='store_true',
help="To use a websocket instead of raw mqtt. If you " +
"specify this option you must specify a region for signing, you can also enable proxy mode.")
parser.add_argument('--signing-region', default='us-east-1', help="If you specify --use-web-socket, this " +
"is the region that will be used for computing the Sigv4 signature")
parser.add_argument('--proxy-host', help="Hostname for proxy to connect to. Note: if you use this feature, " +
"you will likely need to set --root-ca to the ca for your proxy.")
parser.add_argument('--proxy-port', type=int, default=8080, help="Port for proxy to connect to.")
parser.add_argument('--verbosity', choices=[x.name for x in io.LogLevel], default=io.LogLevel.NoLogs.name,
help='Logging level')
parser.add_argument('--interval', type=int, default=1)
parser.add_argument('--devicename', default='')
# Using globals to simplify sample code
args = parser.parse_args()
io.init_logging(getattr(io.LogLevel, args.verbosity), 'stderr')
# Shared state used by the callbacks below: message counter plus an event
# the main thread waits on until all expected messages have arrived.
received_count = 0
received_all_event = threading.Event()
# Callback when connection is accidentally lost.
def on_connection_interrupted(connection, error, **kwargs):
    """Callback invoked when the MQTT connection is unexpectedly lost."""
    print("Connection interrupted. error: {}".format(error))
# Callback when an interrupted connection is re-established.
def on_connection_resumed(connection, return_code, session_present, **kwargs):
    """Callback invoked when an interrupted MQTT connection is re-established."""
    print("Connection resumed. return_code: {} session_present: {}".format(return_code, session_present))
    if return_code == mqtt.ConnectReturnCode.ACCEPTED and not session_present:
        # The broker dropped our session, so prior subscriptions are gone.
        print("Session did not persist. Resubscribing to existing topics...")
        resubscribe_future, _ = connection.resubscribe_existing_topics()
        # Cannot synchronously wait for the resubscribe result because we are
        # on the connection's event-loop thread; use a completion callback.
        resubscribe_future.add_done_callback(on_resubscribe_complete)
def on_resubscribe_complete(resubscribe_future):
    """Exit the process if the server rejected any topic on resubscribe."""
    results = resubscribe_future.result()
    print("Resubscribe results: {}".format(results))
    for topic, qos in results['topics']:
        # A None QoS means the broker refused this subscription.
        if qos is None:
            sys.exit("Server rejected resubscribe to topic: {}".format(topic))
# Callback when the subscribed topic receives a message
def on_message_received(topic, payload, dup, qos, retain, **kwargs):
    global received_count
    print("Received message from topic '{}': {}".format(topic, payload))
    received_count = received_count + 1
    # Wake the main thread once the expected number of messages has arrived.
    if received_count == args.count:
        received_all_event.set()
if __name__ == '__main__':
    # Spin up resources shared by both connection flavors.
    event_loop_group = io.EventLoopGroup(1)
    host_resolver = io.DefaultHostResolver(event_loop_group)
    client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
    # PEP 8 (E712): test the flag's truthiness, never `== True`.
    if args.use_websocket:
        proxy_options = None
        if args.proxy_host:
            proxy_options = http.HttpProxyOptions(host_name=args.proxy_host, port=args.proxy_port)
        credentials_provider = auth.AwsCredentialsProvider.new_default_chain(client_bootstrap)
        mqtt_connection = mqtt_connection_builder.websockets_with_default_aws_signing(
            endpoint=args.endpoint,
            client_bootstrap=client_bootstrap,
            region=args.signing_region,
            credentials_provider=credentials_provider,
            websocket_proxy_options=proxy_options,
            ca_filepath=args.root_ca,
            on_connection_interrupted=on_connection_interrupted,
            on_connection_resumed=on_connection_resumed,
            client_id=args.client_id,
            clean_session=False,
            keep_alive_secs=6)
    else:
        # Default: mutual-TLS connection using the cert/key files from the CLI.
        mqtt_connection = mqtt_connection_builder.mtls_from_path(
            endpoint=args.endpoint,
            cert_filepath=args.cert,
            pri_key_filepath=args.key,
            client_bootstrap=client_bootstrap,
            ca_filepath=args.root_ca,
            on_connection_interrupted=on_connection_interrupted,
            on_connection_resumed=on_connection_resumed,
            client_id=args.client_id,
            clean_session=False,
            keep_alive_secs=6)
    print("Connecting to {} with client ID '{}'...".format(
        args.endpoint, args.client_id))
    connect_future = mqtt_connection.connect()
    # Future.result() blocks until the connection attempt resolves.
    connect_future.result()
    print("Connected!")
    # Subscribe before publishing so our own messages are echoed back to us.
    print("Subscribing to topic '{}'...".format(args.topic))
    subscribe_future, packet_id = mqtt_connection.subscribe(
        topic=args.topic,
        qos=mqtt.QoS.AT_LEAST_ONCE,
        callback=on_message_received)
    subscribe_result = subscribe_future.result()
    print("Subscribed with {}".format(str(subscribe_result['qos'])))
    # Publish message to server desired number of times.
    # This step is skipped if message is blank.
    # This step loops forever if count was set to 0.
    if args.message:
        if args.count == 0:
            print("Sending messages until program killed")
        else:
            print("Sending {} message(s)".format(args.count))
        publish_count = 1
        while (publish_count <= args.count) or (args.count == 0):
            jsonMessage = {"device_name": args.devicename, "data": {"temperature": 79.5, "humidity": 0.45}}
            jsonData = json.dumps(jsonMessage)
            print("Publishing message to topic '{}': {}".format(args.topic, jsonData))
            mqtt_connection.publish(
                topic=args.topic,
                payload=jsonData,
                qos=mqtt.QoS.AT_LEAST_ONCE)
            time.sleep(args.interval)
            publish_count += 1
    # Wait for all messages to be received.
    # This waits forever if count was set to 0.
    if args.count != 0 and not received_all_event.is_set():
        print("Waiting for all messages to be received...")
    received_all_event.wait()
    print("{} message(s) received.".format(received_count))
    # Disconnect cleanly so the broker does not keep a half-open session.
    print("Disconnecting...")
    disconnect_future = mqtt_connection.disconnect()
    disconnect_future.result()
    print("Disconnected!")
| 45.916667 | 116 | 0.684453 |
import argparse
from awscrt import io, mqtt, auth, http
from awsiot import mqtt_connection_builder
import sys
import threading
import time
from uuid import uuid4
import json
# ---- Command-line interface for the MQTT pub/sub sample ----
parser = argparse.ArgumentParser(description="Send and receive messages through and MQTT connection.")
parser.add_argument('--endpoint', required=True, help="Your AWS IoT custom endpoint, not including a port. " +
                    "Ex: \"abcd123456wxyz-ats.iot.us-east-1.amazonaws.com\"")
parser.add_argument('--cert', help="File path to your client certificate, in PEM format.")
parser.add_argument('--key', help="File path to your private key, in PEM format.")
parser.add_argument('--root-ca', help="File path to root certificate authority, in PEM format. " +
                    "Necessary if MQTT server uses a certificate that's not already in " +
                    "your trust store.")
parser.add_argument('--client-id', default="test-" + str(uuid4()), help="Client ID for MQTT connection.")
parser.add_argument('--topic', default="test/topic", help="Topic to subscribe to, and publish messages to.")
parser.add_argument('--message', default="Hello World!", help="Message to publish. " +
                    "Specify empty string to publish nothing.")
parser.add_argument('--count', default=10, type=int, help="Number of messages to publish/receive before exiting. " +
                    "Specify 0 to run forever.")
parser.add_argument('--use-websocket', default=False, action='store_true',
                    help="To use a websocket instead of raw mqtt. If you " +
                    "specify this option you must specify a region for signing, you can also enable proxy mode.")
parser.add_argument('--signing-region', default='us-east-1', help="If you specify --use-web-socket, this " +
                    "is the region that will be used for computing the Sigv4 signature")
parser.add_argument('--proxy-host', help="Hostname for proxy to connect to. Note: if you use this feature, " +
                    "you will likely need to set --root-ca to the ca for your proxy.")
parser.add_argument('--proxy-port', type=int, default=8080, help="Port for proxy to connect to.")
parser.add_argument('--verbosity', choices=[x.name for x in io.LogLevel], default=io.LogLevel.NoLogs.name,
                    help='Logging level')
# Seconds to sleep between successive publishes.
parser.add_argument('--interval', type=int, default=1)
# Device name embedded in the published JSON payload.
parser.add_argument('--devicename', default='')
# Using globals to simplify sample code
args = parser.parse_args()
io.init_logging(getattr(io.LogLevel, args.verbosity), 'stderr')
# Number of messages received so far (incremented in on_message_received).
received_count = 0
# Set once `--count` messages have been received; the main thread waits on it.
received_all_event = threading.Event()
# Callback when connection is accidentally lost.
def on_connection_interrupted(connection, error, **kwargs):
    # `error` describes why the connection dropped; the SDK reconnects
    # automatically, so this handler only logs the event.
    print("Connection interrupted. error: {}".format(error))
# Callback when an interrupted connection is re-established.
def on_connection_resumed(connection, return_code, session_present, **kwargs):
    print("Connection resumed. return_code: {} session_present: {}".format(return_code, session_present))
    # If the broker accepted us but the previous session was lost, our
    # subscriptions are gone and must be re-established.
    if return_code == mqtt.ConnectReturnCode.ACCEPTED and not session_present:
        print("Session did not persist. Resubscribing to existing topics...")
        resubscribe_future, _ = connection.resubscribe_existing_topics()
        # Cannot synchronously wait for resubscribe result because we're on the connection's event-loop thread,
        # evaluate result with a callback instead.
        resubscribe_future.add_done_callback(on_resubscribe_complete)
def on_resubscribe_complete(resubscribe_future):
    """Log resubscribe results; exit if the server rejected any topic (qos is None)."""
    resubscribe_results = resubscribe_future.result()
    print("Resubscribe results: {}".format(resubscribe_results))
    for topic, qos in resubscribe_results['topics']:
        if qos is None:
            sys.exit("Server rejected resubscribe to topic: {}".format(topic))
# Callback when the subscribed topic receives a message
def on_message_received(topic, payload, dup, qos, retain, **kwargs):
    print("Received message from topic '{}': {}".format(topic, payload))
    global received_count
    received_count += 1
    # Wake the main thread once the expected number of messages has arrived.
    if received_count == args.count:
        received_all_event.set()
if __name__ == '__main__':
    # Spin up resources shared by both connection flavors.
    event_loop_group = io.EventLoopGroup(1)
    host_resolver = io.DefaultHostResolver(event_loop_group)
    client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
    # PEP 8 (E712): test the flag's truthiness, never `== True`.
    if args.use_websocket:
        proxy_options = None
        if args.proxy_host:
            proxy_options = http.HttpProxyOptions(host_name=args.proxy_host, port=args.proxy_port)
        credentials_provider = auth.AwsCredentialsProvider.new_default_chain(client_bootstrap)
        mqtt_connection = mqtt_connection_builder.websockets_with_default_aws_signing(
            endpoint=args.endpoint,
            client_bootstrap=client_bootstrap,
            region=args.signing_region,
            credentials_provider=credentials_provider,
            websocket_proxy_options=proxy_options,
            ca_filepath=args.root_ca,
            on_connection_interrupted=on_connection_interrupted,
            on_connection_resumed=on_connection_resumed,
            client_id=args.client_id,
            clean_session=False,
            keep_alive_secs=6)
    else:
        # Default: mutual-TLS connection using the cert/key files from the CLI.
        mqtt_connection = mqtt_connection_builder.mtls_from_path(
            endpoint=args.endpoint,
            cert_filepath=args.cert,
            pri_key_filepath=args.key,
            client_bootstrap=client_bootstrap,
            ca_filepath=args.root_ca,
            on_connection_interrupted=on_connection_interrupted,
            on_connection_resumed=on_connection_resumed,
            client_id=args.client_id,
            clean_session=False,
            keep_alive_secs=6)
    print("Connecting to {} with client ID '{}'...".format(
        args.endpoint, args.client_id))
    connect_future = mqtt_connection.connect()
    # Future.result() blocks until the connection attempt resolves.
    connect_future.result()
    print("Connected!")
    # Subscribe before publishing so our own messages are echoed back to us.
    print("Subscribing to topic '{}'...".format(args.topic))
    subscribe_future, packet_id = mqtt_connection.subscribe(
        topic=args.topic,
        qos=mqtt.QoS.AT_LEAST_ONCE,
        callback=on_message_received)
    subscribe_result = subscribe_future.result()
    print("Subscribed with {}".format(str(subscribe_result['qos'])))
    # Publish message to server desired number of times.
    # This step is skipped if message is blank.
    # This step loops forever if count was set to 0.
    if args.message:
        if args.count == 0:
            print("Sending messages until program killed")
        else:
            print("Sending {} message(s)".format(args.count))
        publish_count = 1
        while (publish_count <= args.count) or (args.count == 0):
            jsonMessage = {"device_name": args.devicename, "data": {"temperature": 79.5, "humidity": 0.45}}
            jsonData = json.dumps(jsonMessage)
            print("Publishing message to topic '{}': {}".format(args.topic, jsonData))
            mqtt_connection.publish(
                topic=args.topic,
                payload=jsonData,
                qos=mqtt.QoS.AT_LEAST_ONCE)
            time.sleep(args.interval)
            publish_count += 1
    # Wait for all messages to be received.
    # This waits forever if count was set to 0.
    if args.count != 0 and not received_all_event.is_set():
        print("Waiting for all messages to be received...")
    received_all_event.wait()
    print("{} message(s) received.".format(received_count))
    # Disconnect cleanly so the broker does not keep a half-open session.
    print("Disconnecting...")
    disconnect_future = mqtt_connection.disconnect()
    disconnect_future.result()
    print("Disconnected!")
| true | true |
f734c331e3defaefc8932cce380c4f0de759d187 | 24,299 | py | Python | pandas/tests/scalar/timestamp/test_timestamp.py | guitargeek/pandas | a6c1f6cccee6bbccfb29488a94664ed07db024d9 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2022-01-07T12:43:15.000Z | 2022-01-07T12:43:15.000Z | pandas/tests/scalar/timestamp/test_timestamp.py | guitargeek/pandas | a6c1f6cccee6bbccfb29488a94664ed07db024d9 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/scalar/timestamp/test_timestamp.py | guitargeek/pandas | a6c1f6cccee6bbccfb29488a94664ed07db024d9 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | """ test the scalar Timestamp """
import calendar
from datetime import (
datetime,
timedelta,
)
import locale
import pickle
import unicodedata
from dateutil.tz import tzutc
import numpy as np
import pytest
import pytz
from pytz import (
timezone,
utc,
)
from pandas._libs.tslibs.timezones import (
dateutil_gettz as gettz,
get_timezone,
)
import pandas.util._test_decorators as td
from pandas import (
NaT,
Timedelta,
Timestamp,
)
import pandas._testing as tm
from pandas.tseries import offsets
class TestTimestampProperties:
    """Attribute/property behavior of scalar Timestamp: field accessors,
    freq deprecation warnings, month/quarter/year start-end flags,
    locale-aware day/month names, leap years, and ISO week-of-year."""

    def test_freq_deprecation(self):
        # GH#41586
        msg = "The 'freq' argument in Timestamp is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            # warning issued at construction
            ts = Timestamp("2021-06-01", freq="D")
            ts2 = Timestamp("2021-06-01", freq="B")
        msg = "Timestamp.freq is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            # warning issued at attribute lookup
            ts.freq
        for per in ["month", "quarter", "year"]:
            for side in ["start", "end"]:
                attr = f"is_{per}_{side}"
                with tm.assert_produces_warning(FutureWarning, match=msg):
                    getattr(ts2, attr)
                # is_(month|quarter|year)_(start|end) does _not_ issue a warning
                # with freq="D" bc the result will be unaffected by the deprecation
                with tm.assert_produces_warning(None):
                    getattr(ts, attr)

    @pytest.mark.filterwarnings("ignore:The 'freq' argument:FutureWarning")
    @pytest.mark.filterwarnings("ignore:Timestamp.freq is deprecated:FutureWarning")
    def test_properties_business(self):
        # A business-freq Timestamp reports month/quarter start/end relative to
        # business days, while the freq-less control uses calendar days.
        ts = Timestamp("2017-10-01", freq="B")
        control = Timestamp("2017-10-01")
        assert ts.dayofweek == 6
        assert ts.day_of_week == 6
        assert not ts.is_month_start  # not a weekday
        assert not ts.freq.is_month_start(ts)
        assert ts.freq.is_month_start(ts + Timedelta(days=1))
        assert not ts.is_quarter_start  # not a weekday
        assert not ts.freq.is_quarter_start(ts)
        assert ts.freq.is_quarter_start(ts + Timedelta(days=1))
        # Control case: non-business is month/qtr start
        assert control.is_month_start
        assert control.is_quarter_start
        ts = Timestamp("2017-09-30", freq="B")
        control = Timestamp("2017-09-30")
        assert ts.dayofweek == 5
        assert ts.day_of_week == 5
        assert not ts.is_month_end  # not a weekday
        assert not ts.freq.is_month_end(ts)
        assert ts.freq.is_month_end(ts - Timedelta(days=1))
        assert not ts.is_quarter_end  # not a weekday
        assert not ts.freq.is_quarter_end(ts)
        assert ts.freq.is_quarter_end(ts - Timedelta(days=1))
        # Control case: non-business is month/qtr end
        assert control.is_month_end
        assert control.is_quarter_end

    @pytest.mark.parametrize(
        "attr, expected",
        [
            ["year", 2014],
            ["month", 12],
            ["day", 31],
            ["hour", 23],
            ["minute", 59],
            ["second", 0],
            ["microsecond", 0],
            ["nanosecond", 0],
            ["dayofweek", 2],
            ["day_of_week", 2],
            ["quarter", 4],
            ["dayofyear", 365],
            ["day_of_year", 365],
            ["week", 1],
            ["daysinmonth", 31],
        ],
    )
    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_fields(self, attr, expected, tz):
        # GH 10050
        # GH 13303
        ts = Timestamp("2014-12-31 23:59:00", tz=tz)
        result = getattr(ts, attr)
        # that we are int like
        assert isinstance(result, int)
        assert result == expected

    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_millisecond_raises(self, tz):
        # Timestamp deliberately exposes no `millisecond` accessor.
        ts = Timestamp("2014-12-31 23:59:00", tz=tz)
        msg = "'Timestamp' object has no attribute 'millisecond'"
        with pytest.raises(AttributeError, match=msg):
            ts.millisecond

    @pytest.mark.parametrize(
        "start", ["is_month_start", "is_quarter_start", "is_year_start"]
    )
    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_is_start(self, start, tz):
        # Jan 1 midnight is simultaneously a month/quarter/year start.
        ts = Timestamp("2014-01-01 00:00:00", tz=tz)
        assert getattr(ts, start)

    @pytest.mark.parametrize("end", ["is_month_end", "is_year_end", "is_quarter_end"])
    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_is_end(self, end, tz):
        # Dec 31 is simultaneously a month/quarter/year end.
        ts = Timestamp("2014-12-31 23:59:59", tz=tz)
        assert getattr(ts, end)

    # GH 12806
    @pytest.mark.parametrize(
        "data",
        [Timestamp("2017-08-28 23:00:00"), Timestamp("2017-08-28 23:00:00", tz="EST")],
    )
    # error: Unsupported operand types for + ("List[None]" and "List[str]")
    @pytest.mark.parametrize(
        "time_locale", [None] + (tm.get_locales() or [])  # type: ignore[operator]
    )
    def test_names(self, data, time_locale):
        # GH 17354
        # Test .day_name(), .month_name
        if time_locale is None:
            expected_day = "Monday"
            expected_month = "August"
        else:
            with tm.set_locale(time_locale, locale.LC_TIME):
                expected_day = calendar.day_name[0].capitalize()
                expected_month = calendar.month_name[8].capitalize()
        result_day = data.day_name(time_locale)
        result_month = data.month_name(time_locale)
        # Work around https://github.com/pandas-dev/pandas/issues/22342
        # different normalizations
        expected_day = unicodedata.normalize("NFD", expected_day)
        expected_month = unicodedata.normalize("NFD", expected_month)
        result_day = unicodedata.normalize("NFD", result_day)
        result_month = unicodedata.normalize("NFD", result_month)
        assert result_day == expected_day
        assert result_month == expected_month
        # Test NaT
        nan_ts = Timestamp(NaT)
        assert np.isnan(nan_ts.day_name(time_locale))
        assert np.isnan(nan_ts.month_name(time_locale))

    def test_is_leap_year(self, tz_naive_fixture):
        tz = tz_naive_fixture
        # GH 13727
        dt = Timestamp("2000-01-01 00:00:00", tz=tz)
        assert dt.is_leap_year
        assert isinstance(dt.is_leap_year, bool)
        dt = Timestamp("1999-01-01 00:00:00", tz=tz)
        assert not dt.is_leap_year
        dt = Timestamp("2004-01-01 00:00:00", tz=tz)
        assert dt.is_leap_year
        # 2100 is divisible by 100 but not by 400, hence not a leap year.
        dt = Timestamp("2100-01-01 00:00:00", tz=tz)
        assert not dt.is_leap_year

    def test_woy_boundary(self):
        # make sure weeks at year boundaries are correct
        d = datetime(2013, 12, 31)
        result = Timestamp(d).week
        expected = 1  # ISO standard
        assert result == expected
        d = datetime(2008, 12, 28)
        result = Timestamp(d).week
        expected = 52  # ISO standard
        assert result == expected
        d = datetime(2009, 12, 31)
        result = Timestamp(d).week
        expected = 53  # ISO standard
        assert result == expected
        d = datetime(2010, 1, 1)
        result = Timestamp(d).week
        expected = 53  # ISO standard
        assert result == expected
        d = datetime(2010, 1, 3)
        result = Timestamp(d).week
        expected = 53  # ISO standard
        assert result == expected
        result = np.array(
            [
                Timestamp(datetime(*args)).week
                for args in [(2000, 1, 1), (2000, 1, 2), (2005, 1, 1), (2005, 1, 2)]
            ]
        )
        assert (result == [52, 52, 53, 53]).all()

    def test_resolution(self):
        # GH#21336, GH#21365
        dt = Timestamp("2100-01-01 00:00:00")
        assert dt.resolution == Timedelta(nanoseconds=1)
        # Check that the attribute is available on the class, mirroring
        # the stdlib datetime behavior
        assert Timestamp.resolution == Timedelta(nanoseconds=1)
class TestTimestamp:
    """Construction, timezone handling, hashing, unit conversion, and
    pickling behavior of scalar Timestamp."""

    def test_tz(self):
        # tz_localize attaches a zone to a naive stamp; tz_convert shifts zones.
        tstr = "2014-02-01 09:00"
        ts = Timestamp(tstr)
        local = ts.tz_localize("Asia/Tokyo")
        assert local.hour == 9
        assert local == Timestamp(tstr, tz="Asia/Tokyo")
        conv = local.tz_convert("US/Eastern")
        assert conv == Timestamp("2014-01-31 19:00", tz="US/Eastern")
        assert conv.hour == 19
        # preserves nanosecond
        ts = Timestamp(tstr) + offsets.Nano(5)
        local = ts.tz_localize("Asia/Tokyo")
        assert local.hour == 9
        assert local.nanosecond == 5
        conv = local.tz_convert("US/Eastern")
        assert conv.nanosecond == 5
        assert conv.hour == 19

    def test_utc_z_designator(self):
        # A trailing "Z" designator in the string parses as UTC.
        assert get_timezone(Timestamp("2014-11-02 01:00Z").tzinfo) is utc

    def test_asm8(self):
        # .asm8 round-trips the underlying int64 nanosecond value.
        np.random.seed(7_960_929)
        ns = [Timestamp.min.value, Timestamp.max.value, 1000]
        for n in ns:
            assert (
                Timestamp(n).asm8.view("i8") == np.datetime64(n, "ns").view("i8") == n
            )
        assert Timestamp("nat").asm8.view("i8") == np.datetime64("nat", "ns").view("i8")

    def test_class_ops_pytz(self):
        # Timestamp class constructors should agree with stdlib datetime
        # (pytz timezone objects) to within one second.
        def compare(x, y):
            assert int((Timestamp(x).value - Timestamp(y).value) / 1e9) == 0

        compare(Timestamp.now(), datetime.now())
        compare(Timestamp.now("UTC"), datetime.now(timezone("UTC")))
        compare(Timestamp.utcnow(), datetime.utcnow())
        compare(Timestamp.today(), datetime.today())
        current_time = calendar.timegm(datetime.now().utctimetuple())
        msg = "timezone-aware Timestamp with UTC"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            # GH#22451
            ts_utc = Timestamp.utcfromtimestamp(current_time)
        compare(
            ts_utc,
            datetime.utcfromtimestamp(current_time),
        )
        compare(
            Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)
        )
        compare(
            # Support tz kwarg in Timestamp.fromtimestamp
            Timestamp.fromtimestamp(current_time, "UTC"),
            datetime.fromtimestamp(current_time, utc),
        )
        compare(
            # Support tz kwarg in Timestamp.fromtimestamp
            Timestamp.fromtimestamp(current_time, tz="UTC"),
            datetime.fromtimestamp(current_time, utc),
        )
        date_component = datetime.utcnow()
        time_component = (date_component + timedelta(minutes=10)).time()
        compare(
            Timestamp.combine(date_component, time_component),
            datetime.combine(date_component, time_component),
        )

    def test_class_ops_dateutil(self):
        # Same as test_class_ops_pytz but exercising dateutil timezones.
        def compare(x, y):
            assert (
                int(
                    np.round(Timestamp(x).value / 1e9)
                    - np.round(Timestamp(y).value / 1e9)
                )
                == 0
            )

        compare(Timestamp.now(), datetime.now())
        compare(Timestamp.now("UTC"), datetime.now(tzutc()))
        compare(Timestamp.utcnow(), datetime.utcnow())
        compare(Timestamp.today(), datetime.today())
        current_time = calendar.timegm(datetime.now().utctimetuple())
        msg = "timezone-aware Timestamp with UTC"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            # GH#22451
            ts_utc = Timestamp.utcfromtimestamp(current_time)
        compare(
            ts_utc,
            datetime.utcfromtimestamp(current_time),
        )
        compare(
            Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)
        )
        date_component = datetime.utcnow()
        time_component = (date_component + timedelta(minutes=10)).time()
        compare(
            Timestamp.combine(date_component, time_component),
            datetime.combine(date_component, time_component),
        )

    def test_basics_nanos(self):
        val = np.int64(946_684_800_000_000_000).view("M8[ns]")
        stamp = Timestamp(val.view("i8") + 500)
        assert stamp.year == 2000
        assert stamp.month == 1
        assert stamp.microsecond == 0
        assert stamp.nanosecond == 500
        # GH 14415
        val = np.iinfo(np.int64).min + 80_000_000_000_000
        stamp = Timestamp(val)
        assert stamp.year == 1677
        assert stamp.month == 9
        assert stamp.day == 21
        assert stamp.microsecond == 145224
        assert stamp.nanosecond == 192

    @pytest.mark.parametrize(
        "value, check_kwargs",
        [
            [946688461000000000, {}],
            [946688461000000000 / 1000, {"unit": "us"}],
            [946688461000000000 / 1_000_000, {"unit": "ms"}],
            [946688461000000000 / 1_000_000_000, {"unit": "s"}],
            [10957, {"unit": "D", "h": 0}],
            [
                (946688461000000000 + 500000) / 1000000000,
                {"unit": "s", "us": 499, "ns": 964},
            ],
            [
                (946688461000000000 + 500000000) / 1000000000,
                {"unit": "s", "us": 500000},
            ],
            [(946688461000000000 + 500000) / 1000000, {"unit": "ms", "us": 500}],
            [(946688461000000000 + 500000) / 1000, {"unit": "us", "us": 500}],
            [(946688461000000000 + 500000000) / 1000000, {"unit": "ms", "us": 500000}],
            [946688461000000000 / 1000.0 + 5, {"unit": "us", "us": 5}],
            [946688461000000000 / 1000.0 + 5000, {"unit": "us", "us": 5000}],
            [946688461000000000 / 1000000.0 + 0.5, {"unit": "ms", "us": 500}],
            [946688461000000000 / 1000000.0 + 0.005, {"unit": "ms", "us": 5, "ns": 5}],
            [946688461000000000 / 1000000000.0 + 0.5, {"unit": "s", "us": 500000}],
            [10957 + 0.5, {"unit": "D", "h": 12}],
        ],
    )
    def test_unit(self, value, check_kwargs):
        # All parametrized values encode 2000-01-01 01:01:01 (plus sub-second
        # offsets) in different units; `check` verifies the decoded fields.
        def check(value, unit=None, h=1, s=1, us=0, ns=0):
            stamp = Timestamp(value, unit=unit)
            assert stamp.year == 2000
            assert stamp.month == 1
            assert stamp.day == 1
            assert stamp.hour == h
            if unit != "D":
                assert stamp.minute == 1
                assert stamp.second == s
                assert stamp.microsecond == us
            else:
                assert stamp.minute == 0
                assert stamp.second == 0
                assert stamp.microsecond == 0
            assert stamp.nanosecond == ns

        check(value, **check_kwargs)

    def test_roundtrip(self):
        # test value to string and back conversions
        # further test accessors
        base = Timestamp("20140101 00:00:00")
        result = Timestamp(base.value + Timedelta("5ms").value)
        assert result == Timestamp(f"{base}.005000")
        assert result.microsecond == 5000
        result = Timestamp(base.value + Timedelta("5us").value)
        assert result == Timestamp(f"{base}.000005")
        assert result.microsecond == 5
        result = Timestamp(base.value + Timedelta("5ns").value)
        assert result == Timestamp(f"{base}.000000005")
        assert result.nanosecond == 5
        assert result.microsecond == 0
        result = Timestamp(base.value + Timedelta("6ms 5us").value)
        assert result == Timestamp(f"{base}.006005")
        assert result.microsecond == 5 + 6 * 1000
        result = Timestamp(base.value + Timedelta("200ms 5us").value)
        assert result == Timestamp(f"{base}.200005")
        assert result.microsecond == 5 + 200 * 1000

    def test_hash_equivalent(self):
        # A Timestamp must hash equal to the datetime it represents.
        d = {datetime(2011, 1, 1): 5}
        stamp = Timestamp(datetime(2011, 1, 1))
        assert d[stamp] == 5

    @pytest.mark.parametrize(
        "timezone, year, month, day, hour",
        [["America/Chicago", 2013, 11, 3, 1], ["America/Santiago", 2021, 4, 3, 23]],
    )
    def test_hash_timestamp_with_fold(self, timezone, year, month, day, hour):
        # see gh-33931
        # Both folds of an ambiguous DST wall time must hash identically.
        test_timezone = gettz(timezone)
        transition_1 = Timestamp(
            year=year,
            month=month,
            day=day,
            hour=hour,
            minute=0,
            fold=0,
            tzinfo=test_timezone,
        )
        transition_2 = Timestamp(
            year=year,
            month=month,
            day=day,
            hour=hour,
            minute=0,
            fold=1,
            tzinfo=test_timezone,
        )
        assert hash(transition_1) == hash(transition_2)

    def test_tz_conversion_freq(self, tz_naive_fixture):
        # GH25241
        # tz_localize/tz_convert should carry the (deprecated) freq along.
        with tm.assert_produces_warning(FutureWarning, match="freq"):
            t1 = Timestamp("2019-01-01 10:00", freq="H")
            assert t1.tz_localize(tz=tz_naive_fixture).freq == t1.freq
        with tm.assert_produces_warning(FutureWarning, match="freq"):
            t2 = Timestamp("2019-01-02 12:00", tz="UTC", freq="T")
            assert t2.tz_convert(tz="UTC").freq == t2.freq

    def test_pickle_freq_no_warning(self):
        # GH#41949 we don't want a warning on unpickling
        with tm.assert_produces_warning(FutureWarning, match="freq"):
            ts = Timestamp("2019-01-01 10:00", freq="H")
        out = pickle.dumps(ts)
        with tm.assert_produces_warning(None):
            res = pickle.loads(out)
        assert res._freq == ts._freq
class TestTimestampNsOperations:
    """Nanosecond-precision string parsing and Nano-offset arithmetic."""

    def test_nanosecond_string_parsing(self):
        ts = Timestamp("2013-05-01 07:15:45.123456789")
        # GH 7878
        expected_repr = "2013-05-01 07:15:45.123456789"
        expected_value = 1_367_392_545_123_456_789
        assert ts.value == expected_value
        assert expected_repr in repr(ts)
        # The same wall time in other zones shifts .value by the UTC offset.
        ts = Timestamp("2013-05-01 07:15:45.123456789+09:00", tz="Asia/Tokyo")
        assert ts.value == expected_value - 9 * 3600 * 1_000_000_000
        assert expected_repr in repr(ts)
        ts = Timestamp("2013-05-01 07:15:45.123456789", tz="UTC")
        assert ts.value == expected_value
        assert expected_repr in repr(ts)
        ts = Timestamp("2013-05-01 07:15:45.123456789", tz="US/Eastern")
        assert ts.value == expected_value + 4 * 3600 * 1_000_000_000
        assert expected_repr in repr(ts)
        # GH 10041
        ts = Timestamp("20130501T071545.123456789")
        assert ts.value == expected_value
        assert expected_repr in repr(ts)

    def test_nanosecond_timestamp(self):
        # GH 7610
        # Nanoseconds survive offset arithmetic, re-wrapping, and parsing.
        expected = 1_293_840_000_000_000_005
        t = Timestamp("2011-01-01") + offsets.Nano(5)
        assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
        assert t.value == expected
        assert t.nanosecond == 5
        t = Timestamp(t)
        assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
        assert t.value == expected
        assert t.nanosecond == 5
        t = Timestamp("2011-01-01 00:00:00.000000005")
        assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
        assert t.value == expected
        assert t.nanosecond == 5
        expected = 1_293_840_000_000_000_010
        t = t + offsets.Nano(5)
        assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
        assert t.value == expected
        assert t.nanosecond == 10
        t = Timestamp(t)
        assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
        assert t.value == expected
        assert t.nanosecond == 10
        t = Timestamp("2011-01-01 00:00:00.000000010")
        assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
        assert t.value == expected
        assert t.nanosecond == 10
class TestTimestampToJulianDate:
    """Spot-check Timestamp.to_julian_date() against known Julian day numbers."""

    def _check(self, stamp, expected):
        # Shared assertion helper for the individual date cases below.
        assert Timestamp(stamp).to_julian_date() == expected

    def test_compare_1700(self):
        self._check("1700-06-23", 2_342_145.5)

    def test_compare_2000(self):
        self._check("2000-04-12", 2_451_646.5)

    def test_compare_2100(self):
        self._check("2100-08-12", 2_488_292.5)

    def test_compare_hour01(self):
        self._check("2000-08-12T01:00:00", 2_451_768.5416666666666666)

    def test_compare_hour13(self):
        self._check("2000-08-12T13:00:00", 2_451_769.0416666666666666)
class TestTimestampConversion:
    """Conversions from Timestamp to datetime/datetime64/numpy/Period."""

    def test_conversion(self):
        # GH#9255
        ts = Timestamp("2000-01-01")
        result = ts.to_pydatetime()
        expected = datetime(2000, 1, 1)
        assert result == expected
        assert type(result) == type(expected)
        result = ts.to_datetime64()
        expected = np.datetime64(ts.value, "ns")
        assert result == expected
        assert type(result) == type(expected)
        assert result.dtype == expected.dtype

    def test_to_pydatetime_nonzero_nano(self):
        ts = Timestamp("2011-01-01 9:00:00.123456789")
        # Warn the user of data loss (nanoseconds).
        with tm.assert_produces_warning(UserWarning):
            expected = datetime(2011, 1, 1, 9, 0, 0, 123456)
            result = ts.to_pydatetime()
            assert result == expected

    def test_timestamp_to_datetime(self):
        stamp = Timestamp("20090415", tz="US/Eastern")
        dtval = stamp.to_pydatetime()
        assert stamp == dtval
        assert stamp.tzinfo == dtval.tzinfo

    def test_timestamp_to_datetime_dateutil(self):
        # Same round-trip but with a dateutil-backed timezone string.
        stamp = Timestamp("20090415", tz="dateutil/US/Eastern")
        dtval = stamp.to_pydatetime()
        assert stamp == dtval
        assert stamp.tzinfo == dtval.tzinfo

    def test_timestamp_to_datetime_explicit_pytz(self):
        stamp = Timestamp("20090415", tz=pytz.timezone("US/Eastern"))
        dtval = stamp.to_pydatetime()
        assert stamp == dtval
        assert stamp.tzinfo == dtval.tzinfo

    @td.skip_if_windows
    def test_timestamp_to_datetime_explicit_dateutil(self):
        stamp = Timestamp("20090415", tz=gettz("US/Eastern"))
        dtval = stamp.to_pydatetime()
        assert stamp == dtval
        assert stamp.tzinfo == dtval.tzinfo

    def test_to_datetime_bijective(self):
        # Ensure that converting to datetime and back only loses precision
        # by going from nanoseconds to microseconds.
        exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning
        with tm.assert_produces_warning(exp_warning):
            pydt_max = Timestamp.max.to_pydatetime()
        assert Timestamp(pydt_max).value / 1000 == Timestamp.max.value / 1000
        exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning
        with tm.assert_produces_warning(exp_warning):
            pydt_min = Timestamp.min.to_pydatetime()
        # The next assertion can be enabled once GH#39221 is merged
        # assert pydt_min < Timestamp.min  # this is bc nanos are dropped
        tdus = timedelta(microseconds=1)
        assert pydt_min + tdus > Timestamp.min
        assert Timestamp(pydt_min + tdus).value / 1000 == Timestamp.min.value / 1000

    def test_to_period_tz_warning(self):
        # GH#21333 make sure a warning is issued when timezone
        # info is lost
        ts = Timestamp("2009-04-15 16:17:18", tz="US/Eastern")
        with tm.assert_produces_warning(UserWarning):
            # warning that timezone info will be lost
            ts.to_period("D")

    def test_to_numpy_alias(self):
        # GH 24653: alias .to_numpy() for scalars
        ts = Timestamp(datetime.now())
        assert ts.to_datetime64() == ts.to_numpy()
        # GH#44460
        msg = "dtype and copy arguments are ignored"
        with pytest.raises(ValueError, match=msg):
            ts.to_numpy("M8[s]")
        with pytest.raises(ValueError, match=msg):
            ts.to_numpy(copy=True)
class SubDatetime(datetime):
    """Minimal datetime subclass used to verify Timedelta ops accept subclasses."""

    pass
@pytest.mark.parametrize(
    "lh,rh",
    [
        (SubDatetime(2000, 1, 1), Timedelta(hours=1)),
        (Timedelta(hours=1), SubDatetime(2000, 1, 1)),
    ],
)
def test_dt_subclass_add_timedelta(lh, rh):
    # GH#25851
    # ensure that subclassed datetime works for
    # Timedelta operations, in both operand orders
    result = lh + rh
    expected = SubDatetime(2000, 1, 1, 1)
    assert result == expected
| 35.318314 | 88 | 0.59381 |
import calendar
from datetime import (
datetime,
timedelta,
)
import locale
import pickle
import unicodedata
from dateutil.tz import tzutc
import numpy as np
import pytest
import pytz
from pytz import (
timezone,
utc,
)
from pandas._libs.tslibs.timezones import (
dateutil_gettz as gettz,
get_timezone,
)
import pandas.util._test_decorators as td
from pandas import (
NaT,
Timedelta,
Timestamp,
)
import pandas._testing as tm
from pandas.tseries import offsets
class TestTimestampProperties:
def test_freq_deprecation(self):
msg = "The 'freq' argument in Timestamp is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
ts = Timestamp("2021-06-01", freq="D")
ts2 = Timestamp("2021-06-01", freq="B")
msg = "Timestamp.freq is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
ts.freq
for per in ["month", "quarter", "year"]:
for side in ["start", "end"]:
attr = f"is_{per}_{side}"
with tm.assert_produces_warning(FutureWarning, match=msg):
getattr(ts2, attr)
with tm.assert_produces_warning(None):
getattr(ts, attr)
@pytest.mark.filterwarnings("ignore:The 'freq' argument:FutureWarning")
@pytest.mark.filterwarnings("ignore:Timestamp.freq is deprecated:FutureWarning")
def test_properties_business(self):
ts = Timestamp("2017-10-01", freq="B")
control = Timestamp("2017-10-01")
assert ts.dayofweek == 6
assert ts.day_of_week == 6
assert not ts.is_month_start
assert not ts.freq.is_month_start(ts)
assert ts.freq.is_month_start(ts + Timedelta(days=1))
assert not ts.is_quarter_start
assert not ts.freq.is_quarter_start(ts)
assert ts.freq.is_quarter_start(ts + Timedelta(days=1))
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp("2017-09-30", freq="B")
control = Timestamp("2017-09-30")
assert ts.dayofweek == 5
assert ts.day_of_week == 5
assert not ts.is_month_end
assert not ts.freq.is_month_end(ts)
assert ts.freq.is_month_end(ts - Timedelta(days=1))
assert not ts.is_quarter_end
assert not ts.freq.is_quarter_end(ts)
assert ts.freq.is_quarter_end(ts - Timedelta(days=1))
assert control.is_month_end
assert control.is_quarter_end
@pytest.mark.parametrize(
"attr, expected",
[
["year", 2014],
["month", 12],
["day", 31],
["hour", 23],
["minute", 59],
["second", 0],
["microsecond", 0],
["nanosecond", 0],
["dayofweek", 2],
["day_of_week", 2],
["quarter", 4],
["dayofyear", 365],
["day_of_year", 365],
["week", 1],
["daysinmonth", 31],
],
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_fields(self, attr, expected, tz):
ts = Timestamp("2014-12-31 23:59:00", tz=tz)
result = getattr(ts, attr)
assert isinstance(result, int)
assert result == expected
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_millisecond_raises(self, tz):
ts = Timestamp("2014-12-31 23:59:00", tz=tz)
msg = "'Timestamp' object has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
ts.millisecond
@pytest.mark.parametrize(
"start", ["is_month_start", "is_quarter_start", "is_year_start"]
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_is_start(self, start, tz):
ts = Timestamp("2014-01-01 00:00:00", tz=tz)
assert getattr(ts, start)
@pytest.mark.parametrize("end", ["is_month_end", "is_year_end", "is_quarter_end"])
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_is_end(self, end, tz):
ts = Timestamp("2014-12-31 23:59:59", tz=tz)
assert getattr(ts, end)
@pytest.mark.parametrize(
"data",
[Timestamp("2017-08-28 23:00:00"), Timestamp("2017-08-28 23:00:00", tz="EST")],
)
@pytest.mark.parametrize(
"time_locale", [None] + (tm.get_locales() or [])
)
def test_names(self, data, time_locale):
if time_locale is None:
expected_day = "Monday"
expected_month = "August"
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
result_day = data.day_name(time_locale)
result_month = data.month_name(time_locale)
expected_day = unicodedata.normalize("NFD", expected_day)
expected_month = unicodedata.normalize("NFD", expected_month)
result_day = unicodedata.normalize("NFD", result_day)
result_month = unicodedata.normalize("NFD", result_month)
assert result_day == expected_day
assert result_month == expected_month
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
def test_is_leap_year(self, tz_naive_fixture):
tz = tz_naive_fixture
dt = Timestamp("2000-01-01 00:00:00", tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp("1999-01-01 00:00:00", tz=tz)
assert not dt.is_leap_year
dt = Timestamp("2004-01-01 00:00:00", tz=tz)
assert dt.is_leap_year
dt = Timestamp("2100-01-01 00:00:00", tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53
assert result == expected
result = np.array(
[
Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (2005, 1, 1), (2005, 1, 2)]
]
)
assert (result == [52, 52, 53, 53]).all()
def test_resolution(self):
p("2100-01-01 00:00:00")
assert dt.resolution == Timedelta(nanoseconds=1)
assert Timestamp.resolution == Timedelta(nanoseconds=1)
class TestTimestamp:
def test_tz(self):
tstr = "2014-02-01 09:00"
ts = Timestamp(tstr)
local = ts.tz_localize("Asia/Tokyo")
assert local.hour == 9
assert local == Timestamp(tstr, tz="Asia/Tokyo")
conv = local.tz_convert("US/Eastern")
assert conv == Timestamp("2014-01-31 19:00", tz="US/Eastern")
assert conv.hour == 19
ts = Timestamp(tstr) + offsets.Nano(5)
local = ts.tz_localize("Asia/Tokyo")
assert local.hour == 9
assert local.nanosecond == 5
conv = local.tz_convert("US/Eastern")
assert conv.nanosecond == 5
assert conv.hour == 19
def test_utc_z_designator(self):
assert get_timezone(Timestamp("2014-11-02 01:00Z").tzinfo) is utc
def test_asm8(self):
np.random.seed(7_960_929)
ns = [Timestamp.min.value, Timestamp.max.value, 1000]
for n in ns:
assert (
Timestamp(n).asm8.view("i8") == np.datetime64(n, "ns").view("i8") == n
)
assert Timestamp("nat").asm8.view("i8") == np.datetime64("nat", "ns").view("i8")
def test_class_ops_pytz(self):
def compare(x, y):
assert int((Timestamp(x).value - Timestamp(y).value) / 1e9) == 0
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now("UTC"), datetime.now(timezone("UTC")))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
msg = "timezone-aware Timestamp with UTC"
with tm.assert_produces_warning(FutureWarning, match=msg):
ts_utc = Timestamp.utcfromtimestamp(current_time)
compare(
ts_utc,
datetime.utcfromtimestamp(current_time),
)
compare(
Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)
)
compare(
Timestamp.fromtimestamp(current_time, "UTC"),
datetime.fromtimestamp(current_time, utc),
)
compare(
Timestamp.fromtimestamp(current_time, tz="UTC"),
datetime.fromtimestamp(current_time, utc),
)
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(
Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component),
)
def test_class_ops_dateutil(self):
def compare(x, y):
assert (
int(
np.round(Timestamp(x).value / 1e9)
- np.round(Timestamp(y).value / 1e9)
)
== 0
)
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now("UTC"), datetime.now(tzutc()))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
msg = "timezone-aware Timestamp with UTC"
with tm.assert_produces_warning(FutureWarning, match=msg):
ts_utc = Timestamp.utcfromtimestamp(current_time)
compare(
ts_utc,
datetime.utcfromtimestamp(current_time),
)
compare(
Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)
)
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(
Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component),
)
def test_basics_nanos(self):
val = np.int64(946_684_800_000_000_000).view("M8[ns]")
stamp = Timestamp(val.view("i8") + 500)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.microsecond == 0
assert stamp.nanosecond == 500
val = np.iinfo(np.int64).min + 80_000_000_000_000
stamp = Timestamp(val)
assert stamp.year == 1677
assert stamp.month == 9
assert stamp.day == 21
assert stamp.microsecond == 145224
assert stamp.nanosecond == 192
@pytest.mark.parametrize(
"value, check_kwargs",
[
[946688461000000000, {}],
[946688461000000000 / 1000, {"unit": "us"}],
[946688461000000000 / 1_000_000, {"unit": "ms"}],
[946688461000000000 / 1_000_000_000, {"unit": "s"}],
[10957, {"unit": "D", "h": 0}],
[
(946688461000000000 + 500000) / 1000000000,
{"unit": "s", "us": 499, "ns": 964},
],
[
(946688461000000000 + 500000000) / 1000000000,
{"unit": "s", "us": 500000},
],
[(946688461000000000 + 500000) / 1000000, {"unit": "ms", "us": 500}],
[(946688461000000000 + 500000) / 1000, {"unit": "us", "us": 500}],
[(946688461000000000 + 500000000) / 1000000, {"unit": "ms", "us": 500000}],
[946688461000000000 / 1000.0 + 5, {"unit": "us", "us": 5}],
[946688461000000000 / 1000.0 + 5000, {"unit": "us", "us": 5000}],
[946688461000000000 / 1000000.0 + 0.5, {"unit": "ms", "us": 500}],
[946688461000000000 / 1000000.0 + 0.005, {"unit": "ms", "us": 5, "ns": 5}],
[946688461000000000 / 1000000000.0 + 0.5, {"unit": "s", "us": 500000}],
[10957 + 0.5, {"unit": "D", "h": 12}],
],
)
def test_unit(self, value, check_kwargs):
def check(value, unit=None, h=1, s=1, us=0, ns=0):
stamp = Timestamp(value, unit=unit)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.day == 1
assert stamp.hour == h
if unit != "D":
assert stamp.minute == 1
assert stamp.second == s
assert stamp.microsecond == us
else:
assert stamp.minute == 0
assert stamp.second == 0
assert stamp.microsecond == 0
assert stamp.nanosecond == ns
check(value, **check_kwargs)
def test_roundtrip(self):
base = Timestamp("20140101 00:00:00")
result = Timestamp(base.value + Timedelta("5ms").value)
assert result == Timestamp(f"{base}.005000")
assert result.microsecond == 5000
result = Timestamp(base.value + Timedelta("5us").value)
assert result == Timestamp(f"{base}.000005")
assert result.microsecond == 5
result = Timestamp(base.value + Timedelta("5ns").value)
assert result == Timestamp(f"{base}.000000005")
assert result.nanosecond == 5
assert result.microsecond == 0
result = Timestamp(base.value + Timedelta("6ms 5us").value)
assert result == Timestamp(f"{base}.006005")
assert result.microsecond == 5 + 6 * 1000
result = Timestamp(base.value + Timedelta("200ms 5us").value)
assert result == Timestamp(f"{base}.200005")
assert result.microsecond == 5 + 200 * 1000
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
assert d[stamp] == 5
@pytest.mark.parametrize(
"timezone, year, month, day, hour",
[["America/Chicago", 2013, 11, 3, 1], ["America/Santiago", 2021, 4, 3, 23]],
)
def test_hash_timestamp_with_fold(self, timezone, year, month, day, hour):
test_timezone = gettz(timezone)
transition_1 = Timestamp(
year=year,
month=month,
day=day,
hour=hour,
minute=0,
fold=0,
tzinfo=test_timezone,
)
transition_2 = Timestamp(
year=year,
month=month,
day=day,
hour=hour,
minute=0,
fold=1,
tzinfo=test_timezone,
)
assert hash(transition_1) == hash(transition_2)
def test_tz_conversion_freq(self, tz_naive_fixture):
with tm.assert_produces_warning(FutureWarning, match="freq"):
t1 = Timestamp("2019-01-01 10:00", freq="H")
assert t1.tz_localize(tz=tz_naive_fixture).freq == t1.freq
with tm.assert_produces_warning(FutureWarning, match="freq"):
t2 = Timestamp("2019-01-02 12:00", tz="UTC", freq="T")
assert t2.tz_convert(tz="UTC").freq == t2.freq
def test_pickle_freq_no_warning(self):
ureWarning, match="freq"):
ts = Timestamp("2019-01-01 10:00", freq="H")
out = pickle.dumps(ts)
with tm.assert_produces_warning(None):
res = pickle.loads(out)
assert res._freq == ts._freq
class TestTimestampNsOperations:
    """Nanosecond precision through string parsing and Nano offsets."""

    def test_nanosecond_string_parsing(self):
        # GH 7878: nanoseconds in the string survive parsing, with the
        # expected .value adjustment for each timezone.
        base_repr = "2013-05-01 07:15:45.123456789"
        base_value = 1_367_392_545_123_456_789
        hour_ns = 3600 * 1_000_000_000
        cases = [
            ("2013-05-01 07:15:45.123456789", None, 0),
            ("2013-05-01 07:15:45.123456789+09:00", "Asia/Tokyo", -9 * hour_ns),
            ("2013-05-01 07:15:45.123456789", "UTC", 0),
            ("2013-05-01 07:15:45.123456789", "US/Eastern", 4 * hour_ns),
            # GH 10041: compact ISO form without separators
            ("20130501T071545.123456789", None, 0),
        ]
        for text, tz, delta in cases:
            stamp = Timestamp(text) if tz is None else Timestamp(text, tz=tz)
            assert stamp.value == base_value + delta
            assert base_repr in repr(stamp)

    def test_nanosecond_timestamp(self):
        # GH 7610: a handful of extra nanoseconds is preserved by every
        # construction path (offset addition, copy-construction, parsing).
        midnight_2011 = 1_293_840_000_000_000_000
        for ns in (5, 10):
            frac = f"0000000{ns:02d}"
            expected_repr = f"Timestamp('2011-01-01 00:00:00.{frac}')"
            expected_value = midnight_2011 + ns
            candidates = (
                Timestamp("2011-01-01") + offsets.Nano(ns),
                Timestamp(Timestamp("2011-01-01") + offsets.Nano(ns)),
                Timestamp(f"2011-01-01 00:00:00.{frac}"),
            )
            for stamp in candidates:
                assert repr(stamp) == expected_repr
                assert stamp.value == expected_value
                assert stamp.nanosecond == ns
class TestTimestampToJulianDate:
    """Spot-check ``Timestamp.to_julian_date`` against known Julian dates."""

    def test_compare_1700(self):
        assert Timestamp("1700-06-23").to_julian_date() == 2_342_145.5

    def test_compare_2000(self):
        assert Timestamp("2000-04-12").to_julian_date() == 2_451_646.5

    def test_compare_2100(self):
        assert Timestamp("2100-08-12").to_julian_date() == 2_488_292.5

    def test_compare_hour01(self):
        # Julian days start at noon, so 01:00 falls at .5416...
        assert (
            Timestamp("2000-08-12T01:00:00").to_julian_date()
            == 2_451_768.5416666666666666
        )

    def test_compare_hour13(self):
        assert (
            Timestamp("2000-08-12T13:00:00").to_julian_date()
            == 2_451_769.0416666666666666
        )
class TestTimestampConversion:
    """Conversions out of ``Timestamp``: to_pydatetime, to_datetime64,
    to_period, and the to_numpy alias — including the warnings emitted
    when nanosecond precision would be lost."""

    def test_conversion(self):
        # GH#9255
        ts = Timestamp("2000-01-01")

        result = ts.to_pydatetime()
        expected = datetime(2000, 1, 1)
        assert result == expected
        # exact type, not a subclass
        assert type(result) == type(expected)

        result = ts.to_datetime64()
        expected = np.datetime64(ts.value, "ns")
        assert result == expected
        assert type(result) == type(expected)
        assert result.dtype == expected.dtype

    def test_to_pydatetime_nonzero_nano(self):
        ts = Timestamp("2011-01-01 9:00:00.123456789")

        # Warn the user of data loss (nanoseconds).
        with tm.assert_produces_warning(UserWarning):
            expected = datetime(2011, 1, 1, 9, 0, 0, 123456)
            result = ts.to_pydatetime()
            assert result == expected

    def test_timestamp_to_datetime(self):
        # pytz-style tz string
        stamp = Timestamp("20090415", tz="US/Eastern")
        dtval = stamp.to_pydatetime()
        assert stamp == dtval
        assert stamp.tzinfo == dtval.tzinfo

    def test_timestamp_to_datetime_dateutil(self):
        # dateutil-prefixed tz string
        stamp = Timestamp("20090415", tz="dateutil/US/Eastern")
        dtval = stamp.to_pydatetime()
        assert stamp == dtval
        assert stamp.tzinfo == dtval.tzinfo

    def test_timestamp_to_datetime_explicit_pytz(self):
        # explicit pytz tzinfo object
        stamp = Timestamp("20090415", tz=pytz.timezone("US/Eastern"))
        dtval = stamp.to_pydatetime()
        assert stamp == dtval
        assert stamp.tzinfo == dtval.tzinfo

    @td.skip_if_windows
    def test_timestamp_to_datetime_explicit_dateutil(self):
        # explicit dateutil tzinfo object
        stamp = Timestamp("20090415", tz=gettz("US/Eastern"))
        dtval = stamp.to_pydatetime()
        assert stamp == dtval
        assert stamp.tzinfo == dtval.tzinfo

    def test_to_datetime_bijective(self):
        # Ensure that converting to datetime and back only loses precision
        # by going from nanoseconds to microseconds.
        exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning
        with tm.assert_produces_warning(exp_warning):
            pydt_max = Timestamp.max.to_pydatetime()

        assert Timestamp(pydt_max).value / 1000 == Timestamp.max.value / 1000

        exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning
        with tm.assert_produces_warning(exp_warning):
            pydt_min = Timestamp.min.to_pydatetime()

        # The next assertion can be enabled once GH#39221 is merged
        # assert pydt_min < Timestamp.min  # this is bc nanos are dropped
        tdus = timedelta(microseconds=1)
        assert pydt_min + tdus > Timestamp.min

        assert Timestamp(pydt_min + tdus).value / 1000 == Timestamp.min.value / 1000

    def test_to_period_tz_warning(self):
        # GH#21333 make sure a warning is issued when timezone
        # info is lost
        ts = Timestamp("2009-04-15 16:17:18", tz="US/Eastern")
        with tm.assert_produces_warning(UserWarning):
            # warning that timezone info will be lost
            ts.to_period("D")

    def test_to_numpy_alias(self):
        # GH 24653: alias .to_numpy() for scalars
        ts = Timestamp(datetime.now())
        assert ts.to_datetime64() == ts.to_numpy()

        # GH#44460: dtype/copy arguments are rejected, not silently ignored
        msg = "dtype and copy arguments are ignored"
        with pytest.raises(ValueError, match=msg):
            ts.to_numpy("M8[s]")
        with pytest.raises(ValueError, match=msg):
            ts.to_numpy(copy=True)
class SubDatetime(datetime):
    # Minimal datetime subclass used below to check that Timedelta
    # arithmetic accepts datetime subclasses (GH#25851).
    pass
@pytest.mark.parametrize(
    "lh,rh",
    [
        (SubDatetime(2000, 1, 1), Timedelta(hours=1)),
        (Timedelta(hours=1), SubDatetime(2000, 1, 1)),
    ],
)
def test_dt_subclass_add_timedelta(lh, rh):
    # GH#25851: Timedelta addition must work with a subclassed datetime on
    # either side of the operator.
    assert lh + rh == SubDatetime(2000, 1, 1, 1)
| true | true |
f734c4c022c88b66d3fcb77381bcb51601a9d8af | 992 | py | Python | backend/web_item.py | ldrozdz/Webforming | ded27ee72da1753b4f3d83e8a4abda1884528a8b | [
"Apache-2.0"
] | null | null | null | backend/web_item.py | ldrozdz/Webforming | ded27ee72da1753b4f3d83e8a4abda1884528a8b | [
"Apache-2.0"
] | null | null | null | backend/web_item.py | ldrozdz/Webforming | ded27ee72da1753b4f3d83e8a4abda1884528a8b | [
"Apache-2.0"
] | null | null | null | class WebItem(object):
def __init__(self):
self.post_id = None
self.parent_id = None
self.thread_starter_id = None
self.post_url = None
self.site_url = None
self.source_id = None
self.type = None
self.hash = None
self.post_date = None
self.parsed_post_date = None
self.crawl_date = None
self.post_title = None
self.post_author = None
self.post_author_id = None
self.post_author_reputation = None
self.content = None
self.parent_title = None
self.parent_author = None
self.parent_author_id = None
self.parent_url = None
self.thread_starter_title = None
self.thread_starter_author = None
self.thread_starter_url = None
self.page_category = None
self.sub_category = None
self.notes = None
self.language = None
self.likes = None
self.dislikes = None
self.children = None
self.facebooked = None
self.tweeted = None
self.linkedinned = None
self.gplussed = None | 27.555556 | 38 | 0.680444 | class WebItem(object):
def __init__(self):
self.post_id = None
self.parent_id = None
self.thread_starter_id = None
self.post_url = None
self.site_url = None
self.source_id = None
self.type = None
self.hash = None
self.post_date = None
self.parsed_post_date = None
self.crawl_date = None
self.post_title = None
self.post_author = None
self.post_author_id = None
self.post_author_reputation = None
self.content = None
self.parent_title = None
self.parent_author = None
self.parent_author_id = None
self.parent_url = None
self.thread_starter_title = None
self.thread_starter_author = None
self.thread_starter_url = None
self.page_category = None
self.sub_category = None
self.notes = None
self.language = None
self.likes = None
self.dislikes = None
self.children = None
self.facebooked = None
self.tweeted = None
self.linkedinned = None
self.gplussed = None | true | true |
f734c5a16a4fb88f97b1cf255529a1b8023df50f | 1,216 | py | Python | _unittests/ut_packaged/test_diff.py | sdpython/pymyinstall | 72b3a56a29def0694e34ccae910bf288a95cf4a5 | [
"MIT"
] | 8 | 2015-08-24T21:01:49.000Z | 2018-01-04T06:34:51.000Z | _unittests/ut_packaged/test_diff.py | sdpython/pymyinstall | 72b3a56a29def0694e34ccae910bf288a95cf4a5 | [
"MIT"
] | 66 | 2015-06-14T22:04:58.000Z | 2021-11-11T13:46:03.000Z | _unittests/ut_packaged/test_diff.py | sdpython/pymyinstall | 72b3a56a29def0694e34ccae910bf288a95cf4a5 | [
"MIT"
] | 5 | 2016-09-13T18:14:46.000Z | 2021-08-23T12:03:28.000Z | """
@brief test log(time=200s)
"""
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import ExtTestCase
from pymyinstall.packaged import small_set
class TestDifference(ExtTestCase):
    """Sanity checks over :func:`pymyinstall.packaged.small_set`."""

    def test_diff(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        name = set(_.name for _ in small_set())
        keep = []
        # NOTE(review): ``name`` is built from the very same small_set()
        # call iterated below, so ``mod.name not in name`` can never be
        # true, ``keep`` is always empty, and assertGreater should always
        # fail.  This probably meant to diff small_set() against a
        # *different* module list — confirm the intended second set.
        for mod in small_set():
            if mod.name not in name:
                keep.append(mod)
        self.assertGreater(len(keep), 0)
        # print the surviving modules as copy-pastable ModuleInstall lines
        for mod in keep:
            if mod.mname is None:
                fLOG(
                    "ModuleInstall('{0}', '{1}'),".format(mod.name, mod.kind))
            else:
                fLOG("ModuleInstall('{0}', '{1}', mname='{2}'),".format(
                    mod.name, mod.kind, mod.mname))

    def test_diff2(self):
        # small_set() must at least contain the "coverage" module
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        res = small_set()
        count = {}
        for mod in res:
            count[mod.name] = 1
        self.assertIn("coverage", count)


if __name__ == "__main__":
    unittest.main()
| 25.87234 | 78 | 0.535362 | import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import ExtTestCase
from pymyinstall.packaged import small_set
class TestDifference(ExtTestCase):
def test_diff(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
name = set(_.name for _ in small_set())
keep = []
for mod in small_set():
if mod.name not in name:
keep.append(mod)
self.assertGreater(len(keep), 0)
for mod in keep:
if mod.mname is None:
fLOG(
"ModuleInstall('{0}', '{1}'),".format(mod.name, mod.kind))
else:
fLOG("ModuleInstall('{0}', '{1}', mname='{2}'),".format(
mod.name, mod.kind, mod.mname))
def test_diff2(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
res = small_set()
count = {}
for mod in res:
count[mod.name] = 1
self.assertIn("coverage", count)
if __name__ == "__main__":
unittest.main()
| true | true |
f734c5b8ff6599d9412f3021b918a8bb7dc23ee8 | 553 | py | Python | bot/cogs/guilds/sheeptrainer.py | issuemeaname/rammus-discord-bot | 23bcb02af11b8b764d75fa974149164f0086c1ea | [
"MIT"
] | 1 | 2020-07-07T05:02:40.000Z | 2020-07-07T05:02:40.000Z | bot/cogs/guilds/sheeptrainer.py | issuemeaname/rammus-discord-bot | 23bcb02af11b8b764d75fa974149164f0086c1ea | [
"MIT"
] | null | null | null | bot/cogs/guilds/sheeptrainer.py | issuemeaname/rammus-discord-bot | 23bcb02af11b8b764d75fa974149164f0086c1ea | [
"MIT"
] | 2 | 2019-02-07T18:26:43.000Z | 2021-07-04T16:58:41.000Z | # import discord
from discord.ext import commands
import bot.checks
class Sheeptrainer(commands.Cog, command_attrs={"hidden": True}):
    """Hidden commands restricted to the Sheeptrainer guild."""

    # Discord guild (server) id this cog is restricted to.
    _GUILD = 296463400064647168

    def __init__(self, bot):
        self.bot = bot

    @bot.checks.in_guild(_GUILD)
    async def cog_check(self, ctx):
        # Fix: removed the leftover ``print("a")`` debug statement that
        # spammed stdout on every command invocation in this cog.
        return True

    @commands.command(usage="{0}part 8")
    async def part(self, ctx, number: int = None):
        if number == 8:
            await ctx.send("Have ur balls removed")
def setup(bot):
    # discord.py extension entry point: register the cog when the
    # extension is loaded.
    bot.add_cog(Sheeptrainer(bot))
| 21.269231 | 65 | 0.641953 |
from discord.ext import commands
import bot.checks
class Sheeptrainer(commands.Cog, command_attrs={"hidden": True}):
_GUILD = 296463400064647168
def __init__(self, bot):
self.bot = bot
@bot.checks.in_guild(_GUILD)
async def cog_check(self, ctx):
print("a")
return True
@commands.command(usage="{0}part 8")
async def part(self, ctx, number: int = None):
if number == 8:
await ctx.send("Have ur balls removed")
def setup(bot):
bot.add_cog(Sheeptrainer(bot))
| true | true |
f734c651842c525dc107baa1fc6c1a5c794093b3 | 1,458 | py | Python | setup.py | pyarnold/Mailpile | a7c0a0c6257da167207200f3b214b0e66bb93a10 | [
"Apache-2.0"
] | 2 | 2017-02-03T07:00:57.000Z | 2020-12-18T01:07:34.000Z | setup.py | cz8s/Mailpile | a7c0a0c6257da167207200f3b214b0e66bb93a10 | [
"Apache-2.0"
] | null | null | null | setup.py | cz8s/Mailpile | a7c0a0c6257da167207200f3b214b0e66bb93a10 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
from datetime import date
from setuptools import setup, find_packages
import os
import re
from glob import glob
APPVER = (
line.strip() for line in open('mailpile/defaults.py', 'r')
if re.match(r'^APPVER\s*=', line)
).next().split('"')[1]
try:
# This borks sdist.
os.remove('.SELF')
except:
pass
data_files = []
# Copy static UI files
for dir, dirs, files in os.walk('static'):
data_files.append((dir, [os.path.join(dir, file_) for file_ in files]))
# Copy translation files
for dir, dirs, files in os.walk('locale'):
data_files.append((dir, [os.path.join(dir, file_) for file_ in files]))
setup(
name="mailpile",
version=APPVER.replace('github',
'dev'+date.today().isoformat().replace('-', '')),
license="AGPLv3+",
author="Bjarni R. Einarsson",
author_email="bre@klaki.net",
url="http://www.mailpile.is/",
description="""\
Mailpile is a personal tool for searching and indexing e-mail.""",
long_description="""\
Mailpile is a tool for building and maintaining a tagging search
engine for a personal collection of e-mail. It can be used as a
simple web-mail client.
""",
packages=find_packages(),
data_files=data_files,
install_requires=[
'lxml>=2.3.2',
'jinja2',
'spambayes>=1.1b1'
],
entry_points={
'console_scripts': [
'mailpile = mailpile.__main__:main'
]},
)
| 25.578947 | 76 | 0.633059 |
from datetime import date
from setuptools import setup, find_packages
import os
import re
from glob import glob
APPVER = (
line.strip() for line in open('mailpile/defaults.py', 'r')
if re.match(r'^APPVER\s*=', line)
).next().split('"')[1]
try:
# This borks sdist.
os.remove('.SELF')
except:
pass
data_files = []
# Copy static UI files
for dir, dirs, files in os.walk('static'):
data_files.append((dir, [os.path.join(dir, file_) for file_ in files]))
# Copy translation files
for dir, dirs, files in os.walk('locale'):
data_files.append((dir, [os.path.join(dir, file_) for file_ in files]))
setup(
name="mailpile",
version=APPVER.replace('github',
'dev'+date.today().isoformat().replace('-', '')),
license="AGPLv3+",
author="Bjarni R. Einarsson",
author_email="bre@klaki.net",
url="http://www.mailpile.is/",
description="""\
Mailpile is a personal tool for searching and indexing e-mail.""",
long_description="""\
Mailpile is a tool for building and maintaining a tagging search
engine for a personal collection of e-mail. It can be used as a
simple web-mail client.
""",
packages=find_packages(),
data_files=data_files,
install_requires=[
'lxml>=2.3.2',
'jinja2',
'spambayes>=1.1b1'
],
entry_points={
'console_scripts': [
'mailpile = mailpile.__main__:main'
]},
)
| true | true |
f734c7ad29376309ca2140ef6f5ab05ea0760b35 | 3,037 | py | Python | Code/main.py | Orelbenr/acoustic-fencing | 2d8c6121c915d2f12fae3c9d776e6339f028e35a | [
"MIT"
] | null | null | null | Code/main.py | Orelbenr/acoustic-fencing | 2d8c6121c915d2f12fae3c9d776e6339f028e35a | [
"MIT"
] | null | null | null | Code/main.py | Orelbenr/acoustic-fencing | 2d8c6121c915d2f12fae3c9d776e6339f028e35a | [
"MIT"
] | null | null | null | import sys
from train import train
from separate import separate
import os
from os.path import join as pjoin
import logging
from datetime import datetime
def get_logger(logger_name, file_name):
    """Return a logger that writes plain messages to *file_name* and stdout.

    Bug fix: ``logging.getLogger`` returns the same object for the same
    name, and the original function blindly added a fresh FileHandler and
    StreamHandler on every call.  Since ``main()`` calls this once per run
    with the same logger name, every message was duplicated (once per
    earlier run) and file handles leaked.  Existing handlers are now
    closed and removed first, so repeated calls are safe.
    """
    logger = logging.getLogger(logger_name)
    # Drop handlers left over from previous calls to avoid duplicate output.
    for old_handler in list(logger.handlers):
        old_handler.close()
        logger.removeHandler(old_handler)
    formatter = logging.Formatter('%(message)s')
    file_handler = logging.FileHandler(file_name)
    file_handler.setFormatter(formatter)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
    logger.setLevel(logging.INFO)
    return logger
def main():
    """Drive one or more training/separation experiments.

    Each dict in ``runs`` configures a single experiment (data, model
    hyper-parameters, and which stages to execute); outputs and a
    ``log.txt`` are written under ``../output/<run_name>``.
    """
    # directories (relative to the working directory)
    output_dir = '../output'  # Folder to save outputs (Created by program)
    train_dir = '../data/TRAIN'  # Directory containing train set - .wav files (Created by user)
    test_dir = '../data/TEST'  # Directory containing test set - .wav files (Created by user)
    rir_dir = '../RIRs'  # Directory of Simulation rirs .mat file (Created by user)

    runs = [
        # run1
        {'run_name': 'demo_run', 'test_out_name': 'test_results', 'rir_name': 'demo', 'micN': 9,
         'zoneN': 2, 'spN': 2, 'batch': 128, 'lr': 1e-3, 'perm_skip': 0, 'seg_len': 100, 'epochs': 30, 'sc_step': 10,
         'sc_gamma': 0.5, 'train': True, 'test': True, 'files2save': 5, 'evaluate': True, 'is_simulation': True, 'old_model': None}
        # run2 ...
    ]

    for i, run in enumerate(runs):
        # create required directories and derive per-run paths
        cur_out_dir = pjoin(output_dir, run['run_name'])
        os.makedirs(cur_out_dir, exist_ok=True)
        train_rir = pjoin(rir_dir, 'train_' + run['rir_name'] + '.mat')
        test_rir = pjoin(rir_dir, 'test_' + run['rir_name'] + '.mat')
        model_path = pjoin(cur_out_dir, 'trained_model', 'unet_model.pt')  # created by train
        test_out_dir = pjoin(cur_out_dir, run['test_out_name'])

        # logging (note: i is 0-based, so the header reads "Run 0/N")
        logger = get_logger(logger_name='my_logger', file_name=pjoin(cur_out_dir, 'log.txt'))
        now = datetime.now()
        logger.info('Run {}/{} Started - {}'.format(i, len(runs), now.strftime("%d/%m/%Y %H:%M:%S")))

        # optional training stage
        if run['train']:
            train(cur_out_dir, train_dir, train_rir, mic_num=run['micN'], zone_num=run['zoneN'], sp_num=run['spN'],
                  batch_size=run['batch'], perm_skip=run['perm_skip'], seg_len=run['seg_len'], learning_rate=run['lr'],
                  num_epochs=run['epochs'], sched_step_size=run['sc_step'], sched_gamma=run['sc_gamma'],
                  is_simulation=run['is_simulation'], old_model=run['old_model'])
        # optional separation / evaluation stage
        if run['test']:
            separate(test_out_dir, test_dir, test_rir, model_path, mic_num=run['micN'], zone_num=run['zoneN'],
                     sp_num=run['spN'], perm_skip=run['perm_skip'], seg_len=run['seg_len'],
                     save_num=run['files2save'], is_evaluated=run['evaluate'], is_simulation=run['is_simulation'])

        logger.info('\nProgram Finished Successfully. Yey!')
        logger.info('-'*30)


if __name__ == '__main__':
    main()
| 43.385714 | 131 | 0.638459 | import sys
from train import train
from separate import separate
import os
from os.path import join as pjoin
import logging
from datetime import datetime
def get_logger(logger_name, file_name):
logger = logging.getLogger(logger_name)
file_handler = logging.FileHandler(file_name)
stream_handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(message)s')
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)
return logger
def main():
output_dir = '../output'
train_dir = '../data/TRAIN'
test_dir = '../data/TEST'
rir_dir = '../RIRs'
runs = [
{'run_name': 'demo_run', 'test_out_name': 'test_results', 'rir_name': 'demo', 'micN': 9,
'zoneN': 2, 'spN': 2, 'batch': 128, 'lr': 1e-3, 'perm_skip': 0, 'seg_len': 100, 'epochs': 30, 'sc_step': 10,
'sc_gamma': 0.5, 'train': True, 'test': True, 'files2save': 5, 'evaluate': True, 'is_simulation': True, 'old_model': None}
]
for i, run in enumerate(runs):
cur_out_dir = pjoin(output_dir, run['run_name'])
os.makedirs(cur_out_dir, exist_ok=True)
train_rir = pjoin(rir_dir, 'train_' + run['rir_name'] + '.mat')
test_rir = pjoin(rir_dir, 'test_' + run['rir_name'] + '.mat')
model_path = pjoin(cur_out_dir, 'trained_model', 'unet_model.pt')
test_out_dir = pjoin(cur_out_dir, run['test_out_name'])
logger = get_logger(logger_name='my_logger', file_name=pjoin(cur_out_dir, 'log.txt'))
now = datetime.now()
logger.info('Run {}/{} Started - {}'.format(i, len(runs), now.strftime("%d/%m/%Y %H:%M:%S")))
if run['train']:
train(cur_out_dir, train_dir, train_rir, mic_num=run['micN'], zone_num=run['zoneN'], sp_num=run['spN'],
batch_size=run['batch'], perm_skip=run['perm_skip'], seg_len=run['seg_len'], learning_rate=run['lr'],
num_epochs=run['epochs'], sched_step_size=run['sc_step'], sched_gamma=run['sc_gamma'],
is_simulation=run['is_simulation'], old_model=run['old_model'])
if run['test']:
separate(test_out_dir, test_dir, test_rir, model_path, mic_num=run['micN'], zone_num=run['zoneN'],
sp_num=run['spN'], perm_skip=run['perm_skip'], seg_len=run['seg_len'],
save_num=run['files2save'], is_evaluated=run['evaluate'], is_simulation=run['is_simulation'])
logger.info('\nProgram Finished Successfully. Yey!')
logger.info('-'*30)
if __name__ == '__main__':
main()
| true | true |
f734ca6ded8fd6b46244078e8a884dc84331652f | 18,494 | py | Python | lasagne/tests/layers/test_conv.py | JeffreyDF/Lasagne | 6dd88f5fada20768087f29ae89cbd83980fe0a4e | [
"MIT"
] | 60 | 2015-01-29T21:54:04.000Z | 2019-11-12T07:38:15.000Z | lasagne/tests/layers/test_conv.py | JeffreyDF/Lasagne | 6dd88f5fada20768087f29ae89cbd83980fe0a4e | [
"MIT"
] | 5 | 2015-06-15T00:21:47.000Z | 2017-09-14T10:24:40.000Z | lasagne/tests/layers/test_conv.py | JeffreyDF/Lasagne | 6dd88f5fada20768087f29ae89cbd83980fe0a4e | [
"MIT"
] | 20 | 2015-04-28T00:21:41.000Z | 2019-09-16T01:10:37.000Z | import numpy as np
import pytest
import importlib
import theano
import lasagne
from lasagne.utils import floatX, as_tuple
def conv2d(input, kernel, pad):
    """Reference 2D convolution used to check the conv layers.

    Parameters
    ----------
    input : numpy array, shape (batch, channels, rows, cols)
    kernel : numpy array, shape (filters, channels, kh, kw)
    pad : {'valid', 'same', 'full'} or an int / pair of ints
        Symmetric zero-padding; numeric padding is applied up front and
        then treated as 'valid'.

    Returns
    -------
    numpy array
    """
    if pad not in ('valid', 'same', 'full'):
        # numeric padding: zero-pad the spatial axes, then run 'valid'
        ph, pw = as_tuple(pad, 2, int)
        input = np.pad(input,
                       ((0, 0), (0, 0), (ph, ph), (pw, pw)),
                       mode='constant')
        pad = 'valid'

    n_batch, n_chan, rows, cols = input.shape
    n_filt, _, kh, kw = kernel.shape

    # accumulate the 'full' convolution one kernel tap at a time
    full = np.zeros((n_batch, n_filt, rows + kh - 1, cols + kw - 1))
    for dy in range(kh):
        for dx in range(kw):
            tap = kernel[:, :, dy, dx][:, :, np.newaxis, np.newaxis]
            full[:, :, dy:dy + rows,
                 dx:dx + cols] += (input[:, np.newaxis] * tap).sum(2)

    if pad == 'valid':
        ty, tx = kh - 1, kw - 1
        # '-0 or None' keeps the whole axis when the kernel is size 1
        return full[:, :, ty:-ty or None, tx:-tx or None]
    if pad == 'same':
        oy = (kh - 1) // 2
        ox = (kw - 1) // 2
        return full[:, :, oy:rows + oy, ox:cols + ox]
    return full
def conv2d_test_sets():
    """Generates (input, kernel, expected output, layer kwargs) 2D cases."""
    def _pack(input, kernel, output, kwargs):
        # Inputs are wrapped in theano shared variables; kernels are cast.
        return [theano.shared(floatX(input)), floatX(kernel), output, kwargs]
    for pad in [0, 'full', 'same']:
        for stride in [1, 2, 3]:
            for filter_size in [1, 3]:
                if stride > filter_size:
                    continue
                input = np.random.random((3, 1, 16, 23))
                kernel = np.random.random((16, 1, filter_size, filter_size))
                # A strided convolution equals the unstrided result
                # subsampled along both spatial axes.
                output = conv2d(input, kernel, pad=pad)
                output = output[:, :, ::stride, ::stride]
                yield _pack(input, kernel, output,
                            {'pad': pad, 'stride': stride})
    input = np.random.random((3, 1, 16, 23))
    kernel = np.random.random((16, 1, 3, 3))
    output = conv2d(input, kernel, pad='valid')
    # bias-less case
    yield _pack(input, kernel, output, {'b': None})
    # explicit pad='valid' case
    yield _pack(input, kernel, output, {'pad': 'valid'})
def conv1d(input, kernel, pad):
    """Reference 1D convolution (single input channel) used by the tests.

    Parameters
    ----------
    input : numpy array, shape (batch, 1, length)
    kernel : numpy array, shape (filters, 1, filter_length)
    pad : {'valid', 'same', 'full'} or int

    Returns
    -------
    numpy array of per-batch, per-filter 1D convolutions
    """
    if pad not in ['valid', 'same', 'full']:
        # Numeric padding: zero-pad the length axis, then convolve 'valid'.
        amount = int(pad)
        input = np.pad(input, ((0, 0), (0, 0), (amount, amount)),
                       mode='constant')
        pad = 'valid'
    # Only channel 0 is used: the reference assumes a single input channel.
    return np.array([[np.convolve(sample[0], filt[0], mode=pad)
                      for filt in kernel]
                     for sample in input])
def conv1d_test_sets():
    """Generates (input, kernel, expected output, layer kwargs) 1D cases."""
    def _pack(input, kernel, output, kwargs):
        # Inputs are wrapped in theano shared variables; kernels are cast.
        return [theano.shared(floatX(input)), floatX(kernel), output, kwargs]
    for pad in [0, 1, 2, 'full', 'same']:
        for stride in [1, 2, 3]:
            for filter_size in [1, 3]:
                if stride > filter_size:
                    continue
                input = np.random.random((3, 1, 23))
                kernel = np.random.random((16, 1, filter_size))
                # A strided convolution equals the unstrided result,
                # subsampled along the length axis.
                output = conv1d(input, kernel, pad)[:, :, ::stride]
                yield _pack(input, kernel, output,
                            {'pad': pad, 'stride': stride})
    input = np.random.random((3, 1, 23))
    kernel = np.random.random((16, 1, 3))
    output = conv1d(input, kernel, pad='valid')
    # bias-less case
    yield _pack(input, kernel, output, {'b': None})
    # explicit pad='valid' case
    yield _pack(input, kernel, output, {'pad': 'valid'})
def test_conv_output_length():
    """conv_output_length computes output sizes for every pad mode."""
    from lasagne.layers.conv import conv_output_length
    # (pad mode, expected length) for input 13, filter 5, stride 3.
    for pad, expected in [('valid', 3), (0, 3), ('full', 6),
                          ('same', 5), (2, 5)]:
        assert conv_output_length(13, 5, 3, pad) == expected
    # Unknown pad modes must be rejected with a descriptive error.
    with pytest.raises(ValueError) as exc:
        conv_output_length(13, 5, 3, '_nonexistent_mode')
    assert "Invalid pad: " in exc.value.args[0]
@pytest.fixture
def DummyInputLayer():
    """Fixture: returns a factory building a bare InputLayer of any shape."""
    def make_input_layer(shape):
        from lasagne.layers.input import InputLayer
        return InputLayer(shape)
    return make_input_layer
class TestConv1DLayer:
    """Tests for lasagne.layers.conv.Conv1DLayer."""
    @pytest.mark.parametrize(
        "input, kernel, output, kwargs", list(conv1d_test_sets()))
    @pytest.mark.parametrize("extra_kwargs", [
        {},
        {'untie_biases': True},
    ])
    def test_defaults(self, DummyInputLayer,
                      input, kernel, output, kwargs, extra_kwargs):
        """Layer output matches the numpy reference for each test case."""
        kwargs.update(extra_kwargs)
        b, c, w = input.shape.eval()
        input_layer = DummyInputLayer((b, c, w))
        try:
            from lasagne.layers.conv import Conv1DLayer
            layer = Conv1DLayer(
                input_layer,
                num_filters=kernel.shape[0],
                filter_size=kernel.shape[2],
                W=kernel,
                **kwargs
            )
            actual = layer.get_output_for(input).eval()
            assert actual.shape == output.shape
            assert actual.shape == layer.output_shape
            assert np.allclose(actual, output)
        except NotImplementedError:
            # Some pad/stride combinations are unsupported; treat as pass.
            pass
    def test_init_none_nonlinearity_bias(self, DummyInputLayer):
        """nonlinearity=None maps to identity; b=None disables the bias."""
        from lasagne.layers.conv import Conv1DLayer
        input_layer = DummyInputLayer((1, 2, 3))
        layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(3,),
                            nonlinearity=None, b=None)
        assert layer.nonlinearity == lasagne.nonlinearities.identity
        assert layer.b is None
    def test_invalid_pad(self, DummyInputLayer):
        """Bad pad modes raise TypeError; even 'same' filters are rejected."""
        from lasagne.layers.conv import Conv1DLayer
        input_layer = DummyInputLayer((1, 2, 3))
        with pytest.raises(TypeError) as exc:
            layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(3,),
                                pad='_nonexistent_mode')
        assert "iterable of int" in exc.value.args[0]
        with pytest.raises(NotImplementedError) as exc:
            layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(4,),
                                pad='same')
        assert "requires odd filter size" in exc.value.args[0]
class TestConv2DLayerImplementations:
    """Runs the same checks against every available Conv2D backend."""
    @pytest.fixture(
        params=[
            ('lasagne.layers', 'Conv2DLayer', {}),
            ('lasagne.layers.cuda_convnet',
             'Conv2DCCLayer',
             {'flip_filters': True}),
            ('lasagne.layers.corrmm', 'Conv2DMMLayer', {'flip_filters': True}),
            ('lasagne.layers.dnn', 'Conv2DDNNLayer', {'flip_filters': True}),
        ],
    )
    def Conv2DImpl(self, request):
        """Fixture yielding one backend class per param, with its defaults."""
        impl_module_name, impl_name, impl_default_kwargs = request.param
        try:
            mod = importlib.import_module(impl_module_name)
        except ImportError:
            pytest.skip("{} not available".format(impl_module_name))
        impl = getattr(mod, impl_name)
        def wrapper(*args, **kwargs):
            # Merge backend-specific defaults with per-test kwargs.
            kwargs2 = impl_default_kwargs.copy()
            kwargs2.update(kwargs)
            return impl(*args, **kwargs2)
        wrapper.__name__ = impl_name
        return wrapper
    @pytest.mark.parametrize(
        "input, kernel, output, kwargs", list(conv2d_test_sets()))
    @pytest.mark.parametrize("extra_kwargs", [
        {},
        {'untie_biases': True},
    ])
    def test_defaults(self, Conv2DImpl, DummyInputLayer,
                      input, kernel, output, kwargs, extra_kwargs):
        """Backend output matches the numpy reference for each test case."""
        kwargs.update(extra_kwargs)
        b, c, h, w = input.shape.eval()
        input_layer = DummyInputLayer((b, c, h, w))
        try:
            layer = Conv2DImpl(
                input_layer,
                num_filters=kernel.shape[0],
                filter_size=kernel.shape[2:],
                W=kernel,
                **kwargs
            )
            actual = layer.get_output_for(input).eval()
            assert actual.shape == output.shape
            assert actual.shape == layer.output_shape
            assert np.allclose(actual, output)
        except NotImplementedError:
            pytest.skip()
    @pytest.mark.parametrize(
        "input, kernel, output, kwargs", list(conv2d_test_sets()))
    def test_with_nones(self, Conv2DImpl, DummyInputLayer,
                        input, kernel, output, kwargs):
        """Unknown (None) input dims still yield correct concrete outputs."""
        b, c, h, w = input.shape.eval()
        input_layer = DummyInputLayer((None, c, None, None))
        try:
            layer = Conv2DImpl(
                input_layer,
                num_filters=kernel.shape[0],
                filter_size=kernel.shape[2:],
                W=kernel,
                **kwargs
            )
            actual = layer.get_output_for(input).eval()
            assert layer.output_shape == (None,
                                          kernel.shape[0],
                                          None,
                                          None)
            assert actual.shape == output.shape
            assert np.allclose(actual, output)
        except NotImplementedError:
            pytest.skip()
    def test_init_none_nonlinearity_bias(self, Conv2DImpl, DummyInputLayer):
        """nonlinearity=None maps to identity; b=None disables the bias."""
        input_layer = DummyInputLayer((1, 2, 3, 3))
        layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3),
                           nonlinearity=None, b=None)
        assert layer.nonlinearity == lasagne.nonlinearities.identity
        assert layer.b is None
    def test_invalid_pad(self, Conv2DImpl, DummyInputLayer):
        """Bad pad modes raise TypeError; even 'same' filters are rejected."""
        input_layer = DummyInputLayer((1, 2, 3))
        with pytest.raises(TypeError) as exc:
            layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3),
                               pad='_nonexistent_mode')
        assert "iterable of int" in exc.value.args[0]
        with pytest.raises(NotImplementedError) as exc:
            layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(4, 4),
                               pad='same')
        assert "requires odd filter size" in exc.value.args[0]
    def test_get_params(self, Conv2DImpl, DummyInputLayer):
        """get_params honors the trainable/regularizable tag filters."""
        input_layer = DummyInputLayer((128, 3, 32, 32))
        layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3))
        assert layer.get_params() == [layer.W, layer.b]
        assert layer.get_params(regularizable=False) == [layer.b]
        assert layer.get_params(regularizable=True) == [layer.W]
        assert layer.get_params(trainable=True) == [layer.W, layer.b]
        assert layer.get_params(trainable=False) == []
        assert layer.get_params(_nonexistent_tag=True) == []
        assert layer.get_params(_nonexistent_tag=False) == [layer.W, layer.b]
class TestConv2DDNNLayer:
    """Tests specific to the cuDNN-backed Conv2DDNNLayer."""
    def test_import_without_gpu_or_cudnn_raises(self):
        """Importing the dnn module without GPU+cuDNN must fail."""
        from theano.sandbox.cuda import dnn
        if theano.config.device.startswith("gpu") and dnn.dnn_available():
            pytest.skip()
        else:
            with pytest.raises(ImportError):
                import lasagne.layers.dnn
    def test_pad(self, DummyInputLayer):
        """Explicit (3, 3) padding enlarges the output accordingly."""
        try:
            from lasagne.layers.dnn import Conv2DDNNLayer
        except ImportError:
            pytest.skip("dnn not available")
        input_layer = DummyInputLayer((1, 2, 3, 3))
        # 3x3 input, 3x3 filter, pad 3 on each side -> 3+2*3-3+1 = 7.
        layer = Conv2DDNNLayer(input_layer, num_filters=4, filter_size=(3, 3),
                               pad=(3, 3))
        assert layer.output_shape == (1, 4, 7, 7)
class TestConv2DMMLayer:
    """Tests specific to the GEMM-based Conv2DMMLayer."""
    def test_import_without_gpu_raises(self):
        """Importing the corrmm module without a GPU must fail."""
        if theano.config.device.startswith("gpu"):
            pytest.skip()
        else:
            with pytest.raises(ImportError):
                import lasagne.layers.corrmm
    def test_pad(self, DummyInputLayer):
        """Explicit (3, 3) padding enlarges the output accordingly."""
        try:
            from lasagne.layers.corrmm import Conv2DMMLayer
        except ImportError:
            pytest.skip("corrmm not available")
        input_layer = DummyInputLayer((1, 2, 3, 3))
        # 3x3 input, 3x3 filter, pad 3 on each side -> 3+2*3-3+1 = 7.
        layer = Conv2DMMLayer(input_layer, num_filters=4, filter_size=(3, 3),
                              pad=(3, 3))
        assert layer.output_shape == (1, 4, 7, 7)
class TestConv2DCCLayer:
    """Tests specific to the cuda-convnet-backed Conv2DCCLayer."""
    def test_import_without_gpu_raises(self):
        """Importing the cuda_convnet module without a GPU must fail."""
        if theano.config.device.startswith("gpu"):
            pytest.skip()
        else:
            with pytest.raises(ImportError):
                import lasagne.layers.cuda_convnet
    def test_unsupported_settings(self, DummyInputLayer):
        """cuda-convnet restrictions must be reported as RuntimeErrors."""
        try:
            from lasagne.layers.cuda_convnet import Conv2DCCLayer
        except ImportError:
            pytest.skip("cuda_convnet not available")
        input_layer = DummyInputLayer((128, 3, 32, 32))
        # Non-square filters are not supported.
        with pytest.raises(RuntimeError) as exc:
            layer = Conv2DCCLayer(input_layer, num_filters=16,
                                  filter_size=(3, 5))
        assert ("Conv2DCCLayer only supports square filters" in
                exc.value.args[0])
        # Non-square strides are not supported.
        with pytest.raises(RuntimeError) as exc:
            layer = Conv2DCCLayer(input_layer, num_filters=16,
                                  filter_size=(3, 3), stride=(1, 2))
        assert ("Conv2DCCLayer only supports square strides" in
                exc.value.args[0])
        # num_filters must be a multiple of 16.
        with pytest.raises(RuntimeError) as exc:
            layer = Conv2DCCLayer(input_layer, num_filters=15,
                                  filter_size=(3, 3))
        assert ("Conv2DCCLayer requires num_filters to be a multiple of 16" in
                exc.value.args[0])
        # Non-square padding is not supported.
        with pytest.raises(RuntimeError) as exc:
            layer = Conv2DCCLayer(input_layer, num_filters=16,
                                  filter_size=(3, 3), pad=(1, 2))
        assert ("Conv2DCCLayer only supports square padding" in
                exc.value.args[0])
        # Channel counts other than 1, 2, 3 or multiples of 4 are rejected.
        input_layer = DummyInputLayer((128, 7, 32, 32))
        with pytest.raises(RuntimeError) as exc:
            layer = Conv2DCCLayer(input_layer, num_filters=16,
                                  filter_size=(3, 3))
        assert ("Conv2DCCLayer requires the number of input channels to be "
                "1, 2, 3 or a multiple of 4" in exc.value.args[0])
    def test_pad(self, DummyInputLayer):
        """Explicit (3, 3) padding enlarges the output accordingly."""
        try:
            from lasagne.layers.cuda_convnet import Conv2DCCLayer
        except ImportError:
            pytest.skip("cuda_convnet not available")
        input_layer = DummyInputLayer((128, 3, 32, 32))
        layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
                              pad=(3, 3))
        assert layer.output_shape == (128, 16, 36, 36)
    def test_dimshuffle_false_shapes(self, DummyInputLayer):
        """With dimshuffle=False, W and b use c01b-ordered shapes."""
        try:
            from lasagne.layers.cuda_convnet import Conv2DCCLayer
        except ImportError:
            pytest.skip("cuda_convnet not available")
        input_layer = DummyInputLayer((4, 32, 32, 128))  # c01b instead of bc01
        layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
                              dimshuffle=False)
        assert layer.W.get_value().shape == (4, 3, 3, 16)
        assert layer.b.get_value().shape == (16,)
        layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
                              dimshuffle=False, untie_biases=True)
        assert layer.W.get_value().shape == (4, 3, 3, 16)
        assert layer.b.get_value().shape == (16, 30, 30)
    def test_dimshuffle_false_get_output_for(self, DummyInputLayer):
        """With dimshuffle=False, the output matches a raw FilterActs op."""
        try:
            from lasagne.layers.cuda_convnet import Conv2DCCLayer
        except ImportError:
            pytest.skip("cuda_convnet not available")
        # this implementation is tested against FilterActs instead of
        # theano.tensor.nnet.conv.conv2d because using the latter leads to
        # numerical precision errors.
        from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
        filter_acts = FilterActs(stride=1, pad=0, partial_sum=1)
        input = theano.shared(floatX(np.random.random((4, 5, 5, 8))))
        kernel = theano.shared(floatX(np.random.random((4, 3, 3, 16))))
        input_layer = DummyInputLayer((4, 5, 5, 8))  # c01b instead of bc01
        layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
                              dimshuffle=False, W=kernel, b=None,
                              nonlinearity=None)
        output = np.array(filter_acts(input, kernel).eval())
        actual = layer.get_output_for(input).eval()
        actual = np.array(actual)
        assert actual.shape == output.shape
        assert actual.shape == layer.output_shape
        assert np.allclose(actual, output)
class TestShuffleLayers:
    """Tests for the bc01 <-> c01b dimension-shuffle helper layers."""
    def test_bc01_to_c01b(self):
        """ShuffleBC01ToC01BLayer moves the batch axis to the back."""
        from lasagne.layers.input import InputLayer
        try:
            from lasagne.layers.cuda_convnet import ShuffleBC01ToC01BLayer
        except ImportError:
            pytest.skip("cuda_convnet not available")
        input_layer = InputLayer((1, 2, 3, 4))
        layer = ShuffleBC01ToC01BLayer(input_layer)
        assert layer.output_shape == (2, 3, 4, 1)
        input = floatX(np.random.random((1, 2, 3, 4)))
        output = input.transpose(1, 2, 3, 0)
        actual = layer.get_output_for(theano.shared(input)).eval()
        assert np.allclose(output, actual)
    def test_c01b_to_bc01(self):
        """ShuffleC01BToBC01Layer moves the batch axis to the front."""
        from lasagne.layers.input import InputLayer
        try:
            from lasagne.layers.cuda_convnet import ShuffleC01BToBC01Layer
        except ImportError:
            pytest.skip("cuda_convnet not available")
        input_layer = InputLayer((1, 2, 3, 4))
        layer = ShuffleC01BToBC01Layer(input_layer)
        assert layer.output_shape == (4, 1, 2, 3)
        input = floatX(np.random.random((1, 2, 3, 4)))
        output = input.transpose(3, 0, 1, 2)
        actual = layer.get_output_for(theano.shared(input)).eval()
        assert np.allclose(output, actual)
| 37.513185 | 79 | 0.568779 | import numpy as np
import pytest
import importlib
import theano
import lasagne
from lasagne.utils import floatX, as_tuple
def conv2d(input, kernel, pad):
if pad not in ['valid', 'same', 'full']:
pad = as_tuple(pad, 2, int)
input = np.pad(input,
((0, 0), (0, 0), (pad[0], pad[0]), (pad[1], pad[1])),
mode='constant')
pad = 'valid'
output = np.zeros((input.shape[0],
kernel.shape[0],
input.shape[2] + kernel.shape[2] - 1,
input.shape[3] + kernel.shape[3] - 1,
))
for i in range(kernel.shape[2]):
for j in range(kernel.shape[3]):
k = kernel[:, :, i, j][:, :, np.newaxis, np.newaxis]
output[:, :, i:i + input.shape[2],
j:j + input.shape[3]] += (input[:, np.newaxis] * k).sum(2)
if pad == 'valid':
trim = (kernel.shape[2] - 1, kernel.shape[3] - 1)
output = output[:,
:,
trim[0]:-trim[0] or None,
trim[1]:-trim[1] or None]
elif pad == 'same':
shift_x = (kernel.shape[2] - 1) // 2
shift_y = (kernel.shape[3] - 1) // 2
output = output[:, :, shift_x:input.shape[2] + shift_x,
shift_y:input.shape[3] + shift_y]
return output
def conv2d_test_sets():
def _convert(input, kernel, output, kwargs):
return [theano.shared(floatX(input)), floatX(kernel), output, kwargs]
for pad in [0, 'full', 'same']:
for stride in [1, 2, 3]:
for filter_size in [1, 3]:
if stride > filter_size:
continue
input = np.random.random((3, 1, 16, 23))
kernel = np.random.random((16, 1, filter_size, filter_size))
output = conv2d(input, kernel, pad=pad)
output = output[:, :, ::stride, ::stride]
yield _convert(input, kernel, output, {'pad': pad,
'stride': stride
})
input = np.random.random((3, 1, 16, 23))
kernel = np.random.random((16, 1, 3, 3))
output = conv2d(input, kernel, pad='valid')
yield _convert(input, kernel, output, {'b': None})
yield _convert(input, kernel, output, {'pad': 'valid'})
def conv1d(input, kernel, pad):
if pad not in ['valid', 'same', 'full']:
input = np.pad(input,
((0, 0), (0, 0), (int(pad), int(pad))),
mode='constant')
pad = 'valid'
output = []
for b in input:
temp = []
for c in kernel:
temp.append(
np.convolve(b[0, :], c[0, :], mode=pad))
output.append(temp)
return np.array(output)
def conv1d_test_sets():
def _convert(input, kernel, output, kwargs):
return [theano.shared(floatX(input)), floatX(kernel), output, kwargs]
for pad in [0, 1, 2, 'full', 'same']:
for stride in [1, 2, 3]:
for filter_size in [1, 3]:
if stride > filter_size:
continue
input = np.random.random((3, 1, 23))
kernel = np.random.random((16, 1, filter_size))
output = conv1d(input, kernel, pad)
output = output[:, :, ::stride]
yield _convert(input, kernel, output, {'pad': pad,
'stride': stride,
})
input = np.random.random((3, 1, 23))
kernel = np.random.random((16, 1, 3))
output = conv1d(input, kernel, pad='valid')
yield _convert(input, kernel, output, {'b': None})
yield _convert(input, kernel, output, {'pad': 'valid'})
def test_conv_output_length():
from lasagne.layers.conv import conv_output_length
assert conv_output_length(13, 5, 3, 'valid') == 3
assert conv_output_length(13, 5, 3, 0) == 3
assert conv_output_length(13, 5, 3, 'full') == 6
assert conv_output_length(13, 5, 3, 'same') == 5
assert conv_output_length(13, 5, 3, 2) == 5
with pytest.raises(ValueError) as exc:
conv_output_length(13, 5, 3, '_nonexistent_mode')
assert "Invalid pad: " in exc.value.args[0]
@pytest.fixture
def DummyInputLayer():
def factory(shape):
from lasagne.layers.input import InputLayer
return InputLayer(shape)
return factory
class TestConv1DLayer:
@pytest.mark.parametrize(
"input, kernel, output, kwargs", list(conv1d_test_sets()))
@pytest.mark.parametrize("extra_kwargs", [
{},
{'untie_biases': True},
])
def test_defaults(self, DummyInputLayer,
input, kernel, output, kwargs, extra_kwargs):
kwargs.update(extra_kwargs)
b, c, w = input.shape.eval()
input_layer = DummyInputLayer((b, c, w))
try:
from lasagne.layers.conv import Conv1DLayer
layer = Conv1DLayer(
input_layer,
num_filters=kernel.shape[0],
filter_size=kernel.shape[2],
W=kernel,
**kwargs
)
actual = layer.get_output_for(input).eval()
assert actual.shape == output.shape
assert actual.shape == layer.output_shape
assert np.allclose(actual, output)
except NotImplementedError:
pass
def test_init_none_nonlinearity_bias(self, DummyInputLayer):
from lasagne.layers.conv import Conv1DLayer
input_layer = DummyInputLayer((1, 2, 3))
layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(3,),
nonlinearity=None, b=None)
assert layer.nonlinearity == lasagne.nonlinearities.identity
assert layer.b is None
def test_invalid_pad(self, DummyInputLayer):
from lasagne.layers.conv import Conv1DLayer
input_layer = DummyInputLayer((1, 2, 3))
with pytest.raises(TypeError) as exc:
layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(3,),
pad='_nonexistent_mode')
assert "iterable of int" in exc.value.args[0]
with pytest.raises(NotImplementedError) as exc:
layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(4,),
pad='same')
assert "requires odd filter size" in exc.value.args[0]
class TestConv2DLayerImplementations:
@pytest.fixture(
params=[
('lasagne.layers', 'Conv2DLayer', {}),
('lasagne.layers.cuda_convnet',
'Conv2DCCLayer',
{'flip_filters': True}),
('lasagne.layers.corrmm', 'Conv2DMMLayer', {'flip_filters': True}),
('lasagne.layers.dnn', 'Conv2DDNNLayer', {'flip_filters': True}),
],
)
def Conv2DImpl(self, request):
impl_module_name, impl_name, impl_default_kwargs = request.param
try:
mod = importlib.import_module(impl_module_name)
except ImportError:
pytest.skip("{} not available".format(impl_module_name))
impl = getattr(mod, impl_name)
def wrapper(*args, **kwargs):
kwargs2 = impl_default_kwargs.copy()
kwargs2.update(kwargs)
return impl(*args, **kwargs2)
wrapper.__name__ = impl_name
return wrapper
@pytest.mark.parametrize(
"input, kernel, output, kwargs", list(conv2d_test_sets()))
@pytest.mark.parametrize("extra_kwargs", [
{},
{'untie_biases': True},
])
def test_defaults(self, Conv2DImpl, DummyInputLayer,
input, kernel, output, kwargs, extra_kwargs):
kwargs.update(extra_kwargs)
b, c, h, w = input.shape.eval()
input_layer = DummyInputLayer((b, c, h, w))
try:
layer = Conv2DImpl(
input_layer,
num_filters=kernel.shape[0],
filter_size=kernel.shape[2:],
W=kernel,
**kwargs
)
actual = layer.get_output_for(input).eval()
assert actual.shape == output.shape
assert actual.shape == layer.output_shape
assert np.allclose(actual, output)
except NotImplementedError:
pytest.skip()
@pytest.mark.parametrize(
"input, kernel, output, kwargs", list(conv2d_test_sets()))
def test_with_nones(self, Conv2DImpl, DummyInputLayer,
input, kernel, output, kwargs):
b, c, h, w = input.shape.eval()
input_layer = DummyInputLayer((None, c, None, None))
try:
layer = Conv2DImpl(
input_layer,
num_filters=kernel.shape[0],
filter_size=kernel.shape[2:],
W=kernel,
**kwargs
)
actual = layer.get_output_for(input).eval()
assert layer.output_shape == (None,
kernel.shape[0],
None,
None)
assert actual.shape == output.shape
assert np.allclose(actual, output)
except NotImplementedError:
pytest.skip()
def test_init_none_nonlinearity_bias(self, Conv2DImpl, DummyInputLayer):
input_layer = DummyInputLayer((1, 2, 3, 3))
layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3),
nonlinearity=None, b=None)
assert layer.nonlinearity == lasagne.nonlinearities.identity
assert layer.b is None
def test_invalid_pad(self, Conv2DImpl, DummyInputLayer):
input_layer = DummyInputLayer((1, 2, 3))
with pytest.raises(TypeError) as exc:
layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3),
pad='_nonexistent_mode')
assert "iterable of int" in exc.value.args[0]
with pytest.raises(NotImplementedError) as exc:
layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(4, 4),
pad='same')
assert "requires odd filter size" in exc.value.args[0]
def test_get_params(self, Conv2DImpl, DummyInputLayer):
input_layer = DummyInputLayer((128, 3, 32, 32))
layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3))
assert layer.get_params() == [layer.W, layer.b]
assert layer.get_params(regularizable=False) == [layer.b]
assert layer.get_params(regularizable=True) == [layer.W]
assert layer.get_params(trainable=True) == [layer.W, layer.b]
assert layer.get_params(trainable=False) == []
assert layer.get_params(_nonexistent_tag=True) == []
assert layer.get_params(_nonexistent_tag=False) == [layer.W, layer.b]
class TestConv2DDNNLayer:
def test_import_without_gpu_or_cudnn_raises(self):
from theano.sandbox.cuda import dnn
if theano.config.device.startswith("gpu") and dnn.dnn_available():
pytest.skip()
else:
with pytest.raises(ImportError):
import lasagne.layers.dnn
def test_pad(self, DummyInputLayer):
try:
from lasagne.layers.dnn import Conv2DDNNLayer
except ImportError:
pytest.skip("dnn not available")
input_layer = DummyInputLayer((1, 2, 3, 3))
layer = Conv2DDNNLayer(input_layer, num_filters=4, filter_size=(3, 3),
pad=(3, 3))
assert layer.output_shape == (1, 4, 7, 7)
class TestConv2DMMLayer:
def test_import_without_gpu_raises(self):
if theano.config.device.startswith("gpu"):
pytest.skip()
else:
with pytest.raises(ImportError):
import lasagne.layers.corrmm
def test_pad(self, DummyInputLayer):
try:
from lasagne.layers.corrmm import Conv2DMMLayer
except ImportError:
pytest.skip("corrmm not available")
input_layer = DummyInputLayer((1, 2, 3, 3))
layer = Conv2DMMLayer(input_layer, num_filters=4, filter_size=(3, 3),
pad=(3, 3))
assert layer.output_shape == (1, 4, 7, 7)
class TestConv2DCCLayer:
def test_import_without_gpu_raises(self):
if theano.config.device.startswith("gpu"):
pytest.skip()
else:
with pytest.raises(ImportError):
import lasagne.layers.cuda_convnet
def test_unsupported_settings(self, DummyInputLayer):
try:
from lasagne.layers.cuda_convnet import Conv2DCCLayer
except ImportError:
pytest.skip("cuda_convnet not available")
input_layer = DummyInputLayer((128, 3, 32, 32))
with pytest.raises(RuntimeError) as exc:
layer = Conv2DCCLayer(input_layer, num_filters=16,
filter_size=(3, 5))
assert ("Conv2DCCLayer only supports square filters" in
exc.value.args[0])
with pytest.raises(RuntimeError) as exc:
layer = Conv2DCCLayer(input_layer, num_filters=16,
filter_size=(3, 3), stride=(1, 2))
assert ("Conv2DCCLayer only supports square strides" in
exc.value.args[0])
with pytest.raises(RuntimeError) as exc:
layer = Conv2DCCLayer(input_layer, num_filters=15,
filter_size=(3, 3))
assert ("Conv2DCCLayer requires num_filters to be a multiple of 16" in
exc.value.args[0])
with pytest.raises(RuntimeError) as exc:
layer = Conv2DCCLayer(input_layer, num_filters=16,
filter_size=(3, 3), pad=(1, 2))
assert ("Conv2DCCLayer only supports square padding" in
exc.value.args[0])
input_layer = DummyInputLayer((128, 7, 32, 32))
with pytest.raises(RuntimeError) as exc:
layer = Conv2DCCLayer(input_layer, num_filters=16,
filter_size=(3, 3))
assert ("Conv2DCCLayer requires the number of input channels to be "
"1, 2, 3 or a multiple of 4" in exc.value.args[0])
def test_pad(self, DummyInputLayer):
try:
from lasagne.layers.cuda_convnet import Conv2DCCLayer
except ImportError:
pytest.skip("cuda_convnet not available")
input_layer = DummyInputLayer((128, 3, 32, 32))
layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
pad=(3, 3))
assert layer.output_shape == (128, 16, 36, 36)
def test_dimshuffle_false_shapes(self, DummyInputLayer):
try:
from lasagne.layers.cuda_convnet import Conv2DCCLayer
except ImportError:
pytest.skip("cuda_convnet not available")
input_layer = DummyInputLayer((4, 32, 32, 128))
layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
dimshuffle=False)
assert layer.W.get_value().shape == (4, 3, 3, 16)
assert layer.b.get_value().shape == (16,)
layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
dimshuffle=False, untie_biases=True)
assert layer.W.get_value().shape == (4, 3, 3, 16)
assert layer.b.get_value().shape == (16, 30, 30)
def test_dimshuffle_false_get_output_for(self, DummyInputLayer):
try:
from lasagne.layers.cuda_convnet import Conv2DCCLayer
except ImportError:
pytest.skip("cuda_convnet not available")
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
filter_acts = FilterActs(stride=1, pad=0, partial_sum=1)
input = theano.shared(floatX(np.random.random((4, 5, 5, 8))))
kernel = theano.shared(floatX(np.random.random((4, 3, 3, 16))))
input_layer = DummyInputLayer((4, 5, 5, 8))
layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
dimshuffle=False, W=kernel, b=None,
nonlinearity=None)
output = np.array(filter_acts(input, kernel).eval())
actual = layer.get_output_for(input).eval()
actual = np.array(actual)
assert actual.shape == output.shape
assert actual.shape == layer.output_shape
assert np.allclose(actual, output)
class TestShuffleLayers:
def test_bc01_to_c01b(self):
from lasagne.layers.input import InputLayer
try:
from lasagne.layers.cuda_convnet import ShuffleBC01ToC01BLayer
except ImportError:
pytest.skip("cuda_convnet not available")
input_layer = InputLayer((1, 2, 3, 4))
layer = ShuffleBC01ToC01BLayer(input_layer)
assert layer.output_shape == (2, 3, 4, 1)
input = floatX(np.random.random((1, 2, 3, 4)))
output = input.transpose(1, 2, 3, 0)
actual = layer.get_output_for(theano.shared(input)).eval()
assert np.allclose(output, actual)
def test_c01b_to_bc01(self):
from lasagne.layers.input import InputLayer
try:
from lasagne.layers.cuda_convnet import ShuffleC01BToBC01Layer
except ImportError:
pytest.skip("cuda_convnet not available")
input_layer = InputLayer((1, 2, 3, 4))
layer = ShuffleC01BToBC01Layer(input_layer)
assert layer.output_shape == (4, 1, 2, 3)
input = floatX(np.random.random((1, 2, 3, 4)))
output = input.transpose(3, 0, 1, 2)
actual = layer.get_output_for(theano.shared(input)).eval()
assert np.allclose(output, actual)
| true | true |
f734caef5c5d450dabd683c8724747521c21a4ff | 21,738 | py | Python | tensorflow/python/distribute/tpu_values.py | EricLi404/tensorflow | 23759800d89f7b5362c338d9a3fd72a6810c3e22 | [
"Apache-2.0"
] | 74 | 2020-07-06T17:11:39.000Z | 2022-01-28T06:31:28.000Z | tensorflow/python/distribute/tpu_values.py | EricLi404/tensorflow | 23759800d89f7b5362c338d9a3fd72a6810c3e22 | [
"Apache-2.0"
] | 9 | 2020-10-13T23:25:29.000Z | 2022-02-10T06:54:48.000Z | tensorflow/python/distribute/tpu_values.py | EricLi404/tensorflow | 23759800d89f7b5362c338d9a3fd72a6810c3e22 | [
"Apache-2.0"
] | 12 | 2020-07-08T07:27:17.000Z | 2021-12-27T08:54:27.000Z | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various classes representing TPU distributed values.
Note that the tests are in values_test.py .
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.distribute import packed_distributed_variable as packed
from tensorflow.python.distribute import values
from tensorflow.python.distribute import values_util
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.tpu import tpu
@contextlib.contextmanager
def _maybe_enter_graph(tensor):
  """Enters `tensor`'s graph unless eager / a default graph already exists."""
  # Note: might have an eager tensor but not be executing eagerly when
  # building functions.
  if (context.executing_eagerly() or isinstance(tensor, ops.EagerTensor) or
      ops.has_default_graph()):
    yield
  else:
    with tensor.graph.as_default():
      yield
@contextlib.contextmanager
def _maybe_on_device(var):
  """Scopes ops to `var.device` for packed variables; no-op otherwise."""
  # Add a device scope for packed variables.
  if isinstance(var, packed.PackedVarAndDevice):
    with ops.device(var.device):
      yield
  else:
    yield
def _make_raw_assign_fn(raw_assign_fn):  # pylint: disable=missing-docstring
  # Wraps a raw resource-variable assign op (e.g. assign_add_variable_op)
  # into an assign method with the standard tf.Variable signature.
  def assign_fn(var, value, use_locking=False, name=None, read_value=True):  # pylint: disable=missing-docstring
    del use_locking  # Unused.
    handle = var.handle
    # Enter the handle's graph and the variable's device so the raw op is
    # created in the right place even for packed variables.
    with _maybe_enter_graph(handle), _maybe_on_device(var):
      op = raw_assign_fn(
          handle,
          ops.convert_to_tensor(value, dtype=var.dtype),
          name=name)
      with ops.control_dependencies([op]):
        # Return the freshly-read value only when requested, else the op.
        return var._read_variable_op() if read_value else op  # pylint: disable=protected-access
  return assign_fn
class TPUVariableMixin(object):
"""Mixin for TPU variables."""
def __init__(self, *args, **kwargs):
super(TPUVariableMixin, self).__init__(*args, **kwargs)
# Handle ID is needed for `get_replicated_var_handle` to cache the variables
# correctly since in eager mode different variables can have the same name.
if ops.executing_eagerly_outside_functions():
self._handle_id = self._common_name + "_" + str(id(self._primary))
else:
self._handle_id = self._common_name
def __getattr__(self, name):
if enclosing_tpu_context() is None:
return super(TPUVariableMixin, self).__getattr__(name)
else:
raise AttributeError(
"'{}' not accessible within a TPU context.".format(name))
def get(self):
if enclosing_tpu_context() is None:
return super(TPUVariableMixin, self).get()
else:
raise NotImplementedError(
"`TPUVariableMixin.get()` is not supported within a TPU context.")
def _get_as_operand(self):
return self.read_value()
def _is_mirrored(self):
raise NotImplementedError(
"`TPUVariableMixin._is_mirrored()` must be implemented by subclasses.")
@property
def handle(self):
"""The handle by which this variable can be accessed."""
# If we're in a tpu.rewrite(), return the replicated handle.
tpu_context = enclosing_tpu_context()
if tpu_context is None or context.executing_eagerly():
return self._get_on_device_or_primary().handle
else:
is_packed = self._packed_var is not None
val = self._values
if is_packed:
val = [self._packed_var]
return tpu_context.get_replicated_var_handle(self._handle_id, val,
self._is_mirrored(),
is_packed)
@property
def device(self):
return self.handle.device
def _read_variable_op(self):
"""Reads the value of this variable."""
if self.trainable:
tape.variable_accessed(self)
handle = self.handle
if getattr(handle, "is_packed", False):
# Add a device scope for a packed variable handle.
with ops.device(self._get_on_device_or_primary().device):
return gen_resource_variable_ops.read_variable_op(handle, self.dtype)
else:
return gen_resource_variable_ops.read_variable_op(handle, self.dtype)
def read_value(self):
if enclosing_tpu_context() is None:
return super(TPUVariableMixin, self).read_value()
else:
return self._read_variable_op()
def value(self):
if enclosing_tpu_context() is None:
return super(TPUVariableMixin, self).value()
else:
return self._read_variable_op()
  def _as_graph_element(self):
    """Conversion hook used when the variable is captured as a graph element."""
    if enclosing_tpu_context() is None:
      return super(TPUVariableMixin, self)._as_graph_element()  # pylint: disable=protected-access
    else:
      # No single graph element exists inside a TPU replicate context.
      return None
  @property
  def op(self):
    """A `DistributedVarOp` describing the primary variable's op."""
    if values_util.is_saving_non_distributed():
      # When saving a non-distributed view, expose the primary op directly.
      return self._primary.op
    return values.DistributedVarOp(self._primary.op.name,
                                   self._primary.op.graph,
                                   self._primary.op.traceback,
                                   self._primary.op.type)
  def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    """Converts a variable to a tensor."""
    # pylint: disable=protected-access
    if enclosing_tpu_context() is None:
      return super(TPUVariableMixin, self)._dense_var_to_tensor(
          dtype=dtype, name=name, as_ref=as_ref)
    # pylint: enable=protected-access
    elif dtype is not None and dtype != self.dtype:
      # Cast on read when a different dtype is requested.
      return math_ops.cast(self.read_value(), dtype)
    else:
      return self.handle if as_ref else self.read_value()
def enclosing_tpu_context():
  """Finds the innermost `tpu.TPUReplicateContext`, or `None` if absent.

  Walks the chain of control-flow contexts of the current default graph and,
  failing that, of each enclosing outer graph (FuncGraphs created by defuns
  or v2 control flow), looking for an active TPU replicate context.
  """
  current_graph = ops.get_default_graph()
  while current_graph is not None:
    ctx = current_graph._get_control_flow_context()  # pylint: disable=protected-access
    while ctx is not None:
      if isinstance(ctx, tpu.TPUReplicateContext):
        return ctx
      ctx = ctx.outer_context
    # This may be a FuncGraph due to defuns or v2 control flow; continue the
    # search in the original graph holding the XLAControlFlowContext.
    current_graph = getattr(current_graph, "outer_graph", None)
  return None
class TPUDistributedVariable(TPUVariableMixin, values.DistributedVariable):
  """DistributedVariable subclass for TPUStrategy."""

  def _is_mirrored(self):
    """Returns whether replicas are kept in sync, per the variable's policy."""
    # Bug fix: the policy's answer was previously computed but not returned,
    # so this method always yielded None (falsy). Callers such as
    # `TPUVariableMixin.handle` consume the boolean result.
    return self._policy._is_mirrored()  # pylint: disable=protected-access

  def assign_sub(self, value, use_locking=False, name=None, read_value=True):
    """Delegates `assign_sub` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.assign_sub(value, use_locking, name, read_value)
    return self._policy.assign_sub(
        self, value, use_locking=use_locking, name=name, read_value=read_value)

  def assign_add(self, value, use_locking=False, name=None, read_value=True):
    """Delegates `assign_add` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.assign_add(value, use_locking, name, read_value)
    return self._policy.assign_add(
        self, value, use_locking=use_locking, name=name, read_value=read_value)

  def assign(self, value, use_locking=False, name=None, read_value=True):
    """Delegates `assign` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.assign(value, use_locking, name, read_value)
    return self._policy.assign(
        self, value, use_locking=use_locking, name=name, read_value=read_value)

  def scatter_sub(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_sub` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_sub(sparse_delta, use_locking, name)
    return self._policy.scatter_sub(
        self, sparse_delta, use_locking=use_locking, name=name)

  def scatter_add(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_add` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_add(sparse_delta, use_locking, name)
    return self._policy.scatter_add(
        self, sparse_delta, use_locking=use_locking, name=name)

  def scatter_mul(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_mul` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_mul(sparse_delta, use_locking, name)
    return self._policy.scatter_mul(
        self, sparse_delta, use_locking=use_locking, name=name)

  def scatter_div(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_div` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_div(sparse_delta, use_locking, name)
    return self._policy.scatter_div(
        self, sparse_delta, use_locking=use_locking, name=name)

  def scatter_min(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_min` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_min(sparse_delta, use_locking, name)
    return self._policy.scatter_min(
        self, sparse_delta, use_locking=use_locking, name=name)

  def scatter_max(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_max` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_max(sparse_delta, use_locking, name)
    return self._policy.scatter_max(
        self, sparse_delta, use_locking=use_locking, name=name)

  def scatter_update(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_update` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_update(sparse_delta, use_locking, name)
    return self._policy.scatter_update(
        self, sparse_delta, use_locking=use_locking, name=name)
class TPUMirroredVariable(TPUVariableMixin, values.MirroredVariable):
  """Holds a map from replica to TPU variables whose values are kept in sync."""

  def assign_sub(self, value, use_locking=False, name=None,
                 read_value=True):
    """Subtracts `value`; uses the raw handle op when no aggregation applies."""
    # Inside a TPU context with aggregation NONE the assign can go straight
    # to the replicated handle; otherwise use the generic distributed path.
    if (enclosing_tpu_context() and
        self.aggregation == variable_scope.VariableAggregation.NONE):
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_sub_variable_op)(
              self,
              value=value,
              use_locking=use_locking,
              name=name,
              read_value=read_value)
    return assign_sub(self, value, use_locking=use_locking, name=name,
                      read_value=read_value)

  def assign_add(self, value, use_locking=False, name=None,
                 read_value=True):
    """Adds `value`; uses the raw handle op when no aggregation applies."""
    if (enclosing_tpu_context() and
        self.aggregation == variable_scope.VariableAggregation.NONE):
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_add_variable_op)(
              self,
              value=value,
              use_locking=use_locking,
              name=name,
              read_value=read_value)
    return assign_add(self, value, use_locking=use_locking, name=name,
                      read_value=read_value)

  def assign(self, value, use_locking=False, name=None, read_value=True):
    """Assigns `value`; uses the raw handle op when no aggregation applies."""
    if (enclosing_tpu_context() and
        self.aggregation == variable_scope.VariableAggregation.NONE):
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_variable_op)(
              self,
              value=value,
              use_locking=use_locking,
              name=name,
              read_value=read_value)
    return assign(self, value, use_locking=use_locking, name=name,
                  read_value=read_value)

  # Scatter updates are only supported while saving a non-distributed view of
  # the variable; otherwise they are unimplemented for TPU mirrored variables.
  def scatter_sub(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_sub(*args, **kwargs)
    raise NotImplementedError

  def scatter_add(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_add(*args, **kwargs)
    raise NotImplementedError

  def scatter_max(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_max(*args, **kwargs)
    raise NotImplementedError

  def scatter_min(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_min(*args, **kwargs)
    raise NotImplementedError

  def scatter_mul(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_mul(*args, **kwargs)
    raise NotImplementedError

  def scatter_div(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_div(*args, **kwargs)
    raise NotImplementedError

  def scatter_update(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_update(*args, **kwargs)
    raise NotImplementedError

  def _is_mirrored(self):
    """Mirrored variables keep their replicas in sync."""
    return True
class TPUSyncOnReadVariable(TPUVariableMixin, values.SyncOnReadVariable):
  """Holds a map from replica to variables whose values are reduced on save."""

  def assign_sub(self, *args, **kwargs):
    """Subtracts from the variable; raw handle op inside a TPU context."""
    if enclosing_tpu_context() is None:
      return values.SyncOnReadVariable.assign_sub(self, *args, **kwargs)
    else:
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_sub_variable_op)(self, *args,
                                                            **kwargs)

  def assign_add(self, *args, **kwargs):
    """Adds to the variable; raw handle op inside a TPU context."""
    if enclosing_tpu_context() is None:
      return values.SyncOnReadVariable.assign_add(self, *args, **kwargs)
    else:
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_add_variable_op)(self, *args,
                                                            **kwargs)

  def assign(self, *args, **kwargs):
    """Assigns to the variable; raw handle op inside a TPU context."""
    if enclosing_tpu_context() is None:
      return values.SyncOnReadVariable.assign(self, *args, **kwargs)
    else:
      return _make_raw_assign_fn(gen_resource_variable_ops.assign_variable_op)(
          self, *args, **kwargs)

  def _is_mirrored(self):
    """Sync-on-read replicas are not kept in sync on write."""
    return False
# Common method between AutoPolicy, OnWrite and Mirrored variables.
def assign_sub(var, value, use_locking=False, name=None, read_value=True):
  """Subtracts `value` from `var` through its distributed `_update` machinery."""
  return var._update(  # pylint: disable=protected-access
      update_fn=_make_raw_assign_fn(
          gen_resource_variable_ops.assign_sub_variable_op),
      value=value,
      use_locking=use_locking,
      name=name,
      read_value=read_value)
def assign_add(var, value, use_locking=False, name=None, read_value=True):
  """Adds `value` to `var` through its distributed `_update` machinery."""
  return var._update(  # pylint: disable=protected-access
      update_fn=_make_raw_assign_fn(
          gen_resource_variable_ops.assign_add_variable_op),
      value=value,
      use_locking=use_locking,
      name=name,
      read_value=read_value)
def assign(var, value, use_locking=False, name=None, read_value=True):
  """Assigns `value` to `var` through its distributed `_update` machinery."""
  return var._update(  # pylint: disable=protected-access
      update_fn=_make_raw_assign_fn(
          gen_resource_variable_ops.assign_variable_op),
      value=value,
      use_locking=use_locking,
      name=name,
      read_value=read_value)
class TPUAutoPolicy(values.AutoPolicy):
  """Policy defined for `tf.VariableSynchronization.AUTO` synchronization.

  This policy is created when `synchronization` is set to
  `tf.VariableSynchronization.AUTO` and `aggregation` is set to
  `tf.VariableAggregation.NONE` when creating a `tf.Variable` in `tf.distribute`
  scope.
  """

  def assign_sub(self, var, value, use_locking=False, name=None,
                 read_value=True):
    """Subtracts `value` from `var`; raw handle op inside a TPU context."""
    if enclosing_tpu_context():
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_sub_variable_op)(
              var,
              value=value,
              use_locking=use_locking,
              name=name,
              read_value=read_value)
    return assign_sub(var, value, use_locking=use_locking, name=name,
                      read_value=read_value)

  def assign_add(self, var, value, use_locking=False, name=None,
                 read_value=True):
    """Adds `value` to `var`; raw handle op inside a TPU context."""
    if enclosing_tpu_context():
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_add_variable_op)(
              var,
              value=value,
              use_locking=use_locking,
              name=name,
              read_value=read_value)
    return assign_add(var, value, use_locking=use_locking, name=name,
                      read_value=read_value)

  def assign(self, var, value, use_locking=False, name=None, read_value=True):
    """Assigns `value` to `var`; raw handle op inside a TPU context."""
    if enclosing_tpu_context():
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_variable_op)(
              var,
              value=value,
              use_locking=use_locking,
              name=name,
              read_value=read_value)
    return assign(var, value, use_locking=use_locking, name=name,
                  read_value=read_value)

  # Scatter updates are not supported under this policy.
  def scatter_sub(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_add(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_max(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_min(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_mul(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_div(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_update(self, *args, **kwargs):
    raise NotImplementedError

  def _is_mirrored(self):
    """Variables under this policy behave as mirrored."""
    return True
class TPUOnWritePolicy(values.OnWritePolicy):
  """Policy defined for `tf.VariableSynchronization.ON_WRITE` synchronization.

  This policy is created when the following `synchronization` and
  `aggregation` parameters are specified when creating a `tf.Variable` in
  `tf.distribute` scope:
  * `synchronization` is equal to `tf.VariableSynchronization.AUTO` and
  aggregation can be any of the following `tf.VariableAggregation` enum
  values such as `SUM`, `MEAN` or `ONLY_FIRST_REPLICA`.
  * `synchronization` is equal to `tf.VariableSynchronization.ON_WRITE` and
  aggregation can be any of the following `tf.VariableAggregation` enum
  values such as `NONE`, `SUM`, `MEAN` or `ONLY_FIRST_REPLICA`.
  """

  def assign_sub(self, var, value, use_locking=False, name=None,
                 read_value=True):
    """Delegates to the module-level `assign_sub` update helper."""
    return assign_sub(var, value, use_locking=use_locking, name=name,
                      read_value=read_value)

  def assign_add(self, var, value, use_locking=False, name=None,
                 read_value=True):
    """Delegates to the module-level `assign_add` update helper."""
    return assign_add(var, value, use_locking=use_locking, name=name,
                      read_value=read_value)

  def assign(self, var, value, use_locking=False, name=None, read_value=True):
    """Delegates to the module-level `assign` update helper."""
    return assign(var, value, use_locking=use_locking, name=name,
                  read_value=read_value)

  # Scatter updates are not supported under this policy.
  def scatter_sub(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_add(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_max(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_min(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_mul(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_div(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_update(self, *args, **kwargs):
    raise NotImplementedError

  def _is_mirrored(self):
    """On-write variables behave as mirrored."""
    return True
class TPUOnReadPolicy(values.OnReadPolicy):
  """Policy defined for `tf.VariableSynchronization.ON_READ` synchronization.

  This policy is created when `synchronization` is set to
  `tf.VariableSynchronization.ON_READ` and `aggregation` is set to any of the
  values allowed by the `tf.VariableAggregation` enum such as `NONE`, `SUM`,
  `MEAN` or `ONLY_FIRST_REPLICA` when creating a `tf.Variable` in
  `tf.distribute` scope.
  """

  def assign_sub(self, var, *args, **kwargs):
    """Subtracts from `var`; raw handle op inside a TPU context."""
    if enclosing_tpu_context() is None:
      return super(TPUOnReadPolicy, self).assign_sub(var, *args, **kwargs)
    else:
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_sub_variable_op)(var, *args,
                                                            **kwargs)

  def assign_add(self, var, *args, **kwargs):
    """Adds to `var`; raw handle op inside a TPU context."""
    if enclosing_tpu_context() is None:
      return super(TPUOnReadPolicy, self).assign_add(var, *args, **kwargs)
    else:
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_add_variable_op)(var, *args,
                                                            **kwargs)

  def assign(self, var, *args, **kwargs):
    """Assigns to `var`; raw handle op inside a TPU context."""
    if enclosing_tpu_context() is None:
      return super(TPUOnReadPolicy, self).assign(var, *args, **kwargs)
    else:
      return _make_raw_assign_fn(gen_resource_variable_ops.assign_variable_op)(
          var, *args, **kwargs)

  def _is_mirrored(self):
    """On-read replicas are not kept in sync on write."""
    return False

  # Scatter updates are not supported under this policy.
  def scatter_sub(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_add(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_max(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_min(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_mul(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_div(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_update(self, *args, **kwargs):
    raise NotImplementedError
| 36.351171 | 112 | 0.699006 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.distribute import packed_distributed_variable as packed
from tensorflow.python.distribute import values
from tensorflow.python.distribute import values_util
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.tpu import tpu
@contextlib.contextmanager
def _maybe_enter_graph(tensor):
  """Enters `tensor.graph` unless eager / EagerTensor / default graph set."""
  # During eager execution we don't need to enter the tensor's graph; the same
  # holds when a default graph is already active.
  if (context.executing_eagerly() or isinstance(tensor, ops.EagerTensor) or
      ops.has_default_graph()):
    yield
  else:
    with tensor.graph.as_default():
      yield
@contextlib.contextmanager
def _maybe_on_device(var):
  """Scopes ops onto `var.device` for packed per-device variables."""
  # Add a device scope for packed variable handles; plain variables need none.
  if isinstance(var, packed.PackedVarAndDevice):
    with ops.device(var.device):
      yield
  else:
    yield
def _make_raw_assign_fn(raw_assign_fn):
  """Wraps a raw resource-variable assign op in a `Variable.assign*` signature.

  Args:
    raw_assign_fn: one of the `gen_resource_variable_ops.assign*_variable_op`
      functions that operates directly on a variable handle.

  Returns:
    A function accepting `(var, value, use_locking, name, read_value)` that
    applies `raw_assign_fn` and optionally reads back the updated value.
  """

  def assign_fn(var, value, use_locking=False, name=None, read_value=True):
    del use_locking  # Unused by the raw resource assign ops.
    handle = var.handle
    with _maybe_enter_graph(handle), _maybe_on_device(var):
      op = raw_assign_fn(
          handle,
          ops.convert_to_tensor(value, dtype=var.dtype),
          name=name)
      with ops.control_dependencies([op]):
        # Return the updated value when requested, otherwise the assign op.
        return var._read_variable_op() if read_value else op  # pylint: disable=protected-access
  return assign_fn
class TPUVariableMixin(object):
  """Mixin routing variable handle access and reads through a TPU context."""

  def __init__(self, *args, **kwargs):
    super(TPUVariableMixin, self).__init__(*args, **kwargs)

    # Handle ID is used by `get_replicated_var_handle` to cache the variables
    # correctly since in eager mode different variables can have the same name.
    if ops.executing_eagerly_outside_functions():
      self._handle_id = self._common_name + "_" + str(id(self._primary))
    else:
      self._handle_id = self._common_name

  def __getattr__(self, name):
    """Forwards attribute lookup outside a TPU context; raises inside one."""
    if enclosing_tpu_context() is None:
      return super(TPUVariableMixin, self).__getattr__(name)
    else:
      raise AttributeError(
          "'{}' not accessible within a TPU context.".format(name))

  def get(self):
    """Returns the per-replica value; unsupported inside a TPU context."""
    if enclosing_tpu_context() is None:
      return super(TPUVariableMixin, self).get()
    else:
      raise NotImplementedError(
          "`TPUVariableMixin.get()` is not supported within a TPU context.")

  def _get_as_operand(self):
    """Returns the variable's read value for use as an operand in math ops."""
    return self.read_value()

  def _is_mirrored(self):
    """Whether replicas are kept in sync; must be implemented by subclasses."""
    raise NotImplementedError(
        "`TPUVariableMixin._is_mirrored()` must be implemented by subclasses.")

  @property
  def handle(self):
    """The handle by which this variable can be accessed."""
    # If we're in a tpu.rewrite(), return the replicated handle.
    tpu_context = enclosing_tpu_context()
    if tpu_context is None or context.executing_eagerly():
      return self._get_on_device_or_primary().handle
    else:
      is_packed = self._packed_var is not None
      val = self._values
      if is_packed:
        # A packed variable carries a single handle covering all replicas.
        val = [self._packed_var]
      return tpu_context.get_replicated_var_handle(self._handle_id, val,
                                                   self._is_mirrored(),
                                                   is_packed)

  @property
  def device(self):
    """The device of this variable's handle."""
    return self.handle.device

  def _read_variable_op(self):
    """Reads the value of this variable via the (possibly packed) handle."""
    if self.trainable:
      # Record the read on the gradient tape so gradients reach the variable.
      tape.variable_accessed(self)

    handle = self.handle
    if getattr(handle, "is_packed", False):
      # Add a device scope for a packed variable handle.
      with ops.device(self._get_on_device_or_primary().device):
        return gen_resource_variable_ops.read_variable_op(handle, self.dtype)
    else:
      return gen_resource_variable_ops.read_variable_op(handle, self.dtype)

  def read_value(self):
    """Returns the value, reading via the replicated handle on TPU."""
    if enclosing_tpu_context() is None:
      return super(TPUVariableMixin, self).read_value()
    else:
      return self._read_variable_op()

  def value(self):
    """Returns the value; mirrors `read_value` inside a TPU context."""
    if enclosing_tpu_context() is None:
      return super(TPUVariableMixin, self).value()
    else:
      return self._read_variable_op()

  def _as_graph_element(self):
    """Conversion hook used when the variable is captured as a graph element."""
    if enclosing_tpu_context() is None:
      return super(TPUVariableMixin, self)._as_graph_element()  # pylint: disable=protected-access
    else:
      # No single graph element exists inside a TPU replicate context.
      return None

  @property
  def op(self):
    """A `DistributedVarOp` describing the primary variable's op."""
    if values_util.is_saving_non_distributed():
      # When saving a non-distributed view, expose the primary op directly.
      return self._primary.op
    return values.DistributedVarOp(self._primary.op.name,
                                   self._primary.op.graph,
                                   self._primary.op.traceback,
                                   self._primary.op.type)

  def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    """Converts a variable to a tensor."""
    # pylint: disable=protected-access
    if enclosing_tpu_context() is None:
      return super(TPUVariableMixin, self)._dense_var_to_tensor(
          dtype=dtype, name=name, as_ref=as_ref)
    # pylint: enable=protected-access
    elif dtype is not None and dtype != self.dtype:
      # Cast on read when a different dtype is requested.
      return math_ops.cast(self.read_value(), dtype)
    else:
      return self.handle if as_ref else self.read_value()
def enclosing_tpu_context():
  """Returns the TPUReplicateContext, which exists inside a tpu.rewrite()."""
  graph = ops.get_default_graph()
  while graph is not None:
    # pylint: disable=protected-access
    context_ = graph._get_control_flow_context()
    # pylint: enable=protected-access
    while context_ is not None:
      if isinstance(context_, tpu.TPUReplicateContext):
        return context_
      context_ = context_.outer_context
    # This may be a FuncGraph due to defuns or v2 control flow. We need to
    # find the original graph with the XLAControlFlowContext.
    graph = getattr(graph, "outer_graph", None)
  return None
class TPUDistributedVariable(TPUVariableMixin, values.DistributedVariable):
  """DistributedVariable subclass for TPUStrategy."""

  def _is_mirrored(self):
    """Returns whether replicas are kept in sync, per the variable's policy."""
    # Bug fix: the policy's answer was previously computed but not returned,
    # so this method always yielded None (falsy). Callers such as
    # `TPUVariableMixin.handle` consume the boolean result.
    return self._policy._is_mirrored()  # pylint: disable=protected-access

  def assign_sub(self, value, use_locking=False, name=None, read_value=True):
    """Delegates `assign_sub` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.assign_sub(value, use_locking, name, read_value)
    return self._policy.assign_sub(
        self, value, use_locking=use_locking, name=name, read_value=read_value)

  def assign_add(self, value, use_locking=False, name=None, read_value=True):
    """Delegates `assign_add` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.assign_add(value, use_locking, name, read_value)
    return self._policy.assign_add(
        self, value, use_locking=use_locking, name=name, read_value=read_value)

  def assign(self, value, use_locking=False, name=None, read_value=True):
    """Delegates `assign` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.assign(value, use_locking, name, read_value)
    return self._policy.assign(
        self, value, use_locking=use_locking, name=name, read_value=read_value)

  def scatter_sub(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_sub` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_sub(sparse_delta, use_locking, name)
    return self._policy.scatter_sub(
        self, sparse_delta, use_locking=use_locking, name=name)

  def scatter_add(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_add` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_add(sparse_delta, use_locking, name)
    return self._policy.scatter_add(
        self, sparse_delta, use_locking=use_locking, name=name)

  def scatter_mul(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_mul` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_mul(sparse_delta, use_locking, name)
    return self._policy.scatter_mul(
        self, sparse_delta, use_locking=use_locking, name=name)

  def scatter_div(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_div` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_div(sparse_delta, use_locking, name)
    return self._policy.scatter_div(
        self, sparse_delta, use_locking=use_locking, name=name)

  def scatter_min(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_min` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_min(sparse_delta, use_locking, name)
    return self._policy.scatter_min(
        self, sparse_delta, use_locking=use_locking, name=name)

  def scatter_max(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_max` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_max(sparse_delta, use_locking, name)
    return self._policy.scatter_max(
        self, sparse_delta, use_locking=use_locking, name=name)

  def scatter_update(self, sparse_delta, use_locking=False, name=None):
    """Delegates `scatter_update` to the policy (or the primary when saving)."""
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_update(sparse_delta, use_locking, name)
    return self._policy.scatter_update(
        self, sparse_delta, use_locking=use_locking, name=name)
class TPUMirroredVariable(TPUVariableMixin, values.MirroredVariable):
  """Holds a map from replica to TPU variables whose values are kept in sync."""

  def assign_sub(self, value, use_locking=False, name=None,
                 read_value=True):
    """Subtracts `value`; uses the raw handle op when no aggregation applies."""
    # Inside a TPU context with aggregation NONE the assign can go straight
    # to the replicated handle; otherwise use the generic distributed path.
    if (enclosing_tpu_context() and
        self.aggregation == variable_scope.VariableAggregation.NONE):
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_sub_variable_op)(
              self,
              value=value,
              use_locking=use_locking,
              name=name,
              read_value=read_value)
    return assign_sub(self, value, use_locking=use_locking, name=name,
                      read_value=read_value)

  def assign_add(self, value, use_locking=False, name=None,
                 read_value=True):
    """Adds `value`; uses the raw handle op when no aggregation applies."""
    if (enclosing_tpu_context() and
        self.aggregation == variable_scope.VariableAggregation.NONE):
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_add_variable_op)(
              self,
              value=value,
              use_locking=use_locking,
              name=name,
              read_value=read_value)
    return assign_add(self, value, use_locking=use_locking, name=name,
                      read_value=read_value)

  def assign(self, value, use_locking=False, name=None, read_value=True):
    """Assigns `value`; uses the raw handle op when no aggregation applies."""
    if (enclosing_tpu_context() and
        self.aggregation == variable_scope.VariableAggregation.NONE):
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_variable_op)(
              self,
              value=value,
              use_locking=use_locking,
              name=name,
              read_value=read_value)
    return assign(self, value, use_locking=use_locking, name=name,
                  read_value=read_value)

  # Scatter updates are only supported while saving a non-distributed view of
  # the variable; otherwise they are unimplemented for TPU mirrored variables.
  def scatter_sub(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_sub(*args, **kwargs)
    raise NotImplementedError

  def scatter_add(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_add(*args, **kwargs)
    raise NotImplementedError

  def scatter_max(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_max(*args, **kwargs)
    raise NotImplementedError

  def scatter_min(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_min(*args, **kwargs)
    raise NotImplementedError

  def scatter_mul(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_mul(*args, **kwargs)
    raise NotImplementedError

  def scatter_div(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_div(*args, **kwargs)
    raise NotImplementedError

  def scatter_update(self, *args, **kwargs):
    if values_util.is_saving_non_distributed():
      return self._primary.scatter_update(*args, **kwargs)
    raise NotImplementedError

  def _is_mirrored(self):
    """Mirrored variables keep their replicas in sync."""
    return True
class TPUSyncOnReadVariable(TPUVariableMixin, values.SyncOnReadVariable):
  """Holds a map from replica to variables whose values are reduced on save."""

  def assign_sub(self, *args, **kwargs):
    """Subtracts from the variable; raw handle op inside a TPU context."""
    if enclosing_tpu_context() is None:
      return values.SyncOnReadVariable.assign_sub(self, *args, **kwargs)
    else:
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_sub_variable_op)(self, *args,
                                                            **kwargs)

  def assign_add(self, *args, **kwargs):
    """Adds to the variable; raw handle op inside a TPU context."""
    if enclosing_tpu_context() is None:
      return values.SyncOnReadVariable.assign_add(self, *args, **kwargs)
    else:
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_add_variable_op)(self, *args,
                                                            **kwargs)

  def assign(self, *args, **kwargs):
    """Assigns to the variable; raw handle op inside a TPU context."""
    if enclosing_tpu_context() is None:
      return values.SyncOnReadVariable.assign(self, *args, **kwargs)
    else:
      return _make_raw_assign_fn(gen_resource_variable_ops.assign_variable_op)(
          self, *args, **kwargs)

  def _is_mirrored(self):
    """Sync-on-read replicas are not kept in sync on write."""
    return False
# Common method between AutoPolicy, OnWrite and Mirrored variables.
def assign_sub(var, value, use_locking=False, name=None, read_value=True):
  """Subtracts `value` from `var` through its distributed `_update` machinery."""
  assign_sub_fn = _make_raw_assign_fn(
      gen_resource_variable_ops.assign_sub_variable_op)
  return var._update(  # pylint: disable=protected-access
      update_fn=assign_sub_fn,
      value=value,
      use_locking=use_locking,
      name=name,
      read_value=read_value)
def assign_add(var, value, use_locking=False, name=None, read_value=True):
  """Adds `value` to `var` through its distributed `_update` machinery."""
  assign_add_fn = _make_raw_assign_fn(
      gen_resource_variable_ops.assign_add_variable_op)
  return var._update(  # pylint: disable=protected-access
      update_fn=assign_add_fn,
      value=value,
      use_locking=use_locking,
      name=name,
      read_value=read_value)
def assign(var, value, use_locking=False, name=None, read_value=True):
  """Assigns `value` to `var` through its distributed `_update` machinery."""
  assign_fn = _make_raw_assign_fn(
      gen_resource_variable_ops.assign_variable_op)
  return var._update(  # pylint: disable=protected-access
      update_fn=assign_fn,
      value=value,
      use_locking=use_locking,
      name=name,
      read_value=read_value)
class TPUAutoPolicy(values.AutoPolicy):
  """Policy for `tf.VariableSynchronization.AUTO` with aggregation `NONE`."""

  def assign_sub(self, var, value, use_locking=False, name=None,
                 read_value=True):
    """Subtracts `value` from `var`; raw handle op inside a TPU context."""
    if enclosing_tpu_context():
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_sub_variable_op)(
              var,
              value=value,
              use_locking=use_locking,
              name=name,
              read_value=read_value)
    return assign_sub(var, value, use_locking=use_locking, name=name,
                      read_value=read_value)

  def assign_add(self, var, value, use_locking=False, name=None,
                 read_value=True):
    """Adds `value` to `var`; raw handle op inside a TPU context."""
    if enclosing_tpu_context():
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_add_variable_op)(
              var,
              value=value,
              use_locking=use_locking,
              name=name,
              read_value=read_value)
    return assign_add(var, value, use_locking=use_locking, name=name,
                      read_value=read_value)

  def assign(self, var, value, use_locking=False, name=None, read_value=True):
    """Assigns `value` to `var`; raw handle op inside a TPU context."""
    if enclosing_tpu_context():
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_variable_op)(
              var,
              value=value,
              use_locking=use_locking,
              name=name,
              read_value=read_value)
    return assign(var, value, use_locking=use_locking, name=name,
                  read_value=read_value)

  # Scatter updates are not supported under this policy.
  def scatter_sub(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_add(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_max(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_min(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_mul(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_div(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_update(self, *args, **kwargs):
    raise NotImplementedError

  def _is_mirrored(self):
    """Variables under this policy behave as mirrored."""
    return True
class TPUOnWritePolicy(values.OnWritePolicy):
  """Policy for `tf.VariableSynchronization.ON_WRITE` synchronization."""

  def assign_sub(self, var, value, use_locking=False, name=None,
                 read_value=True):
    """Delegates to the module-level `assign_sub` update helper."""
    return assign_sub(var, value, use_locking=use_locking, name=name,
                      read_value=read_value)

  def assign_add(self, var, value, use_locking=False, name=None,
                 read_value=True):
    """Delegates to the module-level `assign_add` update helper."""
    return assign_add(var, value, use_locking=use_locking, name=name,
                      read_value=read_value)

  def assign(self, var, value, use_locking=False, name=None, read_value=True):
    """Delegates to the module-level `assign` update helper."""
    return assign(var, value, use_locking=use_locking, name=name,
                  read_value=read_value)

  # Scatter updates are not supported under this policy.
  def scatter_sub(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_add(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_max(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_min(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_mul(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_div(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_update(self, *args, **kwargs):
    raise NotImplementedError

  def _is_mirrored(self):
    """On-write variables behave as mirrored."""
    return True
class TPUOnReadPolicy(values.OnReadPolicy):
  """Policy for `tf.VariableSynchronization.ON_READ` synchronization."""

  def assign_sub(self, var, *args, **kwargs):
    """Subtracts from `var`; raw handle op inside a TPU context."""
    if enclosing_tpu_context() is None:
      return super(TPUOnReadPolicy, self).assign_sub(var, *args, **kwargs)
    else:
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_sub_variable_op)(var, *args,
                                                            **kwargs)

  def assign_add(self, var, *args, **kwargs):
    """Adds to `var`; raw handle op inside a TPU context."""
    if enclosing_tpu_context() is None:
      return super(TPUOnReadPolicy, self).assign_add(var, *args, **kwargs)
    else:
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_add_variable_op)(var, *args,
                                                            **kwargs)

  def assign(self, var, *args, **kwargs):
    """Assigns to `var`; raw handle op inside a TPU context."""
    if enclosing_tpu_context() is None:
      return super(TPUOnReadPolicy, self).assign(var, *args, **kwargs)
    else:
      return _make_raw_assign_fn(gen_resource_variable_ops.assign_variable_op)(
          var, *args, **kwargs)

  def _is_mirrored(self):
    """On-read replicas are not kept in sync on write."""
    return False

  # Scatter updates are not supported under this policy.
  def scatter_sub(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_add(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_max(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_min(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_mul(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_div(self, *args, **kwargs):
    raise NotImplementedError

  def scatter_update(self, *args, **kwargs):
    raise NotImplementedError
| true | true |
f734cb4823063b2767bd39361c8b9837fb410f99 | 2,294 | py | Python | Exercise-3/sensor_stick/src/sensor_stick/features.py | antoszy/RoboND-perception-exercises | 8c725e77316162ae485ccee94085fd2314be7ae0 | [
"MIT"
] | null | null | null | Exercise-3/sensor_stick/src/sensor_stick/features.py | antoszy/RoboND-perception-exercises | 8c725e77316162ae485ccee94085fd2314be7ae0 | [
"MIT"
] | null | null | null | Exercise-3/sensor_stick/src/sensor_stick/features.py | antoszy/RoboND-perception-exercises | 8c725e77316162ae485ccee94085fd2314be7ae0 | [
"MIT"
] | null | null | null | import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from pcl_helper import *
nbinscol = 32
nbinsnor = 20
def rgb_to_hsv(rgb_list):
    """Convert an [R, G, B] color with 0-255 components to normalized HSV.

    Returns a numpy array [h, s, v], each component in [0, 1], so callers
    can scale it elementwise (e.g. ``rgb_to_hsv(c) * 255``).
    """
    # Use the stdlib converter instead of building a 1x1 "image" and calling
    # matplotlib.colors.rgb_to_hsv for a single pixel.
    import colorsys
    r, g, b = (1.0 * c / 255 for c in rgb_list[:3])
    return np.array(colorsys.rgb_to_hsv(r, g, b))
def compute_color_histograms(cloud, using_hsv=False):
    """Build a normalized, concatenated 3-channel color histogram feature.

    Reads every point of `cloud` via pc2.read_points (the 4th field is
    assumed to pack the RGB color as a float -- see pcl_helper.float_to_rgb),
    optionally converts each color to HSV scaled to 0-255, histograms each
    channel with `nbinscol` bins over (0, 256), and returns the concatenated
    counts normalized to sum to 1.
    """
    # Collect the per-point colors, converting to HSV if requested.
    colors = []
    for point in pc2.read_points(cloud, skip_nans=True):
        rgb = float_to_rgb(point[3])
        colors.append(rgb_to_hsv(rgb) * 255 if using_hsv else rgb)

    # Split into the three channels and histogram each over 0-255.
    channels = ([c[0] for c in colors],
                [c[1] for c in colors],
                [c[2] for c in colors])
    counts = [np.histogram(ch, bins=nbinscol, range=(0, 256))[0]
              for ch in channels]

    # Concatenate and normalize so the feature sums to 1.
    features = np.concatenate(counts).astype(np.float64)
    return features / np.sum(features)
def compute_normal_histograms(normal_cloud):
    """Build a normalized histogram feature vector from surface normals.

    :param normal_cloud: ROS PointCloud2 message carrying normal_x/y/z fields
    :returns: concatenated, sum-to-one normalized per-axis histograms
    """
    norm_x_vals = []
    norm_y_vals = []
    norm_z_vals = []
    for norm_component in pc2.read_points(normal_cloud,
                                          field_names = ('normal_x', 'normal_y', 'normal_z'),
                                          skip_nans=True):
        norm_x_vals.append(norm_component[0])
        norm_y_vals.append(norm_component[1])
        norm_z_vals.append(norm_component[2])
    # BUG FIX: surface normals are unit vectors, so each component lies in
    # [-1, 1]. The previous range of (0, 256) collapsed essentially every
    # sample into a single bin, making the feature useless for training.
    hist_1 = np.histogram(norm_x_vals, bins = nbinsnor, range = (-1.0, 1.0))
    hist_2 = np.histogram(norm_y_vals, bins = nbinsnor, range = (-1.0, 1.0))
    hist_3 = np.histogram(norm_z_vals, bins = nbinsnor, range = (-1.0, 1.0))
    # Concatenate per-axis counts and normalize so the vector sums to one.
    features = np.concatenate((hist_1[0],hist_2[0],hist_3[0])).astype(np.float64)
    normed_features = features/np.sum(features)
    return normed_features
| 32.309859 | 84 | 0.725806 | import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from pcl_helper import *
nbinscol = 32
nbinsnor = 20
def rgb_to_hsv(rgb_list):
rgb_normalized = [1.0*rgb_list[0]/255, 1.0*rgb_list[1]/255, 1.0*rgb_list[2]/255]
hsv_normalized = matplotlib.colors.rgb_to_hsv([[rgb_normalized]])[0][0]
return hsv_normalized
def compute_color_histograms(cloud, using_hsv=False):
point_colors_list = []
for point in pc2.read_points(cloud, skip_nans=True):
rgb_list = float_to_rgb(point[3])
if using_hsv:
point_colors_list.append(rgb_to_hsv(rgb_list) * 255)
else:
point_colors_list.append(rgb_list)
channel_1_vals = []
channel_2_vals = []
channel_3_vals = []
for color in point_colors_list:
channel_1_vals.append(color[0])
channel_2_vals.append(color[1])
channel_3_vals.append(color[2])
hist_1 = np.histogram(channel_1_vals, bins = nbinscol, range = (0, 256))
hist_2 = np.histogram(channel_2_vals, bins = nbinscol, range = (0, 256))
hist_3 = np.histogram(channel_3_vals, bins = nbinscol, range = (0, 256))
features = np.concatenate((hist_1[0],hist_2[0],hist_3[0])).astype(np.float64)
normed_features = features/np.sum(features)
return normed_features
def compute_normal_histograms(normal_cloud):
norm_x_vals = []
norm_y_vals = []
norm_z_vals = []
for norm_component in pc2.read_points(normal_cloud,
field_names = ('normal_x', 'normal_y', 'normal_z'),
skip_nans=True):
norm_x_vals.append(norm_component[0])
norm_y_vals.append(norm_component[1])
norm_z_vals.append(norm_component[2])
hist_1 = np.histogram(norm_x_vals, bins = nbinsnor, range = (0, 256))
hist_2 = np.histogram(norm_y_vals, bins = nbinsnor, range = (0, 256))
hist_3 = np.histogram(norm_z_vals, bins = nbinsnor, range = (0, 256))
features = np.concatenate((hist_1[0],hist_2[0],hist_3[0])).astype(np.float64)
normed_features = features/np.sum(features)
return normed_features
| true | true |
f734cb94f1c618b92f07fe70c5d48199f6e127ba | 2,737 | py | Python | .Config/FslBuildGen/Xml/SubPackageSupportConfig.py | alejandrolozano2/OpenGL_DemoFramework | 5fd85f05c98cc3d0c0a68bac438035df8cabaee7 | [
"MIT",
"BSD-3-Clause"
] | 3 | 2019-01-19T20:21:24.000Z | 2021-08-10T02:11:32.000Z | .Config/FslBuildGen/Xml/SubPackageSupportConfig.py | alejandrolozano2/OpenGL_DemoFramework | 5fd85f05c98cc3d0c0a68bac438035df8cabaee7 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | .Config/FslBuildGen/Xml/SubPackageSupportConfig.py | alejandrolozano2/OpenGL_DemoFramework | 5fd85f05c98cc3d0c0a68bac438035df8cabaee7 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2021-08-10T02:11:33.000Z | 2021-08-10T02:11:33.000Z | #!/usr/bin/env python3
#****************************************************************************************************************************************************
# Copyright (c) 2014 Freescale Semiconductor, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Freescale Semiconductor, Inc. nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#****************************************************************************************************************************************************
from FslBuildGen.DataTypes import PackageType
from FslBuildGen.DataTypes import SubPackageSupport
class SubPackageSupportConfig(object):
    """Resolved sub-package policy for one package.

    Combines a package type with a SubPackageSupport setting and exposes
    whether sub-packages are permitted for that combination.
    """

    def __init__(self, packageType: int, subPackageSupport: int) -> None:
        super().__init__()
        self.Type = packageType  # type: int
        self.Support = subPackageSupport  # type: int
        self.AllowSubPackages = self.__AllowSubPackages(packageType, subPackageSupport)  # type: bool

    def __AllowSubPackages(self, packageType: int, subPackageSupport: int) -> bool:
        # Sub-packages are allowed unconditionally when enabled, or for
        # executables when support is restricted to executables only.
        if subPackageSupport == SubPackageSupport.Enabled:
            return True
        return (subPackageSupport == SubPackageSupport.ExecutableOnly and
                packageType == PackageType.Executable)
| 58.234043 | 162 | 0.656924 |
from FslBuildGen.DataTypes import PackageType
from FslBuildGen.DataTypes import SubPackageSupport
class SubPackageSupportConfig(object):
def __init__(self, packageType: int, subPackageSupport: int) -> None:
super(SubPackageSupportConfig, self).__init__()
self.Type = packageType
self.Support = subPackageSupport
self.AllowSubPackages = self.__AllowSubPackages(packageType, subPackageSupport)
def __AllowSubPackages(self, packageType: int, subPackageSupport: int) -> bool:
return subPackageSupport == SubPackageSupport.Enabled or (subPackageSupport == SubPackageSupport.ExecutableOnly and packageType == PackageType.Executable)
| true | true |
f734cd4cfbc6e0bc534a4349136cc39749e30092 | 8,442 | py | Python | magnum/objects/x509keypair.py | mjbrewer/testIndex | 420dc071d4240a89b6f266e8d2575cedb39bfea0 | [
"Apache-2.0"
] | null | null | null | magnum/objects/x509keypair.py | mjbrewer/testIndex | 420dc071d4240a89b6f266e8d2575cedb39bfea0 | [
"Apache-2.0"
] | null | null | null | magnum/objects/x509keypair.py | mjbrewer/testIndex | 420dc071d4240a89b6f266e8d2575cedb39bfea0 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import fields
from magnum.common import exception
from magnum.common import utils
from magnum.db import api as dbapi
from magnum.objects import base
@base.MagnumObjectRegistry.register
class X509KeyPair(base.MagnumPersistentObject, base.MagnumObject,
                  base.MagnumObjectDictCompat):
    """Versioned object wrapping an X509 keypair database record.

    Provides the usual CRUD entry points (get/list/create/destroy/save/
    refresh); remotable methods may be proxied through the indirection API.
    """
    # Version 1.0: Initial version
    VERSION = '1.0'
    # Shared DB-API handle used by all class-level query helpers below.
    dbapi = dbapi.get_instance()
    # Declares the persisted columns and their oslo.versionedobjects types.
    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(nullable=True),
        'name': fields.StringField(nullable=True),
        'bay_uuid': fields.StringField(nullable=True),
        'ca_cert': fields.StringField(nullable=True),
        'certificate': fields.StringField(nullable=True),
        'private_key': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
    }
    @staticmethod
    def _from_db_object(x509keypair, db_x509keypair):
        """Copy every declared field from a DB entity onto *x509keypair*.

        Resets the change tracker so the object is considered clean.
        """
        for field in x509keypair.fields:
            x509keypair[field] = db_x509keypair[field]
        x509keypair.obj_reset_changes()
        return x509keypair
    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Convert a list of DB entities to a list of formal objects."""
        return [X509KeyPair._from_db_object(cls(context), obj)
                for obj in db_objects]
    @base.remotable_classmethod
    def get(cls, context, x509keypair_id):
        """Find an x509keypair by integer id or uuid.

        :param x509keypair_id: the id *or* uuid of a x509keypair.
        :raises: InvalidIdentity when the argument is neither form.
        :returns: a :class:`X509KeyPair` object.
        """
        # Dispatch on the identifier's shape; anything else is rejected.
        if utils.is_int_like(x509keypair_id):
            return cls.get_by_id(context, x509keypair_id)
        elif utils.is_uuid_like(x509keypair_id):
            return cls.get_by_uuid(context, x509keypair_id)
        else:
            raise exception.InvalidIdentity(identity=x509keypair_id)
    @base.remotable_classmethod
    def get_by_id(cls, context, x509keypair_id):
        """Find an x509keypair by its integer id.

        :param x509keypair_id: the id of a x509keypair.
        :returns: a :class:`X509KeyPair` object.
        """
        db_x509keypair = cls.dbapi.get_x509keypair_by_id(context,
                                                         x509keypair_id)
        x509keypair = X509KeyPair._from_db_object(cls(context), db_x509keypair)
        return x509keypair
    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find an x509keypair by its uuid.

        :param uuid: the uuid of a x509keypair.
        :param context: Security context
        :returns: a :class:`X509KeyPair` object.
        """
        db_x509keypair = cls.dbapi.get_x509keypair_by_uuid(context, uuid)
        x509keypair = X509KeyPair._from_db_object(cls(context), db_x509keypair)
        return x509keypair
    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find an x509keypair by its logical name.

        :param name: the logical name of a x509keypair.
        :param context: Security context
        :returns: a :class:`X509KeyPair` object.
        """
        db_x509keypair = cls.dbapi.get_x509keypair_by_name(context, name)
        x509keypair = X509KeyPair._from_db_object(cls(context), db_x509keypair)
        return x509keypair
    @base.remotable_classmethod
    def list(cls, context, limit=None, marker=None,
             sort_key=None, sort_dir=None, filters=None):
        """Return a page of X509KeyPair objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filter dict forwarded verbatim to the DB layer.
        :returns: a list of :class:`X509KeyPair` object.
        """
        db_x509keypairs = cls.dbapi.get_x509keypair_list(context, limit=limit,
                                                         marker=marker,
                                                         sort_key=sort_key,
                                                         sort_dir=sort_dir,
                                                         filters=filters)
        return X509KeyPair._from_db_object_list(db_x509keypairs, cls, context)
    @base.remotable
    def create(self, context=None):
        """Persist this X509KeyPair's pending changes as a new DB record.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api;
                        set the context when instantiating instead,
                        e.g.: X509KeyPair(context)
        """
        values = self.obj_get_changes()
        db_x509keypair = self.dbapi.create_x509keypair(values)
        # Re-hydrate from the stored row so DB-generated fields (id, ...)
        # are reflected on this object.
        self._from_db_object(self, db_x509keypair)
    @base.remotable
    def destroy(self, context=None):
        """Delete this X509KeyPair from the DB (keyed by uuid).

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api;
                        set the context when instantiating instead,
                        e.g.: X509KeyPair(context)
        """
        self.dbapi.destroy_x509keypair(self.uuid)
        self.obj_reset_changes()
    @base.remotable
    def save(self, context=None):
        """Write this object's changed fields back to the DB.

        Updates are made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api;
                        set the context when instantiating instead,
                        e.g.: X509KeyPair(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_x509keypair(self.uuid, updates)
        self.obj_reset_changes()
    @base.remotable
    def refresh(self, context=None):
        """Reload this object's fields from the DB.

        Loads the x509keypair with the same uuid from the database and
        applies any attributes that differ, field by field.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api;
                        set the context when instantiating instead,
                        e.g.: X509KeyPair(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            # Only copy fields that are set and actually changed.
            if self.obj_attr_is_set(field) and self[field] != current[field]:
                self[field] = current[field]
| 41.586207 | 85 | 0.622957 |
from oslo_versionedobjects import fields
from magnum.common import exception
from magnum.common import utils
from magnum.db import api as dbapi
from magnum.objects import base
@base.MagnumObjectRegistry.register
class X509KeyPair(base.MagnumPersistentObject, base.MagnumObject,
base.MagnumObjectDictCompat):
VERSION = '1.0'
dbapi = dbapi.get_instance()
fields = {
'id': fields.IntegerField(),
'uuid': fields.UUIDField(nullable=True),
'name': fields.StringField(nullable=True),
'bay_uuid': fields.StringField(nullable=True),
'ca_cert': fields.StringField(nullable=True),
'certificate': fields.StringField(nullable=True),
'private_key': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(x509keypair, db_x509keypair):
for field in x509keypair.fields:
x509keypair[field] = db_x509keypair[field]
x509keypair.obj_reset_changes()
return x509keypair
@staticmethod
def _from_db_object_list(db_objects, cls, context):
return [X509KeyPair._from_db_object(cls(context), obj)
for obj in db_objects]
@base.remotable_classmethod
def get(cls, context, x509keypair_id):
if utils.is_int_like(x509keypair_id):
return cls.get_by_id(context, x509keypair_id)
elif utils.is_uuid_like(x509keypair_id):
return cls.get_by_uuid(context, x509keypair_id)
else:
raise exception.InvalidIdentity(identity=x509keypair_id)
@base.remotable_classmethod
def get_by_id(cls, context, x509keypair_id):
db_x509keypair = cls.dbapi.get_x509keypair_by_id(context,
x509keypair_id)
x509keypair = X509KeyPair._from_db_object(cls(context), db_x509keypair)
return x509keypair
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
db_x509keypair = cls.dbapi.get_x509keypair_by_uuid(context, uuid)
x509keypair = X509KeyPair._from_db_object(cls(context), db_x509keypair)
return x509keypair
@base.remotable_classmethod
def get_by_name(cls, context, name):
db_x509keypair = cls.dbapi.get_x509keypair_by_name(context, name)
x509keypair = X509KeyPair._from_db_object(cls(context), db_x509keypair)
return x509keypair
@base.remotable_classmethod
def list(cls, context, limit=None, marker=None,
sort_key=None, sort_dir=None, filters=None):
db_x509keypairs = cls.dbapi.get_x509keypair_list(context, limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir,
filters=filters)
return X509KeyPair._from_db_object_list(db_x509keypairs, cls, context)
@base.remotable
def create(self, context=None):
values = self.obj_get_changes()
db_x509keypair = self.dbapi.create_x509keypair(values)
self._from_db_object(self, db_x509keypair)
@base.remotable
def destroy(self, context=None):
self.dbapi.destroy_x509keypair(self.uuid)
self.obj_reset_changes()
@base.remotable
def save(self, context=None):
updates = self.obj_get_changes()
self.dbapi.update_x509keypair(self.uuid, updates)
self.obj_reset_changes()
@base.remotable
def refresh(self, context=None):
current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
for field in self.fields:
if self.obj_attr_is_set(field) and self[field] != current[field]:
self[field] = current[field]
| true | true |
f734cdbc4784f67ad314325d7468e7791ca4a293 | 2,093 | py | Python | modules/cluster_xyz.py | fonsecag/TBD | d4cb9dec96e753708950e223f921a29ab7bcb9dd | [
"MIT"
] | 6 | 2021-03-04T06:58:11.000Z | 2022-01-30T02:28:48.000Z | modules/cluster_xyz.py | fonsecag/TBD | d4cb9dec96e753708950e223f921a29ab7bcb9dd | [
"MIT"
] | null | null | null | modules/cluster_xyz.py | fonsecag/TBD | d4cb9dec96e753708950e223f921a29ab7bcb9dd | [
"MIT"
] | null | null | null | from run import MainHandler
from .cluster import ClusterHandler
from util import *
class ClusterXYZHandler(ClusterHandler):
    """Cluster handler that additionally exports each cluster as an .xyz file."""

    def __init__(self, args, **kwargs):
        super().__init__(args, **kwargs)
        # Total stage count includes the extra xyz-export substage.
        self.n_stages = self.n_main_stages + self.n_substages

    n_substages = ClusterHandler.n_substages  # one more than ClusterHandler

    def run_command(self):
        super().run_command()

    def save_cluster_xyz(self):
        """Write one .xyz file per cluster into <storage_dir>/cluster_xyz."""
        # Element labels for the system, resolved via the parameter table.
        z = self.call_para("R_to_xyz", "z", args=[self, self.dataset])
        self.z = z
        dir_path = os.path.join(self.storage_dir, "cluster_xyz")
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists()/os.mkdir() pair.
        os.makedirs(dir_path, exist_ok=True)
        var_index_R = self.call_para("R_to_xyz", "var_index_R")
        R = self.vars[var_index_R]
        var_index_F = self.call_para("R_to_xyz", "var_index_F")
        F = self.vars[var_index_F]
        var_index_E = self.call_para("R_to_xyz", "var_index_E")
        E = self.vars[var_index_E]
        cl_ind = self.cluster_indices
        for i in range(len(cl_ind)):
            cl = np.array(cl_ind[i], dtype=np.int64)
            self.save_xyz_index(i, R[cl], F[cl], E[cl])

    def save_xyz_index(self, i, R, F, E):
        """Write cluster *i* (positions R, forces F, energies E) to disk."""
        file_name = f"cluster_{i}.xyz"
        path = os.path.join(self.storage_dir, "cluster_xyz", file_name)
        # BUG FIX: use a context manager so the handle is closed even if
        # formatting raises; the previous open()/close() pair leaked the
        # file descriptor on error.
        with open(path, "w+") as file:
            for j in range(len(R)):
                file.write(self.RFE_to_xyz_single(R[j], F[j], E[j]))

    # Energy=-620726.002662 Properties=species:S:1:pos:R:3:forces:R:3
    def RFE_to_xyz_single(self, R, F, E):
        """Format one frame as xyz text: atom count, energy, then one
        "element x y z fx fy fz" line per atom."""
        z = self.z
        s = f"{len(z)}\n"
        s += f"{E[0]:.5e}\n"
        # R and F are flat arrays with 3 components per atom.
        for i in range(0, len(R), 3):
            s += f"{z[i//3]:<3}{R[i]:<13.5e}{R[i+1]:<13.5e}{R[i+2]:<13.5e}"
            s += f"{F[i]:<13.5e}{F[i+1]:<13.5e}{F[i+2]:<13.5e}\n"
        return s

    def save_command(self):
        super().save_command()
        from time import time
        t0 = time()
        self.save_cluster_xyz()
        print(f"Took {time() - t0} seconds")
| 29.9 | 77 | 0.572862 | from run import MainHandler
from .cluster import ClusterHandler
from util import *
class ClusterXYZHandler(ClusterHandler):
def __init__(self, args, **kwargs):
super().__init__(args, **kwargs)
self.n_stages = self.n_main_stages + self.n_substages
n_substages = ClusterHandler.n_substages
def run_command(self):
super().run_command()
def save_cluster_xyz(self):
z = self.call_para("R_to_xyz", "z", args=[self, self.dataset])
self.z = z
dir_path = os.path.join(self.storage_dir, "cluster_xyz")
if not os.path.exists(dir_path):
os.mkdir(dir_path)
var_index_R = self.call_para("R_to_xyz", "var_index_R")
R = self.vars[var_index_R]
var_index_F = self.call_para("R_to_xyz", "var_index_F")
F = self.vars[var_index_F]
var_index_E = self.call_para("R_to_xyz", "var_index_E")
E = self.vars[var_index_E]
cl_ind = self.cluster_indices
for i in range(len(cl_ind)):
cl = np.array(cl_ind[i], dtype=np.int64)
self.save_xyz_index(i, R[cl], F[cl], E[cl])
def save_xyz_index(self, i, R, F, E):
file_name = f"cluster_{i}.xyz"
path = os.path.join(self.storage_dir, "cluster_xyz", file_name)
file = open(path, "w+")
for j in range(len(R)):
r_j, f_j, e_j = R[j], F[j], E[j]
s = self.RFE_to_xyz_single(r_j, f_j, e_j)
file.write(s)
file.close()
def RFE_to_xyz_single(self, R, F, E):
z = self.z
s = f"{len(z)}\n"
s += f"{E[0]:.5e}\n"
for i in range(0, len(R), 3):
s += f"{z[i//3]:<3}{R[i]:<13.5e}{R[i+1]:<13.5e}{R[i+2]:<13.5e}"
s += f"{F[i]:<13.5e}{F[i+1]:<13.5e}{F[i+2]:<13.5e}\n"
return s
def save_command(self):
super().save_command()
from time import time
t0 = time()
self.save_cluster_xyz()
print(f"Took {time() - t0} seconds")
| true | true |
f734ce2104b99a837e3511b1075b8cf719ab35b6 | 482 | py | Python | pyxrf/db_config/hxn_db_config.py | andrewmkiss/PyXRF | 61de2029c255f77279ba5bc3896107c1a2e4212f | [
"BSD-3-Clause"
] | 19 | 2016-05-25T21:40:41.000Z | 2022-01-19T01:58:15.000Z | pyxrf/db_config/hxn_db_config.py | andrewmkiss/PyXRF | 61de2029c255f77279ba5bc3896107c1a2e4212f | [
"BSD-3-Clause"
] | 90 | 2016-01-11T17:22:05.000Z | 2021-12-02T15:59:58.000Z | pyxrf/db_config/hxn_db_config.py | andrewmkiss/PyXRF | 61de2029c255f77279ba5bc3896107c1a2e4212f | [
"BSD-3-Clause"
] | 22 | 2016-10-16T17:19:19.000Z | 2022-02-18T21:45:08.000Z | try:
from databroker.v0 import Broker
except ModuleNotFoundError:
from databroker import Broker
from hxntools.handlers.xspress3 import Xspress3HDF5Handler
from hxntools.handlers.timepix import TimepixHDF5Handler
db = Broker.named("hxn")
# db_analysis = Broker.named('hxn_analysis')
db.reg.register_handler(Xspress3HDF5Handler.HANDLER_NAME, Xspress3HDF5Handler, overwrite=True)
db.reg.register_handler(TimepixHDF5Handler._handler_name, TimepixHDF5Handler, overwrite=True)
| 34.428571 | 94 | 0.8361 | try:
from databroker.v0 import Broker
except ModuleNotFoundError:
from databroker import Broker
from hxntools.handlers.xspress3 import Xspress3HDF5Handler
from hxntools.handlers.timepix import TimepixHDF5Handler
db = Broker.named("hxn")
db.reg.register_handler(Xspress3HDF5Handler.HANDLER_NAME, Xspress3HDF5Handler, overwrite=True)
db.reg.register_handler(TimepixHDF5Handler._handler_name, TimepixHDF5Handler, overwrite=True)
| true | true |
f734ce5df7ac9a87c9b31d2c6162e94261ab7b86 | 1,031 | bzl | Python | examples/test.bzl | Aghassi/rules_nodejs | 3eb42603c440f7e8496f2e6812337eb47827ff6a | [
"Apache-2.0"
] | 1 | 2021-06-20T18:37:14.000Z | 2021-06-20T18:37:14.000Z | examples/test.bzl | Aghassi/rules_nodejs | 3eb42603c440f7e8496f2e6812337eb47827ff6a | [
"Apache-2.0"
] | 12 | 2020-04-06T21:50:34.000Z | 2022-03-25T18:06:41.000Z | examples/test.bzl | Aghassi/rules_nodejs | 3eb42603c440f7e8496f2e6812337eb47827ff6a | [
"Apache-2.0"
] | 1 | 2018-03-07T13:48:39.000Z | 2018-03-07T13:48:39.000Z | "Define a convenience macro for examples integration testing"
load("@build_bazel_rules_nodejs//internal/bazel_integration_test:bazel_integration_test.bzl", "rules_nodejs_integration_test")
load("//:tools/defaults.bzl", "codeowners")
def example_integration_test(name, owners = [], **kwargs):
    "Set defaults for the bazel_integration_test common to our examples"

    # Example directory name, derived by stripping the "examples_" prefix.
    dirname = name[len("examples_"):]

    # Bundle every file in the example directory (except node_modules)
    # so the integration test can copy it into a scratch workspace.
    native.filegroup(
        name = "_%s_sources" % name,
        srcs = native.glob(
            [
                "%s/*" % dirname,
                "%s/**/*" % dirname,
            ],
            exclude = ["%s/node_modules/**" % dirname],
        ),
    )

    # Optionally generate a CODEOWNERS entry covering this example.
    if len(owners):
        codeowners(
            name = "OWNERS." + name,
            teams = owners,
            pattern = dirname + "/**",
        )

    # All examples get the "examples" tag; callers may add more via kwargs.
    rules_nodejs_integration_test(
        name = name,
        tags = kwargs.pop("tags", []) + ["examples"],
        workspace_files = kwargs.pop("workspace_files", "_%s_sources" % name),
        **kwargs
    )
| 33.258065 | 126 | 0.57129 |
load("@build_bazel_rules_nodejs//internal/bazel_integration_test:bazel_integration_test.bzl", "rules_nodejs_integration_test")
load("//:tools/defaults.bzl", "codeowners")
def example_integration_test(name, owners = [], **kwargs):
dirname = name[len("examples_"):]
native.filegroup(
name = "_%s_sources" % name,
srcs = native.glob(
[
"%s/*" % dirname,
"%s/**/*" % dirname,
],
exclude = ["%s/node_modules/**" % dirname],
),
)
if len(owners):
codeowners(
name = "OWNERS." + name,
teams = owners,
pattern = dirname + "/**",
)
rules_nodejs_integration_test(
name = name,
tags = kwargs.pop("tags", []) + ["examples"],
workspace_files = kwargs.pop("workspace_files", "_%s_sources" % name),
**kwargs
)
| true | true |
f734ce7aa3455146cd42a264d89384c7c91362c6 | 701 | py | Python | send_sms.py | bthaman/noaa_precip | 7c1b33760584fb4591e10eeb59e5904df48a17d0 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | send_sms.py | bthaman/noaa_precip | 7c1b33760584fb4591e10eeb59e5904df48a17d0 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | send_sms.py | bthaman/noaa_precip | 7c1b33760584fb4591e10eeb59e5904df48a17d0 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | # we import the Twilio client from the dependency we just installed
# from twilio.rest import TwilioRestClient
from twilio.rest import Client
def send_text(message):
    """Send *message* as an SMS through Twilio.

    Credentials are read from the TWILIO_ACCOUNT_SID / TWILIO_AUTH_TOKEN
    environment variables when set, falling back to the previous values
    for backward compatibility.
    """
    import os
    # SECURITY: the account SID and auth token below were committed to
    # source control; they should be rotated and supplied only via the
    # environment. The fallbacks exist solely to preserve old behavior.
    account_sid = os.environ.get("TWILIO_ACCOUNT_SID",
                                 "AC3e84e9cae2390af9a661c1ab35955444")
    auth_token = os.environ.get("TWILIO_AUTH_TOKEN",
                                "4a8bf26cb30107ec85d98f6bf1182522")
    client = Client(account_sid, auth_token)
    # "from_" must be a Twilio-owned number; "to" is the recipient.
    client.messages.create(to="+15129146948", from_="+17372105122",
                           body=message)


if __name__ == '__main__':
    send_text('Hello, this is a test.')
from twilio.rest import Client
def send_text(message):
client = Client("AC3e84e9cae2390af9a661c1ab35955444", "4a8bf26cb30107ec85d98f6bf1182522")
client.messages.create(to="+15129146948", from_="+17372105122",
body=message)
if __name__ == '__main__':
send_text('Hello, this is a test.') | true | true |
f734cefe7c40ffddafb7ca7ca8a4bd256d52efd5 | 840 | py | Python | tests/v1/test_event_alert_type.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | tests/v1/test_event_alert_type.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | tests/v1/test_event_alert_type.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import absolute_import
import sys
import unittest
import datadog_api_client.v1
from datadog_api_client.v1.model.event_alert_type import EventAlertType
class TestEventAlertType(unittest.TestCase):
    """Unit test stubs for the EventAlertType model."""

    def setUp(self):
        """No fixtures are required for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testEventAlertType(self):
        """Test EventAlertType."""
        # FIXME: construct object with mandatory attributes with example values
        # model = EventAlertType()  # noqa: E501
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 24.705882 | 108 | 0.72381 |
from __future__ import absolute_import
import sys
import unittest
import datadog_api_client.v1
from datadog_api_client.v1.model.event_alert_type import EventAlertType
class TestEventAlertType(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testEventAlertType(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
f734cfda1c639c31c88584c85e5043ba791d021a | 3,000 | py | Python | english/data_processing/lessons/code/vslide1.py | hrutkabence/tutorials | bd76294860804aee8ecda5e1445464506bf02ee0 | [
"CC0-1.0"
] | null | null | null | english/data_processing/lessons/code/vslide1.py | hrutkabence/tutorials | bd76294860804aee8ecda5e1445464506bf02ee0 | [
"CC0-1.0"
] | null | null | null | english/data_processing/lessons/code/vslide1.py | hrutkabence/tutorials | bd76294860804aee8ecda5e1445464506bf02ee0 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from math import hypot, atan2, sin, cos, pi, degrees
import numpy as np
from matplotlib import pyplot as plt
def vplain(x1, y1, x2, y2):
    """Return the normalized implicit equation of the line through two points.

    The coefficients satisfy vp[0] * x + vp[1] * y + vp[2] = 0 and are
    scaled so that hypot(vp[0], vp[1]) == 1, which makes the dot product
    with a homogeneous point equal to its signed distance from the line.

    x1, y1 - horizontal coordinates of the start point of the section
    x2, y2 - horizontal coordinates of the end point of the section
    returns a numpy array with coefficients of the vertical plane
    """
    a = y1 - y2
    b = x2 - x1
    c = x1 * y2 - x2 * y1
    # Normalize so (a, b) is a unit normal vector.
    return np.array([a, b, c]) / hypot(a, b)
def section(pc, x1, y1, x2, y2, tol):
    """Select the points of a cloud lying close to a vertical section plane.

    pc - point cloud in a numpy array
    x1, y1 - horizontal coordinates of the start point of the section
    x2, y2 - horizontal coordinates of the end point of the section
    tol - tolerance distance from the section
    returns a numpy array with points near to the section
    """
    # Homogeneous copy: third column set to 1 so the dot product with the
    # normalized line equation yields the signed horizontal distance.
    homog = pc.copy()
    homog[:, 2] = 1
    plane = vplain(x1, y1, x2, y2)
    dist = np.abs(homog @ plane)
    return pc[dist < tol]
def tr(e1, n1, e2, n2):
    """Return the homogeneous transform that maps the section line onto
    the x axis (translate start point to the origin, then rotate).

    Parameters:
        e1, n1 - start point of the section line
        e2, n2 - end point of the section section line
    returns the transformation matrix
    """
    azimuth = atan2(n2 - n1, e2 - e1)
    ca, sa = cos(azimuth), sin(azimuth)
    # Row-vector convention: point @ translate @ rotate.
    translate = np.array([[1, 0, 0],
                          [0, 1, 0],
                          [-e1, -n1, 1]])
    rotate = np.array([[ca, -sa, 0],
                       [sa, ca, 0],
                       [0, 0, 1]])
    return translate @ rotate
if __name__ == "__main__":
    # Usage: vslide1.py <cloud.csv> <x1> <y1> <x2> <y2> <tol>
    # With fewer than 6 arguments, fall back to a built-in demo dataset.
    if len(sys.argv) < 7:
        pc = np.loadtxt('lidar.txt', delimiter=',') ;# load point cloud
        x1 = 548060.0
        y1 = 5129130.0
        x2 = 549850.0
        y2 = 5129030.0
        #x1 = 549400
        #y1 = 5128900
        #x2 = 549200
        #y2 = 5129300
        tol = 1.0
    else:
        pc = np.loadtxt(sys.argv[1], delimiter=',') ;# load point cloud
        x1 = float(sys.argv[2])
        y1 = float(sys.argv[3])
        x2 = float(sys.argv[4])
        y2 = float(sys.argv[5])
        tol = float(sys.argv[6])
    # set up equation for vertical plain a * x + b * y + c = 0
    vp = vplain(x1, y1, x2, y2)
    # keep only the points within tol of the section plane
    sec = section(pc,x1,y1,x2,y2,tol)
    # transformation matrix
    trm = tr(x1, y1, x2, y2)
    # sanity check: both endpoints must map onto the x axis (y ~ 0)
    if abs(np.dot(np.array([x1, y1, 1]), trm)[1]) > 1e-5 or \
       abs(np.dot(np.array([x2, y2, 1]), trm)[1]) > 1e-5:
        print("tr error")
    # make a copy of section points for homogenous transformation
    pc1 = sec.copy()
    pc1[:, 2] = 1
    pc1 = np.dot(pc1, trm) # rotate points into the section plain
    pc1[:, 2] = sec[:, 2] # copy back elevations to transformed points
    # plot chainage vs elevation of the selected points
    plt.plot(pc1[:,0], pc1[:,2], 'o')
    plt.xlabel('chainage (m)')
    plt.ylabel('elevation (m)')
    plt.axis('equal')
    plt.grid('on')
    plt.show()
| 30.927835 | 80 | 0.576667 |
import sys
from math import hypot, atan2, sin, cos, pi, degrees
import numpy as np
from matplotlib import pyplot as plt
def vplain(x1, y1, x2, y2):
vp = np.zeros((3,))
vp[0] = y1 - y2
vp[1] = x2 - x1
vp[2] = x1 * y2 - x2 * y1
vp = vp / hypot(vp[0], vp[1])
return vp
def section(pc, x1, y1, x2, y2, tol):
pc1 = pc.copy()
pc1[:, 2] = 1
vp = vplain(x1, y1, x2, y2)
sec = pc[np.abs(np.dot(pc1, vp)) < tol]
return sec
def tr(e1, n1, e2, n2):
de = e2 - e1
dn = n2 - n1
a = atan2(dn, de)
ca = cos(a)
sa = sin(a)
return np.dot(np.array([[1, 0, 0], [0, 1, 0], [-e1, -n1, 1]]),
np.array([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1]]))
if __name__ == "__main__":
if len(sys.argv) < 7:
pc = np.loadtxt('lidar.txt', delimiter=',') ;
x1 = 548060.0
y1 = 5129130.0
x2 = 549850.0
y2 = 5129030.0
tol = 1.0
else:
pc = np.loadtxt(sys.argv[1], delimiter=',') ;
x1 = float(sys.argv[2])
y1 = float(sys.argv[3])
x2 = float(sys.argv[4])
y2 = float(sys.argv[5])
tol = float(sys.argv[6])
vp = vplain(x1, y1, x2, y2)
sec = section(pc,x1,y1,x2,y2,tol)
trm = tr(x1, y1, x2, y2)
if abs(np.dot(np.array([x1, y1, 1]), trm)[1]) > 1e-5 or \
abs(np.dot(np.array([x2, y2, 1]), trm)[1]) > 1e-5:
print("tr error")
pc1 = sec.copy()
pc1[:, 2] = 1
pc1 = np.dot(pc1, trm)
pc1[:, 2] = sec[:, 2]
plt.plot(pc1[:,0], pc1[:,2], 'o')
plt.xlabel('chainage (m)')
plt.ylabel('elevation (m)')
plt.axis('equal')
plt.grid('on')
plt.show()
| true | true |
f734d00b0261768c9a37cb2968bbdc5343c8ba0c | 1,292 | py | Python | demo_guided.py | MrJohnsson77/bat-country | 894af13bd777ab8aa989baf520db074b97cfad9a | [
"MIT"
] | 274 | 2015-07-06T14:29:09.000Z | 2022-02-06T18:06:14.000Z | demo_guided.py | amsimoes/bat-country | c0d29a0b32c196ca3d4c40fbaf960432b507e8bb | [
"MIT"
] | 11 | 2015-07-13T23:54:15.000Z | 2022-03-11T23:11:38.000Z | demo_guided.py | amsimoes/bat-country | c0d29a0b32c196ca3d4c40fbaf960432b507e8bb | [
"MIT"
] | 68 | 2015-07-06T15:16:53.000Z | 2021-08-09T19:03:31.000Z | # USAGE
# python demo_guided.py --base-model $CAFFE_ROOT/models/bvlc_googlenet \
# --image initial_images/clouds.jpg \
# --guide-image initial_images/seed_images/starry_night.jpg \
# --output examples/output/seeded/clouds_and_starry_night.jpg
# import the necessary packages
# deep-dream wrapper plus imaging and CLI helpers
from batcountry import BatCountry
from PIL import Image
import numpy as np
import argparse
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-b", "--base-model", required=True, help="base model path")
ap.add_argument("-l", "--layer", type=str, default="inception_4c/output",
    help="layer of CNN to use")
ap.add_argument("-i", "--image", required=True, help="path to base image")
ap.add_argument("-g", "--guide-image", required=True, help="path to guide image")
ap.add_argument("-o", "--output", required=True, help="path to output image")
args = ap.parse_args()
# extract features from the guide image, then dream on the base image
# using the guided objective seeded with those features
bc = BatCountry(args.base_model)
features = bc.prepare_guide(Image.open(args.guide_image), end=args.layer)
image = bc.dream(np.float32(Image.open(args.image)), end=args.layer,
    iter_n=20, objective_fn=BatCountry.guided_objective,
    objective_features=features,)
bc.cleanup()
# write the output image to file
result = Image.fromarray(np.uint8(image))
result.save(args.output)
# deep-dream wrapper plus imaging and CLI helpers
from batcountry import BatCountry
from PIL import Image
import numpy as np
import argparse
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-b", "--base-model", required=True, help="base model path")
ap.add_argument("-l", "--layer", type=str, default="inception_4c/output",
    help="layer of CNN to use")
ap.add_argument("-i", "--image", required=True, help="path to base image")
ap.add_argument("-g", "--guide-image", required=True, help="path to guide image")
ap.add_argument("-o", "--output", required=True, help="path to output image")
args = ap.parse_args()
# extract features from the guide image, then dream on the base image
# using the guided objective seeded with those features
bc = BatCountry(args.base_model)
features = bc.prepare_guide(Image.open(args.guide_image), end=args.layer)
image = bc.dream(np.float32(Image.open(args.image)), end=args.layer,
    iter_n=20, objective_fn=BatCountry.guided_objective,
    objective_features=features,)
bc.cleanup()
# write the output image to file
result = Image.fromarray(np.uint8(image))
result.save(args.output)
f734d00b18af99f6b7b1b6eeaf82cc30c32aef02 | 3,245 | py | Python | polling_stations/apps/data_collection/management/commands/import_harborough.py | mtravis/UK-Polling-Stations | 26e0331dc29253dc436a0462ffaa01e974c5dc52 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_harborough.py | mtravis/UK-Polling-Stations | 26e0331dc29253dc436a0462ffaa01e974c5dc52 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_harborough.py | mtravis/UK-Polling-Stations | 26e0331dc29253dc436a0462ffaa01e974c5dc52 | [
"BSD-3-Clause"
] | null | null | null | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    council_id = "E07000131"
    addresses_name = (
        "local.2019-05-02/Version 1/Democracy_Club__02May2019 Harborough DC.tsv"
    )
    stations_name = (
        "local.2019-05-02/Version 1/Democracy_Club__02May2019 Harborough DC.tsv"
    )
    elections = ["local.2019-05-02"]
    csv_delimiter = "\t"
    def address_record_to_dict(self, record):
        """Apply Harborough-specific postcode fixes on top of the base importer."""
        rec = super().address_record_to_dict(record)
        uprn = record.property_urn.strip().lstrip("0")
        # Manual postcode override for a single property.
        if uprn == "200003741884":
            rec["postcode"] = "LE14 2QY"
        # 69 Main Street, Great Bowden: force the known-good postcode and
        # refuse the address checker's suggestion.
        address_lines = (
            record.addressline1.strip(),
            record.addressline2.strip(),
            record.addressline3.strip(),
        )
        if address_lines == ("69 Main Street", "Great Bowden", "Market Harborough, Leics"):
            rec["postcode"] = "LE16 7HD"
            rec["accept_suggestion"] = False
        # UPRNs whose suggested postcode correction has been verified as right.
        accept_uprns = {
            "100030474314",  # LE79DE -> LE79DP : Grange Barn, Loddington Road, Tilton on the Hill, Leicester
            "100030474315",  # LE79DE -> LE79DP : Grange Yard, Loddington Road, Tilton on the Hill, Leicester
            "200003741317",  # LE79DE -> LE79DP : Robin A Tiptoe Farm, Loddington Road, Tilton on the Hill, Leicester
            "200003742237",  # LE79XE -> LE79XB : Ash Tree Cottage, Launde Road, Loddington, Leicester
            "100030477785",  # LE96PU -> LE96PW : 102 Station Road, Broughton Astley, Leics
        }
        # UPRNs whose suggested correction is known to be wrong.
        reject_uprns = {
            "200003741417",  # LE79YE -> LE79FN : Park Farm, Uppingham Road, Skeffington, Leicester
            "200003737159",  # LE175EA -> LE175RA : Hillcrest Farm, Frolesworth Road, Leire, Lutterworth, Leics
            "200003737160",  # LE175EA -> LE175RA : Mount Pleasant, Frolesworth Road, Leire, Lutterworth, Leics
            "100032072508",  # LE88AQ -> LE88AN : Wayside, Arnesby Road, Fleckney, Leicestershire
            "100030493011",  # LE167SZ -> LE167SX : The Old Rectory, Stonton Road, Church Langton, Market Harborough, Leics
            "200003739029",  # LE167RU -> LE167RT : Hunters Lodge, Main Street, Gumley, Market Harborough, Leics
            "100030480043",  # LE174RU -> LE174RX : Toll Gate Cottage, Bitteswell Road, Lutterworth, Leics
            "10034458557",   # LE175LE -> LE174LE : The Milking Parlour Boston Lodge, Lutterworth Road, Gilmorton, Lutterworth, Leics
            "200003744797",  # LE175PL -> LE175RZ : Ewe Cottage Gilmorton Lodge, Kimcote Road, Gilmorton, Lutterworth, Leics
            "100030493741",  # LE167TT -> LE167TX : Birchtree Farm, Welham Road, Thorpe Langton, Leics
            "200003742100",  # LE174LH -> LE174LR : The Mere, Mere Road, Bitteswell, Lutterworth, Leics
            "200003741377",  # LE79XL -> LE79XJ : 3 Fiddlers Green, Uppingham Road, East Norton, Leicester
            "200003741379",  # LE79XL -> LE79XJ : 2 Fiddlers Green, Uppingham Road, East Norton, Leicester
            "200003741382",  # LE79XL -> LE79XJ : 1 Fiddlers Green, Uppingham Road, East Norton, Leicester
        }
        if uprn in accept_uprns:
            rec["accept_suggestion"] = True
        if uprn in reject_uprns:
            rec["accept_suggestion"] = False
        return rec
| 55.948276 | 132 | 0.642527 | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Importer for Harborough DC polling station/address data (2019-05-02)."""
    council_id = "E07000131"
    addresses_name = (
        "local.2019-05-02/Version 1/Democracy_Club__02May2019 Harborough DC.tsv"
    )
    stations_name = (
        "local.2019-05-02/Version 1/Democracy_Club__02May2019 Harborough DC.tsv"
    )
    elections = ["local.2019-05-02"]
    csv_delimiter = "\t"
    def address_record_to_dict(self, record):
        """Apply Harborough-specific postcode fixes on top of the base importer."""
        rec = super().address_record_to_dict(record)
        uprn = record.property_urn.strip().lstrip("0")
        # Manual postcode override for a single property.
        if uprn == "200003741884":
            rec["postcode"] = "LE14 2QY"
        # 69 Main Street, Great Bowden: force the known-good postcode and
        # refuse the address checker's suggestion.
        if (
            record.addressline1.strip() == "69 Main Street"
            and record.addressline2.strip() == "Great Bowden"
            and record.addressline3.strip() == "Market Harborough, Leics"
        ):
            rec["postcode"] = "LE16 7HD"
            rec["accept_suggestion"] = False
        # UPRNs whose suggested postcode correction should be accepted.
        if uprn in [
            "100030474314",
            "100030474315",
            "200003741317",
            "200003742237",
            "100030477785",
        ]:
            rec["accept_suggestion"] = True
        # UPRNs whose suggested correction is known to be wrong.
        if uprn in [
            "200003741417",
            "200003737159",
            "200003737160",
            "100032072508",
            "100030493011",
            "200003739029",
            "100030480043",
            "10034458557",
            "200003744797",
            "100030493741",
            "200003742100",
            "200003741377",
            "200003741379",
            "200003741382",
        ]:
            rec["accept_suggestion"] = False
        return rec
f734d0ce5e31881671bde02a0d35ed0eb21415f4 | 2,857 | py | Python | data/transcoder_evaluation_gfg/python/STOOGE_SORT.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
] | 241 | 2021-07-20T08:35:20.000Z | 2022-03-31T02:39:08.000Z | data/transcoder_evaluation_gfg/python/STOOGE_SORT.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
] | 49 | 2021-07-22T23:18:42.000Z | 2022-03-24T09:15:26.000Z | data/transcoder_evaluation_gfg/python/STOOGE_SORT.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
] | 71 | 2021-07-21T05:17:52.000Z | 2022-03-29T23:49:28.000Z | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(arr, l, h):
    """Stooge sort: recursively sort arr[l..h] in place."""
    if l >= h:
        return
    # Put the smaller of the two boundary elements first.
    if arr[l] > arr[h]:
        arr[l], arr[h] = arr[h], arr[l]
    if h - l + 1 > 2:
        third = int((h - l + 1) / 3)
        f_gold(arr, l, h - third)   # sort the first two thirds
        f_gold(arr, l + third, h)   # sort the last two thirds
        f_gold(arr, l, h - third)   # first two thirds again
#TOFILL
if __name__ == '__main__':
    import copy
    # Each test case: (array, l, h) arguments for the sorters.
    param = [
    ([6, 25, 42, 52, 53, 54, 58, 66, 67, 70],6,6,),
    ([-13, -98, 50, -63, 48, 3, -76, 12, -35, 93, 29, 17, 16, 5, -97, -54, -45, -25],16,14,),
    ([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],28,24,),
    ([7, 49, 26, 33, 48, 79, 2, 71, 32, 4, 20, 36],9,10,),
    ([88],0,0,),
    ([1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],20,28,),
    ([2, 2, 4, 5, 7, 12, 12, 14, 14, 16, 17, 29, 29, 31, 32, 39, 41, 47, 48, 49, 51, 54, 58, 58, 59, 60, 73, 78, 80, 81, 82, 83, 84, 85, 90, 95, 97, 99, 99],28,29,),
    ([-31, -55, 6, 37, 77, 61, 0, 46, -91, -38, 85, -71, 25, 14, 53, 43, 34],15,11,),
    ([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],12,17,),
    ([77, 68, 78, 97, 92, 52, 37, 8, 44, 98, 5, 69, 31, 45, 9, 32, 33, 67, 30, 76, 29, 3, 90, 57, 30, 9, 26, 2, 62, 3, 46, 68, 25, 51, 13, 44, 35, 55],27,20,)
        ]
    # The candidate implementation (f_filled) gets its own deep copy of the
    # inputs so the two implementations never see each other's in-place
    # mutations; this replaces an error-prone verbatim duplicate list.
    filled_function_param = copy.deepcopy(param)
    n_success = 0
    for i, parameters_set in enumerate(param):
        f_filled(*(filled_function_param[i]))
        f_gold(*parameters_set)
        # Both sorts agree (including untouched elements) -> success.
        if parameters_set == filled_function_param[i]:
            n_success += 1
    print("#Results: %i, %i" % (n_success, len(param)))
def f_gold ( arr , l , h ) :
    """Stooge sort: recursively sorts arr[l..h] in place."""
    # Base case: empty or single-element range is already sorted.
    if l >= h :
        return
    # Ensure the boundary pair is in order.
    if arr [ l ] > arr [ h ] :
        t = arr [ l ]
        arr [ l ] = arr [ h ]
        arr [ h ] = t
    # Recurse on overlapping two-thirds: first, last, first again.
    if h - l + 1 > 2 :
        t = ( int ) ( ( h - l + 1 ) / 3 )
        f_gold ( arr , l , ( h - t ) )
        f_gold ( arr , l + t , ( h ) )
        f_gold ( arr , l , ( h - t ) )
if __name__ == '__main__':
    import copy
    # Each test case: (array, l, h) arguments for the sorters.
    param = [
    ([6, 25, 42, 52, 53, 54, 58, 66, 67, 70],6,6,),
    ([-13, -98, 50, -63, 48, 3, -76, 12, -35, 93, 29, 17, 16, 5, -97, -54, -45, -25],16,14,),
    ([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],28,24,),
    ([7, 49, 26, 33, 48, 79, 2, 71, 32, 4, 20, 36],9,10,),
    ([88],0,0,),
    ([1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],20,28,),
    ([2, 2, 4, 5, 7, 12, 12, 14, 14, 16, 17, 29, 29, 31, 32, 39, 41, 47, 48, 49, 51, 54, 58, 58, 59, 60, 73, 78, 80, 81, 82, 83, 84, 85, 90, 95, 97, 99, 99],28,29,),
    ([-31, -55, 6, 37, 77, 61, 0, 46, -91, -38, 85, -71, 25, 14, 53, 43, 34],15,11,),
    ([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],12,17,),
    ([77, 68, 78, 97, 92, 52, 37, 8, 44, 98, 5, 69, 31, 45, 9, 32, 33, 67, 30, 76, 29, 3, 90, 57, 30, 9, 26, 2, 62, 3, 46, 68, 25, 51, 13, 44, 35, 55],27,20,)
        ]
    # The candidate implementation (f_filled) gets its own deep copy of the
    # inputs so the two implementations never see each other's in-place
    # mutations; this replaces an error-prone verbatim duplicate list.
    filled_function_param = copy.deepcopy(param)
    n_success = 0
    for i, parameters_set in enumerate(param):
        f_filled(*(filled_function_param[i]))
        f_gold(*parameters_set)
        # Both sorts agree (including untouched elements) -> success.
        if parameters_set == filled_function_param[i]:
            n_success += 1
    print("#Results: %i, %i" % (n_success, len(param)))
f734d1983e0e270444f39c71128b6e3bc933d71c | 1,442 | py | Python | aliyun-python-sdk-linkwan/aliyunsdklinkwan/request/v20190301/GetLocalConfigSyncTaskRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-linkwan/aliyunsdklinkwan/request/v20190301/GetLocalConfigSyncTaskRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-linkwan/aliyunsdklinkwan/request/v20190301/GetLocalConfigSyncTaskRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklinkwan.endpoint import endpoint_data
class GetLocalConfigSyncTaskRequest(RpcRequest):
	"""RPC request for LinkWAN GetLocalConfigSyncTask (API version 2019-03-01)."""
	def __init__(self):
		# 'linkwan' is the product code used for endpoint resolution.
		RpcRequest.__init__(self, 'LinkWAN', '2019-03-01', 'GetLocalConfigSyncTask','linkwan')
		self.set_method('POST')
		# Populate endpoint data only when the SDK core exposes these hooks.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_TaskId(self):
		"""Return the TaskId query parameter (None if unset)."""
		return self.get_query_params().get('TaskId')
	def set_TaskId(self,TaskId):
		"""Set the TaskId query parameter."""
		self.add_query_param('TaskId',TaskId)
from aliyunsdkcore.request import RpcRequest
from aliyunsdklinkwan.endpoint import endpoint_data
class GetLocalConfigSyncTaskRequest(RpcRequest):
	"""RPC request for LinkWAN GetLocalConfigSyncTask (API version 2019-03-01)."""
	def __init__(self):
		# 'linkwan' is the product code used for endpoint resolution.
		RpcRequest.__init__(self, 'LinkWAN', '2019-03-01', 'GetLocalConfigSyncTask','linkwan')
		self.set_method('POST')
		# Populate endpoint data only when the SDK core exposes these hooks.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_TaskId(self):
		"""Return the TaskId query parameter (None if unset)."""
		return self.get_query_params().get('TaskId')
	def set_TaskId(self,TaskId):
		"""Set the TaskId query parameter."""
		self.add_query_param('TaskId',TaskId)
f734d1e49cc32d7b6f5465291abd73c61b5e8638 | 1,175 | py | Python | File.py | jessedeveloperinvestor/Multiple-Jesse-Projects | f66a6eed79117fd005668f79e0bee3d8ac76e2b8 | [
"Apache-2.0"
] | null | null | null | File.py | jessedeveloperinvestor/Multiple-Jesse-Projects | f66a6eed79117fd005668f79e0bee3d8ac76e2b8 | [
"Apache-2.0"
] | null | null | null | File.py | jessedeveloperinvestor/Multiple-Jesse-Projects | f66a6eed79117fd005668f79e0bee3d8ac76e2b8 | [
"Apache-2.0"
] | null | null | null | y={0:{'Jesse Leite',23,'Best computer Engineer'},1:{'Lana Rhoades',22,'Best pleasure professional'}}
with open('file.txt', 'w') as file:
file.write("Python and Jesse built this file!")
with open('file.txt', 'a') as file:
file.write('\nJesse\n'+str(y)+'\n'+str(y))
inf=[]
with open('file.data', 'r') as file:
for row in file.readlines():
inf.append(row.split(','))
data=[]
with open('file.txt', 'r') as file:
a=file.read()+'\n'
with open('file.data', 'w') as file:
file.write(str(y))
with open('file.data', 'r') as file:
for row in file.readlines():
data.append(row.split(','))
aa=inf[0][0]+inf[0][1]+inf[0][2]
ab=str(aa)
a0=data[0][0]+data[0][1]+data[0][2]
a1=' Data from .TXT file:\n\n'+str(a0)+'\n\n----------------------------------------------------------\n Data from .DATA file:\n\n'+ab
a2=a1.translate({ord(i): None for i in "["})
a3=a2.translate({ord(i): None for i in "]"})
a4=a3.translate({ord(i): None for i in ","})
a5=a4.translate({ord(i): None for i in "{"})
a6=a5.translate({ord(i): None for i in "}"})
a7=a6.translate({ord(i): None for i in ":"})
a8=a7.translate({ord(i): '\n' for i in "'"})
print(a8)
# Demo data: two people, each stored as a set of {name, age, description}.
y = {0: {'Jesse Leite', 23, 'Best computer Engineer'},
     1: {'Lana Rhoades', 22, 'Best pleasure professional'}}
# Create file.txt, then append the demo data twice.
with open('file.txt', 'w') as file:
    file.write("Python and Jesse built this file!")
with open('file.txt', 'a') as file:
    file.write('\nJesse\n' + str(y) + '\n' + str(y))
# NOTE(review): file.data is read here *before* it is rewritten below, so this
# picks up a previous run's contents and raises FileNotFoundError on a fresh
# start -- confirm this ordering is intentional.
inf = []
with open('file.data', 'r') as file:
    for row in file.readlines():
        inf.append(row.split(','))
data = []
with open('file.txt', 'r') as file:
    a = file.read() + '\n'  # read back file.txt (currently unused below)
with open('file.data', 'w') as file:
    file.write(str(y))
with open('file.data', 'r') as file:
    for row in file.readlines():
        data.append(row.split(','))
# First three comma-separated fields from each file.
aa = inf[0][0] + inf[0][1] + inf[0][2]
ab = str(aa)
a0 = data[0][0] + data[0][1] + data[0][2]
a1 = (' Data from .TXT file:\n\n' + str(a0)
      + '\n\n----------------------------------------------------------\n'
      + ' Data from .DATA file:\n\n' + ab)
# One translate() pass replaces the original chain of seven calls: delete the
# dict/set punctuation and turn each single quote into a newline.
a8 = a1.translate(str.maketrans({'[': None, ']': None, ',': None,
                                 '{': None, '}': None, ':': None,
                                 "'": '\n'}))
print(a8)
f734d1f2191f58f7a8e4dabb4fc3ba6dbc11ad51 | 158 | py | Python | depth/core/evaluation/__init__.py | zhyever/Monocular-Depth-Estimation-Toolbox | c591b9711321450387ffa7322ec1db9a340347c2 | [
"Apache-2.0"
] | 21 | 2022-03-12T01:42:05.000Z | 2022-03-31T17:01:45.000Z | depth/core/evaluation/__init__.py | zhyever/Monocular-Depth-Estimation-Toolbox | c591b9711321450387ffa7322ec1db9a340347c2 | [
"Apache-2.0"
] | 2 | 2022-03-29T10:50:33.000Z | 2022-03-30T10:40:53.000Z | depth/core/evaluation/__init__.py | zhyever/Monocular-Depth-Estimation-Toolbox | c591b9711321450387ffa7322ec1db9a340347c2 | [
"Apache-2.0"
] | 3 | 2022-03-26T11:52:44.000Z | 2022-03-30T21:24:16.000Z | # Copyright (c) OpenMMLab. All rights reserved.
from .metrics import metrics, eval_metrics, pre_eval_to_metrics
from .eval_hooks import EvalHook, DistEvalHook | 52.666667 | 63 | 0.829114 |
from .metrics import metrics, eval_metrics, pre_eval_to_metrics
from .eval_hooks import EvalHook, DistEvalHook | true | true |
f734d27754536095a51f7b1e9025b5c86e2bfe24 | 3,601 | py | Python | amplimap/coverage.py | koelling/amplimap | cbd5b7b8c2f703982d8964a3c77bd350a47f08a6 | [
"Apache-2.0"
] | 11 | 2019-04-08T15:41:50.000Z | 2020-11-04T14:25:19.000Z | amplimap/coverage.py | koelling/amplimap | cbd5b7b8c2f703982d8964a3c77bd350a47f08a6 | [
"Apache-2.0"
] | 5 | 2018-05-23T10:30:18.000Z | 2021-06-29T09:33:17.000Z | amplimap/coverage.py | koelling/amplimap | cbd5b7b8c2f703982d8964a3c77bd350a47f08a6 | [
"Apache-2.0"
] | 9 | 2019-04-08T15:16:04.000Z | 2021-06-09T15:14:09.000Z | # -*- coding: utf-8 -*-
"""
This module contains methods for processing and aggregating coverage files generated by ``bedtools``.
"""
import pandas as pd
import numpy as np
import re
import os
from .reader import read_sample_info
# Column order of the per-target summary CSVs produced by process_file().
cov_cols = ['Target', 'min_coverage', 'sum_coverage', 'basepairs', 'cov_per_bp', 'fraction_zero_coverage', 'fraction_10x_coverage', 'fraction_30x_coverage']
# One dtype per column. The original dtype list was two entries short, so
# zip() silently dropped the dtypes for the two fraction_*x columns.
cov_cols_dtypes = dict(zip(cov_cols, [str, int, int, int, float, float, float, float]))
def fraction_zero_coverage(coverage):
    """Fraction of bases that received no coverage at all."""
    return (coverage == 0).mean()
def fraction_10x_coverage(coverage):
    """Fraction of bases covered by at least 10 reads."""
    return (coverage >= 10).mean()
def fraction_30x_coverage(coverage):
    """Fraction of bases covered by at least 30 reads."""
    return (coverage >= 30).mean()
def process_file(input: str, output: str):
    """Read raw bedtools coverage file, calculate summary statistics and output them as CSV file.

    Args:
        input: path to a bedtools coverage file
        output: path to the summary CSV file
    """
    # read bedtools output (per-base coverage, one row per position)
    depth = pd.read_csv(input, sep='\t', names = ['chr', 'start_0', 'end', 'id', 'score', 'strand', 'position', 'coverage'], low_memory=False)

    # summarize: seven statistics per target id; together with the id column
    # this matches the eight entries of cov_cols
    summary = depth.groupby('id').aggregate({'coverage': [np.min, np.sum, len, np.mean, fraction_zero_coverage, fraction_10x_coverage, fraction_30x_coverage]})

    # make id index into normal column, then reset (flatten) column names
    summary.reset_index(level=0, inplace=True)
    summary.columns = cov_cols

    # write file
    summary.to_csv(output, index = False)
def aggregate(input, output):
    """Read per-sample coverage summary files and create aggregate files.

    Args:
        input: dict containing 'csvs', the list of csv files to aggregate, and
            optionally 'sample_info', a table with additional sample annotation
        output: dict containing paths for output files: merged, min_coverage,
            cov_per_bp, fraction_zero_coverage
    """
    # load sample information table (optional)
    sample_info = None
    if 'sample_info' in input and len(input['sample_info']) > 0:
        sample_info = read_sample_info(input['sample_info'][0])

    # read each summary CSV, tagging its rows with the sample name derived
    # from the file name
    frames = []
    for file in input['csvs']:
        sname = os.path.basename(file)
        sname = re.sub(r'\.coverage\.csv$', '', sname)
        print('Reading', file, 'for', sname, '...')

        df = pd.read_csv(file,
            index_col = False,
            dtype = cov_cols_dtypes)
        df['Sample'] = sname
        print(sname, 'coverage data shape:', str(df.shape))
        frames.append(df)

    assert len(frames) > 0, \
        '\n\nABORTED: Did not find any coverage data!\n\n'

    # concatenate once instead of repeated DataFrame.append, which is
    # quadratic and was removed in pandas 2.0
    merged = pd.concat(frames, ignore_index = True)
    print('Merged data shape:', str(merged.shape))
    print(merged.head())

    print('Duplicated:')
    print(merged[merged.duplicated(['Target', 'Sample'], keep=False)])

    if sample_info is not None:
        merged = merged.join(sample_info, on = ['Sample', 'Target'], how = 'left')

    # make one target-by-sample matrix per metric
    for column in ['min_coverage', 'cov_per_bp', 'fraction_zero_coverage']:
        pivoted = merged.pivot(index='Target', columns='Sample', values=column)
        print('Made pivot table for', column, ' with shape', str(pivoted.shape))
        pivoted.to_csv(output[column])
        print(output[column])

    # output full merged data set
    merged.to_csv(output['merged'], index = False)
import pandas as pd
import numpy as np
import re
import os
from .reader import read_sample_info
# Column order of the per-target summary CSVs produced by process_file().
cov_cols = ['Target', 'min_coverage', 'sum_coverage', 'basepairs', 'cov_per_bp', 'fraction_zero_coverage', 'fraction_10x_coverage', 'fraction_30x_coverage']
# One dtype per column. The original dtype list was two entries short, so
# zip() silently dropped the dtypes for the two fraction_*x columns.
cov_cols_dtypes = dict(zip(cov_cols, [str, int, int, int, float, float, float, float]))
def fraction_zero_coverage(coverage):
    """Calculate fraction of bases with coverage 0."""
    return 1.0 * (coverage == 0).sum() / len(coverage)
def fraction_10x_coverage(coverage):
    """Calculate fraction of bases with coverage 10 or more."""
    return 1.0 * (coverage >= 10).sum() / len(coverage)
def fraction_30x_coverage(coverage):
    """Calculate fraction of bases with coverage 30 or more."""
    return 1.0 * (coverage >= 30).sum() / len(coverage)
def process_file(input: str, output: str):
    """Read a raw bedtools coverage file, compute per-target summary
    statistics and write them out as a CSV file."""
    # per-base coverage as emitted by bedtools (one row per position)
    depth = pd.read_csv(input, sep='\t', names = ['chr', 'start_0', 'end', 'id', 'score', 'strand', 'position', 'coverage'], low_memory=False)
    # seven summary statistics per target id; together with the id column
    # this matches the eight entries of cov_cols
    summary = depth.groupby('id').aggregate({'coverage': [np.min, np.sum, len, np.mean, fraction_zero_coverage, fraction_10x_coverage, fraction_30x_coverage]})
    # make the id index a normal column, then flatten the column names
    summary.reset_index(level=0, inplace=True)
    summary.columns = cov_cols
    summary.to_csv(output, index = False)
def aggregate(input, output):
    """Read per-sample coverage summary files and create aggregate files.

    Args:
        input: dict containing 'csvs', the list of csv files to aggregate, and
            optionally 'sample_info', a table with additional sample annotation
        output: dict containing paths for output files: merged, min_coverage,
            cov_per_bp, fraction_zero_coverage
    """
    # load sample information table (optional)
    sample_info = None
    if 'sample_info' in input and len(input['sample_info']) > 0:
        sample_info = read_sample_info(input['sample_info'][0])
    # read each summary CSV, tagging its rows with the sample name derived
    # from the file name
    frames = []
    for file in input['csvs']:
        sname = os.path.basename(file)
        sname = re.sub(r'\.coverage\.csv$', '', sname)
        print('Reading', file, 'for', sname, '...')
        df = pd.read_csv(file,
            index_col = False,
            dtype = cov_cols_dtypes)
        df['Sample'] = sname
        print(sname, 'coverage data shape:', str(df.shape))
        frames.append(df)
    assert len(frames) > 0, \
        '\n\nABORTED: Did not find any coverage data!\n\n'
    # concatenate once instead of repeated DataFrame.append, which is
    # quadratic and was removed in pandas 2.0
    merged = pd.concat(frames, ignore_index = True)
    print('Merged data shape:', str(merged.shape))
    print(merged.head())
    print('Duplicated:')
    print(merged[merged.duplicated(['Target', 'Sample'], keep=False)])
    if sample_info is not None:
        merged = merged.join(sample_info, on = ['Sample', 'Target'], how = 'left')
    # make one target-by-sample matrix per metric
    for column in ['min_coverage', 'cov_per_bp', 'fraction_zero_coverage']:
        pivoted = merged.pivot(index='Target', columns='Sample', values=column)
        print('Made pivot table for', column, ' with shape', str(pivoted.shape))
        pivoted.to_csv(output[column])
        print(output[column])
    # output full merged data set
    merged.to_csv(output['merged'], index = False)
f734d2d7c65ce3567f24d550214f74a82b5809d1 | 4,413 | py | Python | src/streamlink/plugins/vimeo.py | kyldery/streamlink | ef36240408c194a543557fb31e4535b0426ec153 | [
"BSD-2-Clause"
] | 1 | 2022-02-25T20:14:03.000Z | 2022-02-25T20:14:03.000Z | src/streamlink/plugins/vimeo.py | kyldery/streamlink | ef36240408c194a543557fb31e4535b0426ec153 | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugins/vimeo.py | kyldery/streamlink | ef36240408c194a543557fb31e4535b0426ec153 | [
"BSD-2-Clause"
] | 12 | 2022-01-30T23:34:18.000Z | 2022-03-26T17:09:43.000Z | """
$description Global live streaming and video hosting social platform.
$url vimeo.com
$type live, vod
$notes Password protected streams are not supported
"""
import logging
import re
from html import unescape as html_unescape
from urllib.parse import urlparse
from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.dash import DASHStream
from streamlink.stream.ffmpegmux import MuxedStream
from streamlink.stream.hls import HLSStream
from streamlink.stream.http import HTTPStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
    r"https?://(player\.vimeo\.com/video/\d+|(www\.)?vimeo\.com/.+)"
))
class Vimeo(Plugin):
    """Streamlink plugin for vimeo.com video pages and player.vimeo.com embeds."""
    # Finds the player-config URL embedded as JSON value or data attribute
    # in a regular vimeo.com page.
    _config_url_re = re.compile(r'(?:"config_url"|\bdata-config-url)\s*[:=]\s*(".+?")')
    # Finds the inline "var config = {...};" blob on player.vimeo.com pages.
    _config_re = re.compile(r"var\s+config\s*=\s*({.+?})\s*;")
    # regex match -> JSON string literal -> HTML-unescape -> validated URL
    # (None when the page contains no config URL).
    _config_url_schema = validate.Schema(
        validate.transform(_config_url_re.search),
        validate.any(
            None,
            validate.Schema(
                validate.get(1),
                validate.parse_json(),
                validate.transform(html_unescape),
                validate.url(),
            ),
        ),
    )
    # Shape of the player config JSON: per-CDN stream URLs plus optional
    # subtitle tracks.
    _config_schema = validate.Schema(
        validate.parse_json(),
        {
            "request": {
                "files": {
                    validate.optional("dash"): {"cdns": {validate.text: {"url": validate.url()}}},
                    validate.optional("hls"): {"cdns": {validate.text: {"url": validate.url()}}},
                    validate.optional("progressive"): validate.all(
                        [{"url": validate.url(), "quality": validate.text}]
                    ),
                },
                validate.optional("text_tracks"): validate.all(
                    [{"url": validate.text, "lang": validate.text}]
                ),
            }
        },
    )
    # Extracts the inline config blob and validates it with _config_schema.
    _player_schema = validate.Schema(
        validate.transform(_config_re.search),
        validate.any(None, validate.Schema(validate.get(1), _config_schema)),
    )
    arguments = PluginArguments(
        PluginArgument("mux-subtitles", is_global=True)
    )
    def _get_streams(self):
        """Yield (quality, stream) pairs for the video behind self.url."""
        # Embed URLs carry the config inline; regular pages point at a
        # separate config URL that must be fetched first.
        if "player.vimeo.com" in self.url:
            data = self.session.http.get(self.url, schema=self._player_schema)
        else:
            api_url = self.session.http.get(self.url, schema=self._config_url_schema)
            if not api_url:
                return
            data = self.session.http.get(api_url, schema=self._config_schema)
        videos = data["request"]["files"]
        streams = []
        for stream_type in ("hls", "dash"):
            if stream_type not in videos:
                continue
            for _, video_data in videos[stream_type]["cdns"].items():
                log.trace("{0!r}".format(video_data))
                url = video_data.get("url")
                if stream_type == "hls":
                    for stream in HLSStream.parse_variant_playlist(self.session, url).items():
                        streams.append(stream)
                elif stream_type == "dash":
                    p = urlparse(url)
                    if p.path.endswith("dash.mpd"):
                        # LIVE
                        url = self.session.http.get(url).json()["url"]
                    elif p.path.endswith("master.json"):
                        # VOD
                        url = url.replace("master.json", "master.mpd")
                    else:
                        log.error("Unsupported DASH path: {0}".format(p.path))
                        continue
                    for stream in DASHStream.parse_manifest(self.session, url).items():
                        streams.append(stream)
        # Progressive MP4 downloads are exposed as plain HTTP streams.
        for stream in videos.get("progressive", []):
            streams.append((stream["quality"], HTTPStream(self.session, stream["url"])))
        # Optionally mux subtitle tracks into every stream via ffmpeg.
        if self.get_option("mux_subtitles") and data["request"].get("text_tracks"):
            substreams = {
                s["lang"]: HTTPStream(self.session, "https://vimeo.com" + s["url"])
                for s in data["request"]["text_tracks"]
            }
            for quality, stream in streams:
                yield quality, MuxedStream(self.session, stream, subtitles=substreams)
        else:
            for stream in streams:
                yield stream
__plugin__ = Vimeo
| 37.084034 | 98 | 0.554498 |
import logging
import re
from html import unescape as html_unescape
from urllib.parse import urlparse
from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.dash import DASHStream
from streamlink.stream.ffmpegmux import MuxedStream
from streamlink.stream.hls import HLSStream
from streamlink.stream.http import HTTPStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
    r"https?://(player\.vimeo\.com/video/\d+|(www\.)?vimeo\.com/.+)"
))
class Vimeo(Plugin):
    """Streamlink plugin for vimeo.com video pages and player.vimeo.com embeds."""
    # Finds the player-config URL embedded as JSON value or data attribute
    # in a regular vimeo.com page.
    _config_url_re = re.compile(r'(?:"config_url"|\bdata-config-url)\s*[:=]\s*(".+?")')
    # Finds the inline "var config = {...};" blob on player.vimeo.com pages.
    _config_re = re.compile(r"var\s+config\s*=\s*({.+?})\s*;")
    # regex match -> JSON string literal -> HTML-unescape -> validated URL
    # (None when the page contains no config URL).
    _config_url_schema = validate.Schema(
        validate.transform(_config_url_re.search),
        validate.any(
            None,
            validate.Schema(
                validate.get(1),
                validate.parse_json(),
                validate.transform(html_unescape),
                validate.url(),
            ),
        ),
    )
    # Shape of the player config JSON: per-CDN stream URLs plus optional
    # subtitle tracks.
    _config_schema = validate.Schema(
        validate.parse_json(),
        {
            "request": {
                "files": {
                    validate.optional("dash"): {"cdns": {validate.text: {"url": validate.url()}}},
                    validate.optional("hls"): {"cdns": {validate.text: {"url": validate.url()}}},
                    validate.optional("progressive"): validate.all(
                        [{"url": validate.url(), "quality": validate.text}]
                    ),
                },
                validate.optional("text_tracks"): validate.all(
                    [{"url": validate.text, "lang": validate.text}]
                ),
            }
        },
    )
    # Extracts the inline config blob and validates it with _config_schema.
    _player_schema = validate.Schema(
        validate.transform(_config_re.search),
        validate.any(None, validate.Schema(validate.get(1), _config_schema)),
    )
    arguments = PluginArguments(
        PluginArgument("mux-subtitles", is_global=True)
    )
    def _get_streams(self):
        """Yield (quality, stream) pairs for the video behind self.url."""
        # Embed URLs carry the config inline; regular pages point at a
        # separate config URL that must be fetched first.
        if "player.vimeo.com" in self.url:
            data = self.session.http.get(self.url, schema=self._player_schema)
        else:
            api_url = self.session.http.get(self.url, schema=self._config_url_schema)
            if not api_url:
                return
            data = self.session.http.get(api_url, schema=self._config_schema)
        videos = data["request"]["files"]
        streams = []
        for stream_type in ("hls", "dash"):
            if stream_type not in videos:
                continue
            for _, video_data in videos[stream_type]["cdns"].items():
                log.trace("{0!r}".format(video_data))
                url = video_data.get("url")
                if stream_type == "hls":
                    for stream in HLSStream.parse_variant_playlist(self.session, url).items():
                        streams.append(stream)
                elif stream_type == "dash":
                    p = urlparse(url)
                    if p.path.endswith("dash.mpd"):
                        # live streams: the real manifest URL comes back as JSON
                        url = self.session.http.get(url).json()["url"]
                    elif p.path.endswith("master.json"):
                        # VOD: swap the JSON manifest for the MPD equivalent
                        url = url.replace("master.json", "master.mpd")
                    else:
                        log.error("Unsupported DASH path: {0}".format(p.path))
                        continue
                    for stream in DASHStream.parse_manifest(self.session, url).items():
                        streams.append(stream)
        # Progressive MP4 downloads are exposed as plain HTTP streams.
        for stream in videos.get("progressive", []):
            streams.append((stream["quality"], HTTPStream(self.session, stream["url"])))
        # Optionally mux subtitle tracks into every stream via ffmpeg.
        if self.get_option("mux_subtitles") and data["request"].get("text_tracks"):
            substreams = {
                s["lang"]: HTTPStream(self.session, "https://vimeo.com" + s["url"])
                for s in data["request"]["text_tracks"]
            }
            for quality, stream in streams:
                yield quality, MuxedStream(self.session, stream, subtitles=substreams)
        else:
            for stream in streams:
                yield stream
__plugin__ = Vimeo
| true | true |
f734d363117a1d24b35c8edb306662ee55c567e1 | 625 | py | Python | flatland/database/population/node/diagram_type_instances.py | lelandstarr/flatland-model-diagram-editor | dfbd10d80542359c6951d7b039a5a4e3da2a0f50 | [
"MIT"
] | 10 | 2021-01-03T16:47:34.000Z | 2022-03-30T18:47:07.000Z | flatland/database/population/node/diagram_type_instances.py | lelandstarr/flatland-model-diagram-editor | dfbd10d80542359c6951d7b039a5a4e3da2a0f50 | [
"MIT"
] | 91 | 2021-01-09T02:14:13.000Z | 2022-02-24T10:24:10.000Z | flatland/database/population/node/diagram_type_instances.py | lelandstarr/flatland-model-diagram-editor | dfbd10d80542359c6951d7b039a5a4e3da2a0f50 | [
"MIT"
] | 1 | 2021-01-13T22:13:19.000Z | 2021-01-13T22:13:19.000Z | """
diagram_type_instances.py
"""
population = [
{'Name': 'class', 'Abbreviation': 'CD',
'About': 'Show data, logic and constraints in a domain'},
{'Name': 'state machine', 'Abbreviation': 'SMD',
'About': 'lifecycle of a class or assigner relationship'},
{'Name': 'class collaboration', 'Abbreviation': 'CCD',
'About': 'Shows interactions among classes and external entities in a domain'},
{'Name': 'domain', 'Abbreviation': 'DD',
'About': 'Illustrates your logical subject matter "stack" ' +
'as a network of platform independent domains with requirements dependencies'}
]
| 39.0625 | 92 | 0.6512 |
population = [
{'Name': 'class', 'Abbreviation': 'CD',
'About': 'Show data, logic and constraints in a domain'},
{'Name': 'state machine', 'Abbreviation': 'SMD',
'About': 'lifecycle of a class or assigner relationship'},
{'Name': 'class collaboration', 'Abbreviation': 'CCD',
'About': 'Shows interactions among classes and external entities in a domain'},
{'Name': 'domain', 'Abbreviation': 'DD',
'About': 'Illustrates your logical subject matter "stack" ' +
'as a network of platform independent domains with requirements dependencies'}
]
| true | true |
f734d3c25a06993d61e3d706c674c3425718395c | 12,885 | py | Python | python/paddle/fluid/tests/unittests/xpu/test_softmax_with_cross_entropy_op_xpu.py | joey12300/Paddle | 59102c6dcd2def3091f5c37816354ac69d669809 | [
"Apache-2.0"
] | 1 | 2020-12-03T05:11:47.000Z | 2020-12-03T05:11:47.000Z | python/paddle/fluid/tests/unittests/xpu/test_softmax_with_cross_entropy_op_xpu.py | joey12300/Paddle | 59102c6dcd2def3091f5c37816354ac69d669809 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/xpu/test_softmax_with_cross_entropy_op_xpu.py | joey12300/Paddle | 59102c6dcd2def3091f5c37816354ac69d669809 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
sys.path.append("..")
from test_softmax_op import stable_softmax
from op_test import OpTest
import paddle.fluid.core as core
import paddle
import unittest
import numpy as np
def cross_entropy(softmax, label, soft_label, axis, ignore_index=-1):
if soft_label:
return (-label * np.log(softmax)).sum(axis=axis, keepdims=True)
shape = softmax.shape
axis %= len(shape)
n = int(np.prod(shape[:axis]))
axis_dim = shape[axis]
remain = int(np.prod(shape[axis + 1:]))
softmax_reshape = softmax.reshape((n, axis_dim, remain))
label_reshape = label.reshape((n, 1, remain))
result = np.zeros_like(label_reshape, dtype=softmax.dtype)
for i in range(n):
for j in range(remain):
lbl = label_reshape[i, 0, j]
if lbl != ignore_index:
result[i, 0, j] -= np.log(softmax_reshape[i, lbl, j])
return result.reshape(label.shape)
class TestSoftmaxWithCrossEntropyOp(OpTest):
"""
Test softmax with cross entropy operator with discreate one-hot labels.
"""
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = False
self.soft_label = False
self.dtype = np.float32
self.axis = -1
self.ignore_index = -1
self.shape = [41, 37]
self.use_xpu = True
def setUp(self):
self.initParams()
logits = getattr(
self, "logits",
np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype))
softmax = np.apply_along_axis(stable_softmax, self.axis, logits)
if self.soft_label:
labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)
labels /= np.sum(labels, axis=self.axis, keepdims=True)
else:
axis_dim = self.shape[self.axis]
self.shape[self.axis] = 1
labels = np.random.randint(0, axis_dim, self.shape, dtype="int64")
loss = cross_entropy(softmax, labels, self.soft_label, self.axis,
self.ignore_index)
self.inputs = {"Logits": logits, "Label": labels}
self.outputs = {
"Softmax": softmax.astype(self.dtype),
"Loss": loss.astype(self.dtype)
}
self.attrs = {
"numeric_stable_mode": self.numeric_stable_mode,
"soft_label": self.soft_label,
}
if self.ignore_index >= 0:
self.attrs['ignore_index'] = self.ignore_index
if self.axis != -1:
self.attrs['axis'] = self.axis
def test_check_output(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place, atol=1e-2)
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ["Logits"], "Loss", max_relative_error=0.2)
class TestXPUSoftmaxWithCrossEntropyOp(TestSoftmaxWithCrossEntropyOp):
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.shape = [3, 5, 7, 11]
self.axis = -1
self.ignore_index = -1
self.dtype = np.float32
self.use_xpu = True
def test_check_output(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place, atol=1e-2)
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ["Logits"], "Loss", max_relative_error=0.2)
class TestXPUSoftmaxWithCrossEntropyOp2(TestXPUSoftmaxWithCrossEntropyOp):
"""
Test softmax with cross entropy operator with soft labels.
"""
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = True
self.dtype = np.float32
self.axis = -1
self.ignore_index = -1
self.shape = [41, 37]
self.use_xpu = True
def test_check_output(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place, atol=1e-2)
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ["Logits"], "Loss", max_relative_error=0.2)
class TestXPUSoftmaxWithCrossEntropyOp3(TestXPUSoftmaxWithCrossEntropyOp):
"""
Test softmax with cross entropy operator with ignore_index.
"""
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.shape = [41, 37]
self.ignore_index = 5
self.axis = -1
self.dtype = np.float32
# xpu only support axis = rank -1
# class TestXPUSoftmaxWithCrossEntropyOpAxis1(TestXPUSoftmaxWithCrossEntropyOp):
# """
# Test softmax with cross entropy operator with discreate one-hot labels.
# Given axis != -1
# """
# def initParams(self):
# self.op_type = "softmax_with_cross_entropy"
# self.numeric_stable_mode = True
# self.soft_label = False
# self.dtype = np.float32
# self.axis = 0
# self.ignore_index = -1
# self.shape = [3, 5, 7, 11]
# xpu only support axis = rank -1
# class TestXPUSoftmaxWithCrossEntropyOpAxis2(TestXPUSoftmaxWithCrossEntropyOp):
# """
# Test softmax with cross entropy operator with discreate one-hot labels.
# Given axis != -1
# """
# def initParams(self):
# self.op_type = "softmax_with_cross_entropy"
# self.numeric_stable_mode = True
# self.soft_label = False
# self.dtype = np.float32
# self.axis = 1
# self.ignore_index = -1
# self.shape = [3, 5, 7, 11]
# xpu only support axis = rank -1
# class TestXPUSoftmaxWithCrossEntropyOpAxis3(TestXPUSoftmaxWithCrossEntropyOp):
# """
# Test softmax with cross entropy operator with discreate one-hot labels.
# Given axis != -1
# """
# def initParams(self):
# self.op_type = "softmax_with_cross_entropy"
# self.numeric_stable_mode = True
# self.soft_label = False
# self.dtype = np.float32
# self.axis = 2
# self.ignore_index = -1
# self.shape = [3, 5, 7, 11]
class TestXPUSoftmaxWithCrossEntropyOpAxis4(TestXPUSoftmaxWithCrossEntropyOp):
"""
Test softmax with cross entropy operator with discreate one-hot labels.
Given axis != -1
"""
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.dtype = np.float32
self.axis = 3
self.ignore_index = -1
self.shape = [3, 5, 7, 11]
class TestXPUSoftmaxWithCrossEntropyOpAxisDimEqualOne(
TestXPUSoftmaxWithCrossEntropyOp):
"""
Test softmax with cross entropy operator with discreate one-hot labels.
Given axis != -1
"""
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.dtype = np.float32
self.axis = -1
self.ignore_index = -1
self.shape = [3, 5, 7, 1]
# xpu only support axis = rank -1
# class TestXPUSoftmaxWithCrossEntropyOpSoftLabelAxis1(
# TestXPUSoftmaxWithCrossEntropyOp):
# def initParams(self):
# self.op_type = "softmax_with_cross_entropy"
# self.numeric_stable_mode = True
# self.soft_label = True
# self.shape = [3, 5, 7, 11]
# self.axis = 0
# self.ignore_index = -1
# self.dtype = np.float32
# xpu only support axis = rank -1
# class TestXPUSoftmaxWithCrossEntropyOpSoftLabelAxis2(
# TestXPUSoftmaxWithCrossEntropyOp2):
# def initParams(self):
# self.op_type = "softmax_with_cross_entropy"
# self.numeric_stable_mode = True
# self.soft_label = True
# self.shape = [3, 5, 7, 11]
# self.axis = 1
# self.ignore_index = -1
# self.dtype = np.float32
# xpu only support axis = rank -1
# class TestXPUSoftmaxWithCrossEntropyOpSoftLabelAxis3(
# TestXPUSoftmaxWithCrossEntropyOp2):
# def initParams(self):
# self.op_type = "softmax_with_cross_entropy"
# self.numeric_stable_mode = True
# self.soft_label = True
# self.shape = [3, 5, 7, 11]
# self.axis = 2
# self.ignore_index = -1
# self.dtype = np.float32
class TestXPUSoftmaxWithCrossEntropyOpSoftLabelAxis4(
TestXPUSoftmaxWithCrossEntropyOp2):
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = True
self.shape = [3, 5, 7, 11]
self.axis = 3
self.ignore_index = -1
self.dtype = np.float32
# xpu only support axis = rank -1
# class TestXPUSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis1(
# TestXPUSoftmaxWithCrossEntropyOp3):
# def initParams(self):
# self.op_type = "softmax_with_cross_entropy"
# self.numeric_stable_mode = True
# self.soft_label = False
# self.shape = [3, 5, 7, 11]
# self.ignore_index = 1
# self.axis = 0
# self.dtype = np.float32
# xpu only support axis = rank -1
# class TestXPUSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis2(
# TestXPUSoftmaxWithCrossEntropyOp3):
# def initParams(self):
# self.op_type = "softmax_with_cross_entropy"
# self.numeric_stable_mode = True
# self.soft_label = False
# self.shape = [3, 5, 7, 11]
# self.ignore_index = 0
# self.axis = 1
# self.dtype = np.float32
# xpu only support axis = rank -1
# class TestXPUSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis3(
# TestXPUSoftmaxWithCrossEntropyOp3):
# def initParams(self):
# self.op_type = "softmax_with_cross_entropy"
# self.numeric_stable_mode = True
# self.soft_label = False
# self.shape = [3, 5, 7, 11]
# self.ignore_index = 3
# self.axis = 2
# self.dtype = np.float32
class TestXPUSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis4(
TestXPUSoftmaxWithCrossEntropyOp3):
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.shape = [3, 5, 7, 11]
self.ignore_index = 3
self.axis = 3
self.dtype = np.float32
class TestXPUSoftmaxWithCrossEntropyOpBoundary0(
TestXPUSoftmaxWithCrossEntropyOp):
"""
Test stable softmax with cross entropy operator will not product INF
with small logits value.
"""
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.shape = [3, 5, 7, 11]
self.axis = -1
self.ignore_index = -1
self.dtype = np.float32
self.logits = np.full(self.shape, -500.0).astype(self.dtype)
class TestXPUSoftmaxWithCrossEntropyOpBoundary1(
TestXPUSoftmaxWithCrossEntropyOp):
"""
Test stable softmax with cross entropy operator will not product INF
with small logits value.
"""
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.shape = [3, 5, 7, 11]
self.axis = -1
self.ignore_index = -1
self.dtype = np.float32
self.logits = np.full(self.shape, 1000.0).astype(self.dtype)
self.logits[:, :, 0, :] = -1000.0
if __name__ == "__main__":
unittest.main()
| 32.374372 | 80 | 0.632906 |
from __future__ import print_function
import sys
sys.path.append("..")
from test_softmax_op import stable_softmax
from op_test import OpTest
import paddle.fluid.core as core
import paddle
import unittest
import numpy as np
def cross_entropy(softmax, label, soft_label, axis, ignore_index=-1):
if soft_label:
return (-label * np.log(softmax)).sum(axis=axis, keepdims=True)
shape = softmax.shape
axis %= len(shape)
n = int(np.prod(shape[:axis]))
axis_dim = shape[axis]
remain = int(np.prod(shape[axis + 1:]))
softmax_reshape = softmax.reshape((n, axis_dim, remain))
label_reshape = label.reshape((n, 1, remain))
result = np.zeros_like(label_reshape, dtype=softmax.dtype)
for i in range(n):
for j in range(remain):
lbl = label_reshape[i, 0, j]
if lbl != ignore_index:
result[i, 0, j] -= np.log(softmax_reshape[i, lbl, j])
return result.reshape(label.shape)
class TestSoftmaxWithCrossEntropyOp(OpTest):
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = False
self.soft_label = False
self.dtype = np.float32
self.axis = -1
self.ignore_index = -1
self.shape = [41, 37]
self.use_xpu = True
def setUp(self):
self.initParams()
logits = getattr(
self, "logits",
np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype))
softmax = np.apply_along_axis(stable_softmax, self.axis, logits)
if self.soft_label:
labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)
labels /= np.sum(labels, axis=self.axis, keepdims=True)
else:
axis_dim = self.shape[self.axis]
self.shape[self.axis] = 1
labels = np.random.randint(0, axis_dim, self.shape, dtype="int64")
loss = cross_entropy(softmax, labels, self.soft_label, self.axis,
self.ignore_index)
self.inputs = {"Logits": logits, "Label": labels}
self.outputs = {
"Softmax": softmax.astype(self.dtype),
"Loss": loss.astype(self.dtype)
}
self.attrs = {
"numeric_stable_mode": self.numeric_stable_mode,
"soft_label": self.soft_label,
}
if self.ignore_index >= 0:
self.attrs['ignore_index'] = self.ignore_index
if self.axis != -1:
self.attrs['axis'] = self.axis
def test_check_output(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place, atol=1e-2)
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ["Logits"], "Loss", max_relative_error=0.2)
class TestXPUSoftmaxWithCrossEntropyOp(TestSoftmaxWithCrossEntropyOp):
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.shape = [3, 5, 7, 11]
self.axis = -1
self.ignore_index = -1
self.dtype = np.float32
self.use_xpu = True
def test_check_output(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place, atol=1e-2)
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ["Logits"], "Loss", max_relative_error=0.2)
class TestXPUSoftmaxWithCrossEntropyOp2(TestXPUSoftmaxWithCrossEntropyOp):
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = True
self.dtype = np.float32
self.axis = -1
self.ignore_index = -1
self.shape = [41, 37]
self.use_xpu = True
def test_check_output(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place, atol=1e-2)
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ["Logits"], "Loss", max_relative_error=0.2)
class TestXPUSoftmaxWithCrossEntropyOp3(TestXPUSoftmaxWithCrossEntropyOp):
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.shape = [41, 37]
self.ignore_index = 5
self.axis = -1
self.dtype = np.float32
# Test softmax with cross entropy operator with discreate one-hot labels.
# Given axis != -1
# """
# Test softmax with cross entropy operator with discreate one-hot labels.
# Given axis != -1
# """
# Test softmax with cross entropy operator with discreate one-hot labels.
# Given axis != -1
# """
class TestXPUSoftmaxWithCrossEntropyOpAxis4(TestXPUSoftmaxWithCrossEntropyOp):
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.dtype = np.float32
self.axis = 3
self.ignore_index = -1
self.shape = [3, 5, 7, 11]
class TestXPUSoftmaxWithCrossEntropyOpAxisDimEqualOne(
TestXPUSoftmaxWithCrossEntropyOp):
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.dtype = np.float32
self.axis = -1
self.ignore_index = -1
self.shape = [3, 5, 7, 1]
class TestXPUSoftmaxWithCrossEntropyOpSoftLabelAxis4(
TestXPUSoftmaxWithCrossEntropyOp2):
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = True
self.shape = [3, 5, 7, 11]
self.axis = 3
self.ignore_index = -1
self.dtype = np.float32
class TestXPUSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis4(
TestXPUSoftmaxWithCrossEntropyOp3):
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.shape = [3, 5, 7, 11]
self.ignore_index = 3
self.axis = 3
self.dtype = np.float32
class TestXPUSoftmaxWithCrossEntropyOpBoundary0(
TestXPUSoftmaxWithCrossEntropyOp):
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.shape = [3, 5, 7, 11]
self.axis = -1
self.ignore_index = -1
self.dtype = np.float32
self.logits = np.full(self.shape, -500.0).astype(self.dtype)
class TestXPUSoftmaxWithCrossEntropyOpBoundary1(
TestXPUSoftmaxWithCrossEntropyOp):
def initParams(self):
self.op_type = "softmax_with_cross_entropy"
self.numeric_stable_mode = True
self.soft_label = False
self.shape = [3, 5, 7, 11]
self.axis = -1
self.ignore_index = -1
self.dtype = np.float32
self.logits = np.full(self.shape, 1000.0).astype(self.dtype)
self.logits[:, :, 0, :] = -1000.0
if __name__ == "__main__":
unittest.main()
| true | true |
f734d4b1af0cc7fc43f7f8c7fe07948454d387eb | 5,473 | py | Python | tensorflow/python/kernel_tests/math_ops/argmax_op_test.py | computationalartist/tensorflow | b89cf636c412abdff53b3e8f201bde671c92209d | [
"Apache-2.0"
] | 190,993 | 2015-11-09T13:17:30.000Z | 2022-03-31T23:05:27.000Z | tensorflow/python/kernel_tests/math_ops/argmax_op_test.py | computationalartist/tensorflow | b89cf636c412abdff53b3e8f201bde671c92209d | [
"Apache-2.0"
] | 48,461 | 2015-11-09T14:21:11.000Z | 2022-03-31T23:17:33.000Z | tensorflow/python/kernel_tests/math_ops/argmax_op_test.py | computationalartist/tensorflow | b89cf636c412abdff53b3e8f201bde671c92209d | [
"Apache-2.0"
] | 104,981 | 2015-11-09T13:40:17.000Z | 2022-03-31T19:51:54.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.argmax_op."""
import functools
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ArgMaxTest(test.TestCase):
def _testArg(self,
method,
x,
axis,
expected_values,
use_gpu=False,
expected_err_re=None):
with self.session(use_gpu=use_gpu):
ans = method(x, axis=axis)
if expected_err_re is None:
tf_ans = self.evaluate(ans)
# Defaults to int64 output.
self.assertEqual(np.int64, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
self.assertShapeEqual(expected_values, ans)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testBothArg(self,
method,
x,
axis,
expected_values,
expected_err_re=None):
self._testArg(method, x, axis, expected_values, True, expected_err_re)
# Compilation time is too large with XLA/CPU autojit.
if not test_util.is_xla_enabled():
self._testArg(method, x, axis, expected_values, False, expected_err_re)
def _testBasic(self, dtype):
x = np.arange(200, dtype=np.float32).astype(dtype)
np.random.shuffle(x)
# Check that argmin and argmax match numpy along the primary axis
self._testBothArg(math_ops.argmax, x, 0, x.argmax())
self._testBothArg(math_ops.argmin, x, 0, x.argmin())
def _testTieBreaking(self, dtype):
x = np.zeros(200, dtype=dtype)
# Check that argmin and argmax match numpy along the primary axis for
# breaking ties.
self._testBothArg(math_ops.argmax, x, 0, x.argmax())
self._testBothArg(math_ops.argmin, x, 0, x.argmin())
# Check that argmin and argmax match numpy along axis=1 for
# breaking ties.
x = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [0, 1, 0, 1]], dtype=dtype)
self._testBothArg(math_ops.argmax, x, 1, x.argmax(axis=1))
self._testBothArg(math_ops.argmin, x, 1, x.argmin(axis=1))
def _testDim(self, dtype):
shape = (3, 2, 4, 5, 6, 3, 7)
x = np.arange(
functools.reduce(lambda x, y: x * y, shape),
dtype=np.float32).astype(dtype)
np.random.shuffle(x)
x = x.reshape(shape)
# Check that argmin and argmax match numpy along all axes
for axis in range(-7, 7):
self._testBothArg(math_ops.argmax, x, axis, x.argmax(axis))
self._testBothArg(math_ops.argmin, x, axis, x.argmin(axis))
def testFloat(self):
self._testBasic(np.float32)
self._testTieBreaking(np.float32)
self._testDim(np.float32)
def testFloatInt32Output(self):
x = np.asarray(100 * np.random.randn(200), dtype=np.float32)
expected_values = x.argmax()
with self.session():
ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
# The values are equal when comparing int32 to int64 because
# the values don't have a range that exceeds 32-bit integers.
self.assertAllEqual(tf_ans, expected_values)
expected_values = x.argmin()
with self.session():
ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
def testDouble(self):
self._testBasic(np.float64)
self._testTieBreaking(np.float64)
self._testDim(np.float64)
def testInt32(self):
self._testBasic(np.int32)
self._testTieBreaking(np.int32)
self._testDim(np.int32)
def testInt64(self):
self._testBasic(np.int64)
self._testTieBreaking(np.int64)
self._testDim(np.int64)
def testBool(self):
self._testBasic(np.bool_)
self._testTieBreaking(np.bool_)
self._testDim(np.bool_)
def testEmpty(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
with self.assertRaisesOpError(
r"Reduction axis 0 is empty in shape \[0\]"):
op([], 0).eval()
@test_util.run_deprecated_v1
def testDefaultAxis(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
ans = op([1]).eval()
self.assertAllEqual(ans, 0)
@test_util.run_deprecated_v1
def testOutputEmpty(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
ret = op(array_ops.zeros(shape=[1, 0, 2]), axis=-1).eval()
self.assertEqual(ret.shape, (1, 0))
if __name__ == "__main__":
test.main()
| 34.20625 | 80 | 0.661794 |
import functools
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ArgMaxTest(test.TestCase):
def _testArg(self,
method,
x,
axis,
expected_values,
use_gpu=False,
expected_err_re=None):
with self.session(use_gpu=use_gpu):
ans = method(x, axis=axis)
if expected_err_re is None:
tf_ans = self.evaluate(ans)
self.assertEqual(np.int64, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
self.assertShapeEqual(expected_values, ans)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testBothArg(self,
method,
x,
axis,
expected_values,
expected_err_re=None):
self._testArg(method, x, axis, expected_values, True, expected_err_re)
if not test_util.is_xla_enabled():
self._testArg(method, x, axis, expected_values, False, expected_err_re)
def _testBasic(self, dtype):
x = np.arange(200, dtype=np.float32).astype(dtype)
np.random.shuffle(x)
self._testBothArg(math_ops.argmax, x, 0, x.argmax())
self._testBothArg(math_ops.argmin, x, 0, x.argmin())
def _testTieBreaking(self, dtype):
x = np.zeros(200, dtype=dtype)
self._testBothArg(math_ops.argmax, x, 0, x.argmax())
self._testBothArg(math_ops.argmin, x, 0, x.argmin())
x = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [0, 1, 0, 1]], dtype=dtype)
self._testBothArg(math_ops.argmax, x, 1, x.argmax(axis=1))
self._testBothArg(math_ops.argmin, x, 1, x.argmin(axis=1))
def _testDim(self, dtype):
shape = (3, 2, 4, 5, 6, 3, 7)
x = np.arange(
functools.reduce(lambda x, y: x * y, shape),
dtype=np.float32).astype(dtype)
np.random.shuffle(x)
x = x.reshape(shape)
for axis in range(-7, 7):
self._testBothArg(math_ops.argmax, x, axis, x.argmax(axis))
self._testBothArg(math_ops.argmin, x, axis, x.argmin(axis))
def testFloat(self):
self._testBasic(np.float32)
self._testTieBreaking(np.float32)
self._testDim(np.float32)
def testFloatInt32Output(self):
x = np.asarray(100 * np.random.randn(200), dtype=np.float32)
expected_values = x.argmax()
with self.session():
ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
expected_values = x.argmin()
with self.session():
ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
def testDouble(self):
self._testBasic(np.float64)
self._testTieBreaking(np.float64)
self._testDim(np.float64)
def testInt32(self):
self._testBasic(np.int32)
self._testTieBreaking(np.int32)
self._testDim(np.int32)
def testInt64(self):
self._testBasic(np.int64)
self._testTieBreaking(np.int64)
self._testDim(np.int64)
def testBool(self):
self._testBasic(np.bool_)
self._testTieBreaking(np.bool_)
self._testDim(np.bool_)
def testEmpty(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
with self.assertRaisesOpError(
r"Reduction axis 0 is empty in shape \[0\]"):
op([], 0).eval()
@test_util.run_deprecated_v1
def testDefaultAxis(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
ans = op([1]).eval()
self.assertAllEqual(ans, 0)
@test_util.run_deprecated_v1
def testOutputEmpty(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
ret = op(array_ops.zeros(shape=[1, 0, 2]), axis=-1).eval()
self.assertEqual(ret.shape, (1, 0))
if __name__ == "__main__":
test.main()
| true | true |
f734d54a61d5c8de8595c92a6ae6ea4360716489 | 14,386 | py | Python | pyscf/grad/casci.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 501 | 2018-12-06T23:48:17.000Z | 2022-03-31T11:53:18.000Z | pyscf/grad/casci.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 710 | 2018-11-26T22:04:52.000Z | 2022-03-30T03:53:12.000Z | pyscf/grad/casci.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 273 | 2018-11-26T10:10:24.000Z | 2022-03-30T12:25:28.000Z | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
CASCI analytical nuclear gradients
Ref.
J. Comput. Chem., 5, 589
'''
import sys
from functools import reduce
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.grad import rhf as rhf_grad
from pyscf.grad.mp2 import _shell_prange
from pyscf.scf import cphf
if sys.version_info < (3,):
RANGE_TYPE = list
else:
RANGE_TYPE = range
def grad_elec(mc_grad, mo_coeff=None, ci=None, atmlst=None, verbose=None):
    '''Electronic contribution to the CASCI nuclear gradients.

    Builds the relaxed one- and two-particle density matrices (including the
    orbital response from the coupled-perturbed HF / Z-vector equations) and
    contracts them with the derivative integrals.

    Args:
        mc_grad : Gradients object whose ``base`` is a CASCI solver.
        mo_coeff : MO coefficients; defaults to the underlying SCF ones.
        ci : CI vector of the target state; defaults to ``mc.ci``.
        atmlst : indices of atoms to differentiate with respect to
            (default: all atoms).
        verbose : verbosity level.

    Returns:
        ndarray of shape (len(atmlst), 3).  The nuclear-repulsion derivative
        is added separately by the caller (``Gradients.kernel``).
    '''
    mc = mc_grad.base
    if mo_coeff is None: mo_coeff = mc._scf.mo_coeff
    if ci is None: ci = mc.ci
    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.new_logger(mc_grad, verbose)
    mol = mc_grad.mol
    ncore = mc.ncore
    ncas = mc.ncas
    nocc = ncore + ncas
    nelecas = mc.nelecas
    nao, nmo = mo_coeff.shape
    nao_pair = nao * (nao+1) // 2
    mo_energy = mc._scf.mo_energy
    mo_occ = mo_coeff[:,:nocc]
    mo_core = mo_coeff[:,:ncore]
    mo_cas = mo_coeff[:,ncore:nocc]
    neleca, nelecb = mol.nelec
    # A closed-shell RHF reference is assumed throughout.
    assert(neleca == nelecb)
    orbo = mo_coeff[:,:neleca]
    orbv = mo_coeff[:,neleca:]
    # Active-space 1-/2-RDMs and the AO-basis core/active one-particle
    # densities derived from them.
    casdm1, casdm2 = mc.fcisolver.make_rdm12(ci, ncas, nelecas)
    dm_core = numpy.dot(mo_core, mo_core.T) * 2
    dm_cas = reduce(numpy.dot, (mo_cas, casdm1, mo_cas.T))
    aapa = ao2mo.kernel(mol, (mo_cas, mo_cas, mo_coeff, mo_cas), compact=False)
    aapa = aapa.reshape(ncas,ncas,nmo,ncas)
    vj, vk = mc._scf.get_jk(mol, (dm_core, dm_cas))
    h1 = mc.get_hcore()
    vhf_c = vj[0] - vk[0] * .5
    vhf_a = vj[1] - vk[1] * .5
    # Imat = h1_{pi} gamma1_{iq} + h2_{pijk} gamma_{iqkj}
    Imat = numpy.zeros((nmo,nmo))
    Imat[:,:nocc] = reduce(numpy.dot, (mo_coeff.T, h1 + vhf_c + vhf_a, mo_occ)) * 2
    Imat[:,ncore:nocc] = reduce(numpy.dot, (mo_coeff.T, h1 + vhf_c, mo_cas, casdm1))
    Imat[:,ncore:nocc] += lib.einsum('uviw,vuwt->it', aapa, casdm2)
    aapa = vj = vk = vhf_c = vhf_a = h1 = None
    # Orbital rotations entirely inside the occupied space
    # (core <-> occupied-active) or inside the virtual space
    # (active-virtual <-> external) are fixed directly by the orbital-energy
    # denominators; only the occ-virt block below requires a CPHF solve.
    ee = mo_energy[:,None] - mo_energy
    zvec = numpy.zeros_like(Imat)
    zvec[:ncore,ncore:neleca] = Imat[:ncore,ncore:neleca] / -ee[:ncore,ncore:neleca]
    zvec[ncore:neleca,:ncore] = Imat[ncore:neleca,:ncore] / -ee[ncore:neleca,:ncore]
    zvec[nocc:,neleca:nocc] = Imat[nocc:,neleca:nocc] / -ee[nocc:,neleca:nocc]
    zvec[neleca:nocc,nocc:] = Imat[neleca:nocc,nocc:] / -ee[neleca:nocc,nocc:]
    zvec_ao = reduce(numpy.dot, (mo_coeff, zvec+zvec.T, mo_coeff.T))
    vhf = mc._scf.get_veff(mol, zvec_ao) * 2
    xvo = reduce(numpy.dot, (orbv.T, vhf, orbo))
    xvo += Imat[neleca:,:neleca] - Imat[:neleca,neleca:].T
    def fvind(x):
        # Fock-matrix response to an occ-virt density perturbation; callback
        # used by the CPHF solver.
        x = x.reshape(xvo.shape)
        dm = reduce(numpy.dot, (orbv, x, orbo.T))
        v = mc._scf.get_veff(mol, dm + dm.T)
        v = reduce(numpy.dot, (orbv.T, v, orbo))
        return v * 2
    dm1resp = cphf.solve(fvind, mo_energy, mc._scf.mo_occ, xvo, max_cycle=30)[0]
    zvec[neleca:,:neleca] = dm1resp
    # Energy-weighted response density entering the overlap-derivative
    # (Pulay) terms below.
    zeta = numpy.einsum('ij,j->ij', zvec, mo_energy)
    zeta = reduce(numpy.dot, (mo_coeff, zeta, mo_coeff.T))
    zvec_ao = reduce(numpy.dot, (mo_coeff, zvec+zvec.T, mo_coeff.T))
    p1 = numpy.dot(mo_coeff[:,:neleca], mo_coeff[:,:neleca].T)
    vhf_s1occ = reduce(numpy.dot, (p1, mc._scf.get_veff(mol, zvec_ao), p1))
    # Zero the blocks already folded into zvec, then symmetrize Imat for the
    # energy-weighted density matrix contribution.
    Imat[:ncore,ncore:neleca] = 0
    Imat[ncore:neleca,:ncore] = 0
    Imat[nocc:,neleca:nocc] = 0
    Imat[neleca:nocc,nocc:] = 0
    Imat[neleca:,:neleca] = Imat[:neleca,neleca:].T
    im1 = reduce(numpy.dot, (mo_coeff, Imat, mo_coeff.T))
    casci_dm1 = dm_core + dm_cas
    hf_dm1 = mc._scf.make_rdm1(mo_coeff, mc._scf.mo_occ)
    hcore_deriv = mc_grad.hcore_generator(mol)
    s1 = mc_grad.get_ovlp(mol)
    # Pack the symmetrized active-space 2-RDM into lower-triangular AO
    # storage to match the aosym='s2kl' derivative integrals; diagonal pairs
    # are halved to compensate for the triangular packing.
    diag_idx = numpy.arange(nao)
    diag_idx = diag_idx * (diag_idx+1) // 2 + diag_idx
    casdm2_cc = casdm2 + casdm2.transpose(0,1,3,2)
    dm2buf = ao2mo._ao2mo.nr_e2(casdm2_cc.reshape(ncas**2,ncas**2), mo_cas.T,
                                (0, nao, 0, nao)).reshape(ncas**2,nao,nao)
    dm2buf = lib.pack_tril(dm2buf)
    dm2buf[:,diag_idx] *= .5
    dm2buf = dm2buf.reshape(ncas,ncas,nao_pair)
    casdm2 = casdm2_cc = None
    if atmlst is None:
        atmlst = range(mol.natm)
    aoslices = mol.aoslice_by_atom()
    de = numpy.zeros((len(atmlst),3))
    # Process the derivative ERIs in shell blocks so the unpacked integrals
    # stay within the available memory.
    max_memory = mc_grad.max_memory - lib.current_memory()[0]
    blksize = int(max_memory*.9e6/8 / ((aoslices[:,3]-aoslices[:,2]).max()*nao_pair))
    blksize = min(nao, max(2, blksize))
    for k, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = aoslices[ia]
        h1ao = hcore_deriv(ia)
        de[k] += numpy.einsum('xij,ij->x', h1ao, casci_dm1)
        de[k] += numpy.einsum('xij,ij->x', h1ao, zvec_ao)
        q1 = 0
        for b0, b1, nf in _shell_prange(mol, 0, mol.nbas, blksize):
            q0, q1 = q1, q1 + nf
            dm2_ao = lib.einsum('ijw,pi,qj->pqw', dm2buf, mo_cas[p0:p1], mo_cas[q0:q1])
            shls_slice = (shl0,shl1,b0,b1,0,mol.nbas,0,mol.nbas)
            eri1 = mol.intor('int2e_ip1', comp=3, aosym='s2kl',
                             shls_slice=shls_slice).reshape(3,p1-p0,nf,nao_pair)
            de[k] -= numpy.einsum('xijw,ijw->x', eri1, dm2_ao) * 2
            for i in range(3):
                eri1tmp = lib.unpack_tril(eri1[i].reshape((p1-p0)*nf,-1))
                eri1tmp = eri1tmp.reshape(p1-p0,nf,nao,nao)
                de[k,i] -= numpy.einsum('ijkl,ij,kl', eri1tmp, hf_dm1[p0:p1,q0:q1], zvec_ao) * 2
                de[k,i] -= numpy.einsum('ijkl,kl,ij', eri1tmp, hf_dm1, zvec_ao[p0:p1,q0:q1]) * 2
                de[k,i] += numpy.einsum('ijkl,il,kj', eri1tmp, hf_dm1[p0:p1], zvec_ao[q0:q1])
                de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, hf_dm1[q0:q1], zvec_ao[p0:p1])
                #:vhf1c, vhf1a = mc_grad.get_veff(mol, (dm_core, dm_cas))
                #:de[k] += numpy.einsum('xij,ij->x', vhf1c[:,p0:p1], casci_dm1[p0:p1]) * 2
                #:de[k] += numpy.einsum('xij,ij->x', vhf1a[:,p0:p1], dm_core[p0:p1]) * 2
                de[k,i] -= numpy.einsum('ijkl,lk,ij', eri1tmp, dm_core[q0:q1], casci_dm1[p0:p1]) * 2
                de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, dm_core[q0:q1], casci_dm1[p0:p1])
                de[k,i] -= numpy.einsum('ijkl,lk,ij', eri1tmp, dm_cas[q0:q1], dm_core[p0:p1]) * 2
                de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, dm_cas[q0:q1], dm_core[p0:p1])
            eri1 = eri1tmp = None
        # Pulay (overlap-derivative) contributions.
        de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], im1[p0:p1])
        de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], im1[:,p0:p1])
        de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], zeta[p0:p1]) * 2
        de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], zeta[:,p0:p1]) * 2
        de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], vhf_s1occ[p0:p1]) * 2
        de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], vhf_s1occ[:,p0:p1]) * 2
    log.timer('CASCI nuclear gradients', *time0)
    return de
def as_scanner(mcscf_grad, state=None):
    '''Generating a nuclear gradients scanner/solver (for geometry optimizer).

    The returned solver is a function. This function requires one argument
    "mol" as input and returns energy and first order nuclear derivatives.
    The solver will automatically use the results of last calculation as the
    initial guess of the new calculation.  All parameters assigned in the
    nuc-grad object and SCF object (DIIS, conv_tol, max_memory etc) are
    automatically applied in the solver.

    Note scanner has side effects.  It may change many underlying objects
    (_scf, with_df, with_x2c, ...) during calculation.

    Examples:

    >>> from pyscf import gto, scf, mcscf
    >>> mol = gto.M(atom='N 0 0 0; N 0 0 1.1', verbose=0)
    >>> mc_grad_scanner = mcscf.CASCI(scf.RHF(mol), 4, 4).nuc_grad_method().as_scanner()
    >>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.1'))
    >>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.5'))
    '''
    from pyscf import gto
    from pyscf.mcscf.addons import StateAverageMCSCFSolver
    # Idempotent: wrapping an existing scanner returns it unchanged.
    if isinstance(mcscf_grad, lib.GradScanner):
        return mcscf_grad
    if (state is not None and
        isinstance(mcscf_grad.base, StateAverageMCSCFSolver)):
        raise RuntimeError('State-Average MCSCF Gradients does not support '
                           'state-specific nuclear gradients.')
    logger.info(mcscf_grad, 'Create scanner for %s', mcscf_grad.__class__)
    class CASCI_GradScanner(mcscf_grad.__class__, lib.GradScanner):
        def __init__(self, g):
            lib.GradScanner.__init__(self, g)
        def __call__(self, mol_or_geom, state=state, **kwargs):
            # Accept either a Mole object or a raw geometry specification.
            if isinstance(mol_or_geom, gto.Mole):
                mol = mol_or_geom
            else:
                mol = self.mol.set_geom_(mol_or_geom, inplace=False)
            if state is None:
                state = self.state
            mc_scanner = self.base
# TODO: Check root flip
            e_tot = mc_scanner(mol)
            ci = mc_scanner.ci
            if isinstance(mc_scanner, StateAverageMCSCFSolver):
                e_tot = mc_scanner.e_average
            elif not isinstance(e_tot, float):
                if state >= mc_scanner.fcisolver.nroots:
                    raise ValueError('State ID greater than the number of CASCI roots')
                e_tot = e_tot[state]
                # target at a specific state, to avoid overwriting self.state
                # in self.kernel
                ci = ci[state]
            self.mol = mol
            de = self.kernel(ci=ci, state=state, **kwargs)
            return e_tot, de
    return CASCI_GradScanner(mcscf_grad)
class Gradients(rhf_grad.GradientsMixin):
    '''Non-relativistic CASCI analytical nuclear gradients'''
    def __init__(self, mc):
        from pyscf.mcscf.addons import StateAverageMCSCFSolver
        if isinstance(mc, StateAverageMCSCFSolver):
            self.state = None  # not a specific state
        else:
            self.state = 0  # of which the gradients to be computed.
        rhf_grad.GradientsMixin.__init__(self, mc)

    def dump_flags(self, verbose=None):
        # Log the gradient configuration; warn if the CASCI wavefunction the
        # gradients are based on did not converge.
        log = logger.new_logger(self, verbose)
        log.info('\n')
        if not self.base.converged:
            log.warn('Ground state %s not converged', self.base.__class__)
        log.info('******** %s for %s ********',
                 self.__class__, self.base.__class__)
        if self.state is None:
            weights = self.base.weights
            log.info('State-average gradients over %d states with weights %s',
                     len(weights), weights)
        elif self.state != 0 and self.base.fcisolver.nroots > 1:
            log.info('State ID = %d', self.state)
        log.info('max_memory %d MB (current use %d MB)',
                 self.max_memory, lib.current_memory()[0])
        return self

    grad_elec = grad_elec

    def kernel(self, mo_coeff=None, ci=None, atmlst=None,
               state=None, verbose=None):
        '''Total nuclear gradients (electronic + nuclear repulsion) of the
        selected CASCI state.  Returns an (natm, 3) array in self.de.'''
        log = logger.new_logger(self, verbose)
        if ci is None: ci = self.base.ci
        if self.state is None:  # state average MCSCF calculations
            assert(state is None)
        elif isinstance(ci, (list, tuple, RANGE_TYPE)):
            # Multiple roots: pick the CI vector of the requested state.
            if state is None:
                state = self.state
            else:
                self.state = state
            ci = ci[state]
            log.info('Multiple roots are found in CASCI solver. '
                     'Nuclear gradients of root %d are computed.', state)
        if atmlst is None:
            atmlst = self.atmlst
        else:
            self.atmlst = atmlst
        if self.verbose >= logger.WARN:
            self.check_sanity()
        if self.verbose >= logger.INFO:
            self.dump_flags()
        de = self.grad_elec(mo_coeff, ci, atmlst, log)
        self.de = de = de + self.grad_nuc(atmlst=atmlst)
        if self.mol.symmetry:
            self.de = self.symmetrize(self.de, atmlst)
        self._finalize()
        return self.de

    # Initialize hcore_deriv with the underlying SCF object because some
    # extensions (e.g. x2c, QM/MM, solvent) modifies the SCF object only.
    def hcore_generator(self, mol=None):
        mf_grad = self.base._scf.nuc_grad_method()
        return mf_grad.hcore_generator(mol)

    # Calling the underlying SCF nuclear gradients because it may be modified
    # by external modules (e.g. QM/MM, solvent)
    def grad_nuc(self, mol=None, atmlst=None):
        mf_grad = self.base._scf.nuc_grad_method()
        return mf_grad.grad_nuc(mol, atmlst)

    def _finalize(self):
        # Pretty-print the final gradient table at NOTE verbosity.
        if self.verbose >= logger.NOTE:
            if self.state is None:
                logger.note(self, '--------- %s gradients ----------',
                            self.base.__class__.__name__)
            else:
                logger.note(self, '--------- %s gradients for state %d ----------',
                            self.base.__class__.__name__, self.state)
            self._write(self.mol, self.de, self.atmlst)
            logger.note(self, '----------------------------------------------')

    as_scanner = as_scanner
Grad = Gradients  # backward-compatible alias

from pyscf import mcscf
# Make mc.Gradients() / mc.nuc_grad_method() available on CASCI objects.
mcscf.casci.CASCI.Gradients = lib.class_as_method(Gradients)
if __name__ == '__main__':
    # Smoke test: analytical gradients vs a reference fingerprint and a
    # central finite-difference of the scanner energies.
    from pyscf import gto
    from pyscf import scf
    from pyscf import mcscf

    mol = gto.Mole()
    mol.atom = 'N 0 0 0; N 0 0 1.2; H 1 1 0; H 1 1 1.2'
    mol.build()
    mf = scf.RHF(mol).run(conv_tol=1e-14)
    mc = mcscf.CASCI(mf, 4, 4).run()
    g1 = mc.Gradients().kernel()
    # Should print ~0 (difference from the stored reference fingerprint).
    print(lib.finger(g1) - -0.066025991364829367)

    mcs = mc.as_scanner()
    mol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2')
    e1 = mcs(mol)
    mol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2')
    e2 = mcs(mol)
    # Analytical d(E)/dz of atom 1 vs finite difference (Bohr units).
    print(g1[1,2], (e1-e2)/0.002*lib.param.BOHR)
| 39.740331 | 100 | 0.605172 |
import sys
from functools import reduce
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.grad import rhf as rhf_grad
from pyscf.grad.mp2 import _shell_prange
from pyscf.scf import cphf
if sys.version_info < (3,):
RANGE_TYPE = list
else:
RANGE_TYPE = range
def grad_elec(mc_grad, mo_coeff=None, ci=None, atmlst=None, verbose=None):
mc = mc_grad.base
if mo_coeff is None: mo_coeff = mc._scf.mo_coeff
if ci is None: ci = mc.ci
time0 = logger.process_clock(), logger.perf_counter()
log = logger.new_logger(mc_grad, verbose)
mol = mc_grad.mol
ncore = mc.ncore
ncas = mc.ncas
nocc = ncore + ncas
nelecas = mc.nelecas
nao, nmo = mo_coeff.shape
nao_pair = nao * (nao+1) // 2
mo_energy = mc._scf.mo_energy
mo_occ = mo_coeff[:,:nocc]
mo_core = mo_coeff[:,:ncore]
mo_cas = mo_coeff[:,ncore:nocc]
neleca, nelecb = mol.nelec
assert(neleca == nelecb)
orbo = mo_coeff[:,:neleca]
orbv = mo_coeff[:,neleca:]
casdm1, casdm2 = mc.fcisolver.make_rdm12(ci, ncas, nelecas)
dm_core = numpy.dot(mo_core, mo_core.T) * 2
dm_cas = reduce(numpy.dot, (mo_cas, casdm1, mo_cas.T))
aapa = ao2mo.kernel(mol, (mo_cas, mo_cas, mo_coeff, mo_cas), compact=False)
aapa = aapa.reshape(ncas,ncas,nmo,ncas)
vj, vk = mc._scf.get_jk(mol, (dm_core, dm_cas))
h1 = mc.get_hcore()
vhf_c = vj[0] - vk[0] * .5
vhf_a = vj[1] - vk[1] * .5
Imat = numpy.zeros((nmo,nmo))
Imat[:,:nocc] = reduce(numpy.dot, (mo_coeff.T, h1 + vhf_c + vhf_a, mo_occ)) * 2
Imat[:,ncore:nocc] = reduce(numpy.dot, (mo_coeff.T, h1 + vhf_c, mo_cas, casdm1))
Imat[:,ncore:nocc] += lib.einsum('uviw,vuwt->it', aapa, casdm2)
aapa = vj = vk = vhf_c = vhf_a = h1 = None
ee = mo_energy[:,None] - mo_energy
zvec = numpy.zeros_like(Imat)
zvec[:ncore,ncore:neleca] = Imat[:ncore,ncore:neleca] / -ee[:ncore,ncore:neleca]
zvec[ncore:neleca,:ncore] = Imat[ncore:neleca,:ncore] / -ee[ncore:neleca,:ncore]
zvec[nocc:,neleca:nocc] = Imat[nocc:,neleca:nocc] / -ee[nocc:,neleca:nocc]
zvec[neleca:nocc,nocc:] = Imat[neleca:nocc,nocc:] / -ee[neleca:nocc,nocc:]
zvec_ao = reduce(numpy.dot, (mo_coeff, zvec+zvec.T, mo_coeff.T))
vhf = mc._scf.get_veff(mol, zvec_ao) * 2
xvo = reduce(numpy.dot, (orbv.T, vhf, orbo))
xvo += Imat[neleca:,:neleca] - Imat[:neleca,neleca:].T
def fvind(x):
x = x.reshape(xvo.shape)
dm = reduce(numpy.dot, (orbv, x, orbo.T))
v = mc._scf.get_veff(mol, dm + dm.T)
v = reduce(numpy.dot, (orbv.T, v, orbo))
return v * 2
dm1resp = cphf.solve(fvind, mo_energy, mc._scf.mo_occ, xvo, max_cycle=30)[0]
zvec[neleca:,:neleca] = dm1resp
zeta = numpy.einsum('ij,j->ij', zvec, mo_energy)
zeta = reduce(numpy.dot, (mo_coeff, zeta, mo_coeff.T))
zvec_ao = reduce(numpy.dot, (mo_coeff, zvec+zvec.T, mo_coeff.T))
p1 = numpy.dot(mo_coeff[:,:neleca], mo_coeff[:,:neleca].T)
vhf_s1occ = reduce(numpy.dot, (p1, mc._scf.get_veff(mol, zvec_ao), p1))
Imat[:ncore,ncore:neleca] = 0
Imat[ncore:neleca,:ncore] = 0
Imat[nocc:,neleca:nocc] = 0
Imat[neleca:nocc,nocc:] = 0
Imat[neleca:,:neleca] = Imat[:neleca,neleca:].T
im1 = reduce(numpy.dot, (mo_coeff, Imat, mo_coeff.T))
casci_dm1 = dm_core + dm_cas
hf_dm1 = mc._scf.make_rdm1(mo_coeff, mc._scf.mo_occ)
hcore_deriv = mc_grad.hcore_generator(mol)
s1 = mc_grad.get_ovlp(mol)
diag_idx = numpy.arange(nao)
diag_idx = diag_idx * (diag_idx+1) // 2 + diag_idx
casdm2_cc = casdm2 + casdm2.transpose(0,1,3,2)
dm2buf = ao2mo._ao2mo.nr_e2(casdm2_cc.reshape(ncas**2,ncas**2), mo_cas.T,
(0, nao, 0, nao)).reshape(ncas**2,nao,nao)
dm2buf = lib.pack_tril(dm2buf)
dm2buf[:,diag_idx] *= .5
dm2buf = dm2buf.reshape(ncas,ncas,nao_pair)
casdm2 = casdm2_cc = None
if atmlst is None:
atmlst = range(mol.natm)
aoslices = mol.aoslice_by_atom()
de = numpy.zeros((len(atmlst),3))
max_memory = mc_grad.max_memory - lib.current_memory()[0]
blksize = int(max_memory*.9e6/8 / ((aoslices[:,3]-aoslices[:,2]).max()*nao_pair))
blksize = min(nao, max(2, blksize))
for k, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = aoslices[ia]
h1ao = hcore_deriv(ia)
de[k] += numpy.einsum('xij,ij->x', h1ao, casci_dm1)
de[k] += numpy.einsum('xij,ij->x', h1ao, zvec_ao)
q1 = 0
for b0, b1, nf in _shell_prange(mol, 0, mol.nbas, blksize):
q0, q1 = q1, q1 + nf
dm2_ao = lib.einsum('ijw,pi,qj->pqw', dm2buf, mo_cas[p0:p1], mo_cas[q0:q1])
shls_slice = (shl0,shl1,b0,b1,0,mol.nbas,0,mol.nbas)
eri1 = mol.intor('int2e_ip1', comp=3, aosym='s2kl',
shls_slice=shls_slice).reshape(3,p1-p0,nf,nao_pair)
de[k] -= numpy.einsum('xijw,ijw->x', eri1, dm2_ao) * 2
for i in range(3):
eri1tmp = lib.unpack_tril(eri1[i].reshape((p1-p0)*nf,-1))
eri1tmp = eri1tmp.reshape(p1-p0,nf,nao,nao)
de[k,i] -= numpy.einsum('ijkl,ij,kl', eri1tmp, hf_dm1[p0:p1,q0:q1], zvec_ao) * 2
de[k,i] -= numpy.einsum('ijkl,kl,ij', eri1tmp, hf_dm1, zvec_ao[p0:p1,q0:q1]) * 2
de[k,i] += numpy.einsum('ijkl,il,kj', eri1tmp, hf_dm1[p0:p1], zvec_ao[q0:q1])
de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, hf_dm1[q0:q1], zvec_ao[p0:p1])
de[k,i] -= numpy.einsum('ijkl,lk,ij', eri1tmp, dm_core[q0:q1], casci_dm1[p0:p1]) * 2
de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, dm_core[q0:q1], casci_dm1[p0:p1])
de[k,i] -= numpy.einsum('ijkl,lk,ij', eri1tmp, dm_cas[q0:q1], dm_core[p0:p1]) * 2
de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, dm_cas[q0:q1], dm_core[p0:p1])
eri1 = eri1tmp = None
de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], im1[p0:p1])
de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], im1[:,p0:p1])
de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], zeta[p0:p1]) * 2
de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], zeta[:,p0:p1]) * 2
de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], vhf_s1occ[p0:p1]) * 2
de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], vhf_s1occ[:,p0:p1]) * 2
log.timer('CASCI nuclear gradients', *time0)
return de
def as_scanner(mcscf_grad, state=None):
from pyscf import gto
from pyscf.mcscf.addons import StateAverageMCSCFSolver
if isinstance(mcscf_grad, lib.GradScanner):
return mcscf_grad
if (state is not None and
isinstance(mcscf_grad.base, StateAverageMCSCFSolver)):
raise RuntimeError('State-Average MCSCF Gradients does not support '
'state-specific nuclear gradients.')
logger.info(mcscf_grad, 'Create scanner for %s', mcscf_grad.__class__)
class CASCI_GradScanner(mcscf_grad.__class__, lib.GradScanner):
def __init__(self, g):
lib.GradScanner.__init__(self, g)
def __call__(self, mol_or_geom, state=state, **kwargs):
if isinstance(mol_or_geom, gto.Mole):
mol = mol_or_geom
else:
mol = self.mol.set_geom_(mol_or_geom, inplace=False)
if state is None:
state = self.state
mc_scanner = self.base
e_tot = mc_scanner(mol)
ci = mc_scanner.ci
if isinstance(mc_scanner, StateAverageMCSCFSolver):
e_tot = mc_scanner.e_average
elif not isinstance(e_tot, float):
if state >= mc_scanner.fcisolver.nroots:
raise ValueError('State ID greater than the number of CASCI roots')
e_tot = e_tot[state]
ci = ci[state]
self.mol = mol
de = self.kernel(ci=ci, state=state, **kwargs)
return e_tot, de
return CASCI_GradScanner(mcscf_grad)
class Gradients(rhf_grad.GradientsMixin):
def __init__(self, mc):
from pyscf.mcscf.addons import StateAverageMCSCFSolver
if isinstance(mc, StateAverageMCSCFSolver):
self.state = None
else:
self.state = 0
rhf_grad.GradientsMixin.__init__(self, mc)
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('\n')
if not self.base.converged:
log.warn('Ground state %s not converged', self.base.__class__)
log.info('******** %s for %s ********',
self.__class__, self.base.__class__)
if self.state is None:
weights = self.base.weights
log.info('State-average gradients over %d states with weights %s',
len(weights), weights)
elif self.state != 0 and self.base.fcisolver.nroots > 1:
log.info('State ID = %d', self.state)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, lib.current_memory()[0])
return self
grad_elec = grad_elec
def kernel(self, mo_coeff=None, ci=None, atmlst=None,
state=None, verbose=None):
log = logger.new_logger(self, verbose)
if ci is None: ci = self.base.ci
if self.state is None:
assert(state is None)
elif isinstance(ci, (list, tuple, RANGE_TYPE)):
if state is None:
state = self.state
else:
self.state = state
ci = ci[state]
log.info('Multiple roots are found in CASCI solver. '
'Nuclear gradients of root %d are computed.', state)
if atmlst is None:
atmlst = self.atmlst
else:
self.atmlst = atmlst
if self.verbose >= logger.WARN:
self.check_sanity()
if self.verbose >= logger.INFO:
self.dump_flags()
de = self.grad_elec(mo_coeff, ci, atmlst, log)
self.de = de = de + self.grad_nuc(atmlst=atmlst)
if self.mol.symmetry:
self.de = self.symmetrize(self.de, atmlst)
self._finalize()
return self.de
def hcore_generator(self, mol=None):
mf_grad = self.base._scf.nuc_grad_method()
return mf_grad.hcore_generator(mol)
def grad_nuc(self, mol=None, atmlst=None):
mf_grad = self.base._scf.nuc_grad_method()
return mf_grad.grad_nuc(mol, atmlst)
def _finalize(self):
if self.verbose >= logger.NOTE:
if self.state is None:
logger.note(self, '--------- %s gradients ----------',
self.base.__class__.__name__)
else:
logger.note(self, '--------- %s gradients for state %d ----------',
self.base.__class__.__name__, self.state)
self._write(self.mol, self.de, self.atmlst)
logger.note(self, '----------------------------------------------')
as_scanner = as_scanner
Grad = Gradients
from pyscf import mcscf
mcscf.casci.CASCI.Gradients = lib.class_as_method(Gradients)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
mol = gto.Mole()
mol.atom = 'N 0 0 0; N 0 0 1.2; H 1 1 0; H 1 1 1.2'
mol.build()
mf = scf.RHF(mol).run(conv_tol=1e-14)
mc = mcscf.CASCI(mf, 4, 4).run()
g1 = mc.Gradients().kernel()
print(lib.finger(g1) - -0.066025991364829367)
mcs = mc.as_scanner()
mol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2')
e1 = mcs(mol)
mol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2')
e2 = mcs(mol)
print(g1[1,2], (e1-e2)/0.002*lib.param.BOHR)
| true | true |
f734d5b2ccd738ec641aa3b8d778120e95269d71 | 18,438 | py | Python | keras/utils/layer_utils_test.py | tsheaff/keras | ee227dda766d769b7499a5549e8ed77b5e88105b | [
"Apache-2.0"
] | 1 | 2020-02-02T04:43:33.000Z | 2020-02-02T04:43:33.000Z | keras/utils/layer_utils_test.py | tsheaff/keras | ee227dda766d769b7499a5549e8ed77b5e88105b | [
"Apache-2.0"
] | 1 | 2022-03-31T16:40:51.000Z | 2022-03-31T16:40:51.000Z | keras/utils/layer_utils_test.py | tsheaff/keras | ee227dda766d769b7499a5549e8ed77b5e88105b | [
"Apache-2.0"
] | 1 | 2020-12-13T22:14:48.000Z | 2020-12-13T22:14:48.000Z | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer_utils."""
import keras
import tensorflow.compat.v2 as tf
import collections
import contextlib
import multiprocessing.dummy
import os
import pickle
import shutil
import sys
import time
import timeit
import numpy as np
from keras.utils import io_utils
from keras.utils import layer_utils
_PICKLEABLE_CALL_COUNT = collections.Counter()
class MyPickleableObject(tf.__internal__.tracking.AutoTrackable):
  """Needed for InterfaceTests.test_property_cache_serialization.

  This class must be at the top level. This is a constraint of pickle,
  unrelated to `cached_per_instance`.
  """

  @property
  @layer_utils.cached_per_instance
  def my_id(self):
    # Record every evaluation so tests can assert the cache prevents
    # recomputation after pickling round-trips.
    _PICKLEABLE_CALL_COUNT[self] += 1
    return id(self)
class LayerUtilsTest(tf.test.TestCase):
def test_print_summary(self):
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv'))
model.add(keras.layers.Flatten(name='flat'))
model.add(keras.layers.Dense(5, name='dense'))
file_name = 'model_1.txt'
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
fpath = os.path.join(temp_dir, file_name)
writer = open(fpath, 'w')
def print_to_file(text):
print(text, file=writer)
try:
layer_utils.print_summary(model, print_fn=print_to_file)
self.assertTrue(tf.io.gfile.exists(fpath))
writer.close()
reader = open(fpath, 'r')
lines = reader.readlines()
reader.close()
self.assertEqual(len(lines), 15)
except ImportError:
pass
def test_print_summary_without_print_fn(self):
model = keras.Sequential([
keras.layers.Dense(5, input_shape=(10,), name='dense')])
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
layer_utils.print_summary(model)
self.assertIn('dense (Dense)', printed.contents())
  def test_print_summary_expand_nested(self):
    """`expand_nested=True` recurses into nested Functional models,
    rendering inner layers inside `|`-bordered boxes."""
    shape = (None, None, 3)

    def make_model():
      x = inputs = keras.Input(shape)
      x = keras.layers.Conv2D(3, 1)(x)
      x = keras.layers.BatchNormalization()(x)
      return keras.Model(inputs, x)

    x = inner_inputs = keras.Input(shape)
    x = make_model()(x)
    inner_model = keras.Model(inner_inputs, x)

    inputs = keras.Input(shape)
    model = keras.Model(inputs, inner_model(inputs))

    file_name = 'model_2.txt'
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    fpath = os.path.join(temp_dir, file_name)
    writer = open(fpath, 'w')

    def print_to_file(text):
      print(text, file=writer)

    try:
      layer_utils.print_summary(
          model, print_fn=print_to_file, expand_nested=True)
      self.assertTrue(tf.io.gfile.exists(fpath))
      writer.close()
      reader = open(fpath, 'r')
      lines = reader.readlines()
      reader.close()
      # Expected summary; note the column widths are exact.
      check_str = (
          'Model: "model_2"\n'
          '_________________________________________________________________\n'
          ' Layer (type)                Output Shape              Param #   \n'
          '=================================================================\n'
          ' input_3 (InputLayer)       [(None, None, None, 3)]  0         \n'
          '                                                                 \n'
          ' model_1 (Functional)       (None, None, None, 3)    24        \n'
          '|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n'
          '| input_1 (InputLayer)      [(None, None, None, 3)]  0         |\n'
          '|                                                               |\n'
          '| model (Functional)        (None, None, None, 3)    24        |\n'
          '||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\n'
          '|| input_2 (InputLayer)    [(None, None, None, 3)]  0         ||\n'
          '||                                                             ||\n'
          '|| conv2d (Conv2D)         (None, None, None, 3)    12        ||\n'
          '||                                                             ||\n'
          '|| batch_normalization (BatchN (None, None, None, 3)  12      ||\n'
          '|| ormalization)                                               ||\n'
          '|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n'
          '¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n'
          '=================================================================\n'
          'Total params: 24\n'
          'Trainable params: 18\n'
          'Non-trainable params: 6\n'
          '_________________________________________________________________\n')
      fin_str = ''
      for line in lines:
        fin_str += line

      self.assertIn(fin_str, check_str)
      self.assertEqual(len(lines), 25)
    except ImportError:
      pass
  def test_summary_subclass_model_expand_nested(self):
    """Nested subclassed (non-Functional) models also expand in the summary."""

    class Sequential(keras.Model):

      def __init__(self, *args):
        super(Sequential, self).__init__()
        self.module_list = list(args) if args else []

      def call(self, x):
        # Apply the wrapped modules in order, like a minimal Sequential.
        for module in self.module_list:
          x = module(x)
        return x

    class Block(keras.Model):

      def __init__(self):
        super(Block, self).__init__()
        self.module = Sequential(
            keras.layers.Dense(10),
            keras.layers.Dense(10),
        )

      def call(self, input_tensor):
        x = self.module(input_tensor)
        return x

    class Base(keras.Model):

      def __init__(self):
        super(Base, self).__init__()
        self.module = Sequential(Block(), Block())

      def call(self, input_tensor):
        # The same sub-module is applied twice to exercise layer reuse.
        x = self.module(input_tensor)
        y = self.module(x)
        return x, y

    class Network(keras.Model):

      def __init__(self):
        super(Network, self).__init__()
        self.child = Base()

      def call(self, inputs):
        return self.child(inputs)

    net = Network()
    inputs = keras.Input(shape=(10,))
    outputs = net(inputs)
    model = keras.models.Model(inputs=inputs, outputs=outputs)

    file_name = 'model_3.txt'
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    fpath = os.path.join(temp_dir, file_name)
    writer = open(fpath, 'w')

    def print_to_file(text):
      print(text, file=writer)

    try:
      layer_utils.print_summary(
          model, line_length=120, print_fn=print_to_file, expand_nested=True)
      self.assertTrue(tf.io.gfile.exists(fpath))
      writer.close()
      reader = open(fpath, 'r')
      lines = reader.readlines()
      reader.close()
      # The output content are slightly different for the input shapes between
      # v1 and v2.
      if tf.__internal__.tf2.enabled():
        self.assertEqual(len(lines), 39)
      else:
        self.assertEqual(len(lines), 40)
    except ImportError:
      pass
  def test_print_summary_show_trainable(self):
    """`show_trainable=True` adds a Y/N trainability column to the table."""
    model = keras.Sequential(name='trainable')
    untrained = keras.layers.Conv2D(
        filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv')
    model.add(untrained)
    model.add(keras.layers.Flatten(name='flat'))
    model.add(keras.layers.Dense(5, name='dense'))

    # Freeze one layer so both 'Y' and 'N' markers appear in the output.
    untrained.trainable = False

    file_name = 'model_4.txt'
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    fpath = os.path.join(temp_dir, file_name)
    writer = open(fpath, 'w')

    def print_to_file(text):
      print(text, file=writer)

    try:
      layer_utils.print_summary(
          model, print_fn=print_to_file, show_trainable=True)
      self.assertTrue(tf.io.gfile.exists(fpath))
      writer.close()
      reader = open(fpath, 'r')
      lines = reader.readlines()
      reader.close()
      check_str = (
          'Model: '
          '"trainable"\n____________________________________________________________________________\n'
          ' Layer (type) Output Shape Param # '
          'Trainable '
          '\n============================================================================\n'
          ' conv (Conv2D) (None, 2, 3, 2) 62 N'
          ' \n'
          ' '
          '\n flat (Flatten) (None, 12) 0 '
          'Y \n'
          ' '
          '\n dense (Dense) (None, 5) 65 '
          'Y \n'
          ' '
          '\n============================================================================\nTotal'
          ' params: 127\nTrainable params: 65\nNon-trainable params: '
          '62\n____________________________________________________________________________\n'
          '____________________________________________________________________________\n'
      )

      fin_str = ''
      for line in lines:
        fin_str += line

      self.assertIn(fin_str, check_str)
      self.assertEqual(len(lines), 15)
    except ImportError:
      pass
  def test_print_summary_expand_nested_show_trainable(self):
    """Combined `expand_nested` + `show_trainable`: nested boxes keep the
    Y/N trainability column."""
    shape = (None, None, 3)

    def make_model():
      x = inputs = keras.Input(shape, name='input2')
      # Freeze the conv so an 'N' marker shows up inside the nested box.
      untrainable = keras.layers.Conv2D(3, 1)
      untrainable.trainable = False
      x = untrainable(x)
      x = keras.layers.BatchNormalization()(x)
      return keras.Model(inputs, x)

    x = inner_inputs = keras.Input(shape, name='input1')
    x = make_model()(x)
    inner_model = keras.Model(inner_inputs, x)

    inputs = keras.Input(shape, name='input3')
    model = keras.Model(inputs, inner_model(inputs))

    file_name = 'model_6.txt'
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    fpath = os.path.join(temp_dir, file_name)
    writer = open(fpath, 'w')

    def print_to_file(text):
      print(text, file=writer)

    try:
      layer_utils.print_summary(
          model,
          print_fn=print_to_file,
          expand_nested=True,
          show_trainable=True)
      self.assertTrue(tf.io.gfile.exists(fpath))
      writer.close()
      reader = open(fpath, 'r')
      lines = reader.readlines()
      reader.close()
      check_str = (
          'Model: '
          '"model_2"\n____________________________________________________________________________\n'
          ' Layer (type) Output Shape Param # '
          'Trainable '
          '\n============================================================================\n'
          ' input3 (InputLayer) [(None, None, None, 3)] 0 Y'
          ' \n'
          ' '
          '\n model_1 (Functional) (None, None, None, 3) 24 '
          'Y '
          '\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n|'
          ' input1 (InputLayer) [(None, None, None, 3)] 0 Y'
          ' |\n|'
          ' '
          '|\n| model (Functional) (None, None, None, 3) 24 '
          'Y '
          '|\n||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\n||'
          ' input2 (InputLayer) [(None, None, None, 3)] 0 Y'
          ' ||\n||'
          ' '
          '||\n|| conv2d (Conv2D) (None, None, None, 3) 12 '
          'N ||\n||'
          ' '
          '||\n|| batch_normalization (BatchN (None, None, None, 3) 12 '
          'Y ||\n|| ormalization)'
          ' '
          '||\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n============================================================================\nTotal'
          ' params: 24\nTrainable params: 6\nNon-trainable params: '
          '18\n____________________________________________________________________________\n'
          '____________________________________________________________________________\n'
      )

      fin_str = ''
      for line in lines:
        fin_str += line

      self.assertIn(fin_str, check_str)
      self.assertEqual(len(lines), 25)
    except ImportError:
      pass
  def test_property_cache(self):
    """`cached_per_instance` caches per object and never writes attributes.

    The frozen `__setattr__` guarantees the cache stores its values outside
    the instance (it would raise otherwise).
    """
    test_counter = collections.Counter()

    class MyObject(tf.__internal__.tracking.AutoTrackable):

      def __init__(self):
        super(MyObject, self).__init__()
        self._frozen = True

      def __setattr__(self, key, value):
        """Enforce that cache does not set attribute on MyObject."""
        if getattr(self, '_frozen', False):
          raise ValueError('Cannot mutate when frozen.')
        return super(MyObject, self).__setattr__(key, value)

      @property
      @layer_utils.cached_per_instance
      def test_property(self):
        test_counter[id(self)] += 1
        return id(self)

    first_object = MyObject()
    second_object = MyObject()

    # Make sure the objects return the correct values
    self.assertEqual(first_object.test_property, id(first_object))
    self.assertEqual(second_object.test_property, id(second_object))

    # Make sure the cache does not share across objects
    self.assertNotEqual(first_object.test_property, second_object.test_property)

    # Check again (Now the values should be cached.)
    self.assertEqual(first_object.test_property, id(first_object))
    self.assertEqual(second_object.test_property, id(second_object))

    # Count the function calls to make sure the cache is actually being used.
    self.assertAllEqual(tuple(test_counter.values()), (1, 1))
  def test_property_cache_threaded(self):
    """The per-instance cache returns one stable value under concurrency."""
    call_count = collections.Counter()
    class MyObject(tf.__internal__.tracking.AutoTrackable):
      @property
      @layer_utils.cached_per_instance
      def test_property(self):
        # Random sleeps to ensure that the execution thread changes
        # mid-computation.
        call_count['test_property'] += 1
        time.sleep(np.random.random() + 1.)
        # Use a RandomState which is seeded off the instance's id (the mod is
        # because numpy limits the range of seeds) to ensure that an instance
        # returns the same value in different threads, but different instances
        # return different values.
        return int(np.random.RandomState(id(self) % (2 ** 31)).randint(2 ** 16))
      def get_test_property(self, _):
        """Function provided to .map for threading test."""
        return self.test_property
    # Test that multiple threads return the same value. This requires that
    # the underlying function is repeatable, as cached_property makes no attempt
    # to prioritize the first call.
    test_obj = MyObject()
    with contextlib.closing(multiprocessing.dummy.Pool(32)) as pool:
      # Intentionally make a large pool (even when there are only a small number
      # of cpus) to ensure that the runtime switches threads.
      results = pool.map(test_obj.get_test_property, range(64))
      self.assertEqual(len(set(results)), 1)
      # Make sure we actually are testing threaded behavior.
      self.assertGreater(call_count['test_property'], 1)
    # Make sure new threads still cache hit.
    with contextlib.closing(multiprocessing.dummy.Pool(2)) as pool:
      start_time = timeit.default_timer()  # Don't time pool instantiation.
      results = pool.map(test_obj.get_test_property, range(4))
    total_time = timeit.default_timer() - start_time
    # Note(taylorrobie): The reason that it is safe to time a unit test is that
    #                    a cache hit will be << 1 second, and a cache miss is
    #                    guaranteed to be >= 1 second. Empirically confirmed by
    #                    100,000 runs with no flakes.
    self.assertLess(total_time, 0.95)
  def test_property_cache_serialization(self):
    """Pickling works and the decorator cache is not serialized with objects."""
    # Reset call count. .keys() must be wrapped in a list, because otherwise we
    # would mutate the iterator while iterating.
    for k in list(_PICKLEABLE_CALL_COUNT.keys()):
      _PICKLEABLE_CALL_COUNT.pop(k)
    first_instance = MyPickleableObject()
    self.assertEqual(id(first_instance), first_instance.my_id)
    # Test that we can pickle and un-pickle
    second_instance = pickle.loads(pickle.dumps(first_instance))
    self.assertEqual(id(second_instance), second_instance.my_id)
    self.assertNotEqual(first_instance.my_id, second_instance.my_id)
    # Make sure de-serialized object uses the cache.
    self.assertEqual(_PICKLEABLE_CALL_COUNT[second_instance], 1)
    # Make sure the decorator cache is not being serialized with the object.
    expected_size = len(pickle.dumps(second_instance))
    for _ in range(5):
      # Add some more entries to the cache.
      _ = MyPickleableObject().my_id
    self.assertEqual(len(_PICKLEABLE_CALL_COUNT), 7)
    size_check_instance = MyPickleableObject()
    _ = size_check_instance.my_id
    self.assertEqual(expected_size, len(pickle.dumps(size_check_instance)))
# Allow running this test module directly, e.g. `python layer_utils_test.py`.
if __name__ == '__main__':
  tf.test.main()
| 37.628571 | 255 | 0.558087 |
import keras
import tensorflow.compat.v2 as tf
import collections
import contextlib
import multiprocessing.dummy
import os
import pickle
import shutil
import sys
import time
import timeit
import numpy as np
from keras.utils import io_utils
from keras.utils import layer_utils
_PICKLEABLE_CALL_COUNT = collections.Counter()
class MyPickleableObject(tf.__internal__.tracking.AutoTrackable):
@property
@layer_utils.cached_per_instance
def my_id(self):
_PICKLEABLE_CALL_COUNT[self] += 1
return id(self)
class LayerUtilsTest(tf.test.TestCase):
def test_print_summary(self):
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv'))
model.add(keras.layers.Flatten(name='flat'))
model.add(keras.layers.Dense(5, name='dense'))
file_name = 'model_1.txt'
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
fpath = os.path.join(temp_dir, file_name)
writer = open(fpath, 'w')
def print_to_file(text):
print(text, file=writer)
try:
layer_utils.print_summary(model, print_fn=print_to_file)
self.assertTrue(tf.io.gfile.exists(fpath))
writer.close()
reader = open(fpath, 'r')
lines = reader.readlines()
reader.close()
self.assertEqual(len(lines), 15)
except ImportError:
pass
def test_print_summary_without_print_fn(self):
model = keras.Sequential([
keras.layers.Dense(5, input_shape=(10,), name='dense')])
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
layer_utils.print_summary(model)
self.assertIn('dense (Dense)', printed.contents())
def test_print_summary_expand_nested(self):
shape = (None, None, 3)
def make_model():
x = inputs = keras.Input(shape)
x = keras.layers.Conv2D(3, 1)(x)
x = keras.layers.BatchNormalization()(x)
return keras.Model(inputs, x)
x = inner_inputs = keras.Input(shape)
x = make_model()(x)
inner_model = keras.Model(inner_inputs, x)
inputs = keras.Input(shape)
model = keras.Model(inputs, inner_model(inputs))
file_name = 'model_2.txt'
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
fpath = os.path.join(temp_dir, file_name)
writer = open(fpath, 'w')
def print_to_file(text):
print(text, file=writer)
try:
layer_utils.print_summary(
model, print_fn=print_to_file, expand_nested=True)
self.assertTrue(tf.io.gfile.exists(fpath))
writer.close()
reader = open(fpath, 'r')
lines = reader.readlines()
reader.close()
check_str = (
'Model: "model_2"\n'
'_________________________________________________________________\n'
' Layer (type) Output Shape Param # \n'
'=================================================================\n'
' input_3 (InputLayer) [(None, None, None, 3)] 0 \n'
' \n'
' model_1 (Functional) (None, None, None, 3) 24 \n'
'|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n'
'| input_1 (InputLayer) [(None, None, None, 3)] 0 |\n'
'| |\n'
'| model (Functional) (None, None, None, 3) 24 |\n'
'||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\n'
'|| input_2 (InputLayer) [(None, None, None, 3)] 0 ||\n'
'|| ||\n'
'|| conv2d (Conv2D) (None, None, None, 3) 12 ||\n'
'|| ||\n'
'|| batch_normalization (BatchN (None, None, None, 3) 12 ||\n'
'|| ormalization) ||\n'
'|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n'
'¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n'
'=================================================================\n'
'Total params: 24\n'
'Trainable params: 18\n'
'Non-trainable params: 6\n'
'_________________________________________________________________\n')
fin_str = ''
for line in lines:
fin_str += line
self.assertIn(fin_str, check_str)
self.assertEqual(len(lines), 25)
except ImportError:
pass
def test_summary_subclass_model_expand_nested(self):
class Sequential(keras.Model):
def __init__(self, *args):
super(Sequential, self).__init__()
self.module_list = list(args) if args else []
def call(self, x):
for module in self.module_list:
x = module(x)
return x
class Block(keras.Model):
def __init__(self):
super(Block, self).__init__()
self.module = Sequential(
keras.layers.Dense(10),
keras.layers.Dense(10),
)
def call(self, input_tensor):
x = self.module(input_tensor)
return x
class Base(keras.Model):
def __init__(self):
super(Base, self).__init__()
self.module = Sequential(Block(), Block())
def call(self, input_tensor):
x = self.module(input_tensor)
y = self.module(x)
return x, y
class Network(keras.Model):
def __init__(self):
super(Network, self).__init__()
self.child = Base()
def call(self, inputs):
return self.child(inputs)
net = Network()
inputs = keras.Input(shape=(10,))
outputs = net(inputs)
model = keras.models.Model(inputs=inputs, outputs=outputs)
file_name = 'model_3.txt'
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
fpath = os.path.join(temp_dir, file_name)
writer = open(fpath, 'w')
def print_to_file(text):
print(text, file=writer)
try:
layer_utils.print_summary(
model, line_length=120, print_fn=print_to_file, expand_nested=True)
self.assertTrue(tf.io.gfile.exists(fpath))
writer.close()
reader = open(fpath, 'r')
lines = reader.readlines()
reader.close()
if tf.__internal__.tf2.enabled():
self.assertEqual(len(lines), 39)
else:
self.assertEqual(len(lines), 40)
except ImportError:
pass
def test_print_summary_show_trainable(self):
model = keras.Sequential(name='trainable')
untrained = keras.layers.Conv2D(
filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv')
model.add(untrained)
model.add(keras.layers.Flatten(name='flat'))
model.add(keras.layers.Dense(5, name='dense'))
untrained.trainable = False
file_name = 'model_4.txt'
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
fpath = os.path.join(temp_dir, file_name)
writer = open(fpath, 'w')
def print_to_file(text):
print(text, file=writer)
try:
layer_utils.print_summary(
model, print_fn=print_to_file, show_trainable=True)
self.assertTrue(tf.io.gfile.exists(fpath))
writer.close()
reader = open(fpath, 'r')
lines = reader.readlines()
reader.close()
check_str = (
'Model: '
'"trainable"\n____________________________________________________________________________\n'
' Layer (type) Output Shape Param # '
'Trainable '
'\n============================================================================\n'
' conv (Conv2D) (None, 2, 3, 2) 62 N'
' \n'
' '
'\n flat (Flatten) (None, 12) 0 '
'Y \n'
' '
'\n dense (Dense) (None, 5) 65 '
'Y \n'
' '
'\n============================================================================\nTotal'
' params: 127\nTrainable params: 65\nNon-trainable params: '
'62\n____________________________________________________________________________\n'
'____________________________________________________________________________\n'
)
fin_str = ''
for line in lines:
fin_str += line
self.assertIn(fin_str, check_str)
self.assertEqual(len(lines), 15)
except ImportError:
pass
def test_print_summary_expand_nested_show_trainable(self):
shape = (None, None, 3)
def make_model():
x = inputs = keras.Input(shape, name='input2')
untrainable = keras.layers.Conv2D(3, 1)
untrainable.trainable = False
x = untrainable(x)
x = keras.layers.BatchNormalization()(x)
return keras.Model(inputs, x)
x = inner_inputs = keras.Input(shape, name='input1')
x = make_model()(x)
inner_model = keras.Model(inner_inputs, x)
inputs = keras.Input(shape, name='input3')
model = keras.Model(inputs, inner_model(inputs))
file_name = 'model_6.txt'
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
fpath = os.path.join(temp_dir, file_name)
writer = open(fpath, 'w')
def print_to_file(text):
print(text, file=writer)
try:
layer_utils.print_summary(
model,
print_fn=print_to_file,
expand_nested=True,
show_trainable=True)
self.assertTrue(tf.io.gfile.exists(fpath))
writer.close()
reader = open(fpath, 'r')
lines = reader.readlines()
reader.close()
check_str = (
'Model: '
'"model_2"\n____________________________________________________________________________\n'
' Layer (type) Output Shape Param # '
'Trainable '
'\n============================================================================\n'
' input3 (InputLayer) [(None, None, None, 3)] 0 Y'
' \n'
' '
'\n model_1 (Functional) (None, None, None, 3) 24 '
'Y '
'\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n|'
' input1 (InputLayer) [(None, None, None, 3)] 0 Y'
' |\n|'
' '
'|\n| model (Functional) (None, None, None, 3) 24 '
'Y '
'|\n||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\n||'
' input2 (InputLayer) [(None, None, None, 3)] 0 Y'
' ||\n||'
' '
'||\n|| conv2d (Conv2D) (None, None, None, 3) 12 '
'N ||\n||'
' '
'||\n|| batch_normalization (BatchN (None, None, None, 3) 12 '
'Y ||\n|| ormalization)'
' '
'||\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n============================================================================\nTotal'
' params: 24\nTrainable params: 6\nNon-trainable params: '
'18\n____________________________________________________________________________\n'
'____________________________________________________________________________\n'
)
fin_str = ''
for line in lines:
fin_str += line
self.assertIn(fin_str, check_str)
self.assertEqual(len(lines), 25)
except ImportError:
pass
def test_property_cache(self):
test_counter = collections.Counter()
class MyObject(tf.__internal__.tracking.AutoTrackable):
def __init__(self):
super(MyObject, self).__init__()
self._frozen = True
def __setattr__(self, key, value):
if getattr(self, '_frozen', False):
raise ValueError('Cannot mutate when frozen.')
return super(MyObject, self).__setattr__(key, value)
@property
@layer_utils.cached_per_instance
def test_property(self):
test_counter[id(self)] += 1
return id(self)
first_object = MyObject()
second_object = MyObject()
self.assertEqual(first_object.test_property, id(first_object))
self.assertEqual(second_object.test_property, id(second_object))
self.assertNotEqual(first_object.test_property, second_object.test_property)
self.assertEqual(first_object.test_property, id(first_object))
self.assertEqual(second_object.test_property, id(second_object))
self.assertAllEqual(tuple(test_counter.values()), (1, 1))
def test_property_cache_threaded(self):
call_count = collections.Counter()
class MyObject(tf.__internal__.tracking.AutoTrackable):
@property
@layer_utils.cached_per_instance
def test_property(self):
call_count['test_property'] += 1
time.sleep(np.random.random() + 1.)
# because numpy limits the range of seeds) to ensure that an instance
# returns the same value in different threads, but different instances
# return different values.
return int(np.random.RandomState(id(self) % (2 ** 31)).randint(2 ** 16))
def get_test_property(self, _):
return self.test_property
# Test that multiple threads return the same value. This requires that
# the underlying function is repeatable, as cached_property makes no attempt
# to prioritize the first call.
test_obj = MyObject()
with contextlib.closing(multiprocessing.dummy.Pool(32)) as pool:
# Intentionally make a large pool (even when there are only a small number
# of cpus) to ensure that the runtime switches threads.
results = pool.map(test_obj.get_test_property, range(64))
self.assertEqual(len(set(results)), 1)
# Make sure we actually are testing threaded behavior.
self.assertGreater(call_count['test_property'], 1)
# Make sure new threads still cache hit.
with contextlib.closing(multiprocessing.dummy.Pool(2)) as pool:
start_time = timeit.default_timer() # Don't time pool instantiation.
results = pool.map(test_obj.get_test_property, range(4))
total_time = timeit.default_timer() - start_time
self.assertLess(total_time, 0.95)
def test_property_cache_serialization(self):
for k in list(_PICKLEABLE_CALL_COUNT.keys()):
_PICKLEABLE_CALL_COUNT.pop(k)
first_instance = MyPickleableObject()
self.assertEqual(id(first_instance), first_instance.my_id)
second_instance = pickle.loads(pickle.dumps(first_instance))
self.assertEqual(id(second_instance), second_instance.my_id)
self.assertNotEqual(first_instance.my_id, second_instance.my_id)
self.assertEqual(_PICKLEABLE_CALL_COUNT[second_instance], 1)
expected_size = len(pickle.dumps(second_instance))
for _ in range(5):
_ = MyPickleableObject().my_id
self.assertEqual(len(_PICKLEABLE_CALL_COUNT), 7)
size_check_instance = MyPickleableObject()
_ = size_check_instance.my_id
self.assertEqual(expected_size, len(pickle.dumps(size_check_instance)))
if __name__ == '__main__':
tf.test.main()
| true | true |
f734d64a2d3fe42732aa1ee1e867be66cb0f7bf3 | 47,046 | py | Python | ranger/config_ranger/commands_full.py | nexayq/dot_files | 473614b8ecdb4efb3ae7098ef3d8687da0585260 | [
"MIT"
] | null | null | null | ranger/config_ranger/commands_full.py | nexayq/dot_files | 473614b8ecdb4efb3ae7098ef3d8687da0585260 | [
"MIT"
] | null | null | null | ranger/config_ranger/commands_full.py | nexayq/dot_files | 473614b8ecdb4efb3ae7098ef3d8687da0585260 | [
"MIT"
] | 2 | 2016-10-24T08:36:03.000Z | 2018-09-09T15:32:21.000Z | # -*- coding: utf-8 -*-
# This file is part of ranger, the console file manager.
# This configuration file is licensed under the same terms as ranger.
# ===================================================================
#
# NOTE: If you copied this file to ~/.config/ranger/commands_full.py,
# then it will NOT be loaded by ranger, and only serve as a reference.
#
# ===================================================================
# This file contains ranger's commands.
# It's all in python; lines beginning with # are comments.
#
# Note that additional commands are automatically generated from the methods
# of the class ranger.core.actions.Actions.
#
# You can customize commands in the file ~/.config/ranger/commands.py.
# It has the same syntax as this file. In fact, you can just copy this
# file there with `ranger --copy-config=commands' and make your modifications.
# But make sure you update your configs when you update ranger.
#
# ===================================================================
# Every class defined here which is a subclass of `Command' will be used as a
# command in ranger. Several methods are defined to interface with ranger:
# execute(): called when the command is executed.
# cancel(): called when closing the console.
# tab(tabnum): called when <TAB> is pressed.
# quick(): called after each keypress.
#
# tab() argument tabnum is 1 for <TAB> and -1 for <S-TAB> by default
#
# The return values for tab() can be either:
# None: There is no tab completion
# A string: Change the console to this string
# A list/tuple/generator: cycle through every item in it
#
# The return value for quick() can be:
# False: Nothing happens
# True: Execute the command afterwards
#
# The return value for execute() and cancel() doesn't matter.
#
# ===================================================================
# Commands have certain attributes and methods that facilitate parsing of
# the arguments:
#
# self.line: The whole line that was written in the console.
# self.args: A list of all (space-separated) arguments to the command.
# self.quantifier: If this command was mapped to the key "X" and
# the user pressed 6X, self.quantifier will be 6.
# self.arg(n): The n-th argument, or an empty string if it doesn't exist.
# self.rest(n): The n-th argument plus everything that followed. For example,
# if the command was "search foo bar a b c", rest(2) will be "bar a b c"
# self.start(n): Anything before the n-th argument. For example, if the
# command was "search foo bar a b c", start(2) will be "search foo"
#
# ===================================================================
# And this is a little reference for common ranger functions and objects:
#
# self.fm: A reference to the "fm" object which contains most information
# about ranger.
# self.fm.notify(string): Print the given string on the screen.
# self.fm.notify(string, bad=True): Print the given string in RED.
# self.fm.reload_cwd(): Reload the current working directory.
# self.fm.thisdir: The current working directory. (A File object.)
# self.fm.thisfile: The current file. (A File object too.)
# self.fm.thistab.get_selection(): A list of all selected files.
# self.fm.execute_console(string): Execute the string as a ranger command.
# self.fm.open_console(string): Open the console with the given string
# already typed in for you.
# self.fm.move(direction): Moves the cursor in the given direction, which
# can be something like down=3, up=5, right=1, left=1, to=6, ...
#
# File objects (for example self.fm.thisfile) have these useful attributes and
# methods:
#
# cf.path: The path to the file.
# cf.basename: The base name only.
# cf.load_content(): Force a loading of the directories content (which
# obviously works with directories only)
# cf.is_directory: True/False depending on whether it's a directory.
#
# For advanced commands it is unavoidable to dive a bit into the source code
# of ranger.
# ===================================================================
from ranger.api.commands import *
class alias(Command):
    """:alias <newcommand> <oldcommand>
    Copies the oldcommand as newcommand.
    """
    context = 'browser'
    resolve_macros = False

    def execute(self):
        # Both the new name and at least one word of definition are required.
        new_name = self.arg(1)
        definition = self.rest(2)
        if new_name and definition:
            self.fm.commands.alias(new_name, definition)
        else:
            self.fm.notify('Syntax: alias <newcommand> <oldcommand>', bad=True)
class echo(Command):
    """:echo <text>
    Display the text in the statusbar.
    """
    def execute(self):
        # Everything after the command name is shown verbatim.
        message = self.rest(1)
        self.fm.notify(message)
class cd(Command):
    """:cd [-r] <dirname>
    The cd command changes the directory.
    The command 'cd -' is equivalent to typing ``.
    Using the option "-r" will get you to the real path.
    """
    def execute(self):
        """Change into the given directory (or select a file given with -r)."""
        import os.path
        if self.arg(1) == '-r':
            # -r: resolve symlinks first; a file target is selected, not entered.
            self.shift()
            destination = os.path.realpath(self.rest(1))
            if os.path.isfile(destination):
                self.fm.select_file(destination)
                return
        else:
            destination = self.rest(1)
        # No argument means "go home", like a plain shell `cd`.
        if not destination:
            destination = '~'
        if destination == '-':
            # '-' jumps to the previous directory via the backtick bookmark.
            self.fm.enter_bookmark('`')
        else:
            self.fm.cd(destination)
    def tab(self, tabnum):
        """Complete directory names (and bookmark paths, if enabled)."""
        import os
        from os.path import dirname, basename, expanduser, join
        cwd = self.fm.thisdir.path
        rel_dest = self.rest(1)
        # Bookmarked paths that contain the typed text are completion
        # candidates too (used below when cd_bookmarks is set).
        bookmarks = [v.path for v in self.fm.bookmarks.dct.values()
                if rel_dest in v.path ]
        # expand the tilde into the user directory
        if rel_dest.startswith('~'):
            rel_dest = expanduser(rel_dest)
        # define some shortcuts
        abs_dest = join(cwd, rel_dest)
        abs_dirname = dirname(abs_dest)
        rel_basename = basename(rel_dest)
        rel_dirname = dirname(rel_dest)
        try:
            # are we at the end of a directory?
            if rel_dest.endswith('/') or rel_dest == '':
                _, dirnames, _ = next(os.walk(abs_dest))
            # are we in the middle of the filename?
            else:
                _, dirnames, _ = next(os.walk(abs_dirname))
                dirnames = [dn for dn in dirnames \
                        if dn.startswith(rel_basename)]
        except (OSError, StopIteration):
            # os.walk found nothing
            pass
        else:
            dirnames.sort()
            if self.fm.settings.cd_bookmarks:
                dirnames = bookmarks + dirnames
            # no results, return None
            if len(dirnames) == 0:
                return
            # one result. since it must be a directory, append a slash.
            if len(dirnames) == 1:
                return self.start(1) + join(rel_dirname, dirnames[0]) + '/'
            # more than one result. append no slash, so the user can
            # manually type in the slash to advance into that directory
            return (self.start(1) + join(rel_dirname, dirname) for dirname in dirnames)
class chain(Command):
    """:chain <command1>; <command2>; ...
    Calls multiple commands at once, separated by semicolons.
    """
    def execute(self):
        # Split on ';' and feed each piece to the console, in order.
        commands = self.rest(1).split(";")
        for single_command in commands:
            self.fm.execute_console(single_command)
class shell(Command):
    """:shell [-<flags>] <command>
    Run a shell command; a leading dash-argument supplies run flags.
    """
    escape_macros_for_shell = True
    def execute(self):
        # A first argument beginning with '-' carries flags (e.g. -w, -f);
        # the rest of the line is the actual shell command.
        if self.arg(1) and self.arg(1)[0] == '-':
            flags = self.arg(1)[1:]
            command = self.rest(2)
        else:
            flags = ''
            command = self.rest(1)
        if command:
            self.fm.execute_command(command, flags=flags)
    def tab(self, tabnum):
        """Complete executables for the first word, filenames afterwards."""
        from ranger.ext.get_executables import get_executables
        if self.arg(1) and self.arg(1)[0] == '-':
            command = self.rest(2)
        else:
            command = self.rest(1)
        # `start` is the untouched prefix of the console line.
        start = self.line[0:len(self.line) - len(command)]
        try:
            position_of_last_space = command.rindex(" ")
        except ValueError:
            # No space yet: we are completing the program name itself.
            return (start + program + ' ' for program \
                in get_executables() if program.startswith(command))
        if position_of_last_space == len(command) - 1:
            # Cursor right after a space: offer the current selection.
            selection = self.fm.thistab.get_selection()
            if len(selection) == 1:
                return self.line + selection[0].shell_escaped_basename + ' '
            else:
                # Multiple files: insert the %s macro instead of listing them.
                return self.line + '%s '
        else:
            # In the middle of an argument: complete against files in cwd.
            before_word, start_of_word = self.line.rsplit(' ', 1)
            return (before_word + ' ' + file.shell_escaped_basename \
                for file in self.fm.thisdir.files or [] \
                if file.shell_escaped_basename.startswith(start_of_word))
class open_with(Command):
    """:open_with [<application>] [<flags>] [<mode>]
    Open the selected files with the given application, flags and mode.
    """
    def execute(self):
        # Parse "app flags mode" (any subset) and run the selection with it.
        app, flags, mode = self._get_app_flags_mode(self.rest(1))
        self.fm.execute_file(
                files = [f for f in self.fm.thistab.get_selection()],
                app = app,
                flags = flags,
                mode = mode)
    def tab(self, tabnum):
        """Complete against the executables found in $PATH."""
        return self._tab_through_executables()
    def _get_app_flags_mode(self, string):
        """Extracts the application, flags and mode from a string.
        examples:
        "mplayer f 1" => ("mplayer", "f", 1)
        "aunpack 4" => ("aunpack", "", 4)
        "p" => ("", "p", 0)
        "" => None
        """
        app = ''
        flags = ''
        mode = 0
        split = string.split()
        # Classification is positional and order-sensitive: each token is
        # tested as app, then flags, then mode; later tokens may only fill
        # the slots the earlier ones left open.
        if len(split) == 0:
            pass
        elif len(split) == 1:
            part = split[0]
            if self._is_app(part):
                app = part
            elif self._is_flags(part):
                flags = part
            elif self._is_mode(part):
                mode = part
        elif len(split) == 2:
            part0 = split[0]
            part1 = split[1]
            if self._is_app(part0):
                app = part0
                if self._is_flags(part1):
                    flags = part1
                elif self._is_mode(part1):
                    mode = part1
            elif self._is_flags(part0):
                flags = part0
                if self._is_mode(part1):
                    mode = part1
            elif self._is_mode(part0):
                mode = part0
                if self._is_flags(part1):
                    flags = part1
        elif len(split) >= 3:
            # Tokens beyond the third are silently ignored.
            part0 = split[0]
            part1 = split[1]
            part2 = split[2]
            if self._is_app(part0):
                app = part0
                if self._is_flags(part1):
                    flags = part1
                    if self._is_mode(part2):
                        mode = part2
                elif self._is_mode(part1):
                    mode = part1
                    if self._is_flags(part2):
                        flags = part2
            elif self._is_flags(part0):
                flags = part0
                if self._is_mode(part1):
                    mode = part1
            elif self._is_mode(part0):
                mode = part0
                if self._is_flags(part1):
                    flags = part1
        return app, flags, int(mode)
    def _is_app(self, arg):
        # Anything that is neither a flags string nor a pure number.
        return not self._is_flags(arg) and not arg.isdigit()
    def _is_flags(self, arg):
        from ranger.core.runner import ALLOWED_FLAGS
        return all(x in ALLOWED_FLAGS for x in arg)
    def _is_mode(self, arg):
        return all(x in '0123456789' for x in arg)
class set_(Command):
    """:set <option name>=<python expression>
    Gives an option a new value.
    """
    name = 'set'  # don't override the builtin set class

    def execute(self):
        """Apply the "name=value" setting typed on the console line."""
        # parse_setting_line() extracts the option name itself, so the old
        # redundant pre-read `name = self.arg(1)` (a dead store) was removed.
        name, value, _ = self.parse_setting_line()
        self.fm.set_option_from_string(name, value)

    def tab(self, tabnum):
        """Complete option names, boolean values and colorscheme names."""
        from ranger.gui.colorscheme import get_all_colorschemes
        name, value, name_done = self.parse_setting_line()
        settings = self.fm.settings
        if not name:
            # Nothing typed yet: offer every known option.
            return sorted(self.firstpart + setting for setting in settings)
        if not value and not name_done:
            # Still typing the option name: complete it.
            return sorted(self.firstpart + setting for setting in settings
                          if setting.startswith(name))
        if not value:
            # Cycle through colorschemes when name, but no value is specified
            if name == "colorscheme":
                return sorted(self.firstpart + colorscheme for colorscheme
                              in get_all_colorschemes())
            # Otherwise pre-fill with the option's current value.
            return self.firstpart + str(settings[name])
        if bool in settings.types_of(name):
            if 'true'.startswith(value.lower()):
                return self.firstpart + 'True'
            if 'false'.startswith(value.lower()):
                return self.firstpart + 'False'
        # Tab complete colorscheme values if incomplete value is present
        if name == "colorscheme":
            return sorted(self.firstpart + colorscheme for colorscheme
                          in get_all_colorschemes() if colorscheme.startswith(value))
class setlocal(set_):
    """:setlocal path=<regular expression> <option name>=<python expression>
    Gives an option a new value, but only for paths matching the regex.
    """
    # Matches an optional leading `path="..."` argument (quotes optional).
    PATH_RE = re.compile(r'^\s*path="?(.*?)"?\s*$')

    def execute(self):
        """Apply a setting restricted to a path (explicit or the cwd)."""
        import os.path
        match = self.PATH_RE.match(self.arg(1))
        if match:
            # Explicit path= argument: normalize it and consume the token.
            path = os.path.normpath(os.path.expanduser(match.group(1)))
            self.shift()
        elif self.fm.thisdir:
            # Default to the current directory.
            path = self.fm.thisdir.path
        else:
            path = None
        if path:
            # The old redundant `name = self.arg(1)` dead store was removed;
            # parse_setting_line() already yields the option name.
            name, value, _ = self.parse_setting_line()
            self.fm.set_option_from_string(name, value, localpath=path)
class setintag(setlocal):
    """:setintag <tag or tags> <option name>=<option value>
    Sets an option for directories that are tagged with a specific tag.
    """
    def execute(self):
        # First argument names the tag(s); the remainder of the line is a
        # normal "name=value" setting, parsed after shifting the tag off.
        tags = self.arg(1)
        self.shift()
        name, value, _ = self.parse_setting_line()
        self.fm.set_option_from_string(name, value, tags=tags)
class default_linemode(Command):
    """:default_linemode [path=<regexp> | tag=<tag(s)>] <linemode>
    Set the default linemode, optionally restricted to files whose path
    matches a regular expression or whose directory carries given tag(s).
    """
    def execute(self):
        import re
        from ranger.container.fsobject import FileSystemObject
        if len(self.args) < 2:
            self.fm.notify("Usage: default_linemode [path=<regexp> | tag=<tag(s)>] <linemode>", bad=True)
            # BUGFIX: bail out instead of falling through and installing a
            # bogus entry for the empty linemode "".
            return
        # Extract options like "path=..." or "tag=..." from the command line
        arg1 = self.arg(1)
        method = "always"
        argument = None
        if arg1.startswith("path="):
            method = "path"
            argument = re.compile(arg1[5:])
            self.shift()
        elif arg1.startswith("tag="):
            method = "tag"
            argument = arg1[4:]
            self.shift()
        # Extract and validate the line mode from the command line
        linemode = self.rest(1)
        if linemode not in FileSystemObject.linemode_dict:
            self.fm.notify("Invalid linemode: %s; should be %s" %
                    (linemode, "/".join(FileSystemObject.linemode_dict)), bad=True)
            # BUGFIX: don't register an entry for an unknown linemode.
            return
        # Add the prepared entry to the fm.default_linemodes
        entry = [method, argument, linemode]
        self.fm.default_linemodes.appendleft(entry)
        # Redraw the columns
        if hasattr(self.fm.ui, "browser"):
            for col in self.fm.ui.browser.columns:
                col.need_redraw = True
    def tab(self, tabnum):
        # Complete partial linemode names. (The old unused `mode` local
        # was removed.)
        return (self.arg(0) + " " + linemode
                for linemode in self.fm.thisfile.linemode_dict.keys()
                if linemode.startswith(self.arg(1)))
class quit(Command):
    """:quit
    Closes the current tab. If there is only one tab, quit the program.
    """
    def execute(self):
        # NOTE(review): tab_close() also runs after exit() in the single-tab
        # case; presumably exit() tears ranger down first — confirm.
        if len(self.fm.tabs) <= 1:
            self.fm.exit()
        self.fm.tab_close()
class quitall(Command):
    """:quitall
    Quits the program immediately.
    """
    def execute(self):
        # Unconditional exit: no per-tab handling, no confirmation.
        self.fm.exit()
class quit_bang(quitall):
    """:quit!
    Quits the program immediately.
    """
    name = 'quit!'
    # Require the full ":quit!" spelling; no abbreviation for a forced quit.
    allow_abbrev = False
class terminal(Command):
    """:terminal
    Spawns an "x-terminal-emulator" starting in the current directory.
    """
    def execute(self):
        import os
        from ranger.ext.get_executables import get_executables
        # Prefer $TERMCMD, then $TERM, then the generic fallbacks; each
        # candidate is replaced when it is not an available executable.
        cmd = os.environ.get('TERMCMD', os.environ.get('TERM'))
        for fallback in ('x-terminal-emulator', 'xterm'):
            if cmd not in get_executables():
                cmd = fallback
        self.fm.run(cmd, flags='f')
class delete(Command):
    """:delete
    Tries to delete the selection or the files passed in arguments (if any).
    The arguments use a shell-like escaping.
    "Selection" is defined as all the "marked files" (by default, you
    can mark files with space or v). If there are no marked files,
    use the "current file" (where the cursor is)
    When attempting to delete non-empty directories or multiple
    marked files, it will require a confirmation.
    """
    allow_abbrev = False
    escape_macros_for_shell = True

    def execute(self):
        # Cleanup: dropped the unused `import os`, the unused File import,
        # and the dead store `filename_list = files`.
        import shlex
        from functools import partial

        def is_directory_with_files(f):
            # Non-empty real directory; symlinks are removed as links and
            # therefore never force the extra confirmation.
            import os.path
            return (os.path.isdir(f) and not os.path.islink(f)
                    and len(os.listdir(f)) > 0)

        if self.rest(1):
            # Explicit file arguments were given on the console line.
            files = shlex.split(self.rest(1))
            many_files = (len(files) > 1 or is_directory_with_files(files[0]))
        else:
            cwd = self.fm.thisdir
            cf = self.fm.thisfile
            if not cwd or not cf:
                self.fm.notify("Error: no file selected for deletion!", bad=True)
                return
            # relative_path used for a user-friendly output in the confirmation.
            files = [f.relative_path for f in self.fm.thistab.get_selection()]
            many_files = (cwd.marked_items or is_directory_with_files(cf.path))

        confirm = self.fm.settings.confirm_on_delete
        if confirm != 'never' and (confirm != 'multiple' or many_files):
            self.fm.ui.console.ask("Confirm deletion of: %s (y/N)" %
                    ', '.join(files),
                    partial(self._question_callback, files), ('n', 'N', 'y', 'Y'))
        else:
            # no need for a confirmation, just delete
            self.fm.delete(files)

    def tab(self, tabnum):
        return self._tab_directory_content()

    def _question_callback(self, files, answer):
        # Only an explicit "y"/"Y" deletes; any other key aborts.
        if answer == 'y' or answer == 'Y':
            self.fm.delete(files)
class mark_tag(Command):
    """:mark_tag [<tags>]

    Mark all tags that are tagged with either of the given tags.
    When leaving out the tag argument, all tagged files are marked.
    """

    do_mark = True

    def execute(self):
        directory = self.fm.thisdir
        wanted = self.rest(1).replace(" ", "")
        if not self.fm.tags or not directory.files:
            return
        for fsobj in directory.files:
            try:
                tag = self.fm.tags.tags[fsobj.realpath]
            except KeyError:
                # File carries no tag at all; skip it.
                continue
            # An empty argument matches every tagged file.
            if not wanted or tag in wanted:
                directory.mark_item(fsobj, val=self.do_mark)
        self.fm.ui.status.need_redraw = True
        self.fm.ui.need_redraw = True
class console(Command):
    """:console <command>

    Open the console with the given command.
    """

    def execute(self):
        position = None
        # "-pNN" places the console cursor at offset NN.
        if self.arg(1)[0:2] == '-p':
            try:
                position = int(self.arg(1)[2:])
                self.shift()
            except ValueError:
                # Previously a bare `except:`; only a malformed number
                # should be tolerated here.
                pass
        self.fm.open_console(self.rest(1), position=position)
class load_copy_buffer(Command):
    """:load_copy_buffer

    Load the copy buffer from confdir/copy_buffer
    """

    copy_buffer_filename = 'copy_buffer'

    def execute(self):
        from ranger.container.file import File
        from os.path import exists

        # Initialize first: if confpath() itself raises, the error
        # message below used to fail with a NameError on `fname`.
        fname = None
        try:
            fname = self.fm.confpath(self.copy_buffer_filename)
            f = open(fname, 'r')
        except OSError:
            return self.fm.notify("Cannot open %s" %
                                  (fname or self.copy_buffer_filename), bad=True)
        self.fm.copy_buffer = set(File(g)
                                  for g in f.read().split("\n") if exists(g))
        f.close()
        self.fm.ui.redraw_main_column()
class save_copy_buffer(Command):
    """:save_copy_buffer

    Save the copy buffer to confdir/copy_buffer
    """

    copy_buffer_filename = 'copy_buffer'

    def execute(self):
        fname = None
        try:
            fname = self.fm.confpath(self.copy_buffer_filename)
            f = open(fname, 'w')
        except OSError:
            # Previously a bare `except:`; only file-system errors are
            # expected here.
            return self.fm.notify("Cannot open %s" %
                                  (fname or self.copy_buffer_filename), bad=True)
        # Renamed the generator variable, which used to shadow the file
        # object `f`.
        f.write("\n".join(fobj.path for fobj in self.fm.copy_buffer))
        f.close()
class unmark_tag(mark_tag):
    """:unmark_tag [<tags>]

    Unmark all tags that are tagged with either of the given tags.
    When leaving out the tag argument, all tagged files are unmarked.
    """

    # Reuses mark_tag.execute(); only the marking direction differs.
    do_mark = False
class mkdir(Command):
    """:mkdir <dirname>

    Creates a directory with the name <dirname>.
    """

    def execute(self):
        from os.path import join, expanduser, lexists
        from os import makedirs

        dirname = join(self.fm.thisdir.path, expanduser(self.rest(1)))
        if not lexists(dirname):
            try:
                # lexists() and makedirs() race against other processes;
                # the directory may appear in between, so catch the error
                # instead of crashing with a traceback.
                makedirs(dirname)
            except OSError:
                self.fm.notify("failed to create directory!", bad=True)
        else:
            self.fm.notify("file/directory exists!", bad=True)

    def tab(self, tabnum):
        return self._tab_directory_content()
class touch(Command):
    """:touch <fname>

    Creates a file with the name <fname>.
    """

    def execute(self):
        from os.path import join, expanduser, lexists

        target = join(self.fm.thisdir.path, expanduser(self.rest(1)))
        if lexists(target):
            self.fm.notify("file/directory exists!", bad=True)
        else:
            # Append mode creates the file without truncating anything.
            open(target, 'a').close()

    def tab(self, tabnum):
        return self._tab_directory_content()
class edit(Command):
    """:edit <filename>

    Opens the specified file in vim
    """

    def execute(self):
        # Without an argument, edit the file under the cursor.
        if self.arg(1):
            self.fm.edit_file(self.rest(1))
        else:
            self.fm.edit_file(self.fm.thisfile.path)

    def tab(self, tabnum):
        return self._tab_directory_content()
class eval_(Command):
    """:eval [-q] <python code>

    Evaluates the python code.
    `fm' is a reference to the FM instance.
    To display text, use the function `p'.

    Examples:
    :eval fm
    :eval len(fm.directories)
    :eval p("Hello World!")
    """

    name = 'eval'
    resolve_macros = False

    def execute(self):
        # "-q" (quiet) suppresses printing of the expression's result.
        if self.arg(1) == '-q':
            code = self.rest(2)
            quiet = True
        else:
            code = self.rest(1)
            quiet = False
        import ranger
        # Deliberately published as module globals so the evaluated code
        # can refer to them by name.
        global cmd, fm, p, quantifier
        fm = self.fm
        cmd = self.fm.execute_console
        p = fm.notify
        quantifier = self.quantifier
        try:
            try:
                # Try as an expression first so the result can be shown ...
                result = eval(code)
            except SyntaxError:
                # ... and fall back to statements (e.g. assignments).
                exec(code)
            else:
                if result and not quiet:
                    p(result)
        except Exception as err:
            # Report any runtime error in the statusbar instead of crashing.
            p(err)
class rename(Command):
    """:rename <newname>

    Changes the name of the currently highlighted file to <newname>
    """

    def execute(self):
        from ranger.container.file import File
        from os import access

        new_name = self.rest(1)
        tagged = {}
        old_name = self.fm.thisfile.relative_path
        # Remember and remove tags under the old path, so they can be
        # re-added under the new path after a successful rename.
        # NOTE(review): this removes entries from the dict being iterated;
        # verify this does not raise RuntimeError on Python 3.
        for f in self.fm.tags.tags:
            if str(f).startswith(self.fm.thisfile.path):
                tagged[f] = self.fm.tags.tags[f]
                self.fm.tags.remove(f)
        if not new_name:
            return self.fm.notify('Syntax: rename <newname>', bad=True)
        if new_name == old_name:
            return
        if access(new_name, os.F_OK):
            return self.fm.notify("Can't rename: file already exists!", bad=True)

        if self.fm.rename(self.fm.thisfile, new_name):
            f = File(new_name)
            # Update bookmarks that were pointing on the previous name
            # NOTE(review): `b[1].path` (a string) is compared against
            # `self.fm.thisfile` (a File object) -- confirm File supports
            # equality with path strings, otherwise this list is always empty.
            obsoletebookmarks = [b for b in self.fm.bookmarks
                                 if b[1].path == self.fm.thisfile]
            if obsoletebookmarks:
                for key, _ in obsoletebookmarks:
                    self.fm.bookmarks[key] = f
                self.fm.bookmarks.update_if_outdated()

            self.fm.thisdir.pointed_obj = f
            self.fm.thisfile = f
            # Re-apply the remembered tags under the renamed path.
            for t in tagged:
                self.fm.tags.tags[t.replace(old_name,new_name)] = tagged[t]
            self.fm.tags.dump()

    def tab(self, tabnum):
        return self._tab_directory_content()
class rename_append(Command):
    """:rename_append

    Creates an open_console for the rename command, automatically placing the cursor before the file extension.
    """

    def execute(self):
        cf = self.fm.thisfile
        # "%" must be doubled so the console does not treat it as a macro.
        escaped = cf.relative_path.replace("%", "%%")
        first_dot = escaped.find('.')
        last_dot = escaped.rfind('.')
        # Only position the cursor for regular files that have an
        # extension and are not hidden dotfiles.
        if first_dot != 0 and last_dot != -1 and not cf.is_directory:
            # 7 == len('rename ')
            self.fm.open_console('rename ' + escaped, position=(7 + last_dot))
        else:
            self.fm.open_console('rename ' + escaped)
class chmod(Command):
    """:chmod <octal number>

    Sets the permissions of the selection to the octal number.

    The octal number is between 0 and 777. The digits specify the
    permissions for the user, the group and others.

    A 1 permits execution, a 2 permits writing, a 4 permits reading.
    Add those numbers to combine them. So a 7 permits everything.
    """

    def execute(self):
        mode = self.rest(1)
        if not mode:
            # Fall back to the key quantifier (e.g. pressing "777" before
            # the chmod key); str(None) fails the int() check below.
            mode = str(self.quantifier)

        try:
            mode = int(mode, 8)
            if mode < 0 or mode > 0o777:
                raise ValueError
        except ValueError:
            self.fm.notify("Need an octal number between 0 and 777!", bad=True)
            return

        for file in self.fm.thistab.get_selection():
            try:
                os.chmod(file.path, mode)
            except Exception as ex:
                self.fm.notify(ex)

        try:
            # reloading directory. maybe its better to reload the selected
            # files only.
            self.fm.thisdir.load_content()
        except Exception:
            # Previously a bare `except:`; best-effort reload should not
            # swallow KeyboardInterrupt/SystemExit.
            pass
class bulkrename(Command):
    """:bulkrename

    This command opens a list of selected files in an external editor.
    After you edit and save the file, it will generate a shell script
    which does bulk renaming according to the changes you did in the file.

    This shell script is opened in an editor for you to review.
    After you close it, it will be executed.
    """

    def execute(self):
        import sys
        import tempfile
        from ranger.container.file import File
        from ranger.ext.shell_escape import shell_escape as esc
        py3 = sys.version_info[0] >= 3

        # Create and edit the file list
        filenames = [f.relative_path for f in self.fm.thistab.get_selection()]
        listfile = tempfile.NamedTemporaryFile(delete=False)
        listpath = listfile.name
        if py3:
            listfile.write("\n".join(filenames).encode("utf-8"))
        else:
            listfile.write("\n".join(filenames))
        listfile.close()
        self.fm.execute_file([File(listpath)], app='editor')
        listfile = open(listpath, 'r')
        new_filenames = listfile.read().split("\n")
        listfile.close()
        os.unlink(listpath)
        if all(a == b for a, b in zip(filenames, new_filenames)):
            self.fm.notify("No renaming to be done!")
            return

        # Generate script
        cmdfile = tempfile.NamedTemporaryFile()
        script_lines = []
        script_lines.append("# This file will be executed when you close the editor.\n")
        script_lines.append("# Please double-check everything, clear the file to abort.\n")
        script_lines.extend("mv -vi -- %s %s\n" % (esc(old), esc(new))
                            for old, new in zip(filenames, new_filenames)
                            if old != new)
        script_content = "".join(script_lines)
        if py3:
            cmdfile.write(script_content.encode("utf-8"))
        else:
            cmdfile.write(script_content)
        cmdfile.flush()

        # Open the script and let the user review it, then check if the
        # script was modified by the user.
        self.fm.execute_file([File(cmdfile.name)], app='editor')
        cmdfile.seek(0)
        script_after_review = cmdfile.read()
        if py3:
            # BUGFIX: NamedTemporaryFile is opened in binary mode, so on
            # python3 the read() returns bytes; comparing them with the
            # str `script_content` was always unequal, which made the
            # retagging below never run.
            script_after_review = script_after_review.decode("utf-8")
        script_was_edited = (script_content != script_after_review)

        # Do the renaming
        self.fm.run(['/bin/sh', cmdfile.name], flags='w')
        cmdfile.close()

        # Retag the files, but only if the script wasn't changed during review,
        # because only then we know which are the source and destination files.
        if not script_was_edited:
            tags_changed = False
            for old, new in zip(filenames, new_filenames):
                if old != new:
                    oldpath = self.fm.thisdir.path + '/' + old
                    newpath = self.fm.thisdir.path + '/' + new
                    if oldpath in self.fm.tags:
                        old_tag = self.fm.tags.tags[oldpath]
                        self.fm.tags.remove(oldpath)
                        self.fm.tags.tags[newpath] = old_tag
                        tags_changed = True
            if tags_changed:
                self.fm.tags.dump()
        else:
            # BUGFIX: was `fm.notify(...)`, a NameError unless :eval had
            # previously leaked a global `fm`.
            self.fm.notify("files have not been retagged")
class relink(Command):
    """:relink <newpath>

    Changes the linked path of the currently highlighted symlink to <newpath>
    """

    def execute(self):
        # Removed an unused `File` import; this command works on the
        # existing fsobject only.
        new_path = self.rest(1)
        cf = self.fm.thisfile

        if not new_path:
            return self.fm.notify('Syntax: relink <newpath>', bad=True)
        if not cf.is_link:
            return self.fm.notify('%s is not a symlink!' % cf.relative_path, bad=True)
        if new_path == os.readlink(cf.path):
            # Already points at the requested target; nothing to do.
            return

        try:
            # Symlinks cannot be retargeted in place: remove and recreate.
            os.remove(cf.path)
            os.symlink(new_path, cf.path)
        except OSError as err:
            self.fm.notify(err)
        self.fm.reset()
        self.fm.thisdir.pointed_obj = cf
        self.fm.thisfile = cf

    def tab(self, tabnum):
        # With no argument yet, offer the current link target as completion.
        if not self.rest(1):
            return self.line + os.readlink(self.fm.thisfile.path)
        else:
            return self._tab_directory_content()
class help_(Command):
    """:help

    Display ranger's manual page.
    """

    name = 'help'

    def execute(self):
        def callback(answer):
            # Dispatch on the single key the user pressed.
            if answer == "q":
                return
            elif answer == "m":
                self.fm.display_help()
            elif answer == "c":
                self.fm.dump_commands()
            elif answer == "k":
                self.fm.dump_keybindings()
            elif answer == "s":
                self.fm.dump_settings()

        # chr(27) == ESC aborts as well.  The return value of ask() was
        # previously bound to an unused local; it is discarded now.
        self.fm.ui.console.ask("View [m]an page, [k]ey bindings,"
            " [c]ommands or [s]ettings? (press q to abort)", callback,
            list("mkcsq") + [chr(27)])
class copymap(Command):
    """:copymap <keys> <newkeys1> [<newkeys2>...]

    Copies a "browser" keybinding from <keys> to <newkeys>
    """

    context = 'browser'

    def execute(self):
        if not (self.arg(1) and self.arg(2)):
            return self.fm.notify("Not enough arguments", bad=True)
        source = self.arg(1)
        # Every remaining argument becomes a copy of the source binding.
        for target in self.args[2:]:
            self.fm.ui.keymaps.copy(self.context, source, target)
class copypmap(copymap):
    """:copypmap <keys> <newkeys1> [<newkeys2>...]

    Copies a "pager" keybinding from <keys> to <newkeys>
    """

    # Same logic as copymap, applied to the pager keymap.
    context = 'pager'
class copycmap(copymap):
    """:copycmap <keys> <newkeys1> [<newkeys2>...]

    Copies a "console" keybinding from <keys> to <newkeys>
    """

    # Same logic as copymap, applied to the console keymap.
    context = 'console'
class copytmap(copymap):
    """:copytmap <keys> <newkeys1> [<newkeys2>...]

    Copies a "taskview" keybinding from <keys> to <newkeys>
    """

    # Same logic as copymap, applied to the taskview keymap.
    # (The docstring previously said ":copycmap" by mistake.)
    context = 'taskview'
class unmap(Command):
    """:unmap <keys> [<keys2>, ...]

    Remove the given "browser" mappings
    """

    context = 'browser'

    def execute(self):
        # Unbind every key sequence given on the command line.
        for keys in self.args[1:]:
            self.fm.ui.keymaps.unbind(self.context, keys)
class cunmap(unmap):
    """:cunmap <keys> [<keys2>, ...]

    Remove the given "console" mappings
    """

    # BUGFIX: this was 'browser', which contradicted the documented
    # purpose and made :cunmap remove browser keybindings instead of
    # console ones (compare punmap/tunmap below).
    context = 'console'
class punmap(unmap):
    """:punmap <keys> [<keys2>, ...]

    Remove the given "pager" mappings
    """

    # Same logic as unmap, applied to the pager keymap.
    context = 'pager'
class tunmap(unmap):
    """:tunmap <keys> [<keys2>, ...]

    Remove the given "taskview" mappings
    """

    # Same logic as unmap, applied to the taskview keymap.
    context = 'taskview'
class map_(Command):
    """:map <keysequence> <command>

    Maps a command to a keysequence in the "browser" context.

    Example:
    map j move down
    map J move down 10
    """

    name = 'map'
    context = 'browser'
    resolve_macros = False

    def execute(self):
        keys = self.arg(1)
        if not keys or not self.arg(2):
            return self.fm.notify("Not enough arguments", bad=True)
        # Everything after the key sequence forms the bound command.
        self.fm.ui.keymaps.bind(self.context, keys, self.rest(2))
class cmap(map_):
    """:cmap <keysequence> <command>

    Maps a command to a keysequence in the "console" context.

    Example:
    cmap <ESC> console_close
    cmap <C-x> console_type test
    """

    # Same logic as map_, applied to the console keymap.
    context = 'console'
class tmap(map_):
    """:tmap <keysequence> <command>

    Maps a command to a keysequence in the "taskview" context.
    """

    # Same logic as map_, applied to the taskview keymap.
    context = 'taskview'
class pmap(map_):
    """:pmap <keysequence> <command>

    Maps a command to a keysequence in the "pager" context.
    """

    # Same logic as map_, applied to the pager keymap.
    context = 'pager'
class scout(Command):
    """:scout [-FLAGS] <pattern>

    Swiss army knife command for searching, traveling and filtering files.
    The command takes various flags as arguments which can be used to
    influence its behaviour:

    -a = automatically open a file on unambiguous match
    -e = open the selected file when pressing enter
    -f = filter files that match the current search pattern
    -g = interpret pattern as a glob pattern
    -i = ignore the letter case of the files
    -k = keep the console open when changing a directory with the command
    -l = letter skipping; e.g. allow "rdme" to match the file "readme"
    -m = mark the matching files after pressing enter
    -M = unmark the matching files after pressing enter
    -p = permanent filter: hide non-matching files after pressing enter
    -s = smart case; like -i unless pattern contains upper case letters
    -t = apply filter and search pattern as you type
    -v = inverts the match

    Multiple flags can be combined. For example, ":scout -gpt" would create
    a :filter-like command using globbing.
    """

    # One single-letter constant per supported flag (see docstring).
    AUTO_OPEN = 'a'
    OPEN_ON_ENTER = 'e'
    FILTER = 'f'
    SM_GLOB = 'g'
    IGNORE_CASE = 'i'
    KEEP_OPEN = 'k'
    SM_LETTERSKIP = 'l'
    MARK = 'm'
    UNMARK = 'M'
    PERM_FILTER = 'p'
    SM_REGEX = 'r'
    SMART_CASE = 's'
    AS_YOU_TYPE = 't'
    INVERT = 'v'

    def __init__(self, *args, **kws):
        Command.__init__(self, *args, **kws)
        self._regex = None  # cache for _build_regex()
        self.flags, self.pattern = self.parse_flags()

    def execute(self):
        thisdir = self.fm.thisdir
        flags = self.flags
        pattern = self.pattern
        regex = self._build_regex()
        count = self._count(move=True)

        self.fm.thistab.last_search = regex
        self.fm.set_search_method(order="search")

        # -m / -M: (un)mark the matching files; whichever of the two
        # flags appears later on the command line wins.
        if (self.MARK in flags or self.UNMARK in flags) and thisdir.files:
            value = flags.find(self.MARK) > flags.find(self.UNMARK)
            if self.FILTER in flags:
                # The view is already filtered down to matches.
                for f in thisdir.files:
                    thisdir.mark_item(f, value)
            else:
                for f in thisdir.files:
                    if regex.search(f.relative_path):
                        thisdir.mark_item(f, value)

        if self.PERM_FILTER in flags:
            thisdir.filter = regex if pattern else None

        # clean up:
        self.cancel()

        if self.OPEN_ON_ENTER in flags or \
                self.AUTO_OPEN in flags and count == 1:
            if os.path.exists(pattern):
                self.fm.cd(pattern)
            else:
                self.fm.move(right=1)

        if self.KEEP_OPEN in flags and thisdir != self.fm.thisdir:
            # reopen the console:
            if not pattern:
                self.fm.open_console(self.line)
            else:
                self.fm.open_console(self.line[0:-len(pattern)])

        if self.quickly_executed and thisdir != self.fm.thisdir and pattern != "..":
            self.fm.block_input(0.5)

    def cancel(self):
        # Drop the as-you-type filter when the console is aborted.
        self.fm.thisdir.temporary_filter = None
        self.fm.thisdir.refilter()

    def quick(self):
        # Called on every keypress while typing (as-you-type behaviour).
        asyoutype = self.AS_YOU_TYPE in self.flags
        if self.FILTER in self.flags:
            self.fm.thisdir.temporary_filter = self._build_regex()
        if self.PERM_FILTER in self.flags and asyoutype:
            self.fm.thisdir.filter = self._build_regex()
        if self.FILTER in self.flags or self.PERM_FILTER in self.flags:
            self.fm.thisdir.refilter()
        # Returning True closes the console (unambiguous auto-open).
        if self._count(move=asyoutype) == 1 and self.AUTO_OPEN in self.flags:
            return True
        return False

    def tab(self, tabnum):
        # Tab cycles through the matches instead of completing text.
        self._count(move=True, offset=tabnum)

    def _build_regex(self):
        """Compile (and cache) the regex for the current flags/pattern."""
        if self._regex is not None:
            return self._regex

        frmat = "%s"
        flags = self.flags
        pattern = self.pattern

        if pattern == ".":
            return re.compile("")

        # Handle carets at start and dollar signs at end separately
        if pattern.startswith('^'):
            pattern = pattern[1:]
            frmat = "^" + frmat
        if pattern.endswith('$'):
            pattern = pattern[:-1]
            frmat += "$"

        # Apply one of the search methods
        if self.SM_REGEX in flags:
            regex = pattern
        elif self.SM_GLOB in flags:
            regex = re.escape(pattern).replace("\\*", ".*").replace("\\?", ".")
        elif self.SM_LETTERSKIP in flags:
            regex = ".*".join(re.escape(c) for c in pattern)
        else:
            regex = re.escape(pattern)

        regex = frmat % regex

        # Invert regular expression if necessary
        if self.INVERT in flags:
            regex = "^(?:(?!%s).)*$" % regex

        # Compile Regular Expression
        # NOTE(review): re.LOCALE with a str pattern raises ValueError on
        # Python 3.6+, which the bare except below silently turns into a
        # match-everything regex -- verify on modern interpreters.
        options = re.LOCALE | re.UNICODE
        if self.IGNORE_CASE in flags or self.SMART_CASE in flags and \
                pattern.islower():
            options |= re.IGNORECASE
        try:
            self._regex = re.compile(regex, options)
        except:
            self._regex = re.compile("")
        return self._regex

    def _count(self, move=False, offset=0):
        """Count matches; optionally move the cursor to the first one.

        Returns 0/1 for the trivial patterns, the match count when there
        is more than one, or the boolean `count == 1` otherwise.
        """
        count = 0
        cwd = self.fm.thisdir
        pattern = self.pattern

        if not pattern or not cwd.files:
            return 0
        if pattern == '.':
            return 0
        if pattern == '..':
            return 1

        # Rotate the file list so iteration starts at the cursor position.
        deq = deque(cwd.files)
        deq.rotate(-cwd.pointer - offset)
        i = offset
        regex = self._build_regex()
        for fsobj in deq:
            if regex.search(fsobj.relative_path):
                count += 1
                if move and count == 1:
                    cwd.move(to=(cwd.pointer + i) % len(cwd.files))
                    self.fm.thisfile = cwd.pointed_obj
                if count > 1:
                    # Two matches are enough to know it is ambiguous.
                    return count
            i += 1

        return count == 1
class filter_inode_type(Command):
    """
    :filter_inode_type [dfl]

    Displays only the files of specified inode type. Parameters
    can be combined.

        d display directories
        f display files
        l display links
    """

    FILTER_DIRS = 'd'
    FILTER_FILES = 'f'
    FILTER_LINKS = 'l'

    def execute(self):
        if not self.arg(1):
            # No argument: remove the filter again.
            self.fm.thisdir.inode_type_filter = None
        else:
            def accept(file):
                # Re-read self.arg(1) at call time, like the original
                # lambda did.
                return bool(
                    (self.FILTER_DIRS in self.arg(1) and file.is_directory) or
                    (self.FILTER_FILES in self.arg(1) and file.is_file and not file.is_link) or
                    (self.FILTER_LINKS in self.arg(1) and file.is_link))
            self.fm.thisdir.inode_type_filter = accept
        self.fm.thisdir.refilter()
class grep(Command):
    """:grep <string>

    Looks for a string in all marked files or directories
    """

    def execute(self):
        pattern = self.rest(1)
        if not pattern:
            return
        action = ['grep', '--line-number', '-e', pattern, '-r']
        action.extend(f.path for f in self.fm.thistab.get_selection())
        # Flag 'p' pipes grep's output into the pager.
        self.fm.execute_command(action, flags='p')
# Version control commands
# --------------------------------
class stage(Command):
    """
    :stage

    Stage selected files for the corresponding version control system
    """

    def execute(self):
        from ranger.ext.vcs import VcsError

        filelist = [f.path for f in self.fm.thistab.get_selection()]
        # Mark the directory's VCS state stale so it is refreshed.
        self.fm.thisdir.vcs_outdated = True
        # for f in self.fm.thistab.get_selection():
        #     f.vcs_outdated = True

        try:
            self.fm.thisdir.vcs.add(filelist)
        except VcsError:
            self.fm.notify("Could not stage files.")

        self.fm.reload_cwd()
class unstage(Command):
    """
    :unstage

    Unstage selected files for the corresponding version control system
    """

    def execute(self):
        from ranger.ext.vcs import VcsError

        filelist = [f.path for f in self.fm.thistab.get_selection()]
        # Mark the directory's VCS state stale so it is refreshed.
        self.fm.thisdir.vcs_outdated = True
        # for f in self.fm.thistab.get_selection():
        #     f.vcs_outdated = True

        try:
            self.fm.thisdir.vcs.reset(filelist)
        except VcsError:
            self.fm.notify("Could not unstage files.")

        self.fm.reload_cwd()
class diff(Command):
    """
    :diff

    Displays a diff of selected files against the last committed version
    """

    def execute(self):
        import tempfile

        # Removed an unused `VcsError` import; this command never catches it.
        selection = self.fm.thistab.get_selection()
        if len(selection) == 0:
            return
        filelist = [f.path for f in selection]
        vcs = selection[0].vcs

        diff = vcs.get_raw_diff(filelist=filelist)
        if len(diff.strip()) > 0:
            # Show the diff in the user's pager via a temporary file.
            tmp = tempfile.NamedTemporaryFile()
            tmp.write(diff.encode('utf-8'))
            tmp.flush()

            pager = os.environ.get('PAGER', ranger.DEFAULT_PAGER)
            self.fm.run([pager, tmp.name])
        else:
            raise Exception("diff is empty")
class log(Command):
    """
    :log

    Displays the log of the current repo or files
    """

    def execute(self):
        import tempfile

        # Removed an unused `VcsError` import; this command never catches it.
        selection = self.fm.thistab.get_selection()
        if len(selection) == 0:
            return
        filelist = [f.path for f in selection]
        vcs = selection[0].vcs

        log = vcs.get_raw_log(filelist=filelist)
        # Show the log in the user's pager via a temporary file.
        tmp = tempfile.NamedTemporaryFile()
        tmp.write(log.encode('utf-8'))
        tmp.flush()

        pager = os.environ.get('PAGER', ranger.DEFAULT_PAGER)
        self.fm.run([pager, tmp.name])
class flat(Command):
    """
    :flat <level>

    Flattens the directory view up to the specified level.

        -1 fully flattened
         0 remove flattened view
    """

    def execute(self):
        try:
            level = int(self.rest(1))
        except ValueError:
            # Fall back to the key quantifier, which may be None.
            level = self.quantifier
        if level is None or level < -1:
            # BUGFIX: previously the command notified but then still
            # applied the invalid level (and `None < -1` raised a
            # TypeError on python3).
            self.fm.notify("Need an integer number (-1, 0, 1, ...)", bad=True)
            return
        self.fm.thisdir.unload()
        self.fm.thisdir.flat = level
        self.fm.thisdir.load_content()
# Metadata commands
# --------------------------------
class prompt_metadata(Command):
    """
    :prompt_metadata <key1> [<key2> [<key3> ...]]

    Prompt the user to input metadata for multiple keys in a row.
    """

    _command_name = "meta"
    # Class-level queue of keys still to be prompted for; stored on the
    # class so the chained console invocations share it.
    _console_chain = None

    def execute(self):
        prompt_metadata._console_chain = self.args[1:]
        self._process_command_stack()

    def _process_command_stack(self):
        # Pop the next pending key, or redraw once the chain is finished.
        if prompt_metadata._console_chain:
            key = prompt_metadata._console_chain.pop()
            self._fill_console(key)
        else:
            for col in self.fm.ui.browser.columns:
                col.need_redraw = True

    def _fill_console(self, key):
        # Pre-fill the console with the existing value for easy editing.
        metadata = self.fm.metadata.get_metadata(self.fm.thisfile.path)
        if key in metadata and metadata[key]:
            existing_value = metadata[key]
        else:
            existing_value = ""
        text = "%s %s %s" % (self._command_name, key, existing_value)
        self.fm.open_console(text, position=len(text))
class meta(prompt_metadata):
    """
    :meta <key> [<value>]

    Change metadata of a file. Deletes the key if value is empty.
    """

    def execute(self):
        key = self.arg(1)
        # Removed a dead `value = self.rest(1)` assignment that was never
        # read; rest(2) (everything after the key) is the value, and an
        # empty string deletes the key.
        update_dict = dict()
        update_dict[key] = self.rest(2)
        selection = self.fm.thistab.get_selection()
        for f in selection:
            self.fm.metadata.set_metadata(f.path, update_dict)
        self._process_command_stack()

    def tab(self, tabnum):
        key = self.arg(1)
        metadata = self.fm.metadata.get_metadata(self.fm.thisfile.path)
        if key in metadata and metadata[key]:
            # Complete with the existing value for this key.
            return [" ".join([self.arg(0), self.arg(1), metadata[key]])]
        else:
            # Complete the key itself (renamed loop var; it used to
            # shadow the outer `key`).
            return [self.arg(0) + " " + k for k in sorted(metadata)
                    if k.startswith(self.arg(1))]
class linemode(default_linemode):
    """
    :linemode <mode>

    Change what is displayed as a filename.

    - "mode" may be any of the defined linemodes (see: ranger.core.linemode).
      "normal" is mapped to "filename".
    """

    def execute(self):
        mode = self.arg(1)

        # "normal" is an alias for the default linemode.
        if mode == "normal":
            mode = DEFAULT_LINEMODE

        if mode not in self.fm.thisfile.linemode_dict:
            self.fm.notify("Unhandled linemode: `%s'" % mode, bad=True)
            return

        self.fm.thisdir._set_linemode_of_children(mode)

        # Ask the browsercolumns to redraw
        for col in self.fm.ui.browser.columns:
            col.need_redraw = True
| 30.728935 | 111 | 0.57244 |
# This is ranger's commands file.  It's all in python; lines beginning with
# "#" are comments.  But make sure you update your configs when you update
# ranger.
#
# ===================================================================
# Every class defined here which is a subclass of `Command' will be used as a
# command in ranger.
# ===================================================================
# Commands have certain attributes and methods that facilitate parsing of
# the arguments:
#
# self.line: The whole line that was written in the console.
# self.args: A list of all (space-separated) arguments to the command.
# self.quantifier: If this command was mapped to the key "X" and
#     the user pressed 6X, self.quantifier will be 6.
# self.arg(n): The n-th argument, or an empty string if it doesn't exist.
#
# For advanced commands it is unavoidable to dive a bit into the source code
# of ranger.
# ===================================================================
from ranger.api.commands import *
class alias(Command):
    """:alias <newcommand> <oldcommand>

    Copies the oldcommand as newcommand.
    """

    context = 'browser'
    resolve_macros = False

    def execute(self):
        if not self.arg(1) or not self.arg(2):
            self.fm.notify('Syntax: alias <newcommand> <oldcommand>', bad=True)
        else:
            self.fm.commands.alias(self.arg(1), self.rest(2))
class echo(Command):
    """:echo <text>

    Display the text in the statusbar.
    """

    def execute(self):
        self.fm.notify(self.rest(1))
class cd(Command):
    """:cd [-r] <dirname>

    The cd command changes the directory.  With no argument it changes
    to "~"; "cd -" jumps to the "`" bookmark.  With -r, the destination
    is resolved via realpath(); if it turns out to be a file, the file
    is selected instead.
    """

    def execute(self):
        import os.path
        if self.arg(1) == '-r':
            # Resolve symlinks; a file destination is merely selected.
            self.shift()
            destination = os.path.realpath(self.rest(1))
            if os.path.isfile(destination):
                self.fm.select_file(destination)
                return
        else:
            destination = self.rest(1)
        if not destination:
            destination = '~'
        if destination == '-':
            # Jump to the previous directory (stored in the "`" bookmark).
            self.fm.enter_bookmark('`')
        else:
            self.fm.cd(destination)

    def tab(self, tabnum):
        import os
        from os.path import dirname, basename, expanduser, join

        cwd = self.fm.thisdir.path
        rel_dest = self.rest(1)

        # Bookmarks whose path contains the typed text are offered too.
        bookmarks = [v.path for v in self.fm.bookmarks.dct.values()
                     if rel_dest in v.path]

        # expand the tilde into the user directory
        if rel_dest.startswith('~'):
            rel_dest = expanduser(rel_dest)

        # define some shortcuts
        abs_dest = join(cwd, rel_dest)
        abs_dirname = dirname(abs_dest)
        rel_basename = basename(rel_dest)
        rel_dirname = dirname(rel_dest)

        try:
            # are we at the end of a directory?
            if rel_dest.endswith('/') or rel_dest == '':
                _, dirnames, _ = next(os.walk(abs_dest))

            # are we in the middle of the filename?
            else:
                _, dirnames, _ = next(os.walk(abs_dirname))
                dirnames = [dn for dn in dirnames
                            if dn.startswith(rel_basename)]
        except (OSError, StopIteration):
            # os.walk found nothing
            pass
        else:
            dirnames.sort()
            if self.fm.settings.cd_bookmarks:
                dirnames = bookmarks + dirnames

            # no results, return None
            if len(dirnames) == 0:
                return

            # one result. since it must be a directory, append a slash.
            if len(dirnames) == 1:
                return self.start(1) + join(rel_dirname, dirnames[0]) + '/'

            # more than one result. append no slash, so the user can
            # manually type in the slash to advance into that directory
            return (self.start(1) + join(rel_dirname, dirname) for dirname in dirnames)
class chain(Command):
    """:chain <command1>; <command2>; ...

    Calls multiple commands at once, separated by semicolons.
    """

    def execute(self):
        for command in self.rest(1).split(";"):
            self.fm.execute_console(command)
class shell(Command):
    """:shell [-<flags>] <command>

    Run <command> in a shell.  A first argument beginning with "-" is
    passed on as flags to fm.execute_command().
    """

    escape_macros_for_shell = True

    def execute(self):
        # A leading "-xyz" argument carries the run flags.
        if self.arg(1) and self.arg(1)[0] == '-':
            flags = self.arg(1)[1:]
            command = self.rest(2)
        else:
            flags = ''
            command = self.rest(1)

        if command:
            self.fm.execute_command(command, flags=flags)

    def tab(self, tabnum):
        from ranger.ext.get_executables import get_executables
        if self.arg(1) and self.arg(1)[0] == '-':
            command = self.rest(2)
        else:
            command = self.rest(1)
        start = self.line[0:len(self.line) - len(command)]

        try:
            position_of_last_space = command.rindex(" ")
        except ValueError:
            # No space yet: complete the program name itself.
            return (start + program + ' ' for program
                    in get_executables() if program.startswith(command))
        if position_of_last_space == len(command) - 1:
            # Cursor right after a space: offer the selection as argument,
            # or the "%s" macro when several files are selected.
            selection = self.fm.thistab.get_selection()
            if len(selection) == 1:
                return self.line + selection[0].shell_escaped_basename + ' '
            else:
                return self.line + '%s '
        else:
            # Complete the last word against files in the current directory.
            before_word, start_of_word = self.line.rsplit(' ', 1)
            return (before_word + ' ' + file.shell_escaped_basename
                    for file in self.fm.thisdir.files or []
                    if file.shell_escaped_basename.startswith(start_of_word))
class open_with(Command):
    """:open_with [<program>] [<flags>] [<mode>]

    Open the selected files with the given program, flags and mode.
    Each argument is recognized by its shape: flags consist of allowed
    flag letters, a mode is a number, anything else is the program.
    """

    def execute(self):
        app, flags, mode = self._get_app_flags_mode(self.rest(1))
        self.fm.execute_file(
            files=[f for f in self.fm.thistab.get_selection()],
            app=app,
            flags=flags,
            mode=mode)

    def tab(self, tabnum):
        return self._tab_through_executables()

    def _get_app_flags_mode(self, string):
        """Extract (app, flags, mode) from the argument string.

        Examples:
        "mplayer f 1" => ("mplayer", "f", 1)
        "atool 4"     => ("atool", "", 4)
        "p"           => ("", "p", 0)
        """
        app = ''
        flags = ''
        mode = 0
        split = string.split()

        if len(split) == 0:
            pass
        elif len(split) == 1:
            part = split[0]
            if self._is_app(part):
                app = part
            elif self._is_flags(part):
                flags = part
            elif self._is_mode(part):
                mode = part
        elif len(split) == 2:
            part0 = split[0]
            part1 = split[1]

            if self._is_app(part0):
                app = part0
                if self._is_flags(part1):
                    flags = part1
                elif self._is_mode(part1):
                    mode = part1
            elif self._is_flags(part0):
                flags = part0
                if self._is_mode(part1):
                    mode = part1
            elif self._is_mode(part0):
                mode = part0
                if self._is_flags(part1):
                    flags = part1
        elif len(split) >= 3:
            part0 = split[0]
            part1 = split[1]
            part2 = split[2]

            if self._is_app(part0):
                app = part0
                if self._is_flags(part1):
                    flags = part1
                    if self._is_mode(part2):
                        mode = part2
                elif self._is_mode(part1):
                    mode = part1
                    if self._is_flags(part2):
                        flags = part2
            elif self._is_flags(part0):
                flags = part0
                if self._is_mode(part1):
                    mode = part1
            elif self._is_mode(part0):
                mode = part0
                if self._is_flags(part1):
                    flags = part1

        return app, flags, int(mode)

    def _is_app(self, arg):
        # Anything that is neither flags nor a plain number is a program.
        return not self._is_flags(arg) and not arg.isdigit()

    def _is_flags(self, arg):
        from ranger.core.runner import ALLOWED_FLAGS
        return all(x in ALLOWED_FLAGS for x in arg)

    def _is_mode(self, arg):
        return all(x in '0123456789' for x in arg)
class set_(Command):
    """:set <option name>=<python expression>

    Gives an option a new value.
    """

    name = 'set'  # don't override the builtin set class

    def execute(self):
        # Removed a dead `name = self.arg(1)` assignment that was
        # immediately overwritten by parse_setting_line().
        name, value, _ = self.parse_setting_line()
        self.fm.set_option_from_string(name, value)

    def tab(self, tabnum):
        from ranger.gui.colorscheme import get_all_colorschemes
        name, value, name_done = self.parse_setting_line()
        settings = self.fm.settings
        if not name:
            # Nothing typed yet: offer every setting name.
            return sorted(self.firstpart + setting for setting in settings)
        if not value and not name_done:
            # Still typing the name: complete it by prefix.
            return sorted(self.firstpart + setting for setting in settings
                          if setting.startswith(name))
        if not value:
            if name == "colorscheme":
                # Special-case completion for the colorscheme setting.
                return sorted(self.firstpart + colorscheme for colorscheme
                              in get_all_colorschemes())
            # Otherwise pre-fill the current value.
            return self.firstpart + str(settings[name])
        if bool in settings.types_of(name):
            if 'true'.startswith(value.lower()):
                return self.firstpart + 'True'
            if 'false'.startswith(value.lower()):
                return self.firstpart + 'False'
        if name == "colorscheme":
            return sorted(self.firstpart + colorscheme for colorscheme
                          in get_all_colorschemes() if colorscheme.startswith(value))
class setlocal(set_):
    """:setlocal path=<regular expression> <option name>=<python expression>

    Gives an option a new value, but only for directories matching the
    path (defaulting to the current directory).
    """

    PATH_RE = re.compile(r'^\s*path="?(.*?)"?\s*$')

    def execute(self):
        import os.path
        match = self.PATH_RE.match(self.arg(1))
        if match:
            # Explicit path= argument, with optional surrounding quotes.
            path = os.path.normpath(os.path.expanduser(match.group(1)))
            self.shift()
        elif self.fm.thisdir:
            path = self.fm.thisdir.path
        else:
            path = None
        if path:
            # Removed a dead `name = self.arg(1)` assignment that was
            # immediately overwritten by parse_setting_line().
            name, value, _ = self.parse_setting_line()
            self.fm.set_option_from_string(name, value, localpath=path)
class setintag(setlocal):
    """:setintag <tag or tags> <option name>=<option value>

    Sets an option for directories that are tagged with a specific tag.
    """

    def execute(self):
        # First argument is the tag(s); the rest is the usual name=value.
        tags = self.arg(1)
        self.shift()
        name, value, _ = self.parse_setting_line()
        self.fm.set_option_from_string(name, value, tags=tags)
class default_linemode(Command):
    """:default_linemode [path=<regexp> | tag=<tag(s)>] <linemode>

    Sets the default linemode, optionally restricted to files whose path
    matches a regexp or which carry one of the given tags.
    """

    def execute(self):
        import re
        from ranger.container.fsobject import FileSystemObject

        if len(self.args) < 2:
            self.fm.notify("Usage: default_linemode [path=<regexp> | tag=<tag(s)>] <linemode>", bad=True)
            # BUGFIX: previously execution fell through and registered a
            # bogus empty entry.
            return

        # Extract an optional "path=..." or "tag=..." selector.
        arg1 = self.arg(1)
        method = "always"
        argument = None
        if arg1.startswith("path="):
            method = "path"
            argument = re.compile(arg1[5:])
            self.shift()
        elif arg1.startswith("tag="):
            method = "tag"
            argument = arg1[4:]
            self.shift()

        linemode = self.rest(1)
        if linemode not in FileSystemObject.linemode_dict:
            self.fm.notify("Invalid linemode: %s; should be %s" %
                (linemode, "/".join(FileSystemObject.linemode_dict)), bad=True)
            # BUGFIX: previously an invalid mode was registered anyway.
            return

        entry = [method, argument, linemode]
        self.fm.default_linemodes.appendleft(entry)

        # Ask the browser columns to redraw with the new linemode.
        if hasattr(self.fm.ui, "browser"):
            for col in self.fm.ui.browser.columns:
                col.need_redraw = True

    def tab(self, tabnum):
        # (Removed a dead local that shadowed nothing and was never read.)
        return (self.arg(0) + " " + linemode
                for linemode in self.fm.thisfile.linemode_dict.keys()
                if linemode.startswith(self.arg(1)))
class quit(Command):
    """:quit

    Closes the current tab; when it is the only tab, quit the program.
    """

    def execute(self):
        # With a single tab, exit() terminates ranger entirely ...
        if len(self.fm.tabs) <= 1:
            self.fm.exit()
        # ... otherwise only the current tab is closed.
        self.fm.tab_close()
class quitall(Command):
    """:quitall

    Quits the program immediately.
    """

    def execute(self):
        self.fm.exit()
class quit_bang(quitall):
    """:quit!

    Quits the program immediately.
    """

    name = 'quit!'
    allow_abbrev = False
class terminal(Command):
    """:terminal

    Spawns an "x-terminal-emulator" starting in the current directory.
    """

    def execute(self):
        import os
        from ranger.ext.get_executables import get_executables

        # Prefer the user-configured terminal, then fall back to the
        # common alternatives when it is not an available executable.
        available = get_executables()
        chosen = os.environ.get('TERMCMD', os.environ.get('TERM'))
        for fallback in ('x-terminal-emulator', 'xterm'):
            if chosen in available:
                break
            chosen = fallback
        self.fm.run(chosen, flags='f')
class delete(Command):
    """:delete

    Tries to delete the selection or the files passed in arguments (if any).
    The arguments use a shell-like escaping.

    "Selection" is defined as all the "marked files" (by default, you
    can mark files with space or v). If there are no marked files,
    use the "current file" (where the cursor is)

    When attempting to delete non-empty directories or multiple
    marked files, it will require a confirmation.
    """

    allow_abbrev = False
    escape_macros_for_shell = True

    def execute(self):
        # Removed an unused `File` import and the dead local
        # `filename_list` which was assigned but never read.
        import shlex
        from functools import partial

        def is_directory_with_files(f):
            import os.path
            return (os.path.isdir(f) and not os.path.islink(f)
                    and len(os.listdir(f)) > 0)

        if self.rest(1):
            # Explicit arguments take precedence over the selection.
            files = shlex.split(self.rest(1))
            many_files = (len(files) > 1 or is_directory_with_files(files[0]))
        else:
            cwd = self.fm.thisdir
            cf = self.fm.thisfile
            if not cwd or not cf:
                self.fm.notify("Error: no file selected for deletion!", bad=True)
                return
            # relative_path used for a user-friendly output in the confirmation.
            files = [f.relative_path for f in self.fm.thistab.get_selection()]
            many_files = (cwd.marked_items or is_directory_with_files(cf.path))

        confirm = self.fm.settings.confirm_on_delete
        if confirm != 'never' and (confirm != 'multiple' or many_files):
            # Ask first; _question_callback receives the pressed key.
            self.fm.ui.console.ask("Confirm deletion of: %s (y/N)" %
                ', '.join(files),
                partial(self._question_callback, files), ('n', 'N', 'y', 'Y'))
        else:
            # no need for a confirmation, just delete
            self.fm.delete(files)

    def tab(self, tabnum):
        return self._tab_directory_content()

    def _question_callback(self, files, answer):
        # Delete only on an explicit confirmation.
        if answer == 'y' or answer == 'Y':
            self.fm.delete(files)
class mark_tag(Command):
    """:mark_tag [<tags>]

    Mark every tagged file in the current directory; when <tags> is given,
    only files carrying one of those tag characters are marked.
    """
    do_mark = True

    def execute(self):
        cwd = self.fm.thisdir
        wanted = self.rest(1).replace(" ", "")
        if not self.fm.tags or not cwd.files:
            return
        for fileobj in cwd.files:
            try:
                tag = self.fm.tags.tags[fileobj.realpath]
            except KeyError:
                continue  # file is not tagged at all
            if not wanted or tag in wanted:
                cwd.mark_item(fileobj, val=self.do_mark)
        self.fm.ui.status.need_redraw = True
        self.fm.ui.need_redraw = True
class console(Command):
    """:console [-p<N>] <command>

    Open the console with the given command line; -pN positions the
    cursor at column N.
    """
    def execute(self):
        position = None
        if self.arg(1)[0:2] == '-p':
            try:
                position = int(self.arg(1)[2:])
                self.shift()
            except ValueError:
                # was a bare `except:`; only the int() parse can fail here,
                # and a malformed -p argument is simply ignored
                pass
        self.fm.open_console(self.rest(1), position=position)
class load_copy_buffer(Command):
    """:load_copy_buffer

    Load the copy buffer from the `copy_buffer` file in the config
    directory; paths that no longer exist are silently dropped.
    """
    copy_buffer_filename = 'copy_buffer'

    def execute(self):
        from ranger.container.file import File
        from os.path import exists
        # define fname before the try block: previously, if confpath()
        # raised, the error message below hit a NameError on `fname`
        fname = None
        try:
            fname = self.fm.confpath(self.copy_buffer_filename)
            fobj = open(fname, 'r')
        except (OSError, IOError):
            # was a bare `except:`; only I/O failures are expected here
            return self.fm.notify("Cannot open %s" %
                                  (fname or self.copy_buffer_filename), bad=True)
        self.fm.copy_buffer = set(File(g)
                                  for g in fobj.read().split("\n") if exists(g))
        fobj.close()
        self.fm.ui.redraw_main_column()
class save_copy_buffer(Command):
    """:save_copy_buffer

    Write the current copy buffer to the `copy_buffer` file in the config
    directory (one path per line).
    """
    copy_buffer_filename = 'copy_buffer'

    def execute(self):
        fname = None
        try:
            fname = self.fm.confpath(self.copy_buffer_filename)
            fobj = open(fname, 'w')
        except (OSError, IOError):
            # was a bare `except:`; only I/O failures are expected here
            return self.fm.notify("Cannot open %s" %
                                  (fname or self.copy_buffer_filename), bad=True)
        # renamed the generator variable: it used to shadow the file handle `f`
        fobj.write("\n".join(entry.path for entry in self.fm.copy_buffer))
        fobj.close()
class unmark_tag(mark_tag):
    """:unmark_tag [<tags>]

    Like :mark_tag, but unmarks the matching files instead.
    """
    do_mark = False
class mkdir(Command):
    """:mkdir <dirname>

    Create the directory (including intermediate directories) relative to
    the current directory; ~ is expanded.
    """
    def execute(self):
        from os import makedirs
        from os.path import expanduser, join, lexists
        target = join(self.fm.thisdir.path, expanduser(self.rest(1)))
        if lexists(target):
            self.fm.notify("file/directory exists!", bad=True)
        else:
            makedirs(target)

    def tab(self, tabnum):
        return self._tab_directory_content()
class touch(Command):
    """:touch <fname>

    Create an empty file relative to the current directory; ~ is expanded.
    """
    def execute(self):
        from os.path import expanduser, join, lexists
        target = join(self.fm.thisdir.path, expanduser(self.rest(1)))
        if lexists(target):
            self.fm.notify("file/directory exists!", bad=True)
        else:
            open(target, 'a').close()

    def tab(self, tabnum):
        return self._tab_directory_content()
class edit(Command):
    """:edit [<filename>]

    Open the given file — or the currently highlighted one — in the editor.
    """
    def execute(self):
        if self.arg(1):
            self.fm.edit_file(self.rest(1))
        else:
            self.fm.edit_file(self.fm.thisfile.path)

    def tab(self, tabnum):
        return self._tab_directory_content()
class eval_(Command):
    """:eval [-q] <python code>

    Evaluate Python code inside ranger.  The names `fm`, `cmd`, `p`
    (notify) and `quantifier` are injected as globals for convenience.
    With -q, the result is not displayed.
    """
    name = 'eval'
    resolve_macros = False

    def execute(self):
        if self.arg(1) == '-q':
            code = self.rest(2)
            quiet = True
        else:
            code = self.rest(1)
            quiet = False
        import ranger
        # deliberately leak these into the module globals so the evaluated
        # code (and later :eval calls) can use them
        global cmd, fm, p, quantifier
        fm = self.fm
        cmd = self.fm.execute_console
        p = fm.notify
        quantifier = self.quantifier
        try:
            try:
                result = eval(code)  # try as an expression first ...
            except SyntaxError:
                exec(code)           # ... fall back to statements
            else:
                if result and not quiet:
                    p(result)
        except Exception as err:
            p(err)
class rename(Command):
    """:rename <newname>

    Rename the current file.  Tags and bookmarks pointing at the old name
    are carried over to the new one.  Refuses to overwrite existing files.
    """
    def execute(self):
        from ranger.container.file import File
        from os import access

        new_name = self.rest(1)
        tagged = {}
        old_name = self.fm.thisfile.relative_path
        # remember (and temporarily drop) every tag under the file's path
        for f in self.fm.tags.tags:
            if str(f).startswith(self.fm.thisfile.path):
                tagged[f] = self.fm.tags.tags[f]
                self.fm.tags.remove(f)
        if not new_name:
            return self.fm.notify('Syntax: rename <newname>', bad=True)
        if new_name == old_name:
            return
        if access(new_name, os.F_OK):
            return self.fm.notify("Can't rename: file already exists!", bad=True)

        if self.fm.rename(self.fm.thisfile, new_name):
            f = File(new_name)
            # Update bookmarks that were pointing on the previous name
            # NOTE(review): this compares a path string with a File object —
            # it looks like it should be `self.fm.thisfile.path`; confirm
            obsoletebookmarks = [b for b in self.fm.bookmarks
                                 if b[1].path == self.fm.thisfile]
            if obsoletebookmarks:
                for key, _ in obsoletebookmarks:
                    self.fm.bookmarks[key] = f
                self.fm.bookmarks.update_if_outdated()
            self.fm.thisdir.pointed_obj = f
            self.fm.thisfile = f
            # re-attach the remembered tags under the renamed path
            for t in tagged:
                self.fm.tags.tags[t.replace(old_name, new_name)] = tagged[t]
            self.fm.tags.dump()

    def tab(self, tabnum):
        return self._tab_directory_content()
class rename_append(Command):
    """:rename_append

    Open the console with ":rename <current file>", placing the cursor
    just before the file extension when the file has one.
    """
    def execute(self):
        cf = self.fm.thisfile
        escaped = cf.relative_path.replace("%", "%%")
        dot = escaped.rfind('.')
        # hidden files (leading dot) and directories get the cursor at the end
        if dot != -1 and not escaped.startswith('.') and not cf.is_directory:
            self.fm.open_console('rename ' + escaped, position=(7 + dot))
        else:
            self.fm.open_console('rename ' + escaped)
class chmod(Command):
    """:chmod <octal number>

    Set the permissions of the selection to the given octal number; when
    no argument is given, the quantifier (e.g. 777 typed before :chmod)
    is used instead.
    """
    def execute(self):
        mode = self.rest(1)
        if not mode:
            mode = str(self.quantifier)

        try:
            mode = int(mode, 8)
            if mode < 0 or mode > 0o777:
                raise ValueError
        except ValueError:
            self.fm.notify("Need an octal number between 0 and 777!", bad=True)
            return

        # renamed the loop variable: it used to shadow the builtin `file`
        for fobj in self.fm.thistab.get_selection():
            try:
                os.chmod(fobj.path, mode)
            except Exception as ex:
                self.fm.notify(ex)

        try:
            # reloading directory. maybe its better to reload the selected
            # files only.
            self.fm.thisdir.load_content()
        except Exception:
            # was a bare `except:`, which would also have swallowed
            # KeyboardInterrupt/SystemExit
            pass
class bulkrename(Command):
    """:bulkrename

    Open the list of selected filenames in an external editor, let the
    user edit them, then build a shell script of `mv` commands, let the
    user review it, and run it.  Tags are carried over to the new names
    unless the script was modified during review.
    """
    def execute(self):
        import sys
        import tempfile
        from ranger.container.file import File
        from ranger.ext.shell_escape import shell_escape as esc
        py3 = sys.version_info[0] >= 3

        # Create and edit the file list
        filenames = [f.relative_path for f in self.fm.thistab.get_selection()]
        listfile = tempfile.NamedTemporaryFile(delete=False)
        listpath = listfile.name
        if py3:
            listfile.write("\n".join(filenames).encode("utf-8"))
        else:
            listfile.write("\n".join(filenames))
        listfile.close()
        self.fm.execute_file([File(listpath)], app='editor')
        listfile = open(listpath, 'r')
        new_filenames = listfile.read().split("\n")
        listfile.close()
        os.unlink(listpath)
        if all(a == b for a, b in zip(filenames, new_filenames)):
            self.fm.notify("No renaming to be done!")
            return

        # Generate script
        cmdfile = tempfile.NamedTemporaryFile()
        script_lines = []
        script_lines.append("# This file will be executed when you close the editor.\n")
        script_lines.append("# Please double-check everything, clear the file to abort.\n")
        script_lines.extend("mv -vi -- %s %s\n" % (esc(old), esc(new))
                            for old, new in zip(filenames, new_filenames)
                            if old != new)
        script_content = "".join(script_lines)
        if py3:
            cmdfile.write(script_content.encode("utf-8"))
        else:
            cmdfile.write(script_content)
        cmdfile.flush()

        # Open the script and let the user review it, then check if the
        # script was modified by the user
        self.fm.execute_file([File(cmdfile.name)], app='editor')
        cmdfile.seek(0)
        script_after_review = cmdfile.read()
        if py3:
            # BUGFIX: NamedTemporaryFile is opened in binary mode, so under
            # py3 the old `script_content != cmdfile.read()` compared str
            # with bytes and was always True (tags were never retagged)
            script_after_review = script_after_review.decode("utf-8")
        script_was_edited = (script_content != script_after_review)

        # Do the renaming
        self.fm.run(['/bin/sh', cmdfile.name], flags='w')
        cmdfile.close()

        # Retag the files, but only if the script wasn't changed during review,
        if not script_was_edited:
            tags_changed = False
            for old, new in zip(filenames, new_filenames):
                if old != new:
                    oldpath = self.fm.thisdir.path + '/' + old
                    newpath = self.fm.thisdir.path + '/' + new
                    if oldpath in self.fm.tags:
                        old_tag = self.fm.tags.tags[oldpath]
                        self.fm.tags.remove(oldpath)
                        self.fm.tags.tags[newpath] = old_tag
                        tags_changed = True
            if tags_changed:
                self.fm.tags.dump()
        else:
            # BUGFIX: was `fm.notify(...)`, a NameError — the bare `fm`
            # global only exists after :eval has been run
            self.fm.notify("files have not been retagged")
class relink(Command):
    """:relink <newpath>

    Change the destination of the currently selected symlink.
    """
    def execute(self):
        from ranger.container.file import File

        new_path = self.rest(1)
        cf = self.fm.thisfile

        if not new_path:
            return self.fm.notify('Syntax: relink <newpath>', bad=True)
        if not cf.is_link:
            return self.fm.notify('%s is not a symlink!' % cf.relative_path, bad=True)
        if new_path == os.readlink(cf.path):
            return  # already points there, nothing to do

        try:
            # NOTE(review): remove+symlink is not atomic — if symlink() fails
            # after remove() succeeded, the link is lost; execution then still
            # continues with the reset below
            os.remove(cf.path)
            os.symlink(new_path, cf.path)
        except OSError as err:
            self.fm.notify(err)
        self.fm.reset()
        self.fm.thisdir.pointed_obj = cf
        self.fm.thisfile = cf

    def tab(self, tabnum):
        if not self.rest(1):
            # complete with the link's current destination
            return self.line + os.readlink(self.fm.thisfile.path)
        else:
            return self._tab_directory_content()
class help_(Command):
    """:help

    Ask which kind of help to display: man page, key bindings, commands
    or settings.
    """
    name = 'help'

    def execute(self):
        def callback(answer):
            if answer == "q":
                return
            elif answer == "m":
                self.fm.display_help()
            elif answer == "c":
                self.fm.dump_commands()
            elif answer == "k":
                self.fm.dump_keybindings()
            elif answer == "s":
                self.fm.dump_settings()

        # dropped the unused `c = ` assignment; ask() is called for its
        # side effect only (chr(27) == Escape also aborts the prompt)
        self.fm.ui.console.ask("View [m]an page, [k]ey bindings,"
                               " [c]ommands or [s]ettings? (press q to abort)",
                               callback, list("mkcsq") + [chr(27)])
class copymap(Command):
    """:copymap <keys> <newkeys> [<newkeys2> ...]

    Copy a "browser" keybinding from <keys> to every <newkeys>.
    """
    context = 'browser'

    def execute(self):
        source = self.arg(1)
        if not source or not self.arg(2):
            return self.fm.notify("Not enough arguments", bad=True)
        for target in self.args[2:]:
            self.fm.ui.keymaps.copy(self.context, source, target)
class copypmap(copymap):
    """:copypmap <keys> <newkeys> [...] — copy a "pager" keybinding."""
    context = 'pager'
class copycmap(copymap):
    """:copycmap <keys> <newkeys> [...] — copy a "console" keybinding."""
    context = 'console'
class copytmap(copymap):
    """:copytmap <keys> <newkeys> [...] — copy a "taskview" keybinding."""
    context = 'taskview'
class unmap(Command):
    """:unmap <keys> [<keys2> ...]

    Remove the given "browser" key mappings.
    """
    context = 'browser'

    def execute(self):
        keymaps = self.fm.ui.keymaps
        for keys in self.args[1:]:
            keymaps.unbind(self.context, keys)
class cunmap(unmap):
    """:cunmap <keys> [<keys2> ...]

    Remove the given "console" key mappings.
    """
    # BUGFIX: this was `context = 'browser'`, inconsistent with the c-prefix
    # convention used by cmap/copycmap; the same inconsistency was fixed
    # upstream in ranger
    context = 'console'
class punmap(unmap):
    """:punmap <keys> [...] — remove "pager" key mappings."""
    context = 'pager'
class tunmap(unmap):
    """:tunmap <keys> [...] — remove "taskview" key mappings."""
    context = 'taskview'
class map_(Command):
    """:map <keysequence> <command>

    Bind <command> to <keysequence> in the "browser" context.  Macros in
    the bound command are resolved at invocation time, not at bind time.
    """
    name = 'map'
    context = 'browser'
    resolve_macros = False

    def execute(self):
        if not (self.arg(1) and self.arg(2)):
            return self.fm.notify("Not enough arguments", bad=True)
        self.fm.ui.keymaps.bind(self.context, self.arg(1), self.rest(2))
class cmap(map_):
    """:cmap <keysequence> <command> — bind in the "console" context."""
    context = 'console'
class tmap(map_):
    """:tmap <keysequence> <command> — bind in the "taskview" context."""
    context = 'taskview'
class pmap(map_):
    """:pmap <keysequence> <command> — bind in the "pager" context."""
    context = 'pager'
class scout(Command):
    """:scout [-flags] <pattern>

    Swiss-army-knife command for searching, traveling and filtering files.
    Each flag character toggles one behaviour; search modes (regex, glob,
    letterskip) and actions (mark, filter, open) can be freely combined.
    """
    # flag characters; parse_flags() (from the Command base class) splits the
    # command line into self.flags and self.pattern
    AUTO_OPEN = 'a'       # open the file when the match becomes unambiguous
    OPEN_ON_ENTER = 'e'   # open the file on enter
    FILTER = 'f'          # temporarily filter to matching files
    SM_GLOB = 'g'         # interpret pattern as a glob
    IGNORE_CASE = 'i'     # case-insensitive matching
    KEEP_OPEN = 'k'       # keep the console open after changing directory
    SM_LETTERSKIP = 'l'   # fuzzy letter-skip matching
    MARK = 'm'            # mark matching files
    UNMARK = 'M'          # unmark matching files
    PERM_FILTER = 'p'     # make the filter permanent
    SM_REGEX = 'r'        # interpret pattern as a regular expression
    SMART_CASE = 's'      # case-insensitive only for all-lowercase patterns
    AS_YOU_TYPE = 't'     # apply while typing
    INVERT = 'v'          # match everything that does NOT match

    def __init__(self, *args, **kws):
        Command.__init__(self, *args, **kws)
        self._regex = None  # compiled-pattern cache for _build_regex()
        self.flags, self.pattern = self.parse_flags()

    def execute(self):
        thisdir = self.fm.thisdir
        flags = self.flags
        pattern = self.pattern
        regex = self._build_regex()
        count = self._count(move=True)

        self.fm.thistab.last_search = regex
        self.fm.set_search_method(order="search")

        # mark/unmark matching files; when both m and M are given, the one
        # appearing later in the flag string wins
        if (self.MARK in flags or self.UNMARK in flags) and thisdir.files:
            value = flags.find(self.MARK) > flags.find(self.UNMARK)
            if self.FILTER in flags:
                # the view is already filtered to matches, mark everything
                for f in thisdir.files:
                    thisdir.mark_item(f, value)
            else:
                for f in thisdir.files:
                    if regex.search(f.relative_path):
                        thisdir.mark_item(f, value)

        if self.PERM_FILTER in flags:
            thisdir.filter = regex if pattern else None

        # clear the temporary filter in any case
        self.cancel()

        if self.OPEN_ON_ENTER in flags or \
                self.AUTO_OPEN in flags and count == 1:
            if os.path.exists(pattern):
                # pattern is an actual path: travel there directly
                self.fm.cd(pattern)
            else:
                self.fm.move(right=1)

        if self.KEEP_OPEN in flags and thisdir != self.fm.thisdir:
            # reopen the console (minus the consumed pattern) in the new dir
            if not pattern:
                self.fm.open_console(self.line)
            else:
                self.fm.open_console(self.line[0:-len(pattern)])

        if self.quickly_executed and thisdir != self.fm.thisdir and pattern != "..":
            # briefly swallow keystrokes after auto-travel to avoid surprises
            self.fm.block_input(0.5)

    def cancel(self):
        self.fm.thisdir.temporary_filter = None
        self.fm.thisdir.refilter()

    def quick(self):
        """Called while typing; returns True to execute immediately."""
        asyoutype = self.AS_YOU_TYPE in self.flags
        if self.FILTER in self.flags:
            self.fm.thisdir.temporary_filter = self._build_regex()
        if self.PERM_FILTER in self.flags and asyoutype:
            self.fm.thisdir.filter = self._build_regex()
        if self.FILTER in self.flags or self.PERM_FILTER in self.flags:
            self.fm.thisdir.refilter()
        if self._count(move=asyoutype) == 1 and self.AUTO_OPEN in self.flags:
            return True
        return False

    def tab(self, tabnum):
        # tab cycles the selection through the matches
        self._count(move=True, offset=tabnum)

    def _build_regex(self):
        """Compile self.pattern into a regex according to the flags (cached)."""
        if self._regex is not None:
            return self._regex

        frmat = "%s"
        flags = self.flags
        pattern = self.pattern

        if pattern == ".":
            return re.compile("")

        # user-typed anchors are moved into the surrounding format string
        if pattern.startswith('^'):
            pattern = pattern[1:]
            frmat = "^" + frmat
        if pattern.endswith('$'):
            pattern = pattern[:-1]
            frmat += "$"

        # select the search mode
        if self.SM_REGEX in flags:
            regex = pattern
        elif self.SM_GLOB in flags:
            regex = re.escape(pattern).replace("\\*", ".*").replace("\\?", ".")
        elif self.SM_LETTERSKIP in flags:
            # fuzzy: any characters may appear between the typed ones
            regex = ".*".join(re.escape(c) for c in pattern)
        else:
            regex = re.escape(pattern)

        regex = frmat % regex

        if self.INVERT in flags:
            # match strings that contain no match of the original regex
            regex = "^(?:(?!%s).)*$" % regex

        options = re.LOCALE | re.UNICODE
        if self.IGNORE_CASE in flags or self.SMART_CASE in flags and \
                pattern.islower():
            options |= re.IGNORECASE

        try:
            self._regex = re.compile(regex, options)
        except:
            # invalid user input: degrade to a match-everything regex
            self._regex = re.compile("")
        return self._regex

    def _count(self, move=False, offset=0):
        """Count matches; optionally move to the first one.

        Returns an int for 0/2+ matches but a bool (count == 1) when the
        scan finishes with exactly one match — callers only compare == 1.
        """
        count = 0
        cwd = self.fm.thisdir
        pattern = self.pattern

        if not pattern or not cwd.files:
            return 0
        if pattern == '.':
            return 0
        if pattern == '..':
            return 1

        # rotate so iteration starts at the current pointer (+offset)
        deq = deque(cwd.files)
        deq.rotate(-cwd.pointer - offset)
        i = offset
        regex = self._build_regex()
        for fsobj in deq:
            if regex.search(fsobj.relative_path):
                count += 1
                if move and count == 1:
                    cwd.move(to=(cwd.pointer + i) % len(cwd.files))
                    self.fm.thisfile = cwd.pointed_obj
                if count > 1:
                    return count  # early exit: only 0 / 1 / many matters
            i += 1

        return count == 1
class filter_inode_type(Command):
    """:filter_inode_type [dfl]

    Display only the file types given by the flag characters:
        d  directories
        f  plain files (symlinks excluded)
        l  symlinks
    With no argument, the filter is removed.
    """
    FILTER_DIRS = 'd'
    FILTER_FILES = 'f'
    FILTER_LINKS = 'l'

    def execute(self):
        flags = self.arg(1)
        if not flags:
            self.fm.thisdir.inode_type_filter = None
        else:
            # capture `flags` once instead of re-parsing the command line on
            # every filter call; also drop the redundant `True if ... else
            # False` and the `file` name that shadowed a builtin
            self.fm.thisdir.inode_type_filter = lambda fobj: bool(
                (self.FILTER_DIRS in flags and fobj.is_directory) or
                (self.FILTER_FILES in flags and fobj.is_file and not fobj.is_link) or
                (self.FILTER_LINKS in flags and fobj.is_link))
        self.fm.thisdir.refilter()
class grep(Command):
    """:grep <string>

    Recursively grep the selection for <string> and show the results in
    the pager.
    """
    def execute(self):
        pattern = self.rest(1)
        if not pattern:
            return
        action = ['grep', '--line-number', '-e', pattern, '-r']
        action.extend(f.path for f in self.fm.thistab.get_selection())
        self.fm.execute_command(action, flags='p')
class stage(Command):
    """:stage

    Stage the selected files with the version control system.
    """
    def execute(self):
        from ranger.ext.vcs import VcsError
        paths = [fobj.path for fobj in self.fm.thistab.get_selection()]
        self.fm.thisdir.vcs_outdated = True
        try:
            self.fm.thisdir.vcs.add(paths)
        except VcsError:
            self.fm.notify("Could not stage files.")
        self.fm.reload_cwd()
class unstage(Command):
    """:unstage

    Unstage the selected files with the version control system.
    """
    def execute(self):
        from ranger.ext.vcs import VcsError
        paths = [fobj.path for fobj in self.fm.thistab.get_selection()]
        self.fm.thisdir.vcs_outdated = True
        try:
            self.fm.thisdir.vcs.reset(paths)
        except VcsError:
            self.fm.notify("Could not unstage files.")
        self.fm.reload_cwd()
class diff(Command):
    """:diff

    Show a VCS diff of the selected files in $PAGER.
    Raises when the diff is empty so the failure is visible.
    """
    def execute(self):
        import tempfile
        # dropped the unused `from ranger.ext.vcs import VcsError` import and
        # the builtin-ish `L` variable name
        selection = self.fm.thistab.get_selection()
        if not selection:
            return
        filelist = [f.path for f in selection]
        vcs = selection[0].vcs

        raw_diff = vcs.get_raw_diff(filelist=filelist)
        if not raw_diff.strip():
            raise Exception("diff is empty")
        tmp = tempfile.NamedTemporaryFile()
        tmp.write(raw_diff.encode('utf-8'))
        tmp.flush()

        pager = os.environ.get('PAGER', ranger.DEFAULT_PAGER)
        self.fm.run([pager, tmp.name])
class log(Command):
    """:log

    Show the VCS log for the selected files in $PAGER.
    """
    def execute(self):
        import tempfile
        # dropped the unused `from ranger.ext.vcs import VcsError` import and
        # the builtin-ish `L` variable name
        selection = self.fm.thistab.get_selection()
        if not selection:
            return
        filelist = [f.path for f in selection]
        vcs = selection[0].vcs

        raw_log = vcs.get_raw_log(filelist=filelist)
        tmp = tempfile.NamedTemporaryFile()
        tmp.write(raw_log.encode('utf-8'))
        tmp.flush()

        pager = os.environ.get('PAGER', ranger.DEFAULT_PAGER)
        self.fm.run([pager, tmp.name])
class flat(Command):
    """:flat <level>

    Flatten the directory view <level> levels deep (-1 for unlimited,
    0 to disable).  The quantifier is used when no argument is given.
    """
    def execute(self):
        try:
            level = int(self.rest(1))
        except ValueError:
            level = self.quantifier
        if level is None:
            # BUGFIX: without an argument and without a quantifier, the old
            # code compared None < -1 and raised a TypeError on py3
            self.fm.notify("Syntax: flat <level>", bad=True)
            return
        if level < -1:
            self.fm.notify("Need an integer number (-1, 0, 1, ...)", bad=True)
            # BUGFIX: previously fell through and applied the invalid level
            return
        self.fm.thisdir.unload()
        self.fm.thisdir.flat = level
        self.fm.thisdir.load_content()
class prompt_metadata(Command):
    """:prompt_metadata <key1> [<key2> ...]

    Prompt for metadata values via the console, one key at a time, by
    chaining :meta console invocations.
    """
    _command_name = "meta"
    # queue of keys still to be prompted for; stored on the CLASS so it
    # survives across the chained console invocations (each prompt creates a
    # new command instance)
    _console_chain = None

    def execute(self):
        prompt_metadata._console_chain = self.args[1:]
        self._process_command_stack()

    def _process_command_stack(self):
        if prompt_metadata._console_chain:
            key = prompt_metadata._console_chain.pop()
            self._fill_console(key)
        else:
            # all keys handled: ask the browser columns to redraw
            for col in self.fm.ui.browser.columns:
                col.need_redraw = True

    def _fill_console(self, key):
        # pre-fill the console with "meta <key> <existing value>"
        metadata = self.fm.metadata.get_metadata(self.fm.thisfile.path)
        if key in metadata and metadata[key]:
            existing_value = metadata[key]
        else:
            existing_value = ""
        text = "%s %s %s" % (self._command_name, key, existing_value)
        self.fm.open_console(text, position=len(text))
class meta(prompt_metadata):
    """:meta <key> [<value>]

    Set the metadata <key> of the selection to <value> (everything after
    the key); an empty value clears the entry.
    """
    def execute(self):
        key = self.arg(1)
        # dropped the unused `value = self.rest(1)` assignment; only rest(2)
        # (everything after the key) is stored
        update_dict = {key: self.rest(2)}
        for entry in self.fm.thistab.get_selection():
            self.fm.metadata.set_metadata(entry.path, update_dict)
        self._process_command_stack()

    def tab(self, tabnum):
        key = self.arg(1)
        metadata = self.fm.metadata.get_metadata(self.fm.thisfile.path)
        if key in metadata and metadata[key]:
            # exact key with an existing value: offer the full line
            return [" ".join([self.arg(0), self.arg(1), metadata[key]])]
        # renamed the comprehension variable so it no longer shadows `key`
        return [self.arg(0) + " " + k for k in sorted(metadata)
                if k.startswith(self.arg(1))]
class linemode(default_linemode):
    """:linemode <mode>

    Change the linemode of all files in the current directory.
    """
    def execute(self):
        mode = self.arg(1)

        if mode == "normal":
            # "normal" is accepted as an alias for the default linemode
            mode = DEFAULT_LINEMODE

        if mode not in self.fm.thisfile.linemode_dict:
            self.fm.notify("Unhandled linemode: `%s'" % mode, bad=True)
            return

        self.fm.thisdir._set_linemode_of_children(mode)

        # Ask the browsercolumns to redraw
        for col in self.fm.ui.browser.columns:
            col.need_redraw = True
| true | true |
f734d8142392b73526b2bc0c75d4fbafc3612f58 | 3,661 | py | Python | python_modules/dagster/setup.py | davemasino/dagster | cec365242853579c7100bfd87a9ee4f36bdd8344 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/setup.py | davemasino/dagster | cec365242853579c7100bfd87a9ee4f36bdd8344 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/setup.py | davemasino/dagster | cec365242853579c7100bfd87a9ee4f36bdd8344 | [
"Apache-2.0"
] | null | null | null | from setuptools import find_packages, setup
def long_description():
return """
## Dagster
Dagster is a data orchestrator for machine learning, analytics, and ETL.
Dagster lets you define pipelines in terms of the data flow between reusable, logical components,
then test locally and run anywhere. With a unified view of pipelines and the assets they produce,
Dagster can schedule and orchestrate Pandas, Spark, SQL, or anything else that Python can invoke.
Dagster is designed for data platform engineers, data engineers, and full-stack data scientists.
Building a data platform with Dagster makes your stakeholders more independent and your systems
more robust. Developing data pipelines with Dagster makes testing easier and deploying faster.
""".strip()
def get_version():
    """Read ``__version__`` from dagster/version.py without importing dagster."""
    scope = {}
    with open("dagster/version.py") as fp:
        exec(fp.read(), scope)  # pylint: disable=W0122
    return scope["__version__"]
if __name__ == "__main__":
    # Packaging entry point: metadata, package data, dependencies and console
    # scripts for the `dagster` distribution.
    setup(
        name="dagster",
        version=get_version(),
        author="Elementl",
        author_email="hello@elementl.com",
        license="Apache-2.0",
        description="A data orchestrator for machine learning, analytics, and ETL.",
        long_description=long_description(),
        long_description_content_type="text/markdown",
        url="https://github.com/dagster-io/dagster",
        classifiers=[
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "License :: OSI Approved :: Apache Software License",
            "Operating System :: OS Independent",
        ],
        packages=find_packages(exclude=["dagster_tests"]),
        # non-Python files shipped inside the wheel (migrations, proto files)
        package_data={
            "dagster": [
                "dagster/core/storage/event_log/sqlite/alembic/*",
                "dagster/core/storage/runs/sqlite/alembic/*",
                "dagster/core/storage/schedules/sqlite/alembic/*",
                "dagster/grpc/protos/*",
            ]
        },
        include_package_data=True,
        install_requires=[
            "future",
            # cli
            "click>=5.0",
            "coloredlogs>=6.1, <=14.0",
            "PyYAML",
            # core (not explicitly expressed atm)
            "alembic>=1.2.1",
            "croniter>=0.3.34",
            "grpcio>=1.32.0",  # ensure version we require is >= that with which we generated the grpc code (set in dev-requirements)
            "grpcio-health-checking>=1.32.0",
            "pendulum==1.4.4",  # pinned to match airflow, can upgrade to 2.0 once airflow 1.10.13 is released
            "protobuf>=3.13.0",  # ensure version we require is >= that with which we generated the proto code (set in dev-requirements)
            "pyrsistent>=0.14.8",
            "python-dateutil",
            "requests",
            "rx<=1.6.1",  # 3.0 was a breaking change. No py2 compatability as well.
            "six",
            "tabulate",
            "tqdm",
            "sqlalchemy>=1.0",
            "toposort>=1.0",
            "watchdog>=0.8.3",
            'psutil >= 1.0; platform_system=="Windows"',
            # https://github.com/mhammond/pywin32/issues/1439
            'pywin32 != 226; platform_system=="Windows"',
            "pytz",
            "docstring-parser==0.7.1",
        ],
        extras_require={"docker": ["docker"],},
        # CLI entry points installed with the package
        entry_points={
            "console_scripts": [
                "dagster = dagster.cli:main",
                "dagster-scheduler = dagster.scheduler.cli:main",
                "dagster-daemon = dagster.daemon.cli:main",
            ]
        },
    )
| 39.365591 | 136 | 0.589183 | from setuptools import find_packages, setup
def long_description():
return """
## Dagster
Dagster is a data orchestrator for machine learning, analytics, and ETL.
Dagster lets you define pipelines in terms of the data flow between reusable, logical components,
then test locally and run anywhere. With a unified view of pipelines and the assets they produce,
Dagster can schedule and orchestrate Pandas, Spark, SQL, or anything else that Python can invoke.
Dagster is designed for data platform engineers, data engineers, and full-stack data scientists.
Building a data platform with Dagster makes your stakeholders more independent and your systems
more robust. Developing data pipelines with Dagster makes testing easier and deploying faster.
""".strip()
def get_version():
version = {}
with open("dagster/version.py") as fp:
exec(fp.read(), version)
return version["__version__"]
if __name__ == "__main__":
setup(
name="dagster",
version=get_version(),
author="Elementl",
author_email="hello@elementl.com",
license="Apache-2.0",
description="A data orchestrator for machine learning, analytics, and ETL.",
long_description=long_description(),
long_description_content_type="text/markdown",
url="https://github.com/dagster-io/dagster",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
packages=find_packages(exclude=["dagster_tests"]),
package_data={
"dagster": [
"dagster/core/storage/event_log/sqlite/alembic/*",
"dagster/core/storage/runs/sqlite/alembic/*",
"dagster/core/storage/schedules/sqlite/alembic/*",
"dagster/grpc/protos/*",
]
},
include_package_data=True,
install_requires=[
"future",
"click>=5.0",
"coloredlogs>=6.1, <=14.0",
"PyYAML",
"alembic>=1.2.1",
"croniter>=0.3.34",
"grpcio>=1.32.0",
"grpcio-health-checking>=1.32.0",
"pendulum==1.4.4",
"protobuf>=3.13.0",
"pyrsistent>=0.14.8",
"python-dateutil",
"requests",
"rx<=1.6.1",
"six",
"tabulate",
"tqdm",
"sqlalchemy>=1.0",
"toposort>=1.0",
"watchdog>=0.8.3",
'psutil >= 1.0; platform_system=="Windows"',
'pywin32 != 226; platform_system=="Windows"',
"pytz",
"docstring-parser==0.7.1",
],
extras_require={"docker": ["docker"],},
entry_points={
"console_scripts": [
"dagster = dagster.cli:main",
"dagster-scheduler = dagster.scheduler.cli:main",
"dagster-daemon = dagster.daemon.cli:main",
]
},
)
| true | true |
f734d818565dc62e225a1b3a231c734bfd459388 | 1,802 | py | Python | pre_tokenize.py | vyraun/awesome-align | 9871098b94be589f45c505a9732e943fa409cf4d | [
"BSD-3-Clause"
] | null | null | null | pre_tokenize.py | vyraun/awesome-align | 9871098b94be589f45c505a9732e943fa409cf4d | [
"BSD-3-Clause"
] | null | null | null | pre_tokenize.py | vyraun/awesome-align | 9871098b94be589f45c505a9732e943fa409cf4d | [
"BSD-3-Clause"
] | null | null | null | import argparse
import random
import itertools
import os
import tempfile
import warnings
warnings.filterwarnings("ignore")
from awesome_align.tokenization_bert import BasicTokenizer
def main():
    """Tokenize a parallel "src ||| tgt" file.

    Reads --data_file line by line, tokenizes both sides with BERT's
    BasicTokenizer, and writes "tok_src ||| tok_tgt" lines to
    --output_file (or stdout when no output file is given).

    Raises:
        ValueError: when a line is empty or not exactly "src ||| tgt".
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_file", default=None, type=str, required=True, help="The input data file (a text file)."
    )
    parser.add_argument(
        "--output_file", default=None, type=str, required=False, help="The output data file to write (a text file)."
    )
    args = parser.parse_args()

    file_path = args.data_file
    write_path = args.output_file
    if write_path is None:  # was `== None`; also removed an unused `examples` list
        stdout = True
        w_f = None
    else:
        w_f = open(write_path, "w")
        stdout = False

    # Default arguments match the cased multilingual tokenizer.
    tokenizer = BasicTokenizer()

    assert os.path.isfile(file_path)
    with open(file_path, encoding="utf-8") as f:
        # iterate the file directly instead of readlines(): avoids loading
        # the whole corpus into memory
        for idx, line in enumerate(f):
            if len(line) == 0 or line.isspace() or not len(line.split(' ||| ')) == 2:
                raise ValueError(f'Line {idx+1} is not in the correct format!')
            src, tgt = line.split(' ||| ')
            if src.rstrip() == '' or tgt.rstrip() == '':
                raise ValueError(f'Line {idx+1} is not in the correct format!')
            sent_src, sent_tgt = src.strip(), tgt.strip()
            token_src = tokenizer.tokenize(sent_src)
            token_tgt = tokenizer.tokenize(sent_tgt)
            out_line = ' '.join(token_src) + ' ||| ' + ' '.join(token_tgt)
            if stdout:
                print(out_line)
            else:
                w_f.write(out_line + '\n')
    if not stdout:
        w_f.close()
if __name__ == "__main__":
main()
| 30.033333 | 116 | 0.630411 | import argparse
import random
import itertools
import os
import tempfile
import warnings
warnings.filterwarnings("ignore")
from awesome_align.tokenization_bert import BasicTokenizer
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_file", default=None, type=str, required=True, help="The input data file (a text file)."
)
parser.add_argument(
"--output_file", default=None, type=str, required=False, help="The output data file to write (a text file)."
)
args = parser.parse_args()
file_path=args.data_file
write_path=args.output_file
if write_path == None:
stdout = True
else:
w_f = open(write_path, "w")
stdout=False
tokenizer = BasicTokenizer()
assert os.path.isfile(file_path)
examples = []
with open(file_path, encoding="utf-8") as f:
for idx, line in enumerate(f.readlines()):
if len(line) == 0 or line.isspace() or not len(line.split(' ||| ')) == 2:
raise ValueError(f'Line {idx+1} is not in the correct format!')
src, tgt = line.split(' ||| ')
if src.rstrip() == '' or tgt.rstrip() == '':
raise ValueError(f'Line {idx+1} is not in the correct format!')
sent_src, sent_tgt = src.strip() , tgt.strip()
token_src, token_tgt = tokenizer.tokenize(sent_src) , tokenizer.tokenize(sent_tgt)
token_src_string, token_tgt_string = ' '.join([t for t in token_src]) , ' '.join([t for t in token_tgt])
if stdout:
print(token_src_string + ' ||| ' + token_tgt_string)
else:
w_f.write(token_src_string + ' ||| ' + token_tgt_string + '\n')
if stdout==False:
w_f.close()
if __name__ == "__main__":
main()
| true | true |
f734d8a1c4282ee4aefbee1bf86ab99face93065 | 930 | py | Python | py3server/swagger_server/test/test_evidence_controller.py | lhannest/pythonBeaconServerStub | 3fee2505f5f7afda9184277b5f6308ff05832e35 | [
"MIT"
] | null | null | null | py3server/swagger_server/test/test_evidence_controller.py | lhannest/pythonBeaconServerStub | 3fee2505f5f7afda9184277b5f6308ff05832e35 | [
"MIT"
] | null | null | null | py3server/swagger_server/test/test_evidence_controller.py | lhannest/pythonBeaconServerStub | 3fee2505f5f7afda9184277b5f6308ff05832e35 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from swagger_server.models.inline_response2004 import InlineResponse2004
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestEvidenceController(BaseTestCase):
    """EvidenceController integration test stubs."""

    def test_get_evidence(self):
        """GET /api/evidence/{statementId} with keyword and paging query
        parameters responds with HTTP 200."""
        query_string = [('keywords', 'keywords_example'),
                        ('pageNumber', 56),
                        ('pageSize', 56)]
        response = self.client.open('/api/evidence/{statementId}'.format(statementId='statementId_example'),
                                    method='GET',
                                    query_string=query_string)
        self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
| 29.0625 | 108 | 0.611828 |
from __future__ import absolute_import
from swagger_server.models.inline_response2004 import InlineResponse2004
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestEvidenceController(BaseTestCase):
def test_get_evidence(self):
query_string = [('keywords', 'keywords_example'),
('pageNumber', 56),
('pageSize', 56)]
response = self.client.open('/api/evidence/{statementId}'.format(statementId='statementId_example'),
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
| true | true |
f734d8a2dc5aaa3965dc9b461bc243e1cfe6a270 | 1,081 | py | Python | recipes/cpp-peglib/0.x.x/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 562 | 2019-09-04T12:23:43.000Z | 2022-03-29T16:41:43.000Z | recipes/cpp-peglib/0.x.x/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 9,799 | 2019-09-04T12:02:11.000Z | 2022-03-31T23:55:45.000Z | recipes/cpp-peglib/0.x.x/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 1,126 | 2019-09-04T11:57:46.000Z | 2022-03-31T16:43:38.000Z | from conans import ConanFile, tools
required_conan_version = ">=1.33.0"
class CpppeglibConan(ConanFile):
    """Conan recipe for cpp-peglib, a single-header C++11 PEG parser library."""
    name = "cpp-peglib"
    description = "A single file C++11 header-only PEG (Parsing Expression Grammars) library."
    license = "MIT"
    topics = ("conan", "cpp-peglib", "peg", "parser", "header-only")
    homepage = "https://github.com/yhirose/cpp-peglib"
    url = "https://github.com/conan-io/conan-center-index"
    settings = "os"
    no_copy_source = True

    @property
    def _source_subfolder(self):
        # staging directory the upstream sources are extracted into
        return "source_subfolder"

    def package_id(self):
        # header-only: one package id regardless of settings/options
        self.info.header_only()

    def source(self):
        tools.get(**self.conan_data["sources"][self.version],
                  destination=self._source_subfolder, strip_root=True)

    def package(self):
        # ship only the license and the single header
        self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
        self.copy("peglib.h", dst="include", src=self._source_subfolder)

    def package_info(self):
        if self.settings.os in ["Linux", "FreeBSD"]:
            # pthread is linked on these platforms — presumably because the
            # library uses std::thread internally; confirm upstream
            self.cpp_info.system_libs.append("pthread")
| 31.794118 | 94 | 0.655874 | from conans import ConanFile, tools
required_conan_version = ">=1.33.0"
class CpppeglibConan(ConanFile):
name = "cpp-peglib"
description = "A single file C++11 header-only PEG (Parsing Expression Grammars) library."
license = "MIT"
topics = ("conan", "cpp-peglib", "peg", "parser", "header-only")
homepage = "https://github.com/yhirose/cpp-peglib"
url = "https://github.com/conan-io/conan-center-index"
settings = "os"
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
def package_id(self):
self.info.header_only()
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.copy("peglib.h", dst="include", src=self._source_subfolder)
def package_info(self):
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.append("pthread")
| true | true |
f734da1f0b7a9ab56219579fceb1f161ba893075 | 14,951 | py | Python | tests/cli/test_rasa_train.py | chaneyjd/rasa | 104a9591fc10b96eaa7fe402b6d64ca652b7ebe2 | [
"Apache-2.0"
] | null | null | null | tests/cli/test_rasa_train.py | chaneyjd/rasa | 104a9591fc10b96eaa7fe402b6d64ca652b7ebe2 | [
"Apache-2.0"
] | 52 | 2020-10-03T11:58:14.000Z | 2022-02-01T13:17:52.000Z | tests/cli/test_rasa_train.py | chaneyjd/rasa | 104a9591fc10b96eaa7fe402b6d64ca652b7ebe2 | [
"Apache-2.0"
] | null | null | null | import os
import tempfile
from pathlib import Path
import pytest
from typing import Callable
from _pytest.pytester import RunResult
import rasa.shared.utils.io
from rasa import model
from rasa.nlu.model import Metadata
from rasa.shared.nlu.training_data import training_data
# noinspection PyProtectedMember
from rasa.cli.train import _get_valid_config
from rasa.shared.constants import (
CONFIG_MANDATORY_KEYS_CORE,
CONFIG_MANDATORY_KEYS_NLU,
CONFIG_MANDATORY_KEYS,
)
import rasa.utils.io as io_utils
def test_train(run_in_simple_project: Callable[..., RunResult]):
    """`rasa train` writes exactly one model archive with the fixed name and
    does not persist the NLU training data by default."""
    temp_dir = os.getcwd()

    run_in_simple_project(
        "train",
        "-c",
        "config.yml",
        "-d",
        "domain.yml",
        "--data",
        "data",
        "--out",
        "train_models",
        "--fixed-model-name",
        "test-model",
    )

    assert os.path.exists(os.path.join(temp_dir, "train_models"))
    files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "train_models"))
    assert len(files) == 1
    assert os.path.basename(files[0]) == "test-model.tar.gz"
    model_dir = model.get_model("train_models")
    assert model_dir is not None
    metadata = Metadata.load(os.path.join(model_dir, "nlu"))
    # without --persist-nlu-data the training data must not be stored
    assert metadata.get("training_data") is None
    assert not os.path.exists(
        os.path.join(model_dir, "nlu", training_data.DEFAULT_TRAINING_DATA_OUTPUT_PATH)
    )
def test_train_persist_nlu_data(run_in_simple_project: Callable[..., RunResult]):
    """With --persist-nlu-data the NLU training data is stored in the model."""
    temp_dir = os.getcwd()
    run_in_simple_project(
        "train",
        "-c",
        "config.yml",
        "-d",
        "domain.yml",
        "--data",
        "data",
        "--out",
        "train_models",
        "--fixed-model-name",
        "test-model",
        "--persist-nlu-data",
    )
    assert os.path.exists(os.path.join(temp_dir, "train_models"))
    files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "train_models"))
    assert len(files) == 1
    assert os.path.basename(files[0]) == "test-model.tar.gz"
    model_dir = model.get_model("train_models")
    assert model_dir is not None
    metadata = Metadata.load(os.path.join(model_dir, "nlu"))
    # Counterpart of test_train: the flag makes the data part of the model.
    assert metadata.get("training_data") is not None
    assert os.path.exists(
        os.path.join(model_dir, "nlu", training_data.DEFAULT_TRAINING_DATA_OUTPUT_PATH)
    )
def test_train_core_compare(run_in_simple_project: Callable[..., RunResult]):
    """`rasa train core` in comparison mode (two configs, two runs, two
    exclusion percentages) writes one directory per run with one model per
    config/percentage combination."""
    temp_dir = os.getcwd()
    # Two (identical) policy configs to compare against each other.
    rasa.shared.utils.io.write_yaml(
        {
            "language": "en",
            "pipeline": "supervised_embeddings",
            "policies": [{"name": "MemoizationPolicy"}],
        },
        "config_1.yml",
    )
    rasa.shared.utils.io.write_yaml(
        {
            "language": "en",
            "pipeline": "supervised_embeddings",
            "policies": [{"name": "MemoizationPolicy"}],
        },
        "config_2.yml",
    )
    run_in_simple_project(
        "train",
        "core",
        "-c",
        "config_1.yml",
        "config_2.yml",
        "--stories",
        "data/stories.yml",
        "--out",
        "core_comparison_results",
        "--runs",
        "2",
        "--percentages",
        "25",
        "75",
        "--augmentation",
        "5",
    )
    assert os.path.exists(os.path.join(temp_dir, "core_comparison_results"))
    # One sub-directory per run (--runs 2).
    run_directories = rasa.shared.utils.io.list_subdirectories(
        os.path.join(temp_dir, "core_comparison_results")
    )
    assert len(run_directories) == 2
    # 2 configs x 2 percentages = 4 model files per run directory.
    model_files = rasa.shared.utils.io.list_files(
        os.path.join(temp_dir, "core_comparison_results", run_directories[0])
    )
    assert len(model_files) == 4
    assert model_files[0].endswith("tar.gz")
def test_train_no_domain_exists(
    run_in_simple_project: Callable[..., RunResult]
) -> None:
    """Without a domain file, `rasa train` still produces a model whose
    archive contains an NLU part."""
    os.remove("domain.yml")
    run_in_simple_project(
        "train",
        "-c",
        "config.yml",
        "--data",
        "data",
        "--out",
        "train_models_no_domain",
        "--fixed-model-name",
        "nlu-model-only",
    )
    assert os.path.exists("train_models_no_domain")
    files = rasa.shared.utils.io.list_files("train_models_no_domain")
    assert len(files) == 1
    trained_model_path = "train_models_no_domain/nlu-model-only.tar.gz"
    unpacked = model.unpack_model(trained_model_path)
    # The unpacked archive must contain NLU metadata even without a domain.
    metadata_path = os.path.join(unpacked, "nlu", "metadata.json")
    assert os.path.exists(metadata_path)
def test_train_skip_on_model_not_changed(
    run_in_simple_project_with_model: Callable[..., RunResult]
):
    """Retraining with unchanged data keeps the existing model file as-is."""
    temp_dir = os.getcwd()
    assert os.path.exists(os.path.join(temp_dir, "models"))
    files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "models"))
    assert len(files) == 1
    file_name = files[0]
    run_in_simple_project_with_model("train")
    assert os.path.exists(os.path.join(temp_dir, "models"))
    files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "models"))
    assert len(files) == 1
    # Still the very same file: training was skipped.
    assert file_name == files[0]
def test_train_force(run_in_simple_project_with_model: Callable[..., RunResult]):
    """`--force` retrains even with unchanged data, adding a second model."""
    temp_dir = os.getcwd()
    assert os.path.exists(os.path.join(temp_dir, "models"))
    files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "models"))
    assert len(files) == 1
    run_in_simple_project_with_model("train", "--force")
    assert os.path.exists(os.path.join(temp_dir, "models"))
    files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "models"))
    # One pre-existing model plus the forced retrain.
    assert len(files) == 2
def test_train_with_only_nlu_data(run_in_simple_project: Callable[..., RunResult]):
    """Training succeeds after all Core data (stories/rules) is removed."""
    temp_dir = Path.cwd()
    for core_file in ["stories.yml", "rules.yml"]:
        assert (temp_dir / "data" / core_file).exists()
        (temp_dir / "data" / core_file).unlink()
    run_in_simple_project("train", "--fixed-model-name", "test-model")
    assert os.path.exists(os.path.join(temp_dir, "models"))
    files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "models"))
    assert len(files) == 1
    assert os.path.basename(files[0]) == "test-model.tar.gz"
def test_train_with_only_core_data(run_in_simple_project: Callable[..., RunResult]):
    """Training succeeds after the NLU data file is removed."""
    temp_dir = os.getcwd()
    assert os.path.exists(os.path.join(temp_dir, "data/nlu.yml"))
    os.remove(os.path.join(temp_dir, "data/nlu.yml"))
    run_in_simple_project("train", "--fixed-model-name", "test-model")
    assert os.path.exists(os.path.join(temp_dir, "models"))
    files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "models"))
    assert len(files) == 1
    assert os.path.basename(files[0]) == "test-model.tar.gz"
def test_train_core(run_in_simple_project: Callable[..., RunResult]):
    """`rasa train core` writes a fixed-name Core-only model archive."""
    run_in_simple_project(
        "train",
        "core",
        "-c",
        "config.yml",
        "-d",
        "domain.yml",
        "--stories",
        "data",
        "--out",
        "train_rasa_models",
        "--fixed-model-name",
        "rasa-model",
    )
    assert os.path.exists("train_rasa_models/rasa-model.tar.gz")
    assert os.path.isfile("train_rasa_models/rasa-model.tar.gz")
def test_train_core_no_domain_exists(run_in_simple_project: Callable[..., RunResult]):
    """Core training produces no model when the domain file is missing."""
    os.remove("domain.yml")
    run_in_simple_project(
        "train",
        "core",
        "--config",
        "config.yml",
        "--domain",
        # "domain1.yml" is never created, so the domain cannot be loaded.
        "domain1.yml",
        "--stories",
        "data",
        "--out",
        "train_rasa_models_no_domain",
        "--fixed-model-name",
        "rasa-model",
    )
    assert not os.path.exists("train_rasa_models_no_domain/rasa-model.tar.gz")
    assert not os.path.isfile("train_rasa_models_no_domain/rasa-model.tar.gz")
def test_train_nlu(run_in_simple_project: Callable[..., RunResult]):
    """`rasa train nlu` writes a timestamped `nlu-` model that does not
    contain the training data by default."""
    run_in_simple_project(
        "train",
        "nlu",
        "-c",
        "config.yml",
        "--nlu",
        "data/nlu.md",
        "--out",
        "train_models",
    )
    assert os.path.exists("train_models")
    files = rasa.shared.utils.io.list_files("train_models")
    assert len(files) == 1
    # NLU-only models get an auto-generated "nlu-" prefixed name.
    assert os.path.basename(files[0]).startswith("nlu-")
    model_dir = model.get_model("train_models")
    assert model_dir is not None
    metadata = Metadata.load(os.path.join(model_dir, "nlu"))
    assert metadata.get("training_data") is None
    assert not os.path.exists(
        os.path.join(model_dir, "nlu", training_data.DEFAULT_TRAINING_DATA_OUTPUT_PATH)
    )
def test_train_nlu_persist_nlu_data(
    run_in_simple_project: Callable[..., RunResult]
) -> None:
    """`rasa train nlu --persist-nlu-data` stores the data inside the model."""
    run_in_simple_project(
        "train",
        "nlu",
        "-c",
        "config.yml",
        "--nlu",
        "data/nlu.md",
        "--out",
        "train_models",
        "--persist-nlu-data",
    )
    assert os.path.exists("train_models")
    files = rasa.shared.utils.io.list_files("train_models")
    assert len(files) == 1
    assert os.path.basename(files[0]).startswith("nlu-")
    model_dir = model.get_model("train_models")
    assert model_dir is not None
    metadata = Metadata.load(os.path.join(model_dir, "nlu"))
    # Counterpart of test_train_nlu: the flag persists the training data.
    assert metadata.get("training_data") is not None
    assert os.path.exists(
        os.path.join(model_dir, "nlu", training_data.DEFAULT_TRAINING_DATA_OUTPUT_PATH)
    )
def test_train_help(run: Callable[..., RunResult]):
    """Every line of the expected usage text appears in `rasa train --help`."""
    output = run("train", "--help")
    help_text = """usage: rasa train [-h] [-v] [-vv] [--quiet] [--data DATA [DATA ...]]
                  [-c CONFIG] [-d DOMAIN] [--out OUT]
                  [--augmentation AUGMENTATION] [--debug-plots]
                  [--num-threads NUM_THREADS]
                  [--fixed-model-name FIXED_MODEL_NAME] [--persist-nlu-data]
                  [--force]
                  {core,nlu} ..."""
    lines = help_text.split("\n")
    # expected help text lines should appear somewhere in the output
    printed_help = set(output.outlines)
    for line in lines:
        assert line in printed_help
def test_train_nlu_help(run: Callable[..., RunResult]):
    """Every line of the expected usage text appears in `rasa train nlu --help`."""
    output = run("train", "nlu", "--help")
    help_text = """usage: rasa train nlu [-h] [-v] [-vv] [--quiet] [-c CONFIG] [--out OUT]
                      [-u NLU] [--num-threads NUM_THREADS]
                      [--fixed-model-name FIXED_MODEL_NAME]
                      [--persist-nlu-data]"""
    lines = help_text.split("\n")
    # expected help text lines should appear somewhere in the output
    printed_help = set(output.outlines)
    for line in lines:
        assert line in printed_help
def test_train_core_help(run: Callable[..., RunResult]):
    """Every line of the expected usage text appears in `rasa train core --help`."""
    output = run("train", "core", "--help")
    help_text = """usage: rasa train core [-h] [-v] [-vv] [--quiet] [-s STORIES] [-d DOMAIN]
                       [-c CONFIG [CONFIG ...]] [--out OUT]
                       [--augmentation AUGMENTATION] [--debug-plots] [--force]
                       [--fixed-model-name FIXED_MODEL_NAME]
                       [--percentages [PERCENTAGES [PERCENTAGES ...]]]
                       [--runs RUNS]"""
    lines = help_text.split("\n")
    # expected help text lines should appear somewhere in the output
    printed_help = set(output.outlines)
    for line in lines:
        assert line in printed_help
@pytest.mark.parametrize(
    "parameters",
    # Each case: user config data, fallback default config, the keys the
    # resulting config must contain, and whether a SystemExit is expected.
    [
        {
            "config_data": {"language": "en", "pipeline": "supervised"},
            "default_config": {
                "language": "en",
                "pipeline": "supervised",
                "policies": ["TEDPolicy", "FallbackPolicy"],
            },
            "mandatory_keys": CONFIG_MANDATORY_KEYS_CORE,
            "error": False,
        },
        {
            "config_data": {
                "language": "en",
                "pipeline": "supervised",
                "policies": None,
            },
            "default_config": {
                "language": "en",
                "pipeline": "supervised",
                "policies": ["TEDPolicy", "FallbackPolicy"],
            },
            "mandatory_keys": CONFIG_MANDATORY_KEYS_CORE,
            "error": False,
        },
        {
            "config_data": {},
            "default_config": {
                "language": "en",
                "pipeline": "supervised",
                "policies": ["TEDPolicy", "FallbackPolicy"],
            },
            "mandatory_keys": CONFIG_MANDATORY_KEYS,
            "error": True,
        },
        {
            "config_data": {
                "policies": ["TEDPolicy", "FallbackPolicy"],
                "imports": "other-folder",
            },
            "default_config": {
                "language": "en",
                "pipeline": "supervised",
                "policies": ["TEDPolicy", "FallbackPolicy"],
            },
            "mandatory_keys": CONFIG_MANDATORY_KEYS_NLU,
            "error": True,
        },
        {
            "config_data": None,
            "default_config": {
                "pipeline": "supervised",
                "policies": ["TEDPolicy", "FallbackPolicy"],
            },
            "mandatory_keys": CONFIG_MANDATORY_KEYS_NLU,
            "error": True,
        },
        {
            "config_data": None,
            "default_config": {
                "language": "en",
                "pipeline": "supervised",
                "policies": ["TEDPolicy", "FallbackPolicy"],
            },
            "mandatory_keys": CONFIG_MANDATORY_KEYS,
            "error": False,
        },
        {
            "config_data": None,
            "default_config": {"language": "en", "pipeline": "supervised"},
            "mandatory_keys": CONFIG_MANDATORY_KEYS_CORE,
            "error": False,
        },
        {
            "config_data": None,
            "default_config": None,
            "mandatory_keys": CONFIG_MANDATORY_KEYS,
            "error": True,
        },
    ],
)
def test_get_valid_config(parameters):
    """_get_valid_config exits (SystemExit) when mandatory keys cannot be
    satisfied; otherwise it returns a config file containing all of them."""
    config_path = None
    if parameters["config_data"] is not None:
        config_path = os.path.join(tempfile.mkdtemp(), "config.yml")
        rasa.shared.utils.io.write_yaml(parameters["config_data"], config_path)
    default_config_path = None
    if parameters["default_config"] is not None:
        default_config_path = os.path.join(tempfile.mkdtemp(), "default-config.yml")
        rasa.shared.utils.io.write_yaml(
            parameters["default_config"], default_config_path
        )
    if parameters["error"]:
        with pytest.raises(SystemExit):
            _get_valid_config(config_path, parameters["mandatory_keys"])
    else:
        config_path = _get_valid_config(
            config_path, parameters["mandatory_keys"], default_config_path
        )
        config_data = rasa.shared.utils.io.read_yaml_file(config_path)
        for k in parameters["mandatory_keys"]:
            assert k in config_data
def test_get_valid_config_with_non_existing_file():
    """A missing config file makes _get_valid_config exit with SystemExit."""
    with pytest.raises(SystemExit):
        _get_valid_config("non-existing-file.yml", CONFIG_MANDATORY_KEYS)
| 30.826804 | 92 | 0.590596 | import os
import tempfile
from pathlib import Path
import pytest
from typing import Callable
from _pytest.pytester import RunResult
import rasa.shared.utils.io
from rasa import model
from rasa.nlu.model import Metadata
from rasa.shared.nlu.training_data import training_data
from rasa.cli.train import _get_valid_config
from rasa.shared.constants import (
CONFIG_MANDATORY_KEYS_CORE,
CONFIG_MANDATORY_KEYS_NLU,
CONFIG_MANDATORY_KEYS,
)
import rasa.utils.io as io_utils
def test_train(run_in_simple_project: Callable[..., RunResult]):
temp_dir = os.getcwd()
run_in_simple_project(
"train",
"-c",
"config.yml",
"-d",
"domain.yml",
"--data",
"data",
"--out",
"train_models",
"--fixed-model-name",
"test-model",
)
assert os.path.exists(os.path.join(temp_dir, "train_models"))
files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "train_models"))
assert len(files) == 1
assert os.path.basename(files[0]) == "test-model.tar.gz"
model_dir = model.get_model("train_models")
assert model_dir is not None
metadata = Metadata.load(os.path.join(model_dir, "nlu"))
assert metadata.get("training_data") is None
assert not os.path.exists(
os.path.join(model_dir, "nlu", training_data.DEFAULT_TRAINING_DATA_OUTPUT_PATH)
)
def test_train_persist_nlu_data(run_in_simple_project: Callable[..., RunResult]):
temp_dir = os.getcwd()
run_in_simple_project(
"train",
"-c",
"config.yml",
"-d",
"domain.yml",
"--data",
"data",
"--out",
"train_models",
"--fixed-model-name",
"test-model",
"--persist-nlu-data",
)
assert os.path.exists(os.path.join(temp_dir, "train_models"))
files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "train_models"))
assert len(files) == 1
assert os.path.basename(files[0]) == "test-model.tar.gz"
model_dir = model.get_model("train_models")
assert model_dir is not None
metadata = Metadata.load(os.path.join(model_dir, "nlu"))
assert metadata.get("training_data") is not None
assert os.path.exists(
os.path.join(model_dir, "nlu", training_data.DEFAULT_TRAINING_DATA_OUTPUT_PATH)
)
def test_train_core_compare(run_in_simple_project: Callable[..., RunResult]):
temp_dir = os.getcwd()
rasa.shared.utils.io.write_yaml(
{
"language": "en",
"pipeline": "supervised_embeddings",
"policies": [{"name": "MemoizationPolicy"}],
},
"config_1.yml",
)
rasa.shared.utils.io.write_yaml(
{
"language": "en",
"pipeline": "supervised_embeddings",
"policies": [{"name": "MemoizationPolicy"}],
},
"config_2.yml",
)
run_in_simple_project(
"train",
"core",
"-c",
"config_1.yml",
"config_2.yml",
"--stories",
"data/stories.yml",
"--out",
"core_comparison_results",
"--runs",
"2",
"--percentages",
"25",
"75",
"--augmentation",
"5",
)
assert os.path.exists(os.path.join(temp_dir, "core_comparison_results"))
run_directories = rasa.shared.utils.io.list_subdirectories(
os.path.join(temp_dir, "core_comparison_results")
)
assert len(run_directories) == 2
model_files = rasa.shared.utils.io.list_files(
os.path.join(temp_dir, "core_comparison_results", run_directories[0])
)
assert len(model_files) == 4
assert model_files[0].endswith("tar.gz")
def test_train_no_domain_exists(
run_in_simple_project: Callable[..., RunResult]
) -> None:
os.remove("domain.yml")
run_in_simple_project(
"train",
"-c",
"config.yml",
"--data",
"data",
"--out",
"train_models_no_domain",
"--fixed-model-name",
"nlu-model-only",
)
assert os.path.exists("train_models_no_domain")
files = rasa.shared.utils.io.list_files("train_models_no_domain")
assert len(files) == 1
trained_model_path = "train_models_no_domain/nlu-model-only.tar.gz"
unpacked = model.unpack_model(trained_model_path)
metadata_path = os.path.join(unpacked, "nlu", "metadata.json")
assert os.path.exists(metadata_path)
def test_train_skip_on_model_not_changed(
run_in_simple_project_with_model: Callable[..., RunResult]
):
temp_dir = os.getcwd()
assert os.path.exists(os.path.join(temp_dir, "models"))
files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "models"))
assert len(files) == 1
file_name = files[0]
run_in_simple_project_with_model("train")
assert os.path.exists(os.path.join(temp_dir, "models"))
files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "models"))
assert len(files) == 1
assert file_name == files[0]
def test_train_force(run_in_simple_project_with_model: Callable[..., RunResult]):
temp_dir = os.getcwd()
assert os.path.exists(os.path.join(temp_dir, "models"))
files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "models"))
assert len(files) == 1
run_in_simple_project_with_model("train", "--force")
assert os.path.exists(os.path.join(temp_dir, "models"))
files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "models"))
assert len(files) == 2
def test_train_with_only_nlu_data(run_in_simple_project: Callable[..., RunResult]):
temp_dir = Path.cwd()
for core_file in ["stories.yml", "rules.yml"]:
assert (temp_dir / "data" / core_file).exists()
(temp_dir / "data" / core_file).unlink()
run_in_simple_project("train", "--fixed-model-name", "test-model")
assert os.path.exists(os.path.join(temp_dir, "models"))
files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "models"))
assert len(files) == 1
assert os.path.basename(files[0]) == "test-model.tar.gz"
def test_train_with_only_core_data(run_in_simple_project: Callable[..., RunResult]):
temp_dir = os.getcwd()
assert os.path.exists(os.path.join(temp_dir, "data/nlu.yml"))
os.remove(os.path.join(temp_dir, "data/nlu.yml"))
run_in_simple_project("train", "--fixed-model-name", "test-model")
assert os.path.exists(os.path.join(temp_dir, "models"))
files = rasa.shared.utils.io.list_files(os.path.join(temp_dir, "models"))
assert len(files) == 1
assert os.path.basename(files[0]) == "test-model.tar.gz"
def test_train_core(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project(
"train",
"core",
"-c",
"config.yml",
"-d",
"domain.yml",
"--stories",
"data",
"--out",
"train_rasa_models",
"--fixed-model-name",
"rasa-model",
)
assert os.path.exists("train_rasa_models/rasa-model.tar.gz")
assert os.path.isfile("train_rasa_models/rasa-model.tar.gz")
def test_train_core_no_domain_exists(run_in_simple_project: Callable[..., RunResult]):
os.remove("domain.yml")
run_in_simple_project(
"train",
"core",
"--config",
"config.yml",
"--domain",
"domain1.yml",
"--stories",
"data",
"--out",
"train_rasa_models_no_domain",
"--fixed-model-name",
"rasa-model",
)
assert not os.path.exists("train_rasa_models_no_domain/rasa-model.tar.gz")
assert not os.path.isfile("train_rasa_models_no_domain/rasa-model.tar.gz")
def test_train_nlu(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project(
"train",
"nlu",
"-c",
"config.yml",
"--nlu",
"data/nlu.md",
"--out",
"train_models",
)
assert os.path.exists("train_models")
files = rasa.shared.utils.io.list_files("train_models")
assert len(files) == 1
assert os.path.basename(files[0]).startswith("nlu-")
model_dir = model.get_model("train_models")
assert model_dir is not None
metadata = Metadata.load(os.path.join(model_dir, "nlu"))
assert metadata.get("training_data") is None
assert not os.path.exists(
os.path.join(model_dir, "nlu", training_data.DEFAULT_TRAINING_DATA_OUTPUT_PATH)
)
def test_train_nlu_persist_nlu_data(
run_in_simple_project: Callable[..., RunResult]
) -> None:
run_in_simple_project(
"train",
"nlu",
"-c",
"config.yml",
"--nlu",
"data/nlu.md",
"--out",
"train_models",
"--persist-nlu-data",
)
assert os.path.exists("train_models")
files = rasa.shared.utils.io.list_files("train_models")
assert len(files) == 1
assert os.path.basename(files[0]).startswith("nlu-")
model_dir = model.get_model("train_models")
assert model_dir is not None
metadata = Metadata.load(os.path.join(model_dir, "nlu"))
assert metadata.get("training_data") is not None
assert os.path.exists(
os.path.join(model_dir, "nlu", training_data.DEFAULT_TRAINING_DATA_OUTPUT_PATH)
)
def test_train_help(run):
output = run("train", "--help")
help_text = """usage: rasa train [-h] [-v] [-vv] [--quiet] [--data DATA [DATA ...]]
[-c CONFIG] [-d DOMAIN] [--out OUT]
[--augmentation AUGMENTATION] [--debug-plots]
[--num-threads NUM_THREADS]
[--fixed-model-name FIXED_MODEL_NAME] [--persist-nlu-data]
[--force]
{core,nlu} ..."""
lines = help_text.split("\n")
printed_help = set(output.outlines)
for line in lines:
assert line in printed_help
def test_train_nlu_help(run: Callable[..., RunResult]):
output = run("train", "nlu", "--help")
help_text = """usage: rasa train nlu [-h] [-v] [-vv] [--quiet] [-c CONFIG] [--out OUT]
[-u NLU] [--num-threads NUM_THREADS]
[--fixed-model-name FIXED_MODEL_NAME]
[--persist-nlu-data]"""
lines = help_text.split("\n")
printed_help = set(output.outlines)
for line in lines:
assert line in printed_help
def test_train_core_help(run: Callable[..., RunResult]):
output = run("train", "core", "--help")
help_text = """usage: rasa train core [-h] [-v] [-vv] [--quiet] [-s STORIES] [-d DOMAIN]
[-c CONFIG [CONFIG ...]] [--out OUT]
[--augmentation AUGMENTATION] [--debug-plots] [--force]
[--fixed-model-name FIXED_MODEL_NAME]
[--percentages [PERCENTAGES [PERCENTAGES ...]]]
[--runs RUNS]"""
lines = help_text.split("\n")
printed_help = set(output.outlines)
for line in lines:
assert line in printed_help
@pytest.mark.parametrize(
"parameters",
[
{
"config_data": {"language": "en", "pipeline": "supervised"},
"default_config": {
"language": "en",
"pipeline": "supervised",
"policies": ["TEDPolicy", "FallbackPolicy"],
},
"mandatory_keys": CONFIG_MANDATORY_KEYS_CORE,
"error": False,
},
{
"config_data": {
"language": "en",
"pipeline": "supervised",
"policies": None,
},
"default_config": {
"language": "en",
"pipeline": "supervised",
"policies": ["TEDPolicy", "FallbackPolicy"],
},
"mandatory_keys": CONFIG_MANDATORY_KEYS_CORE,
"error": False,
},
{
"config_data": {},
"default_config": {
"language": "en",
"pipeline": "supervised",
"policies": ["TEDPolicy", "FallbackPolicy"],
},
"mandatory_keys": CONFIG_MANDATORY_KEYS,
"error": True,
},
{
"config_data": {
"policies": ["TEDPolicy", "FallbackPolicy"],
"imports": "other-folder",
},
"default_config": {
"language": "en",
"pipeline": "supervised",
"policies": ["TEDPolicy", "FallbackPolicy"],
},
"mandatory_keys": CONFIG_MANDATORY_KEYS_NLU,
"error": True,
},
{
"config_data": None,
"default_config": {
"pipeline": "supervised",
"policies": ["TEDPolicy", "FallbackPolicy"],
},
"mandatory_keys": CONFIG_MANDATORY_KEYS_NLU,
"error": True,
},
{
"config_data": None,
"default_config": {
"language": "en",
"pipeline": "supervised",
"policies": ["TEDPolicy", "FallbackPolicy"],
},
"mandatory_keys": CONFIG_MANDATORY_KEYS,
"error": False,
},
{
"config_data": None,
"default_config": {"language": "en", "pipeline": "supervised"},
"mandatory_keys": CONFIG_MANDATORY_KEYS_CORE,
"error": False,
},
{
"config_data": None,
"default_config": None,
"mandatory_keys": CONFIG_MANDATORY_KEYS,
"error": True,
},
],
)
def test_get_valid_config(parameters):
config_path = None
if parameters["config_data"] is not None:
config_path = os.path.join(tempfile.mkdtemp(), "config.yml")
rasa.shared.utils.io.write_yaml(parameters["config_data"], config_path)
default_config_path = None
if parameters["default_config"] is not None:
default_config_path = os.path.join(tempfile.mkdtemp(), "default-config.yml")
rasa.shared.utils.io.write_yaml(
parameters["default_config"], default_config_path
)
if parameters["error"]:
with pytest.raises(SystemExit):
_get_valid_config(config_path, parameters["mandatory_keys"])
else:
config_path = _get_valid_config(
config_path, parameters["mandatory_keys"], default_config_path
)
config_data = rasa.shared.utils.io.read_yaml_file(config_path)
for k in parameters["mandatory_keys"]:
assert k in config_data
def test_get_valid_config_with_non_existing_file():
with pytest.raises(SystemExit):
_get_valid_config("non-existing-file.yml", CONFIG_MANDATORY_KEYS)
| true | true |
f734da6aa6ab9faacbf6cc70f08d939bab727772 | 146 | py | Python | old/03/08.py | systemquant/book-pandas-for-finance | 90b7eb9be1de20a12ae72b9bb5d51424a979b174 | [
"MIT"
] | 10 | 2021-02-04T12:49:56.000Z | 2022-03-26T11:28:11.000Z | old/03/08.py | systemquant/book-pandas-for-finance | 90b7eb9be1de20a12ae72b9bb5d51424a979b174 | [
"MIT"
] | 1 | 2022-03-24T03:47:14.000Z | 2022-03-24T03:54:52.000Z | old/03/08.py | systemquant/book-pandas-for-finance | 90b7eb9be1de20a12ae72b9bb5d51424a979b174 | [
"MIT"
] | 4 | 2021-07-17T16:50:15.000Z | 2022-03-22T05:55:34.000Z | from pandas import Series
data = [1000, 2000, 3000]
index = ["메로나", "구구콘", "하겐다즈"]
# Build the Series and slice it by label; `.loc` label slicing is
# inclusive of BOTH endpoints, so this prints 메로나 and 구구콘.
s = Series(data, index)
print(s.loc["메로나":"구구콘"])
| 18.25 | 34 | 0.636986 | from pandas import Series
data = [1000, 2000, 3000]
index = ["메로나", "구구콘", "하겐다즈"]
s = Series(data=data, index=index)
print(s.loc['메로나':'구구콘'])
| true | true |
f734daed1be6b52d4591d0e3cbfdf1517893f84d | 5,975 | py | Python | proper_forms/fields/field.py | jpsca/hyperform | d5c450ad8684a853fed26f8c2606877151125a9e | [
"MIT"
] | 2 | 2020-09-30T22:41:00.000Z | 2020-12-04T16:47:17.000Z | proper_forms/fields/field.py | jpsca/hyperform | d5c450ad8684a853fed26f8c2606877151125a9e | [
"MIT"
] | null | null | null | proper_forms/fields/field.py | jpsca/hyperform | d5c450ad8684a853fed26f8c2606877151125a9e | [
"MIT"
] | null | null | null | import re
from .field_renderable import FieldRenderable
__all__ = ("Field", )
# Fallback messages used by `Field._set_error` when a field instance does not
# override the key via its `error_messages` argument. `{num}` placeholders are
# substituted by `_set_error`.
default_error_messages = {
    "required": "This field is required.",
    "type": "Invalid type.",
    "min_num": "You need at least {num} values.",
    "max_num": "You can have at most {num} values.",
}
# Hard upper bound applied to any user-supplied `max_num` in `Field.__init__`.
HARD_MAX_NUM = 1000
class Field(FieldRenderable):
    r"""Base form field.

    A field receives raw string values (via `load_data`), typecasts each one
    with `type()`, checks them with the configured validators, and returns a
    cleaned Python value from `validate()`.

    Arguments are:

        *validators:
            Callables applied to the list of typecast values. Each must
            return `True`/`False`, or a `(valid, message)` tuple.
        name=None:
            Field name; `None` is stored as "".
        required=False:
            If `True`, missing/empty input sets the "required" error.
        strict=True:
            If `True`, a value that fails typecasting sets the "type" error;
            if `False`, such values are silently discarded.
        error_messages=None:
            Per-field overrides for `default_error_messages`.
        prepare=None:
            Optional callable replacing `prepare()` when deriving display
            values from `object_value`.
        clean=None:
            Optional callable applied to the final value in `validate()`.
        collection (bool):
            This field takes an open number of values of the same kind.
            For example, a list of comma separated tags or email addresses.
        sep (str):
            If `collection` is True, string to separate each value (default is ",").
            Ignored otherwise
        multiple=False:
            Return the whole list of values instead of just the first one.
            Forced to `False` for collection fields.
        min_num=None:
            Minimum number of values accepted.
        max_num=None:
            Maximum number of values accepted (capped at `HARD_MAX_NUM`).
        **extra:
            Extra keyword arguments forwarded to `type()` for each value.
    """

    __slots__ = (
        "validators",
        "name",
        "required",
        "strict",
        "error_messages",
        "multiple",
        "min_num",
        "max_num",
        "collection",
        "sep",
        "extra",
    )

    # Class-level defaults for per-instance state set by `load_data()` and
    # `validate()`.
    object_value = None
    input_values = None
    input_type = "text"
    error = None
    error_value = None
    updated = False

    def __init__(
        self,
        *validators,
        name=None,
        required=False,
        strict=True,
        error_messages=None,
        multiple=False,
        min_num=None,
        max_num=None,
        collection=False,
        sep=",",
        prepare=None,
        clean=None,
        **extra
    ):
        self.validators = validators
        self.name = name or ""
        self.required = required
        self.strict = strict
        self.min_num = min_num
        if max_num is not None:
            # Never allow more values than the hard global ceiling.
            max_num = min(max_num, HARD_MAX_NUM)
        self.max_num = max_num
        self.error_messages = error_messages or {}
        self.collection = collection
        # Fix: always assign `sep` ("sep" is declared in `__slots__`).
        # Previously it was assigned only for collection fields, so reading
        # `field.sep` on a non-collection field raised AttributeError.
        self.sep = sep
        if collection:
            # A collection field already folds many values into one string,
            # so `multiple` does not apply.
            multiple = False
        self.multiple = multiple
        self.custom_prepare = prepare
        self.custom_clean = clean
        self.extra = extra

    def load_data(self, input_values=None, object_value=None):
        """Store the raw form input and/or the current object value."""
        self.input_values = input_values
        self.object_value = object_value

    @property
    def values(self):
        """Values to render: raw input if present, otherwise the prepared
        object value. Falsy object values are treated as absent."""
        if self.input_values:
            return self.input_values
        if self.object_value:
            return (self.custom_prepare or self.prepare)(self.object_value)
        return []

    @property
    def value(self):
        """First renderable value, or "" when there is none."""
        return self.values[0] if self.values else ""

    def get_value(self, index=0):
        """Value at `index`, or "" when out of range."""
        if self.values and index < len(self.values):
            return self.values[index]
        return ""

    def prepare(self, object_value):
        """Default object-to-input conversion: wrap in a one-item list."""
        return [object_value]

    def validate(self):
        """Run the full pipeline (typecast -> validate -> post-process).

        Returns the cleaned value, or `None` when validation fails; the
        reason is stored in `self.error` (and `self.error_value` for type
        errors). Also sets `self.updated` when the result differs from the
        stored object value.
        """
        self._reset()
        values = [str(value).strip() for value in self.input_values or []]
        if not values:
            if self.required:
                self._set_error("required")
            return None
        values = self._pre(values)
        pyvalues = self._typecast_values(values)
        if self.error:
            return None
        # Typecasting with `strict=False` could've emptied the values without erroring.
        # An empty string is only an error if the field is required
        if (not pyvalues or pyvalues[0] == "") and self.required:
            self._set_error("required")
            return None
        self._validate_values(pyvalues)
        if self.error:
            return None
        pyvalue = self._post(pyvalues)
        if self.custom_clean:
            pyvalue = self.custom_clean(pyvalue)
        self.updated = pyvalue != self.object_value
        return pyvalue

    def type(self, value, **kwargs):
        """Typecast a single raw value. Subclasses override this; returning
        `None` (or raising ValueError/TypeError/IndexError) marks failure."""
        return str(value)

    # Private

    def _reset(self):
        # Clear the outcome of any previous `validate()` call.
        self.error = None
        self.error_value = None
        self.updated = False

    def _pre(self, values):
        # For collection fields, split each raw value on the separator
        # (ignoring surrounding whitespace); otherwise pass values through.
        if self.collection:
            rxsep = r"\s*%s\s*" % re.escape(self.sep.strip())
            all_values = []
            for value in values:
                all_values += re.split(rxsep, value)
            return all_values
        return values

    def _post(self, values):
        # Collapse the typecast values into the final shape.
        if self.collection:
            return self.sep.join(values)
        elif self.multiple:
            return values
        else:
            return values[0] if values else None

    def _typecast_values(self, values):
        # Convert each raw value with `type()`. In strict mode the first
        # failure sets the "type" error and aborts (returns None implicitly);
        # otherwise failing values are dropped silently.
        pyvalues = []
        for value in values:
            try:
                pyvalue = self.type(value, **self.extra)
            except (ValueError, TypeError, IndexError):
                pyvalue = None
            if pyvalue is None:
                if self.strict:
                    self._set_error("type")
                    self.error_value = value
                    return
                continue  # pragma: no cover
            pyvalues.append(pyvalue)
        return pyvalues

    def _validate_values(self, pyvalues):
        # Check count bounds first, then run each custom validator until one
        # fails; the first failure message is stored in `self.error`.
        num_values = len(pyvalues)
        if self.min_num is not None and self.min_num > num_values:
            self._set_error("min_num", num=self.min_num)
            return
        if self.max_num is not None and self.max_num < num_values:
            self._set_error("max_num", num=self.max_num)
            return
        for validator in self.validators:
            message = "Invalid value"
            valid = validator(pyvalues)
            # A validator may return `(valid, message)` instead of a bool.
            if valid not in (True, False):
                valid, message = valid
            if not valid:
                self.error = message
                return

    def _set_error(self, name, **kwargs):
        # Resolve the message (field overrides win over the defaults),
        # substitute `{key}` placeholders, and fall back to the error name.
        msg = self.error_messages.get(name) or default_error_messages.get(name, "")
        for key, repl in kwargs.items():
            msg = msg.replace("{" + key + "}", str(repl))
        self.error = msg or name
| 25.105042 | 87 | 0.554477 | import re
from .field_renderable import FieldRenderable
__all__ = ("Field", )
default_error_messages = {
"required": "This field is required.",
"type": "Invalid type.",
"min_num": "You need at least {num} values.",
"max_num": "You can have at most {num} values.",
}
HARD_MAX_NUM = 1000
class Field(FieldRenderable):
__slots__ = (
"validators",
"name",
"required",
"strict",
"error_messages",
"multiple",
"min_num",
"max_num",
"collection",
"sep",
"extra",
)
object_value = None
input_values = None
input_type = "text"
error = None
error_value = None
updated = False
def __init__(
self,
*validators,
name=None,
required=False,
strict=True,
error_messages=None,
multiple=False,
min_num=None,
max_num=None,
collection=False,
sep=",",
prepare=None,
clean=None,
**extra
):
self.validators = validators
self.name = name or ""
self.required = required
self.strict = strict
self.min_num = min_num
if max_num is not None:
max_num = min(max_num, HARD_MAX_NUM)
self.max_num = max_num
self.error_messages = error_messages or {}
self.collection = collection
if collection:
self.sep = sep
multiple = False
self.multiple = multiple
self.custom_prepare = prepare
self.custom_clean = clean
self.extra = extra
def load_data(self, input_values=None, object_value=None):
self.input_values = input_values
self.object_value = object_value
@property
def values(self):
if self.input_values:
return self.input_values
if self.object_value:
return (self.custom_prepare or self.prepare)(self.object_value)
return []
@property
def value(self):
return self.values[0] if self.values else ""
def get_value(self, index=0):
if self.values and index < len(self.values):
return self.values[index]
return ""
def prepare(self, object_value):
return [object_value]
def validate(self):
self._reset()
values = [str(value).strip() for value in self.input_values or []]
if not values:
if self.required:
self._set_error("required")
return None
values = self._pre(values)
pyvalues = self._typecast_values(values)
if self.error:
return None
# An empty string is only an error if the field is required
if (not pyvalues or pyvalues[0] == "") and self.required:
self._set_error("required")
return None
self._validate_values(pyvalues)
if self.error:
return None
pyvalue = self._post(pyvalues)
if self.custom_clean:
pyvalue = self.custom_clean(pyvalue)
self.updated = pyvalue != self.object_value
return pyvalue
def type(self, value, **kwargs):
return str(value)
# Private
def _reset(self):
self.error = None
self.error_value = None
self.updated = False
def _pre(self, values):
if self.collection:
rxsep = r"\s*%s\s*" % re.escape(self.sep.strip())
all_values = []
for value in values:
all_values += re.split(rxsep, value)
return all_values
return values
def _post(self, values):
if self.collection:
return self.sep.join(values)
elif self.multiple:
return values
else:
return values[0] if values else None
def _typecast_values(self, values):
pyvalues = []
for value in values:
try:
pyvalue = self.type(value, **self.extra)
except (ValueError, TypeError, IndexError):
pyvalue = None
if pyvalue is None:
if self.strict:
self._set_error("type")
self.error_value = value
return
continue # pragma: no cover
pyvalues.append(pyvalue)
return pyvalues
def _validate_values(self, pyvalues):
num_values = len(pyvalues)
if self.min_num is not None and self.min_num > num_values:
self._set_error("min_num", num=self.min_num)
return
if self.max_num is not None and self.max_num < num_values:
self._set_error("max_num", num=self.max_num)
return
for validator in self.validators:
message = "Invalid value"
valid = validator(pyvalues)
if valid not in (True, False):
valid, message = valid
if not valid:
self.error = message
return
def _set_error(self, name, **kwargs):
msg = self.error_messages.get(name) or default_error_messages.get(name, "")
for key, repl in kwargs.items():
msg = msg.replace("{" + key + "}", str(repl))
self.error = msg or name
| true | true |
f734dbc58f6d9f310e99ffbb6ea2299976b3b377 | 1,078 | py | Python | src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareFedSwitchActivePtpInterfaceInterface/cli/equal/golden_output1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareFedSwitchActivePtpInterfaceInterface/cli/equal/golden_output1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareFedSwitchActivePtpInterfaceInterface/cli/equal/golden_output1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | null | null | null | expected_output = {
"interface":{
"if_id":"2a",
"clock_mode":"BOUNDARY CLOCK",
"delay_mechanism":"End-to-End",
"delay_req_seq_num":0,
"domain_value":0,
"log_mean_delay_interval":0,
"log_mean_sync_interval":0,
"num_info":{
"num_delay_requests_received":0,
"num_delay_requests_transmitted":0,
"num_delay_responses_received":0,
"num_delay_responses_transmitted":0,
"num_followup_messages_received":0,
"num_followup_messages_transmitted":1710,
"num_sync_messages_received":0,
"num_sync_messages_transmitted":1710
},
"port_info":{
"clock_identity":"34:ED:1B:FF:FE:7D:F2:80",
"mac_address":"34:ED:1B:7D:F2:A1",
"mode":1,
"number":33,
"port_enabled":"TRUE",
"state":"MASTER"
},
"profile_type":"DEFAULT",
"ptp_info":{
"ptp_vlan_id":0,
"ptp_vlan_is_valid":"FALSE",
"version":2
},
"ptt_port_enabled":"TRUE",
"sync_seq_num":44364,
"tag_native_vlan":"FALSE"
}
}
| 27.641026 | 51 | 0.592764 | expected_output = {
"interface":{
"if_id":"2a",
"clock_mode":"BOUNDARY CLOCK",
"delay_mechanism":"End-to-End",
"delay_req_seq_num":0,
"domain_value":0,
"log_mean_delay_interval":0,
"log_mean_sync_interval":0,
"num_info":{
"num_delay_requests_received":0,
"num_delay_requests_transmitted":0,
"num_delay_responses_received":0,
"num_delay_responses_transmitted":0,
"num_followup_messages_received":0,
"num_followup_messages_transmitted":1710,
"num_sync_messages_received":0,
"num_sync_messages_transmitted":1710
},
"port_info":{
"clock_identity":"34:ED:1B:FF:FE:7D:F2:80",
"mac_address":"34:ED:1B:7D:F2:A1",
"mode":1,
"number":33,
"port_enabled":"TRUE",
"state":"MASTER"
},
"profile_type":"DEFAULT",
"ptp_info":{
"ptp_vlan_id":0,
"ptp_vlan_is_valid":"FALSE",
"version":2
},
"ptt_port_enabled":"TRUE",
"sync_seq_num":44364,
"tag_native_vlan":"FALSE"
}
}
| true | true |
f734dca4d057e52d82e93c9c2cb00c6a673f1488 | 1,303 | py | Python | azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/virtual_machine_scale_set_instance_view_statuses_summary.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/virtual_machine_scale_set_instance_view_statuses_summary.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 2 | 2016-09-30T21:40:24.000Z | 2017-11-10T18:16:18.000Z | azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/virtual_machine_scale_set_instance_view_statuses_summary.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetInstanceViewStatusesSummary(Model):
    """Instance view statuses summary for virtual machines of a virtual machine
    scale set.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar statuses_summary: The extensions information.
    :vartype statuses_summary:
     list[~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineStatusCodeCount]
    """
    # Read-only fields: msrest refuses to serialize these into requests.
    _validation = {
        'statuses_summary': {'readonly': True},
    }
    # Maps Python attribute names to wire-format keys and msrest type strings.
    _attribute_map = {
        'statuses_summary': {'key': 'statusesSummary', 'type': '[VirtualMachineStatusCodeCount]'},
    }
    def __init__(self):
        super(VirtualMachineScaleSetInstanceViewStatusesSummary, self).__init__()
        # Populated by the service on responses; clients never set this.
        self.statuses_summary = None
| 34.289474 | 98 | 0.649271 |
from msrest.serialization import Model
class VirtualMachineScaleSetInstanceViewStatusesSummary(Model):
_validation = {
'statuses_summary': {'readonly': True},
}
_attribute_map = {
'statuses_summary': {'key': 'statusesSummary', 'type': '[VirtualMachineStatusCodeCount]'},
}
def __init__(self):
super(VirtualMachineScaleSetInstanceViewStatusesSummary, self).__init__()
self.statuses_summary = None
| true | true |
f734dd1ef6c0c61d40522bf0bb879127537e3d4f | 1,272 | py | Python | exporter/myapp.py | zeshahid/websitecheck_exporter | 282e5f19d9e778479bbf9e00b468e1313060acba | [
"MIT"
] | null | null | null | exporter/myapp.py | zeshahid/websitecheck_exporter | 282e5f19d9e778479bbf9e00b468e1313060acba | [
"MIT"
] | null | null | null | exporter/myapp.py | zeshahid/websitecheck_exporter | 282e5f19d9e778479bbf9e00b468e1313060acba | [
"MIT"
] | null | null | null | from typing import Counter
from prometheus_client import start_http_server, Summary, Counter, Gauge ,__all__ ,Histogram
import random
import time
from requests import get
import requests
from requests.api import post
urls =["https://httpstat.us/503","https://httpstat.us/200"]
sitestatus = Gauge('sample_external_url_up', 'site status check', ['endpoint'])
response_time = Gauge('sample_external_url_response_ms', 'Response Time in milliseconds', ['endpoint'])
# Create a metric to track time spent and requests made.
def response_request (url):
    """Record *url*'s response latency in the response-time gauge.

    Bug fix: the gauge label previously used the caller's loop global ``a``
    instead of the ``url`` parameter, silently coupling this helper to the
    module-level loop variable.
    """
    response = requests.get(url)
    response_time.labels(endpoint=url).set(response.elapsed.total_seconds())
# Create a metric for the endpoint's up/down status.
def request_state (a):
    """Set the availability gauge for endpoint *a*: 1 on HTTP 200, else 0.

    Bug fix: previously only 200 and 503 were handled, so any other status
    code left the gauge at its previous (stale) value.
    """
    response = requests.get(a)
    sitestatus.labels(endpoint=a).set(1 if response.status_code == 200 else 0)
if __name__ == '__main__':
    # Start up the HTTP server that Prometheus scrapes for the metrics.
    start_http_server(8000)
    # Poll every monitored URL forever; each pass refreshes both gauges.
    while True:
        for a in urls:
            request_state (a)
            response_request (a)
| 34.378378 | 103 | 0.699686 | from typing import Counter
from prometheus_client import start_http_server, Summary, Counter, Gauge ,__all__ ,Histogram
import random
import time
from requests import get
import requests
from requests.api import post
urls =["https://httpstat.us/503","https://httpstat.us/200"]
sitestatus = Gauge('sample_external_url_up', 'site status check', ['endpoint'])
response_time = Gauge('sample_external_url_response_ms', 'Response Time in milliseconds', ['endpoint'])
def response_request (url):
response = requests.get(url)
response_time.labels(endpoint=a).set(response.elapsed.total_seconds())
def request_state (a):
response = requests.get(a)
if response.status_code == 503:
sitestatus.labels(endpoint=a).set(0)
elif response.status_code == 200:
sitestatus.labels(endpoint=a).set(1)
if __name__ == '__main__':
start_http_server(8000)
while True:
for a in urls:
request_state (a)
response_request (a)
| true | true |
f734dd86bad4fa348424eff36dc67fc52e519284 | 32 | py | Python | HRM.py | asheu96/bme590hrm | c5d3b494149859a38f75452d1e61f6fc1c8bbea3 | [
"MIT"
] | null | null | null | HRM.py | asheu96/bme590hrm | c5d3b494149859a38f75452d1e61f6fc1c8bbea3 | [
"MIT"
] | null | null | null | HRM.py | asheu96/bme590hrm | c5d3b494149859a38f75452d1e61f6fc1c8bbea3 | [
"MIT"
] | null | null | null | # module for heart rate monitor
| 16 | 31 | 0.78125 | true | true | |
f734ddcdfbef18b57cb25db07dab576af78ed413 | 2,141 | py | Python | services.py | vida18electronic/barcode | 265318d893a9dabc71231424252b2476926577ec | [
"Apache-2.0"
] | null | null | null | services.py | vida18electronic/barcode | 265318d893a9dabc71231424252b2476926577ec | [
"Apache-2.0"
] | null | null | null | services.py | vida18electronic/barcode | 265318d893a9dabc71231424252b2476926577ec | [
"Apache-2.0"
] | null | null | null | import json
import requests
import sys
import os
import socket
import fcntl
import struct
#info_device={"MAC_ADDRESS":"xx","IP_ADDRESS":"xxx","BLOCK_ID":"01","STOP_ID":"xxx"}
#info_buncher={"BUNCHER_ID":"xx","BUNCH_ID":"xx","COMPOSITION_ID":"xx","TUB_ID":"xx","TxR":"xx","GR":"xx","VARIETY_ID":"xx","BLOCK_ID":"xx"}
def getMAC(interface='wlan0'):
    """Return the MAC address of *interface*, or an all-zero MAC on failure.

    Reads the Linux sysfs entry (e.g. "aa:bb:cc:dd:ee:ff\\n") and trims to
    the 17-character address. Fixes: the original shadowed the builtin
    ``str`` and never closed the file handle.
    """
    try:
        with open('/sys/class/net/%s/address' % interface) as handle:
            address = handle.read()
    except Exception:
        address = "00:00:00:00:00:00"
    return address[0:17]
def get_local_ip_address(target):
    """Best-effort local IP discovery.

    Opens a UDP socket "toward" *target* (no packet is actually sent) and
    reads the source address the OS picked; returns '' on any failure.
    """
    result = ''
    try:
        probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        probe.connect((target, 8000))
        result = probe.getsockname()[0]
        probe.close()
    except:  # kept deliberately broad to mirror the best-effort contract
        pass
    return result
def savedevice(info_device):
    """Register this device with the tracking API.

    Fills MAC and IP into *info_device* (mutating the caller's dict) and
    POSTs it as JSON. Returns True iff the server's JSON body is the
    literal value 1.
    """
    mac=str(getMAC())
    mac=mac.upper()
    ip=str(get_local_ip_address('10.0.1.1'))
    ip=ip.upper()
    info_device["MAC_ADDRESS"]=mac
    info_device["IP_ADDRESS"]=ip
    hostname="http://tfsnew.vida18.com:8078/api/devices"
    r=requests.post(hostname,json=info_device)
    if str(r.json())== '1':
        return True
    else:
        return False
def savebunch(info_buncher,info_device):
    """POST one bunch-tracking record, prefixing each id with a type letter
    (S=stop, BK=block, B=buncher, R=bunch, CM=composition, T=tub, V=variety).

    Mutates *info_device* by refreshing its MAC. Returns True iff the
    server's JSON body is the literal value 1.
    NOTE(review): SORTER_ID is built from BUNCHER_ID with a 'C' prefix —
    confirm this is intentional and not a copy/paste slip.
    """
    mac=str(getMAC())
    mac=mac.upper()
    info_device["MAC_ADDRESS"]=mac
    info_total={"MAC_ADDRESS":info_device["MAC_ADDRESS"],
    "STOP_ID":'S'+str(info_device["STOP_ID"]),
    "BLOCK_ID":'BK'+str(info_device["BLOCK_ID"]),
    "BUNCHER_ID":'B'+str(info_buncher["BUNCHER_ID"]),
    "BUNCH_ID":'R'+str(info_buncher["BUNCH_ID"]),
    "COMPOSITION_ID":'CM'+str(info_buncher["COMPOSITION_ID"]),
    "SORTER_ID":'C'+str(info_buncher["BUNCHER_ID"]),
    "TUB_ID":'T'+str(info_buncher["TUB_ID"]),
    "VARIETY_ID":'V'+str(info_buncher["VARIETY_ID"])
    }
    hostname="http://tfsnew.vida18.com:8078/api/tracking"
    r=requests.post(hostname,json=info_total)
    if str(r.json())== '1':
        return True
    else:
        return False
#print(savebunch(info_buncher,info_device))
| 29.328767 | 140 | 0.627744 | import json
import requests
import sys
import os
import socket
import fcntl
import struct
def getMAC(interface='wlan0'):
try:
str = open('/sys/class/net/%s/address' %interface).read()
except:
str = "00:00:00:00:00:00"
return str[0:17]
def get_local_ip_address(target):
ipaddr = ''
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((target, 8000))
ipaddr = s.getsockname()[0]
s.close()
except:
pass
return ipaddr
def savedevice(info_device):
mac=str(getMAC())
mac=mac.upper()
ip=str(get_local_ip_address('10.0.1.1'))
ip=ip.upper()
info_device["MAC_ADDRESS"]=mac
info_device["IP_ADDRESS"]=ip
hostname="http://tfsnew.vida18.com:8078/api/devices"
r=requests.post(hostname,json=info_device)
if str(r.json())== '1':
return True
else:
return False
def savebunch(info_buncher,info_device):
mac=str(getMAC())
mac=mac.upper()
info_device["MAC_ADDRESS"]=mac
info_total={"MAC_ADDRESS":info_device["MAC_ADDRESS"],
"STOP_ID":'S'+str(info_device["STOP_ID"]),
"BLOCK_ID":'BK'+str(info_device["BLOCK_ID"]),
"BUNCHER_ID":'B'+str(info_buncher["BUNCHER_ID"]),
"BUNCH_ID":'R'+str(info_buncher["BUNCH_ID"]),
"COMPOSITION_ID":'CM'+str(info_buncher["COMPOSITION_ID"]),
"SORTER_ID":'C'+str(info_buncher["BUNCHER_ID"]),
"TUB_ID":'T'+str(info_buncher["TUB_ID"]),
"VARIETY_ID":'V'+str(info_buncher["VARIETY_ID"])
}
hostname="http://tfsnew.vida18.com:8078/api/tracking"
r=requests.post(hostname,json=info_total)
if str(r.json())== '1':
return True
else:
return False
| true | true |
f734ddfee0c273d99cb4d87ab366012e453afc2e | 7,323 | py | Python | tests/test_memory_leak.py | fintzd/rasa | 6359be5509c7d87cd29c2ab5149bc45e843fea85 | [
"Apache-2.0"
] | 9,701 | 2019-04-16T15:46:27.000Z | 2022-03-31T11:52:18.000Z | tests/test_memory_leak.py | fintzd/rasa | 6359be5509c7d87cd29c2ab5149bc45e843fea85 | [
"Apache-2.0"
] | 6,420 | 2019-04-16T15:58:22.000Z | 2022-03-31T17:54:35.000Z | tests/test_memory_leak.py | fintzd/rasa | 6359be5509c7d87cd29c2ab5149bc45e843fea85 | [
"Apache-2.0"
] | 3,063 | 2019-04-16T15:23:52.000Z | 2022-03-31T00:01:12.000Z | import abc
import json
import subprocess
import sys
import tempfile
import time
from pathlib import Path
from typing import Text, List, Tuple, Optional, Union
import memory_profiler
import psutil
import pytest
import rasa
import rasa.shared.utils.io
# Seconds between successive samples taken by `memory-profiler`.
PROFILING_INTERVAL = 0.1
# Enable this to plot the results locally
WRITE_RESULTS_TO_DISK = False
def _custom_default_config(
    tmp_path: Union[Path, Text], epochs: int, max_history: Optional[int] = -1
) -> Text:
    """Write a copy of Rasa's default config with a custom epoch count.

    ``max_history=-1`` is a sentinel meaning "leave max_history untouched";
    passing any other value (including ``None``) resets it to ``None``.
    Returns the path of the generated config file as a string.
    """
    # Override default config to use custom amount of epochs
    default_config = Path("rasa", "shared", "importers", "default_config.yml")
    config = rasa.shared.utils.io.read_yaml_file(default_config)
    for model_part, items in config.items():
        for item in items:
            if "epochs" in item:
                item["epochs"] = epochs
            if "max_history" in item and max_history != -1:
                item["max_history"] = None
    config_for_test = Path(tmp_path) / "test_config.yml"
    rasa.shared.utils.io.write_yaml(config, config_for_test)
    return str(config_for_test)
class MemoryLeakTest(abc.ABC):
    """Generic template for memory leak tests.

    Subclasses supply the workload (``function_to_profile``) and a dump-file
    base name; this base runs the workload in a subprocess, samples its
    memory, and fails if the peak exceeds ``max_memory_threshold_mb``.
    """
    @property
    def max_memory_threshold_mb(self) -> float:
        # Default peak-memory ceiling in MB; subclasses override per workload.
        return 1000
    @pytest.fixture
    @abc.abstractmethod
    def name_for_dumped_files(self) -> Text:
        raise NotImplementedError
    @abc.abstractmethod
    def function_to_profile(self) -> None:
        raise NotImplementedError
    @pytest.mark.timeout(720, func_only=True)
    def test_for_memory_leak(
        self, name_for_dumped_files: Text, tmp_path: Path,
    ) -> None:
        """Profile the workload subprocess and assert its peak memory stays
        under the threshold."""
        # Run as separate process to avoid other things affecting the memory usage.
        # Unfortunately `memory-profiler` doesn't work properly with
        # `multiprocessing.Process` as it can't handle the process exit
        process = subprocess.Popen(
            [
                sys.executable,
                "-c",
                (
                    f"from {__name__} import {self.__class__.__name__}; "
                    f"t = {self.__class__.__name__}();"
                    f"t.function_to_profile()"
                ),
            ],
            # Force TensorFlow to use CPU so we can track the memory usage
            env={"CUDA_VISIBLE_DEVICES": "-1"},
        )
        # Wait until process is running to avoid race conditions with the memory
        # profiling
        while not psutil.pid_exists(process.pid):
            time.sleep(0.01)
        results = memory_profiler.memory_usage(
            process,
            interval=PROFILING_INTERVAL,
            include_children=True,
            timestamps=True,
        )
        # `memory-profiler` sometimes adds `None` values at the end which we don't need
        results = [
            memory_timestamp
            for memory_timestamp in results
            if memory_timestamp is not None
        ]
        if WRITE_RESULTS_TO_DISK:
            self._write_results(name_for_dumped_files, results)
        # Each sample is a (memory_mb, timestamp) pair; compare the peak.
        max_memory_usage = max(results, key=lambda memory_time: memory_time[0])[0]
        assert max_memory_usage < self.max_memory_threshold_mb
    @staticmethod
    def _write_results(base_name: Text, results: List[Tuple[float]]) -> None:
        """Dump the samples as an mprof-compatible plot file plus raw JSON."""
        mprof_plot = Path(f"{base_name}_plot.txt")
        mprof_results = Path(f"{base_name}_raw.json")
        # plot this via `mprof plot mprof_result.txt`
        with open(mprof_plot, "w") as f:
            for memory, timestamp in results:
                f.write(f"MEM {memory:.6f} {timestamp:.4f}\n")
        # dump result as json to be able analyze them without re-running the test
        with open(mprof_results, "w") as f:
            f.write(json.dumps(results))
class TestNLULeakManyEpochs(MemoryLeakTest):
    """Tests for memory leaks in NLU components when training with many epochs."""
    @property
    def epochs(self) -> int:
        # Enough epochs for a per-epoch leak to show up as steady growth.
        return 30
    @property
    def max_memory_threshold_mb(self) -> float:
        # Ceiling tuned for this dataset/config; exceeding it suggests a leak.
        return 2200
    def function_to_profile(self) -> None:
        """Train an NLU-only model on the Sara dataset (run in the profiled
        subprocess by the base class)."""
        import rasa.model_training
        with tempfile.TemporaryDirectory() as temp_dir:
            rasa.model_training.train_nlu(
                _custom_default_config(temp_dir, epochs=self.epochs),
                Path("data", "test_nlu_no_responses", "sara_nlu_data.yml"),
                output=temp_dir,
            )
    @pytest.fixture()
    def name_for_dumped_files(self) -> Text:
        # Base name for the optional mprof/JSON dump files.
        return (
            f"memory_usage_rasa_nlu_{rasa.__version__}_"
            f"epochs{self.epochs}_training_runs1"
        )
class TestCoreLeakManyEpochs(MemoryLeakTest):
    """Tests for memory leaks in Core policies when training with many epochs."""
    @property
    def epochs(self) -> int:
        # Core policies train fast, so many epochs are needed to expose growth.
        return 200
    @property
    def max_memory_threshold_mb(self) -> float:
        return 2000
    def function_to_profile(self) -> None:
        """Train a Core-only model with unlimited max_history and heavy story
        augmentation (run in the profiled subprocess)."""
        import rasa.model_training
        with tempfile.TemporaryDirectory() as temp_dir:
            rasa.model_training.train_core(
                "data/test_domains/default_with_slots.yml",
                _custom_default_config(temp_dir, epochs=self.epochs, max_history=None),
                "data/test_yaml_stories/stories_defaultdomain.yml",
                output=temp_dir,
                # Augmentation inflates the training data to stress memory.
                additional_arguments={"augmentation_factor": 20},
            )
    @pytest.fixture()
    def name_for_dumped_files(self) -> Text:
        # Base name for the optional mprof/JSON dump files.
        return (
            f"memory_usage_rasa_core_{rasa.__version__}_"
            f"epochs{self.epochs}_training_runs1"
        )
class TestCRFDenseFeaturesLeak(MemoryLeakTest):
    """Tests for memory leaks in NLU the CRF when using dense features."""
    @property
    def epochs(self) -> int:
        # One epoch suffices: the suspect code path is dense featurization.
        return 1
    @property
    def max_memory_threshold_mb(self) -> float:
        return 1600
    def function_to_profile(self) -> None:
        """Train a spaCy pipeline whose CRF consumes `text_dense_features`
        (run in the profiled subprocess)."""
        import rasa.model_training
        config = {
            "pipeline": [
                {"name": "SpacyNLP"},
                {"name": "SpacyTokenizer"},
                {"name": "SpacyFeaturizer"},
                {
                    "name": "CRFEntityExtractor",
                    # Feature windows: [previous token, current token, next token].
                    "features": [
                        ["pos", "pos2"],
                        [
                            "bias",
                            "prefix5",
                            "prefix2",
                            "suffix5",
                            "suffix3",
                            "suffix2",
                            "pos",
                            "pos2",
                            "digit",
                            "text_dense_features",
                        ],
                        ["pos", "pos2"],
                    ],
                },
            ]
        }
        with tempfile.TemporaryDirectory() as temp_dir:
            config_for_test = Path(temp_dir) / "test_config.yml"
            rasa.shared.utils.io.write_yaml(config, config_for_test)
            rasa.model_training.train_nlu(
                str(config_for_test),
                str(Path("data", "test_nlu_no_responses", "sara_nlu_data.yml")),
                output=temp_dir,
            )
    @pytest.fixture()
    def name_for_dumped_files(self) -> Text:
        # Base name for the optional mprof/JSON dump files.
        return f"memory_usage_rasa_nlu_crf_dense_{rasa.__version__}_"
| 31.294872 | 87 | 0.583777 | import abc
import json
import subprocess
import sys
import tempfile
import time
from pathlib import Path
from typing import Text, List, Tuple, Optional, Union
import memory_profiler
import psutil
import pytest
import rasa
import rasa.shared.utils.io
PROFILING_INTERVAL = 0.1
WRITE_RESULTS_TO_DISK = False
def _custom_default_config(
tmp_path: Union[Path, Text], epochs: int, max_history: Optional[int] = -1
) -> Text:
default_config = Path("rasa", "shared", "importers", "default_config.yml")
config = rasa.shared.utils.io.read_yaml_file(default_config)
for model_part, items in config.items():
for item in items:
if "epochs" in item:
item["epochs"] = epochs
if "max_history" in item and max_history != -1:
item["max_history"] = None
config_for_test = Path(tmp_path) / "test_config.yml"
rasa.shared.utils.io.write_yaml(config, config_for_test)
return str(config_for_test)
class MemoryLeakTest(abc.ABC):
@property
def max_memory_threshold_mb(self) -> float:
return 1000
@pytest.fixture
@abc.abstractmethod
def name_for_dumped_files(self) -> Text:
raise NotImplementedError
@abc.abstractmethod
def function_to_profile(self) -> None:
raise NotImplementedError
@pytest.mark.timeout(720, func_only=True)
def test_for_memory_leak(
self, name_for_dumped_files: Text, tmp_path: Path,
) -> None:
# `multiprocessing.Process` as it can't handle the process exit
process = subprocess.Popen(
[
sys.executable,
"-c",
(
f"from {__name__} import {self.__class__.__name__}; "
f"t = {self.__class__.__name__}();"
f"t.function_to_profile()"
),
],
env={"CUDA_VISIBLE_DEVICES": "-1"},
)
while not psutil.pid_exists(process.pid):
time.sleep(0.01)
results = memory_profiler.memory_usage(
process,
interval=PROFILING_INTERVAL,
include_children=True,
timestamps=True,
)
results = [
memory_timestamp
for memory_timestamp in results
if memory_timestamp is not None
]
if WRITE_RESULTS_TO_DISK:
self._write_results(name_for_dumped_files, results)
max_memory_usage = max(results, key=lambda memory_time: memory_time[0])[0]
assert max_memory_usage < self.max_memory_threshold_mb
@staticmethod
def _write_results(base_name: Text, results: List[Tuple[float]]) -> None:
mprof_plot = Path(f"{base_name}_plot.txt")
mprof_results = Path(f"{base_name}_raw.json")
# plot this via `mprof plot mprof_result.txt`
with open(mprof_plot, "w") as f:
for memory, timestamp in results:
f.write(f"MEM {memory:.6f} {timestamp:.4f}\n")
# dump result as json to be able analyze them without re-running the test
with open(mprof_results, "w") as f:
f.write(json.dumps(results))
class TestNLULeakManyEpochs(MemoryLeakTest):
@property
def epochs(self) -> int:
return 30
@property
def max_memory_threshold_mb(self) -> float:
return 2200
def function_to_profile(self) -> None:
import rasa.model_training
with tempfile.TemporaryDirectory() as temp_dir:
rasa.model_training.train_nlu(
_custom_default_config(temp_dir, epochs=self.epochs),
Path("data", "test_nlu_no_responses", "sara_nlu_data.yml"),
output=temp_dir,
)
@pytest.fixture()
def name_for_dumped_files(self) -> Text:
return (
f"memory_usage_rasa_nlu_{rasa.__version__}_"
f"epochs{self.epochs}_training_runs1"
)
class TestCoreLeakManyEpochs(MemoryLeakTest):
@property
def epochs(self) -> int:
return 200
@property
def max_memory_threshold_mb(self) -> float:
return 2000
def function_to_profile(self) -> None:
import rasa.model_training
with tempfile.TemporaryDirectory() as temp_dir:
rasa.model_training.train_core(
"data/test_domains/default_with_slots.yml",
_custom_default_config(temp_dir, epochs=self.epochs, max_history=None),
"data/test_yaml_stories/stories_defaultdomain.yml",
output=temp_dir,
additional_arguments={"augmentation_factor": 20},
)
@pytest.fixture()
def name_for_dumped_files(self) -> Text:
return (
f"memory_usage_rasa_core_{rasa.__version__}_"
f"epochs{self.epochs}_training_runs1"
)
class TestCRFDenseFeaturesLeak(MemoryLeakTest):
@property
def epochs(self) -> int:
return 1
@property
def max_memory_threshold_mb(self) -> float:
return 1600
def function_to_profile(self) -> None:
import rasa.model_training
config = {
"pipeline": [
{"name": "SpacyNLP"},
{"name": "SpacyTokenizer"},
{"name": "SpacyFeaturizer"},
{
"name": "CRFEntityExtractor",
"features": [
["pos", "pos2"],
[
"bias",
"prefix5",
"prefix2",
"suffix5",
"suffix3",
"suffix2",
"pos",
"pos2",
"digit",
"text_dense_features",
],
["pos", "pos2"],
],
},
]
}
with tempfile.TemporaryDirectory() as temp_dir:
config_for_test = Path(temp_dir) / "test_config.yml"
rasa.shared.utils.io.write_yaml(config, config_for_test)
rasa.model_training.train_nlu(
str(config_for_test),
str(Path("data", "test_nlu_no_responses", "sara_nlu_data.yml")),
output=temp_dir,
)
@pytest.fixture()
def name_for_dumped_files(self) -> Text:
return f"memory_usage_rasa_nlu_crf_dense_{rasa.__version__}_"
| true | true |
f734dead270c5ec9243064030981190070a9e1f6 | 83 | py | Python | FusionIIIT/applications/hostel_management/urls.py | suyash-code/Fusion-op | eb1242d184482cd6585988028afb182b37abc25c | [
"bzip2-1.0.6"
] | null | null | null | FusionIIIT/applications/hostel_management/urls.py | suyash-code/Fusion-op | eb1242d184482cd6585988028afb182b37abc25c | [
"bzip2-1.0.6"
] | null | null | null | FusionIIIT/applications/hostel_management/urls.py | suyash-code/Fusion-op | eb1242d184482cd6585988028afb182b37abc25c | [
"bzip2-1.0.6"
] | 3 | 2021-04-19T14:26:01.000Z | 2021-04-22T13:34:48.000Z | from django.conf.urls import url
app_name = 'hostel_management'
urlpatterns = []
| 13.833333 | 32 | 0.759036 | from django.conf.urls import url
app_name = 'hostel_management'
urlpatterns = []
| true | true |
f734ded5634d3062fbab77a54c8816eaf43cb21c | 327 | py | Python | setup.py | PyUnchained/pickle_storage | c0a978701ae59a9feeb3e14026ff0b2353b2e7f5 | [
"MIT"
] | null | null | null | setup.py | PyUnchained/pickle_storage | c0a978701ae59a9feeb3e14026ff0b2353b2e7f5 | [
"MIT"
] | null | null | null | setup.py | PyUnchained/pickle_storage | c0a978701ae59a9feeb3e14026ff0b2353b2e7f5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='pickle-storage',
version='0.1',
description='Python Pickle-Based Data Storage',
author='Tatenda Tambo',
author_email='tatendatambo@gmail.com',
packages=find_packages(),
install_requires=['wrapt>=1.12.1']
) | 27.25 | 53 | 0.669725 |
from setuptools import setup, find_packages
setup(name='pickle-storage',
version='0.1',
description='Python Pickle-Based Data Storage',
author='Tatenda Tambo',
author_email='tatendatambo@gmail.com',
packages=find_packages(),
install_requires=['wrapt>=1.12.1']
) | true | true |
f734dee5533468bd89adc7a9869f9458710bc490 | 5,368 | py | Python | models.py | vidursatija/SongWCT | c892c2833ff9f85cfb31788babf016699c5eec8f | [
"MIT"
] | null | null | null | models.py | vidursatija/SongWCT | c892c2833ff9f85cfb31788babf016699c5eec8f | [
"MIT"
] | null | null | null | models.py | vidursatija/SongWCT | c892c2833ff9f85cfb31788babf016699c5eec8f | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from torchsummary import summary
import numpy as np
class X_Enc(nn.Module):
    """1-D convolutional encoder.

    Wraps a flat layer list in ``nn.Sequential``; ``forward`` additionally
    collects the index tensors returned by every ``MaxPool1d`` so a matching
    decoder (``X_Dec``) can unpool with them.
    """
    def __init__(self, layers, num_classes=1000, init_weights=True):
        # `num_classes` is kept for interface compatibility; it is unused here.
        super(X_Enc, self).__init__()
        self.features = nn.Sequential(*layers)  # layers
        print(self.features)
        if init_weights:
            self._initialize_weights()
    def forward(self, x):
        """Return ``(features, pool_indices)`` with indices in encounter order."""
        all_maxpools = []
        for l in self.features:
            if not isinstance(l, nn.MaxPool1d):
                x = l(x)
            else:
                # Pool layers are built with return_indices=True (see
                # make_layers_enc), so they yield (output, indices).
                x, pool_indices = l(x)
                all_maxpools.append(pool_indices)
        return x, all_maxpools
    def _initialize_weights(self):
        """He-init convolutions, unit batch-norm, small-normal linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            # Bug fix: this 1-D network previously checked BatchNorm2d here
            # (X_Dec correctly checks BatchNorm1d), so any BatchNorm1d layer
            # would have been skipped during initialization.
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
def make_layers_enc(cfg):
    """Build the encoder layer list from *cfg*.

    ``cfg[0]`` is the input channel count; each integer appends a
    Conv1d + ReLU pair, and 'M' flushes pending convolutions and appends a
    MaxPool1d (with return_indices=True, as X_Enc.forward expects).
    """
    in_ch, spec = cfg[0], cfg[1:]
    layers = []
    pending = []
    for token in spec:
        if token == 'M':
            layers.extend(pending)
            pending = []
            layers.append(nn.MaxPool1d(kernel_size=2, stride=2, return_indices=True))
        else:
            pending.extend(
                [nn.Conv1d(in_ch, token, kernel_size=3, padding=1), nn.ReLU(inplace=True)]
            )
            in_ch = token
    layers.extend(pending)
    return layers
# Encoder configurations by depth (index = depth - 1). The first entry is the
# input channel count; 'M' inserts a MaxPool1d between convolution groups.
configs_enc = [
    [128, 128],
    [128, 128, 128, 'M', 256],
    [128, 128, 128, 'M', 256, 256, 'M', 512],
    [128, 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512]
]
# Decoder configurations mirroring the encoders; 'M' inserts a MaxUnpool1d.
configs_dec = [
    [128, 128],
    [256, 128, 'M', 128, 128],
    [512, 256, 'M', 256, 128, 'M', 128, 128],
    [512, 512, 'M', 512, 256, 'M', 256, 128, 'M', 128, 128]
]
def encoder(x, pretrained_path=None, **kwargs):
    """Build encoder of depth *x* (1-4), optionally loading weights from disk.

    When a checkpoint path is given, random initialization is skipped and the
    state dict is loaded non-strictly.
    """
    if pretrained_path is not None:
        kwargs['init_weights'] = False
    net = X_Enc(make_layers_enc(configs_enc[x - 1]), **kwargs)
    if pretrained_path is not None:
        net.load_state_dict(torch.load(pretrained_path), strict=False)
    return net
class X_Dec(nn.Module):
    """1-D deconvolutional decoder mirroring ``X_Enc``.

    ``forward`` consumes the pooling-index list produced by the encoder,
    handing them to the unpool layers in reverse order (last pool first).
    """
    def __init__(self, layers, num_classes=1000, init_weights=True):
        # `num_classes` is kept for interface compatibility; it is unused here.
        super(X_Dec, self).__init__()
        self.layers = nn.Sequential(*layers)
        print(self.layers)
        if init_weights:
            self._initialize_weights()
    def forward(self, x, all_maxpools):
        """Run the layers, feeding stored pool indices to each MaxUnpool1d."""
        next_unpool = -1
        for layer in self.layers:
            if isinstance(layer, nn.MaxUnpool1d):
                x = layer(x, all_maxpools[next_unpool])
                next_unpool -= 1
            else:
                x = layer(x)
        return x
    def _initialize_weights(self):
        """He-init deconvolutions, unit batch-norm, small-normal linear layers."""
        for module in self.modules():
            if isinstance(module, nn.ConvTranspose1d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm1d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
def make_layers_dec(cfg):
    """Build the decoder layer list from *cfg*.

    ``cfg[0]`` is the input channel count; each integer appends a
    ConvTranspose1d (followed by ReLU except for the very last entry), and
    'M' flushes pending layers and appends a MaxUnpool1d.
    """
    in_ch, spec = cfg[0], cfg[1:]
    layers, pending = [], []
    last = len(spec) - 1
    for pos, token in enumerate(spec):
        if token == 'M':
            layers.extend(pending)
            pending = []
            layers.append(nn.MaxUnpool1d(kernel_size=2, stride=2))
        else:
            pending.append(nn.ConvTranspose1d(in_ch, token, kernel_size=3, padding=1))
            # The final deconvolution produces the output and gets no ReLU.
            if pos != last:
                pending.append(nn.ReLU(inplace=True))
            in_ch = token
    layers.extend(pending)
    return layers
def decoder(x, pretrained_path=None, **kwargs):
    """Build decoder of depth *x* (1-4), optionally loading weights from disk.

    When a checkpoint path is given, random initialization is skipped and the
    state dict is loaded non-strictly.
    """
    if pretrained_path is not None:
        kwargs['init_weights'] = False
    net = X_Dec(make_layers_dec(configs_dec[x - 1]), **kwargs)
    if pretrained_path is not None:
        net.load_state_dict(torch.load(pretrained_path), strict=False)
    return net
if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # PyTorch v0.4.0
    # Bug fix: the demo called undefined `vgg16_enc`/`vgg16_dec` with a
    # nonexistent `pretrained=` kwarg, and rebound the module-level factory
    # names `encoder`/`decoder`. Use the real factories and distinct names.
    enc = encoder(3)
    for k in enc.state_dict():
        print(k)
    # These are Conv1d networks: input is (batch, channels, length), and the
    # first config channel count is 128 (see configs_enc).
    z, all_maxpools = enc(torch.from_numpy(np.zeros([1, 128, 224])).float())
    dec = decoder(3)
    for k in dec.state_dict():
        print(k)
    x_rebuild = dec(z, all_maxpools)
    # summary(...) omitted: torchsummary expects image-shaped 2-D inputs.
| 32.731707 | 91 | 0.575075 | import torch
import torch.nn as nn
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from torchsummary import summary
import numpy as np
class X_Enc(nn.Module):
def __init__(self, layers, num_classes=1000, init_weights=True):
super(X_Enc, self).__init__()
self.features = nn.Sequential(*layers)
print(self.features)
if init_weights:
self._initialize_weights()
def forward(self, x):
all_maxpools = []
for l in self.features:
if isinstance(l, nn.MaxPool1d) == False:
x = l(x)
else:
x, pool_indices = l(x)
all_maxpools.append(pool_indices)
return x, all_maxpools
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers_enc(cfg):
layers = []
conv_layers = []
in_channels = cfg[0]
cfg = cfg[1:]
for v in cfg:
if v == 'M':
layers += conv_layers
conv_layers = []
layers += [nn.MaxPool1d(kernel_size=2, stride=2, return_indices=True)]
else:
conv1d = nn.Conv1d(in_channels, v, kernel_size=3, padding=1)
conv_layers += [conv1d, nn.ReLU(inplace=True)]
in_channels = v
if len(conv_layers) > 0:
layers += conv_layers
return layers
configs_enc = [
[128, 128],
[128, 128, 128, 'M', 256],
[128, 128, 128, 'M', 256, 256, 'M', 512],
[128, 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512]
]
configs_dec = [
[128, 128],
[256, 128, 'M', 128, 128],
[512, 256, 'M', 256, 128, 'M', 128, 128],
[512, 512, 'M', 512, 256, 'M', 256, 128, 'M', 128, 128]
]
def encoder(x, pretrained_path=None, **kwargs):
if pretrained_path is not None:
kwargs['init_weights'] = False
model = X_Enc(make_layers_enc(configs_enc[x-1]), **kwargs)
if pretrained_path is not None:
model.load_state_dict(torch.load(pretrained_path), strict=False)
return model
class X_Dec(nn.Module):
    """Decoder mirroring X_Enc: transposed convolutions with max-unpooling
    stages that consume the encoder's pooling indices."""

    def __init__(self, layers, num_classes=1000, init_weights=True):
        super(X_Dec, self).__init__()
        self.layers = nn.Sequential(*layers)
        print(self.layers)  # NOTE(review): debug leftover; consider removing.
        if init_weights:
            self._initialize_weights()

    def forward(self, x, all_maxpools):
        """Decode ``x``.  ``all_maxpools`` are the encoder's pooling indices,
        consumed in reverse order (the last pooling is undone first)."""
        ct = -1  # negative index walks all_maxpools from the end
        for layer in self.layers:
            # Idiom fix: was `isinstance(...) == False`.
            if not isinstance(layer, nn.MaxUnpool1d):
                x = layer(x)
            else:
                x = layer(x, all_maxpools[ct])
                ct -= 1
        return x

    def _initialize_weights(self):
        """Initialize deconv / batch-norm / linear weights."""
        for m in self.modules():
            if isinstance(m, nn.ConvTranspose1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
def make_layers_dec(cfg):
    """Build decoder layers: ``'M'`` -> MaxUnpool1d, numbers ->
    ConvTranspose1d (+ ReLU everywhere except after the final entry,
    which is the network output)."""
    layers = []
    pending = []
    channels = cfg[0]
    body = cfg[1:]
    last = len(body) - 1
    for i, item in enumerate(body):
        if item == 'M':
            layers.extend(pending)
            pending = []
            layers.append(nn.MaxUnpool1d(kernel_size=2, stride=2))
        else:
            deconv = nn.ConvTranspose1d(channels, item, kernel_size=3, padding=1)
            if i == last:
                pending.append(deconv)  # no activation on the output layer
            else:
                pending.extend([deconv, nn.ReLU(inplace=True)])
            channels = item
    if pending:
        layers.extend(pending)
    return layers
def decoder(x, pretrained_path=None, **kwargs):
    """Build an X_Dec of depth ``x`` (1-based index into configs_dec),
    optionally loading weights from ``pretrained_path``."""
    load_weights = pretrained_path is not None
    if load_weights:
        # A checkpoint will overwrite the weights, so skip random init.
        kwargs['init_weights'] = False
    model = X_Dec(make_layers_dec(configs_dec[x - 1]), **kwargs)
    if load_weights:
        model.load_state_dict(torch.load(pretrained_path), strict=False)
    return model
if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # BUG FIX: this script referenced undefined names vgg16_enc / vgg16_dec
    # (with a nonexistent `pretrained` kwarg) and raised NameError; the
    # factories defined above are `encoder` and `decoder`.
    encoder_net = encoder(x=3)
    for k in encoder_net.state_dict():
        print(k)
    # NOTE(review): `summary` is not defined in this file's visible imports
    # (presumably torchsummary); also (3, 224, 224) is an image-shaped input
    # although the network is built from 1-D convolutions -- confirm.
    summary(encoder_net, (3, 224, 224), device="cpu")
    z, all_maxpools = encoder_net(torch.from_numpy(np.zeros([1, 3, 224, 224])).float())
    decoder_net = decoder(x=3)
    for k in decoder_net.state_dict():
        print(k)
    x_rebuild = decoder_net(z, all_maxpools)
| true | true |
f734df57e7a00474f077f260bc9b03ce6ece41c0 | 492 | py | Python | apps/gradings/migrations/0001_initial.py | cloudartisan/dojomaster | 9d5efa0345c659636f8d8b556302d0d7bb2055a8 | [
"MIT"
] | 1 | 2019-02-21T14:47:31.000Z | 2019-02-21T14:47:31.000Z | apps/gradings/migrations/0001_initial.py | cloudartisan/dojomaster | 9d5efa0345c659636f8d8b556302d0d7bb2055a8 | [
"MIT"
] | null | null | null | apps/gradings/migrations/0001_initial.py | cloudartisan/dojomaster | 9d5efa0345c659636f8d8b556302d0d7bb2055a8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-30 23:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
# Auto-generated initial migration for the gradings app: creates the
# Grading table with only its implicit auto-increment primary key.
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Grading',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| 21.391304 | 114 | 0.599593 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
# Auto-generated initial migration: creates the Grading model with only
# its implicit auto-increment primary key.
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Grading',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| true | true |
f734df86fba52bb0e5b0075c1880a25fc79ec783 | 113 | py | Python | Python/pythonProject/exercise/ex047.py | JoaoMoreira2002/Linguagens-de-programacao | b91a902188428238a567c8f52b2ac9028378c4df | [
"MIT"
] | null | null | null | Python/pythonProject/exercise/ex047.py | JoaoMoreira2002/Linguagens-de-programacao | b91a902188428238a567c8f52b2ac9028378c4df | [
"MIT"
] | null | null | null | Python/pythonProject/exercise/ex047.py | JoaoMoreira2002/Linguagens-de-programacao | b91a902188428238a567c8f52b2ac9028378c4df | [
"MIT"
] | null | null | null | for x in range(0, 11):
for c in range(0, 11):
print(x, 'x', c, '= {}'.format(x * c))
print('\t')
| 22.6 | 46 | 0.442478 | for x in range(0, 11):
for c in range(0, 11):
print(x, 'x', c, '= {}'.format(x * c))
print('\t')
| true | true |
f734e032d4a7eeaed6fa44b9a554d7707bbd9ef9 | 6,038 | py | Python | reading_data.py | eembees/solar_flares | 9022f92c0577efaf06d7425002995e4fa4df74b4 | [
"MIT"
] | null | null | null | reading_data.py | eembees/solar_flares | 9022f92c0577efaf06d7425002995e4fa4df74b4 | [
"MIT"
] | null | null | null | reading_data.py | eembees/solar_flares | 9022f92c0577efaf06d7425002995e4fa4df74b4 | [
"MIT"
] | null | null | null | from pathlib import Path
import ijson
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from json import JSONDecoder, JSONDecodeError # for reading the JSON data files
import re # for regular expressions
import os # for os related operations
from sklearn.preprocessing import maxabs_scale
def decode_obj(line, pos=0, decoder=JSONDecoder()):
    """Yield successive JSON objects embedded in ``line``, starting at ``pos``.

    Whitespace between objects is skipped.  NOTE(review): on a decode error
    the message below is printed without advancing ``pos``, so a malformed
    line can loop forever -- confirm inputs are well-formed JSON lines.
    """
    non_ws = re.compile(r'[^\s]')
    while True:
        found = non_ws.search(line, pos)
        if found is None:
            return
        pos = found.start()
        try:
            obj, pos = decoder.raw_decode(line, pos)
        except JSONDecodeError as err:
            print('Oops! something went wrong. Error: {}'.format(err))
        yield obj
def get_obj_with_last_n_val(line, n):
    """Parse one MVTS JSON line and keep only the last ``n`` of its 60 steps.

    Returns {'id', 'classType', 'values'} where 'values' is a DataFrame of
    the trailing ``n`` time steps (integer index 60-n .. 59).
    """
    record = next(decode_obj(line))
    frame = pd.DataFrame.from_dict(record['values'])
    frame.set_index(frame.index.astype(int), inplace=True)
    tail_idx = np.arange(0, 60)[-n:]
    frame = frame.loc[tail_idx]
    return {'id': record['id'], 'classType': record['classNum'], 'values': frame}
def get_obj_with_all(line):
    """Parse one MVTS JSON line into {'id', 'classType', 'values'}.

    'classType' is None for unlabeled (test-set) records; 'values' is the
    full DataFrame of time steps with an integer index.
    """
    record = next(decode_obj(line))
    label = record.get('classNum')  # None when the line carries no label
    frame = pd.DataFrame.from_dict(record['values'])
    frame.set_index(frame.index.astype(int), inplace=True)
    return {'id': record['id'], 'classType': label, 'values': frame}
def read_json_data_to_df(file_path: Path):
    """
    Generates a dataframe by concatenating the values of every
    multi-variate time series in a JSON-lines file.

    :param file_path: path of the JSON-lines file to read.
    :return: the generated dataframe, with LABEL and ID columns appended.
    """
    all_df, labels, ids = [], [], []
    with open(file_path, 'r') as infile:
        for line in infile:  # Each 'line' is one MVTS with its single label.
            obj = get_obj_with_all(line)
            all_df.append(obj['values'])
            labels.append(obj['classType'])
            ids.append(obj['id'])
    # BUG FIX: debug print(...) calls and an exit() inside the loop made
    # the function terminate the whole process after the first record;
    # the concatenation below was unreachable.  Removed.
    df = pd.concat(all_df).reset_index(drop=True)
    df = df.assign(LABEL=pd.Series(labels))
    df = df.assign(ID=pd.Series(ids))
    # NOTE(review): set_index without inplace=True or reassignment has no
    # effect -- confirm whether the id index was actually intended.
    df.set_index([pd.Index(ids)])
    return df
def read_json_data_to_arr(file_path: Path):
"""
Reads a JSON-lines file of multi-variate time series and returns numpy
arrays after outlier masking and gap filling.

:param file_path: path of the JSON-lines file to read.
:return: (data, labels, ids) numpy arrays; labels entries are None for
unlabeled records.
"""
all_df, labels, ids = [], [], []
with open(file_path, 'r') as infile: # Open the file for reading
for line in infile: # Each 'line' is one MVTS with its single label (0 or 1).
obj = get_obj_with_all(line)
# if obj['id'] < 100:
df = obj['values'].sort_index()
# remove anything 2 std dev from the mean
df = df.mask(df.sub(df.mean()).div(df.std()).abs().gt(2))
# do interpolation of variables
# NOTE(review): `extrapolate` is not a documented DataFrame.interpolate
# keyword argument -- verify against the pandas version in use.
df = df.interpolate(method='linear', extrapolate=False)
# Fill remaining leading/trailing gaps, then any all-NaN columns with 0.
df = df.fillna(method='ffill').fillna(method='bfill').fillna(0.0)
all_df.append(df.values)
labels.append(obj['classType'])
ids.append(obj['id'])
all_df = np.array(all_df)
labels = np.array(labels)
ids = np.array(ids)
return all_df, labels, ids
def save_DF_to_NPZ(fp: Path, out_dir):
# Read the JSON-lines file at `fp` and store the resulting DataFrame in
# out_dir as <name>.npz.
fo = out_dir / fp.with_suffix('.npz').name
# fo_k = Path(str(fo).replace(('.npz', '_keys.npz')))
df = pd.read_json(fp, lines=True)
# NOTE(review): `keys=df.keys` stores the *bound method*, not the column
# names -- likely meant df.keys() or df.columns; np.savez will have to
# pickle both it and the DataFrame.  Confirm intent.
np.savez(fo, df=df, keys=df.keys, index=df.index)
pass
def save_arr_to_npz(arr: np.ndarray, labels: np.ndarray, ids: np.ndarray, fo: Path):
    """Persist data, labels and ids to ``fo`` as an .npz archive.

    The ids are stored under the archive key 'index'.
    """
    np.savez(fo, data=arr, labels=labels, index=ids)
def load_npz_file(path: Path, return_ids=False):
    """Load an .npz archive produced by save_arr_to_npz / save_y_preds.

    :param path: archive to read.
    :param return_ids: when True, also return the record ids.
    :return: (X, y) or (X, y, ids).  y / ids are None when the archive has
        no such entry (e.g. unlabeled test data).  NaNs in X become 0.
    """
    archive = np.load(path)
    X = archive['data']
    if np.any(np.isnan(X)):
        X = np.nan_to_num(X)
    try:
        y = archive['labels']
    except (KeyError, ValueError):
        y = None
    if not return_ids:
        return X, y
    # BUG FIX: ids are written under the key 'index' (see save_arr_to_npz
    # and save_y_preds) but were only looked up under 'ids', so they were
    # always None.  Try both keys for backward compatibility.
    ids = None
    for key in ('ids', 'index'):
        try:
            ids = archive[key]
            break
        except (KeyError, ValueError):
            continue
    return X, y, ids
def save_y_preds(y_index: np.ndarray, y_pred: np.ndarray, fo: Path):
    """Store prediction ids (key 'index') and labels (key 'labels') in ``fo``."""
    np.savez(fo, index=y_index, labels=y_pred)
def preprocess_data(X, scaler=maxabs_scale):
    """Scale every individual time series in ``X`` (shape (n, 60, 25)) in place.

    Each of the 25 channels of every sample is passed through ``scaler``
    independently.  Returns the (mutated) input array.
    """
    if X.shape[1:] != (60, 25):
        raise ValueError('Data shape wrong')
    for sample_idx, sample in enumerate(X):
        scaled = np.zeros_like(sample.transpose())
        for chan_idx, channel in enumerate(sample.transpose()):
            scaled[chan_idx] = scaler(channel)
        X[sample_idx] = scaled.transpose()
    return X
if __name__ == '__main__':
# Convert every test*.json file in the input directory to an .npz archive.
data_dir = Path('/Users/mag/PycharmProjects/solar_flares/input/')
out_dir = Path('/Users/mag/PycharmProjects/solar_flares/input/npz')
# out_dir = Path('./input/npz')
file_paths = list(data_dir.glob('test*.json'))
print(file_paths)
for fp in file_paths:
fo = out_dir / fp.with_suffix('.npz').name
all_df, labels, ids = read_json_data_to_arr(fp)
save_arr_to_npz(all_df, labels, ids, fo)
| 29.598039 | 86 | 0.61792 | from pathlib import Path
import ijson
import numpy as np
import pandas as pd
from json import JSONDecoder, JSONDecodeError
import re
import os
from sklearn.preprocessing import maxabs_scale
def decode_obj(line, pos=0, decoder=JSONDecoder()):
no_white_space_regex = re.compile(r'[^\s]')
while True:
match = no_white_space_regex.search(line, pos)
if not match:
return
pos = match.start()
try:
obj, pos = decoder.raw_decode(line, pos)
except JSONDecodeError as err:
print('Oops! something went wrong. Error: {}'.format(err))
yield obj
def get_obj_with_last_n_val(line, n):
obj = next(decode_obj(line))
id = obj['id']
class_label = obj['classNum']
data = pd.DataFrame.from_dict(obj['values'])
data.set_index(data.index.astype(int), inplace=True)
last_n_indices = np.arange(0, 60)[-n:]
data = data.loc[last_n_indices]
return {'id': id, 'classType': class_label, 'values': data}
def get_obj_with_all(line):
obj = next(decode_obj(line))
id = obj['id']
try:
class_label = obj['classNum']
except KeyError:
class_label = None
data = pd.DataFrame.from_dict(obj['values'])
data.set_index(data.index.astype(int), inplace=True)
return {'id': id, 'classType': class_label, 'values': data}
def read_json_data_to_df(file_path: Path):
all_df, labels, ids = [], [], []
with open(file_path, 'r') as infile:
for line in infile:
obj = get_obj_with_all(line)
all_df.append(obj['values'])
labels.append(obj['classType'])
ids.append(obj['id'])
print(type(obj))
print(obj['values'])
print(type(obj['values']))
exit()
df = pd.concat(all_df).reset_index(drop=True)
df = df.assign(LABEL=pd.Series(labels))
df = df.assign(ID=pd.Series(ids))
df.set_index([pd.Index(ids)])
return df
def read_json_data_to_arr(file_path: Path):
all_df, labels, ids = [], [], []
with open(file_path, 'r') as infile:
for line in infile:
obj = get_obj_with_all(line)
df = obj['values'].sort_index()
df = df.mask(df.sub(df.mean()).div(df.std()).abs().gt(2))
df = df.interpolate(method='linear', extrapolate=False)
df = df.fillna(method='ffill').fillna(method='bfill').fillna(0.0)
all_df.append(df.values)
labels.append(obj['classType'])
ids.append(obj['id'])
all_df = np.array(all_df)
labels = np.array(labels)
ids = np.array(ids)
return all_df, labels, ids
def save_DF_to_NPZ(fp: Path, out_dir):
fo = out_dir / fp.with_suffix('.npz').name
df = pd.read_json(fp, lines=True)
np.savez(fo, df=df, keys=df.keys, index=df.index)
pass
def save_arr_to_npz(arr: np.ndarray, labels: np.ndarray, ids: np.ndarray, fo: Path):
np.savez(fo, data=arr, labels=labels, index=ids)
pass
def load_npz_file(path: Path, return_ids = False):
a = np.load(path)
X = a['data']
if np.any(np.isnan(X)):
X = np.nan_to_num(X)
try:
y = a['labels']
except KeyError:
y = None
except ValueError:
y = None
if return_ids:
try:
ids = a['ids']
except KeyError:
ids = None
except ValueError:
ids = None
return X, y, ids
else:
return X, y
def save_y_preds(y_index: np.ndarray, y_pred: np.ndarray, fo: Path):
np.savez(fo, index=y_index, labels=y_pred)
pass
def preprocess_data(X, scaler=maxabs_scale):
shap = X.shape
if shap[1:] != (60, 25):
raise ValueError('Data shape wrong')
for i, x_i in enumerate(X):
x_i_t = np.zeros_like(x_i.transpose())
for j, series in enumerate(x_i.transpose()):
series = scaler(series)
x_i_t[j] = series
X[i] = x_i_t.transpose()
return X
if __name__ == '__main__':
data_dir = Path('/Users/mag/PycharmProjects/solar_flares/input/')
out_dir = Path('/Users/mag/PycharmProjects/solar_flares/input/npz')
file_paths = list(data_dir.glob('test*.json'))
print(file_paths)
for fp in file_paths:
fo = out_dir / fp.with_suffix('.npz').name
all_df, labels, ids = read_json_data_to_arr(fp)
save_arr_to_npz(all_df, labels, ids, fo)
| true | true |
f734e09b76bc265ceb72504136dbcb9a86e98111 | 5,912 | py | Python | CORE/engines/constraint.py | geoffreynyaga/ostrich-project | 157cd7a3c3d9014e31ef21ca21de43f04d039997 | [
"MIT"
] | 15 | 2017-11-08T10:03:26.000Z | 2021-12-21T07:02:44.000Z | CORE/engines/constraint.py | geoffreynyaga/ostrich-project | 157cd7a3c3d9014e31ef21ca21de43f04d039997 | [
"MIT"
] | 9 | 2020-01-17T15:09:22.000Z | 2022-03-25T19:02:05.000Z | CORE/engines/constraint.py | geoffreynyaga/ostrich-project | 157cd7a3c3d9014e31ef21ca21de43f04d039997 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
##################################################################################
# File: c:\Projects\KENYA ONE PROJECT\CORE\engines\constraint.py #
# Project: c:\Projects\KENYA ONE PROJECT\CORE\engines #
# Created Date: Thursday, January 9th 2020, 8:56:55 pm #
# Author: Geoffrey Nyaga Kinyua ( <info@geoffreynyaga.com> ) #
# ----- #
# Last Modified: Thursday January 9th 2020 8:56:55 pm #
# Modified By: Geoffrey Nyaga Kinyua ( <info@geoffreynyaga.com> ) #
# ----- #
# MIT License #
# #
# Copyright (c) 2020 KENYA ONE PROJECT #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of#
# this software and associated documentation files (the "Software"), to deal in #
# the Software without restriction, including without limitation the rights to #
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies #
# of the Software, and to permit persons to whom the Software is furnished to do #
# so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
# ----- #
# Copyright (c) 2020 KENYA ONE PROJECT #
##################################################################################
import sys
sys.path.append("../")
from CORE.API.db_API import write_to_db, read_from_db
import numpy as np # type: ignore
import matplotlib.pylab as plt # type: ignore
a = np.arange(50)
ws = np.arange(10, 35, 0.01)
cdmin: float = 0.025
write_to_db("cdMin", cdmin)
do = read_from_db("rhoSL")
dalt = read_from_db("altitudeDensity") # AAAAA
k = read_from_db("k")
# v = read_from_db('cruiseSpeed') * 1.688
v: float = 140 * 1.688 # AAAAA
qcruise = 0.5 * dalt * v ** 2 # dynamic pressure at cruise
qtakeoff = 0.5 * do * v ** 2 # dynamic pressure at take-off
# Sustained-turn constraint: thrust-to-weight required to hold a level
# banked turn at cruise dynamic pressure.
turnangle = 40 # bank angle, degrees
# NOTE(review): np.cos works in radians, so np.cos(40) does NOT compute
# cos(40 degrees); this likely should be np.cos(np.radians(turnangle)).
loadfactor = 1 / (np.cos(turnangle)) # load factor n = 1 / cos(bank angle)
twturn = (
qcruise
* ((cdmin / ws) + (k * (loadfactor / qcruise) ** 2) * ws)
* (v * 5850 / (0.8 * 550 * 0.6604))
)
# rate of climb
roc = read_from_db("rateOfClimb") * 3.28 * 60 # rate of climb ft/min #AAAAAAA
# Vy=sqrt((2/do)*ws * sqrt( k/(3*cdmin) ))
Vy = 150
Vv = roc / 60
qclimb = 0.5 * do * (Vy ** 2)
twclimb = (
(Vv / Vy) + ((qclimb / ws) * cdmin) + ((qclimb / ws) * cdmin) + ((k / qclimb) * ws)
) * (Vy * 5850 / (0.6 * 550))
# ground run
Sg: int = 1000 # ground run ft
Vlof: float = 70 * 1.688
clto: float = 1.4670
u: float = 0.04
cdto = 0.03
q1 = 0.5 * do * (Vlof / np.sqrt(2)) ** 2
twtakeoff = (
((Vlof ** 2) / (2 * 32.174 * Sg)) + ((q1 * cdto) / ws) + u * (1 - (q1 * clto / ws))
) * (Vlof * 5850 / (0.6 * 550))
# cruise altitude
twcruise = (((qcruise * cdmin) / ws) + ((k / qcruise) * ws)) * (
v * 5850 / (0.6 * 550 * 0.6604)
)
# service ceiling
twservceiling = (
(1.668 / np.sqrt((2 * ws / dalt) * np.sqrt(k / (3 * cdmin))))
+ (4 * np.sqrt(k * cdmin / 3))
) * ((v * 5850) / (0.7 * 550 * 0.6604))
plt.plot(ws, twclimb, label="climb")
plt.plot(ws, twturn, label="turn")
plt.plot(ws, twtakeoff, label="Takeoff")
plt.plot(ws, twservceiling, label="Service Ceiling")
plt.plot(ws, twcruise, label="cruise")
plotWS = read_from_db("WS")
plt.axvline(x=plotWS) ################################
plt.legend(loc="upper left")
if __name__ == "__main__":
plt.show()
def find_nearest(array, value):
    """Return the index of the element of ``array`` closest to ``value``."""
    deltas = np.abs(array - value)
    return deltas.argmin()
# print(find_nearest(ws, plotWS))
myidx = find_nearest(ws, plotWS)
# cruiseidx = (twcruise[myidx])
# takeoffidx = twtakeoff[myidx]
# climbidx = twclimb[myidx]
# turnidx = twturn[myidx]
# ceilingidx = twservceiling[myidx]
# print([cruiseidx,takeoffidx,climbidx,turnidx,ceilingidx])
def point():
    """Return the largest power requirement at the design wing loading,
    i.e. the active constraint among cruise, takeoff, climb, turn and
    service ceiling (all module-level curves indexed by ``myidx``)."""
    candidates = np.array([
        twcruise[myidx],
        twtakeoff[myidx],
        twclimb[myidx],
        twturn[myidx],
        twservceiling[myidx],
    ])
    return candidates[candidates.argmax()]
# Persist the governing (largest) power requirement and the wing area
# derived from MTOW and the selected wing loading.
finalBHP = point()
# print ( finalBHP,"BHP")
write_to_db("finalBHP", finalBHP)
# NOTE(review): 10.57 looks like a unit-conversion factor -- confirm.
S = (read_from_db("finalMTOW")) / (plotWS * 10.57)
write_to_db("S", S)
| 38.894737 | 88 | 0.508965 | true | true | |
f734e0e3ddd024eab587865ded77a0ef1faf2c77 | 4,984 | py | Python | DEPRECATED_PYTHON_SRC/component/_brz_win.py | 17701253801/firefly-proxy | 153e0d343d93d68a803bba6b89497f7bc2b96bc8 | [
"BSD-2-Clause"
] | 5,895 | 2015-01-01T14:33:17.000Z | 2022-03-19T03:08:48.000Z | DEPRECATED_PYTHON_SRC/component/_brz_win.py | BIGMONK/firefly-proxy | 60161af7b239ab400d39a23b61ab312f84b94570 | [
"BSD-2-Clause"
] | 626 | 2015-01-07T22:09:26.000Z | 2022-03-24T01:56:50.000Z | DEPRECATED_PYTHON_SRC/component/_brz_win.py | BIGMONK/firefly-proxy | 60161af7b239ab400d39a23b61ab312f84b94570 | [
"BSD-2-Clause"
] | 1,145 | 2015-01-04T06:50:54.000Z | 2022-03-15T13:12:17.000Z | # Copied from https://github.com/downloadam/client/blob/master/client/registry/win.py
import logging
import sys
import os
from contextlib import contextmanager
import subprocess
import _winreg as winreg
from _winreg import HKEY_CLASSES_ROOT, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER, \
KEY_QUERY_VALUE, REG_SZ, KEY_ALL_ACCESS, KEY_WRITE, KEY_CREATE_SUB_KEY, KEY_SET_VALUE
log = logging.getLogger(__name__)
@contextmanager
def open_key(hkey, *args):
    """Context manager around winreg.OpenKeyEx that always closes the key.

    BUG FIX: close the handle even when the body raises (try/finally);
    previously an exception inside the ``with`` block leaked the handle.
    """
    key = winreg.OpenKeyEx(hkey, *args)
    try:
        yield key
    finally:
        winreg.CloseKey(key)
@contextmanager
def create_key(hkey, subkey):
    """Context manager around winreg.CreateKey that always closes the key.

    BUG FIX: close the handle even when the body raises (try/finally);
    previously an exception inside the ``with`` block leaked the handle.
    """
    key = winreg.CreateKey(hkey, subkey)
    try:
        yield key
    finally:
        winreg.CloseKey(key)
def read_reg_key(hkey, subkey, name=""):
# Return the (value, type) pair of a registry value, or (None, None) when
# the key/value does not exist (winerror 2 = ERROR_FILE_NOT_FOUND).
# Any other registry error is re-raised.
try:
with open_key(hkey, subkey, 0, KEY_QUERY_VALUE) as k:
return winreg.QueryValueEx(k, name)
except WindowsError as e:
errno, message = e.args
if errno != 2:
raise e
return (None, None)
def write_reg_key(hkey, subkey, name, value):
# Write `value` (a (type, data) pair) to a registry value.  Returns True
# on success, False when the key does not exist (winerror 2); any other
# registry error is re-raised.
try:
with open_key(hkey, subkey, 0, KEY_ALL_ACCESS) as k:
winreg.SetValueEx(k, name, 0, value[0], value[1])
return True
except WindowsError as e:
errno, message = e.args
if errno != 2:
raise e
return False
def enum_reg_keys(hkey, subkey):
    """Yield the names of all direct subkeys of ``hkey``\\``subkey``."""
    with open_key(hkey, subkey) as k:
        i = 0
        while True:
            try:
                name = winreg.EnumKey(k, i)
            except OSError:
                # BUG FIX: was a bare `except:` that swallowed everything,
                # including KeyboardInterrupt/SystemExit.  winreg raises
                # OSError (WindowsError) when the index runs past the last
                # subkey, which is the normal end-of-enumeration signal.
                break
            yield name
            i += 1
def _parse_browser_path(path):
try:
if path.startswith('"'):
path = path[1:].split('"', 1)[0]
return path
except:
return None
def get_default_browser():
    """Return the executable of the default HTTP handler, or None.

    Checks the per-user registration first, then the machine-wide one.
    """
    for hive, subkey in (
        (HKEY_CURRENT_USER, 'Software\\Classes\\http\\shell\\open\\command'),
        (HKEY_CLASSES_ROOT, 'http\\shell\\open\\command'),
    ):
        found = _parse_browser_path(read_reg_key(hive, subkey)[0])
        if found is not None:
            return found
    return None
def get_browser_path(key):
    """Return the shell-open executable registered for browser ``key``.

    Checks HKCU first, then HKLM; returns None when neither has it.
    """
    subkey = 'Software\\Clients\\StartMenuInternet\\{}\\shell\\open\\command'.format(key)
    result = _parse_browser_path(read_reg_key(HKEY_CURRENT_USER, subkey)[0])
    if result is None:
        result = _parse_browser_path(read_reg_key(HKEY_LOCAL_MACHINE, subkey)[0])
    return result
def iterate_browsers(default=None):
# Yield (key, path, is_default, outdated) for every browser registered
# under Software\Clients\StartMenuInternet in HKCU or HKLM.  `default`
# is the default browser's executable path (looked up when omitted).
if default is None:
default = get_default_browser() or ''
default = default.lower()
ignore = set()
for hkey in (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE):
try:
enum = list(enum_reg_keys(hkey, 'Software\\Clients\\StartMenuInternet'))
except WindowsError:
# key not exists or something?
continue
for key in enum:
if key in ignore:
# already seen under the other hive
continue
ignore.add(key)
path = get_browser_path(key)
if not path:
continue
if not os.path.exists(path):
continue
if key == 'IEXPLORE.EXE':
# IE older than version 9 is flagged as outdated.
try:
version = int(read_reg_key(hkey, 'Software\\Microsoft\\Internet Explorer', 'Version')[0].split('.', 1)[0])
except AttributeError: # this maybe happens, don't know why. assume IE is outdated
version = 0
if version < 9:
outdated = True
else:
outdated = False
elif key == 'OperaStable':
# Opera's stable channel is unconditionally treated as outdated here.
outdated = True
else:
outdated = False
yield key.lower(), path, path.lower() == default, outdated
# Registry values captured by launch_ie() before it rewrites the proxy
# settings; used to restore the user's configuration afterwards.
old_ie_settings = {}
def resume_ie_settings():
# Restore the WinINET proxy settings saved by launch_ie().
global old_ie_settings
key = HKEY_CURRENT_USER
subkey = 'Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
for (name, value) in old_ie_settings.items():
write_reg_key(key, subkey, name, value)
def launch_ie(executable, url, rootdir, proxy_type, proxy_ip, proxy_port):
# Point Internet Explorer at the local proxy by rewriting the per-user
# WinINET settings (remembering the old values for resume_ie_settings()),
# then launch IE on `url`.  `rootdir` and `proxy_type` are unused here.
global old_ie_settings
key = HKEY_CURRENT_USER
subkey = 'Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
# Value tuples are (registry type, data): 4 = REG_DWORD, 1 = REG_SZ.
new_values = {
'ProxyEnable' : (4, 1),
'ProxyOverride' : (1, u'*.local;<local>'),
'ProxyServer' : (1, u'%s:%d' % (proxy_ip, proxy_port)),
}
# Save every value we are about to overwrite so it can be restored later.
for (name, _) in new_values.items():
(reg_value, reg_type) = read_reg_key(key, subkey, name)
if reg_value is not None:
old_ie_settings[name] = (reg_type, reg_value)
write_reg_key(key, subkey, name, new_values[name])
cmdline = [
executable,
url,
]
# Windows expects the command line in the filesystem encoding.
cmdline = [s.encode(sys.getfilesystemencoding()) for s in cmdline]
return subprocess.Popen(cmdline)
def launch_ie_tab(executable, url, rootdir):
    """Open ``url`` in the given browser executable (``rootdir`` is unused)."""
    encoding = sys.getfilesystemencoding()
    cmdline = [arg.encode(encoding) for arg in (executable, url)]
    return subprocess.Popen(cmdline)
| 33.006623 | 151 | 0.611758 |
import logging
import sys
import os
from contextlib import contextmanager
import subprocess
import _winreg as winreg
from _winreg import HKEY_CLASSES_ROOT, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER, \
KEY_QUERY_VALUE, REG_SZ, KEY_ALL_ACCESS, KEY_WRITE, KEY_CREATE_SUB_KEY, KEY_SET_VALUE
log = logging.getLogger(__name__)
@contextmanager
def open_key(hkey, *args):
key = winreg.OpenKeyEx(hkey, *args)
yield key
winreg.CloseKey(key)
@contextmanager
def create_key(hkey, subkey):
key = winreg.CreateKey(hkey, subkey)
yield key
winreg.CloseKey(key)
def read_reg_key(hkey, subkey, name=""):
try:
with open_key(hkey, subkey, 0, KEY_QUERY_VALUE) as k:
return winreg.QueryValueEx(k, name)
except WindowsError as e:
errno, message = e.args
if errno != 2:
raise e
return (None, None)
def write_reg_key(hkey, subkey, name, value):
try:
with open_key(hkey, subkey, 0, KEY_ALL_ACCESS) as k:
winreg.SetValueEx(k, name, 0, value[0], value[1])
return True
except WindowsError as e:
errno, message = e.args
if errno != 2:
raise e
return False
def enum_reg_keys(hkey, subkey):
with open_key(hkey, subkey) as k:
i = 0
while True:
try:
name = winreg.EnumKey(k, i)
except:
break
yield name
i += 1
def _parse_browser_path(path):
try:
if path.startswith('"'):
path = path[1:].split('"', 1)[0]
return path
except:
return None
def get_default_browser():
result = _parse_browser_path(read_reg_key(HKEY_CURRENT_USER, 'Software\\Classes\\http\\shell\\open\\command')[0])
if result is None:
result = _parse_browser_path(read_reg_key(HKEY_CLASSES_ROOT, 'http\\shell\\open\\command')[0])
return result
def get_browser_path(key):
result = _parse_browser_path(read_reg_key(HKEY_CURRENT_USER, 'Software\\Clients\\StartMenuInternet\\{}\\shell\\open\\command'.format(key))[0])
if result is None:
result = _parse_browser_path(read_reg_key(HKEY_LOCAL_MACHINE, 'Software\\Clients\\StartMenuInternet\\{}\\shell\\open\\command'.format(key))[0])
return result
def iterate_browsers(default=None):
if default is None:
default = get_default_browser() or ''
default = default.lower()
ignore = set()
for hkey in (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE):
try:
enum = list(enum_reg_keys(hkey, 'Software\\Clients\\StartMenuInternet'))
except WindowsError:
continue
for key in enum:
if key in ignore:
continue
ignore.add(key)
path = get_browser_path(key)
if not path:
continue
if not os.path.exists(path):
continue
if key == 'IEXPLORE.EXE':
try:
version = int(read_reg_key(hkey, 'Software\\Microsoft\\Internet Explorer', 'Version')[0].split('.', 1)[0])
except AttributeError:
version = 0
if version < 9:
outdated = True
else:
outdated = False
elif key == 'OperaStable':
outdated = True
else:
outdated = False
yield key.lower(), path, path.lower() == default, outdated
old_ie_settings = {}
def resume_ie_settings():
global old_ie_settings
key = HKEY_CURRENT_USER
subkey = 'Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
for (name, value) in old_ie_settings.items():
write_reg_key(key, subkey, name, value)
def launch_ie(executable, url, rootdir, proxy_type, proxy_ip, proxy_port):
global old_ie_settings
key = HKEY_CURRENT_USER
subkey = 'Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
new_values = {
'ProxyEnable' : (4, 1),
'ProxyOverride' : (1, u'*.local;<local>'),
'ProxyServer' : (1, u'%s:%d' % (proxy_ip, proxy_port)),
}
for (name, _) in new_values.items():
(reg_value, reg_type) = read_reg_key(key, subkey, name)
if reg_value is not None:
old_ie_settings[name] = (reg_type, reg_value)
write_reg_key(key, subkey, name, new_values[name])
cmdline = [
executable,
url,
]
cmdline = [s.encode(sys.getfilesystemencoding()) for s in cmdline]
return subprocess.Popen(cmdline)
def launch_ie_tab(executable, url, rootdir):
cmdline = [
executable,
url,
]
cmdline = [s.encode(sys.getfilesystemencoding()) for s in cmdline]
return subprocess.Popen(cmdline)
| true | true |
f734e2ae995f4f0b1438b926ddce732f5c03bcb5 | 2,949 | py | Python | students/k3342/laboratory_works/Shaidullina_Regina/laboratory_work_1/leaderboard/migrations/0001_initial.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 10 | 2020-03-20T09:06:12.000Z | 2021-07-27T13:06:02.000Z | students/k3342/laboratory_works/Shaidullina_Regina/laboratory_work_1/leaderboard/migrations/0001_initial.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 134 | 2020-03-23T09:47:48.000Z | 2022-03-12T01:05:19.000Z | students/k3342/laboratory_works/Shaidullina_Regina/laboratory_work_1/leaderboard/migrations/0001_initial.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 71 | 2020-03-20T12:45:56.000Z | 2021-10-31T19:22:25.000Z | # Generated by Django 3.0.4 on 2020-04-18 12:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
# Auto-generated initial migration for the leaderboard app: creates the
# Car, Team, Racer and Race tables.  Racer references Car and Team; Race
# references Racer (winner).
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Car',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('car_number', models.CharField(max_length=6)),
('description', models.CharField(max_length=100)),
],
options={
'db_table': 'Car',
},
),
migrations.CreateModel(
name='Team',
fields=[
('name', models.CharField(max_length=30, primary_key=True, serialize=False)),
('country', models.CharField(max_length=30)),
('number_of_racers', models.IntegerField()),
],
options={
'db_table': 'Team',
},
),
migrations.CreateModel(
name='Racer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('surname', models.CharField(max_length=30)),
('name', models.CharField(max_length=30)),
('middlename', models.CharField(max_length=30)),
('description', models.CharField(max_length=100)),
('experience', models.CharField(max_length=30)),
('racer_class', models.CharField(choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D')], max_length=1)),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leaderboard.Car')),
('team_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leaderboard.Team')),
],
options={
'db_table': 'Racer',
},
),
migrations.CreateModel(
name='Race',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('category', models.CharField(choices=[('OW', 'Open-wheel racing'), ('TC', 'Touring car racing'), ('SpC', 'Sports car racing'), ('PC', 'Production-car racing'), ('OM', 'One-make racing'), ('TAS', 'Time Attack Series'), ('StC', 'Stock car racing'), ('R', 'Rallying'), ('D', 'Drag racing'), ('OR', 'Off-road racing'), ('K', 'Kart racing'), ('H', 'Historical racing'), ('Other', 'Other')], max_length=5)),
('date', models.DateField()),
('winner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leaderboard.Racer')),
],
options={
'db_table': 'Race',
},
),
]
| 43.367647 | 418 | 0.528654 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Car',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('car_number', models.CharField(max_length=6)),
('description', models.CharField(max_length=100)),
],
options={
'db_table': 'Car',
},
),
migrations.CreateModel(
name='Team',
fields=[
('name', models.CharField(max_length=30, primary_key=True, serialize=False)),
('country', models.CharField(max_length=30)),
('number_of_racers', models.IntegerField()),
],
options={
'db_table': 'Team',
},
),
migrations.CreateModel(
name='Racer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('surname', models.CharField(max_length=30)),
('name', models.CharField(max_length=30)),
('middlename', models.CharField(max_length=30)),
('description', models.CharField(max_length=100)),
('experience', models.CharField(max_length=30)),
('racer_class', models.CharField(choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D')], max_length=1)),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leaderboard.Car')),
('team_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leaderboard.Team')),
],
options={
'db_table': 'Racer',
},
),
migrations.CreateModel(
name='Race',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('category', models.CharField(choices=[('OW', 'Open-wheel racing'), ('TC', 'Touring car racing'), ('SpC', 'Sports car racing'), ('PC', 'Production-car racing'), ('OM', 'One-make racing'), ('TAS', 'Time Attack Series'), ('StC', 'Stock car racing'), ('R', 'Rallying'), ('D', 'Drag racing'), ('OR', 'Off-road racing'), ('K', 'Kart racing'), ('H', 'Historical racing'), ('Other', 'Other')], max_length=5)),
('date', models.DateField()),
('winner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leaderboard.Racer')),
],
options={
'db_table': 'Race',
},
),
]
| true | true |
f734e32ec87c1cc2ff0c847206e45f81f0a44682 | 5,052 | py | Python | kubernetes/client/models/v1beta1_allowed_host_path.py | lp67/python | 33c5ea9835356410ce4a9fa54a02c6a2a22143c6 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1beta1_allowed_host_path.py | lp67/python | 33c5ea9835356410ce4a9fa54a02c6a2a22143c6 | [
"Apache-2.0"
] | 4 | 2019-11-19T10:33:47.000Z | 2022-03-01T03:33:52.000Z | kubernetes/client/models/v1beta1_allowed_host_path.py | mohramadan911/PythonClientAPI | 5d111812c81b7a573ac8661d1aec60bb97072412 | [
"Apache-2.0"
] | 2 | 2021-08-10T16:35:31.000Z | 2021-09-14T04:53:06.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.20
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta1AllowedHostPath(object):
    """AllowedHostPath defines the host volume conditions that will be enabled
    by a policy for pods to use. It requires the path prefix to be defined.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    """

    # attribute name -> OpenAPI type
    openapi_types = {
        'path_prefix': 'str',
        'read_only': 'bool'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'path_prefix': 'pathPrefix',
        'read_only': 'readOnly'
    }

    def __init__(self, path_prefix=None, read_only=None, local_vars_configuration=None):  # noqa: E501
        """V1beta1AllowedHostPath - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._path_prefix = None
        self._read_only = None
        # no polymorphic discriminator for this model
        self.discriminator = None

        # Assign through the properties only when a value was supplied so the
        # private attributes stay None for omitted fields.
        if path_prefix is not None:
            self.path_prefix = path_prefix
        if read_only is not None:
            self.read_only = read_only

    @property
    def path_prefix(self):
        """pathPrefix is the path prefix that the host volume must match.

        It does not support `*`. Trailing slashes are trimmed when validating
        the path prefix with a host path. Examples: `/foo` would allow `/foo`,
        `/foo/` and `/foo/bar`; `/foo` would not allow `/food` or `/etc/foo`.

        :rtype: str
        """
        return self._path_prefix

    @path_prefix.setter
    def path_prefix(self, path_prefix):
        """Set the pathPrefix field of this V1beta1AllowedHostPath."""
        self._path_prefix = path_prefix

    @property
    def read_only(self):
        """When set to true, host volumes matching the pathPrefix are allowed
        only if all volume mounts are readOnly.

        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Set the readOnly field of this V1beta1AllowedHostPath."""
        self._read_only = read_only

    def to_dict(self):
        """Return the model properties as a dict, converting nested models."""
        def _convert(item):
            # Anything exposing to_dict (i.e. another generated model) is
            # serialized recursively; plain values pass through unchanged.
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_convert(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _convert(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two models are equal when their dict forms are equal."""
        if not isinstance(other, V1beta1AllowedHostPath):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of __eq__ (non-models always compare unequal)."""
        if not isinstance(other, V1beta1AllowedHostPath):
            return True
        return self.to_dict() != other.to_dict()
import pprint
import re
import six
from kubernetes.client.configuration import Configuration
class V1beta1AllowedHostPath(object):
    """AllowedHostPath defines the host volume conditions that will be enabled
    by a policy for pods to use. It requires the path prefix to be defined.

    NOTE: auto-generated OpenAPI client model; do not edit by hand.
    """
    # attribute name -> OpenAPI type
    openapi_types = {
        'path_prefix': 'str',
        'read_only': 'bool'
    }
    # attribute name -> JSON key in the API definition
    attribute_map = {
        'path_prefix': 'pathPrefix',
        'read_only': 'readOnly'
    }
    def __init__(self, path_prefix=None, read_only=None, local_vars_configuration=None):
        """V1beta1AllowedHostPath - a model defined in OpenAPI."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._path_prefix = None
        self._read_only = None
        # no polymorphic discriminator for this model
        self.discriminator = None
        # assign through the properties only when a value was supplied
        if path_prefix is not None:
            self.path_prefix = path_prefix
        if read_only is not None:
            self.read_only = read_only
    @property
    def path_prefix(self):
        """Path prefix the host volume must match; `*` is not supported."""
        return self._path_prefix
    @path_prefix.setter
    def path_prefix(self, path_prefix):
        """Set the pathPrefix field."""
        self._path_prefix = path_prefix
    @property
    def read_only(self):
        """If true, matching host volumes require read-only mounts."""
        return self._read_only
    @read_only.setter
    def read_only(self, read_only):
        """Set the readOnly field."""
        self._read_only = read_only
    def to_dict(self):
        """Return the model properties as a dict, converting nested models."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Models compare equal when their dict forms are equal."""
        if not isinstance(other, V1beta1AllowedHostPath):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Inverse of __eq__."""
        if not isinstance(other, V1beta1AllowedHostPath):
            return True
        return self.to_dict() != other.to_dict()
| true | true |
f734e3a1ce3a24d57c1f838699b9ec656d8a897c | 7,219 | py | Python | src/antidote/_constants.py | Finistere/antidote | 97751e0e6a1b8bd638a1c33212345c7a84ad97b8 | [
"MIT"
] | 52 | 2017-12-17T19:52:37.000Z | 2022-03-29T10:24:04.000Z | src/antidote/_constants.py | Finistere/antidote | 97751e0e6a1b8bd638a1c33212345c7a84ad97b8 | [
"MIT"
] | 32 | 2018-11-02T08:49:16.000Z | 2022-03-25T22:23:30.000Z | src/antidote/_constants.py | Finistere/antidote | 97751e0e6a1b8bd638a1c33212345c7a84ad97b8 | [
"MIT"
] | 5 | 2019-05-17T18:26:14.000Z | 2021-12-25T23:13:31.000Z | from typing import (Any, Dict, Generic, Hashable, Optional, TYPE_CHECKING, Tuple, Type,
TypeVar, Union, cast, overload)
from ._compatibility.typing import final, Protocol
from ._internal import API
from ._internal.utils import AbstractMeta, Default, FinalImmutable, FinalMeta, debug_repr
from ._internal.utils.immutable import Immutable, ImmutableGenericMeta
from ._providers.lazy import Lazy
from .core import Container, DependencyDebug, DependencyValue, Scope
T = TypeVar('T')
if TYPE_CHECKING:
from .constants import Constants
# TODO: Once Python 3.6 support drops, fix this.
# We're lying to Mypy here. That's not how the actual descriptor, even though it's
# somewhat close. But inheriting Generic implies not being final anymore in Python 3.6,
# until PEP 560, and internally there's no need for Generic.
class Const(Generic[T]):
    """Typing-only descriptor type returned by ``const``.

    At runtime the placeholder is swapped for a real descriptor by
    ConstantsMeta; this class only gives Mypy the right view: attribute
    access on a Constants instance yields ``T``, access on the class itself
    yields ``Const[T]``.
    """
    __slots__ = ()
    @overload
    def __get__(self,  # noqa: E704
                instance: 'Constants',
                owner: 'Type[Constants]') -> T: ...  # pragma: no cover
    @overload
    def __get__(self,  # noqa: E704
                instance: None,
                owner: 'Type[Constants]') -> 'Const[T]': ...  # pragma: no cover
    def __get__(self,
                instance: 'Optional[Constants]',
                owner: 'Type[Constants]') -> object:  # pragma: no cover
        pass
@API.private
@final
class MakeConst(metaclass=FinalMeta):
    """Factory behind the public ``const`` object.

    Calling it builds an untyped placeholder, while ``const[T]`` returns a
    typed factory.  Placeholders are replaced with real descriptors later by
    ConstantsMeta.
    """
    def __call__(self,
                 __arg: Optional[object] = None,
                 *,
                 default: Any = Default.sentinel) -> Const[object]:
        """Create an untyped constant placeholder."""
        placeholder = LazyConstToDo(__arg, None, default)
        # The cast is a lie for now; ConstantsMeta swaps the placeholder for a
        # LazyConstDescriptor when the owning Constants class is created.
        return cast(Const[object], placeholder)
    def __getitem__(self, tpe: Type[T]) -> 'MakeTypedConst[T]':
        """Return a factory producing constants typed as ``tpe``."""
        return MakeTypedConst(tpe)
@API.private
@final
class MakeTypedConst(Immutable, Generic[T], metaclass=ImmutableGenericMeta):
    """Typed variant of MakeConst, returned by ``const[T]``.

    Validates ``default`` against the declared type before creating the
    placeholder that ConstantsMeta later replaces with a descriptor.
    """
    __slots__ = ('__type',)
    # The type supplied through const[T]; name-mangled to _MakeTypedConst__type.
    __type: Type[T]
    def __call__(self,
                 __arg: Optional[object] = None,
                 *,
                 default: Union[T, Default] = Default.sentinel) -> Const[T]:
        """Create a typed constant placeholder, rejecting mistyped defaults."""
        if not isinstance(default, (self.__type, Default)):
            raise TypeError(f"default is not an instance of {self.__type}, "
                            f"but {type(default)}")
        # Not true yet, but will be changed by ConstantsMeta
        return cast(Const[T], LazyConstToDo(__arg, self.__type, default))
@API.private
@final
class LazyConstToDo(FinalImmutable):
    """Temporary placeholder created by ``const(...)`` / ``const[T](...)``.

    ConstantsMeta replaces every class attribute holding one of these with a
    LazyConstDescriptor when the owning Constants class is created.
    """
    __slots__ = ('arg', 'type_', 'default')
    # value forwarded to Constants.provide_const()
    arg: Optional[object]
    # declared type from const[T], or None for an untyped const()
    type_: Optional[type]
    # fallback used when provide_const raises LookupError
    default: object
@API.private
class ConstantsMeta(AbstractMeta):
    """Metaclass of Constants subclasses.

    Once the class body has been evaluated, every const(...) placeholder in
    it is converted into a LazyConstDescriptor — unless the class was
    declared with ``abstract=True``.
    """
    def __new__(mcs: 'Type[ConstantsMeta]',
                name: str,
                bases: Tuple[type, ...],
                namespace: Dict[str, object],
                **kwargs: object
                ) -> 'ConstantsMeta':
        new_class = super().__new__(mcs, name, bases, namespace, **kwargs)  # type: ignore
        new_class = cast(ConstantsMeta, new_class)
        # Abstract bases keep their placeholders untouched; only concrete
        # Constants classes get wired up.
        if not kwargs.get('abstract'):
            _configure_constants(new_class)
        return new_class
@API.private
def _configure_constants(cls: ConstantsMeta) -> None:
    """Wire up a concrete Constants class.

    Validates its ``__antidote__`` configuration, registers the class as a
    singleton service, and swaps every LazyConstToDo placeholder for a
    LazyConstDescriptor.
    """
    # Imported lazily to avoid a circular import at module load time.
    from .constants import Constants
    from .service import service
    conf = getattr(cls, '__antidote__', None)
    if not isinstance(conf, Constants.Conf):
        raise TypeError(f"Constants configuration (__antidote__) is expected to be a "
                        f"{Constants.Conf}, not a {type(conf)}")
    cls = service(cls, singleton=True, wiring=conf.wiring)
    # list() snapshots the attributes since setattr mutates cls.__dict__.
    for attr_name, placeholder in list(cls.__dict__.items()):
        if not isinstance(placeholder, LazyConstToDo):
            continue
        descriptor = LazyConstDescriptor(
            name=attr_name,
            dependency=cls,
            method_name=Constants.provide_const.__name__,
            arg=placeholder.arg,
            default=placeholder.default,
            # Untyped const() placeholders fall back to object.
            type_=placeholder.type_ or object,
            auto_cast=placeholder.type_ is not None and placeholder.type_ in conf.auto_cast)
        setattr(cls, attr_name, descriptor)
@API.private
@final
class LazyConstDescriptor(FinalImmutable):
    """Descriptor installed on a Constants class for each declared constant.

    Class access returns a (cached) LazyConst dependency; instance access
    resolves the value through the instance's provide_const method, applying
    the default and optional auto-cast.
    """
    __slots__ = ('name', 'dependency', 'method_name', 'arg', 'default', 'type_',
                 'auto_cast', '_cache')
    name: str
    dependency: Hashable
    method_name: str
    arg: object
    default: object
    type_: type
    auto_cast: bool
    _cache: str
    def __init__(self,
                 *,
                 name: str,
                 dependency: Hashable,
                 method_name: str,
                 arg: object,
                 default: object,
                 type_: type,
                 auto_cast: bool
                 ):
        # The default has already been validated upstream; assert documents it.
        assert isinstance(default, (Default, type_))
        super().__init__(
            name=name,
            dependency=dependency,
            method_name=method_name,
            arg=arg,
            default=default,
            type_=type_,
            auto_cast=auto_cast,
            # Unique per-descriptor attribute name used to memoize the
            # LazyConst dependency on the owner class.
            _cache=f"__antidote_dependency_{hex(id(self))}"
        )
    def __get__(self, instance: object, owner: type) -> object:
        # Class access: return the cached LazyConst, creating it on first use.
        if instance is None:
            try:
                return getattr(owner, self._cache)
            except AttributeError:
                dependency = LazyConst(self)
                setattr(owner, self._cache, dependency)
                return dependency
        # Instance access: resolve the constant through provide_const.
        try:
            value = getattr(instance, self.method_name)(name=self.name,
                                                        arg=self.arg)
        except LookupError:
            # Fall back to the declared default, if any; otherwise re-raise.
            if self.default is not Default.sentinel:
                return self.default
            raise
        if self.auto_cast:
            value = self.type_(value)
        if not isinstance(value, self.type_):
            raise TypeError(f"Constant {self.name} is not an instance of {self.type_}, "
                            f"but {type(value)}")
        return value
@API.private
@final
class LazyConst(FinalImmutable, Lazy):
    """Dependency wrapper exposed when a constant is accessed on the class.

    Resolution delegates back to the descriptor with the Constants instance
    retrieved from the container; constants are singleton-scoped.
    """
    __slots__ = ('descriptor',)
    descriptor: LazyConstDescriptor
    def __init__(self, descriptor: LazyConstDescriptor) -> None:
        super().__init__(descriptor=descriptor)
    def debug_info(self) -> DependencyDebug:
        """Describe the constant for antidote's debug output."""
        descriptor = cast(LazyConstDescriptor, self.descriptor)
        cls = cast(type, descriptor.dependency)
        return DependencyDebug(f"{debug_repr(cls)}.{descriptor.name}",
                               scope=Scope.singleton(),
                               # TODO: Would be great if the first argument of the method
                               #       didn't show as unknown as it's always provided.
                               wired=[getattr(cls, descriptor.method_name), cls])
    def provide(self, container: Container) -> DependencyValue:
        """Resolve the constant's value through the owning Constants instance."""
        # TODO: Waiting for a fix: https://github.com/python/mypy/issues/6910
        descriptor = cast(LazyConstDescriptor, self.descriptor)
        return DependencyValue(
            descriptor.__get__(
                container.get(descriptor.dependency),
                None  # type: ignore
            ),
            scope=Scope.singleton()
        )
| 33.892019 | 89 | 0.575842 | from typing import (Any, Dict, Generic, Hashable, Optional, TYPE_CHECKING, Tuple, Type,
TypeVar, Union, cast, overload)
from ._compatibility.typing import final, Protocol
from ._internal import API
from ._internal.utils import AbstractMeta, Default, FinalImmutable, FinalMeta, debug_repr
from ._internal.utils.immutable import Immutable, ImmutableGenericMeta
from ._providers.lazy import Lazy
from .core import Container, DependencyDebug, DependencyValue, Scope
T = TypeVar('T')
if TYPE_CHECKING:
from .constants import Constants
# somewhat close. But inheriting Generic implies not being final anymore in Python 3.6,
# until PEP 560, and internally there's no need for Generic.
class Const(Generic[T]):
    """Typing-only descriptor type returned by ``const``; replaced at runtime
    by a real descriptor (see ConstantsMeta)."""
    __slots__ = ()
    @overload
    def __get__(self,
                instance: 'Constants',
                owner: 'Type[Constants]') -> T: ...
    @overload
    def __get__(self,
                instance: None,
                owner: 'Type[Constants]') -> 'Const[T]': ...
    def __get__(self,
                instance: 'Optional[Constants]',
                owner: 'Type[Constants]') -> object:
        # Never executed; only the signatures above matter to type checkers.
        pass
@API.private
@final
class MakeConst(metaclass=FinalMeta):
    """Factory behind ``const``: call for an untyped placeholder, subscript
    (``const[T]``) for a typed factory."""
    def __call__(self,
                 __arg: Optional[object] = None,
                 *,
                 default: Any = Default.sentinel) -> Const[object]:
        """Create an untyped placeholder; ConstantsMeta replaces it later."""
        return cast(Const[object], LazyConstToDo(__arg, None, default))
    def __getitem__(self, tpe: Type[T]) -> 'MakeTypedConst[T]':
        """Return a factory producing constants typed as ``tpe``."""
        return MakeTypedConst(tpe)
@API.private
@final
class MakeTypedConst(Immutable, Generic[T], metaclass=ImmutableGenericMeta):
    """Typed variant of MakeConst, returned by ``const[T]``."""
    __slots__ = ('__type',)
    # The type supplied through const[T] (name-mangled).
    __type: Type[T]
    def __call__(self,
                 __arg: Optional[object] = None,
                 *,
                 default: Union[T, Default] = Default.sentinel) -> Const[T]:
        """Create a typed placeholder, rejecting a mistyped default."""
        if not isinstance(default, (self.__type, Default)):
            raise TypeError(f"default is not an instance of {self.__type}, "
                            f"but {type(default)}")
        return cast(Const[T], LazyConstToDo(__arg, self.__type, default))
@API.private
@final
class LazyConstToDo(FinalImmutable):
    """Placeholder created by const()/const[T](); swapped for a
    LazyConstDescriptor by ConstantsMeta."""
    __slots__ = ('arg', 'type_', 'default')
    # value forwarded to Constants.provide_const()
    arg: Optional[object]
    # declared type from const[T], or None when untyped
    type_: Optional[type]
    # fallback used when provide_const raises LookupError
    default: object
@API.private
class ConstantsMeta(AbstractMeta):
    """Metaclass that wires const(...) placeholders on concrete Constants
    classes (skipped when declared with ``abstract=True``)."""
    def __new__(mcs: 'Type[ConstantsMeta]',
                name: str,
                bases: Tuple[type, ...],
                namespace: Dict[str, object],
                **kwargs: object
                ) -> 'ConstantsMeta':
        cls = cast(
            ConstantsMeta,
            super().__new__(mcs, name, bases, namespace, **kwargs)  # type: ignore
        )
        # Only concrete Constants classes get their placeholders replaced.
        if not kwargs.get('abstract'):
            _configure_constants(cls)
        return cls
@API.private
def _configure_constants(cls: ConstantsMeta) -> None:
    """Validate ``__antidote__``, register ``cls`` as a singleton service and
    replace every LazyConstToDo attribute with a LazyConstDescriptor."""
    # Imported lazily to avoid circular imports at module load time.
    from .constants import Constants
    from .service import service
    conf = getattr(cls, '__antidote__', None)
    if not isinstance(conf, Constants.Conf):
        raise TypeError(f"Constants configuration (__antidote__) is expected to be a "
                        f"{Constants.Conf}, not a {type(conf)}")
    cls = service(cls, singleton=True, wiring=conf.wiring)
    # list() snapshots the attributes since setattr mutates cls.__dict__.
    for name, v in list(cls.__dict__.items()):
        if isinstance(v, LazyConstToDo):
            setattr(cls,
                    name,
                    LazyConstDescriptor(
                        name=name,
                        dependency=cls,
                        method_name=Constants.provide_const.__name__,
                        arg=v.arg,
                        default=v.default,
                        # untyped const() placeholders fall back to object
                        type_=v.type_ or object,
                        auto_cast=v.type_ is not None and v.type_ in conf.auto_cast))
@API.private
@final
class LazyConstDescriptor(FinalImmutable):
    """Descriptor installed for each declared constant: class access returns
    a cached LazyConst dependency, instance access resolves the value through
    provide_const (with default fallback and optional auto-cast)."""
    __slots__ = ('name', 'dependency', 'method_name', 'arg', 'default', 'type_',
                 'auto_cast', '_cache')
    name: str
    dependency: Hashable
    method_name: str
    arg: object
    default: object
    type_: type
    auto_cast: bool
    _cache: str
    def __init__(self,
                 *,
                 name: str,
                 dependency: Hashable,
                 method_name: str,
                 arg: object,
                 default: object,
                 type_: type,
                 auto_cast: bool
                 ):
        assert isinstance(default, (Default, type_))
        super().__init__(
            name=name,
            dependency=dependency,
            method_name=method_name,
            arg=arg,
            default=default,
            type_=type_,
            auto_cast=auto_cast,
            # unique attribute name used to memoize the LazyConst on the owner
            _cache=f"__antidote_dependency_{hex(id(self))}"
        )
    def __get__(self, instance: object, owner: type) -> object:
        # Class access: return the cached LazyConst, creating it on first use.
        if instance is None:
            try:
                return getattr(owner, self._cache)
            except AttributeError:
                dependency = LazyConst(self)
                setattr(owner, self._cache, dependency)
                return dependency
        # Instance access: resolve through provide_const.
        try:
            value = getattr(instance, self.method_name)(name=self.name,
                                                        arg=self.arg)
        except LookupError:
            # Fall back to the declared default, if any; otherwise re-raise.
            if self.default is not Default.sentinel:
                return self.default
            raise
        if self.auto_cast:
            value = self.type_(value)
        if not isinstance(value, self.type_):
            raise TypeError(f"Constant {self.name} is not an instance of {self.type_}, "
                            f"but {type(value)}")
        return value
@API.private
@final
class LazyConst(FinalImmutable, Lazy):
    """Singleton-scoped dependency wrapping a constant descriptor; resolution
    delegates back to the descriptor with the container-provided instance."""
    __slots__ = ('descriptor',)
    descriptor: LazyConstDescriptor
    def __init__(self, descriptor: LazyConstDescriptor) -> None:
        super().__init__(descriptor=descriptor)
    def debug_info(self) -> DependencyDebug:
        """Describe the constant for antidote's debug output."""
        descriptor = cast(LazyConstDescriptor, self.descriptor)
        cls = cast(type, descriptor.dependency)
        return DependencyDebug(f"{debug_repr(cls)}.{descriptor.name}",
                               scope=Scope.singleton(),
                               wired=[getattr(cls, descriptor.method_name), cls])
    def provide(self, container: Container) -> DependencyValue:
        """Resolve the constant's value via the owning Constants instance."""
        descriptor = cast(LazyConstDescriptor, self.descriptor)
        return DependencyValue(
            descriptor.__get__(
                container.get(descriptor.dependency),
                # instance access path: owner is irrelevant here
                None
            ),
            scope=Scope.singleton()
        )
| true | true |
f734e42e8c40b73c89d6a81190560d99c06b2daa | 582 | py | Python | plot.py | nkuxx161/baseline-SR | c4caf06c5a5a88d7f8e27069018316b319f0913b | [
"MIT"
] | null | null | null | plot.py | nkuxx161/baseline-SR | c4caf06c5a5a88d7f8e27069018316b319f0913b | [
"MIT"
] | null | null | null | plot.py | nkuxx161/baseline-SR | c4caf06c5a5a88d7f8e27069018316b319f0913b | [
"MIT"
] | null | null | null | import pandas as pd
# Plot the raw series, the SR magnitude and the anomaly labels for one curve.
import pandas as pd
import os

curve_name = '5_k7'
data = pd.read_csv(os.path.join('result', curve_name+'.csv'))
timestamp = data['timestamp']
value = data['value']
mag = data['mag']
isAnomaly = data['isAnomaly']

# Select a non-interactive backend before pyplot is imported so the figure
# can be rendered headlessly.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

# Three stacked panels, all sharing the timestamp axis.
panels = [(value, 'value'), (mag, 'mag'), (isAnomaly, 'isAnomaly')]
for row, (series, label) in enumerate(panels, start=1):
    plt.subplot(3, 1, row)
    plt.plot(timestamp, series)
    plt.title(label)

plt.savefig(os.path.join('./images', 'SR_'+curve_name+'.png'))
plt.show()
plt.close() | 19.4 | 62 | 0.704467 | import pandas as pd
import os
# name of the curve whose SR output is plotted
curve_name = '5_k7'
data = pd.read_csv(os.path.join('result', curve_name+'.csv'))
timestamp = data['timestamp']
value = data['value']
mag = data['mag']
isAnomaly = data['isAnomaly']
# select a non-interactive backend before importing pyplot
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# three stacked panels sharing the timestamp axis
plt.subplot(3, 1, 1)
plt.plot(timestamp, value)
plt.title('value')
plt.subplot(3, 1, 2)
plt.plot(timestamp, mag)
plt.title('mag')
plt.subplot(3, 1, 3)
plt.plot(timestamp, isAnomaly)
plt.title('isAnomaly')
plt.savefig(os.path.join('./images', 'SR_'+curve_name+'.png'))
plt.show()
plt.close() | true | true |
f734e53915eb41c00db1f0b9283810f28578ae28 | 1,048 | py | Python | troc/apps/record/migrations/0001_initial.py | Windfarer/species2 | 15849c5805621410f3e8c26d27213f9bcf483fd1 | [
"MIT"
] | 1 | 2020-01-02T11:50:50.000Z | 2020-01-02T11:50:50.000Z | troc/apps/record/migrations/0001_initial.py | Windfarer/species2 | 15849c5805621410f3e8c26d27213f9bcf483fd1 | [
"MIT"
] | 5 | 2019-12-15T07:43:46.000Z | 2022-02-26T17:47:26.000Z | troc/apps/record/migrations/0001_initial.py | Windfarer/species2 | 15849c5805621410f3e8c26d27213f9bcf483fd1 | [
"MIT"
] | 1 | 2020-06-13T02:25:42.000Z | 2020-06-13T02:25:42.000Z | # Generated by Django 2.2.5 on 2019-10-07 08:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the `Record` table."""
    # first migration of this app
    initial = True
    dependencies = [
        ('species', '0002_auto_20190908_0902'),
        # works with custom user models as well as django.contrib.auth
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Record',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # both timestamps default to creation time; presumably
                # updated_at is maintained by application code — TODO confirm
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('species', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='species.Species')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 34.933333 | 118 | 0.655534 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the `Record` model."""
    # first migration of this app
    initial = True
    dependencies = [
        ('species', '0002_auto_20190908_0902'),
        # resolves to the project's configured user model
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Record',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('species', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='species.Species')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| true | true |
f734e66795d03e7b634e13c3a1bbf25ad1e1bcbd | 20,783 | py | Python | models/_sources/model_trainer_c4d127b7cc8008ff2c0c849733ead6e1.py | catniplab/ML-music-analysis | 793d54ed16166fbcd9acf4eec24998892334e064 | [
"MIT"
] | null | null | null | models/_sources/model_trainer_c4d127b7cc8008ff2c0c849733ead6e1.py | catniplab/ML-music-analysis | 793d54ed16166fbcd9acf4eec24998892334e064 | [
"MIT"
] | null | null | null | models/_sources/model_trainer_c4d127b7cc8008ff2c0c849733ead6e1.py | catniplab/ML-music-analysis | 793d54ed16166fbcd9acf4eec24998892334e064 | [
"MIT"
] | 1 | 2021-12-01T22:57:56.000Z | 2021-12-01T22:57:56.000Z | """
This script creates an instance of a sacred experiment and defines default configurations for training a neural network or a regression model.
"""
from src.neural_nets.models import get_model
from src.neural_nets.load_data import get_loader
from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss
import src.regression.logistic_regression as reg
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchsso.optim as soptim
import torch.nn.functional as F
import random
from torch.utils.data import DataLoader
from sacred import Experiment
from torch import Tensor, device
from copy import deepcopy
from time import sleep
from tqdm import tqdm
from typing import List
from itertools import product
# create a new sacred experiment whose name is an integer
ex = Experiment(name=str(random.randint(0, 1000000)))
# default configurations
@ex.config
def cfg():
    # sacred config scope: every local variable below becomes a config entry
    # system
    cuda = torch.cuda.is_available()
    gpu = 0
    base_dir = os.getcwd()
    # supported datasets
    #     JSB_Chorales (short)
    #     Nottingham (medium)
    #     Piano_midi (long)
    #     MuseData (extra long)
    dataset = "JSB_Chorales"
    # training
    num_epochs = 150
    batch_size = 128
    # mask some low notes and some high notes because they never show up
    low_off_notes = 0
    high_off_notes = 88
    # learning rate and per-epoch multiplicative decay
    lr = 0.001
    decay = 1.0
    optmzr = "SGD"
    regularization = 0.0
    # hyperparameter search grids (used when do_hpsearch is True)
    do_hpsearch = False
    learning_rates = 10**np.linspace(-2, -4, 5)
    decays = 1 - np.linspace(0, 0.1, num=5)
    regularizations = 10**np.linspace(-2, -4, num=5)
    hps_epochs = 50
    # Supported architectures
    #     REGRESSION
    #     LDS
    #     TANH
    architecture = 'LDS'
    readout = 'linear'
    gradient_clipping = 1
    jit = False  # not fully implemented
    # for regression: prediction horizon and input history length
    lag = 1
    window = 1
    # for neural networks (88 = number of piano keys)
    input_size = 88
    hidden_size = 300
    num_layers = 1
    output_size = 88
    # see models.py and initialization.py for details
    init = 'default'
    scale = 1.0
    parity = None  # see models.py
    t_distrib = torch.distributions.Uniform(0, 0.75)
    path = 'results/77/final_state_dict.pt'
    # when to save state dictionaries
    save_init_model = True
    save_final_model = True
    save_every_epoch = False
    # detect backprop anomalies (debugging aid; slows training)
    detect_anomaly = False
# give all random number generators the same seed
def _seed_all(_seed) -> None:
torch.manual_seed(_seed)
np.random.seed(_seed)
random.seed(_seed)
# this context is used when we are running things on the cpu
class NullContext(object):
    """Do-nothing context manager used in place of a CUDA device context
    when running on the CPU."""

    def __init__(self):
        pass

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Returning None (falsy) never suppresses exceptions.
        return None
# this function simply trains regression models and logs the results
# see regression.trainer for details
@ex.capture
def sklearn_experiment(dataset: str,
                       save_dir: str,
                       num_epochs: int,
                       high_off_notes: int,
                       low_off_notes: int,
                       lag: int,
                       window: int,
                       _seed,
                       _log,
                       _run):
    """
    Train one logistic regression model per note, save the parameters as
    sacred artifacts, and log loss/accuracy for every data split.

    :param dataset: name of the dataset to be used
    :param save_dir: temporary directory where artifacts are being stored
    :param num_epochs: number of training passes over the data
    :param high_off_notes: first note index above the modeled range
    :param low_off_notes: lowest note index of the modeled range
    :param lag: how many time steps into the future the regression model is to predict
    :param window: how many time steps the regression model is to take into account
    :param _seed: sacred random seed
    :param _log: sacred object used to output to the command line
    :param _run: sacred object used to monitor the runtime
    """
    num_notes = high_off_notes - low_off_notes
    # one model per note of the modeled range
    models = reg.train_models(dataset,
                              num_epochs,
                              low_off_notes,
                              high_off_notes,
                              _seed,
                              lag=lag,
                              window=window)
    # each model predicts one note from num_notes*window lagged inputs
    coefs = np.zeros((num_notes, num_notes*window))
    # BUG FIX: one scalar intercept per note. The original allocated
    # num_notes*window entries and only ever wrote the first num_notes.
    intercepts = np.zeros(num_notes)
    for i in range(num_notes):
        model = models[i]
        # if there were no notes played for this channel, a model won't be trained
        # simply save all parameters as -1 to discourage the note from being played
        if model is None:
            coefs[i] = -1
            intercepts[i] = -1
        else:
            coefs[i] = model.coef_
            intercepts[i] = model.intercept_
    np.save(save_dir + 'coefs.npy', coefs)
    np.save(save_dir + 'intercepts.npy', intercepts)
    _run.add_artifact(save_dir + 'coefs.npy')
    _run.add_artifact(save_dir + 'intercepts.npy')
    # log losses first, then accuracies, preserving the original metric names
    splits = (('traindata', 'train'), ('testdata', 'test'), ('validdata', 'valid'))
    for split, tag in splits:
        loss = reg.compute_loss(models,
                                dataset,
                                split,
                                low_off_notes,
                                high_off_notes,
                                lag=lag,
                                window=window)
        _run.log_scalar(tag + 'Loss', loss)
    for split, tag in splits:
        acc = reg.compute_accuracy(models,
                                   dataset,
                                   split,
                                   low_off_notes,
                                   high_off_notes,
                                   lag=lag,
                                   window=window)
        _run.log_scalar(tag + 'Accuracy', acc)
# a single optimization step
@ex.capture
def train_iter(device: device,
               cuda_device: torch.cuda.device,
               input_tensor: Tensor,
               target: Tensor,
               mask: Tensor,
               model: nn.Module,
               loss_fcn: nn.Module,
               optimizer: optim.Optimizer,
               save_every_epoch: bool,
               save_dir: str,
               train_loader: DataLoader,
               test_loader: DataLoader,
               valid_loader: DataLoader,
               low_off_notes: int,
               high_off_notes: int,
               _log,
               _run,
               logging=True,
               epoch: int = 0):
    """Run a single optimization step on one batch.

    Computes the masked loss averaged over the songs in the batch,
    backpropagates, steps the optimizer, and (optionally) logs training
    metrics and saves a per-epoch checkpoint.

    :param epoch: epoch index used to name per-epoch checkpoints. New
        keyword-with-default: the original referenced an undefined ``epoch``
        (and an undefined ``init_sd``) on the save path, which raised
        NameError whenever save_every_epoch was enabled.
    """
    # move the whole batch onto the training device (the model already is);
    # the original only moved input_tensor, which breaks on CUDA
    input_tensor = input_tensor.to(device)
    target = target.to(device)
    mask = mask.to(device)
    # number of songs in this batch, used to average the summed loss
    N = input_tensor.shape[0]
    output, hidden_tensors = model(input_tensor)
    loss = loss_fcn(output, target, mask, model)/N
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # use sacred to log training loss and accuracy
    if logging:
        train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)
        _run.log_scalar("trainLoss", loss.cpu().detach().item())
        _run.log_scalar("trainAccuracy", train_acc)
    # save a copy of the model and make sacred remember it each epoch
    if save_every_epoch and logging:
        sd = deepcopy(model.state_dict())
        ckpt_path = save_dir + 'state_dict_' + str(epoch) + '.pt'
        # BUG FIX: save the freshly copied state dict (the original saved the
        # undefined name `init_sd`)
        torch.save(sd, ckpt_path)
        _run.add_artifact(ckpt_path)
# train a neural network
# returns the final loss and accuracy on the training, testing, and validation sets
@ex.capture
def pytorch_train_loop(cuda: bool,
                       model_dict: dict,
                       initializer: dict,
                       train_loader: DataLoader,
                       test_loader: DataLoader,
                       valid_loader: DataLoader,
                       low_off_notes: int,
                       high_off_notes: int,
                       optmzr: str,
                       lr: float,
                       decay: float,
                       regularization: float,
                       num_epochs: int,
                       save_dir: str,
                       save_init_model,
                       save_every_epoch,
                       save_final_model,
                       _seed,
                       _log,
                       _run,
                       logging=True):
    """Train a neural network for num_epochs and return its final metrics.

    Returns ``((train_loss, test_loss, val_loss), (train_acc, test_acc,
    val_acc))`` computed after the last epoch. When ``logging`` is True the
    same metrics are also streamed to sacred and state dicts are saved as
    artifacts (initial, optional final, and optional per-epoch copies made
    inside train_iter).
    """
    # construct and initialize the model
    model = get_model(model_dict, initializer, cuda)
    # save a copy of the initial model and make sacred remember it
    if save_init_model and logging:
        init_sd = deepcopy(model.state_dict())
        torch.save(init_sd, save_dir + 'initial_state_dict.pt')
        _run.add_artifact(save_dir + 'initial_state_dict.pt')
    # if we are on cuda we construct the device and run everything on it
    cuda_device = NullContext()
    device = torch.device('cpu')
    if cuda:
        # NOTE(review): `gpu` is neither a parameter of this function nor a
        # visible module global, so this line raises NameError on the CUDA
        # path — presumably it should be taken from the sacred config like
        # the other arguments. TODO confirm and fix.
        dev_name = 'cuda:' + str(gpu)
        cuda_device = torch.cuda.device(dev_name)
        device = torch.device(dev_name)
        model = model.to(device)
    with cuda_device:
        # see metrics.py
        loss_fcn = MaskedBCE(regularization, low_off_notes=low_off_notes, high_off_notes=high_off_notes)
        # compute the metrics before training and log them
        if logging:
            train_loss = compute_loss(loss_fcn, model, train_loader)
            test_loss = compute_loss(loss_fcn, model, test_loader)
            val_loss = compute_loss(loss_fcn, model, valid_loader)
            _run.log_scalar("trainLoss", train_loss)
            _run.log_scalar("testLoss", test_loss)
            _run.log_scalar("validLoss", val_loss)
            train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)
            test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)
            val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)
            _run.log_scalar("trainAccuracy", train_acc)
            _run.log_scalar("testAccuracy", test_acc)
            _run.log_scalar("validAccuracy", val_acc)
        # construct the optimizer named by the `optmzr` config string
        optimizer = None
        if optmzr == "SGD":
            optimizer = optim.SGD(model.parameters(), lr=lr)
        elif optmzr == "Adam":
            optimizer = optim.Adam(model.parameters(), lr=lr)
        elif optmzr == "RMSprop":
            optimizer = optim.RMSprop(model.parameters(), lr=lr)
        else:
            raise ValueError("Optimizer {} not recognized.".format(optmzr))
        # learning rate decay: lr is multiplied by decay**epoch
        # (the `scheduler = None` assignment is immediately overwritten)
        scheduler = None
        scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: decay**epoch)
        # begin training loop
        for epoch in tqdm(range(num_epochs)):
            for input_tensor, target, mask in train_loader:
                # one optimization step per batch; train_iter also handles
                # per-batch logging and per-epoch checkpointing
                train_iter(device,
                           cuda_device,
                           input_tensor,
                           target,
                           mask,
                           model,
                           loss_fcn,
                           optimizer,
                           save_every_epoch,
                           save_dir,
                           train_loader,
                           test_loader,
                           valid_loader,
                           low_off_notes,
                           high_off_notes,
                           _log,
                           _run,
                           logging=logging)
            # learning rate decay
            scheduler.step()
            # use sacred to log testing and validation loss and accuracy
            if logging:
                test_loss = compute_loss(loss_fcn, model, test_loader)
                val_loss = compute_loss(loss_fcn, model, valid_loader)
                test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)
                val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)
                _run.log_scalar("testLoss", test_loss)
                _run.log_scalar("validLoss", val_loss)
                _run.log_scalar("testAccuracy", test_acc)
                _run.log_scalar("validAccuracy", val_acc)
        # save a copy of the trained model and make sacred remember it
        if save_final_model and logging:
            fin_sd = deepcopy(model.state_dict())
            torch.save(fin_sd, save_dir + 'final_state_dict.pt')
            _run.add_artifact(save_dir + 'final_state_dict.pt')
        # recompute the metrics so that this function can return them
        train_loss = compute_loss(loss_fcn, model, train_loader)
        test_loss = compute_loss(loss_fcn, model, test_loader)
        val_loss = compute_loss(loss_fcn, model, valid_loader)
        train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)
        test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)
        val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)
        return ((train_loss, test_loss, val_loss), (train_acc, test_acc, val_acc))
# main function
@ex.automain
def train_loop(cuda,
               gpu,
               base_dir,
               dataset,
               num_epochs,
               batch_size,
               low_off_notes,
               high_off_notes,
               lr,
               decay,
               optmzr,
               regularization,
               do_hpsearch,
               learning_rates,
               decays,
               regularizations,
               hps_epochs,
               architecture,
               readout,
               gradient_clipping,
               jit,
               lag,
               window,
               input_size,
               hidden_size,
               num_layers,
               output_size,
               detect_anomaly,
               init,
               scale,
               parity,
               t_distrib,
               path,
               save_init_model,
               save_final_model,
               save_every_epoch,
               _seed,
               _log,
               _run):
    """Sacred entry point.

    Trains either a per-note logistic-regression baseline (when
    architecture == 'REGRESSION') or a pytorch model on the configured
    dataset.  When do_hpsearch is True a grid search over
    (learning_rates, decays, regularizations) replaces the single
    training run, and the best combination (lowest final test loss)
    is logged to sacred.
    """
    # save artifacts to a temporary directory that gets erased when the experiment is over
    save_dir = base_dir + '/tmp_' + str(_seed)
    os.system('mkdir ' + save_dir)
    save_dir += '/'
    # give all random number generators the same seed
    _seed_all(_seed)
    sklearn_program = architecture == 'REGRESSION'
    # regression models and neural networks are trained very differently
    if sklearn_program:
        sklearn_experiment(dataset,
                           save_dir,
                           num_epochs,
                           high_off_notes,
                           low_off_notes,
                           lag,
                           window,
                           _seed,
                           _log,
                           _run)
    # run a pytorch program
    else:
        model_dict = {'architecture': architecture,
                      'readout': readout,
                      'gradient_clipping': gradient_clipping,
                      'jit': jit,
                      'lag': lag,
                      'window': window,
                      'input_size': input_size,
                      'hidden_size': hidden_size,
                      'num_layers': num_layers,
                      'output_size': output_size
                      }
        initializer = {'init': init,
                       'scale': scale,
                       'parity': parity,
                       't_distrib': t_distrib,
                       'path': path,
                       'low_off_notes': low_off_notes,
                       'high_off_notes': high_off_notes
                       }
        # if we are debugging we may want to detect autograd anomalies
        torch.autograd.set_detect_anomaly(detect_anomaly)
        # construct the pytorch data loaders
        train_loader, test_loader, valid_loader = get_loader(dataset, batch_size)
        # standard training loop
        if not do_hpsearch:
            # the training loop function returns the metrics achieved at the end of training
            # they will be logged by default, no need to do anything with them here
            metrics = pytorch_train_loop(cuda,
                                         model_dict,
                                         initializer,
                                         train_loader,
                                         test_loader,
                                         valid_loader,
                                         low_off_notes,
                                         high_off_notes,
                                         optmzr,
                                         lr,
                                         decay,
                                         regularization,
                                         num_epochs,
                                         save_dir,
                                         save_init_model,
                                         save_every_epoch,
                                         save_final_model,
                                         _seed,
                                         _log,
                                         _run)
        # only goal here is to find the best hyper parameters
        else:
            min_test_loss = float('inf')
            best_lr = 0
            best_dcay = 0
            best_reg = 0
            hyperparams = product(learning_rates, decays, regularizations)
            for rate, dcay, reg in hyperparams:
                # train a model with the given hyperparameters
                # don't log anything, otherwise we will have a ridiculous amount of extraneous info
                # BUG FIX: low_off_notes/high_off_notes used to be omitted here,
                # which shifted every following positional argument (optmzr was
                # bound to low_off_notes, rate to high_off_notes, and so on)
                metrics = pytorch_train_loop(cuda,
                                             model_dict,
                                             initializer,
                                             train_loader,
                                             test_loader,
                                             valid_loader,
                                             low_off_notes,
                                             high_off_notes,
                                             optmzr,
                                             rate,
                                             dcay,
                                             reg,
                                             hps_epochs,
                                             save_dir,
                                             save_init_model,
                                             save_every_epoch,
                                             save_final_model,
                                             _seed,
                                             _log,
                                             _run,
                                             logging=False)
                # loss is first index, test set is second index
                test_loss = metrics[0][1]
                # compare loss against other hyperparams and update if necessary
                # (test_loss == test_loss is False for NaN, so NaN runs are skipped)
                if test_loss == test_loss and test_loss < min_test_loss:
                    min_test_loss = test_loss
                    best_lr = rate
                    best_dcay = dcay
                    best_reg = reg
            # record the best hyperparameters
            _run.log_scalar("learning_rate", best_lr)
            _run.log_scalar("decay", best_dcay)
            _run.log_scalar("regularization", best_reg)
    # wait a second then remove the temporary directory used for storing artifacts
    sleep(1)
    os.system('rm -r ' + save_dir)
| 35.405451 | 142 | 0.505172 |
from src.neural_nets.models import get_model
from src.neural_nets.load_data import get_loader
from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss
import src.regression.logistic_regression as reg
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchsso.optim as soptim
import torch.nn.functional as F
import random
from torch.utils.data import DataLoader
from sacred import Experiment
from torch import Tensor, device
from copy import deepcopy
from time import sleep
from tqdm import tqdm
from typing import List
from itertools import product
# module-level sacred Experiment; the random numeric name presumably avoids
# name collisions between concurrent runs — confirm with the experiment setup
ex = Experiment(name=str(random.randint(0, 1000000)))
@ex.config
def cfg():
    # sacred config scope: every local name assigned below becomes a config
    # entry that can be overridden from the command line / hp search
    cuda = torch.cuda.is_available()
    gpu = 0
    base_dir = os.getcwd()
    dataset = "JSB_Chorales"
    num_epochs = 150
    batch_size = 128
    # note-range bounds passed to the masked loss and accuracy metrics
    low_off_notes = 0
    high_off_notes = 88
    lr = 0.001
    decay = 1.0
    optmzr = "SGD"
    regularization = 0.0
    # hyper-parameter grid-search settings (used only when do_hpsearch is True)
    do_hpsearch = False
    learning_rates = 10**np.linspace(-2, -4, 5)
    decays = 1 - np.linspace(0, 0.1, num=5)
    regularizations = 10**np.linspace(-2, -4, num=5)
    hps_epochs = 50
    # model architecture settings
    architecture = 'LDS'
    readout = 'linear'
    gradient_clipping = 1
    jit = False
    lag = 1
    window = 1
    input_size = 88
    hidden_size = 300
    num_layers = 1
    output_size = 88
    # weight-initialization settings
    init = 'default'
    scale = 1.0
    parity = None
    t_distrib = torch.distributions.Uniform(0, 0.75)
    path = 'results/77/final_state_dict.pt'
    # artifact-saving settings
    save_init_model = True
    save_final_model = True
    save_every_epoch = False
    detect_anomaly = False
def _seed_all(_seed) -> None:
torch.manual_seed(_seed)
np.random.seed(_seed)
random.seed(_seed)
class NullContext(object):
    """A no-op context manager, used as a stand-in for torch.cuda.device
    when running on the CPU."""
    def __enter__(self):
        return None
    def __exit__(self, exc_type, exc_value, traceback):
        # returning None (falsy) propagates any exception, same as `pass`
        return None
@ex.capture
def sklearn_experiment(dataset: str,
                       save_dir: str,
                       num_epochs: int,
                       high_off_notes: int,
                       low_off_notes: int,
                       lag: int,
                       window: int,
                       _seed,
                       _log,
                       _run):
    """Train one logistic-regression model per note and log its metrics.

    The fitted coefficients/intercepts are stacked into numpy arrays, saved
    under save_dir and registered as sacred artifacts; train/test/valid loss
    and accuracy are logged as sacred scalars.
    """
    num_notes = high_off_notes - low_off_notes
    models = reg.train_models(dataset,
                              num_epochs,
                              low_off_notes,
                              high_off_notes,
                              _seed,
                              lag=lag,
                              window=window)
    coefs = np.zeros((num_notes, num_notes*window))
    # NOTE(review): intercepts is sized num_notes*window but indexed once per
    # note below — presumably it should have length num_notes; confirm against
    # the shape of reg.train_models' output.
    intercepts = np.zeros(num_notes*window)
    for i in range(num_notes):
        model = models[i]
        # simply save all parameters as -1 to discourage the note from being played
        if model is None:  # fixed: identity comparison with None (was `== None`)
            coefs[i] = -1
            intercepts[i] = -1
        else:
            coefs[i] = model.coef_
            intercepts[i] = model.intercept_
    np.save(save_dir + 'coefs.npy', coefs)
    np.save(save_dir + 'intercepts.npy', intercepts)
    _run.add_artifact(save_dir + 'coefs.npy')
    _run.add_artifact(save_dir + 'intercepts.npy')
    # loss on each split
    train_loss = reg.compute_loss(models,
                                  dataset,
                                  'traindata',
                                  low_off_notes,
                                  high_off_notes,
                                  lag=lag,
                                  window=window)
    test_loss = reg.compute_loss(models,
                                 dataset,
                                 'testdata',
                                 low_off_notes,
                                 high_off_notes,
                                 lag=lag,
                                 window=window)
    valid_loss = reg.compute_loss(models,
                                  dataset,
                                  'validdata',
                                  low_off_notes,
                                  high_off_notes,
                                  lag=lag,
                                  window=window)
    _run.log_scalar('trainLoss', train_loss)
    _run.log_scalar('testLoss', test_loss)
    _run.log_scalar('validLoss', valid_loss)
    # accuracy on each split
    train_acc = reg.compute_accuracy(models,
                                     dataset,
                                     'traindata',
                                     low_off_notes,
                                     high_off_notes,
                                     lag=lag,
                                     window=window)
    test_acc = reg.compute_accuracy(models,
                                    dataset,
                                    'testdata',
                                    low_off_notes,
                                    high_off_notes,
                                    lag=lag,
                                    window=window)
    valid_acc = reg.compute_accuracy(models,
                                     dataset,
                                     'validdata',
                                     low_off_notes,
                                     high_off_notes,
                                     lag=lag,
                                     window=window)
    _run.log_scalar('trainAccuracy', train_acc)
    _run.log_scalar('testAccuracy', test_acc)
    _run.log_scalar('validAccuracy', valid_acc)
# a single optimization step
@ex.capture
def train_iter(device: device,
               cuda_device: torch.cuda.device,
               input_tensor: Tensor,
               target: Tensor,
               mask: Tensor,
               model: nn.Module,
               loss_fcn: nn.Module,
               optimizer: optim.Optimizer,
               save_every_epoch: bool,
               save_dir: str,
               train_loader: DataLoader,
               test_loader: DataLoader,
               valid_loader: DataLoader,
               low_off_notes: int,
               high_off_notes: int,
               _log,
               _run,
               logging=True,
               epoch=0):
    """Run one optimization step on a single batch.

    Computes the masked loss averaged over the songs in the batch, takes one
    optimizer step, and optionally logs training loss/accuracy to sacred.

    BUG FIX: the per-epoch checkpoint branch used to reference the undefined
    names `init_sd` and `epoch` (NameError when save_every_epoch was set).
    It now saves the state dict it just copied, and the epoch index is an
    explicit keyword parameter (default 0 — callers that want distinct
    per-epoch files should pass epoch=<current epoch>).
    """
    input_tensor = input_tensor.to(device)
    # number of songs in this batch
    N = input_tensor.shape[0]
    output, hidden_tensors = model(input_tensor)
    loss = loss_fcn(output, target, mask, model)/N
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # use sacred to log training loss and accuracy
    if logging:
        train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)
        _run.log_scalar("trainLoss", loss.cpu().detach().item())
        _run.log_scalar("trainAccuracy", train_acc)
    # save a copy of the model and make sacred remember it each epoch
    if save_every_epoch and logging:
        sd = deepcopy(model.state_dict())
        torch.save(sd, save_dir + 'state_dict_' + str(epoch) + '.pt')
        _run.add_artifact(save_dir + 'state_dict_' + str(epoch) + '.pt')
# train a neural network
# returns the final loss and accuracy on the training, testing, and validation sets
@ex.capture
def pytorch_train_loop(cuda: bool,
                       model_dict: dict,
                       initializer: dict,
                       train_loader: DataLoader,
                       test_loader: DataLoader,
                       valid_loader: DataLoader,
                       low_off_notes: int,
                       high_off_notes: int,
                       optmzr: str,
                       lr: float,
                       decay: float,
                       regularization: float,
                       num_epochs: int,
                       save_dir: str,
                       save_init_model,
                       save_every_epoch,
                       save_final_model,
                       _seed,
                       _log,
                       _run,
                       gpu=0,
                       logging=True):
    """Train a pytorch model and (optionally) log metrics to sacred.

    Returns ((train_loss, test_loss, val_loss), (train_acc, test_acc, val_acc))
    computed after the final epoch.

    BUG FIX: the cuda branch used to read an undefined name `gpu`, raising
    NameError whenever cuda=True.  `gpu` is now a parameter with a default;
    because this function is decorated with @ex.capture, sacred injects the
    configured `gpu` value whenever the caller does not pass it explicitly.
    """
    # construct and initialize the model
    model = get_model(model_dict, initializer, cuda)
    # save a copy of the initial model and make sacred remember it
    if save_init_model and logging:
        init_sd = deepcopy(model.state_dict())
        torch.save(init_sd, save_dir + 'initial_state_dict.pt')
        _run.add_artifact(save_dir + 'initial_state_dict.pt')
    # if we are on cuda we construct the device and run everything on it
    cuda_device = NullContext()
    device = torch.device('cpu')
    if cuda:
        dev_name = 'cuda:' + str(gpu)
        cuda_device = torch.cuda.device(dev_name)
        device = torch.device(dev_name)
        model = model.to(device)
    with cuda_device:
        # see metrics.py
        loss_fcn = MaskedBCE(regularization, low_off_notes=low_off_notes, high_off_notes=high_off_notes)
        # compute the metrics before training and log them
        if logging:
            train_loss = compute_loss(loss_fcn, model, train_loader)
            test_loss = compute_loss(loss_fcn, model, test_loader)
            val_loss = compute_loss(loss_fcn, model, valid_loader)
            _run.log_scalar("trainLoss", train_loss)
            _run.log_scalar("testLoss", test_loss)
            _run.log_scalar("validLoss", val_loss)
            train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)
            test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)
            val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)
            _run.log_scalar("trainAccuracy", train_acc)
            _run.log_scalar("testAccuracy", test_acc)
            _run.log_scalar("validAccuracy", val_acc)
        # construct the optimizer
        optimizer = None
        if optmzr == "SGD":
            optimizer = optim.SGD(model.parameters(), lr=lr)
        elif optmzr == "Adam":
            optimizer = optim.Adam(model.parameters(), lr=lr)
        elif optmzr == "RMSprop":
            optimizer = optim.RMSprop(model.parameters(), lr=lr)
        else:
            raise ValueError("Optimizer {} not recognized.".format(optmzr))
        # exponential learning rate decay: lr(epoch) = lr * decay**epoch
        scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: decay**epoch)
        # begin training loop
        for epoch in tqdm(range(num_epochs)):
            for input_tensor, target, mask in train_loader:
                train_iter(device,
                           cuda_device,
                           input_tensor,
                           target,
                           mask,
                           model,
                           loss_fcn,
                           optimizer,
                           save_every_epoch,
                           save_dir,
                           train_loader,
                           test_loader,
                           valid_loader,
                           low_off_notes,
                           high_off_notes,
                           _log,
                           _run,
                           logging=logging)
            # learning rate decay
            scheduler.step()
            # use sacred to log testing and validation loss and accuracy
            if logging:
                test_loss = compute_loss(loss_fcn, model, test_loader)
                val_loss = compute_loss(loss_fcn, model, valid_loader)
                test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)
                val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)
                _run.log_scalar("testLoss", test_loss)
                _run.log_scalar("validLoss", val_loss)
                _run.log_scalar("testAccuracy", test_acc)
                _run.log_scalar("validAccuracy", val_acc)
        # save a copy of the trained model and make sacred remember it
        if save_final_model and logging:
            fin_sd = deepcopy(model.state_dict())
            torch.save(fin_sd, save_dir + 'final_state_dict.pt')
            _run.add_artifact(save_dir + 'final_state_dict.pt')
        # recompute the metrics so that this function can return them
        train_loss = compute_loss(loss_fcn, model, train_loader)
        test_loss = compute_loss(loss_fcn, model, test_loader)
        val_loss = compute_loss(loss_fcn, model, valid_loader)
        train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)
        test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)
        val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)
        return ((train_loss, test_loss, val_loss), (train_acc, test_acc, val_acc))
# main function
@ex.automain
def train_loop(cuda,
               gpu,
               base_dir,
               dataset,
               num_epochs,
               batch_size,
               low_off_notes,
               high_off_notes,
               lr,
               decay,
               optmzr,
               regularization,
               do_hpsearch,
               learning_rates,
               decays,
               regularizations,
               hps_epochs,
               architecture,
               readout,
               gradient_clipping,
               jit,
               lag,
               window,
               input_size,
               hidden_size,
               num_layers,
               output_size,
               detect_anomaly,
               init,
               scale,
               parity,
               t_distrib,
               path,
               save_init_model,
               save_final_model,
               save_every_epoch,
               _seed,
               _log,
               _run):
    """Sacred entry point.

    Trains either a per-note logistic-regression baseline (when
    architecture == 'REGRESSION') or a pytorch model on the configured
    dataset.  When do_hpsearch is True a grid search over
    (learning_rates, decays, regularizations) replaces the single
    training run, and the best combination (lowest final test loss)
    is logged to sacred.
    """
    # save artifacts to a temporary directory that gets erased when the experiment is over
    save_dir = base_dir + '/tmp_' + str(_seed)
    os.system('mkdir ' + save_dir)
    save_dir += '/'
    # give all random number generators the same seed
    _seed_all(_seed)
    sklearn_program = architecture == 'REGRESSION'
    # regression models and neural networks are trained very differently
    if sklearn_program:
        sklearn_experiment(dataset,
                           save_dir,
                           num_epochs,
                           high_off_notes,
                           low_off_notes,
                           lag,
                           window,
                           _seed,
                           _log,
                           _run)
    # run a pytorch program
    else:
        model_dict = {'architecture': architecture,
                      'readout': readout,
                      'gradient_clipping': gradient_clipping,
                      'jit': jit,
                      'lag': lag,
                      'window': window,
                      'input_size': input_size,
                      'hidden_size': hidden_size,
                      'num_layers': num_layers,
                      'output_size': output_size
                      }
        initializer = {'init': init,
                       'scale': scale,
                       'parity': parity,
                       't_distrib': t_distrib,
                       'path': path,
                       'low_off_notes': low_off_notes,
                       'high_off_notes': high_off_notes
                       }
        # if we are debugging we may want to detect autograd anomalies
        torch.autograd.set_detect_anomaly(detect_anomaly)
        # construct the pytorch data loaders
        train_loader, test_loader, valid_loader = get_loader(dataset, batch_size)
        # standard training loop
        if not do_hpsearch:
            # the training loop function returns the metrics achieved at the end of training
            # they will be logged by default, no need to do anything with them here
            metrics = pytorch_train_loop(cuda,
                                         model_dict,
                                         initializer,
                                         train_loader,
                                         test_loader,
                                         valid_loader,
                                         low_off_notes,
                                         high_off_notes,
                                         optmzr,
                                         lr,
                                         decay,
                                         regularization,
                                         num_epochs,
                                         save_dir,
                                         save_init_model,
                                         save_every_epoch,
                                         save_final_model,
                                         _seed,
                                         _log,
                                         _run)
        # only goal here is to find the best hyper parameters
        else:
            min_test_loss = float('inf')
            best_lr = 0
            best_dcay = 0
            best_reg = 0
            hyperparams = product(learning_rates, decays, regularizations)
            for rate, dcay, reg in hyperparams:
                # train a model with the given hyperparameters
                # don't log anything, otherwise we will have a ridiculous amount of extraneous info
                # BUG FIX: low_off_notes/high_off_notes used to be omitted here,
                # which shifted every following positional argument (optmzr was
                # bound to low_off_notes, rate to high_off_notes, and so on)
                metrics = pytorch_train_loop(cuda,
                                             model_dict,
                                             initializer,
                                             train_loader,
                                             test_loader,
                                             valid_loader,
                                             low_off_notes,
                                             high_off_notes,
                                             optmzr,
                                             rate,
                                             dcay,
                                             reg,
                                             hps_epochs,
                                             save_dir,
                                             save_init_model,
                                             save_every_epoch,
                                             save_final_model,
                                             _seed,
                                             _log,
                                             _run,
                                             logging=False)
                # loss is first index, test set is second index
                test_loss = metrics[0][1]
                # compare loss against other hyperparams and update if necessary
                # (test_loss == test_loss is False for NaN, so NaN runs are skipped)
                if test_loss == test_loss and test_loss < min_test_loss:
                    min_test_loss = test_loss
                    best_lr = rate
                    best_dcay = dcay
                    best_reg = reg
            # record the best hyperparameters
            _run.log_scalar("learning_rate", best_lr)
            _run.log_scalar("decay", best_dcay)
            _run.log_scalar("regularization", best_reg)
    # wait a second then remove the temporary directory used for storing artifacts
    sleep(1)
    os.system('rm -r ' + save_dir)
| true | true |
f734e6fd78fd01730a7c42ee39a5882d91564b45 | 9,026 | py | Python | test/ut/tools/annotation/testcase/usercode/mnist.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 9,680 | 2019-05-07T01:42:30.000Z | 2022-03-31T16:48:33.000Z | test/ut/tools/annotation/testcase/usercode/mnist.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 1,957 | 2019-05-06T21:44:21.000Z | 2022-03-31T09:21:53.000Z | test/ut/tools/annotation/testcase/usercode/mnist.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 1,571 | 2019-05-07T06:42:55.000Z | 2022-03-31T03:19:24.000Z | # -*- encoding:utf8 -*-
"""A deep MNIST classifier using convolutional layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import tempfile
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
logger = logging.getLogger('mnist')
FLAGS = None
class MnistNetwork(object):
    """Convolutional MNIST classifier: two conv+max-pool blocks, a hidden
    fully-connected layer with dropout, and a linear readout trained with Adam.

    NOTE: the triple-quoted ``@nni.*`` strings below are NNI annotations that
    are rewritten by the NNI annotation compiler — keep them verbatim.
    """
    def __init__(self,
                 channel_1_num = 32,
                 channel_2_num = 64,
                 conv_size = 5,
                 hidden_size = 1024,
                 pool_size = 2,
                 learning_rate = 0.0001,
                 x_dim = 784,
                 y_dim = 10):
        self.channel_1_num = channel_1_num
        self.channel_2_num = channel_2_num
        '''@nni.variable(nni.choice(2,3,5,7),name=self.conv_size)'''
        self.conv_size = conv_size
        '''@nni.variable(nni.choice(124,512,1024),name=self.hidden_size)'''
        self.hidden_size = hidden_size
        self.pool_size = pool_size
        '''@nni.variable(nni.randint(2,3,5),name=self.learning_rate)'''
        self.learning_rate = learning_rate
        self.x_dim = x_dim
        self.y_dim = y_dim
    def build_network(self):
        """Build placeholders, the conv/pool/fc graph, the Adam train op and
        the accuracy op; all of them are stored on self."""
        self.x = tf.placeholder(tf.float32, [None, self.x_dim], name = 'input_x')
        self.y = tf.placeholder(tf.float32, [None, self.y_dim], name = 'input_y')
        self.keep_prob = tf.placeholder(tf.float32, name = 'keep_prob')
        # Reshape to use within a convolutional neural net.
        # Last dimension is for "features" - there is only one here, since images are
        # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
        with tf.name_scope('reshape'):
            try:
                # x_dim must be a perfect square (default 784 -> 28x28 images)
                input_dim = int(math.sqrt(self.x_dim))
            except:
                #print('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
                logger.debug('input dim cannot be sqrt and reshape. input dim: ', str(self.x_dim))
                raise
            x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1])
        # First convolutional layer - maps one grayscale image to 32 feature maps.
        with tf.name_scope('conv1'):
            W_conv1 = weight_variable([self.conv_size, self.conv_size, 1, self.channel_1_num])
            b_conv1 = bias_variable([self.channel_1_num])
            """@nni.function_choice(tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1),tf.nn.sigmoid(conv2d(x_image, W_conv1) + b_conv1),tf.nn.tanh(conv2d(x_image, W_conv1) + b_conv1),name=tf.nn.relu)"""
            h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        # Pooling layer - downsamples by 2X.
        with tf.name_scope('pool1'):
            """@nni.function_choice(max_pool(h_conv1, self.pool_size),avg_pool(h_conv1, self.pool_size),name=max_pool)"""
            h_pool1 = max_pool(h_conv1, self.pool_size)
        # Second convolutional layer -- maps 32 feature maps to 64.
        with tf.name_scope('conv2'):
            W_conv2 = weight_variable([self.conv_size, self.conv_size, self.channel_1_num, self.channel_2_num])
            b_conv2 = bias_variable([self.channel_2_num])
            h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        # Second pooling layer.
        with tf.name_scope('pool2'):
            #"""@nni.dynamic(input={cnn_block:1, concat:2},function_choice={"cnn_block":(x,nni.choice([3,4])),"cnn_block":(x),"concat":(x,y)},limit={"cnn_block.input":[concat,input],"concat.input":[this.depth-1,this.depth-3,this.depth-5],"graph.width":[1]})"""
            h_pool2 = max_pool(h_conv2, self.pool_size)
        # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
        # is down to 7x7x64 feature maps -- maps this to 1024 features.
        last_dim = int(input_dim / (self.pool_size * self.pool_size))
        with tf.name_scope('fc1'):
            W_fc1 = weight_variable([last_dim * last_dim * self.channel_2_num, self.hidden_size])
            b_fc1 = bias_variable([self.hidden_size])
            h_pool2_flat = tf.reshape(h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        # Dropout - controls the complexity of the model, prevents co-adaptation of features.
        with tf.name_scope('dropout'):
            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
        # Map the 1024 features to 10 classes, one for each digit
        with tf.name_scope('fc2'):
            W_fc2 = weight_variable([self.hidden_size, self.y_dim])
            b_fc2 = bias_variable([self.y_dim])
            y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        with tf.name_scope('loss'):
            cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = self.y, logits = y_conv))
        with tf.name_scope('adam_optimizer'):
            self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(cross_entropy)
        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(self.y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        return
def conv2d(x, W):
    """Apply a 2-D convolution with unit strides and SAME padding."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')
def max_pool(x, pool_size):
    """Max-pool a feature map, shrinking each spatial dim by pool_size."""
    window = [1, pool_size, pool_size, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def avg_pool(x, pool_size):
    """Average-pool a feature map, shrinking each spatial dim by pool_size."""
    window = [1, pool_size, pool_size, 1]
    return tf.nn.avg_pool(x, ksize=window, strides=window, padding='SAME')
def weight_variable(shape):
    """Create a weight Variable drawn from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable filled with the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def main():
    """Download MNIST, build the network, train for batch_num batches, and
    evaluate test-set accuracy every 100 steps and once at the end.

    The triple-quoted ``@nni.*`` strings are NNI annotations processed by the
    annotation compiler — keep them verbatim.
    """
    # Import data
    data_dir= '/tmp/tensorflow/mnist/input_data'
    mnist = input_data.read_data_sets(data_dir, one_hot=True)
    logger.debug('Mnist download data done.')
    # Create the model
    # Build the graph for the deep net
    mnist_network = MnistNetwork()
    mnist_network.build_network()
    logger.debug('Mnist build network done.')
    # Write log
    graph_location = tempfile.mkdtemp()
    logger.debug('Saving graph to: %s', graph_location)
    # print('Saving graph to: %s' % graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())
    test_acc = 0.0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch_num=200
        for i in range(batch_num):
            '''@nni.variable(nni.choice(50,250,500),name=batch_size)'''
            batch_size=50
            batch = mnist.train.next_batch(batch_size)
            '''@nni.variable(nni.choice(1,5),name=dropout_rate)'''
            dropout_rate=0.5
            mnist_network.train_step.run(feed_dict={mnist_network.x: batch[0], mnist_network.y: batch[1], mnist_network.keep_prob: dropout_rate})
            # evaluate on the full test set every 100 batches
            if i % 100 == 0:
                #train_accuracy = mnist_network.accuracy.eval(feed_dict={
                #    mnist_network.x: batch[0], mnist_network.y: batch[1], mnist_network.keep_prob: params['dropout_rate']})
                #print('step %d, training accuracy %g' % (i, train_accuracy))
                test_acc = mnist_network.accuracy.eval(feed_dict={
                    mnist_network.x: mnist.test.images, mnist_network.y: mnist.test.labels, mnist_network.keep_prob: 1.0})
                '''@nni.report_intermediate_result(test_acc)'''
        # final test-set accuracy after training
        test_acc = mnist_network.accuracy.eval(feed_dict={
            mnist_network.x: mnist.test.images, mnist_network.y: mnist.test.labels, mnist_network.keep_prob: 1.0})
        '''@nni.report_final_result(test_acc)'''
def generate_default_params():
    """Return the default hyper-parameter set used when none are supplied."""
    return {
        'data_dir': '/tmp/tensorflow/mnist/input_data',
        'dropout_rate': 0.5,
        'channel_1_num': 32,
        'channel_2_num': 64,
        'conv_size': 5,
        'pool_size': 2,
        'hidden_size': 1024,
        'batch_size': 50,
        'batch_num': 200,
        'learning_rate': 1e-4,
    }
if __name__ == '__main__':
    # run command: python mnist.py --init_file_path ./init.json
    #FLAGS, unparsed = parse_command()
    #original_params = parse_init_json(FLAGS.init_file_path, {})
    #pipe_interface.set_params_to_env()
    # NNI annotation: rewritten to nni.get_next_parameter() by the annotation
    # compiler — keep verbatim
    '''@nni.get_next_parameter()'''
    try:
        params = generate_default_params()
        logger.debug('params')
        logger.debug('params update')
        main()
    except:
        # log the full traceback, then re-raise so the process exits non-zero
        logger.exception('Got some exception in while loop in mnist.py')
        raise
| 42.980952 | 260 | 0.634611 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import tempfile
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
logger = logging.getLogger('mnist')
FLAGS = None
class MnistNetwork(object):
    """Convolutional MNIST classifier: two conv+max-pool blocks, a hidden
    fully-connected layer with dropout, and a linear readout trained with Adam.
    """
    def __init__(self,
                 channel_1_num = 32,
                 channel_2_num = 64,
                 conv_size = 5,
                 hidden_size = 1024,
                 pool_size = 2,
                 learning_rate = 0.0001,
                 x_dim = 784,
                 y_dim = 10):
        # store hyper-parameters; x_dim must be a perfect square (784 -> 28x28)
        self.channel_1_num = channel_1_num
        self.channel_2_num = channel_2_num
        self.conv_size = conv_size
        self.hidden_size = hidden_size
        self.pool_size = pool_size
        self.learning_rate = learning_rate
        self.x_dim = x_dim
        self.y_dim = y_dim
    def build_network(self):
        """Build placeholders, the conv/pool/fc graph, the Adam train op and
        the accuracy op; all of them are stored on self."""
        # placeholders for flattened images, one-hot labels and dropout keep-prob
        self.x = tf.placeholder(tf.float32, [None, self.x_dim], name = 'input_x')
        self.y = tf.placeholder(tf.float32, [None, self.y_dim], name = 'input_y')
        self.keep_prob = tf.placeholder(tf.float32, name = 'keep_prob')
        # reshape the flat input back into a square grayscale image
        with tf.name_scope('reshape'):
            try:
                input_dim = int(math.sqrt(self.x_dim))
            except:
                logger.debug('input dim cannot be sqrt and reshape. input dim: ', str(self.x_dim))
                raise
            x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1])
        # first conv block: 1 -> channel_1_num feature maps
        with tf.name_scope('conv1'):
            W_conv1 = weight_variable([self.conv_size, self.conv_size, 1, self.channel_1_num])
            b_conv1 = bias_variable([self.channel_1_num])
            h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        with tf.name_scope('pool1'):
            h_pool1 = max_pool(h_conv1, self.pool_size)
        # second conv block: channel_1_num -> channel_2_num feature maps
        with tf.name_scope('conv2'):
            W_conv2 = weight_variable([self.conv_size, self.conv_size, self.channel_1_num, self.channel_2_num])
            b_conv2 = bias_variable([self.channel_2_num])
            h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        with tf.name_scope('pool2'):
            h_pool2 = max_pool(h_conv2, self.pool_size)
        # after two poolings the spatial side is input_dim / pool_size**2
        last_dim = int(input_dim / (self.pool_size * self.pool_size))
        with tf.name_scope('fc1'):
            W_fc1 = weight_variable([last_dim * last_dim * self.channel_2_num, self.hidden_size])
            b_fc1 = bias_variable([self.hidden_size])
            h_pool2_flat = tf.reshape(h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        # dropout regularization on the hidden layer
        with tf.name_scope('dropout'):
            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
        # linear readout to y_dim logits
        with tf.name_scope('fc2'):
            W_fc2 = weight_variable([self.hidden_size, self.y_dim])
            b_fc2 = bias_variable([self.y_dim])
            y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        with tf.name_scope('loss'):
            cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = self.y, logits = y_conv))
        with tf.name_scope('adam_optimizer'):
            self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(cross_entropy)
        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(self.y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        return
def conv2d(x, W):
    """2-D convolution of x with kernel W, stride 1, SAME padding."""
    stride_spec = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, padding='SAME', strides=stride_spec)
def max_pool(x, pool_size):
    """Max-pool with a pool_size x pool_size window and matching stride."""
    spec = [1, pool_size, pool_size, 1]
    return tf.nn.max_pool(x, ksize=spec, strides=spec, padding='SAME')
def avg_pool(x, pool_size):
    """Average-pool with a pool_size x pool_size window and matching stride."""
    spec = [1, pool_size, pool_size, 1]
    return tf.nn.avg_pool(x, ksize=spec, strides=spec, padding='SAME')
def weight_variable(shape):
    """Weight Variable initialized from a truncated normal with stddev 0.1."""
    init_value = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_value)
def bias_variable(shape):
    """Bias Variable initialized to the constant 0.1."""
    init_value = tf.constant(0.1, shape=shape)
    return tf.Variable(init_value)
def main():
    """Download MNIST, build the graph, train for batch_num batches, and
    evaluate test-set accuracy every 100 steps and once at the end."""
    data_dir= '/tmp/tensorflow/mnist/input_data'
    mnist = input_data.read_data_sets(data_dir, one_hot=True)
    logger.debug('Mnist download data done.')
    # build the convolutional network graph
    mnist_network = MnistNetwork()
    mnist_network.build_network()
    logger.debug('Mnist build network done.')
    # write the graph definition to a temp dir (e.g. for tensorboard)
    graph_location = tempfile.mkdtemp()
    logger.debug('Saving graph to: %s', graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())
    test_acc = 0.0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch_num=200
        for i in range(batch_num):
            batch_size=50
            batch = mnist.train.next_batch(batch_size)
            # value fed to the keep_prob placeholder during training
            dropout_rate=0.5
            mnist_network.train_step.run(feed_dict={mnist_network.x: batch[0], mnist_network.y: batch[1], mnist_network.keep_prob: dropout_rate})
            # evaluate on the full test set every 100 batches
            if i % 100 == 0:
                test_acc = mnist_network.accuracy.eval(feed_dict={
                    mnist_network.x: mnist.test.images, mnist_network.y: mnist.test.labels, mnist_network.keep_prob: 1.0})
        # final test-set accuracy after training
        test_acc = mnist_network.accuracy.eval(feed_dict={
            mnist_network.x: mnist.test.images, mnist_network.y: mnist.test.labels, mnist_network.keep_prob: 1.0})
def generate_default_params():
    """Return the default hyper-parameter dictionary."""
    defaults = {
        'data_dir': '/tmp/tensorflow/mnist/input_data',
        'dropout_rate': 0.5,
        'channel_1_num': 32,
        'channel_2_num': 64,
        'conv_size': 5,
        'pool_size': 2,
        'hidden_size': 1024,
        'batch_size': 50,
        'batch_num': 200,
        'learning_rate': 1e-4,
    }
    return defaults
if __name__ == '__main__':
    try:
        # fall back to the built-in default hyper-parameters
        params = generate_default_params()
        logger.debug('params')
        logger.debug('params update')
        main()
    except:
        # log the full traceback, then re-raise so the process exits non-zero
        logger.exception('Got some exception in while loop in mnist.py')
        raise
| true | true |
f734e7ac84c8e5d7674c3e443378662983d9d4e5 | 386 | py | Python | demos/python/python_musl/demo.py | jessehui/occlum | 8a5f3033881c090340d678f2aecdca4ac6355bf4 | [
"BSD-3-Clause-Clear"
] | 928 | 2019-07-04T12:00:04.000Z | 2022-03-30T02:45:06.000Z | demos/python/python_musl/demo.py | jessehui/occlum | 8a5f3033881c090340d678f2aecdca4ac6355bf4 | [
"BSD-3-Clause-Clear"
] | 333 | 2019-07-01T07:42:49.000Z | 2022-03-31T10:53:19.000Z | demos/python/python_musl/demo.py | jessehui/occlum | 8a5f3033881c090340d678f2aecdca4ac6355bf4 | [
"BSD-3-Clause-Clear"
] | 161 | 2019-07-19T03:46:44.000Z | 2022-03-28T02:16:31.000Z | import pandas as pd
import numpy as np
from sklearn.datasets import dump_svmlight_file
# left-join the feature table onto the label table via the shared "id" column
labels_df = pd.read_csv("./dataset/input_label.csv")
features_df = pd.read_csv("./dataset/input.csv")
merged = pd.merge(labels_df, features_df, how='left', left_on='id', right_on='id')
# everything except "label" and "id" is a feature (np.setdiff1d sorts the names)
feature_columns = np.setdiff1d(merged.columns, ['label', 'id'])
X = merged[feature_columns]
y = merged.label
dump_svmlight_file(X, y, '/host/smvlight.dat', zero_based=True, multilabel=False)
| 29.692308 | 77 | 0.733161 | import pandas as pd
import numpy as np
from sklearn.datasets import dump_svmlight_file
# read the label table and the feature table produced by the dataset prep step
df1 = pd.read_csv("./dataset/input_label.csv")
df2 = pd.read_csv("./dataset/input.csv")
# left-join features onto labels using the shared "id" column
res = pd.merge(df1, df2, how='left', left_on='id', right_on='id')
# all columns except "label" and "id" are features (np.setdiff1d sorts names)
X = res[np.setdiff1d(res.columns,['label','id'])]
y = res.label
# write the merged matrix in svmlight/libsvm format for downstream training
dump_svmlight_file(X,y,'/host/smvlight.dat',zero_based=True,multilabel=False)
| true | true |
f734ea1c07870fb34749ffbc4233d57c196b4351 | 8,912 | py | Python | pgAdmin/pgadmin4/web/pgadmin/tools/sqleditor/tests/test_encoding_charset.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | pgAdmin/pgadmin4/web/pgadmin/tools/sqleditor/tests/test_encoding_charset.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | pgAdmin/pgadmin4/web/pgadmin/tools/sqleditor/tests/test_encoding_charset.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | # -*- coding: utf-8 -*-
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from pgadmin.utils.route import BaseTestGenerator
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from regression.python_test_utils import test_utils
import json
from pgadmin.utils import server_utils
import random
class TestEncodingCharset(BaseTestGenerator):
    """
    This class validates character support in pgAdmin4 for
    different PostgresDB encodings
    """
    # Server types on which setUp() calls skipTest (checked against the
    # connected server's reported 'type').
    skip_on_database = ['gpdb']

    # Each scenario creates a database with the given encoding/collation and
    # round-trips test_str through the query tool, asserting it comes back
    # unchanged.
    # NOTE(review): 'EUC_CN' and 'WIN1256' each appear twice with different
    # test strings — confirm the test generator tolerates duplicate scenario
    # names (later entries may shadow earlier ones in reported results).
    scenarios = [
        (
            'With Encoding UTF8',
            dict(
                db_encoding='UTF8',
                lc_collate='C',
                test_str='A'
            )),
        (
            'With Encoding EUC_CN',
            dict(
                db_encoding='EUC_CN',
                lc_collate='C',
                test_str='A'
            )),
        (
            'With Encoding SQL_ASCII',
            dict(
                db_encoding='SQL_ASCII',
                lc_collate='C',
                test_str='Tif'
            )),
        (
            'With Encoding LATIN1',
            dict(
                db_encoding='LATIN1',
                lc_collate='C',
                test_str='äöüßÑ'
            )),
        (
            'With Encoding LATIN2',
            dict(
                db_encoding='LATIN2',
                lc_collate='C',
                test_str='§'
            )),
        (
            'With Encoding LATIN9',
            dict(
                db_encoding='LATIN9',
                lc_collate='C',
                test_str='äöüß'
            )),
        (
            'With Encoding EUC_JIS_2004',
            dict(
                db_encoding='EUC_JIS_2004',
                lc_collate='C',
                test_str='じんぼはりんごをたべる'
            )),
        (
            'With Encoding WIN1256',
            dict(
                db_encoding='WIN1256',
                lc_collate='C',
                test_str='صباح الخير'
            )),
        (
            'With Encoding WIN866',
            dict(
                db_encoding='WIN866',
                lc_collate='C',
                test_str='Альтернативная'
            )),
        (
            'With Encoding WIN874',
            dict(
                db_encoding='WIN874',
                lc_collate='C',
                test_str='กลิ่นหอม'
            )),
        (
            'With Encoding WIN1250',
            dict(
                db_encoding='WIN1250',
                lc_collate='C',
                test_str='ŔÁÄÇ'
            )),
        (
            'With Encoding WIN1251',
            dict(
                db_encoding='WIN1251',
                lc_collate='C',
                test_str='ЖИЙЮ'
            )),
        (
            'With Encoding WIN1252',
            dict(
                db_encoding='WIN1252',
                lc_collate='C',
                test_str='ÆØÙü'
            )),
        (
            'With Encoding WIN1253',
            dict(
                db_encoding='WIN1253',
                lc_collate='C',
                test_str='ΨΪμΫ'
            )),
        (
            'With Encoding WIN1254',
            dict(
                db_encoding='WIN1254',
                lc_collate='C',
                test_str='ĞğØŠ'
            )),
        (
            'With Encoding WIN1255',
            dict(
                db_encoding='WIN1255',
                lc_collate='C',
                test_str='₪¥©¾'
            )),
        (
            'With Encoding WIN1256',
            dict(
                db_encoding='WIN1256',
                lc_collate='C',
                test_str='بؤغق'
            )),
        (
            'With Encoding WIN1257',
            dict(
                db_encoding='WIN1257',
                lc_collate='C',
                test_str='‰ķģž'
            )),
        (
            'With Encoding WIN1258',
            dict(
                db_encoding='WIN1258',
                lc_collate='C',
                test_str='₫SHYÑđ'
            )),
        (
            'With Encoding EUC_CN',
            dict(
                db_encoding='EUC_CN',
                lc_collate='C',
                test_str='汉字不灭'
            )),
        (
            'With Encoding EUC_JP',
            dict(
                db_encoding='EUC_JP',
                lc_collate='C',
                test_str='での日本'
            )),
        (
            'With Encoding EUC_KR',
            dict(
                db_encoding='EUC_KR',
                lc_collate='C',
                test_str='ㄱㄲㄴㄷ'
            )),
        (
            'With Encoding EUC_TW',
            dict(
                db_encoding='EUC_TW',
                lc_collate='C',
                test_str='中文'
            )),
        (
            'With Encoding ISO_8859_5',
            dict(
                db_encoding='ISO_8859_5',
                lc_collate='C',
                test_str='ЁЎФЮ'
            )),
        (
            'With Encoding ISO_8859_6',
            dict(
                db_encoding='ISO_8859_6',
                lc_collate='C',
                test_str='العَرَبِيَّة'
            )),
        (
            'With Encoding ISO_8859_7',
            dict(
                db_encoding='ISO_8859_7',
                lc_collate='C',
                test_str='ελληνικά'
            )),
        (
            'With Encoding ISO_8859_8',
            dict(
                db_encoding='ISO_8859_8',
                lc_collate='C',
                test_str='דבא'
            )),
        (
            'With Encoding KOI8R',
            dict(
                db_encoding='KOI8R',
                lc_collate='C',
                test_str='Альтернативная'
            )),
        (
            'With Encoding KOI8U',
            dict(
                db_encoding='KOI8U',
                lc_collate='C',
                test_str='українська'
            )),
    ]
    def setUp(self):
        """Create a scenario-specific database with the requested encoding,
        skipping when the connected server type is in skip_on_database."""
        # Random suffix keeps the database name unique across runs.
        self.encode_db_name = 'encoding_' + self.db_encoding + \
                              str(random.randint(10000, 65535))
        self.encode_sid = self.server_information['server_id']
        server_con = server_utils.connect_server(self, self.encode_sid)
        if hasattr(self, 'skip_on_database'):
            if 'data' in server_con and 'type' in server_con['data']:
                if server_con['data']['type'] in self.skip_on_database:
                    self.skipTest('cannot run in: %s' %
                                  server_con['data']['type'])
        self.encode_did = test_utils.create_database(
            self.server, self.encode_db_name,
            (self.db_encoding, self.lc_collate))
    def runTest(self):
        """Open a query tool on the encoded database, run SELECT E'<str>'
        and assert the string survives the round trip unchanged."""
        db_con = database_utils.connect_database(self,
                                                 test_utils.SERVER_GROUP,
                                                 self.encode_sid,
                                                 self.encode_did)
        if not db_con["info"] == "Database connected.":
            raise Exception("Could not connect to the database.")
        # Initialize query tool
        self.trans_id = str(random.randint(1, 9999999))
        url = '/datagrid/initialize/query_tool/{0}/{1}/{2}/{3}'\
            .format(self.trans_id, test_utils.SERVER_GROUP, self.encode_sid,
                    self.encode_did)
        response = self.tester.post(url)
        self.assertEqual(response.status_code, 200)
        # Check character
        url = "/sqleditor/query_tool/start/{0}".format(self.trans_id)
        sql = "select E'{0}';".format(self.test_str)
        response = self.tester.post(url, data=json.dumps({"sql": sql}),
                                    content_type='html/json')
        self.assertEqual(response.status_code, 200)
        # Poll for the async query result and compare against the input.
        url = '/sqleditor/poll/{0}'.format(self.trans_id)
        response = self.tester.get(url)
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.data.decode('utf-8'))
        self.assertEqual(response_data['data']['rows_fetched_to'], 1)
        result = response_data['data']['result'][0][0]
        self.assertEqual(result, self.test_str)
        database_utils.disconnect_database(self, self.encode_sid,
                                           self.encode_did)
    def tearDown(self):
        """Drop the scenario database created in setUp."""
        main_conn = test_utils.get_db_connection(
            self.server['db'],
            self.server['username'],
            self.server['db_password'],
            self.server['host'],
            self.server['port'],
            self.server['sslmode']
        )
        test_utils.drop_database(main_conn, self.encode_db_name)
| 30.520548 | 76 | 0.439408 |
With Encoding KOI8R',
dict(
db_encoding='KOI8R',
lc_collate='C',
test_str='Альтернативная'
)),
(
'With Encoding KOI8U',
dict(
db_encoding='KOI8U',
lc_collate='C',
test_str='українська'
)),
]
def setUp(self):
self.encode_db_name = 'encoding_' + self.db_encoding + \
str(random.randint(10000, 65535))
self.encode_sid = self.server_information['server_id']
server_con = server_utils.connect_server(self, self.encode_sid)
if hasattr(self, 'skip_on_database'):
if 'data' in server_con and 'type' in server_con['data']:
if server_con['data']['type'] in self.skip_on_database:
self.skipTest('cannot run in: %s' %
server_con['data']['type'])
self.encode_did = test_utils.create_database(
self.server, self.encode_db_name,
(self.db_encoding, self.lc_collate))
def runTest(self):
db_con = database_utils.connect_database(self,
test_utils.SERVER_GROUP,
self.encode_sid,
self.encode_did)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to the database.")
self.trans_id = str(random.randint(1, 9999999))
url = '/datagrid/initialize/query_tool/{0}/{1}/{2}/{3}'\
.format(self.trans_id, test_utils.SERVER_GROUP, self.encode_sid,
self.encode_did)
response = self.tester.post(url)
self.assertEqual(response.status_code, 200)
url = "/sqleditor/query_tool/start/{0}".format(self.trans_id)
sql = "select E'{0}';".format(self.test_str)
response = self.tester.post(url, data=json.dumps({"sql": sql}),
content_type='html/json')
self.assertEqual(response.status_code, 200)
url = '/sqleditor/poll/{0}'.format(self.trans_id)
response = self.tester.get(url)
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.data.decode('utf-8'))
self.assertEqual(response_data['data']['rows_fetched_to'], 1)
result = response_data['data']['result'][0][0]
self.assertEqual(result, self.test_str)
database_utils.disconnect_database(self, self.encode_sid,
self.encode_did)
def tearDown(self):
main_conn = test_utils.get_db_connection(
self.server['db'],
self.server['username'],
self.server['db_password'],
self.server['host'],
self.server['port'],
self.server['sslmode']
)
test_utils.drop_database(main_conn, self.encode_db_name)
| true | true |
f734ea202c7b17c7d2628d4129edacee942ccbf4 | 1,324 | py | Python | oscar/apps/dashboard/ranges/app.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | [
"BSD-3-Clause"
] | null | null | null | oscar/apps/dashboard/ranges/app.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | [
"BSD-3-Clause"
] | null | null | null | oscar/apps/dashboard/ranges/app.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | [
"BSD-3-Clause"
] | 1 | 2019-07-10T06:32:14.000Z | 2019-07-10T06:32:14.000Z | from django.conf.urls import patterns, url
from django.contrib.admin.views.decorators import staff_member_required
from django.utils.translation import ugettext_lazy as _
from oscar.core.application import Application
from oscar.apps.dashboard.ranges import views
from oscar.apps.dashboard.nav import register, Node
# Register a 'Ranges' entry in the dashboard navigation (position 70).
node = Node(_('Ranges'), 'dashboard:range-list')
register(node, 70)
class RangeDashboardApplication(Application):
    """Dashboard application wiring the offer-range CRUD and product views."""
    name = None

    list_view = views.RangeListView
    create_view = views.RangeCreateView
    update_view = views.RangeUpdateView
    delete_view = views.RangeDeleteView
    products_view = views.RangeProductListView

    def get_urls(self):
        """Return the post-processed URL patterns for range management."""
        url_conf = patterns(
            '',
            url(r'^$', self.list_view.as_view(), name='range-list'),
            url(r'^create/$', self.create_view.as_view(),
                name='range-create'),
            url(r'^(?P<pk>\d+)/$', self.update_view.as_view(),
                name='range-update'),
            url(r'^(?P<pk>\d+)/delete/$', self.delete_view.as_view(),
                name='range-delete'),
            url(r'^(?P<pk>\d+)/products/$', self.products_view.as_view(),
                name='range-products'),
        )
        return self.post_process_urls(url_conf)

    def get_url_decorator(self, url_name):
        """Every URL in this app is restricted to staff members."""
        return staff_member_required


application = RangeDashboardApplication()
from django.contrib.admin.views.decorators import staff_member_required
from django.utils.translation import ugettext_lazy as _
from oscar.core.application import Application
from oscar.apps.dashboard.ranges import views
from oscar.apps.dashboard.nav import register, Node
node = Node(_('Ranges'), 'dashboard:range-list')
register(node, 70)
class RangeDashboardApplication(Application):
name = None
list_view = views.RangeListView
create_view = views.RangeCreateView
update_view = views.RangeUpdateView
delete_view = views.RangeDeleteView
products_view = views.RangeProductListView
def get_urls(self):
urlpatterns = patterns('',
url(r'^$', self.list_view.as_view(), name='range-list'),
url(r'^create/$', self.create_view.as_view(), name='range-create'),
url(r'^(?P<pk>\d+)/$', self.update_view.as_view(), name='range-update'),
url(r'^(?P<pk>\d+)/delete/$', self.delete_view.as_view(), name='range-delete'),
url(r'^(?P<pk>\d+)/products/$', self.products_view.as_view(), name='range-products'),
)
return self.post_process_urls(urlpatterns)
def get_url_decorator(self, url_name):
return staff_member_required
application = RangeDashboardApplication()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.